ifpoll: Reorganize TX/RX polling sysctl tree
[dragonfly.git] sys/net/if_poll.c
/*-
 * Copyright (c) 2001-2002 Luigi Rizzo
 *
 * Supported by: the Xorp Project (www.xorp.org)
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/kern/kern_poll.c,v 1.2.2.4 2002/06/27 23:26:33 luigi Exp $
 */
#include "opt_ifpoll.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/malloc.h>
#include <sys/serialize.h>
#include <sys/socket.h>
#include <sys/sysctl.h>

#include <sys/thread2.h>
#include <sys/msgport2.h>

#include <machine/atomic.h>
#include <machine/clock.h>
#include <machine/smp.h>

#include <net/if.h>
#include <net/if_poll.h>
#include <net/netmsg2.h>
/*
 * Polling support for network device drivers.
 *
 * Drivers which support this feature try to register one status polling
 * handler and several TX/RX polling handlers with the polling code.
 * If interface's if_qpoll is called with non-NULL second argument, then
 * a register operation is requested, else a deregister operation is
 * requested. If the requested operation is "register", driver should
 * setup the ifpoll_info passed in according to its own needs:
 *   ifpoll_info.ifpi_status.status_func == NULL
 *     No status polling handler will be installed on CPU(0)
 *   ifpoll_info.ifpi_rx[n].poll_func == NULL
 *     No RX polling handler will be installed on CPU(n)
 *   ifpoll_info.ifpi_tx[n].poll_func == NULL
 *     No TX polling handler will be installed on CPU(n)
 *
 * All of the registered polling handlers are called only if the interface
 * is marked as 'IFF_RUNNING and IFF_NPOLLING'. However, the interface's
 * register and deregister function (ifnet.if_qpoll) will be called even
 * if interface is not marked with 'IFF_RUNNING'.
 *
 * If registration is successful, the driver must disable interrupts,
 * and further I/O is performed through the TX/RX polling handlers, which
 * are invoked (at least once per clock tick) with 3 arguments: the "arg"
 * passed at register time, a struct ifnet pointer, and a "count" limit.
 * The registered serializer will be held before calling the related
 * polling handler.
 *
 * The count limit specifies how much work the handler can do during the
 * call -- typically this is the number of packets to be received, or
 * transmitted, etc. (drivers are free to interpret this number, as long
 * as the max time spent in the function grows roughly linearly with the
 * count).
 *
 * A second variable controls the sharing of CPU between polling/kernel
 * network processing, and other activities (typically userlevel tasks):
 * net.ifpoll.{rxX,txX}.user_frac (between 0 and 100, default 50) sets the
 * share of CPU allocated to user tasks. CPU is allocated proportionally
 * to the shares, by dynamically adjusting the "count" (poll_burst).
 *
 * Other parameters should be left to their default values.
 * The following constraints hold:
 *
 *	1 <= poll_burst <= poll_burst_max
 *	1 <= poll_each_burst <= poll_burst_max
 *	MIN_POLL_BURST_MAX <= poll_burst_max <= MAX_POLL_BURST_MAX
 */
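/*
 * For illustration only: a minimal sketch of the if_qpoll routine a driver
 * might provide, assuming a hypothetical "foo" driver with a single TX/RX
 * ring handled on CPU 0.  The handler names and softc fields below are
 * invented for this sketch; they are not part of this file.
 *
 *	static void
 *	foo_qpoll(struct ifnet *ifp, struct ifpoll_info *info)
 *	{
 *		struct foo_softc *sc = ifp->if_softc;
 *
 *		if (info != NULL) {
 *			// Register: fill in the handlers we support.
 *			info->ifpi_status.status_func = foo_poll_status;
 *			info->ifpi_status.serializer = &sc->foo_serialize;
 *
 *			info->ifpi_rx[0].poll_func = foo_poll_rx;
 *			info->ifpi_rx[0].arg = sc;
 *			info->ifpi_rx[0].serializer = &sc->foo_serialize;
 *
 *			info->ifpi_tx[0].poll_func = foo_poll_tx;
 *			info->ifpi_tx[0].arg = sc;
 *			info->ifpi_tx[0].serializer = &sc->foo_serialize;
 *
 *			// Per the contract above, disable interrupts.
 *			foo_disable_intr(sc);
 *		} else {
 *			// Deregister: resume interrupt-driven operation.
 *			foo_enable_intr(sc);
 *		}
 *	}
 */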
#define IFPOLL_LIST_LEN		128
#define IFPOLL_FREQ_MAX		30000

#define MIN_IOPOLL_BURST_MAX	10
#define MAX_IOPOLL_BURST_MAX	1000
#define IOPOLL_BURST_MAX	150	/* good for 100Mbit net and HZ=1000 */

#define IOPOLL_EACH_BURST	5

#define IFPOLL_FREQ_DEFAULT	2000
#define IOPOLL_FREQ_DEFAULT	IFPOLL_FREQ_DEFAULT
#define STPOLL_FREQ_DEFAULT	100

#define IFPOLL_TXFRAC_DEFAULT	1
#define IFPOLL_STFRAC_DEFAULT	20

#define IFPOLL_RX		0x1
#define IFPOLL_TX		0x2
union ifpoll_time {
	struct timeval		tv;
	uint64_t		tsc;
};

struct iopoll_rec {
	struct lwkt_serialize	*serializer;
	struct ifnet		*ifp;
	void			*arg;
	ifpoll_iofn_t		poll_func;
};

struct iopoll_ctx {
#ifdef IFPOLL_MULTI_SYSTIMER
	struct systimer		pollclock;
#endif

	union ifpoll_time	prev_t;
	uint32_t		short_ticks;		/* statistics */
	uint32_t		lost_polls;		/* statistics */
	uint32_t		suspect;		/* statistics */
	uint32_t		stalled;		/* statistics */
	uint32_t		pending_polls;		/* state */

	struct netmsg		poll_netmsg;

	int			poll_cpuid;
#ifdef IFPOLL_MULTI_SYSTIMER
	int			pollhz;			/* tunable */
#else
	int			poll_type;		/* IFPOLL_{RX,TX} */
#endif
	uint32_t		phase;			/* state */
	int			residual_burst;		/* state */
	uint32_t		poll_each_burst;	/* tunable */
	union ifpoll_time	poll_start_t;		/* state */

	uint32_t		poll_handlers;		/* next free entry in pr[]. */
	struct iopoll_rec	pr[IFPOLL_LIST_LEN];

	struct netmsg		poll_more_netmsg;

	uint32_t		poll_burst;		/* state */
	uint32_t		poll_burst_max;		/* tunable */
	uint32_t		user_frac;		/* tunable */
	uint32_t		kern_frac;		/* state */

	struct sysctl_ctx_list	poll_sysctl_ctx;
	struct sysctl_oid	*poll_sysctl_tree;
} __cachealign;

struct iopoll_comm {
	struct sysctl_ctx_list	sysctl_ctx;
	struct sysctl_oid	*sysctl_tree;
} __cachealign;

struct stpoll_rec {
	struct lwkt_serialize	*serializer;
	struct ifnet		*ifp;
	ifpoll_stfn_t		status_func;
};

struct stpoll_ctx {
#ifdef IFPOLL_MULTI_SYSTIMER
	struct systimer		pollclock;
#endif

	struct netmsg		poll_netmsg;

#ifdef IFPOLL_MULTI_SYSTIMER
	int			pollhz;			/* tunable */
#endif
	uint32_t		poll_handlers;		/* next free entry in pr[]. */
	struct stpoll_rec	pr[IFPOLL_LIST_LEN];

	struct sysctl_ctx_list	poll_sysctl_ctx;
	struct sysctl_oid	*poll_sysctl_tree;
};

struct iopoll_sysctl_netmsg {
	struct netmsg		nmsg;
	struct iopoll_ctx	*ctx;
};

#ifndef IFPOLL_MULTI_SYSTIMER

struct ifpoll_data {
	struct systimer		clock;
	int			txfrac_count;
	int			stfrac_count;
	u_int			tx_cpumask;
	u_int			rx_cpumask;
} __cachealign;

#endif
static struct stpoll_ctx	stpoll_context;
static struct iopoll_comm	*iopoll_common[IFPOLL_CTX_MAX];
static struct iopoll_ctx	*rxpoll_context[IFPOLL_CTX_MAX];
static struct iopoll_ctx	*txpoll_context[IFPOLL_CTX_MAX];

SYSCTL_NODE(_net, OID_AUTO, ifpoll, CTLFLAG_RW, 0,
	    "Network device polling parameters");

static int	ifpoll_ncpus = IFPOLL_CTX_MAX;

static int	iopoll_burst_max = IOPOLL_BURST_MAX;
static int	iopoll_each_burst = IOPOLL_EACH_BURST;

TUNABLE_INT("net.ifpoll.burst_max", &iopoll_burst_max);
TUNABLE_INT("net.ifpoll.each_burst", &iopoll_each_burst);

#ifdef IFPOLL_MULTI_SYSTIMER

static int	stpoll_hz = STPOLL_FREQ_DEFAULT;
static int	iopoll_hz = IOPOLL_FREQ_DEFAULT;

TUNABLE_INT("net.ifpoll.stpoll_hz", &stpoll_hz);
TUNABLE_INT("net.ifpoll.iopoll_hz", &iopoll_hz);

#else	/* !IFPOLL_MULTI_SYSTIMER */

static struct ifpoll_data ifpoll0;
static int	ifpoll_pollhz = IFPOLL_FREQ_DEFAULT;
static int	ifpoll_stfrac = IFPOLL_STFRAC_DEFAULT;
static int	ifpoll_txfrac = IFPOLL_TXFRAC_DEFAULT;
static int	ifpoll_handlers;

TUNABLE_INT("net.ifpoll.pollhz", &ifpoll_pollhz);
TUNABLE_INT("net.ifpoll.status_frac", &ifpoll_stfrac);
TUNABLE_INT("net.ifpoll.tx_frac", &ifpoll_txfrac);

static void	sysctl_ifpollhz_handler(struct netmsg *);
static int	sysctl_ifpollhz(SYSCTL_HANDLER_ARGS);

SYSCTL_PROC(_net_ifpoll, OID_AUTO, pollhz, CTLTYPE_INT | CTLFLAG_RW,
	    0, 0, sysctl_ifpollhz, "I", "Polling frequency");
SYSCTL_INT(_net_ifpoll, OID_AUTO, tx_frac, CTLFLAG_RW,
	   &ifpoll_txfrac, 0, "Every this many cycles poll transmit");
SYSCTL_INT(_net_ifpoll, OID_AUTO, st_frac, CTLFLAG_RW,
	   &ifpoll_stfrac, 0, "Every this many cycles poll status");

#endif	/* IFPOLL_MULTI_SYSTIMER */
void		ifpoll_init_pcpu(int);

#ifndef IFPOLL_MULTI_SYSTIMER
static void	ifpoll_start_handler(struct netmsg *);
static void	ifpoll_stop_handler(struct netmsg *);
static void	ifpoll_handler_addevent(void);
static void	ifpoll_handler_delevent(void);
static void	ifpoll_ipi_handler(void *, int);
static void	ifpoll_systimer(systimer_t, struct intrframe *);
#endif

static void	ifpoll_register_handler(struct netmsg *);
static void	ifpoll_deregister_handler(struct netmsg *);

/*
 * Status polling
 */
static void	stpoll_init(void);
static void	stpoll_handler(struct netmsg *);
static void	stpoll_clock(struct stpoll_ctx *);
#ifdef IFPOLL_MULTI_SYSTIMER
static void	stpoll_systimer(systimer_t, struct intrframe *);
#endif
static int	stpoll_register(struct ifnet *, const struct ifpoll_status *);
static int	stpoll_deregister(struct ifnet *);

#ifdef IFPOLL_MULTI_SYSTIMER
static void	sysctl_stpollhz_handler(struct netmsg *);
static int	sysctl_stpollhz(SYSCTL_HANDLER_ARGS);
#endif

/*
 * RX/TX polling
 */
static struct iopoll_ctx *iopoll_ctx_create(int, int);
static struct iopoll_comm *iopoll_comm_create(int);
static void	iopoll_init(int);
static void	iopoll_handler(struct netmsg *);
static void	iopollmore_handler(struct netmsg *);
static void	iopoll_clock(struct iopoll_ctx *);
#ifdef IFPOLL_MULTI_SYSTIMER
static void	iopoll_systimer(systimer_t, struct intrframe *);
#endif
static int	iopoll_register(struct ifnet *, struct iopoll_ctx *,
		    const struct ifpoll_io *);
static int	iopoll_deregister(struct ifnet *, struct iopoll_ctx *);

static void	iopoll_add_sysctl(struct sysctl_ctx_list *,
		    struct sysctl_oid_list *, struct iopoll_ctx *);
#ifdef IFPOLL_MULTI_SYSTIMER
static void	sysctl_iopollhz_handler(struct netmsg *);
static int	sysctl_iopollhz(SYSCTL_HANDLER_ARGS);
#endif
static void	sysctl_burstmax_handler(struct netmsg *);
static int	sysctl_burstmax(SYSCTL_HANDLER_ARGS);
static void	sysctl_eachburst_handler(struct netmsg *);
static int	sysctl_eachburst(SYSCTL_HANDLER_ARGS);
static __inline void
ifpoll_sendmsg_oncpu(struct netmsg *msg)
{
	if (msg->nm_lmsg.ms_flags & MSGF_DONE)
		ifnet_sendmsg(&msg->nm_lmsg, mycpuid);
}

static __inline void
sched_stpoll(struct stpoll_ctx *st_ctx)
{
	ifpoll_sendmsg_oncpu(&st_ctx->poll_netmsg);
}

static __inline void
sched_iopoll(struct iopoll_ctx *io_ctx)
{
	ifpoll_sendmsg_oncpu(&io_ctx->poll_netmsg);
}

static __inline void
sched_iopollmore(struct iopoll_ctx *io_ctx)
{
	ifpoll_sendmsg_oncpu(&io_ctx->poll_more_netmsg);
}

static __inline void
ifpoll_time_get(union ifpoll_time *t)
{
	if (tsc_present)
		t->tsc = rdtsc();
	else
		microuptime(&t->tv);
}

/* Return time diff in us */
static __inline int
ifpoll_time_diff(const union ifpoll_time *s, const union ifpoll_time *e)
{
	if (tsc_present) {
		return (((e->tsc - s->tsc) * 1000000) / tsc_frequency);
	} else {
		return ((e->tv.tv_usec - s->tv.tv_usec) +
			(e->tv.tv_sec - s->tv.tv_sec) * 1000000);
	}
}
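/*
 * Numerical example of the TSC branch above (figures illustrative only):
 * with tsc_frequency at 2.4GHz, a delta of 600000 TSC ticks yields
 * 600000 * 1000000 / 2400000000 = 250us.
 */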
/*
 * Initialize per-cpu qpolling(4) context. Called from kern_clock.c:
 */
void
ifpoll_init_pcpu(int cpuid)
{
	if (cpuid >= IFPOLL_CTX_MAX) {
		return;
	} else if (cpuid == 0) {
		if (ifpoll_ncpus > ncpus)
			ifpoll_ncpus = ncpus;
		kprintf("ifpoll_ncpus %d\n", ifpoll_ncpus);

#ifndef IFPOLL_MULTI_SYSTIMER
		systimer_init_periodic_nq(&ifpoll0.clock,
					  ifpoll_systimer, NULL, 1);
#endif

		stpoll_init();
	}
	iopoll_init(cpuid);
}
#ifndef IFPOLL_MULTI_SYSTIMER

static void
ifpoll_ipi_handler(void *arg __unused, int poll)
{
	KKASSERT(mycpuid < ifpoll_ncpus);

	if (poll & IFPOLL_TX)
		iopoll_clock(txpoll_context[mycpuid]);
	if (poll & IFPOLL_RX)
		iopoll_clock(rxpoll_context[mycpuid]);
}

static void
ifpoll_systimer(systimer_t info __unused, struct intrframe *frame __unused)
{
	uint32_t cpumask = 0;

	KKASSERT(mycpuid == 0);

	if (ifpoll0.stfrac_count-- == 0) {
		ifpoll0.stfrac_count = ifpoll_stfrac;
		stpoll_clock(&stpoll_context);
	}

	if (ifpoll0.txfrac_count-- == 0) {
		ifpoll0.txfrac_count = ifpoll_txfrac;

		/* TODO: We may try to piggyback TX on RX */
		cpumask = smp_active_mask & ifpoll0.tx_cpumask;
		if (cpumask != 0) {
			lwkt_send_ipiq2_mask(cpumask, ifpoll_ipi_handler,
					     NULL, IFPOLL_TX);
		}
	}

	cpumask = smp_active_mask & ifpoll0.rx_cpumask;
	if (cpumask != 0) {
		lwkt_send_ipiq2_mask(cpumask, ifpoll_ipi_handler,
				     NULL, IFPOLL_RX);
	}
}
static void
ifpoll_start_handler(struct netmsg *nmsg)
{
	KKASSERT(&curthread->td_msgport == ifnet_portfn(0));

	kprintf("ifpoll: start\n");
	systimer_adjust_periodic(&ifpoll0.clock, ifpoll_pollhz);
	lwkt_replymsg(&nmsg->nm_lmsg, 0);
}

static void
ifpoll_stop_handler(struct netmsg *nmsg)
{
	KKASSERT(&curthread->td_msgport == ifnet_portfn(0));

	kprintf("ifpoll: stop\n");
	systimer_adjust_periodic(&ifpoll0.clock, 1);
	lwkt_replymsg(&nmsg->nm_lmsg, 0);
}

static void
ifpoll_handler_addevent(void)
{
	if (atomic_fetchadd_int(&ifpoll_handlers, 1) == 0) {
		struct netmsg *nmsg;

		/* Start systimer */
		nmsg = kmalloc(sizeof(*nmsg), M_LWKTMSG, M_WAITOK);
		netmsg_init(nmsg, &netisr_afree_rport, 0, ifpoll_start_handler);
		ifnet_sendmsg(&nmsg->nm_lmsg, 0);
	}
}

static void
ifpoll_handler_delevent(void)
{
	KKASSERT(ifpoll_handlers > 0);
	if (atomic_fetchadd_int(&ifpoll_handlers, -1) == 1) {
		struct netmsg *nmsg;

		/* Stop systimer */
		nmsg = kmalloc(sizeof(*nmsg), M_LWKTMSG, M_WAITOK);
		netmsg_init(nmsg, &netisr_afree_rport, 0, ifpoll_stop_handler);
		ifnet_sendmsg(&nmsg->nm_lmsg, 0);
	}
}
static void
sysctl_ifpollhz_handler(struct netmsg *nmsg)
{
	KKASSERT(&curthread->td_msgport == ifnet_portfn(0));

	/*
	 * If there is no handler registered, don't adjust polling
	 * systimer frequency; polling systimer frequency will be
	 * adjusted once there is a registered handler.
	 */
	ifpoll_pollhz = nmsg->nm_lmsg.u.ms_result;
	if (ifpoll_handlers)
		systimer_adjust_periodic(&ifpoll0.clock, ifpoll_pollhz);

	lwkt_replymsg(&nmsg->nm_lmsg, 0);
}

static int
sysctl_ifpollhz(SYSCTL_HANDLER_ARGS)
{
	struct netmsg nmsg;
	int error, phz;

	phz = ifpoll_pollhz;
	error = sysctl_handle_int(oidp, &phz, 0, req);
	if (error || req->newptr == NULL)
		return error;
	if (phz <= 0)
		return EINVAL;
	else if (phz > IFPOLL_FREQ_MAX)
		phz = IFPOLL_FREQ_MAX;

	netmsg_init(&nmsg, &curthread->td_msgport, MSGF_MPSAFE,
		    sysctl_ifpollhz_handler);
	nmsg.nm_lmsg.u.ms_result = phz;

	return ifnet_domsg(&nmsg.nm_lmsg, 0);
}

#endif	/* !IFPOLL_MULTI_SYSTIMER */
int
ifpoll_register(struct ifnet *ifp)
{
	struct ifpoll_info info;
	struct netmsg nmsg;
	int error;

	if (ifp->if_qpoll == NULL) {
		/* Device does not support polling */
		return EOPNOTSUPP;
	}

	/*
	 * Attempt to register. Interlock with IFF_NPOLLING.
	 */

	ifnet_serialize_all(ifp);

	if (ifp->if_flags & IFF_NPOLLING) {
		/* Already polling */
		ifnet_deserialize_all(ifp);
		return EBUSY;
	}

	bzero(&info, sizeof(info));
	info.ifpi_ifp = ifp;

	ifp->if_flags |= IFF_NPOLLING;
	ifp->if_qpoll(ifp, &info);

	ifnet_deserialize_all(ifp);

	netmsg_init(&nmsg, &curthread->td_msgport, MSGF_MPSAFE,
		    ifpoll_register_handler);
	nmsg.nm_lmsg.u.ms_resultp = &info;

	error = ifnet_domsg(&nmsg.nm_lmsg, 0);
	if (error) {
		if (!ifpoll_deregister(ifp)) {
			if_printf(ifp, "ifpoll_register: "
				  "ifpoll_deregister failed!\n");
		}
	}
	return error;
}

int
ifpoll_deregister(struct ifnet *ifp)
{
	struct netmsg nmsg;
	int error;

	if (ifp->if_qpoll == NULL)
		return EOPNOTSUPP;

	ifnet_serialize_all(ifp);

	if ((ifp->if_flags & IFF_NPOLLING) == 0) {
		ifnet_deserialize_all(ifp);
		return EINVAL;
	}
	ifp->if_flags &= ~IFF_NPOLLING;

	ifnet_deserialize_all(ifp);

	netmsg_init(&nmsg, &curthread->td_msgport, MSGF_MPSAFE,
		    ifpoll_deregister_handler);
	nmsg.nm_lmsg.u.ms_resultp = ifp;

	error = ifnet_domsg(&nmsg.nm_lmsg, 0);
	if (!error) {
		ifnet_serialize_all(ifp);
		ifp->if_qpoll(ifp, NULL);
		ifnet_deserialize_all(ifp);
	}
	return error;
}
static void
ifpoll_register_handler(struct netmsg *nmsg)
{
	const struct ifpoll_info *info = nmsg->nm_lmsg.u.ms_resultp;
	int cpuid = mycpuid, nextcpu;
	int error;

	KKASSERT(cpuid < ifpoll_ncpus);
	KKASSERT(&curthread->td_msgport == ifnet_portfn(cpuid));

	if (cpuid == 0) {
		error = stpoll_register(info->ifpi_ifp, &info->ifpi_status);
		if (error)
			goto failed;
	}

	error = iopoll_register(info->ifpi_ifp, rxpoll_context[cpuid],
				&info->ifpi_rx[cpuid]);
	if (error)
		goto failed;

	error = iopoll_register(info->ifpi_ifp, txpoll_context[cpuid],
				&info->ifpi_tx[cpuid]);
	if (error)
		goto failed;

	nextcpu = cpuid + 1;
	if (nextcpu < ifpoll_ncpus)
		ifnet_forwardmsg(&nmsg->nm_lmsg, nextcpu);
	else
		lwkt_replymsg(&nmsg->nm_lmsg, 0);
	return;
failed:
	lwkt_replymsg(&nmsg->nm_lmsg, error);
}

static void
ifpoll_deregister_handler(struct netmsg *nmsg)
{
	struct ifnet *ifp = nmsg->nm_lmsg.u.ms_resultp;
	int cpuid = mycpuid, nextcpu;

	KKASSERT(cpuid < ifpoll_ncpus);
	KKASSERT(&curthread->td_msgport == ifnet_portfn(cpuid));

	/* Ignore errors */
	if (cpuid == 0)
		stpoll_deregister(ifp);
	iopoll_deregister(ifp, rxpoll_context[cpuid]);
	iopoll_deregister(ifp, txpoll_context[cpuid]);

	nextcpu = cpuid + 1;
	if (nextcpu < ifpoll_ncpus)
		ifnet_forwardmsg(&nmsg->nm_lmsg, nextcpu);
	else
		lwkt_replymsg(&nmsg->nm_lmsg, 0);
}
static void
stpoll_init(void)
{
	struct stpoll_ctx *st_ctx = &stpoll_context;

#ifdef IFPOLL_MULTI_SYSTIMER
	st_ctx->pollhz = stpoll_hz;
#endif

	sysctl_ctx_init(&st_ctx->poll_sysctl_ctx);
	st_ctx->poll_sysctl_tree = SYSCTL_ADD_NODE(&st_ctx->poll_sysctl_ctx,
				   SYSCTL_STATIC_CHILDREN(_net_ifpoll),
				   OID_AUTO, "status", CTLFLAG_RD, 0, "");

#ifdef IFPOLL_MULTI_SYSTIMER
	SYSCTL_ADD_PROC(&st_ctx->poll_sysctl_ctx,
			SYSCTL_CHILDREN(st_ctx->poll_sysctl_tree),
			OID_AUTO, "pollhz", CTLTYPE_INT | CTLFLAG_RW,
			st_ctx, 0, sysctl_stpollhz, "I",
			"Status polling frequency");
#endif

	SYSCTL_ADD_UINT(&st_ctx->poll_sysctl_ctx,
			SYSCTL_CHILDREN(st_ctx->poll_sysctl_tree),
			OID_AUTO, "handlers", CTLFLAG_RD,
			&st_ctx->poll_handlers, 0,
			"Number of registered status poll handlers");

	netmsg_init(&st_ctx->poll_netmsg, &netisr_adone_rport, MSGF_MPSAFE,
		    stpoll_handler);

#ifdef IFPOLL_MULTI_SYSTIMER
	systimer_init_periodic_nq(&st_ctx->pollclock,
				  stpoll_systimer, st_ctx, 1);
#endif
}
#ifdef IFPOLL_MULTI_SYSTIMER

static void
sysctl_stpollhz_handler(struct netmsg *msg)
{
	struct stpoll_ctx *st_ctx = &stpoll_context;

	KKASSERT(&curthread->td_msgport == ifnet_portfn(0));

	/*
	 * If there is no handler registered, don't adjust polling
	 * systimer frequency; polling systimer frequency will be
	 * adjusted once there is a registered handler.
	 */
	st_ctx->pollhz = msg->nm_lmsg.u.ms_result;
	if (st_ctx->poll_handlers)
		systimer_adjust_periodic(&st_ctx->pollclock, st_ctx->pollhz);

	lwkt_replymsg(&msg->nm_lmsg, 0);
}

static int
sysctl_stpollhz(SYSCTL_HANDLER_ARGS)
{
	struct stpoll_ctx *st_ctx = arg1;
	struct netmsg msg;
	int error, phz;

	phz = st_ctx->pollhz;
	error = sysctl_handle_int(oidp, &phz, 0, req);
	if (error || req->newptr == NULL)
		return error;
	if (phz <= 0)
		return EINVAL;
	else if (phz > IFPOLL_FREQ_MAX)
		phz = IFPOLL_FREQ_MAX;

	netmsg_init(&msg, &curthread->td_msgport, MSGF_MPSAFE,
		    sysctl_stpollhz_handler);
	msg.nm_lmsg.u.ms_result = phz;

	return ifnet_domsg(&msg.nm_lmsg, 0);
}

#endif	/* IFPOLL_MULTI_SYSTIMER */
/*
 * stpoll_handler is scheduled by sched_stpoll when appropriate, typically
 * once per polling systimer tick.
 */
static void
stpoll_handler(struct netmsg *msg)
{
	struct stpoll_ctx *st_ctx = &stpoll_context;
	struct thread *td = curthread;
	int i, poll_hz;

	KKASSERT(&td->td_msgport == ifnet_portfn(0));

	crit_enter_quick(td);

	/* Reply ASAP */
	lwkt_replymsg(&msg->nm_lmsg, 0);

	if (st_ctx->poll_handlers == 0) {
		crit_exit_quick(td);
		return;
	}

#ifdef IFPOLL_MULTI_SYSTIMER
	poll_hz = st_ctx->pollhz;
#else
	poll_hz = ifpoll_pollhz / (ifpoll_stfrac + 1);
#endif

	for (i = 0; i < st_ctx->poll_handlers; ++i) {
		const struct stpoll_rec *rec = &st_ctx->pr[i];
		struct ifnet *ifp = rec->ifp;

		if (!lwkt_serialize_try(rec->serializer))
			continue;

		if ((ifp->if_flags & (IFF_RUNNING | IFF_NPOLLING)) ==
		    (IFF_RUNNING | IFF_NPOLLING))
			rec->status_func(ifp, poll_hz);

		lwkt_serialize_exit(rec->serializer);
	}

	crit_exit_quick(td);
}
/*
 * Hook from status poll systimer. Tries to schedule a status poll.
 */
static void
stpoll_clock(struct stpoll_ctx *st_ctx)
{
	globaldata_t gd = mycpu;

	KKASSERT(gd->gd_cpuid == 0);

	if (st_ctx->poll_handlers == 0)
		return;

	crit_enter_gd(gd);
	sched_stpoll(st_ctx);
	crit_exit_gd(gd);
}

#ifdef IFPOLL_MULTI_SYSTIMER
static void
stpoll_systimer(systimer_t info, struct intrframe *frame __unused)
{
	stpoll_clock(info->data);
}
#endif
static int
stpoll_register(struct ifnet *ifp, const struct ifpoll_status *st_rec)
{
	struct stpoll_ctx *st_ctx = &stpoll_context;
	int error;

	KKASSERT(&curthread->td_msgport == ifnet_portfn(0));

	if (st_rec->status_func == NULL)
		return 0;

	/*
	 * Check if there is room.
	 */
	if (st_ctx->poll_handlers >= IFPOLL_LIST_LEN) {
		/*
		 * List full, cannot register more entries.
		 * This should never happen; if it does, it is probably a
		 * broken driver trying to register multiple times. Checking
		 * this at runtime is expensive, and won't solve the problem
		 * anyways, so just report a few times and then give up.
		 */
		static int verbose = 10; /* XXX */

		if (verbose > 0) {
			kprintf("status poll handlers list full, "
				"maybe a broken driver?\n");
			verbose--;
		}
		error = ENOENT;
	} else {
		struct stpoll_rec *rec = &st_ctx->pr[st_ctx->poll_handlers];

		rec->ifp = ifp;
		rec->serializer = st_rec->serializer;
		rec->status_func = st_rec->status_func;

		st_ctx->poll_handlers++;

#ifdef IFPOLL_MULTI_SYSTIMER
		if (st_ctx->poll_handlers == 1) {
			systimer_adjust_periodic(&st_ctx->pollclock,
						 st_ctx->pollhz);
		}
#else
		ifpoll_handler_addevent();
#endif
		error = 0;
	}
	return error;
}

static int
stpoll_deregister(struct ifnet *ifp)
{
	struct stpoll_ctx *st_ctx = &stpoll_context;
	int i, error;

	KKASSERT(&curthread->td_msgport == ifnet_portfn(0));

	for (i = 0; i < st_ctx->poll_handlers; ++i) {
		if (st_ctx->pr[i].ifp == ifp) /* Found it */
			break;
	}
	if (i == st_ctx->poll_handlers) {
		kprintf("stpoll_deregister: ifp not found!!!\n");
		error = ENOENT;
	} else {
		st_ctx->poll_handlers--;
		if (i < st_ctx->poll_handlers) {
			/* Last entry replaces this one. */
			st_ctx->pr[i] = st_ctx->pr[st_ctx->poll_handlers];
		}

#ifdef IFPOLL_MULTI_SYSTIMER
		if (st_ctx->poll_handlers == 0)
			systimer_adjust_periodic(&st_ctx->pollclock, 1);
#else
		ifpoll_handler_delevent();
#endif
		error = 0;
	}
	return error;
}
#ifndef IFPOLL_MULTI_SYSTIMER
static __inline int
iopoll_hz(struct iopoll_ctx *io_ctx)
{
	int poll_hz;

	poll_hz = ifpoll_pollhz;
	if (io_ctx->poll_type == IFPOLL_TX)
		poll_hz /= ifpoll_txfrac + 1;
	return poll_hz;
}
#endif

static __inline void
iopoll_reset_state(struct iopoll_ctx *io_ctx)
{
	crit_enter();
	io_ctx->poll_burst = 5;
	io_ctx->pending_polls = 0;
	io_ctx->residual_burst = 0;
	io_ctx->phase = 0;
	io_ctx->kern_frac = 0;
	bzero(&io_ctx->poll_start_t, sizeof(io_ctx->poll_start_t));
	bzero(&io_ctx->prev_t, sizeof(io_ctx->prev_t));
	crit_exit();
}
static void
iopoll_init(int cpuid)
{
	KKASSERT(cpuid < IFPOLL_CTX_MAX);

	/* Create iopoll_comm context before TX/RX poll context */
	iopoll_common[cpuid] = iopoll_comm_create(cpuid);

	rxpoll_context[cpuid] = iopoll_ctx_create(cpuid, IFPOLL_RX);
	txpoll_context[cpuid] = iopoll_ctx_create(cpuid, IFPOLL_TX);
}

static struct iopoll_comm *
iopoll_comm_create(int cpuid)
{
	struct iopoll_comm *comm;
	char cpuid_str[16];

	comm = kmalloc(sizeof(*comm), M_DEVBUF, M_WAITOK | M_ZERO);

	ksnprintf(cpuid_str, sizeof(cpuid_str), "%d", cpuid);

	sysctl_ctx_init(&comm->sysctl_ctx);
	comm->sysctl_tree = SYSCTL_ADD_NODE(&comm->sysctl_ctx,
			    SYSCTL_STATIC_CHILDREN(_net_ifpoll),
			    OID_AUTO, cpuid_str, CTLFLAG_RD, 0, "");

	return comm;
}

static struct iopoll_ctx *
iopoll_ctx_create(int cpuid, int poll_type)
{
	struct iopoll_comm *comm;
	struct iopoll_ctx *io_ctx;
	const char *poll_type_str;

	KKASSERT(poll_type == IFPOLL_RX || poll_type == IFPOLL_TX);

	/*
	 * Make sure that tunables are in sane state
	 */
	if (iopoll_burst_max < MIN_IOPOLL_BURST_MAX)
		iopoll_burst_max = MIN_IOPOLL_BURST_MAX;
	else if (iopoll_burst_max > MAX_IOPOLL_BURST_MAX)
		iopoll_burst_max = MAX_IOPOLL_BURST_MAX;

	if (iopoll_each_burst > iopoll_burst_max)
		iopoll_each_burst = iopoll_burst_max;

	/*
	 * Create the per-cpu polling context
	 */
	io_ctx = kmalloc(sizeof(*io_ctx), M_DEVBUF, M_WAITOK | M_ZERO);

	io_ctx->poll_each_burst = iopoll_each_burst;
	io_ctx->poll_burst_max = iopoll_burst_max;
	io_ctx->user_frac = 50;
#ifdef IFPOLL_MULTI_SYSTIMER
	io_ctx->pollhz = iopoll_hz;
#else
	io_ctx->poll_type = poll_type;
#endif
	io_ctx->poll_cpuid = cpuid;
	iopoll_reset_state(io_ctx);

	netmsg_init(&io_ctx->poll_netmsg, &netisr_adone_rport, MSGF_MPSAFE,
		    iopoll_handler);
	io_ctx->poll_netmsg.nm_lmsg.u.ms_resultp = io_ctx;

	netmsg_init(&io_ctx->poll_more_netmsg, &netisr_adone_rport, MSGF_MPSAFE,
		    iopollmore_handler);
	io_ctx->poll_more_netmsg.nm_lmsg.u.ms_resultp = io_ctx;

	/*
	 * Initialize per-cpu sysctl nodes
	 */
	if (poll_type == IFPOLL_RX)
		poll_type_str = "rx";
	else
		poll_type_str = "tx";

	comm = iopoll_common[cpuid];
	sysctl_ctx_init(&io_ctx->poll_sysctl_ctx);
	io_ctx->poll_sysctl_tree = SYSCTL_ADD_NODE(&io_ctx->poll_sysctl_ctx,
				   SYSCTL_CHILDREN(comm->sysctl_tree),
				   OID_AUTO, poll_type_str, CTLFLAG_RD, 0, "");
	iopoll_add_sysctl(&io_ctx->poll_sysctl_ctx,
			  SYSCTL_CHILDREN(io_ctx->poll_sysctl_tree), io_ctx);
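
	/*
	 * Illustrative layout of the reorganized sysctl tree built above
	 * (the per-cpu level comes from iopoll_comm_create()), e.g. on
	 * CPU 0:
	 *
	 *	net.ifpoll.0.rx.burst_max
	 *	net.ifpoll.0.rx.user_frac
	 *	net.ifpoll.0.tx.each_burst
	 *	...
	 */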

#ifdef IFPOLL_MULTI_SYSTIMER
	/*
	 * Initialize systimer
	 */
	systimer_init_periodic_nq(&io_ctx->pollclock,
				  iopoll_systimer, io_ctx, 1);
#endif

	return io_ctx;
}
/*
 * Hook from iopoll systimer. Tries to schedule an iopoll, but keeps
 * track of lost ticks due to the previous handler taking too long.
 * Normally, this should not happen, because polling handlers should
 * run for a short time. However, in some cases (e.g. when there are
 * changes in link status etc.) the drivers take a very long time
 * (even in the order of milliseconds) to reset and reconfigure the
 * device, causing apparent lost polls.
 *
 * The first part of the code is just for debugging purposes, and tries
 * to count how often hardclock ticks are shorter than they should,
 * meaning either stray interrupts or delayed events.
 *
 * WARNING! called from fastint or IPI, the MP lock might not be held.
 */
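/*
 * A worked example of the short-tick test below: delta is in microseconds,
 * so "delta * poll_hz < 500000" flags gaps shorter than half the expected
 * tick period.  With poll_hz = 2000 (a 500us period), any delta below
 * 250us counts as a short tick.
 */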
static void
iopoll_clock(struct iopoll_ctx *io_ctx)
{
	globaldata_t gd = mycpu;
	union ifpoll_time t;
	int delta, poll_hz;

	KKASSERT(gd->gd_cpuid == io_ctx->poll_cpuid);

	if (io_ctx->poll_handlers == 0)
		return;

#ifdef IFPOLL_MULTI_SYSTIMER
	poll_hz = io_ctx->pollhz;
#else
	poll_hz = iopoll_hz(io_ctx);
#endif

	ifpoll_time_get(&t);
	delta = ifpoll_time_diff(&io_ctx->prev_t, &t);
	if (delta * poll_hz < 500000)
		io_ctx->short_ticks++;
	else
		io_ctx->prev_t = t;

	if (io_ctx->pending_polls > 100) {
		/*
		 * Too much, assume it has stalled (not always true,
		 * see comment above).
		 */
		io_ctx->stalled++;
		io_ctx->pending_polls = 0;
		io_ctx->phase = 0;
	}

	if (io_ctx->phase <= 2) {
		if (io_ctx->phase != 0)
			io_ctx->suspect++;
		io_ctx->phase = 1;
		crit_enter_gd(gd);
		sched_iopoll(io_ctx);
		crit_exit_gd(gd);
		io_ctx->phase = 2;
	}
	if (io_ctx->pending_polls++ > 0)
		io_ctx->lost_polls++;
}

#ifdef IFPOLL_MULTI_SYSTIMER
static void
iopoll_systimer(systimer_t info, struct intrframe *frame __unused)
{
	iopoll_clock(info->data);
}
#endif
/*
 * iopoll_handler is scheduled by sched_iopoll when appropriate, typically
 * once per polling systimer tick.
 *
 * Note that the message is replied immediately in order to allow a new
 * ISR to be scheduled in the handler.
 */
static void
iopoll_handler(struct netmsg *msg)
{
	struct iopoll_ctx *io_ctx;
	struct thread *td = curthread;
	int i, cycles;

	io_ctx = msg->nm_lmsg.u.ms_resultp;
	KKASSERT(&td->td_msgport == ifnet_portfn(io_ctx->poll_cpuid));

	crit_enter_quick(td);

	/* Reply ASAP */
	lwkt_replymsg(&msg->nm_lmsg, 0);

	if (io_ctx->poll_handlers == 0) {
		crit_exit_quick(td);
		return;
	}

	io_ctx->phase = 3;
	if (io_ctx->residual_burst == 0) {
		/* First call in this tick */
		ifpoll_time_get(&io_ctx->poll_start_t);
		io_ctx->residual_burst = io_ctx->poll_burst;
	}
	cycles = (io_ctx->residual_burst < io_ctx->poll_each_burst) ?
		 io_ctx->residual_burst : io_ctx->poll_each_burst;
	io_ctx->residual_burst -= cycles;

	for (i = 0; i < io_ctx->poll_handlers; i++) {
		const struct iopoll_rec *rec = &io_ctx->pr[i];
		struct ifnet *ifp = rec->ifp;

		if (!lwkt_serialize_try(rec->serializer))
			continue;

		if ((ifp->if_flags & (IFF_RUNNING | IFF_NPOLLING)) ==
		    (IFF_RUNNING | IFF_NPOLLING))
			rec->poll_func(ifp, rec->arg, cycles);

		lwkt_serialize_exit(rec->serializer);
	}

	/*
	 * Do a quick exit/enter to catch any higher-priority
	 * interrupt sources.
	 */
	crit_exit_quick(td);
	crit_enter_quick(td);

	sched_iopollmore(io_ctx);
	io_ctx->phase = 4;

	crit_exit_quick(td);
}
/*
 * iopollmore_handler is called after other netisrs, possibly scheduling
 * another iopoll_handler call, or adapting the burst size for the next cycle.
 *
 * It is very bad to fetch large bursts of packets from a single card at once,
 * because the burst could take a long time to be completely processed, leading
 * to unfairness. To reduce the problem, and also to account better for time
 * spent in network-related processing, we split the burst in smaller chunks
 * of fixed size, giving control to the other netisrs between chunks. This
 * helps in improving the fairness, reducing livelock and accounting for the
 * work performed in low-level handling.
 */
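/*
 * A worked example of the load accounting below: kern_load starts as the
 * microseconds spent polling during this tick and is rescaled by
 * (kern_load * poll_hz) / 10000 into a 0..100 percentage of the tick.
 * With poll_hz = 2000 (500us ticks), 250us of polling gives
 * 250 * 2000 / 10000 = 50, i.e. a 50% kernel share, which is then weighed
 * against (100 - user_frac) to shrink or grow poll_burst.
 */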
static void
iopollmore_handler(struct netmsg *msg)
{
	struct thread *td = curthread;
	struct iopoll_ctx *io_ctx;
	union ifpoll_time t;
	int kern_load, poll_hz;
	uint32_t pending_polls;

	io_ctx = msg->nm_lmsg.u.ms_resultp;
	KKASSERT(&td->td_msgport == ifnet_portfn(io_ctx->poll_cpuid));

	crit_enter_quick(td);

	/* Reply ASAP */
	lwkt_replymsg(&msg->nm_lmsg, 0);

	if (io_ctx->poll_handlers == 0) {
		crit_exit_quick(td);
		return;
	}

#ifdef IFPOLL_MULTI_SYSTIMER
	poll_hz = io_ctx->pollhz;
#else
	poll_hz = iopoll_hz(io_ctx);
#endif

	io_ctx->phase = 5;
	if (io_ctx->residual_burst > 0) {
		sched_iopoll(io_ctx);
		crit_exit_quick(td);
		/* Will run immediately on return, followed by netisrs */
		return;
	}

	/* Here we can account time spent in iopoll's in this tick */
	ifpoll_time_get(&t);
	kern_load = ifpoll_time_diff(&io_ctx->poll_start_t, &t);
	kern_load = (kern_load * poll_hz) / 10000;	/* 0..100 */
	io_ctx->kern_frac = kern_load;

	if (kern_load > (100 - io_ctx->user_frac)) {
		/* Try to decrease ticks */
		if (io_ctx->poll_burst > 1)
			io_ctx->poll_burst--;
	} else {
		if (io_ctx->poll_burst < io_ctx->poll_burst_max)
			io_ctx->poll_burst++;
	}

	io_ctx->pending_polls--;
	pending_polls = io_ctx->pending_polls;

	if (pending_polls == 0) {
		/* We are done */
		io_ctx->phase = 0;
	} else {
		/*
		 * Last cycle was long and caused us to miss one or more
		 * hardclock ticks. Restart processing again, but slightly
		 * reduce the burst size to prevent this from happening again.
		 */
		io_ctx->poll_burst -= (io_ctx->poll_burst / 8);
		if (io_ctx->poll_burst < 1)
			io_ctx->poll_burst = 1;
		sched_iopoll(io_ctx);
		io_ctx->phase = 6;
	}

	crit_exit_quick(td);
}
static void
iopoll_add_sysctl(struct sysctl_ctx_list *ctx, struct sysctl_oid_list *parent,
		  struct iopoll_ctx *io_ctx)
{
#ifdef IFPOLL_MULTI_SYSTIMER
	SYSCTL_ADD_PROC(ctx, parent, OID_AUTO, "pollhz",
			CTLTYPE_INT | CTLFLAG_RW, io_ctx, 0, sysctl_iopollhz,
			"I", "Device polling frequency");
#endif

	SYSCTL_ADD_PROC(ctx, parent, OID_AUTO, "burst_max",
			CTLTYPE_UINT | CTLFLAG_RW, io_ctx, 0, sysctl_burstmax,
			"IU", "Max Polling burst size");

	SYSCTL_ADD_PROC(ctx, parent, OID_AUTO, "each_burst",
			CTLTYPE_UINT | CTLFLAG_RW, io_ctx, 0, sysctl_eachburst,
			"IU", "Max size of each burst");

	SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "phase", CTLFLAG_RD,
			&io_ctx->phase, 0, "Polling phase");

	SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "suspect", CTLFLAG_RW,
			&io_ctx->suspect, 0, "suspect event");

	SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "stalled", CTLFLAG_RW,
			&io_ctx->stalled, 0, "potential stalls");

	SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "burst", CTLFLAG_RD,
			&io_ctx->poll_burst, 0, "Current polling burst size");

	SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "user_frac", CTLFLAG_RW,
			&io_ctx->user_frac, 0,
			"Desired user fraction of cpu time");

	SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "kern_frac", CTLFLAG_RD,
			&io_ctx->kern_frac, 0,
			"Kernel fraction of cpu time");

	SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "short_ticks", CTLFLAG_RW,
			&io_ctx->short_ticks, 0,
			"Hardclock ticks shorter than they should be");

	SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "lost_polls", CTLFLAG_RW,
			&io_ctx->lost_polls, 0,
			"How many times we would have lost a poll tick");

	SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "pending_polls", CTLFLAG_RD,
			&io_ctx->pending_polls, 0, "Do we need to poll again");

	SYSCTL_ADD_INT(ctx, parent, OID_AUTO, "residual_burst", CTLFLAG_RD,
		       &io_ctx->residual_burst, 0,
		       "# of residual cycles in burst");

	SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "handlers", CTLFLAG_RD,
			&io_ctx->poll_handlers, 0,
			"Number of registered poll handlers");
}
#ifdef IFPOLL_MULTI_SYSTIMER

static int
sysctl_iopollhz(SYSCTL_HANDLER_ARGS)
{
	struct iopoll_ctx *io_ctx = arg1;
	struct iopoll_sysctl_netmsg msg;
	struct netmsg *nmsg;
	int error, phz;

	phz = io_ctx->pollhz;
	error = sysctl_handle_int(oidp, &phz, 0, req);
	if (error || req->newptr == NULL)
		return error;
	if (phz <= 0)
		return EINVAL;
	else if (phz > IFPOLL_FREQ_MAX)
		phz = IFPOLL_FREQ_MAX;

	nmsg = &msg.nmsg;
	netmsg_init(nmsg, &curthread->td_msgport, MSGF_MPSAFE,
		    sysctl_iopollhz_handler);
	nmsg->nm_lmsg.u.ms_result = phz;
	msg.ctx = io_ctx;

	return ifnet_domsg(&nmsg->nm_lmsg, io_ctx->poll_cpuid);
}

static void
sysctl_iopollhz_handler(struct netmsg *nmsg)
{
	struct iopoll_sysctl_netmsg *msg = (struct iopoll_sysctl_netmsg *)nmsg;
	struct iopoll_ctx *io_ctx;

	io_ctx = msg->ctx;
	KKASSERT(&curthread->td_msgport == ifnet_portfn(io_ctx->poll_cpuid));

	/*
	 * If polling is disabled or there is no polling handler
	 * registered, don't adjust polling systimer frequency.
	 * Polling systimer frequency will be adjusted once there
	 * are registered handlers.
	 */
	io_ctx->pollhz = nmsg->nm_lmsg.u.ms_result;
	if (io_ctx->poll_handlers)
		systimer_adjust_periodic(&io_ctx->pollclock, io_ctx->pollhz);

	lwkt_replymsg(&nmsg->nm_lmsg, 0);
}

#endif	/* IFPOLL_MULTI_SYSTIMER */
static void
sysctl_burstmax_handler(struct netmsg *nmsg)
{
	struct iopoll_sysctl_netmsg *msg = (struct iopoll_sysctl_netmsg *)nmsg;
	struct iopoll_ctx *io_ctx;

	io_ctx = msg->ctx;
	KKASSERT(&curthread->td_msgport == ifnet_portfn(io_ctx->poll_cpuid));

	io_ctx->poll_burst_max = nmsg->nm_lmsg.u.ms_result;
	if (io_ctx->poll_each_burst > io_ctx->poll_burst_max)
		io_ctx->poll_each_burst = io_ctx->poll_burst_max;
	if (io_ctx->poll_burst > io_ctx->poll_burst_max)
		io_ctx->poll_burst = io_ctx->poll_burst_max;
	if (io_ctx->residual_burst > io_ctx->poll_burst_max)
		io_ctx->residual_burst = io_ctx->poll_burst_max;

	lwkt_replymsg(&nmsg->nm_lmsg, 0);
}

static int
sysctl_burstmax(SYSCTL_HANDLER_ARGS)
{
	struct iopoll_ctx *io_ctx = arg1;
	struct iopoll_sysctl_netmsg msg;
	struct netmsg *nmsg;
	uint32_t burst_max;
	int error;

	burst_max = io_ctx->poll_burst_max;
	error = sysctl_handle_int(oidp, &burst_max, 0, req);
	if (error || req->newptr == NULL)
		return error;
	if (burst_max < MIN_IOPOLL_BURST_MAX)
		burst_max = MIN_IOPOLL_BURST_MAX;
	else if (burst_max > MAX_IOPOLL_BURST_MAX)
		burst_max = MAX_IOPOLL_BURST_MAX;

	nmsg = &msg.nmsg;
	netmsg_init(nmsg, &curthread->td_msgport, MSGF_MPSAFE,
		    sysctl_burstmax_handler);
	nmsg->nm_lmsg.u.ms_result = burst_max;
	msg.ctx = io_ctx;

	return ifnet_domsg(&nmsg->nm_lmsg, io_ctx->poll_cpuid);
}

static void
sysctl_eachburst_handler(struct netmsg *nmsg)
{
	struct iopoll_sysctl_netmsg *msg = (struct iopoll_sysctl_netmsg *)nmsg;
	struct iopoll_ctx *io_ctx;
	uint32_t each_burst;

	io_ctx = msg->ctx;
	KKASSERT(&curthread->td_msgport == ifnet_portfn(io_ctx->poll_cpuid));

	each_burst = nmsg->nm_lmsg.u.ms_result;
	if (each_burst > io_ctx->poll_burst_max)
		each_burst = io_ctx->poll_burst_max;
	else if (each_burst < 1)
		each_burst = 1;
	io_ctx->poll_each_burst = each_burst;

	lwkt_replymsg(&nmsg->nm_lmsg, 0);
}

static int
sysctl_eachburst(SYSCTL_HANDLER_ARGS)
{
	struct iopoll_ctx *io_ctx = arg1;
	struct iopoll_sysctl_netmsg msg;
	struct netmsg *nmsg;
	uint32_t each_burst;
	int error;

	each_burst = io_ctx->poll_each_burst;
	error = sysctl_handle_int(oidp, &each_burst, 0, req);
	if (error || req->newptr == NULL)
		return error;

	nmsg = &msg.nmsg;
	netmsg_init(nmsg, &curthread->td_msgport, MSGF_MPSAFE,
		    sysctl_eachburst_handler);
	nmsg->nm_lmsg.u.ms_result = each_burst;
	msg.ctx = io_ctx;

	return ifnet_domsg(&nmsg->nm_lmsg, io_ctx->poll_cpuid);
}
static int
iopoll_register(struct ifnet *ifp, struct iopoll_ctx *io_ctx,
		const struct ifpoll_io *io_rec)
{
	int error;

	KKASSERT(&curthread->td_msgport == ifnet_portfn(io_ctx->poll_cpuid));

	if (io_rec->poll_func == NULL)
		return 0;

	/*
	 * Check if there is room.
	 */
	if (io_ctx->poll_handlers >= IFPOLL_LIST_LEN) {
		/*
		 * List full, cannot register more entries.
		 * This should never happen; if it does, it is probably a
		 * broken driver trying to register multiple times. Checking
		 * this at runtime is expensive, and won't solve the problem
		 * anyways, so just report a few times and then give up.
		 */
		static int verbose = 10; /* XXX */
		if (verbose > 0) {
			kprintf("io poll handlers list full, "
				"maybe a broken driver?\n");
			verbose--;
		}
		error = ENOENT;
	} else {
		struct iopoll_rec *rec = &io_ctx->pr[io_ctx->poll_handlers];

		rec->ifp = ifp;
		rec->serializer = io_rec->serializer;
		rec->arg = io_rec->arg;
		rec->poll_func = io_rec->poll_func;

		io_ctx->poll_handlers++;
		if (io_ctx->poll_handlers == 1) {
#ifdef IFPOLL_MULTI_SYSTIMER
			systimer_adjust_periodic(&io_ctx->pollclock,
						 io_ctx->pollhz);
#else
			u_int *mask;

			if (io_ctx->poll_type == IFPOLL_RX)
				mask = &ifpoll0.rx_cpumask;
			else
				mask = &ifpoll0.tx_cpumask;
			KKASSERT((*mask & mycpu->gd_cpumask) == 0);
			atomic_set_int(mask, mycpu->gd_cpumask);
#endif
		}
#ifndef IFPOLL_MULTI_SYSTIMER
		ifpoll_handler_addevent();
#endif
		error = 0;
	}
	return error;
}
static int
iopoll_deregister(struct ifnet *ifp, struct iopoll_ctx *io_ctx)
{
	int i, error;

	KKASSERT(&curthread->td_msgport == ifnet_portfn(io_ctx->poll_cpuid));

	for (i = 0; i < io_ctx->poll_handlers; ++i) {
		if (io_ctx->pr[i].ifp == ifp) /* Found it */
			break;
	}
	if (i == io_ctx->poll_handlers) {
		error = ENOENT;
	} else {
		io_ctx->poll_handlers--;
		if (i < io_ctx->poll_handlers) {
			/* Last entry replaces this one. */
			io_ctx->pr[i] = io_ctx->pr[io_ctx->poll_handlers];
		}

		if (io_ctx->poll_handlers == 0) {
#ifdef IFPOLL_MULTI_SYSTIMER
			systimer_adjust_periodic(&io_ctx->pollclock, 1);
#else
			u_int *mask;

			if (io_ctx->poll_type == IFPOLL_RX)
				mask = &ifpoll0.rx_cpumask;
			else
				mask = &ifpoll0.tx_cpumask;
			KKASSERT(*mask & mycpu->gd_cpumask);
			atomic_clear_int(mask, mycpu->gd_cpumask);
#endif
			iopoll_reset_state(io_ctx);
		}
#ifndef IFPOLL_MULTI_SYSTIMER
		ifpoll_handler_delevent();
#endif
		error = 0;
	}
	return error;
}