/*
 * Copyright (c) 2001-2002 Luigi Rizzo
 *
 * Supported by: the Xorp Project (www.xorp.org)
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/kern/kern_poll.c,v 1.2.2.4 2002/06/27 23:26:33 luigi Exp $
 * $DragonFly: src/sys/kern/kern_poll.c,v 1.47 2008/09/23 14:14:20 sephe Exp $
 */
#include "opt_polling.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/socket.h>			/* needed by net/if.h */
#include <sys/sysctl.h>

#include <sys/thread2.h>
#include <sys/msgport2.h>

#include <net/if.h>			/* for IFF_* flags */
#include <net/netmsg2.h>
/*
 * Polling support for [network] device drivers.
 *
 * Drivers which support this feature try to register with the
 * polling code.
 *
 * If registration is successful, the driver must disable interrupts,
 * and further I/O is performed through the handler, which is invoked
 * (at least once per clock tick) with 3 arguments: the "arg" passed at
 * register time (a struct ifnet pointer), a command, and a "count" limit.
 *
 * The command can be one of the following:
 * POLL_ONLY: quick move of "count" packets from input/output queues.
 * POLL_AND_CHECK_STATUS: as above, plus check status registers or do
 *	other more expensive operations. This command is issued periodically
 *	but less frequently than POLL_ONLY.
 * POLL_DEREGISTER: deregister and return to interrupt mode.
 * POLL_REGISTER: register and disable interrupts.
 *
 * The first two commands are only issued if the interface is marked as
 * 'IFF_UP, IFF_RUNNING and IFF_POLLING', the last two only if IFF_RUNNING
 * is set.
 *
 * The count limit specifies how much work the handler can do during the
 * call -- typically this is the number of packets to be received, or
 * transmitted, etc. (drivers are free to interpret this number, as long
 * as the max time spent in the function grows roughly linearly with the
 * count).
 *
 * Deregistration can be requested by the driver itself (typically in the
 * *_stop() routine), or by the polling code, by invoking the handler.
 *
 * Polling can be enabled or disabled on a particular cpu X with the sysctl
 * variable kern.polling.X.enable (default is 1, enabled).
 *
 * A second variable controls the sharing of CPU between polling/kernel
 * network processing, and other activities (typically userlevel tasks):
 * kern.polling.X.user_frac (between 0 and 100, default 50) sets the share
 * of CPU allocated to user tasks.  CPU is allocated proportionally to the
 * shares, by dynamically adjusting the "count" (poll_burst).
 *
 * Other parameters should be left at their default values.
 * The following constraints hold:
 *
 *	1 <= poll_burst <= poll_burst_max
 *	1 <= poll_each_burst <= poll_burst_max
 *	MIN_POLL_BURST_MAX <= poll_burst_max <= MAX_POLL_BURST_MAX
 */
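/*
 * To make the contract above concrete, the sketch below shows the shape of
 * a driver poll handler wired into ifp->if_poll.  It is illustrative only
 * and is not compiled: the "xx_*" softc and helper routines are hypothetical
 * stand-ins for a real driver's interrupt-mask, rx and tx code.
 */
#if 0
static void
xx_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
	struct xx_softc *sc = ifp->if_softc;	/* hypothetical softc */

	switch (cmd) {
	case POLL_REGISTER:
		/* Entering polling mode: mask the NIC's interrupt sources. */
		xx_disable_intr(sc);
		break;
	case POLL_DEREGISTER:
		/* Back to interrupt mode: unmask interrupts again. */
		xx_enable_intr(sc);
		break;
	case POLL_AND_CHECK_STATUS:
		/* Issued less often: also look at error/status registers. */
		xx_check_status(sc);
		/* FALLTHROUGH */
	case POLL_ONLY:
		/* Move at most "count" packets per direction. */
		xx_rxeof(sc, count);
		xx_txeof(sc);
		break;
	}
}
#endif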
#define MIN_POLL_BURST_MAX	10
#define MAX_POLL_BURST_MAX	1000
#define POLL_BURST_MAX		150	/* good for 100Mbit net and HZ=1000 */
#define POLL_EACH_BURST		5

#ifndef DEVICE_POLLING_FREQ_MAX
#define DEVICE_POLLING_FREQ_MAX		30000
#endif
#define DEVICE_POLLING_FREQ_DEFAULT	2000

#define POLL_LIST_LEN		128
#define POLLCTX_MAX		32
struct pollrec {
	struct ifnet		*ifp;
};

struct pollctx {
	struct sysctl_ctx_list	poll_sysctl_ctx;
	struct sysctl_oid	*poll_sysctl_tree;

	uint32_t		poll_burst;		/* state */
	uint32_t		poll_each_burst;	/* tunable */
	uint32_t		poll_burst_max;		/* tunable */
	uint32_t		user_frac;		/* tunable */
	int			reg_frac_count;		/* state */
	uint32_t		reg_frac;		/* tunable */
	uint32_t		short_ticks;		/* statistics */
	uint32_t		lost_polls;		/* statistics */
	uint32_t		pending_polls;		/* state */
	int			residual_burst;		/* state */
	uint32_t		phase;			/* state */
	uint32_t		suspect;		/* statistics */
	uint32_t		stalled;		/* statistics */
	struct timeval		poll_start_t;		/* state */
	struct timeval		prev_t;			/* state */

	uint32_t		poll_handlers;		/* next free entry in pr[]. */
	struct pollrec		pr[POLL_LIST_LEN];

	int			poll_cpuid;
	struct systimer		pollclock;
	int			polling_enabled;	/* tunable */
	int			pollhz;			/* tunable */

	struct netmsg		poll_netmsg;
	struct netmsg		poll_more_netmsg;
};

static struct pollctx	*poll_context[POLLCTX_MAX];
SYSCTL_NODE(_kern, OID_AUTO, polling, CTLFLAG_RW, 0,
	"Device polling parameters");

static int	poll_defcpu = -1;
SYSCTL_INT(_kern_polling, OID_AUTO, defcpu, CTLFLAG_RD,
	&poll_defcpu, 0, "default CPU to run device polling");

static uint32_t	poll_cpumask0 = 0xffffffff;
TUNABLE_INT("kern.polling.cpumask", (int *)&poll_cpumask0);

static uint32_t	poll_cpumask;
SYSCTL_INT(_kern_polling, OID_AUTO, cpumask, CTLFLAG_RD,
	&poll_cpumask, 0, "CPUs that can run device polling");

static int	polling_enabled = 1;	/* global polling enable */
TUNABLE_INT("kern.polling.enable", &polling_enabled);

static int	pollhz = DEVICE_POLLING_FREQ_DEFAULT;
TUNABLE_INT("kern.polling.pollhz", &pollhz);

static int	poll_burst_max = POLL_BURST_MAX;
TUNABLE_INT("kern.polling.burst_max", &poll_burst_max);

static int	poll_each_burst = POLL_EACH_BURST;
TUNABLE_INT("kern.polling.each_burst", &poll_each_burst);
/* Netisr handlers */
static void	netisr_poll(struct netmsg *);
static void	netisr_pollmore(struct netmsg *);
static void	poll_register(struct netmsg *);
static void	poll_deregister(struct netmsg *);
static void	poll_sysctl_pollhz(struct netmsg *);
static void	poll_sysctl_polling(struct netmsg *);
static void	poll_sysctl_regfrac(struct netmsg *);
static void	poll_sysctl_burstmax(struct netmsg *);
static void	poll_sysctl_eachburst(struct netmsg *);

/* Systimer handler */
static void	pollclock(systimer_t, struct intrframe *);

/* Sysctl handlers */
static int	sysctl_pollhz(SYSCTL_HANDLER_ARGS);
static int	sysctl_polling(SYSCTL_HANDLER_ARGS);
static int	sysctl_regfrac(SYSCTL_HANDLER_ARGS);
static int	sysctl_burstmax(SYSCTL_HANDLER_ARGS);
static int	sysctl_eachburst(SYSCTL_HANDLER_ARGS);
static void	poll_add_sysctl(struct sysctl_ctx_list *,
				struct sysctl_oid_list *, struct pollctx *);

static void	schedpoll_oncpu(struct netmsg *);

void		init_device_poll_pcpu(int);	/* per-cpu init routine */
#define POLL_KTR_STRING		"ifp=%p"
#define POLL_KTR_ARG_SIZE	(sizeof(void *))

#ifndef KTR_POLLING
#define KTR_POLLING	KTR_ALL
#endif
KTR_INFO_MASTER(poll);
KTR_INFO(KTR_POLLING, poll, beg, 0, POLL_KTR_STRING, POLL_KTR_ARG_SIZE);
KTR_INFO(KTR_POLLING, poll, end, 1, POLL_KTR_STRING, POLL_KTR_ARG_SIZE);

#define logpoll(name, arg)	KTR_LOG(poll_ ## name, arg)
static void
poll_reset_state(struct pollctx *pctx)
{
	pctx->poll_burst = 5;
	pctx->reg_frac_count = 0;
	pctx->pending_polls = 0;
	pctx->residual_burst = 0;

	bzero(&pctx->poll_start_t, sizeof(pctx->poll_start_t));
	bzero(&pctx->prev_t, sizeof(pctx->prev_t));
}
/*
 * Initialize the per-cpu polling(4) context.  Called from kern_clock.c.
 */
void
init_device_poll_pcpu(int cpuid)
{
	struct pollctx *pctx;
	char cpuid_str[3];

	if (cpuid >= POLLCTX_MAX)
		return;

	if (((1 << cpuid) & poll_cpumask0) == 0)
		return;

	if (poll_burst_max < MIN_POLL_BURST_MAX)
		poll_burst_max = MIN_POLL_BURST_MAX;
	else if (poll_burst_max > MAX_POLL_BURST_MAX)
		poll_burst_max = MAX_POLL_BURST_MAX;

	if (poll_each_burst > poll_burst_max)
		poll_each_burst = poll_burst_max;

	poll_cpumask |= (1 << cpuid);

	pctx = kmalloc(sizeof(*pctx), M_DEVBUF, M_WAITOK | M_ZERO);

	pctx->poll_each_burst = poll_each_burst;
	pctx->poll_burst_max = poll_burst_max;
	pctx->user_frac = 50;
	pctx->polling_enabled = polling_enabled;
	pctx->pollhz = pollhz;
	pctx->poll_cpuid = cpuid;
	poll_reset_state(pctx);

	netmsg_init(&pctx->poll_netmsg, &netisr_adone_rport, 0,
		    netisr_poll);
	pctx->poll_netmsg.nm_lmsg.u.ms_resultp = pctx;

	netmsg_init(&pctx->poll_more_netmsg, &netisr_adone_rport, 0,
		    netisr_pollmore);
	pctx->poll_more_netmsg.nm_lmsg.u.ms_resultp = pctx;

	KASSERT(cpuid < POLLCTX_MAX, ("cpu id must be < %d", cpuid));
	poll_context[cpuid] = pctx;

	if (poll_defcpu < 0) {
		poll_defcpu = cpuid;

		/*
		 * Initialize global sysctl nodes, for compat
		 */
		poll_add_sysctl(NULL, SYSCTL_STATIC_CHILDREN(_kern_polling),
				pctx);
	}

	/*
	 * Initialize per-cpu sysctl nodes
	 */
	ksnprintf(cpuid_str, sizeof(cpuid_str), "%d", pctx->poll_cpuid);

	sysctl_ctx_init(&pctx->poll_sysctl_ctx);
	pctx->poll_sysctl_tree = SYSCTL_ADD_NODE(&pctx->poll_sysctl_ctx,
				 SYSCTL_STATIC_CHILDREN(_kern_polling),
				 OID_AUTO, cpuid_str, CTLFLAG_RD, 0, "");
	poll_add_sysctl(&pctx->poll_sysctl_ctx,
			SYSCTL_CHILDREN(pctx->poll_sysctl_tree), pctx);

	/*
	 * Initialize systimer
	 */
	systimer_init_periodic_nq(&pctx->pollclock, pollclock, pctx, 1);
}
static void
schedpoll_oncpu(struct netmsg *msg)
{
	if (msg->nm_lmsg.ms_flags & MSGF_DONE)
		lwkt_sendmsg(cpu_portfn(mycpuid), &msg->nm_lmsg);
}

static void
schedpoll(struct pollctx *pctx)
{
	schedpoll_oncpu(&pctx->poll_netmsg);
}

static void
schedpollmore(struct pollctx *pctx)
{
	schedpoll_oncpu(&pctx->poll_more_netmsg);
}
/*
 * Set the polling frequency.
 */
static int
sysctl_pollhz(SYSCTL_HANDLER_ARGS)
{
	struct pollctx *pctx = arg1;
	struct netmsg msg;
	lwkt_port_t port;
	int error, phz;

	phz = pctx->pollhz;
	error = sysctl_handle_int(oidp, &phz, 0, req);
	if (error || req->newptr == NULL)
		return error;
	if (phz <= 0)
		return EINVAL;
	else if (phz > DEVICE_POLLING_FREQ_MAX)
		phz = DEVICE_POLLING_FREQ_MAX;

	netmsg_init(&msg, &curthread->td_msgport, 0, poll_sysctl_pollhz);
	msg.nm_lmsg.u.ms_result = phz;

	port = cpu_portfn(pctx->poll_cpuid);
	lwkt_domsg(port, &msg.nm_lmsg, 0);
	return 0;
}
static int
sysctl_polling(SYSCTL_HANDLER_ARGS)
{
	struct pollctx *pctx = arg1;
	struct netmsg msg;
	lwkt_port_t port;
	int error, enabled;

	enabled = pctx->polling_enabled;
	error = sysctl_handle_int(oidp, &enabled, 0, req);
	if (error || req->newptr == NULL)
		return error;

	netmsg_init(&msg, &curthread->td_msgport, 0, poll_sysctl_polling);
	msg.nm_lmsg.u.ms_result = enabled;

	port = cpu_portfn(pctx->poll_cpuid);
	lwkt_domsg(port, &msg.nm_lmsg, 0);
	return 0;
}
static int
sysctl_regfrac(SYSCTL_HANDLER_ARGS)
{
	struct pollctx *pctx = arg1;
	struct netmsg msg;
	lwkt_port_t port;
	uint32_t reg_frac;
	int error;

	reg_frac = pctx->reg_frac;
	error = sysctl_handle_int(oidp, &reg_frac, 0, req);
	if (error || req->newptr == NULL)
		return error;

	netmsg_init(&msg, &curthread->td_msgport, 0, poll_sysctl_regfrac);
	msg.nm_lmsg.u.ms_result = reg_frac;

	port = cpu_portfn(pctx->poll_cpuid);
	lwkt_domsg(port, &msg.nm_lmsg, 0);
	return 0;
}
static int
sysctl_burstmax(SYSCTL_HANDLER_ARGS)
{
	struct pollctx *pctx = arg1;
	struct netmsg msg;
	lwkt_port_t port;
	uint32_t burst_max;
	int error;

	burst_max = pctx->poll_burst_max;
	error = sysctl_handle_int(oidp, &burst_max, 0, req);
	if (error || req->newptr == NULL)
		return error;
	if (burst_max < MIN_POLL_BURST_MAX)
		burst_max = MIN_POLL_BURST_MAX;
	else if (burst_max > MAX_POLL_BURST_MAX)
		burst_max = MAX_POLL_BURST_MAX;

	netmsg_init(&msg, &curthread->td_msgport, 0, poll_sysctl_burstmax);
	msg.nm_lmsg.u.ms_result = burst_max;

	port = cpu_portfn(pctx->poll_cpuid);
	lwkt_domsg(port, &msg.nm_lmsg, 0);
	return 0;
}
static int
sysctl_eachburst(SYSCTL_HANDLER_ARGS)
{
	struct pollctx *pctx = arg1;
	struct netmsg msg;
	lwkt_port_t port;
	uint32_t each_burst;
	int error;

	each_burst = pctx->poll_each_burst;
	error = sysctl_handle_int(oidp, &each_burst, 0, req);
	if (error || req->newptr == NULL)
		return error;

	netmsg_init(&msg, &curthread->td_msgport, 0, poll_sysctl_eachburst);
	msg.nm_lmsg.u.ms_result = each_burst;

	port = cpu_portfn(pctx->poll_cpuid);
	lwkt_domsg(port, &msg.nm_lmsg, 0);
	return 0;
}
/*
 * Hook from the polling systimer.  Tries to schedule a netisr, but keeps
 * track of lost ticks due to the previous handler taking too long.
 * Normally, this should not happen, because the polling handler should
 * run for a short time.  However, in some cases (e.g. when there are
 * changes in link status etc.) the drivers take a very long time
 * (even in the order of milliseconds) to reset and reconfigure the
 * device, causing apparent lost polls.
 *
 * The first part of the code is just for debugging purposes, and tries
 * to count how often hardclock ticks are shorter than they should be,
 * meaning either stray interrupts or delayed events.
 *
 * WARNING! called from fastint or IPI, the MP lock might not be held.
 */
static void
pollclock(systimer_t info, struct intrframe *frame __unused)
{
	struct pollctx *pctx = info->data;
	struct timeval t;
	int delta;

	if (pctx->poll_handlers == 0)
		return;

	microuptime(&t);
	delta = (t.tv_usec - pctx->prev_t.tv_usec) +
		(t.tv_sec - pctx->prev_t.tv_sec)*1000000;
	if (delta * pctx->pollhz < 500000)
		pctx->short_ticks++;
	else
		pctx->prev_t = t;

	if (pctx->pending_polls > 100) {
		/*
		 * Too much, assume it has stalled (not always true
		 * see comment above).
		 */
		pctx->stalled++;
		pctx->pending_polls = 0;
		pctx->phase = 0;
	}

	if (pctx->phase <= 2) {
		if (pctx->phase != 0)
			pctx->suspect++;
		pctx->phase = 1;
		schedpoll(pctx);
		pctx->phase = 2;
	}
	if (pctx->pending_polls++ > 0)
		pctx->lost_polls++;
}
/*
 * netisr_pollmore is called after other netisr's, possibly scheduling
 * another NETISR_POLL call, or adapting the burst size for the next cycle.
 *
 * It is very bad to fetch large bursts of packets from a single card at once,
 * because the burst could take a long time to be completely processed leading
 * to unfairness.  To reduce the problem, and also to account better for time
 * spent in network-related processing, we split the burst in smaller chunks
 * of fixed size, giving control to the other netisr's between chunks.  This
 * helps in improving the fairness, reducing livelock (because we emulate more
 * closely the "process to completion" that we have with fastforwarding) and
 * accounting for the work performed in low level handling and forwarding.
 * (A standalone sketch of this chunking follows the function below.)
 */
static void
netisr_pollmore(struct netmsg *msg)
{
	struct pollctx *pctx;
	struct timeval t;
	int kern_load, cpuid;
	uint32_t pending_polls;

	cpuid = mycpu->gd_cpuid;
	KKASSERT(cpuid < POLLCTX_MAX);

	pctx = poll_context[cpuid];
	KKASSERT(pctx != NULL);
	KKASSERT(pctx->poll_cpuid == cpuid);
	KKASSERT(pctx == msg->nm_lmsg.u.ms_resultp);

	lwkt_replymsg(&msg->nm_lmsg, 0);

	if (pctx->poll_handlers == 0)
		return;

	KASSERT(pctx->polling_enabled,
		("# of registered poll handlers is not zero, "
		 "but polling is not enabled\n"));

	pctx->phase = 5;
	if (pctx->residual_burst > 0) {
		schedpoll(pctx);
		/* will run immediately on return, followed by netisrs */
		return;
	}

	/* here we can account time spent in netisr's in this tick */
	microuptime(&t);
	kern_load = (t.tv_usec - pctx->poll_start_t.tv_usec) +
		(t.tv_sec - pctx->poll_start_t.tv_sec)*1000000;	/* us */
	kern_load = (kern_load * pctx->pollhz) / 10000;		/* 0..100 */
	if (kern_load > (100 - pctx->user_frac)) { /* try decrease ticks */
		if (pctx->poll_burst > 1)
			pctx->poll_burst--;
	} else {
		if (pctx->poll_burst < pctx->poll_burst_max)
			pctx->poll_burst++;
	}

	pctx->pending_polls--;
	pending_polls = pctx->pending_polls;

	if (pending_polls == 0) {	/* we are done */
		pctx->phase = 0;
	} else {
		/*
		 * Last cycle was long and caused us to miss one or more
		 * hardclock ticks.  Restart processing again, but slightly
		 * reduce the burst size to prevent that this happens again.
		 */
		pctx->poll_burst -= (pctx->poll_burst / 8);
		if (pctx->poll_burst < 1)
			pctx->poll_burst = 1;
		schedpoll(pctx);
		pctx->phase = 6;
	}
}
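/*
 * A standalone sketch of the chunking described above, kept apart from the
 * real code and not compiled: a burst of "burst" packets is consumed in
 * chunks of at most "each_burst" packets, mirroring how netisr_poll() and
 * netisr_pollmore() cooperate through residual_burst.  The names here are
 * local to the sketch.
 */
#if 0
static void
burst_in_chunks(int burst, int each_burst)
{
	int residual = burst;

	while (residual > 0) {
		/* the same min() that netisr_poll() applies to residual_burst */
		int cycles = (residual < each_burst) ? residual : each_burst;

		residual -= cycles;
		/* poll each registered interface for up to "cycles" packets */
		/* ... then let the other netisrs run before the next chunk */
	}
}
#endif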
/*
 * netisr_poll is scheduled by schedpoll when appropriate, typically once
 * per polling systimer tick.
 *
 * Note that the message is replied immediately in order to allow a new
 * ISR to be scheduled in the handler.
 *
 * XXX each registration should indicate whether it needs a critical
 * section to operate.
 */
static void
netisr_poll(struct netmsg *msg)
{
	struct pollctx *pctx;
	int i, cycles, cpuid;
	enum poll_cmd arg = POLL_ONLY;

	cpuid = mycpu->gd_cpuid;
	KKASSERT(cpuid < POLLCTX_MAX);

	pctx = poll_context[cpuid];
	KKASSERT(pctx != NULL);
	KKASSERT(pctx->poll_cpuid == cpuid);
	KKASSERT(pctx == msg->nm_lmsg.u.ms_resultp);

	lwkt_replymsg(&msg->nm_lmsg, 0);

	if (pctx->poll_handlers == 0)
		return;

	KASSERT(pctx->polling_enabled,
		("# of registered poll handlers is not zero, "
		 "but polling is not enabled\n"));

	pctx->phase = 3;
	if (pctx->residual_burst == 0) { /* first call in this tick */
		microuptime(&pctx->poll_start_t);

		if (pctx->reg_frac_count-- == 0) {
			arg = POLL_AND_CHECK_STATUS;
			pctx->reg_frac_count = pctx->reg_frac - 1;
		}

		pctx->residual_burst = pctx->poll_burst;
	}
	cycles = (pctx->residual_burst < pctx->poll_each_burst) ?
		pctx->residual_burst : pctx->poll_each_burst;
	pctx->residual_burst -= cycles;

	for (i = 0 ; i < pctx->poll_handlers ; i++) {
		struct ifnet *ifp = pctx->pr[i].ifp;

		if (!lwkt_serialize_try(ifp->if_serializer))
			continue;

		if ((ifp->if_flags & (IFF_UP|IFF_RUNNING|IFF_POLLING))
		    == (IFF_UP|IFF_RUNNING|IFF_POLLING)) {
			logpoll(beg, ifp);
			ifp->if_poll(ifp, arg, cycles);
			logpoll(end, ifp);
		}

		lwkt_serialize_exit(ifp->if_serializer);
	}

	schedpollmore(pctx);
	pctx->phase = 4;
}
static void
poll_register(struct netmsg *msg)
{
	struct ifnet *ifp = msg->nm_lmsg.u.ms_resultp;
	struct pollctx *pctx;
	int rc, cpuid;

	cpuid = mycpu->gd_cpuid;
	KKASSERT(cpuid < POLLCTX_MAX);

	pctx = poll_context[cpuid];
	KKASSERT(pctx != NULL);
	KKASSERT(pctx->poll_cpuid == cpuid);

	if (pctx->polling_enabled == 0) {
		/* Polling disabled, cannot register */
		rc = EOPNOTSUPP;
		goto back;
	}

	/*
	 * Check if there is room.
	 */
	if (pctx->poll_handlers >= POLL_LIST_LEN) {
		/*
		 * List full, cannot register more entries.
		 * This should never happen; if it does, it is probably a
		 * broken driver trying to register multiple times.  Checking
		 * this at runtime is expensive, and won't solve the problem
		 * anyways, so just report a few times and then give up.
		 */
		static int verbose = 10;	/* XXX */

		if (verbose > 0) {
			kprintf("poll handlers list full, "
				"maybe a broken driver ?\n");
			verbose--;
		}
		rc = ENOMEM;
		goto back;
	}

	pctx->pr[pctx->poll_handlers].ifp = ifp;
	pctx->poll_handlers++;

	if (pctx->poll_handlers == 1) {
		KKASSERT(pctx->polling_enabled);
		systimer_adjust_periodic(&pctx->pollclock, pctx->pollhz);
	}
	rc = 0;
back:
	lwkt_replymsg(&msg->nm_lmsg, rc);
}
/*
 * Try to register routine for polling.  Returns 1 if successful
 * (and polling should be enabled), 0 otherwise.
 *
 * Called from mainline code only, not called from an interrupt.
 * (An illustrative caller is sketched after ether_pollcpu_register() below.)
 */
int
ether_poll_register(struct ifnet *ifp)
{
	if (poll_defcpu < 0)
		return 0;
	KKASSERT(poll_defcpu < POLLCTX_MAX);

	return ether_pollcpu_register(ifp, poll_defcpu);
}

int
ether_pollcpu_register(struct ifnet *ifp, int cpuid)
{
	struct netmsg msg;
	lwkt_port_t port;

	if (ifp->if_poll == NULL) {
		/* Device does not support polling */
		return 0;
	}

	if (cpuid < 0 || cpuid >= POLLCTX_MAX)
		return 0;

	if (((1 << cpuid) & poll_cpumask) == 0) {
		/* Polling is not supported on 'cpuid' */
		return 0;
	}
	KKASSERT(poll_context[cpuid] != NULL);

	/*
	 * Attempt to register.  Interlock with IFF_POLLING.
	 */
	crit_enter();	/* XXX MP - not mp safe */

	lwkt_serialize_enter(ifp->if_serializer);
	if (ifp->if_flags & IFF_POLLING) {
		/* Already polling */
		KKASSERT(ifp->if_poll_cpuid >= 0);
		lwkt_serialize_exit(ifp->if_serializer);
		crit_exit();
		return 0;
	}
	KKASSERT(ifp->if_poll_cpuid < 0);
	ifp->if_flags |= IFF_POLLING;
	ifp->if_poll_cpuid = cpuid;
	if (ifp->if_flags & IFF_RUNNING)
		ifp->if_poll(ifp, POLL_REGISTER, 0);
	lwkt_serialize_exit(ifp->if_serializer);

	netmsg_init(&msg, &curthread->td_msgport, 0, poll_register);
	msg.nm_lmsg.u.ms_resultp = ifp;

	port = cpu_portfn(cpuid);
	lwkt_domsg(port, &msg.nm_lmsg, 0);

	if (msg.nm_lmsg.ms_error) {
		lwkt_serialize_enter(ifp->if_serializer);
		ifp->if_flags &= ~IFF_POLLING;
		ifp->if_poll_cpuid = -1;
		if (ifp->if_flags & IFF_RUNNING)
			ifp->if_poll(ifp, POLL_DEREGISTER, 0);
		lwkt_serialize_exit(ifp->if_serializer);

		crit_exit();
		return 0;
	}
	crit_exit();
	return 1;
}
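/*
 * For illustration, this is roughly how the registration entry points above
 * are driven when polling is toggled on an interface (e.g. by setting or
 * clearing IFF_POLLING through ifconfig).  The sketch is not compiled and is
 * not the actual ifioctl() code; it only shows the calling convention:
 * ether_poll_register() returns 1 on success, 0 otherwise.
 */
#if 0
static void
example_toggle_polling(struct ifnet *ifp, int enable)
{
	if (enable) {
		if (ether_poll_register(ifp) == 0)
			kprintf("%s: polling registration failed\n",
				ifp->if_xname);
	} else {
		ether_poll_deregister(ifp);
	}
}
#endif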
static void
poll_deregister(struct netmsg *msg)
{
	struct ifnet *ifp = msg->nm_lmsg.u.ms_resultp;
	struct pollctx *pctx;
	int rc, i, cpuid;

	cpuid = mycpu->gd_cpuid;
	KKASSERT(cpuid < POLLCTX_MAX);

	pctx = poll_context[cpuid];
	KKASSERT(pctx != NULL);
	KKASSERT(pctx->poll_cpuid == cpuid);

	for (i = 0 ; i < pctx->poll_handlers ; i++) {
		if (pctx->pr[i].ifp == ifp) /* Found it */
			break;
	}
	if (i == pctx->poll_handlers) {
		kprintf("ether_poll_deregister: ifp not found!!!\n");
		rc = ENOENT;
		goto back;
	}

	pctx->poll_handlers--;
	if (i < pctx->poll_handlers) {
		/* Last entry replaces this one. */
		pctx->pr[i].ifp = pctx->pr[pctx->poll_handlers].ifp;
	}

	if (pctx->poll_handlers == 0) {
		systimer_adjust_periodic(&pctx->pollclock, 1);
		poll_reset_state(pctx);
	}
	rc = 0;
back:
	lwkt_replymsg(&msg->nm_lmsg, rc);
}
/*
 * Remove interface from the polling list.  Occurs when polling is turned
 * off.  Called from mainline code only, not called from an interrupt.
 */
int
ether_poll_deregister(struct ifnet *ifp)
{
	struct netmsg msg;
	lwkt_port_t port;
	int cpuid;

	KKASSERT(ifp != NULL);

	if (ifp->if_poll == NULL)
		return 0;

	crit_enter();

	lwkt_serialize_enter(ifp->if_serializer);
	if ((ifp->if_flags & IFF_POLLING) == 0) {
		KKASSERT(ifp->if_poll_cpuid < 0);
		lwkt_serialize_exit(ifp->if_serializer);
		crit_exit();
		return 0;
	}

	cpuid = ifp->if_poll_cpuid;
	KKASSERT(cpuid >= 0);
	KKASSERT(poll_context[cpuid] != NULL);

	ifp->if_flags &= ~IFF_POLLING;
	ifp->if_poll_cpuid = -1;
	lwkt_serialize_exit(ifp->if_serializer);

	netmsg_init(&msg, &curthread->td_msgport, 0, poll_deregister);
	msg.nm_lmsg.u.ms_resultp = ifp;

	port = cpu_portfn(cpuid);
	lwkt_domsg(port, &msg.nm_lmsg, 0);

	if (!msg.nm_lmsg.ms_error) {
		lwkt_serialize_enter(ifp->if_serializer);
		if (ifp->if_flags & IFF_RUNNING)
			ifp->if_poll(ifp, POLL_DEREGISTER, 1);
		lwkt_serialize_exit(ifp->if_serializer);
	}

	crit_exit();
	return 1;
}
static void
poll_add_sysctl(struct sysctl_ctx_list *ctx, struct sysctl_oid_list *parent,
		struct pollctx *pctx)
{
	SYSCTL_ADD_PROC(ctx, parent, OID_AUTO, "enable",
			CTLTYPE_INT | CTLFLAG_RW, pctx, 0, sysctl_polling,
			"I", "Polling enabled");

	SYSCTL_ADD_PROC(ctx, parent, OID_AUTO, "pollhz",
			CTLTYPE_INT | CTLFLAG_RW, pctx, 0, sysctl_pollhz,
			"I", "Device polling frequency");

	SYSCTL_ADD_PROC(ctx, parent, OID_AUTO, "reg_frac",
			CTLTYPE_UINT | CTLFLAG_RW, pctx, 0, sysctl_regfrac,
			"IU", "Every this many cycles poll register");

	SYSCTL_ADD_PROC(ctx, parent, OID_AUTO, "burst_max",
			CTLTYPE_UINT | CTLFLAG_RW, pctx, 0, sysctl_burstmax,
			"IU", "Max Polling burst size");

	SYSCTL_ADD_PROC(ctx, parent, OID_AUTO, "each_burst",
			CTLTYPE_UINT | CTLFLAG_RW, pctx, 0, sysctl_eachburst,
			"IU", "Max size of each burst");

	SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "phase", CTLFLAG_RD,
			&pctx->phase, 0, "Polling phase");

	SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "suspect", CTLFLAG_RW,
			&pctx->suspect, 0, "suspect event");

	SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "stalled", CTLFLAG_RW,
			&pctx->stalled, 0, "potential stalls");

	SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "burst", CTLFLAG_RD,
			&pctx->poll_burst, 0, "Current polling burst size");

	SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "user_frac", CTLFLAG_RW,
			&pctx->user_frac, 0,
			"Desired user fraction of cpu time");

	SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "short_ticks", CTLFLAG_RW,
			&pctx->short_ticks, 0,
			"Hardclock ticks shorter than they should be");

	SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "lost_polls", CTLFLAG_RW,
			&pctx->lost_polls, 0,
			"How many times we would have lost a poll tick");

	SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "pending_polls", CTLFLAG_RD,
			&pctx->pending_polls, 0, "Do we need to poll again");

	SYSCTL_ADD_INT(ctx, parent, OID_AUTO, "residual_burst", CTLFLAG_RD,
			&pctx->residual_burst, 0,
			"# of residual cycles in burst");

	SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "handlers", CTLFLAG_RD,
			&pctx->poll_handlers, 0,
			"Number of registered poll handlers");
}
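/*
 * The nodes registered above appear under kern.polling.X (one sub-tree per
 * polling cpu) and can be tuned from userland, e.g.
 * "sysctl kern.polling.0.pollhz=4000".  Below is a minimal userland sketch
 * using sysctlbyname(3); it is not part of the kernel build and assumes
 * polling runs on cpu0.
 */
#if 0
#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdio.h>

int
main(void)
{
	int hz = 4000;
	size_t len = sizeof(hz);

	/* set the per-cpu polling frequency */
	if (sysctlbyname("kern.polling.0.pollhz", NULL, NULL, &hz, sizeof(hz)) < 0)
		perror("sysctlbyname(set)");

	/* read it back */
	if (sysctlbyname("kern.polling.0.pollhz", &hz, &len, NULL, 0) == 0)
		printf("pollhz is now %d\n", hz);
	return 0;
}
#endif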
static void
poll_sysctl_pollhz(struct netmsg *msg)
{
	struct pollctx *pctx;
	int cpuid;

	cpuid = mycpu->gd_cpuid;
	KKASSERT(cpuid < POLLCTX_MAX);

	pctx = poll_context[cpuid];
	KKASSERT(pctx != NULL);
	KKASSERT(pctx->poll_cpuid == cpuid);

	/*
	 * If polling is disabled or there is no device registered,
	 * don't adjust the polling systimer frequency.
	 * The polling systimer frequency will be adjusted once polling
	 * is enabled and there are registered devices.
	 */
	pctx->pollhz = msg->nm_lmsg.u.ms_result;
	if (pctx->polling_enabled && pctx->poll_handlers)
		systimer_adjust_periodic(&pctx->pollclock, pctx->pollhz);

	/*
	 * Make sure that reg_frac and reg_frac_count are within valid range.
	 */
	if (pctx->reg_frac > pctx->pollhz) {
		pctx->reg_frac = pctx->pollhz;
		if (pctx->reg_frac_count > pctx->reg_frac)
			pctx->reg_frac_count = pctx->reg_frac - 1;
	}

	lwkt_replymsg(&msg->nm_lmsg, 0);
}
static void
poll_sysctl_polling(struct netmsg *msg)
{
	struct pollctx *pctx;
	int i, cpuid;

	cpuid = mycpu->gd_cpuid;
	KKASSERT(cpuid < POLLCTX_MAX);

	pctx = poll_context[cpuid];
	KKASSERT(pctx != NULL);
	KKASSERT(pctx->poll_cpuid == cpuid);

	/*
	 * If polling is disabled or there is no device registered,
	 * cut the polling systimer frequency to 1hz.
	 */
	pctx->polling_enabled = msg->nm_lmsg.u.ms_result;
	if (pctx->polling_enabled && pctx->poll_handlers) {
		systimer_adjust_periodic(&pctx->pollclock, pctx->pollhz);
	} else {
		systimer_adjust_periodic(&pctx->pollclock, 1);
		poll_reset_state(pctx);
	}

	if (!pctx->polling_enabled && pctx->poll_handlers != 0) {
		for (i = 0 ; i < pctx->poll_handlers ; i++) {
			struct ifnet *ifp = pctx->pr[i].ifp;

			lwkt_serialize_enter(ifp->if_serializer);

			if ((ifp->if_flags & IFF_POLLING) == 0) {
				KKASSERT(ifp->if_poll_cpuid < 0);
				lwkt_serialize_exit(ifp->if_serializer);
				continue;
			}
			ifp->if_flags &= ~IFF_POLLING;
			ifp->if_poll_cpuid = -1;

			/*
			 * Only call the interface deregistration
			 * function if the interface is still
			 * running.
			 */
			if (ifp->if_flags & IFF_RUNNING)
				ifp->if_poll(ifp, POLL_DEREGISTER, 1);

			lwkt_serialize_exit(ifp->if_serializer);
		}
		pctx->poll_handlers = 0;
	}

	lwkt_replymsg(&msg->nm_lmsg, 0);
}
static void
poll_sysctl_regfrac(struct netmsg *msg)
{
	struct pollctx *pctx;
	uint32_t reg_frac;
	int cpuid;

	cpuid = mycpu->gd_cpuid;
	KKASSERT(cpuid < POLLCTX_MAX);

	pctx = poll_context[cpuid];
	KKASSERT(pctx != NULL);
	KKASSERT(pctx->poll_cpuid == cpuid);

	reg_frac = msg->nm_lmsg.u.ms_result;
	if (reg_frac > pctx->pollhz)
		reg_frac = pctx->pollhz;
	else if (reg_frac < 1)
		reg_frac = 1;

	pctx->reg_frac = reg_frac;
	if (pctx->reg_frac_count > pctx->reg_frac)
		pctx->reg_frac_count = pctx->reg_frac - 1;

	lwkt_replymsg(&msg->nm_lmsg, 0);
}
static void
poll_sysctl_burstmax(struct netmsg *msg)
{
	struct pollctx *pctx;
	int cpuid;

	cpuid = mycpu->gd_cpuid;
	KKASSERT(cpuid < POLLCTX_MAX);

	pctx = poll_context[cpuid];
	KKASSERT(pctx != NULL);
	KKASSERT(pctx->poll_cpuid == cpuid);

	pctx->poll_burst_max = msg->nm_lmsg.u.ms_result;
	if (pctx->poll_each_burst > pctx->poll_burst_max)
		pctx->poll_each_burst = pctx->poll_burst_max;
	if (pctx->poll_burst > pctx->poll_burst_max)
		pctx->poll_burst = pctx->poll_burst_max;
	if (pctx->residual_burst > pctx->poll_burst_max)
		pctx->residual_burst = pctx->poll_burst_max;

	lwkt_replymsg(&msg->nm_lmsg, 0);
}
static void
poll_sysctl_eachburst(struct netmsg *msg)
{
	struct pollctx *pctx;
	uint32_t each_burst;
	int cpuid;

	cpuid = mycpu->gd_cpuid;
	KKASSERT(cpuid < POLLCTX_MAX);

	pctx = poll_context[cpuid];
	KKASSERT(pctx != NULL);
	KKASSERT(pctx->poll_cpuid == cpuid);

	each_burst = msg->nm_lmsg.u.ms_result;
	if (each_burst > pctx->poll_burst_max)
		each_burst = pctx->poll_burst_max;
	else if (each_burst < 1)
		each_burst = 1;
	pctx->poll_each_burst = each_burst;

	lwkt_replymsg(&msg->nm_lmsg, 0);
}