/*
 * Copyright (c) 2001-2002 Luigi Rizzo
 *
 * Supported by: the Xorp Project (www.xorp.org)
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/kern/kern_poll.c,v 1.2.2.4 2002/06/27 23:26:33 luigi Exp $
 */
#include "opt_ifpoll.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/serialize.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/microtime_pcpu.h>

#include <sys/thread2.h>
#include <sys/msgport2.h>

#include <net/if_poll.h>
#include <net/netmsg2.h>
#include <net/netisr2.h>
/*
 * Polling support for network device drivers.
 *
 * Drivers which support this feature try to register one status polling
 * handler and several TX/RX polling handlers with the polling code.
 * If the interface's if_npoll is called with a non-NULL second argument,
 * then a register operation is requested, else a deregister operation is
 * requested.  If the requested operation is "register", the driver should
 * set up the ifpoll_info passed in according to its own needs:
 *   ifpoll_info.ifpi_status.status_func == NULL
 *     No status polling handler will be installed on CPU(0)
 *   ifpoll_info.ifpi_rx[n].poll_func == NULL
 *     No RX polling handler will be installed on CPU(n)
 *   ifpoll_info.ifpi_tx[n].poll_func == NULL
 *     No TX polling handler will be installed on CPU(n)
 *
 * RX is polled at the specified polling frequency (net.ifpoll.X.pollhz).
 * TX and status polling can be done at a lower frequency than RX
 * (net.ifpoll.0.status_frac and net.ifpoll.X.tx_frac).  To avoid running
 * several systimers at high frequency, the RX systimer also drives the TX
 * and status polling.
 *
 * All of the registered polling handlers are called only if the interface
 * is marked as both IFF_RUNNING and IFF_NPOLLING.  However, the interface's
 * register and deregister function (ifnet.if_npoll) will be called even
 * if the interface is not marked with IFF_RUNNING.
 *
 * If registration is successful, the driver must disable interrupts,
 * and further I/O is performed through the TX/RX polling handlers, which
 * are invoked (at least once per clock tick) with 3 arguments: the "arg"
 * passed at register time, a struct ifnet pointer, and a "count" limit.
 * The registered serializer will be held before calling the related
 * polling handler.
 *
 * The count limit specifies how much work the handler can do during the
 * call -- typically this is the number of packets to be received, or
 * transmitted, etc. (drivers are free to interpret this number, as long
 * as the max time spent in the function grows roughly linearly with the
 * count).
 *
 * A second variable controls the sharing of CPU between polling/kernel
 * network processing, and other activities (typically userlevel tasks):
 * net.ifpoll.X.{rx,tx}.user_frac (between 0 and 100, default 50) sets the
 * share of CPU allocated to user tasks.  CPU is allocated proportionally
 * to the shares, by dynamically adjusting the "count" (poll_burst).
 *
 * Other parameters should be left at their default values.
 * The following constraints hold:
 *
 *	1 <= poll_burst <= poll_burst_max
 *	1 <= poll_each_burst <= poll_burst_max
 *	MIN_POLL_BURST_MAX <= poll_burst_max <= MAX_POLL_BURST_MAX
 */
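/*
 * Illustrative only (not part of the original file): a minimal sketch of
 * how a driver's if_npoll method could fill in the ifpoll_info described
 * above.  The driver-side names (foo_softc, sc_serializer, foo_npoll_status,
 * foo_rxpoll, foo_txpoll, foo_enable_intr, foo_disable_intr) are
 * hypothetical; only the ifpoll_info fields follow this file.
 *
 *	static void
 *	foo_npoll(struct ifnet *ifp, struct ifpoll_info *info)
 *	{
 *		struct foo_softc *sc = ifp->if_softc;
 *
 *		if (info != NULL) {
 *			// register: fill in only the handlers we support
 *			info->ifpi_status.status_func = foo_npoll_status;
 *			info->ifpi_status.serializer = &sc->sc_serializer;
 *
 *			info->ifpi_rx[0].poll_func = foo_rxpoll;
 *			info->ifpi_rx[0].arg = sc;
 *			info->ifpi_rx[0].serializer = &sc->sc_serializer;
 *
 *			info->ifpi_tx[0].poll_func = foo_txpoll;
 *			info->ifpi_tx[0].arg = sc;
 *			info->ifpi_tx[0].serializer = &sc->sc_serializer;
 *
 *			// interrupts must be disabled while polling
 *			foo_disable_intr(sc);
 *		} else {
 *			// deregister: back to interrupt driven operation
 *			foo_enable_intr(sc);
 *		}
 *	}
 */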
#define IFPOLL_LIST_LEN		128
#define IFPOLL_FREQ_MAX		30000

#define MIN_IOPOLL_BURST_MAX	10
#define MAX_IOPOLL_BURST_MAX	5000
#define IOPOLL_BURST_MAX	250	/* good for 1000Mbit net and HZ=6000 */

#define IOPOLL_EACH_BURST	50
#define IOPOLL_USER_FRAC	50

#define IFPOLL_FREQ_DEFAULT	6000

#define IFPOLL_TXFRAC_DEFAULT	1	/* 1/1 of the pollhz */
#define IFPOLL_STFRAC_DEFAULT	120	/* 1/120 of the pollhz */

#define IFPOLL_RX		0x1
#define IFPOLL_TX		0x2
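
/*
 * With the defaults above (pollhz 6000, status_frac 120, tx_frac 1),
 * RX and TX are polled 6000 times per second, while the status handler
 * runs only once every 120 ticks, i.e. at 50Hz.
 */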
struct iopoll_rec {
	struct lwkt_serialize	*serializer;
	struct ifnet		*ifp;
	void			*arg;

	ifpoll_iofn_t		poll_func;
};

struct iopoll_ctx {
	union microtime_pcpu	prev_t;
	u_long			short_ticks;		/* statistics */
	u_long			lost_polls;		/* statistics */
	u_long			suspect;		/* statistics */
	u_long			stalled;		/* statistics */
	uint32_t		pending_polls;		/* state */

	struct netmsg_base	poll_netmsg;
	struct netmsg_base	poll_more_netmsg;

	int			poll_cpuid;
	int			pollhz;
	uint32_t		phase;			/* state */
	int			residual_burst;		/* state */
	uint32_t		poll_each_burst;	/* tunable */
	union microtime_pcpu	poll_start_t;		/* state */

	uint32_t		poll_burst;		/* state */
	uint32_t		poll_burst_max;		/* tunable */
	uint32_t		user_frac;		/* tunable */
	uint32_t		kern_frac;		/* state */

	uint32_t		poll_handlers;	/* next free entry in pr[]. */
	struct iopoll_rec	pr[IFPOLL_LIST_LEN];

	struct sysctl_ctx_list	poll_sysctl_ctx;
	struct sysctl_oid	*poll_sysctl_tree;
};

struct poll_comm {
	struct systimer		pollclock;
	int			poll_cpuid;

	int			stfrac_count;		/* state */
	int			poll_stfrac;		/* tunable */

	int			txfrac_count;		/* state */
	int			poll_txfrac;		/* tunable */

	int			pollhz;			/* tunable */

	struct sysctl_ctx_list	sysctl_ctx;
	struct sysctl_oid	*sysctl_tree;
};

struct stpoll_rec {
	struct lwkt_serialize	*serializer;
	struct ifnet		*ifp;
	ifpoll_stfn_t		status_func;
};

struct stpoll_ctx {
	struct netmsg_base	poll_netmsg;

	uint32_t		poll_handlers;	/* next free entry in pr[]. */
	struct stpoll_rec	pr[IFPOLL_LIST_LEN];

	struct sysctl_ctx_list	poll_sysctl_ctx;
	struct sysctl_oid	*poll_sysctl_tree;
};

struct iopoll_sysctl_netmsg {
	struct netmsg_base	base;
	struct iopoll_ctx	*ctx;
};
static void	ifpoll_init_pcpu(int);
static void	ifpoll_register_handler(netmsg_t);
static void	ifpoll_deregister_handler(netmsg_t);

static void	stpoll_init(void);
static void	stpoll_handler(netmsg_t);
static void	stpoll_clock(struct stpoll_ctx *);
static int	stpoll_register(struct ifnet *, const struct ifpoll_status *);
static int	stpoll_deregister(struct ifnet *);

static struct iopoll_ctx *iopoll_ctx_create(int, int);
static void	iopoll_init(int);
static void	rxpoll_handler(netmsg_t);
static void	txpoll_handler(netmsg_t);
static void	rxpollmore_handler(netmsg_t);
static void	txpollmore_handler(netmsg_t);
static void	iopoll_clock(struct iopoll_ctx *);
static int	iopoll_register(struct ifnet *, struct iopoll_ctx *,
		    const struct ifpoll_io *);
static int	iopoll_deregister(struct ifnet *, struct iopoll_ctx *);

static void	iopoll_add_sysctl(struct sysctl_ctx_list *,
		    struct sysctl_oid_list *, struct iopoll_ctx *, int);
static void	sysctl_burstmax_handler(netmsg_t);
static int	sysctl_burstmax(SYSCTL_HANDLER_ARGS);
static void	sysctl_eachburst_handler(netmsg_t);
static int	sysctl_eachburst(SYSCTL_HANDLER_ARGS);

static void	poll_comm_init(int);
static void	poll_comm_start(int);
static void	poll_comm_adjust_pollhz(struct poll_comm *);
static void	poll_comm_systimer0(systimer_t, int, struct intrframe *);
static void	poll_comm_systimer(systimer_t, int, struct intrframe *);
static void	sysctl_pollhz_handler(netmsg_t);
static void	sysctl_stfrac_handler(netmsg_t);
static void	sysctl_txfrac_handler(netmsg_t);
static int	sysctl_pollhz(SYSCTL_HANDLER_ARGS);
static int	sysctl_stfrac(SYSCTL_HANDLER_ARGS);
static int	sysctl_txfrac(SYSCTL_HANDLER_ARGS);
static int	sysctl_compat_npoll_stfrac(SYSCTL_HANDLER_ARGS);
static int	sysctl_compat_npoll_cpuid(SYSCTL_HANDLER_ARGS);
static struct stpoll_ctx	stpoll_context;
static struct poll_comm		*poll_common[MAXCPU];
static struct iopoll_ctx	*rxpoll_context[MAXCPU];
static struct iopoll_ctx	*txpoll_context[MAXCPU];

SYSCTL_NODE(_net, OID_AUTO, ifpoll, CTLFLAG_RW, 0,
	    "Network device polling parameters");

static int	iopoll_burst_max = IOPOLL_BURST_MAX;
static int	iopoll_each_burst = IOPOLL_EACH_BURST;
static int	iopoll_user_frac = IOPOLL_USER_FRAC;

static int	ifpoll_pollhz = IFPOLL_FREQ_DEFAULT;
static int	ifpoll_stfrac = IFPOLL_STFRAC_DEFAULT;
static int	ifpoll_txfrac = IFPOLL_TXFRAC_DEFAULT;

TUNABLE_INT("net.ifpoll.burst_max", &iopoll_burst_max);
TUNABLE_INT("net.ifpoll.each_burst", &iopoll_each_burst);
TUNABLE_INT("net.ifpoll.user_frac", &iopoll_user_frac);
TUNABLE_INT("net.ifpoll.pollhz", &ifpoll_pollhz);
TUNABLE_INT("net.ifpoll.status_frac", &ifpoll_stfrac);
TUNABLE_INT("net.ifpoll.tx_frac", &ifpoll_txfrac);
#if !defined(KTR_IF_POLL)
#define KTR_IF_POLL		KTR_ALL
#endif
KTR_INFO_MASTER(if_poll);
KTR_INFO(KTR_IF_POLL, if_poll, rx_start, 0, "rx start");
KTR_INFO(KTR_IF_POLL, if_poll, rx_end, 1, "rx end");
KTR_INFO(KTR_IF_POLL, if_poll, tx_start, 2, "tx start");
KTR_INFO(KTR_IF_POLL, if_poll, tx_end, 3, "tx end");
KTR_INFO(KTR_IF_POLL, if_poll, rx_mstart, 4, "rx more start");
KTR_INFO(KTR_IF_POLL, if_poll, rx_mend, 5, "rx more end");
KTR_INFO(KTR_IF_POLL, if_poll, tx_mstart, 6, "tx more start");
KTR_INFO(KTR_IF_POLL, if_poll, tx_mend, 7, "tx more end");
KTR_INFO(KTR_IF_POLL, if_poll, ioclock_start, 8, "ioclock start");
KTR_INFO(KTR_IF_POLL, if_poll, ioclock_end, 9, "ioclock end");
#define logpoll(name)	KTR_LOG(if_poll_ ## name)
#define IFPOLL_FREQ_ADJ(comm)	(((comm)->poll_cpuid * 3) % 50)

static __inline int
poll_comm_pollhz_div(const struct poll_comm *comm, int pollhz)
{
	return pollhz + IFPOLL_FREQ_ADJ(comm);
}

static __inline int
poll_comm_pollhz_conv(const struct poll_comm *comm, int pollhz)
{
	return pollhz - IFPOLL_FREQ_ADJ(comm);
}
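
/*
 * For example, with the default 6000Hz request IFPOLL_FREQ_ADJ yields 0
 * on cpu0, 3 on cpu1, 6 on cpu2, ..., so poll_comm_pollhz_div() runs the
 * per-cpu systimers at 6000, 6003, 6006, ... Hz (keeping them from all
 * firing in lock step), while poll_comm_pollhz_conv() reports the
 * original 6000 back through the sysctl.
 */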
static __inline void
ifpoll_sendmsg_oncpu(netmsg_t msg)
{
	if (msg->lmsg.ms_flags & MSGF_DONE)
		lwkt_sendmsg_oncpu(netisr_cpuport(mycpuid), &msg->lmsg);
}

static __inline void
sched_stpoll(struct stpoll_ctx *st_ctx)
{
	ifpoll_sendmsg_oncpu((netmsg_t)&st_ctx->poll_netmsg);
}

static __inline void
sched_iopoll(struct iopoll_ctx *io_ctx)
{
	ifpoll_sendmsg_oncpu((netmsg_t)&io_ctx->poll_netmsg);
}

static __inline void
sched_iopollmore(struct iopoll_ctx *io_ctx)
{
	ifpoll_sendmsg_oncpu((netmsg_t)&io_ctx->poll_more_netmsg);
}
/*
 * Initialize per-cpu polling(4) context.
 */
static void
ifpoll_init_pcpu(int cpuid)
{
	poll_comm_init(cpuid);

	if (cpuid == 0)
		stpoll_init();
	iopoll_init(cpuid);

	poll_comm_start(cpuid);
}
static void
ifpoll_init_handler(netmsg_t msg)
{
	int cpu = mycpuid, nextcpu;

	ifpoll_init_pcpu(cpu);

	nextcpu = cpu + 1;
	if (nextcpu < netisr_ncpus)
		lwkt_forwardmsg(netisr_cpuport(nextcpu), &msg->base.lmsg);
	else
		lwkt_replymsg(&msg->base.lmsg, 0);
}

static void
ifpoll_sysinit(void *dummy __unused)
{
	struct netmsg_base msg;

	netmsg_init(&msg, NULL, &curthread->td_msgport, 0, ifpoll_init_handler);
	lwkt_domsg(netisr_cpuport(0), &msg.lmsg, 0);
}
SYSINIT(ifpoll, SI_SUB_PRE_DRIVERS, SI_ORDER_ANY, ifpoll_sysinit, NULL);
int
ifpoll_register(struct ifnet *ifp)
{
	struct ifpoll_info *info;
	struct netmsg_base nmsg;
	int error;

	if (ifp->if_npoll == NULL) {
		/* Device does not support polling */
		return EOPNOTSUPP;
	}

	info = kmalloc(sizeof(*info), M_TEMP, M_WAITOK | M_ZERO);

	/*
	 * Attempt to register.  Interlock with IFF_NPOLLING.
	 */
	ifnet_serialize_all(ifp);

	if (ifp->if_flags & IFF_NPOLLING) {
		/* Already polling */
		ifnet_deserialize_all(ifp);
		kfree(info, M_TEMP);
		return EBUSY;
	}

	info->ifpi_ifp = ifp;

	ifp->if_flags |= IFF_NPOLLING;
	ifp->if_npoll(ifp, info);

	ifnet_deserialize_all(ifp);

	netmsg_init(&nmsg, NULL, &curthread->td_msgport,
	    0, ifpoll_register_handler);
	nmsg.lmsg.u.ms_resultp = info;

	error = lwkt_domsg(netisr_cpuport(0), &nmsg.lmsg, 0);
	if (error) {
		if (!ifpoll_deregister(ifp)) {
			if_printf(ifp, "ifpoll_register: "
			    "ifpoll_deregister failed!\n");
		}
	}

	kfree(info, M_TEMP);
	return error;
}
int
ifpoll_deregister(struct ifnet *ifp)
{
	struct netmsg_base nmsg;
	int error;

	if (ifp->if_npoll == NULL)
		return EOPNOTSUPP;

	ifnet_serialize_all(ifp);

	if ((ifp->if_flags & IFF_NPOLLING) == 0) {
		ifnet_deserialize_all(ifp);
		return EINVAL;
	}
	ifp->if_flags &= ~IFF_NPOLLING;

	ifnet_deserialize_all(ifp);

	netmsg_init(&nmsg, NULL, &curthread->td_msgport,
	    0, ifpoll_deregister_handler);
	nmsg.lmsg.u.ms_resultp = ifp;

	error = lwkt_domsg(netisr_cpuport(0), &nmsg.lmsg, 0);
	if (!error) {
		ifnet_serialize_all(ifp);
		ifp->if_npoll(ifp, NULL);
		ifnet_deserialize_all(ifp);
	}
	return error;
}
static void
ifpoll_register_handler(netmsg_t nmsg)
{
	const struct ifpoll_info *info = nmsg->lmsg.u.ms_resultp;
	int cpuid = mycpuid, nextcpu;
	int error;

	KKASSERT(cpuid < netisr_ncpus);
	KKASSERT(&curthread->td_msgport == netisr_cpuport(cpuid));

	if (cpuid == 0) {
		error = stpoll_register(info->ifpi_ifp, &info->ifpi_status);
		if (error)
			goto failed;
	}

	error = iopoll_register(info->ifpi_ifp, rxpoll_context[cpuid],
	    &info->ifpi_rx[cpuid]);
	if (error)
		goto failed;

	error = iopoll_register(info->ifpi_ifp, txpoll_context[cpuid],
	    &info->ifpi_tx[cpuid]);
	if (error)
		goto failed;

	/* Adjust polling frequency, after all registration is done */
	poll_comm_adjust_pollhz(poll_common[cpuid]);

	nextcpu = cpuid + 1;
	if (nextcpu < netisr_ncpus)
		lwkt_forwardmsg(netisr_cpuport(nextcpu), &nmsg->lmsg);
	else
		lwkt_replymsg(&nmsg->lmsg, 0);
	return;
failed:
	lwkt_replymsg(&nmsg->lmsg, error);
}
static void
ifpoll_deregister_handler(netmsg_t nmsg)
{
	struct ifnet *ifp = nmsg->lmsg.u.ms_resultp;
	int cpuid = mycpuid, nextcpu;

	KKASSERT(cpuid < netisr_ncpus);
	KKASSERT(&curthread->td_msgport == netisr_cpuport(cpuid));

	if (cpuid == 0)
		stpoll_deregister(ifp);
	iopoll_deregister(ifp, rxpoll_context[cpuid]);
	iopoll_deregister(ifp, txpoll_context[cpuid]);

	/* Adjust polling frequency, after all deregistration is done */
	poll_comm_adjust_pollhz(poll_common[cpuid]);

	nextcpu = cpuid + 1;
	if (nextcpu < netisr_ncpus)
		lwkt_forwardmsg(netisr_cpuport(nextcpu), &nmsg->lmsg);
	else
		lwkt_replymsg(&nmsg->lmsg, 0);
}
static void
stpoll_init(void)
{
	struct stpoll_ctx *st_ctx = &stpoll_context;
	const struct poll_comm *comm = poll_common[0];

	sysctl_ctx_init(&st_ctx->poll_sysctl_ctx);
	st_ctx->poll_sysctl_tree = SYSCTL_ADD_NODE(&st_ctx->poll_sysctl_ctx,
	    SYSCTL_CHILDREN(comm->sysctl_tree),
	    OID_AUTO, "status", CTLFLAG_RD, 0, "");

	SYSCTL_ADD_UINT(&st_ctx->poll_sysctl_ctx,
	    SYSCTL_CHILDREN(st_ctx->poll_sysctl_tree),
	    OID_AUTO, "handlers", CTLFLAG_RD,
	    &st_ctx->poll_handlers, 0,
	    "Number of registered status poll handlers");

	netmsg_init(&st_ctx->poll_netmsg, NULL, &netisr_adone_rport,
	    0, stpoll_handler);
}
/*
 * stpoll_handler is scheduled by sched_stpoll when appropriate, typically
 * once per polling systimer tick.
 */
static void
stpoll_handler(netmsg_t msg)
{
	struct stpoll_ctx *st_ctx = &stpoll_context;
	struct thread *td = curthread;
	int i;

	ASSERT_IS_NETISR(td, 0);

	crit_enter_quick(td);

	lwkt_replymsg(&msg->lmsg, 0);

	if (st_ctx->poll_handlers == 0) {
		crit_exit_quick(td);
		return;
	}

	for (i = 0; i < st_ctx->poll_handlers; ++i) {
		const struct stpoll_rec *rec = &st_ctx->pr[i];
		struct ifnet *ifp = rec->ifp;

		if (!lwkt_serialize_try(rec->serializer))
			continue;

		if ((ifp->if_flags & (IFF_RUNNING | IFF_NPOLLING)) ==
		    (IFF_RUNNING | IFF_NPOLLING))
			rec->status_func(ifp);

		lwkt_serialize_exit(rec->serializer);
	}

	crit_exit_quick(td);
}
/*
 * Hook from status poll systimer.  Tries to schedule a status poll.
 * NOTE: Caller should hold the critical section.
 */
static void
stpoll_clock(struct stpoll_ctx *st_ctx)
{
	KKASSERT(mycpuid == 0);

	if (st_ctx->poll_handlers == 0)
		return;
	sched_stpoll(st_ctx);
}
static int
stpoll_register(struct ifnet *ifp, const struct ifpoll_status *st_rec)
{
	struct stpoll_ctx *st_ctx = &stpoll_context;
	int error;

	if (st_rec->status_func == NULL)
		return 0;

	/*
	 * Check if there is room.
	 */
	if (st_ctx->poll_handlers >= IFPOLL_LIST_LEN) {
		/*
		 * List full, cannot register more entries.
		 * This should never happen; if it does, it is probably a
		 * broken driver trying to register multiple times. Checking
		 * this at runtime is expensive, and won't solve the problem
		 * anyways, so just report a few times and then give up.
		 */
		static int verbose = 10; /* XXX */

		if (verbose > 0) {
			kprintf("status poll handlers list full, "
			    "maybe a broken driver ?\n");
			verbose--;
		}
		error = ENOMEM;
	} else {
		struct stpoll_rec *rec = &st_ctx->pr[st_ctx->poll_handlers];

		rec->ifp = ifp;
		rec->serializer = st_rec->serializer;
		rec->status_func = st_rec->status_func;

		st_ctx->poll_handlers++;
		error = 0;
	}
	return error;
}
static int
stpoll_deregister(struct ifnet *ifp)
{
	struct stpoll_ctx *st_ctx = &stpoll_context;
	int i, error;

	for (i = 0; i < st_ctx->poll_handlers; ++i) {
		if (st_ctx->pr[i].ifp == ifp) /* Found it */
			break;
	}
	if (i == st_ctx->poll_handlers) {
		error = ENOENT;
	} else {
		st_ctx->poll_handlers--;
		if (i < st_ctx->poll_handlers) {
			/* Last entry replaces this one. */
			st_ctx->pr[i] = st_ctx->pr[st_ctx->poll_handlers];
		}
		error = 0;
	}
	return error;
}
static void
iopoll_reset_state(struct iopoll_ctx *io_ctx)
{
	io_ctx->poll_burst = io_ctx->poll_each_burst;
	io_ctx->pending_polls = 0;
	io_ctx->residual_burst = 0;
	io_ctx->kern_frac = 0;
	bzero(&io_ctx->poll_start_t, sizeof(io_ctx->poll_start_t));
	bzero(&io_ctx->prev_t, sizeof(io_ctx->prev_t));
}
static void
iopoll_init(int cpuid)
{
	KKASSERT(cpuid < netisr_ncpus);

	rxpoll_context[cpuid] = iopoll_ctx_create(cpuid, IFPOLL_RX);
	txpoll_context[cpuid] = iopoll_ctx_create(cpuid, IFPOLL_TX);
}
static struct iopoll_ctx *
iopoll_ctx_create(int cpuid, int poll_type)
{
	struct poll_comm *comm;
	struct iopoll_ctx *io_ctx;
	const char *poll_type_str;
	netisr_fn_t handler, more_handler;

	KKASSERT(poll_type == IFPOLL_RX || poll_type == IFPOLL_TX);

	/*
	 * Make sure that tunables are in sane state
	 */
	if (iopoll_burst_max < MIN_IOPOLL_BURST_MAX)
		iopoll_burst_max = MIN_IOPOLL_BURST_MAX;
	else if (iopoll_burst_max > MAX_IOPOLL_BURST_MAX)
		iopoll_burst_max = MAX_IOPOLL_BURST_MAX;

	if (iopoll_each_burst > iopoll_burst_max)
		iopoll_each_burst = iopoll_burst_max;

	comm = poll_common[cpuid];

	/*
	 * Create the per-cpu polling context
	 */
	io_ctx = kmalloc_cachealign(sizeof(*io_ctx), M_DEVBUF,
	    M_WAITOK | M_ZERO);

	io_ctx->poll_each_burst = iopoll_each_burst;
	io_ctx->poll_burst_max = iopoll_burst_max;
	io_ctx->user_frac = iopoll_user_frac;
	if (poll_type == IFPOLL_RX)
		io_ctx->pollhz = comm->pollhz;
	else
		io_ctx->pollhz = comm->pollhz / (comm->poll_txfrac + 1);
	io_ctx->poll_cpuid = cpuid;
	iopoll_reset_state(io_ctx);

	if (poll_type == IFPOLL_RX) {
		handler = rxpoll_handler;
		more_handler = rxpollmore_handler;
	} else {
		handler = txpoll_handler;
		more_handler = txpollmore_handler;
	}

	netmsg_init(&io_ctx->poll_netmsg, NULL, &netisr_adone_rport,
	    0, handler);
	io_ctx->poll_netmsg.lmsg.u.ms_resultp = io_ctx;

	netmsg_init(&io_ctx->poll_more_netmsg, NULL, &netisr_adone_rport,
	    0, more_handler);
	io_ctx->poll_more_netmsg.lmsg.u.ms_resultp = io_ctx;

	/*
	 * Initialize per-cpu sysctl nodes
	 */
	if (poll_type == IFPOLL_RX)
		poll_type_str = "rx";
	else
		poll_type_str = "tx";

	sysctl_ctx_init(&io_ctx->poll_sysctl_ctx);
	io_ctx->poll_sysctl_tree = SYSCTL_ADD_NODE(&io_ctx->poll_sysctl_ctx,
	    SYSCTL_CHILDREN(comm->sysctl_tree),
	    OID_AUTO, poll_type_str, CTLFLAG_RD, 0, "");
	iopoll_add_sysctl(&io_ctx->poll_sysctl_ctx,
	    SYSCTL_CHILDREN(io_ctx->poll_sysctl_tree), io_ctx, poll_type);

	return io_ctx;
}
/*
 * Hook from iopoll systimer.  Tries to schedule an iopoll, but keeps
 * track of lost ticks due to the previous handler taking too long.
 * Normally, this should not happen, because a polling handler should
 * run for a short time.  However, in some cases (e.g. when there are
 * changes in link status etc.) the drivers take a very long time
 * (even in the order of milliseconds) to reset and reconfigure the
 * device, causing apparent lost polls.
 *
 * The first part of the code is just for debugging purposes, and tries
 * to count how often hardclock ticks are shorter than they should be,
 * meaning either stray interrupts or delayed events.
 *
 * WARNING! called from fastint or IPI, the MP lock might not be held.
 * NOTE: Caller should hold the critical section.
 */
static void
iopoll_clock(struct iopoll_ctx *io_ctx)
{
	union microtime_pcpu t;
	int delta;

	KKASSERT(mycpuid == io_ctx->poll_cpuid);

	if (io_ctx->poll_handlers == 0)
		return;

	logpoll(ioclock_start);

	microtime_pcpu_get(&t);
	delta = microtime_pcpu_diff(&io_ctx->prev_t, &t);
	if (delta * io_ctx->pollhz < 500000)
		io_ctx->short_ticks++;
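	/*
	 * In other words: a tick whose measured length is less than half
	 * the expected period (500000 / pollhz microseconds, ~83us at the
	 * default 6000Hz) is counted as a short tick.
	 */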
	if (io_ctx->pending_polls > 100) {
		/*
		 * Too much, assume it has stalled (not always true
		 * see comment above).
		 */
		io_ctx->stalled++;
		io_ctx->pending_polls = 0;
		io_ctx->phase = 0;
	}

	if (io_ctx->phase <= 2) {
		if (io_ctx->phase != 0)
			io_ctx->suspect++;
		io_ctx->phase = 1;
		sched_iopoll(io_ctx);
		io_ctx->phase = 2;
	}
	if (io_ctx->pending_polls++ > 0)
		io_ctx->lost_polls++;

	logpoll(ioclock_end);
}
/*
 * rxpoll_handler and txpoll_handler are scheduled by sched_iopoll when
 * appropriate, typically once per polling systimer tick.
 *
 * Note that the message is replied immediately in order to allow a new
 * ISR to be scheduled in the handler.
 */
static void
rxpoll_handler(netmsg_t msg)
{
	struct iopoll_ctx *io_ctx;
	struct thread *td = curthread;
	int i, cycles;

	io_ctx = msg->lmsg.u.ms_resultp;
	KKASSERT(&td->td_msgport == netisr_cpuport(io_ctx->poll_cpuid));

	crit_enter_quick(td);

	lwkt_replymsg(&msg->lmsg, 0);

	if (io_ctx->poll_handlers == 0) {
		crit_exit_quick(td);
		return;
	}

	if (io_ctx->residual_burst == 0) {
		/* First call in this tick */
		microtime_pcpu_get(&io_ctx->poll_start_t);
		io_ctx->residual_burst = io_ctx->poll_burst;
	}
	cycles = (io_ctx->residual_burst < io_ctx->poll_each_burst) ?
	    io_ctx->residual_burst : io_ctx->poll_each_burst;
	io_ctx->residual_burst -= cycles;

	for (i = 0; i < io_ctx->poll_handlers; i++) {
		const struct iopoll_rec *rec = &io_ctx->pr[i];
		struct ifnet *ifp = rec->ifp;

		if (!lwkt_serialize_try(rec->serializer))
			continue;

		if ((ifp->if_flags & (IFF_RUNNING | IFF_NPOLLING)) ==
		    (IFF_RUNNING | IFF_NPOLLING))
			rec->poll_func(ifp, rec->arg, cycles);

		lwkt_serialize_exit(rec->serializer);
	}

	/*
	 * Do a quick exit/enter to catch any higher-priority
	 * interrupt sources.
	 */
	crit_exit_quick(td);
	crit_enter_quick(td);

	sched_iopollmore(io_ctx);

	crit_exit_quick(td);
}
static void
txpoll_handler(netmsg_t msg)
{
	struct iopoll_ctx *io_ctx;
	struct thread *td = curthread;
	int i;

	io_ctx = msg->lmsg.u.ms_resultp;
	KKASSERT(&td->td_msgport == netisr_cpuport(io_ctx->poll_cpuid));

	crit_enter_quick(td);

	lwkt_replymsg(&msg->lmsg, 0);

	if (io_ctx->poll_handlers == 0) {
		crit_exit_quick(td);
		return;
	}

	for (i = 0; i < io_ctx->poll_handlers; i++) {
		const struct iopoll_rec *rec = &io_ctx->pr[i];
		struct ifnet *ifp = rec->ifp;

		if (!lwkt_serialize_try(rec->serializer))
			continue;

		if ((ifp->if_flags & (IFF_RUNNING | IFF_NPOLLING)) ==
		    (IFF_RUNNING | IFF_NPOLLING))
			rec->poll_func(ifp, rec->arg, -1);

		lwkt_serialize_exit(rec->serializer);
	}

	/*
	 * Do a quick exit/enter to catch any higher-priority
	 * interrupt sources.
	 */
	crit_exit_quick(td);
	crit_enter_quick(td);

	sched_iopollmore(io_ctx);

	crit_exit_quick(td);
}
/*
 * rxpollmore_handler and txpollmore_handler are called after other netisr's,
 * possibly scheduling another rxpoll_handler or txpoll_handler call, or
 * adapting the burst size for the next cycle.
 *
 * It is very bad to fetch large bursts of packets from a single card at once,
 * because the burst could take a long time to be completely processed, leading
 * to unfairness.  To reduce the problem, and also to account better for time
 * spent in network-related processing, we split the burst in smaller chunks
 * of fixed size, giving control to the other netisr's between chunks.  This
 * helps in improving the fairness, reducing livelock and accounting for the
 * work performed in low level handling.
 */
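/*
 * For example, with poll_burst at 150 and poll_each_burst at the default
 * 50, one systimer tick is served as three rxpoll_handler calls of 50
 * packets each (residual_burst going 150 -> 100 -> 50 -> 0), with the
 * other netisr handlers given a chance to run between the chunks.
 */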
static void
rxpollmore_handler(netmsg_t msg)
{
	struct thread *td = curthread;
	struct iopoll_ctx *io_ctx;
	union microtime_pcpu t;
	int kern_load;
	uint32_t pending_polls;

	io_ctx = msg->lmsg.u.ms_resultp;
	KKASSERT(&td->td_msgport == netisr_cpuport(io_ctx->poll_cpuid));

	crit_enter_quick(td);

	lwkt_replymsg(&msg->lmsg, 0);

	if (io_ctx->poll_handlers == 0) {
		crit_exit_quick(td);
		return;
	}

	if (io_ctx->residual_burst > 0) {
		sched_iopoll(io_ctx);
		crit_exit_quick(td);
		/* Will run immediately on return, followed by netisrs */
		return;
	}

	/* Here we can account time spent in iopoll's in this tick */
	microtime_pcpu_get(&t);
	kern_load = microtime_pcpu_diff(&io_ctx->poll_start_t, &t);
	kern_load = (kern_load * io_ctx->pollhz) / 10000;	/* 0..100 */
	io_ctx->kern_frac = kern_load;
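	/*
	 * The computation above assumes microtime_pcpu_diff() reports
	 * microseconds: one tick lasts 1000000 / pollhz us, so
	 * elapsed * pollhz / 10000 is the percentage of the tick spent in
	 * the poll handlers (e.g. 100us at 6000Hz -> 60).
	 */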
	if (kern_load > (100 - io_ctx->user_frac)) {
		/* Try decrease ticks */
		if (io_ctx->poll_burst > 1)
			io_ctx->poll_burst--;
	} else {
		if (io_ctx->poll_burst < io_ctx->poll_burst_max)
			io_ctx->poll_burst++;
	}

	io_ctx->pending_polls--;
	pending_polls = io_ctx->pending_polls;

	if (pending_polls == 0) {
		/* We are done */
	} else {
		/*
		 * Last cycle was long and caused us to miss one or more
		 * hardclock ticks.  Restart processing again, but slightly
		 * reduce the burst size to prevent that this happens again.
		 */
		io_ctx->poll_burst -= (io_ctx->poll_burst / 8);
		if (io_ctx->poll_burst < 1)
			io_ctx->poll_burst = 1;
		sched_iopoll(io_ctx);
	}

	crit_exit_quick(td);
}
static void
txpollmore_handler(netmsg_t msg)
{
	struct thread *td = curthread;
	struct iopoll_ctx *io_ctx;
	uint32_t pending_polls;

	io_ctx = msg->lmsg.u.ms_resultp;
	KKASSERT(&td->td_msgport == netisr_cpuport(io_ctx->poll_cpuid));

	crit_enter_quick(td);

	lwkt_replymsg(&msg->lmsg, 0);

	if (io_ctx->poll_handlers == 0) {
		crit_exit_quick(td);
		return;
	}

	io_ctx->pending_polls--;
	pending_polls = io_ctx->pending_polls;

	if (pending_polls == 0) {
		/* We are done */
	} else {
		/*
		 * Last cycle was long and caused us to miss one or more
		 * hardclock ticks.  Restart processing again.
		 */
		sched_iopoll(io_ctx);
	}

	crit_exit_quick(td);
}
static void
iopoll_add_sysctl(struct sysctl_ctx_list *ctx, struct sysctl_oid_list *parent,
    struct iopoll_ctx *io_ctx, int poll_type)
{
	if (poll_type == IFPOLL_RX) {
		SYSCTL_ADD_PROC(ctx, parent, OID_AUTO, "burst_max",
		    CTLTYPE_UINT | CTLFLAG_RW, io_ctx, 0, sysctl_burstmax,
		    "IU", "Max Polling burst size");

		SYSCTL_ADD_PROC(ctx, parent, OID_AUTO, "each_burst",
		    CTLTYPE_UINT | CTLFLAG_RW, io_ctx, 0, sysctl_eachburst,
		    "IU", "Max size of each burst");

		SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "burst", CTLFLAG_RD,
		    &io_ctx->poll_burst, 0, "Current polling burst size");

		SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "user_frac", CTLFLAG_RW,
		    &io_ctx->user_frac, 0, "Desired user fraction of cpu time");

		SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "kern_frac", CTLFLAG_RD,
		    &io_ctx->kern_frac, 0, "Kernel fraction of cpu time");

		SYSCTL_ADD_INT(ctx, parent, OID_AUTO, "residual_burst", CTLFLAG_RD,
		    &io_ctx->residual_burst, 0,
		    "# of residual cycles in burst");
	}

	SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "phase", CTLFLAG_RD,
	    &io_ctx->phase, 0, "Polling phase");

	SYSCTL_ADD_ULONG(ctx, parent, OID_AUTO, "suspect", CTLFLAG_RW,
	    &io_ctx->suspect, "Suspected events");

	SYSCTL_ADD_ULONG(ctx, parent, OID_AUTO, "stalled", CTLFLAG_RW,
	    &io_ctx->stalled, "Potential stalls");

	SYSCTL_ADD_ULONG(ctx, parent, OID_AUTO, "short_ticks", CTLFLAG_RW,
	    &io_ctx->short_ticks,
	    "Hardclock ticks shorter than they should be");

	SYSCTL_ADD_ULONG(ctx, parent, OID_AUTO, "lost_polls", CTLFLAG_RW,
	    &io_ctx->lost_polls,
	    "How many times we would have lost a poll tick");

	SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "pending_polls", CTLFLAG_RD,
	    &io_ctx->pending_polls, 0, "Do we need to poll again");

	SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "handlers", CTLFLAG_RD,
	    &io_ctx->poll_handlers, 0, "Number of registered poll handlers");
}
static void
sysctl_burstmax_handler(netmsg_t nmsg)
{
	struct iopoll_sysctl_netmsg *msg = (struct iopoll_sysctl_netmsg *)nmsg;
	struct iopoll_ctx *io_ctx;

	io_ctx = msg->ctx;
	KKASSERT(&curthread->td_msgport == netisr_cpuport(io_ctx->poll_cpuid));

	io_ctx->poll_burst_max = nmsg->lmsg.u.ms_result;
	if (io_ctx->poll_each_burst > io_ctx->poll_burst_max)
		io_ctx->poll_each_burst = io_ctx->poll_burst_max;
	if (io_ctx->poll_burst > io_ctx->poll_burst_max)
		io_ctx->poll_burst = io_ctx->poll_burst_max;
	if (io_ctx->residual_burst > io_ctx->poll_burst_max)
		io_ctx->residual_burst = io_ctx->poll_burst_max;

	lwkt_replymsg(&nmsg->lmsg, 0);
}
static int
sysctl_burstmax(SYSCTL_HANDLER_ARGS)
{
	struct iopoll_ctx *io_ctx = arg1;
	struct iopoll_sysctl_netmsg msg;
	uint32_t burst_max;
	int error;

	burst_max = io_ctx->poll_burst_max;
	error = sysctl_handle_int(oidp, &burst_max, 0, req);
	if (error || req->newptr == NULL)
		return error;
	if (burst_max < MIN_IOPOLL_BURST_MAX)
		burst_max = MIN_IOPOLL_BURST_MAX;
	else if (burst_max > MAX_IOPOLL_BURST_MAX)
		burst_max = MAX_IOPOLL_BURST_MAX;

	netmsg_init(&msg.base, NULL, &curthread->td_msgport,
	    0, sysctl_burstmax_handler);
	msg.base.lmsg.u.ms_result = burst_max;
	msg.ctx = io_ctx;

	return lwkt_domsg(netisr_cpuport(io_ctx->poll_cpuid),
	    &msg.base.lmsg, 0);
}
static void
sysctl_eachburst_handler(netmsg_t nmsg)
{
	struct iopoll_sysctl_netmsg *msg = (struct iopoll_sysctl_netmsg *)nmsg;
	struct iopoll_ctx *io_ctx;
	uint32_t each_burst;

	io_ctx = msg->ctx;
	KKASSERT(&curthread->td_msgport == netisr_cpuport(io_ctx->poll_cpuid));

	each_burst = nmsg->lmsg.u.ms_result;
	if (each_burst > io_ctx->poll_burst_max)
		each_burst = io_ctx->poll_burst_max;
	else if (each_burst < 1)
		each_burst = 1;
	io_ctx->poll_each_burst = each_burst;

	lwkt_replymsg(&nmsg->lmsg, 0);
}
static int
sysctl_eachburst(SYSCTL_HANDLER_ARGS)
{
	struct iopoll_ctx *io_ctx = arg1;
	struct iopoll_sysctl_netmsg msg;
	uint32_t each_burst;
	int error;

	each_burst = io_ctx->poll_each_burst;
	error = sysctl_handle_int(oidp, &each_burst, 0, req);
	if (error || req->newptr == NULL)
		return error;

	netmsg_init(&msg.base, NULL, &curthread->td_msgport,
	    0, sysctl_eachburst_handler);
	msg.base.lmsg.u.ms_result = each_burst;
	msg.ctx = io_ctx;

	return lwkt_domsg(netisr_cpuport(io_ctx->poll_cpuid),
	    &msg.base.lmsg, 0);
}
static int
iopoll_register(struct ifnet *ifp, struct iopoll_ctx *io_ctx,
    const struct ifpoll_io *io_rec)
{
	int error;

	KKASSERT(&curthread->td_msgport == netisr_cpuport(io_ctx->poll_cpuid));

	if (io_rec->poll_func == NULL)
		return 0;

	/*
	 * Check if there is room.
	 */
	if (io_ctx->poll_handlers >= IFPOLL_LIST_LEN) {
		/*
		 * List full, cannot register more entries.
		 * This should never happen; if it does, it is probably a
		 * broken driver trying to register multiple times. Checking
		 * this at runtime is expensive, and won't solve the problem
		 * anyways, so just report a few times and then give up.
		 */
		static int verbose = 10; /* XXX */

		if (verbose > 0) {
			kprintf("io poll handlers list full, "
			    "maybe a broken driver ?\n");
			verbose--;
		}
		error = ENOMEM;
	} else {
		struct iopoll_rec *rec = &io_ctx->pr[io_ctx->poll_handlers];

		rec->ifp = ifp;
		rec->serializer = io_rec->serializer;
		rec->arg = io_rec->arg;
		rec->poll_func = io_rec->poll_func;

		io_ctx->poll_handlers++;
		error = 0;
	}
	return error;
}
static int
iopoll_deregister(struct ifnet *ifp, struct iopoll_ctx *io_ctx)
{
	int i, error;

	KKASSERT(&curthread->td_msgport == netisr_cpuport(io_ctx->poll_cpuid));

	for (i = 0; i < io_ctx->poll_handlers; ++i) {
		if (io_ctx->pr[i].ifp == ifp) /* Found it */
			break;
	}
	if (i == io_ctx->poll_handlers) {
		error = ENOENT;
	} else {
		io_ctx->poll_handlers--;
		if (i < io_ctx->poll_handlers) {
			/* Last entry replaces this one. */
			io_ctx->pr[i] = io_ctx->pr[io_ctx->poll_handlers];
		}

		if (io_ctx->poll_handlers == 0)
			iopoll_reset_state(io_ctx);
		error = 0;
	}
	return error;
}
static void
poll_comm_init(int cpuid)
{
	struct poll_comm *comm;
	char cpuid_str[16];

	comm = kmalloc_cachealign(sizeof(*comm), M_DEVBUF, M_WAITOK | M_ZERO);

	if (ifpoll_stfrac < 1)
		ifpoll_stfrac = IFPOLL_STFRAC_DEFAULT;
	if (ifpoll_txfrac < 1)
		ifpoll_txfrac = IFPOLL_TXFRAC_DEFAULT;

	comm->poll_cpuid = cpuid;
	comm->pollhz = poll_comm_pollhz_div(comm, ifpoll_pollhz);
	comm->poll_stfrac = ifpoll_stfrac - 1;
	comm->poll_txfrac = ifpoll_txfrac - 1;

	ksnprintf(cpuid_str, sizeof(cpuid_str), "%d", cpuid);

	sysctl_ctx_init(&comm->sysctl_ctx);
	comm->sysctl_tree = SYSCTL_ADD_NODE(&comm->sysctl_ctx,
	    SYSCTL_STATIC_CHILDREN(_net_ifpoll),
	    OID_AUTO, cpuid_str, CTLFLAG_RD, 0, "");

	SYSCTL_ADD_PROC(&comm->sysctl_ctx, SYSCTL_CHILDREN(comm->sysctl_tree),
	    OID_AUTO, "pollhz", CTLTYPE_INT | CTLFLAG_RW,
	    comm, 0, sysctl_pollhz,
	    "I", "Device polling frequency");

	if (cpuid == 0) {
		SYSCTL_ADD_PROC(&comm->sysctl_ctx,
		    SYSCTL_CHILDREN(comm->sysctl_tree),
		    OID_AUTO, "status_frac",
		    CTLTYPE_INT | CTLFLAG_RW,
		    comm, 0, sysctl_stfrac,
		    "I", "# of cycles before status is polled");
	}

	SYSCTL_ADD_PROC(&comm->sysctl_ctx, SYSCTL_CHILDREN(comm->sysctl_tree),
	    OID_AUTO, "tx_frac", CTLTYPE_INT | CTLFLAG_RW,
	    comm, 0, sysctl_txfrac,
	    "I", "# of cycles before TX is polled");

	poll_common[cpuid] = comm;
}
static void
poll_comm_start(int cpuid)
{
	struct poll_comm *comm = poll_common[cpuid];
	systimer_func_t func;

	/*
	 * Initialize systimer
	 */
	if (cpuid == 0)
		func = poll_comm_systimer0;
	else
		func = poll_comm_systimer;
	systimer_init_periodic_nq(&comm->pollclock, func, comm, 1);
}
static void
_poll_comm_systimer(struct poll_comm *comm)
{
	iopoll_clock(rxpoll_context[comm->poll_cpuid]);
	if (comm->txfrac_count-- == 0) {
		comm->txfrac_count = comm->poll_txfrac;
		iopoll_clock(txpoll_context[comm->poll_cpuid]);
	}
}
static void
poll_comm_systimer0(systimer_t info, int in_ipi __unused,
    struct intrframe *frame __unused)
{
	struct poll_comm *comm = info->data;
	globaldata_t gd = mycpu;

	KKASSERT(comm->poll_cpuid == gd->gd_cpuid && gd->gd_cpuid == 0);

	crit_enter_gd(gd);

	if (comm->stfrac_count-- == 0) {
		comm->stfrac_count = comm->poll_stfrac;
		stpoll_clock(&stpoll_context);
	}
	_poll_comm_systimer(comm);

	crit_exit_gd(gd);
}
static void
poll_comm_systimer(systimer_t info, int in_ipi __unused,
    struct intrframe *frame __unused)
{
	struct poll_comm *comm = info->data;
	globaldata_t gd = mycpu;

	KKASSERT(comm->poll_cpuid == gd->gd_cpuid && gd->gd_cpuid != 0);

	crit_enter_gd(gd);
	_poll_comm_systimer(comm);
	crit_exit_gd(gd);
}
static void
poll_comm_adjust_pollhz(struct poll_comm *comm)
{
	uint32_t handlers;
	int pollhz = 1;

	KKASSERT(&curthread->td_msgport == netisr_cpuport(comm->poll_cpuid));

	/*
	 * If there is no polling handler registered, set systimer
	 * frequency to the lowest value.  Polling systimer frequency
	 * will be adjusted to the requested value, once there are
	 * registered handlers.
	 */
	handlers = rxpoll_context[mycpuid]->poll_handlers +
	    txpoll_context[mycpuid]->poll_handlers;
	if (comm->poll_cpuid == 0)
		handlers += stpoll_context.poll_handlers;
	if (handlers)
		pollhz = comm->pollhz;
	systimer_adjust_periodic(&comm->pollclock, pollhz);
}
static int
sysctl_pollhz(SYSCTL_HANDLER_ARGS)
{
	struct poll_comm *comm = arg1;
	struct netmsg_base nmsg;
	int error, phz;

	phz = poll_comm_pollhz_conv(comm, comm->pollhz);
	error = sysctl_handle_int(oidp, &phz, 0, req);
	if (error || req->newptr == NULL)
		return error;
	if (phz <= 0)
		return EINVAL;
	else if (phz > IFPOLL_FREQ_MAX)
		phz = IFPOLL_FREQ_MAX;

	netmsg_init(&nmsg, NULL, &curthread->td_msgport,
	    0, sysctl_pollhz_handler);
	nmsg.lmsg.u.ms_result = phz;

	return lwkt_domsg(netisr_cpuport(comm->poll_cpuid), &nmsg.lmsg, 0);
}
static void
sysctl_pollhz_handler(netmsg_t nmsg)
{
	struct poll_comm *comm = poll_common[mycpuid];

	KKASSERT(&curthread->td_msgport == netisr_cpuport(comm->poll_cpuid));

	/* Save polling frequency */
	comm->pollhz = poll_comm_pollhz_div(comm, nmsg->lmsg.u.ms_result);

	/*
	 * Adjust cached pollhz
	 */
	rxpoll_context[mycpuid]->pollhz = comm->pollhz;
	txpoll_context[mycpuid]->pollhz =
	    comm->pollhz / (comm->poll_txfrac + 1);

	/*
	 * Adjust polling frequency
	 */
	poll_comm_adjust_pollhz(comm);

	lwkt_replymsg(&nmsg->lmsg, 0);
}
static int
sysctl_stfrac(SYSCTL_HANDLER_ARGS)
{
	struct poll_comm *comm = arg1;
	struct netmsg_base nmsg;
	int error, stfrac;

	KKASSERT(comm->poll_cpuid == 0);

	stfrac = comm->poll_stfrac + 1;
	error = sysctl_handle_int(oidp, &stfrac, 0, req);
	if (error || req->newptr == NULL)
		return error;
	if (stfrac < 1)
		return EINVAL;

	netmsg_init(&nmsg, NULL, &curthread->td_msgport,
	    0, sysctl_stfrac_handler);
	nmsg.lmsg.u.ms_result = stfrac - 1;

	return lwkt_domsg(netisr_cpuport(comm->poll_cpuid), &nmsg.lmsg, 0);
}
static void
sysctl_stfrac_handler(netmsg_t nmsg)
{
	struct poll_comm *comm = poll_common[mycpuid];
	int stfrac = nmsg->lmsg.u.ms_result;

	KKASSERT(&curthread->td_msgport == netisr_cpuport(comm->poll_cpuid));

	crit_enter();
	comm->poll_stfrac = stfrac;
	if (comm->stfrac_count > comm->poll_stfrac)
		comm->stfrac_count = comm->poll_stfrac;
	crit_exit();

	lwkt_replymsg(&nmsg->lmsg, 0);
}
static int
sysctl_txfrac(SYSCTL_HANDLER_ARGS)
{
	struct poll_comm *comm = arg1;
	struct netmsg_base nmsg;
	int error, txfrac;

	txfrac = comm->poll_txfrac + 1;
	error = sysctl_handle_int(oidp, &txfrac, 0, req);
	if (error || req->newptr == NULL)
		return error;
	if (txfrac < 1)
		return EINVAL;

	netmsg_init(&nmsg, NULL, &curthread->td_msgport,
	    0, sysctl_txfrac_handler);
	nmsg.lmsg.u.ms_result = txfrac - 1;

	return lwkt_domsg(netisr_cpuport(comm->poll_cpuid), &nmsg.lmsg, 0);
}
static void
sysctl_txfrac_handler(netmsg_t nmsg)
{
	struct poll_comm *comm = poll_common[mycpuid];
	int txfrac = nmsg->lmsg.u.ms_result;

	KKASSERT(&curthread->td_msgport == netisr_cpuport(comm->poll_cpuid));

	crit_enter();
	comm->poll_txfrac = txfrac;
	if (comm->txfrac_count > comm->poll_txfrac)
		comm->txfrac_count = comm->poll_txfrac;
	crit_exit();

	lwkt_replymsg(&nmsg->lmsg, 0);
}
void
ifpoll_compat_setup(struct ifpoll_compat *cp,
    struct sysctl_ctx_list *sysctl_ctx,
    struct sysctl_oid *sysctl_tree,
    int unit, struct lwkt_serialize *slz)
{
	cp->ifpc_stcount = 0;
	cp->ifpc_stfrac = ((poll_common[0]->poll_stfrac + 1) *
	    howmany(IOPOLL_BURST_MAX, IOPOLL_EACH_BURST)) - 1;
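	/*
	 * With the defaults (poll_stfrac 119, IOPOLL_BURST_MAX 250,
	 * IOPOLL_EACH_BURST 50) this works out to 120 * 5 - 1 = 599, so a
	 * driver using ifpc_stcount as a countdown polls its status once
	 * every 600 calls.
	 */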
	cp->ifpc_cpuid = unit % netisr_ncpus;
	cp->ifpc_serializer = slz;

	if (sysctl_ctx != NULL && sysctl_tree != NULL) {
		SYSCTL_ADD_PROC(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
		    OID_AUTO, "npoll_stfrac", CTLTYPE_INT | CTLFLAG_RW,
		    cp, 0, sysctl_compat_npoll_stfrac, "I",
		    "polling status frac");
		SYSCTL_ADD_PROC(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
		    OID_AUTO, "npoll_cpuid", CTLTYPE_INT | CTLFLAG_RW,
		    cp, 0, sysctl_compat_npoll_cpuid, "I",
		    "polling cpuid");
	}
}
static int
sysctl_compat_npoll_stfrac(SYSCTL_HANDLER_ARGS)
{
	struct ifpoll_compat *cp = arg1;
	int error = 0, stfrac;

	lwkt_serialize_enter(cp->ifpc_serializer);

	stfrac = cp->ifpc_stfrac + 1;
	error = sysctl_handle_int(oidp, &stfrac, 0, req);
	if (!error && req->newptr != NULL) {
		if (stfrac < 1) {
			error = EINVAL;
		} else {
			cp->ifpc_stfrac = stfrac - 1;
			if (cp->ifpc_stcount > cp->ifpc_stfrac)
				cp->ifpc_stcount = cp->ifpc_stfrac;
		}
	}

	lwkt_serialize_exit(cp->ifpc_serializer);
	return error;
}
static int
sysctl_compat_npoll_cpuid(SYSCTL_HANDLER_ARGS)
{
	struct ifpoll_compat *cp = arg1;
	int error = 0, cpuid;

	lwkt_serialize_enter(cp->ifpc_serializer);

	cpuid = cp->ifpc_cpuid;
	error = sysctl_handle_int(oidp, &cpuid, 0, req);
	if (!error && req->newptr != NULL) {
		if (cpuid < 0 || cpuid >= netisr_ncpus)
			error = EINVAL;
		else
			cp->ifpc_cpuid = cpuid;
	}

	lwkt_serialize_exit(cp->ifpc_serializer);
	return error;
}