/*
 * Copyright (c) 2001-2002 Luigi Rizzo
 *
 * Supported by: the Xorp Project (www.xorp.org)
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/kern/kern_poll.c,v 1.2.2.4 2002/06/27 23:26:33 luigi Exp $
 */
#include "opt_ifpoll.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/malloc.h>
#include <sys/serialize.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/microtime_pcpu.h>

#include <sys/thread2.h>
#include <sys/msgport2.h>

#include <net/if.h>
#include <net/if_poll.h>
#include <net/netmsg2.h>
#include <net/netisr2.h>
/*
 * Polling support for network device drivers.
 *
 * Drivers which support this feature try to register one status polling
 * handler and several TX/RX polling handlers with the polling code.
 * If the interface's if_npoll is called with a non-NULL second argument,
 * then a register operation is requested, else a deregister operation is
 * requested.  If the requested operation is "register", the driver should
 * set up the ifpoll_info passed in according to its own needs:
 *   ifpoll_info.ifpi_status.status_func == NULL
 *     No status polling handler will be installed on CPU(0)
 *   ifpoll_info.ifpi_rx[n].poll_func == NULL
 *     No RX polling handler will be installed on CPU(n)
 *   ifpoll_info.ifpi_tx[n].poll_func == NULL
 *     No TX polling handler will be installed on CPU(n)
 *
 * The serializer field of ifpoll_info.ifpi_status and ifpoll_info.ifpi_tx[n]
 * must _not_ be NULL.  The serializer will be held before the status_func
 * and poll_func are called.  The serializer field of ifpoll_info.ifpi_rx[n]
 * can be NULL, but then the interface's if_flags must have IFF_IDIRECT set,
 * which indicates that the network processing of the input packets is
 * run directly instead of being redispatched.
 *
 * RX is polled at the specified polling frequency (net.ifpoll.X.pollhz).
 * TX and status polling can be done at a lower frequency than the RX
 * frequency (net.ifpoll.0.status_frac and net.ifpoll.X.tx_frac).  To avoid
 * systimer staggering at high frequency, TX and status polling piggyback
 * on the RX systimer.
 *
 * All of the registered polling handlers are called only if the interface
 * is marked as IFF_UP, IFF_RUNNING and IFF_NPOLLING.  However, the
 * interface's register and deregister function (ifnet.if_npoll) will be
 * called even if the interface is not marked with IFF_RUNNING or IFF_UP.
 *
 * If registration is successful, the driver must disable interrupts,
 * and further I/O is performed through the TX/RX polling handlers, which
 * are invoked (at least once per clock tick) with 3 arguments: the "arg"
 * passed at register time, a struct ifnet pointer, and a "count" limit.
 * The registered serializer is held before the related polling handler
 * is called.
 *
 * The count limit specifies how much work the handler can do during the
 * call -- typically this is the number of packets to be received, or
 * transmitted, etc. (drivers are free to interpret this number, as long
 * as the max time spent in the function grows roughly linearly with the
 * count).
 *
 * A second variable controls the sharing of CPU between polling/kernel
 * network processing, and other activities (typically userlevel tasks):
 * net.ifpoll.X.{rx,tx}.user_frac (between 0 and 100, default 50) sets the
 * share of CPU allocated to user tasks.  CPU is allocated proportionally
 * to the shares, by dynamically adjusting the "count" (poll_burst).
 *
 * Other parameters should be left to their default values.
 * The following constraints hold:
 *
 *	1 <= poll_burst <= poll_burst_max
 *	1 <= poll_each_burst <= poll_burst_max
 *	MIN_POLL_BURST_MAX <= poll_burst_max <= MAX_POLL_BURST_MAX
 */
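/*
 * Example (a sketch only; "foo", its softc layout and the helper names
 * are hypothetical and not part of this file): a driver's if_npoll
 * method typically fills in the ifpoll_info on register and tears the
 * handlers down on deregister, roughly like this:
 *
 *	static void
 *	foo_npoll(struct ifnet *ifp, struct ifpoll_info *info)
 *	{
 *		struct foo_softc *sc = ifp->if_softc;
 *
 *		if (info != NULL) {
 *			info->ifpi_status.status_func = foo_npoll_status;
 *			info->ifpi_status.serializer = &sc->foo_main_serialize;
 *
 *			info->ifpi_tx[0].poll_func = foo_npoll_txpoll;
 *			info->ifpi_tx[0].arg = sc;
 *			info->ifpi_tx[0].serializer = &sc->foo_tx_serialize;
 *
 *			info->ifpi_rx[0].poll_func = foo_npoll_rxpoll;
 *			info->ifpi_rx[0].arg = sc;
 *			info->ifpi_rx[0].serializer = &sc->foo_rx_serialize;
 *
 *			foo_disable_intr(sc);
 *		} else {
 *			foo_enable_intr(sc);
 *		}
 *	}
 *
 * where foo_npoll_rxpoll(ifp, arg, count) and foo_npoll_txpoll(ifp, arg,
 * count) match ifpoll_iofn_t, foo_npoll_status(ifp) matches
 * ifpoll_stfn_t, and index 0 stands for the cpu the ring is bound to.
 */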
#define IFPOLL_LIST_LEN		128
#define IFPOLL_FREQ_MAX		30000

#define MIN_IOPOLL_BURST_MAX	10
#define MAX_IOPOLL_BURST_MAX	5000
#define IOPOLL_BURST_MAX	250	/* good for 1000Mbit net and HZ=6000 */

#define IOPOLL_EACH_BURST	50
#define IOPOLL_USER_FRAC	50

#define IFPOLL_FREQ_DEFAULT	6000

#define IFPOLL_TXFRAC_DEFAULT	1	/* 1/1 of the pollhz */
#define IFPOLL_STFRAC_DEFAULT	120	/* 1/120 of the pollhz */

#define IFPOLL_RX		0x1
#define IFPOLL_TX		0x2
struct iopoll_rec {
	struct lwkt_serialize	*serializer;
	struct ifnet		*ifp;
	void			*arg;
	ifpoll_iofn_t		poll_func;
};

struct iopoll_ctx {
	union microtime_pcpu	prev_t;
	u_long			short_ticks;		/* statistics */
	u_long			lost_polls;		/* statistics */
	u_long			suspect;		/* statistics */
	u_long			stalled;		/* statistics */
	uint32_t		pending_polls;		/* state */

	struct netmsg_base	poll_netmsg;
	struct netmsg_base	poll_more_netmsg;

	int			poll_cpuid;
	int			pollhz;
	uint32_t		phase;			/* state */
	int			residual_burst;		/* state */
	uint32_t		poll_each_burst;	/* tunable */
	union microtime_pcpu	poll_start_t;		/* state */

	uint32_t		poll_burst;		/* state */
	uint32_t		poll_burst_max;		/* tunable */
	uint32_t		user_frac;		/* tunable */
	uint32_t		kern_frac;		/* state */

	uint32_t		poll_handlers;		/* next free entry in pr[]. */
	struct iopoll_rec	pr[IFPOLL_LIST_LEN];

	struct sysctl_ctx_list	poll_sysctl_ctx;
	struct sysctl_oid	*poll_sysctl_tree;
};

struct poll_comm {
	struct systimer		pollclock;
	int			poll_cpuid;

	int			stfrac_count;		/* state */
	int			poll_stfrac;		/* tunable */

	int			txfrac_count;		/* state */
	int			poll_txfrac;		/* tunable */

	int			pollhz;			/* tunable */

	struct sysctl_ctx_list	sysctl_ctx;
	struct sysctl_oid	*sysctl_tree;
};

struct stpoll_rec {
	struct lwkt_serialize	*serializer;
	struct ifnet		*ifp;
	ifpoll_stfn_t		status_func;
};

struct stpoll_ctx {
	struct netmsg_base	poll_netmsg;

	uint32_t		poll_handlers;		/* next free entry in pr[]. */
	struct stpoll_rec	pr[IFPOLL_LIST_LEN];

	struct sysctl_ctx_list	poll_sysctl_ctx;
	struct sysctl_oid	*poll_sysctl_tree;
};

struct iopoll_sysctl_netmsg {
	struct netmsg_base	base;
	struct iopoll_ctx	*ctx;
};
static void	ifpoll_init_pcpu(int);
static void	ifpoll_register_handler(netmsg_t);
static void	ifpoll_deregister_handler(netmsg_t);

static void	stpoll_init(void);
static void	stpoll_handler(netmsg_t);
static void	stpoll_clock(struct stpoll_ctx *);
static int	stpoll_register(struct ifnet *, const struct ifpoll_status *);
static int	stpoll_deregister(struct ifnet *);

static struct iopoll_ctx *iopoll_ctx_create(int, int);
static void	iopoll_init(int);
static void	rxpoll_handler(netmsg_t);
static void	txpoll_handler(netmsg_t);
static void	rxpollmore_handler(netmsg_t);
static void	txpollmore_handler(netmsg_t);
static void	iopoll_clock(struct iopoll_ctx *);
static int	iopoll_register(struct ifnet *, struct iopoll_ctx *,
		    const struct ifpoll_io *);
static int	iopoll_deregister(struct ifnet *, struct iopoll_ctx *);

static void	iopoll_add_sysctl(struct sysctl_ctx_list *,
		    struct sysctl_oid_list *, struct iopoll_ctx *, int);
static void	sysctl_burstmax_handler(netmsg_t);
static int	sysctl_burstmax(SYSCTL_HANDLER_ARGS);
static void	sysctl_eachburst_handler(netmsg_t);
static int	sysctl_eachburst(SYSCTL_HANDLER_ARGS);

static void	poll_comm_init(int);
static void	poll_comm_start(int);
static void	poll_comm_adjust_pollhz(struct poll_comm *);
static void	poll_comm_systimer0(systimer_t, int, struct intrframe *);
static void	poll_comm_systimer(systimer_t, int, struct intrframe *);
static void	sysctl_pollhz_handler(netmsg_t);
static void	sysctl_stfrac_handler(netmsg_t);
static void	sysctl_txfrac_handler(netmsg_t);
static int	sysctl_pollhz(SYSCTL_HANDLER_ARGS);
static int	sysctl_stfrac(SYSCTL_HANDLER_ARGS);
static int	sysctl_txfrac(SYSCTL_HANDLER_ARGS);
static int	sysctl_compat_npoll_stfrac(SYSCTL_HANDLER_ARGS);
static int	sysctl_compat_npoll_cpuid(SYSCTL_HANDLER_ARGS);
static struct stpoll_ctx	stpoll_context;
static struct poll_comm		*poll_common[MAXCPU];
static struct iopoll_ctx	*rxpoll_context[MAXCPU];
static struct iopoll_ctx	*txpoll_context[MAXCPU];
SYSCTL_NODE(_net, OID_AUTO, ifpoll, CTLFLAG_RW, 0,
	    "Network device polling parameters");
static int	iopoll_burst_max = IOPOLL_BURST_MAX;
static int	iopoll_each_burst = IOPOLL_EACH_BURST;
static int	iopoll_user_frac = IOPOLL_USER_FRAC;

static int	ifpoll_pollhz = IFPOLL_FREQ_DEFAULT;
static int	ifpoll_stfrac = IFPOLL_STFRAC_DEFAULT;
static int	ifpoll_txfrac = IFPOLL_TXFRAC_DEFAULT;

TUNABLE_INT("net.ifpoll.burst_max", &iopoll_burst_max);
TUNABLE_INT("net.ifpoll.each_burst", &iopoll_each_burst);
TUNABLE_INT("net.ifpoll.user_frac", &iopoll_user_frac);
TUNABLE_INT("net.ifpoll.pollhz", &ifpoll_pollhz);
TUNABLE_INT("net.ifpoll.status_frac", &ifpoll_stfrac);
TUNABLE_INT("net.ifpoll.tx_frac", &ifpoll_txfrac);
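/*
 * Example settings (illustrative values only, not recommendations).
 * The tunables above can be set from loader.conf at boot; the per-cpu
 * values are also adjustable at runtime through the sysctl nodes
 * created in poll_comm_init():
 *
 *	/boot/loader.conf:
 *		net.ifpoll.pollhz="2000"
 *		net.ifpoll.status_frac="40"
 *
 *	runtime, per-cpu:
 *		sysctl net.ifpoll.0.pollhz=4000
 */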
#if !defined(KTR_IF_POLL)
#define KTR_IF_POLL		KTR_ALL
#endif
KTR_INFO_MASTER(if_poll);
KTR_INFO(KTR_IF_POLL, if_poll, rx_start, 0, "rx start");
KTR_INFO(KTR_IF_POLL, if_poll, rx_end, 1, "rx end");
KTR_INFO(KTR_IF_POLL, if_poll, tx_start, 2, "tx start");
KTR_INFO(KTR_IF_POLL, if_poll, tx_end, 3, "tx end");
KTR_INFO(KTR_IF_POLL, if_poll, rx_mstart, 4, "rx more start");
KTR_INFO(KTR_IF_POLL, if_poll, rx_mend, 5, "rx more end");
KTR_INFO(KTR_IF_POLL, if_poll, tx_mstart, 6, "tx more start");
KTR_INFO(KTR_IF_POLL, if_poll, tx_mend, 7, "tx more end");
KTR_INFO(KTR_IF_POLL, if_poll, ioclock_start, 8, "ioclock start");
KTR_INFO(KTR_IF_POLL, if_poll, ioclock_end, 9, "ioclock end");
#define logpoll(name)	KTR_LOG(if_poll_ ## name)
#define IFPOLL_FREQ_ADJ(comm)	(((comm)->poll_cpuid * 3) % 50)

static __inline int
poll_comm_pollhz_div(const struct poll_comm *comm, int pollhz)
{
	return pollhz + IFPOLL_FREQ_ADJ(comm);
}

static __inline int
poll_comm_pollhz_conv(const struct poll_comm *comm, int pollhz)
{
	return pollhz - IFPOLL_FREQ_ADJ(comm);
}
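/*
 * For example (illustrative numbers): with net.ifpoll.pollhz left at
 * 6000, poll_comm_pollhz_div() makes CPU0 run its polling systimer at
 * 6000Hz, CPU1 at 6003Hz, CPU2 at 6006Hz and so on (poll_cpuid * 3,
 * modulo 50), so the per-cpu polling systimers do not all fire at the
 * same instant.  poll_comm_pollhz_conv() strips the adjustment again
 * when the frequency is reported back through sysctl.
 */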
static __inline void
ifpoll_sendmsg_oncpu(netmsg_t msg)
{
	if (msg->lmsg.ms_flags & MSGF_DONE)
		netisr_sendmsg_oncpu(&msg->base);
}

static __inline void
sched_stpoll(struct stpoll_ctx *st_ctx)
{
	ifpoll_sendmsg_oncpu((netmsg_t)&st_ctx->poll_netmsg);
}

static __inline void
sched_iopoll(struct iopoll_ctx *io_ctx)
{
	ifpoll_sendmsg_oncpu((netmsg_t)&io_ctx->poll_netmsg);
}

static __inline void
sched_iopollmore(struct iopoll_ctx *io_ctx, boolean_t direct)
{
	if (!direct) {
		ifpoll_sendmsg_oncpu((netmsg_t)&io_ctx->poll_more_netmsg);
	} else {
		struct netmsg_base *nmsg = &io_ctx->poll_more_netmsg;

		/* Dispatch the "pollmore" message directly. */
		nmsg->lmsg.ms_flags &= ~(MSGF_REPLY | MSGF_DONE);
		nmsg->lmsg.ms_flags |= MSGF_SYNC;
		nmsg->nm_dispatch((netmsg_t)nmsg);
		KKASSERT(nmsg->lmsg.ms_flags & MSGF_DONE);
	}
}
/*
 * Initialize per-cpu polling(4) context.
 */
static void
ifpoll_init_pcpu(int cpuid)
{
	poll_comm_init(cpuid);

	if (cpuid == 0)
		stpoll_init();
	iopoll_init(cpuid);

	poll_comm_start(cpuid);
}

static void
ifpoll_init_handler(netmsg_t msg)
{
	int cpu = mycpuid;

	ifpoll_init_pcpu(cpu);
	netisr_forwardmsg(&msg->base, cpu + 1);
}

static void
ifpoll_sysinit(void *dummy __unused)
{
	struct netmsg_base msg;

	netmsg_init(&msg, NULL, &curthread->td_msgport, 0, ifpoll_init_handler);
	netisr_domsg_global(&msg);
}
SYSINIT(ifpoll, SI_SUB_PRE_DRIVERS, SI_ORDER_ANY, ifpoll_sysinit, NULL);
int
ifpoll_register(struct ifnet *ifp)
{
	struct ifpoll_info *info;
	struct netmsg_base nmsg;
	int error;

	if (ifp->if_npoll == NULL) {
		/* Device does not support polling */
		return EOPNOTSUPP;
	}

	info = kmalloc(sizeof(*info), M_TEMP, M_WAITOK | M_ZERO);

	/*
	 * Attempt to register.  Interlock with IFF_NPOLLING.
	 */

	ifnet_serialize_all(ifp);

	if (ifp->if_flags & IFF_NPOLLING) {
		/* Already polling */
		ifnet_deserialize_all(ifp);
		kfree(info, M_TEMP);
		return EBUSY;
	}

	info->ifpi_ifp = ifp;

	ifp->if_flags |= IFF_NPOLLING;
	ifp->if_npoll(ifp, info);

	ifnet_deserialize_all(ifp);

	netmsg_init(&nmsg, NULL, &curthread->td_msgport,
	    0, ifpoll_register_handler);
	nmsg.lmsg.u.ms_resultp = info;

	error = netisr_domsg_global(&nmsg);
	if (error) {
		if (!ifpoll_deregister(ifp)) {
			if_printf(ifp, "ifpoll_register: "
			    "ifpoll_deregister failed!\n");
		}
	}

	kfree(info, M_TEMP);
	return error;
}
int
ifpoll_deregister(struct ifnet *ifp)
{
	struct netmsg_base nmsg;
	int error;

	if (ifp->if_npoll == NULL)
		return EOPNOTSUPP;

	ifnet_serialize_all(ifp);

	if ((ifp->if_flags & IFF_NPOLLING) == 0) {
		ifnet_deserialize_all(ifp);
		return EINVAL;
	}
	ifp->if_flags &= ~IFF_NPOLLING;

	ifnet_deserialize_all(ifp);

	netmsg_init(&nmsg, NULL, &curthread->td_msgport,
	    0, ifpoll_deregister_handler);
	nmsg.lmsg.u.ms_resultp = ifp;

	error = netisr_domsg_global(&nmsg);
	if (!error) {
		ifnet_serialize_all(ifp);
		ifp->if_npoll(ifp, NULL);
		ifnet_deserialize_all(ifp);
	}
	return error;
}
static void
ifpoll_register_handler(netmsg_t nmsg)
{
	const struct ifpoll_info *info = nmsg->lmsg.u.ms_resultp;
	int cpuid = mycpuid;
	int error;

	KKASSERT(cpuid < netisr_ncpus);
	KKASSERT(&curthread->td_msgport == netisr_cpuport(cpuid));

	if (cpuid == 0) {
		error = stpoll_register(info->ifpi_ifp, &info->ifpi_status);
		if (error)
			goto failed;
	}

	error = iopoll_register(info->ifpi_ifp, rxpoll_context[cpuid],
	    &info->ifpi_rx[cpuid]);
	if (error)
		goto failed;

	error = iopoll_register(info->ifpi_ifp, txpoll_context[cpuid],
	    &info->ifpi_tx[cpuid]);
	if (error)
		goto failed;

	/* Adjust polling frequency, after all registration is done */
	poll_comm_adjust_pollhz(poll_common[cpuid]);

	netisr_forwardmsg(&nmsg->base, cpuid + 1);
	return;
failed:
	netisr_replymsg(&nmsg->base, error);
}
static void
ifpoll_deregister_handler(netmsg_t nmsg)
{
	struct ifnet *ifp = nmsg->lmsg.u.ms_resultp;
	int cpuid = mycpuid;

	KKASSERT(cpuid < netisr_ncpus);
	KKASSERT(&curthread->td_msgport == netisr_cpuport(cpuid));

	/* Ignore errors */
	if (cpuid == 0)
		stpoll_deregister(ifp);
	iopoll_deregister(ifp, rxpoll_context[cpuid]);
	iopoll_deregister(ifp, txpoll_context[cpuid]);

	/* Adjust polling frequency, after all deregistration is done */
	poll_comm_adjust_pollhz(poll_common[cpuid]);

	netisr_forwardmsg(&nmsg->base, cpuid + 1);
}
static void
stpoll_init(void)
{
	struct stpoll_ctx *st_ctx = &stpoll_context;
	const struct poll_comm *comm = poll_common[0];

	sysctl_ctx_init(&st_ctx->poll_sysctl_ctx);
	st_ctx->poll_sysctl_tree = SYSCTL_ADD_NODE(&st_ctx->poll_sysctl_ctx,
	    SYSCTL_CHILDREN(comm->sysctl_tree),
	    OID_AUTO, "status", CTLFLAG_RD, 0, "");

	SYSCTL_ADD_UINT(&st_ctx->poll_sysctl_ctx,
	    SYSCTL_CHILDREN(st_ctx->poll_sysctl_tree),
	    OID_AUTO, "handlers", CTLFLAG_RD,
	    &st_ctx->poll_handlers, 0,
	    "Number of registered status poll handlers");

	netmsg_init(&st_ctx->poll_netmsg, NULL, &netisr_adone_rport,
	    0, stpoll_handler);
}
/*
 * stpoll_handler is scheduled by sched_stpoll when appropriate, typically
 * once per polling systimer tick.
 */
static void
stpoll_handler(netmsg_t msg)
{
	struct stpoll_ctx *st_ctx = &stpoll_context;
	struct thread *td = curthread;
	int i;

	crit_enter_quick(td);

	/* Reply ASAP */
	netisr_replymsg(&msg->base, 0);

	if (st_ctx->poll_handlers == 0) {
		crit_exit_quick(td);
		return;
	}

	for (i = 0; i < st_ctx->poll_handlers; ++i) {
		const struct stpoll_rec *rec = &st_ctx->pr[i];
		struct ifnet *ifp = rec->ifp;

		if (!lwkt_serialize_try(rec->serializer))
			continue;

		if ((ifp->if_flags & (IFF_RUNNING | IFF_NPOLLING)) ==
		    (IFF_RUNNING | IFF_NPOLLING))
			rec->status_func(ifp);

		lwkt_serialize_exit(rec->serializer);
	}

	crit_exit_quick(td);
}
/*
 * Hook from status poll systimer.  Tries to schedule a status poll.
 * NOTE: Caller should hold critical section.
 */
static void
stpoll_clock(struct stpoll_ctx *st_ctx)
{
	KKASSERT(mycpuid == 0);

	if (st_ctx->poll_handlers == 0)
		return;
	sched_stpoll(st_ctx);
}
static int
stpoll_register(struct ifnet *ifp, const struct ifpoll_status *st_rec)
{
	struct stpoll_ctx *st_ctx = &stpoll_context;
	int error;

	if (st_rec->status_func == NULL)
		return 0;

	/*
	 * Check if there is room.
	 */
	if (st_ctx->poll_handlers >= IFPOLL_LIST_LEN) {
		/*
		 * List full, cannot register more entries.
		 * This should never happen; if it does, it is probably a
		 * broken driver trying to register multiple times. Checking
		 * this at runtime is expensive, and won't solve the problem
		 * anyways, so just report a few times and then give up.
		 */
		static int verbose = 10; /* XXX */

		if (verbose > 0) {
			kprintf("status poll handlers list full, "
			    "maybe a broken driver ?\n");
			verbose--;
		}
		error = ENOENT;
	} else {
		struct stpoll_rec *rec = &st_ctx->pr[st_ctx->poll_handlers];

		rec->ifp = ifp;
		rec->serializer = st_rec->serializer;
		rec->status_func = st_rec->status_func;

		st_ctx->poll_handlers++;
		error = 0;
	}
	return error;
}
static int
stpoll_deregister(struct ifnet *ifp)
{
	struct stpoll_ctx *st_ctx = &stpoll_context;
	int i, error;

	for (i = 0; i < st_ctx->poll_handlers; ++i) {
		if (st_ctx->pr[i].ifp == ifp) /* Found it */
			break;
	}
	if (i == st_ctx->poll_handlers) {
		error = ENOENT;
	} else {
		st_ctx->poll_handlers--;
		if (i < st_ctx->poll_handlers) {
			/* Last entry replaces this one. */
			st_ctx->pr[i] = st_ctx->pr[st_ctx->poll_handlers];
		}
		error = 0;
	}
	return error;
}
static __inline void
iopoll_reset_state(struct iopoll_ctx *io_ctx)
{
	crit_enter();
	io_ctx->poll_burst = io_ctx->poll_each_burst;
	io_ctx->pending_polls = 0;
	io_ctx->residual_burst = 0;
	io_ctx->phase = 0;
	io_ctx->kern_frac = 0;
	bzero(&io_ctx->poll_start_t, sizeof(io_ctx->poll_start_t));
	bzero(&io_ctx->prev_t, sizeof(io_ctx->prev_t));
	crit_exit();
}
static void
iopoll_init(int cpuid)
{
	KKASSERT(cpuid < netisr_ncpus);

	rxpoll_context[cpuid] = iopoll_ctx_create(cpuid, IFPOLL_RX);
	txpoll_context[cpuid] = iopoll_ctx_create(cpuid, IFPOLL_TX);
}
static struct iopoll_ctx *
iopoll_ctx_create(int cpuid, int poll_type)
{
	struct poll_comm *comm;
	struct iopoll_ctx *io_ctx;
	const char *poll_type_str;
	netisr_fn_t handler, more_handler;

	KKASSERT(poll_type == IFPOLL_RX || poll_type == IFPOLL_TX);

	/*
	 * Make sure that tunables are in sane state
	 */
	if (iopoll_burst_max < MIN_IOPOLL_BURST_MAX)
		iopoll_burst_max = MIN_IOPOLL_BURST_MAX;
	else if (iopoll_burst_max > MAX_IOPOLL_BURST_MAX)
		iopoll_burst_max = MAX_IOPOLL_BURST_MAX;

	if (iopoll_each_burst > iopoll_burst_max)
		iopoll_each_burst = iopoll_burst_max;

	comm = poll_common[cpuid];

	/*
	 * Create the per-cpu polling context
	 */
	io_ctx = kmalloc(sizeof(*io_ctx), M_DEVBUF, M_WAITOK | M_ZERO);

	io_ctx->poll_each_burst = iopoll_each_burst;
	io_ctx->poll_burst_max = iopoll_burst_max;
	io_ctx->user_frac = iopoll_user_frac;
	if (poll_type == IFPOLL_RX)
		io_ctx->pollhz = comm->pollhz;
	else
		io_ctx->pollhz = comm->pollhz / (comm->poll_txfrac + 1);
	io_ctx->poll_cpuid = cpuid;
	iopoll_reset_state(io_ctx);

	if (poll_type == IFPOLL_RX) {
		handler = rxpoll_handler;
		more_handler = rxpollmore_handler;
	} else {
		handler = txpoll_handler;
		more_handler = txpollmore_handler;
	}

	netmsg_init(&io_ctx->poll_netmsg, NULL, &netisr_adone_rport,
	    0, handler);
	io_ctx->poll_netmsg.lmsg.u.ms_resultp = io_ctx;

	netmsg_init(&io_ctx->poll_more_netmsg, NULL, &netisr_adone_rport,
	    0, more_handler);
	io_ctx->poll_more_netmsg.lmsg.u.ms_resultp = io_ctx;

	/*
	 * Initialize per-cpu sysctl nodes
	 */
	if (poll_type == IFPOLL_RX)
		poll_type_str = "rx";
	else
		poll_type_str = "tx";

	sysctl_ctx_init(&io_ctx->poll_sysctl_ctx);
	io_ctx->poll_sysctl_tree = SYSCTL_ADD_NODE(&io_ctx->poll_sysctl_ctx,
	    SYSCTL_CHILDREN(comm->sysctl_tree),
	    OID_AUTO, poll_type_str, CTLFLAG_RD, 0, "");
	iopoll_add_sysctl(&io_ctx->poll_sysctl_ctx,
	    SYSCTL_CHILDREN(io_ctx->poll_sysctl_tree), io_ctx, poll_type);

	return io_ctx;
}
/*
 * Hook from iopoll systimer.  Tries to schedule an iopoll, but keeps
 * track of lost ticks due to the previous handler taking too long.
 * Normally, this should not happen, because a polling handler should
 * run for a short time.  However, in some cases (e.g. when there are
 * changes in link status etc.) the drivers take a very long time
 * (even in the order of milliseconds) to reset and reconfigure the
 * device, causing apparent lost polls.
 *
 * The first part of the code is just for debugging purposes, and tries
 * to count how often hardclock ticks are shorter than they should,
 * meaning either stray interrupts or delayed events.
 *
 * WARNING! called from fastint or IPI, the MP lock might not be held.
 * NOTE: Caller should hold critical section.
 */
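/*
 * Worked example for the short-tick test below (this assumes
 * microtime_pcpu_diff() reports microseconds): at pollhz=6000 a full
 * tick is 1000000/6000 ~= 166us, and "delta * pollhz < 500000" is the
 * same as "delta < half a tick" (e.g. 80us * 6000 = 480000 < 500000),
 * so short_ticks counts ticks arriving in less than half of their
 * nominal period.
 */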
static void
iopoll_clock(struct iopoll_ctx *io_ctx)
{
	union microtime_pcpu t;
	int delta;

	KKASSERT(mycpuid == io_ctx->poll_cpuid);

	if (io_ctx->poll_handlers == 0)
		return;

	logpoll(ioclock_start);

	microtime_pcpu_get(&t);
	delta = microtime_pcpu_diff(&io_ctx->prev_t, &t);
	if (delta * io_ctx->pollhz < 500000)
		io_ctx->short_ticks++;
	else
		io_ctx->prev_t = t;

	if (io_ctx->pending_polls > 100) {
		/*
		 * Too much, assume it has stalled (not always true
		 * see comment above).
		 */
		io_ctx->stalled++;
		io_ctx->pending_polls = 0;
		io_ctx->phase = 0;
	}

	if (io_ctx->phase <= 2) {
		if (io_ctx->phase != 0)
			io_ctx->suspect++;
		io_ctx->phase = 1;
		sched_iopoll(io_ctx);
		io_ctx->phase = 2;
	}
	if (io_ctx->pending_polls++ > 0)
		io_ctx->lost_polls++;

	logpoll(ioclock_end);
}
/*
 * rxpoll_handler and txpoll_handler are scheduled by sched_iopoll when
 * appropriate, typically once per polling systimer tick.
 *
 * Note that the message is replied immediately in order to allow a new
 * ISR to be scheduled in the handler.
 */
static void
rxpoll_handler(netmsg_t msg)
{
	struct iopoll_ctx *io_ctx;
	struct thread *td = curthread;
	boolean_t direct = TRUE, crit;
	int i, cycles;

	logpoll(rx_start);

	io_ctx = msg->lmsg.u.ms_resultp;
	KKASSERT(&td->td_msgport == netisr_cpuport(io_ctx->poll_cpuid));

	crit = TRUE;
	crit_enter_quick(td);

	/* Reply ASAP */
	netisr_replymsg(&msg->base, 0);

	if (io_ctx->poll_handlers == 0) {
		crit_exit_quick(td);
		logpoll(rx_end);
		return;
	}

	io_ctx->phase = 3;
	if (io_ctx->residual_burst == 0) {
		/* First call in this tick */
		microtime_pcpu_get(&io_ctx->poll_start_t);
		io_ctx->residual_burst = io_ctx->poll_burst;
	}
	cycles = (io_ctx->residual_burst < io_ctx->poll_each_burst) ?
		 io_ctx->residual_burst : io_ctx->poll_each_burst;
	io_ctx->residual_burst -= cycles;

	for (i = 0; i < io_ctx->poll_handlers; i++) {
		const struct iopoll_rec *rec = &io_ctx->pr[i];
		struct ifnet *ifp = rec->ifp;

		if (rec->serializer != NULL) {
			/* Serialized RX polling */
			if (!crit) {
				crit = TRUE;
				crit_enter_quick(td);
			}
			if (__predict_false(
			    !lwkt_serialize_try(rec->serializer))) {
				/* RX serializer generally will not fail. */
				continue;
			}
		} else if (crit) {
			/*
			 * Exit critical section, if the RX polling
			 * handler does not require serialization,
			 * i.e. RX polling is doing direct input.
			 */
			crit = FALSE;
			crit_exit_quick(td);
		}

		if ((ifp->if_flags & IFF_IDIRECT) == 0) {
			direct = FALSE;
			KASSERT(rec->serializer != NULL,
			    ("rx polling handler is not serialized"));
		} else {
			KASSERT(rec->serializer == NULL,
			    ("serialized direct input"));
		}

		if ((ifp->if_flags & (IFF_UP | IFF_RUNNING | IFF_NPOLLING)) ==
		    (IFF_UP | IFF_RUNNING | IFF_NPOLLING))
			rec->poll_func(ifp, rec->arg, cycles);

		if (rec->serializer != NULL)
			lwkt_serialize_exit(rec->serializer);
	}

	if (!crit) {
		crit = TRUE;
		crit_enter_quick(td);
	}

	/*
	 * Do a quick exit/enter to catch any higher-priority
	 * interrupt sources.
	 */
	crit_exit_quick(td);
	crit_enter_quick(td);

	sched_iopollmore(io_ctx, direct);
	io_ctx->phase = 4;

	crit_exit_quick(td);

	logpoll(rx_end);
}
static void
txpoll_handler(netmsg_t msg)
{
	struct iopoll_ctx *io_ctx;
	struct thread *td = curthread;
	int i;

	logpoll(tx_start);

	io_ctx = msg->lmsg.u.ms_resultp;
	KKASSERT(&td->td_msgport == netisr_cpuport(io_ctx->poll_cpuid));

	crit_enter_quick(td);

	/* Reply ASAP */
	netisr_replymsg(&msg->base, 0);

	if (io_ctx->poll_handlers == 0) {
		crit_exit_quick(td);
		logpoll(tx_end);
		return;
	}

	io_ctx->phase = 3;

	for (i = 0; i < io_ctx->poll_handlers; i++) {
		const struct iopoll_rec *rec = &io_ctx->pr[i];
		struct ifnet *ifp = rec->ifp;

		if (!lwkt_serialize_try(rec->serializer))
			continue;

		if ((ifp->if_flags & (IFF_UP | IFF_RUNNING | IFF_NPOLLING)) ==
		    (IFF_UP | IFF_RUNNING | IFF_NPOLLING))
			rec->poll_func(ifp, rec->arg, -1);

		lwkt_serialize_exit(rec->serializer);
	}

	/*
	 * Do a quick exit/enter to catch any higher-priority
	 * interrupt sources.
	 */
	crit_exit_quick(td);
	crit_enter_quick(td);

	sched_iopollmore(io_ctx, TRUE);
	io_ctx->phase = 4;

	crit_exit_quick(td);

	logpoll(tx_end);
}
/*
 * rxpollmore_handler and txpollmore_handler are called after other netisr's,
 * possibly scheduling another rxpoll_handler or txpoll_handler call, or
 * adapting the burst size for the next cycle.
 *
 * It is very bad to fetch large bursts of packets from a single card at once,
 * because the burst could take a long time to be completely processed leading
 * to unfairness.  To reduce the problem, and also to account better for time
 * spent in network-related processing, we split the burst in smaller chunks
 * of fixed size, giving control to the other netisr's between chunks.  This
 * helps in improving the fairness, reducing livelock and accounting for the
 * work performed in low-level handling.
 */
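/*
 * For example (illustrative numbers): with poll_burst = 150 and
 * poll_each_burst = 50, the first rxpoll_handler call in a tick sets
 * residual_burst to 150 and polls at most 50 packets per handler;
 * rxpollmore_handler then reschedules rxpoll_handler as long as
 * residual_burst > 0, so the tick's work is done in three chunks of
 * 50 with other netisr messages serviced in between.
 */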
static void
rxpollmore_handler(netmsg_t msg)
{
	struct thread *td = curthread;
	struct iopoll_ctx *io_ctx;
	union microtime_pcpu t;
	int kern_load;
	uint32_t pending_polls;

	logpoll(rx_mstart);

	io_ctx = msg->lmsg.u.ms_resultp;
	KKASSERT(&td->td_msgport == netisr_cpuport(io_ctx->poll_cpuid));

	crit_enter_quick(td);

	/* Reply ASAP */
	netisr_replymsg(&msg->base, 0);

	if (io_ctx->poll_handlers == 0) {
		crit_exit_quick(td);
		logpoll(rx_mend);
		return;
	}

	io_ctx->phase = 5;
	if (io_ctx->residual_burst > 0) {
		sched_iopoll(io_ctx);
		crit_exit_quick(td);
		/* Will run immediately on return, followed by netisrs */
		logpoll(rx_mend);
		return;
	}

	/* Here we can account time spent in iopoll's in this tick */
	microtime_pcpu_get(&t);
	kern_load = microtime_pcpu_diff(&io_ctx->poll_start_t, &t);
	kern_load = (kern_load * io_ctx->pollhz) / 10000;	/* 0..100 */
	io_ctx->kern_frac = kern_load;

	if (kern_load > (100 - io_ctx->user_frac)) {
		/* Try decrease ticks */
		if (io_ctx->poll_burst > 1)
			io_ctx->poll_burst--;
	} else {
		if (io_ctx->poll_burst < io_ctx->poll_burst_max)
			io_ctx->poll_burst++;
	}

	io_ctx->pending_polls--;
	pending_polls = io_ctx->pending_polls;

	if (pending_polls == 0) {
		/* We are done */
		io_ctx->phase = 0;
	} else {
		/*
		 * Last cycle was long and caused us to miss one or more
		 * hardclock ticks.  Restart processing again, but slightly
		 * reduce the burst size to prevent that this happens again.
		 */
		io_ctx->poll_burst -= (io_ctx->poll_burst / 8);
		if (io_ctx->poll_burst < 1)
			io_ctx->poll_burst = 1;
		sched_iopoll(io_ctx);
		io_ctx->phase = 6;
	}

	crit_exit_quick(td);

	logpoll(rx_mend);
}
static void
txpollmore_handler(netmsg_t msg)
{
	struct thread *td = curthread;
	struct iopoll_ctx *io_ctx;
	uint32_t pending_polls;

	logpoll(tx_mstart);

	io_ctx = msg->lmsg.u.ms_resultp;
	KKASSERT(&td->td_msgport == netisr_cpuport(io_ctx->poll_cpuid));

	crit_enter_quick(td);

	/* Reply ASAP */
	netisr_replymsg(&msg->base, 0);

	if (io_ctx->poll_handlers == 0) {
		crit_exit_quick(td);
		logpoll(tx_mend);
		return;
	}

	io_ctx->phase = 5;

	io_ctx->pending_polls--;
	pending_polls = io_ctx->pending_polls;

	if (pending_polls == 0) {
		/* We are done */
		io_ctx->phase = 0;
	} else {
		/*
		 * Last cycle was long and caused us to miss one or more
		 * hardclock ticks.  Restart processing again.
		 */
		sched_iopoll(io_ctx);
		io_ctx->phase = 6;
	}

	crit_exit_quick(td);

	logpoll(tx_mend);
}
static void
iopoll_add_sysctl(struct sysctl_ctx_list *ctx, struct sysctl_oid_list *parent,
    struct iopoll_ctx *io_ctx, int poll_type)
{
	if (poll_type == IFPOLL_RX) {
		SYSCTL_ADD_PROC(ctx, parent, OID_AUTO, "burst_max",
		    CTLTYPE_UINT | CTLFLAG_RW, io_ctx, 0, sysctl_burstmax,
		    "IU", "Max Polling burst size");

		SYSCTL_ADD_PROC(ctx, parent, OID_AUTO, "each_burst",
		    CTLTYPE_UINT | CTLFLAG_RW, io_ctx, 0, sysctl_eachburst,
		    "IU", "Max size of each burst");

		SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "burst", CTLFLAG_RD,
		    &io_ctx->poll_burst, 0, "Current polling burst size");

		SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "user_frac", CTLFLAG_RW,
		    &io_ctx->user_frac, 0, "Desired user fraction of cpu time");

		SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "kern_frac", CTLFLAG_RD,
		    &io_ctx->kern_frac, 0, "Kernel fraction of cpu time");

		SYSCTL_ADD_INT(ctx, parent, OID_AUTO, "residual_burst",
		    CTLFLAG_RD, &io_ctx->residual_burst, 0,
		    "# of residual cycles in burst");
	}

	SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "phase", CTLFLAG_RD,
	    &io_ctx->phase, 0, "Polling phase");

	SYSCTL_ADD_ULONG(ctx, parent, OID_AUTO, "suspect", CTLFLAG_RW,
	    &io_ctx->suspect, "Suspected events");

	SYSCTL_ADD_ULONG(ctx, parent, OID_AUTO, "stalled", CTLFLAG_RW,
	    &io_ctx->stalled, "Potential stalls");

	SYSCTL_ADD_ULONG(ctx, parent, OID_AUTO, "short_ticks", CTLFLAG_RW,
	    &io_ctx->short_ticks,
	    "Hardclock ticks shorter than they should be");

	SYSCTL_ADD_ULONG(ctx, parent, OID_AUTO, "lost_polls", CTLFLAG_RW,
	    &io_ctx->lost_polls,
	    "How many times we would have lost a poll tick");

	SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "pending_polls", CTLFLAG_RD,
	    &io_ctx->pending_polls, 0, "Do we need to poll again");

	SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "handlers", CTLFLAG_RD,
	    &io_ctx->poll_handlers, 0, "Number of registered poll handlers");
}
static void
sysctl_burstmax_handler(netmsg_t nmsg)
{
	struct iopoll_sysctl_netmsg *msg = (struct iopoll_sysctl_netmsg *)nmsg;
	struct iopoll_ctx *io_ctx;

	io_ctx = msg->ctx;
	KKASSERT(&curthread->td_msgport == netisr_cpuport(io_ctx->poll_cpuid));

	io_ctx->poll_burst_max = nmsg->lmsg.u.ms_result;
	if (io_ctx->poll_each_burst > io_ctx->poll_burst_max)
		io_ctx->poll_each_burst = io_ctx->poll_burst_max;
	if (io_ctx->poll_burst > io_ctx->poll_burst_max)
		io_ctx->poll_burst = io_ctx->poll_burst_max;
	if (io_ctx->residual_burst > io_ctx->poll_burst_max)
		io_ctx->residual_burst = io_ctx->poll_burst_max;

	netisr_replymsg(&nmsg->base, 0);
}
static int
sysctl_burstmax(SYSCTL_HANDLER_ARGS)
{
	struct iopoll_ctx *io_ctx = arg1;
	struct iopoll_sysctl_netmsg msg;
	uint32_t burst_max;
	int error;

	burst_max = io_ctx->poll_burst_max;
	error = sysctl_handle_int(oidp, &burst_max, 0, req);
	if (error || req->newptr == NULL)
		return error;
	if (burst_max < MIN_IOPOLL_BURST_MAX)
		burst_max = MIN_IOPOLL_BURST_MAX;
	else if (burst_max > MAX_IOPOLL_BURST_MAX)
		burst_max = MAX_IOPOLL_BURST_MAX;

	netmsg_init(&msg.base, NULL, &curthread->td_msgport,
	    0, sysctl_burstmax_handler);
	msg.base.lmsg.u.ms_result = burst_max;
	msg.ctx = io_ctx;

	return netisr_domsg(&msg.base, io_ctx->poll_cpuid);
}
static void
sysctl_eachburst_handler(netmsg_t nmsg)
{
	struct iopoll_sysctl_netmsg *msg = (struct iopoll_sysctl_netmsg *)nmsg;
	struct iopoll_ctx *io_ctx;
	uint32_t each_burst;

	io_ctx = msg->ctx;
	KKASSERT(&curthread->td_msgport == netisr_cpuport(io_ctx->poll_cpuid));

	each_burst = nmsg->lmsg.u.ms_result;
	if (each_burst > io_ctx->poll_burst_max)
		each_burst = io_ctx->poll_burst_max;
	else if (each_burst < 1)
		each_burst = 1;
	io_ctx->poll_each_burst = each_burst;

	netisr_replymsg(&nmsg->base, 0);
}
static int
sysctl_eachburst(SYSCTL_HANDLER_ARGS)
{
	struct iopoll_ctx *io_ctx = arg1;
	struct iopoll_sysctl_netmsg msg;
	uint32_t each_burst;
	int error;

	each_burst = io_ctx->poll_each_burst;
	error = sysctl_handle_int(oidp, &each_burst, 0, req);
	if (error || req->newptr == NULL)
		return error;

	netmsg_init(&msg.base, NULL, &curthread->td_msgport,
	    0, sysctl_eachburst_handler);
	msg.base.lmsg.u.ms_result = each_burst;
	msg.ctx = io_ctx;

	return netisr_domsg(&msg.base, io_ctx->poll_cpuid);
}
static int
iopoll_register(struct ifnet *ifp, struct iopoll_ctx *io_ctx,
    const struct ifpoll_io *io_rec)
{
	int error;

	KKASSERT(&curthread->td_msgport == netisr_cpuport(io_ctx->poll_cpuid));

	if (io_rec->poll_func == NULL)
		return 0;

	/*
	 * Check if there is room.
	 */
	if (io_ctx->poll_handlers >= IFPOLL_LIST_LEN) {
		/*
		 * List full, cannot register more entries.
		 * This should never happen; if it does, it is probably a
		 * broken driver trying to register multiple times. Checking
		 * this at runtime is expensive, and won't solve the problem
		 * anyways, so just report a few times and then give up.
		 */
		static int verbose = 10; /* XXX */

		if (verbose > 0) {
			kprintf("io poll handlers list full, "
			    "maybe a broken driver ?\n");
			verbose--;
		}
		error = ENOENT;
	} else {
		struct iopoll_rec *rec = &io_ctx->pr[io_ctx->poll_handlers];

		rec->ifp = ifp;
		rec->serializer = io_rec->serializer;
		rec->arg = io_rec->arg;
		rec->poll_func = io_rec->poll_func;

		io_ctx->poll_handlers++;
		error = 0;
	}
	return error;
}
static int
iopoll_deregister(struct ifnet *ifp, struct iopoll_ctx *io_ctx)
{
	int i, error;

	KKASSERT(&curthread->td_msgport == netisr_cpuport(io_ctx->poll_cpuid));

	for (i = 0; i < io_ctx->poll_handlers; ++i) {
		if (io_ctx->pr[i].ifp == ifp) /* Found it */
			break;
	}
	if (i == io_ctx->poll_handlers) {
		error = ENOENT;
	} else {
		io_ctx->poll_handlers--;
		if (i < io_ctx->poll_handlers) {
			/* Last entry replaces this one. */
			io_ctx->pr[i] = io_ctx->pr[io_ctx->poll_handlers];
		}

		if (io_ctx->poll_handlers == 0)
			iopoll_reset_state(io_ctx);
		error = 0;
	}
	return error;
}
static void
poll_comm_init(int cpuid)
{
	struct poll_comm *comm;
	char cpuid_str[16];

	comm = kmalloc(sizeof(*comm), M_DEVBUF, M_WAITOK | M_ZERO);

	if (ifpoll_stfrac < 1)
		ifpoll_stfrac = IFPOLL_STFRAC_DEFAULT;
	if (ifpoll_txfrac < 1)
		ifpoll_txfrac = IFPOLL_TXFRAC_DEFAULT;

	comm->poll_cpuid = cpuid;
	comm->pollhz = poll_comm_pollhz_div(comm, ifpoll_pollhz);
	comm->poll_stfrac = ifpoll_stfrac - 1;
	comm->poll_txfrac = ifpoll_txfrac - 1;

	ksnprintf(cpuid_str, sizeof(cpuid_str), "%d", cpuid);

	sysctl_ctx_init(&comm->sysctl_ctx);
	comm->sysctl_tree = SYSCTL_ADD_NODE(&comm->sysctl_ctx,
	    SYSCTL_STATIC_CHILDREN(_net_ifpoll),
	    OID_AUTO, cpuid_str, CTLFLAG_RD, 0, "");

	SYSCTL_ADD_PROC(&comm->sysctl_ctx, SYSCTL_CHILDREN(comm->sysctl_tree),
	    OID_AUTO, "pollhz", CTLTYPE_INT | CTLFLAG_RW,
	    comm, 0, sysctl_pollhz,
	    "I", "Device polling frequency");

	if (cpuid == 0) {
		SYSCTL_ADD_PROC(&comm->sysctl_ctx,
		    SYSCTL_CHILDREN(comm->sysctl_tree),
		    OID_AUTO, "status_frac",
		    CTLTYPE_INT | CTLFLAG_RW,
		    comm, 0, sysctl_stfrac,
		    "I", "# of cycles before status is polled");
	}

	SYSCTL_ADD_PROC(&comm->sysctl_ctx, SYSCTL_CHILDREN(comm->sysctl_tree),
	    OID_AUTO, "tx_frac", CTLTYPE_INT | CTLFLAG_RW,
	    comm, 0, sysctl_txfrac,
	    "I", "# of cycles before TX is polled");

	poll_common[cpuid] = comm;
}
static void
poll_comm_start(int cpuid)
{
	struct poll_comm *comm = poll_common[cpuid];
	systimer_func_t func;

	/*
	 * Initialize systimer
	 */
	if (cpuid == 0)
		func = poll_comm_systimer0;
	else
		func = poll_comm_systimer;
	systimer_init_periodic_nq(&comm->pollclock, func, comm, 1);
}
static void
_poll_comm_systimer(struct poll_comm *comm)
{
	iopoll_clock(rxpoll_context[comm->poll_cpuid]);
	if (comm->txfrac_count-- == 0) {
		comm->txfrac_count = comm->poll_txfrac;
		iopoll_clock(txpoll_context[comm->poll_cpuid]);
	}
}
static void
poll_comm_systimer0(systimer_t info, int in_ipi __unused,
    struct intrframe *frame __unused)
{
	struct poll_comm *comm = info->data;
	globaldata_t gd = mycpu;

	KKASSERT(comm->poll_cpuid == gd->gd_cpuid && gd->gd_cpuid == 0);

	crit_enter_gd(gd);

	if (comm->stfrac_count-- == 0) {
		comm->stfrac_count = comm->poll_stfrac;
		stpoll_clock(&stpoll_context);
	}
	_poll_comm_systimer(comm);

	crit_exit_gd(gd);
}
static void
poll_comm_systimer(systimer_t info, int in_ipi __unused,
    struct intrframe *frame __unused)
{
	struct poll_comm *comm = info->data;
	globaldata_t gd = mycpu;

	KKASSERT(comm->poll_cpuid == gd->gd_cpuid && gd->gd_cpuid != 0);

	crit_enter_gd(gd);
	_poll_comm_systimer(comm);
	crit_exit_gd(gd);
}
static void
poll_comm_adjust_pollhz(struct poll_comm *comm)
{
	uint32_t handlers;
	int pollhz = 1;

	KKASSERT(&curthread->td_msgport == netisr_cpuport(comm->poll_cpuid));

	/*
	 * If there is no polling handler registered, set systimer
	 * frequency to the lowest value.  Polling systimer frequency
	 * will be adjusted to the requested value, once there are
	 * registered handlers.
	 */
	handlers = rxpoll_context[mycpuid]->poll_handlers +
	    txpoll_context[mycpuid]->poll_handlers;
	if (comm->poll_cpuid == 0)
		handlers += stpoll_context.poll_handlers;
	if (handlers)
		pollhz = comm->pollhz;
	systimer_adjust_periodic(&comm->pollclock, pollhz);
}
static int
sysctl_pollhz(SYSCTL_HANDLER_ARGS)
{
	struct poll_comm *comm = arg1;
	struct netmsg_base nmsg;
	int error, phz;

	phz = poll_comm_pollhz_conv(comm, comm->pollhz);
	error = sysctl_handle_int(oidp, &phz, 0, req);
	if (error || req->newptr == NULL)
		return error;
	if (phz <= 0)
		return EINVAL;
	else if (phz > IFPOLL_FREQ_MAX)
		phz = IFPOLL_FREQ_MAX;

	netmsg_init(&nmsg, NULL, &curthread->td_msgport,
	    0, sysctl_pollhz_handler);
	nmsg.lmsg.u.ms_result = phz;

	return netisr_domsg(&nmsg, comm->poll_cpuid);
}
static void
sysctl_pollhz_handler(netmsg_t nmsg)
{
	struct poll_comm *comm = poll_common[mycpuid];

	KKASSERT(&curthread->td_msgport == netisr_cpuport(comm->poll_cpuid));

	/* Save polling frequency */
	comm->pollhz = poll_comm_pollhz_div(comm, nmsg->lmsg.u.ms_result);

	/*
	 * Adjust cached pollhz
	 */
	rxpoll_context[mycpuid]->pollhz = comm->pollhz;
	txpoll_context[mycpuid]->pollhz =
	    comm->pollhz / (comm->poll_txfrac + 1);

	/*
	 * Adjust polling frequency
	 */
	poll_comm_adjust_pollhz(comm);

	netisr_replymsg(&nmsg->base, 0);
}
static int
sysctl_stfrac(SYSCTL_HANDLER_ARGS)
{
	struct poll_comm *comm = arg1;
	struct netmsg_base nmsg;
	int error, stfrac;

	KKASSERT(comm->poll_cpuid == 0);

	stfrac = comm->poll_stfrac + 1;
	error = sysctl_handle_int(oidp, &stfrac, 0, req);
	if (error || req->newptr == NULL)
		return error;
	if (stfrac < 1)
		return EINVAL;

	netmsg_init(&nmsg, NULL, &curthread->td_msgport,
	    0, sysctl_stfrac_handler);
	nmsg.lmsg.u.ms_result = stfrac - 1;

	return netisr_domsg(&nmsg, comm->poll_cpuid);
}
static void
sysctl_stfrac_handler(netmsg_t nmsg)
{
	struct poll_comm *comm = poll_common[mycpuid];
	int stfrac = nmsg->lmsg.u.ms_result;

	KKASSERT(&curthread->td_msgport == netisr_cpuport(comm->poll_cpuid));

	crit_enter();
	comm->poll_stfrac = stfrac;
	if (comm->stfrac_count > comm->poll_stfrac)
		comm->stfrac_count = comm->poll_stfrac;
	crit_exit();

	netisr_replymsg(&nmsg->base, 0);
}
static int
sysctl_txfrac(SYSCTL_HANDLER_ARGS)
{
	struct poll_comm *comm = arg1;
	struct netmsg_base nmsg;
	int error, txfrac;

	txfrac = comm->poll_txfrac + 1;
	error = sysctl_handle_int(oidp, &txfrac, 0, req);
	if (error || req->newptr == NULL)
		return error;
	if (txfrac < 1)
		return EINVAL;

	netmsg_init(&nmsg, NULL, &curthread->td_msgport,
	    0, sysctl_txfrac_handler);
	nmsg.lmsg.u.ms_result = txfrac - 1;

	return netisr_domsg(&nmsg, comm->poll_cpuid);
}
static void
sysctl_txfrac_handler(netmsg_t nmsg)
{
	struct poll_comm *comm = poll_common[mycpuid];
	int txfrac = nmsg->lmsg.u.ms_result;

	KKASSERT(&curthread->td_msgport == netisr_cpuport(comm->poll_cpuid));

	crit_enter();
	comm->poll_txfrac = txfrac;
	if (comm->txfrac_count > comm->poll_txfrac)
		comm->txfrac_count = comm->poll_txfrac;
	crit_exit();

	netisr_replymsg(&nmsg->base, 0);
}
void
ifpoll_compat_setup(struct ifpoll_compat *cp,
    struct sysctl_ctx_list *sysctl_ctx,
    struct sysctl_oid *sysctl_tree,
    int unit, struct lwkt_serialize *slz)
{
	cp->ifpc_stcount = 0;
	cp->ifpc_stfrac = ((poll_common[0]->poll_stfrac + 1) *
	    howmany(IOPOLL_BURST_MAX, IOPOLL_EACH_BURST)) - 1;

	cp->ifpc_cpuid = unit % netisr_ncpus;
	cp->ifpc_serializer = slz;

	if (sysctl_ctx != NULL && sysctl_tree != NULL) {
		SYSCTL_ADD_PROC(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
		    OID_AUTO, "npoll_stfrac", CTLTYPE_INT | CTLFLAG_RW,
		    cp, 0, sysctl_compat_npoll_stfrac, "I",
		    "polling status frac");
		SYSCTL_ADD_PROC(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
		    OID_AUTO, "npoll_cpuid", CTLTYPE_INT | CTLFLAG_RW,
		    cp, 0, sysctl_compat_npoll_cpuid, "I",
		    "polling cpuid");
	}
}
static int
sysctl_compat_npoll_stfrac(SYSCTL_HANDLER_ARGS)
{
	struct ifpoll_compat *cp = arg1;
	int error = 0, stfrac;

	lwkt_serialize_enter(cp->ifpc_serializer);

	stfrac = cp->ifpc_stfrac + 1;
	error = sysctl_handle_int(oidp, &stfrac, 0, req);
	if (!error && req->newptr != NULL) {
		if (stfrac < 1) {
			error = EINVAL;
		} else {
			cp->ifpc_stfrac = stfrac - 1;
			if (cp->ifpc_stcount > cp->ifpc_stfrac)
				cp->ifpc_stcount = cp->ifpc_stfrac;
		}
	}

	lwkt_serialize_exit(cp->ifpc_serializer);
	return error;
}
static int
sysctl_compat_npoll_cpuid(SYSCTL_HANDLER_ARGS)
{
	struct ifpoll_compat *cp = arg1;
	int error = 0, cpuid;

	lwkt_serialize_enter(cp->ifpc_serializer);

	cpuid = cp->ifpc_cpuid;
	error = sysctl_handle_int(oidp, &cpuid, 0, req);
	if (!error && req->newptr != NULL) {
		if (cpuid < 0 || cpuid >= netisr_ncpus)
			error = EINVAL;
		else
			cp->ifpc_cpuid = cpuid;
	}

	lwkt_serialize_exit(cp->ifpc_serializer);
	return error;
}