/*
 * Common framework for low-level network console, dump, and debugger code
 *
 * Sep 8 2003  Matt Mackall <mpm@selenic.com>
 *
 * based on the netconsole code from:
 *
 * Copyright (C) 2001  Ingo Molnar <mingo@redhat.com>
 * Copyright (C) 2002  Red Hat, Inc.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/moduleparam.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/string.h>
#include <linux/if_arp.h>
#include <linux/inetdevice.h>
#include <linux/inet.h>
#include <linux/interrupt.h>
#include <linux/netpoll.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/rcupdate.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/if_vlan.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <net/addrconf.h>
#include <net/ndisc.h>
#include <net/ip6_checksum.h>
#include <asm/unaligned.h>
#include <trace/events/napi.h>

/*
 * We maintain a small pool of fully-sized skbs, to make sure the
 * message gets out even in extreme OOM situations.
 */

#define MAX_UDP_CHUNK 1460
#define MAX_SKBS 32

static struct sk_buff_head skb_pool;

static atomic_t trapped;

static struct srcu_struct netpoll_srcu;

#define USEC_PER_POLL		50
#define NETPOLL_RX_ENABLED	1
#define NETPOLL_RX_DROP		2

#define MAX_SKB_SIZE							\
	(sizeof(struct ethhdr) +					\
	 sizeof(struct iphdr) +						\
	 sizeof(struct udphdr) +					\
	 MAX_UDP_CHUNK)

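/*
 * Worked size, assuming the common untagged-Ethernet/IPv4 case:
 * 14 bytes of ethhdr + 20 of iphdr + 8 of udphdr + 1460 of
 * MAX_UDP_CHUNK comes to 1502 bytes, so one pool skb always holds a
 * full console message plus its headers.
 */
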
static void zap_completion_queue(void);
static void netpoll_neigh_reply(struct sk_buff *skb,
				struct netpoll_info *npinfo);

static unsigned int carrier_timeout = 4;
module_param(carrier_timeout, uint, 0644);

#define np_info(np, fmt, ...)				\
	pr_info("%s: " fmt, np->name, ##__VA_ARGS__)
#define np_err(np, fmt, ...)				\
	pr_err("%s: " fmt, np->name, ##__VA_ARGS__)
#define np_notice(np, fmt, ...)				\
	pr_notice("%s: " fmt, np->name, ##__VA_ARGS__)

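/*
 * For illustration (the np->name value here is hypothetical): with
 * np->name set to "netconsole", np_info(np, "local port %d\n",
 * np->local_port) expands to pr_info("%s: local port %d\n", np->name,
 * np->local_port), and with pr_fmt() above prints something like
 * "netpoll: netconsole: local port 6665".
 */
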
static void queue_process(struct work_struct *work)
{
	struct netpoll_info *npinfo =
		container_of(work, struct netpoll_info, tx_work.work);
	struct sk_buff *skb;
	unsigned long flags;

	while ((skb = skb_dequeue(&npinfo->txq))) {
		struct net_device *dev = skb->dev;
		const struct net_device_ops *ops = dev->netdev_ops;
		struct netdev_queue *txq;

		if (!netif_device_present(dev) || !netif_running(dev)) {
			__kfree_skb(skb);
			continue;
		}

		txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));

		local_irq_save(flags);
		__netif_tx_lock(txq, smp_processor_id());
		if (netif_xmit_frozen_or_stopped(txq) ||
		    ops->ndo_start_xmit(skb, dev) != NETDEV_TX_OK) {
			skb_queue_head(&npinfo->txq, skb);
			__netif_tx_unlock(txq);
			local_irq_restore(flags);

			schedule_delayed_work(&npinfo->tx_work, HZ/10);
			return;
		}
		__netif_tx_unlock(txq);
		local_irq_restore(flags);
	}
}

static __sum16 checksum_udp(struct sk_buff *skb, struct udphdr *uh,
			    unsigned short ulen, __be32 saddr, __be32 daddr)
{
	__wsum psum;

	if (uh->check == 0 || skb_csum_unnecessary(skb))
		return 0;

	psum = csum_tcpudp_nofold(saddr, daddr, ulen, IPPROTO_UDP, 0);

	if (skb->ip_summed == CHECKSUM_COMPLETE &&
	    !csum_fold(csum_add(psum, skb->csum)))
		return 0;

	skb->csum = psum;

	return __skb_checksum_complete(skb);
}

/*
 * Check whether delayed processing was scheduled for our NIC. If so,
 * we attempt to grab the poll lock and use ->poll() to pump the card.
 * If this fails, either we've recursed in ->poll() or it's already
 * running on another CPU.
 *
 * Note: we don't mask interrupts with this lock because we're using
 * trylock here and interrupts are already disabled in the softirq
 * case. Further, we test the poll_owner to avoid recursion on UP
 * systems where the lock doesn't exist.
 *
 * In cases where there is bi-directional communications, reading only
 * one message at a time can lead to packets being dropped by the
 * network adapter, forcing superfluous retries and possibly timeouts.
 * Thus, we set our budget to greater than 1.
 */

static int poll_one_napi(struct netpoll_info *npinfo,
			 struct napi_struct *napi, int budget)
{
	int work;

	/* net_rx_action's ->poll() invocations and ours are
	 * synchronized by this test which is only made while
	 * holding the napi->poll_lock.
	 */
	if (!test_bit(NAPI_STATE_SCHED, &napi->state))
		return budget;

	npinfo->rx_flags |= NETPOLL_RX_DROP;
	atomic_inc(&trapped);
	set_bit(NAPI_STATE_NPSVC, &napi->state);

	work = napi->poll(napi, budget);
	trace_napi_poll(napi);

	clear_bit(NAPI_STATE_NPSVC, &napi->state);
	atomic_dec(&trapped);
	npinfo->rx_flags &= ~NETPOLL_RX_DROP;

	return budget - work;
}

static void poll_napi(struct net_device *dev)
{
	struct napi_struct *napi;
	int budget = 16;

	list_for_each_entry(napi, &dev->napi_list, dev_list) {
		if (napi->poll_owner != smp_processor_id() &&
		    spin_trylock(&napi->poll_lock)) {
			budget = poll_one_napi(rcu_dereference_bh(dev->npinfo),
					       napi, budget);
			spin_unlock(&napi->poll_lock);

			if (!budget)
				break;
		}
	}
}

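/*
 * Worked example of the budget bookkeeping above: poll_napi() offers
 * a budget of 16 to the first NAPI instance; if its ->poll() consumes
 * 10 packets, poll_one_napi() returns 16 - 10 = 6, which becomes the
 * budget offered to the next instance, and polling stops early once
 * the budget is exhausted.
 */
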
static void service_neigh_queue(struct netpoll_info *npi)
{
	if (npi) {
		struct sk_buff *skb;

		while ((skb = skb_dequeue(&npi->neigh_tx)))
			netpoll_neigh_reply(skb, npi);
	}
}

static void netpoll_poll_dev(struct net_device *dev)
{
	const struct net_device_ops *ops;
	struct netpoll_info *ni = rcu_dereference_bh(dev->npinfo);

	/* Don't do any rx activity if the dev_lock mutex is held;
	 * the dev_open/close paths use this to block netpoll activity
	 * while changing device state.
	 */
	if (!mutex_trylock(&dev->npinfo->dev_lock))
		return;

	if (!dev || !netif_running(dev)) {
		mutex_unlock(&dev->npinfo->dev_lock);
		return;
	}

	ops = dev->netdev_ops;
	if (!ops->ndo_poll_controller) {
		mutex_unlock(&dev->npinfo->dev_lock);
		return;
	}

	/* Process pending work on NIC */
	ops->ndo_poll_controller(dev);

	poll_napi(dev);

	mutex_unlock(&dev->npinfo->dev_lock);

	if (dev->flags & IFF_SLAVE) {
		if (ni) {
			struct net_device *bond_dev;
			struct sk_buff *skb;
			struct netpoll_info *bond_ni;

			bond_dev = netdev_master_upper_dev_get_rcu(dev);
			bond_ni = rcu_dereference_bh(bond_dev->npinfo);
			while ((skb = skb_dequeue(&ni->neigh_tx))) {
				skb->dev = bond_dev;
				skb_queue_tail(&bond_ni->neigh_tx, skb);
			}
		}
	}

	service_neigh_queue(ni);

	zap_completion_queue();
}

int netpoll_rx_disable(struct net_device *dev)
{
	struct netpoll_info *ni;
	int idx;

	might_sleep();
	idx = srcu_read_lock(&netpoll_srcu);
	ni = srcu_dereference(dev->npinfo, &netpoll_srcu);
	if (ni)
		mutex_lock(&ni->dev_lock);
	srcu_read_unlock(&netpoll_srcu, idx);
	return 0;
}
EXPORT_SYMBOL(netpoll_rx_disable);

void netpoll_rx_enable(struct net_device *dev)
{
	struct netpoll_info *ni;

	rcu_read_lock();
	ni = rcu_dereference(dev->npinfo);
	if (ni)
		mutex_unlock(&ni->dev_lock);
	rcu_read_unlock();
}
EXPORT_SYMBOL(netpoll_rx_enable);

static void refill_skbs(void)
{
	struct sk_buff *skb;
	unsigned long flags;

	spin_lock_irqsave(&skb_pool.lock, flags);
	while (skb_pool.qlen < MAX_SKBS) {
		skb = alloc_skb(MAX_SKB_SIZE, GFP_ATOMIC);
		if (!skb)
			break;

		__skb_queue_tail(&skb_pool, skb);
	}
	spin_unlock_irqrestore(&skb_pool.lock, flags);
}

static void zap_completion_queue(void)
{
	unsigned long flags;
	struct softnet_data *sd = &get_cpu_var(softnet_data);

	if (sd->completion_queue) {
		struct sk_buff *clist;

		local_irq_save(flags);
		clist = sd->completion_queue;
		sd->completion_queue = NULL;
		local_irq_restore(flags);

		while (clist != NULL) {
			struct sk_buff *skb = clist;
			clist = clist->next;
			if (skb->destructor) {
				atomic_inc(&skb->users);
				dev_kfree_skb_any(skb); /* put this one back */
			} else {
				__kfree_skb(skb);
			}
		}
	}

	put_cpu_var(softnet_data);
}

static struct sk_buff *find_skb(struct netpoll *np, int len, int reserve)
{
	int count = 0;
	struct sk_buff *skb;

	zap_completion_queue();
	refill_skbs();
repeat:

	skb = alloc_skb(len, GFP_ATOMIC);
	if (!skb)
		skb = skb_dequeue(&skb_pool);

	if (!skb) {
		if (++count < 10) {
			netpoll_poll_dev(np->dev);
			goto repeat;
		}
		return NULL;
	}

	atomic_set(&skb->users, 1);
	skb_reserve(skb, reserve);
	return skb;
}

static int netpoll_owner_active(struct net_device *dev)
{
	struct napi_struct *napi;

	list_for_each_entry(napi, &dev->napi_list, dev_list) {
		if (napi->poll_owner == smp_processor_id())
			return 1;
	}
	return 0;
}

/* call with IRQ disabled */
void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
			     struct net_device *dev)
{
	int status = NETDEV_TX_BUSY;
	unsigned long tries;
	const struct net_device_ops *ops = dev->netdev_ops;
	/* It is up to the caller to keep npinfo alive. */
	struct netpoll_info *npinfo;

	WARN_ON_ONCE(!irqs_disabled());

	npinfo = rcu_dereference_bh(np->dev->npinfo);
	if (!npinfo || !netif_running(dev) || !netif_device_present(dev)) {
		__kfree_skb(skb);
		return;
	}

	/* don't get messages out of order, and no recursion */
	if (skb_queue_len(&npinfo->txq) == 0 && !netpoll_owner_active(dev)) {
		struct netdev_queue *txq;

		txq = netdev_pick_tx(dev, skb);

		/* try until next clock tick */
		for (tries = jiffies_to_usecs(1)/USEC_PER_POLL;
		     tries > 0; --tries) {
			if (__netif_tx_trylock(txq)) {
				if (!netif_xmit_stopped(txq)) {
					if (vlan_tx_tag_present(skb) &&
					    !(netif_skb_features(skb) & NETIF_F_HW_VLAN_TX)) {
						skb = __vlan_put_tag(skb, vlan_tx_tag_get(skb));
						if (unlikely(!skb)) {
							/* skb was freed on failure;
							 * treat it as a drop
							 */
							__netif_tx_unlock(txq);
							return;
						}
						skb->vlan_tci = 0;
					}

					status = ops->ndo_start_xmit(skb, dev);
					if (status == NETDEV_TX_OK)
						txq_trans_update(txq);
				}
				__netif_tx_unlock(txq);

				if (status == NETDEV_TX_OK)
					break;
			}

			/* tickle device, maybe there is some cleanup */
			netpoll_poll_dev(np->dev);

			udelay(USEC_PER_POLL);
		}

		WARN_ONCE(!irqs_disabled(),
			  "netpoll_send_skb_on_dev(): %s enabled interrupts in poll (%pF)\n",
			  dev->name, ops->ndo_start_xmit);
	}

	if (status != NETDEV_TX_OK) {
		skb_queue_tail(&npinfo->txq, skb);
		schedule_delayed_work(&npinfo->tx_work, 0);
	}
}
EXPORT_SYMBOL(netpoll_send_skb_on_dev);

void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
{
	int total_len, ip_len, udp_len;
	struct sk_buff *skb;
	struct udphdr *udph;
	struct iphdr *iph;
	struct ethhdr *eth;
	static atomic_t ip_ident;
	struct ipv6hdr *ip6h;

	udp_len = len + sizeof(*udph);
	if (np->ipv6)
		ip_len = udp_len + sizeof(*ip6h);
	else
		ip_len = udp_len + sizeof(*iph);

	total_len = ip_len + LL_RESERVED_SPACE(np->dev);

	skb = find_skb(np, total_len + np->dev->needed_tailroom,
		       total_len - len);
	if (!skb)
		return;

	skb_copy_to_linear_data(skb, msg, len);
	skb_put(skb, len);

	skb_push(skb, sizeof(*udph));
	skb_reset_transport_header(skb);
	udph = udp_hdr(skb);
	udph->source = htons(np->local_port);
	udph->dest = htons(np->remote_port);
	udph->len = htons(udp_len);

	if (np->ipv6) {
		udph->check = 0;
		udph->check = csum_ipv6_magic(&np->local_ip.in6,
					      &np->remote_ip.in6,
					      udp_len, IPPROTO_UDP,
					      csum_partial(udph, udp_len, 0));
		if (udph->check == 0)
			udph->check = CSUM_MANGLED_0;

		skb_push(skb, sizeof(*ip6h));
		skb_reset_network_header(skb);
		ip6h = ipv6_hdr(skb);

		/* ip6h->version = 6; ip6h->priority = 0; */
		put_unaligned(0x60, (unsigned char *)ip6h);
		ip6h->flow_lbl[0] = 0;
		ip6h->flow_lbl[1] = 0;
		ip6h->flow_lbl[2] = 0;

		ip6h->payload_len = htons(sizeof(struct udphdr) + len);
		ip6h->nexthdr = IPPROTO_UDP;
		ip6h->hop_limit = 32;
		ip6h->saddr = np->local_ip.in6;
		ip6h->daddr = np->remote_ip.in6;

		eth = (struct ethhdr *) skb_push(skb, ETH_HLEN);
		skb_reset_mac_header(skb);
		skb->protocol = eth->h_proto = htons(ETH_P_IPV6);
	} else {
		udph->check = 0;
		udph->check = csum_tcpudp_magic(np->local_ip.ip,
						np->remote_ip.ip,
						udp_len, IPPROTO_UDP,
						csum_partial(udph, udp_len, 0));
		if (udph->check == 0)
			udph->check = CSUM_MANGLED_0;

		skb_push(skb, sizeof(*iph));
		skb_reset_network_header(skb);
		iph = ip_hdr(skb);

		/* iph->version = 4; iph->ihl = 5; */
		put_unaligned(0x45, (unsigned char *)iph);
		iph->tos      = 0;
		put_unaligned(htons(ip_len), &(iph->tot_len));
		iph->id       = htons(atomic_inc_return(&ip_ident));
		iph->frag_off = 0;
		iph->ttl      = 64;
		iph->protocol = IPPROTO_UDP;
		iph->check    = 0;
		put_unaligned(np->local_ip.ip, &(iph->saddr));
		put_unaligned(np->remote_ip.ip, &(iph->daddr));
		iph->check    = ip_fast_csum((unsigned char *)iph, iph->ihl);

		eth = (struct ethhdr *) skb_push(skb, ETH_HLEN);
		skb_reset_mac_header(skb);
		skb->protocol = eth->h_proto = htons(ETH_P_IP);
	}

	memcpy(eth->h_source, np->dev->dev_addr, ETH_ALEN);
	memcpy(eth->h_dest, np->remote_mac, ETH_ALEN);

	skb->dev = np->dev;

	netpoll_send_skb(np, skb);
}
EXPORT_SYMBOL(netpoll_send_udp);

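/*
 * Typical client usage, sketched after the netconsole module (the
 * struct netpoll fields below are real members; the values and the
 * message buffer are illustrative only):
 *
 *	static struct netpoll np = {
 *		.name        = "netconsole",
 *		.dev_name    = "eth0",
 *		.local_port  = 6665,
 *		.remote_port = 6666,
 *		.remote_mac  = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff},
 *	};
 *
 *	if (netpoll_setup(&np) == 0)
 *		netpoll_send_udp(&np, msg, msg_len);
 */
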
static void netpoll_neigh_reply(struct sk_buff *skb, struct netpoll_info *npinfo)
{
	int size, type = ARPOP_REPLY;
	__be32 sip, tip;
	unsigned char *sha;
	struct sk_buff *send_skb;
	struct netpoll *np, *tmp;
	unsigned long flags;
	int hlen, tlen;
	int hits = 0, proto;

	if (list_empty(&npinfo->rx_np))
		return;

	/* Before checking the packet, we do some early
	 * inspection to see whether this is interesting at all.
	 */
	spin_lock_irqsave(&npinfo->rx_lock, flags);
	list_for_each_entry_safe(np, tmp, &npinfo->rx_np, rx) {
		if (np->dev == skb->dev)
			hits++;
	}
	spin_unlock_irqrestore(&npinfo->rx_lock, flags);

	/* No netpoll struct is using this dev */
	if (!hits)
		return;

	proto = ntohs(eth_hdr(skb)->h_proto);
	if (proto == ETH_P_IP) {
		struct arphdr *arp;
		unsigned char *arp_ptr;
		/* No arp on this interface */
		if (skb->dev->flags & IFF_NOARP)
			return;

		if (!pskb_may_pull(skb, arp_hdr_len(skb->dev)))
			return;

		skb_reset_network_header(skb);
		skb_reset_transport_header(skb);
		arp = arp_hdr(skb);

		if ((arp->ar_hrd != htons(ARPHRD_ETHER) &&
		     arp->ar_hrd != htons(ARPHRD_IEEE802)) ||
		    arp->ar_pro != htons(ETH_P_IP) ||
		    arp->ar_op != htons(ARPOP_REQUEST))
			return;

		arp_ptr = (unsigned char *)(arp+1);
		/* save the location of the src hw addr */
		sha = arp_ptr;
		arp_ptr += skb->dev->addr_len;
		memcpy(&sip, arp_ptr, 4);
		arp_ptr += 4;
		/* If we actually cared about dst hw addr,
		 * it would get copied here.
		 */
		arp_ptr += skb->dev->addr_len;
		memcpy(&tip, arp_ptr, 4);

		/* Should we ignore arp? */
		if (ipv4_is_loopback(tip) || ipv4_is_multicast(tip))
			return;

		size = arp_hdr_len(skb->dev);

		spin_lock_irqsave(&npinfo->rx_lock, flags);
		list_for_each_entry_safe(np, tmp, &npinfo->rx_np, rx) {
			if (tip != np->local_ip.ip)
				continue;

			hlen = LL_RESERVED_SPACE(np->dev);
			tlen = np->dev->needed_tailroom;
			send_skb = find_skb(np, size + hlen + tlen, hlen);
			if (!send_skb)
				continue;

			skb_reset_network_header(send_skb);
			arp = (struct arphdr *) skb_put(send_skb, size);
			send_skb->dev = skb->dev;
			send_skb->protocol = htons(ETH_P_ARP);

			/* Fill the device header for the ARP frame */
			if (dev_hard_header(send_skb, skb->dev, ETH_P_ARP,
					    sha, np->dev->dev_addr,
					    send_skb->len) < 0) {
				kfree_skb(send_skb);
				continue;
			}

			/*
			 * Fill out the arp protocol part.
			 *
			 * we only support ethernet device type,
			 * which (according to RFC 1390) should
			 * always equal 1 (Ethernet).
			 */

			arp->ar_hrd = htons(np->dev->type);
			arp->ar_pro = htons(ETH_P_IP);
			arp->ar_hln = np->dev->addr_len;
			arp->ar_pln = 4;
			arp->ar_op = htons(type);

			arp_ptr = (unsigned char *)(arp + 1);
			memcpy(arp_ptr, np->dev->dev_addr, np->dev->addr_len);
			arp_ptr += np->dev->addr_len;
			memcpy(arp_ptr, &tip, 4);
			arp_ptr += 4;
			memcpy(arp_ptr, sha, np->dev->addr_len);
			arp_ptr += np->dev->addr_len;
			memcpy(arp_ptr, &sip, 4);

			netpoll_send_skb(np, send_skb);

			/* If there are several rx_hooks for the same
			 * address, we're fine with sending a single reply.
			 */
			break;
		}
		spin_unlock_irqrestore(&npinfo->rx_lock, flags);
	} else if (proto == ETH_P_IPV6) {
#if IS_ENABLED(CONFIG_IPV6)
		struct nd_msg *msg;
		u8 *lladdr = NULL;
		struct ipv6hdr *hdr;
		struct icmp6hdr *icmp6h;
		const struct in6_addr *saddr;
		const struct in6_addr *daddr;
		struct inet6_dev *in6_dev = NULL;
		struct in6_addr *target;

		in6_dev = in6_dev_get(skb->dev);
		if (!in6_dev || !in6_dev->cnf.accept_ra)
			return;

		if (!pskb_may_pull(skb, skb->len))
			return;

		msg = (struct nd_msg *)skb_transport_header(skb);

		__skb_push(skb, skb->data - skb_transport_header(skb));

		if (ipv6_hdr(skb)->hop_limit != 255)
			return;
		if (msg->icmph.icmp6_code != 0)
			return;
		if (msg->icmph.icmp6_type != NDISC_NEIGHBOUR_SOLICITATION)
			return;

		saddr = &ipv6_hdr(skb)->saddr;
		daddr = &ipv6_hdr(skb)->daddr;

		size = sizeof(struct icmp6hdr) + sizeof(struct in6_addr);

		spin_lock_irqsave(&npinfo->rx_lock, flags);
		list_for_each_entry_safe(np, tmp, &npinfo->rx_np, rx) {
			if (!ipv6_addr_equal(daddr, &np->local_ip.in6))
				continue;

			hlen = LL_RESERVED_SPACE(np->dev);
			tlen = np->dev->needed_tailroom;
			send_skb = find_skb(np, size + hlen + tlen, hlen);
			if (!send_skb)
				continue;

			send_skb->protocol = htons(ETH_P_IPV6);
			send_skb->dev = skb->dev;

			skb_reset_network_header(send_skb);
			skb_put(send_skb, sizeof(struct ipv6hdr));
			hdr = ipv6_hdr(send_skb);

			*(__be32 *)hdr = htonl(0x60000000);

			hdr->payload_len = htons(size);
			hdr->nexthdr = IPPROTO_ICMPV6;
			hdr->hop_limit = 255;
			hdr->saddr = *saddr;
			hdr->daddr = *daddr;

			send_skb->transport_header = send_skb->tail;
			skb_put(send_skb, size);

			/* build the advertisement in send_skb, not in the
			 * received request skb
			 */
			icmp6h = (struct icmp6hdr *)skb_transport_header(send_skb);
			icmp6h->icmp6_type = NDISC_NEIGHBOUR_ADVERTISEMENT;
			icmp6h->icmp6_router = 0;
			icmp6h->icmp6_solicited = 1;
			target = (struct in6_addr *)(skb_transport_header(send_skb) +
						     sizeof(struct icmp6hdr));
			*target = msg->target;
			icmp6h->icmp6_cksum = csum_ipv6_magic(saddr, daddr, size,
							      IPPROTO_ICMPV6,
							      csum_partial(icmp6h,
									   size, 0));

			if (dev_hard_header(send_skb, skb->dev, ETH_P_IPV6,
					    lladdr, np->dev->dev_addr,
					    send_skb->len) < 0) {
				kfree_skb(send_skb);
				continue;
			}

			netpoll_send_skb(np, send_skb);

			/* If there are several rx_hooks for the same
			 * address, we're fine with sending a single reply.
			 */
			break;
		}
		spin_unlock_irqrestore(&npinfo->rx_lock, flags);
#endif
	}
}

static bool pkt_is_ns(struct sk_buff *skb)
{
	struct nd_msg *msg;
	struct ipv6hdr *hdr;

	/* a neighbour solicitation is carried in an IPv6 packet */
	if (skb->protocol != htons(ETH_P_IPV6))
		return false;
	if (!pskb_may_pull(skb, sizeof(struct ipv6hdr) + sizeof(struct nd_msg)))
		return false;

	msg = (struct nd_msg *)skb_transport_header(skb);
	__skb_push(skb, skb->data - skb_transport_header(skb));
	hdr = ipv6_hdr(skb);

	if (hdr->nexthdr != IPPROTO_ICMPV6)
		return false;
	if (hdr->hop_limit != 255)
		return false;
	if (msg->icmph.icmp6_code != 0)
		return false;
	if (msg->icmph.icmp6_type != NDISC_NEIGHBOUR_SOLICITATION)
		return false;

	return true;
}

int __netpoll_rx(struct sk_buff *skb, struct netpoll_info *npinfo)
{
	int proto, len, ulen;
	int hits = 0;
	const struct iphdr *iph;
	struct udphdr *uh;
	struct netpoll *np, *tmp;

	if (list_empty(&npinfo->rx_np))
		goto out;

	if (skb->dev->type != ARPHRD_ETHER)
		goto out;

	/* check if netpoll clients need ARP */
	if (skb->protocol == htons(ETH_P_ARP) && atomic_read(&trapped)) {
		skb_queue_tail(&npinfo->neigh_tx, skb);
		return 1;
	} else if (pkt_is_ns(skb) && atomic_read(&trapped)) {
		skb_queue_tail(&npinfo->neigh_tx, skb);
		return 1;
	}

	if (skb->protocol == cpu_to_be16(ETH_P_8021Q)) {
		skb = vlan_untag(skb);
		if (unlikely(!skb))
			goto out;
	}

	proto = ntohs(eth_hdr(skb)->h_proto);
	if (proto != ETH_P_IP && proto != ETH_P_IPV6)
		goto out;
	if (skb->pkt_type == PACKET_OTHERHOST)
		goto out;
	if (skb_shared(skb))
		goto out;

	if (proto == ETH_P_IP) {
		if (!pskb_may_pull(skb, sizeof(struct iphdr)))
			goto out;
		iph = (struct iphdr *)skb->data;
		if (iph->ihl < 5 || iph->version != 4)
			goto out;
		if (!pskb_may_pull(skb, iph->ihl*4))
			goto out;
		iph = (struct iphdr *)skb->data;
		if (ip_fast_csum((u8 *)iph, iph->ihl) != 0)
			goto out;

		len = ntohs(iph->tot_len);
		if (skb->len < len || len < iph->ihl*4)
			goto out;

		/*
		 * Our transport medium may have padded the buffer out.
		 * Now we trim to the true length of the frame.
		 */
		if (pskb_trim_rcsum(skb, len))
			goto out;

		iph = (struct iphdr *)skb->data;
		if (iph->protocol != IPPROTO_UDP)
			goto out;

		len -= iph->ihl*4;
		uh = (struct udphdr *)(((char *)iph) + iph->ihl*4);
		ulen = ntohs(uh->len);

		if (ulen != len)
			goto out;
		if (checksum_udp(skb, uh, ulen, iph->saddr, iph->daddr))
			goto out;
		list_for_each_entry_safe(np, tmp, &npinfo->rx_np, rx) {
			if (np->local_ip.ip && np->local_ip.ip != iph->daddr)
				continue;
			if (np->remote_ip.ip && np->remote_ip.ip != iph->saddr)
				continue;
			if (np->local_port && np->local_port != ntohs(uh->dest))
				continue;

			np->rx_hook(np, ntohs(uh->source),
				    (char *)(uh+1),
				    ulen - sizeof(struct udphdr));
			hits++;
		}
	} else {
#if IS_ENABLED(CONFIG_IPV6)
		const struct ipv6hdr *ip6h;

		if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
			goto out;
		ip6h = (struct ipv6hdr *)skb->data;
		if (ip6h->version != 6)
			goto out;
		len = ntohs(ip6h->payload_len);
		if (!len)
			goto out;
		if (len + sizeof(struct ipv6hdr) > skb->len)
			goto out;
		if (pskb_trim_rcsum(skb, len + sizeof(struct ipv6hdr)))
			goto out;
		ip6h = ipv6_hdr(skb);
		if (!pskb_may_pull(skb, sizeof(struct udphdr)))
			goto out;
		uh = udp_hdr(skb);
		ulen = ntohs(uh->len);
		if (ulen != skb->len)
			goto out;
		if (udp6_csum_init(skb, uh, IPPROTO_UDP))
			goto out;
		list_for_each_entry_safe(np, tmp, &npinfo->rx_np, rx) {
			if (!ipv6_addr_equal(&np->local_ip.in6, &ip6h->daddr))
				continue;
			if (!ipv6_addr_equal(&np->remote_ip.in6, &ip6h->saddr))
				continue;
			if (np->local_port && np->local_port != ntohs(uh->dest))
				continue;

			np->rx_hook(np, ntohs(uh->source),
				    (char *)(uh+1),
				    ulen - sizeof(struct udphdr));
			hits++;
		}
#endif
	}

	if (!hits)
		goto out;

	kfree_skb(skb);
	return 1;

out:
	if (atomic_read(&trapped)) {
		kfree_skb(skb);
		return 1;
	}

	return 0;
}

void netpoll_print_options(struct netpoll *np)
{
	np_info(np, "local port %d\n", np->local_port);
	if (np->ipv6)
		np_info(np, "local IPv6 address %pI6c\n", &np->local_ip.in6);
	else
		np_info(np, "local IPv4 address %pI4\n", &np->local_ip.ip);
	np_info(np, "interface '%s'\n", np->dev_name);
	np_info(np, "remote port %d\n", np->remote_port);
	if (np->ipv6)
		np_info(np, "remote IPv6 address %pI6c\n", &np->remote_ip.in6);
	else
		np_info(np, "remote IPv4 address %pI4\n", &np->remote_ip.ip);
	np_info(np, "remote ethernet address %pM\n", np->remote_mac);
}
EXPORT_SYMBOL(netpoll_print_options);

static int netpoll_parse_ip_addr(const char *str, union inet_addr *addr)
{
	const char *end;

	if (!strchr(str, ':') &&
	    in4_pton(str, -1, (void *)addr, -1, &end) > 0) {
		if (!*end)
			return 0;
	}
	if (in6_pton(str, -1, addr->in6.s6_addr, -1, &end) > 0) {
#if IS_ENABLED(CONFIG_IPV6)
		if (!*end)
			return 1;
#else
		return -1;
#endif
	}
	return -1;
}

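/*
 * Parse a netconsole-style configuration string of the form
 * [src-port]@[src-ip]/[dev],[tgt-port]@<tgt-ip>/[tgt-macaddr], e.g.
 * (with illustrative values):
 *
 *	6665@10.0.0.1/eth0,6666@10.0.0.2/00:11:22:33:44:55
 *
 * A field followed by a delimiter may be left empty to keep its
 * default, as in "@/,@10.0.0.2/".
 */
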
int netpoll_parse_options(struct netpoll *np, char *opt)
{
	char *cur = opt, *delim;
	int ipv6;

	if (*cur != '@') {
		if ((delim = strchr(cur, '@')) == NULL)
			goto parse_failed;
		*delim = 0;
		if (kstrtou16(cur, 10, &np->local_port))
			goto parse_failed;
		cur = delim;
	}
	cur++;

	if (*cur != '/') {
		if ((delim = strchr(cur, '/')) == NULL)
			goto parse_failed;
		*delim = 0;
		ipv6 = netpoll_parse_ip_addr(cur, &np->local_ip);
		if (ipv6 < 0)
			goto parse_failed;
		else
			np->ipv6 = (bool)ipv6;
		cur = delim;
	}
	cur++;

	if (*cur != ',') {
		/* parse out dev name */
		if ((delim = strchr(cur, ',')) == NULL)
			goto parse_failed;
		*delim = 0;
		strlcpy(np->dev_name, cur, sizeof(np->dev_name));
		cur = delim;
	}
	cur++;

	if (*cur != '@') {
		/* dst port */
		if ((delim = strchr(cur, '@')) == NULL)
			goto parse_failed;
		*delim = 0;
		if (*cur == ' ' || *cur == '\t')
			np_info(np, "warning: whitespace is not allowed\n");
		if (kstrtou16(cur, 10, &np->remote_port))
			goto parse_failed;
		cur = delim;
	}
	cur++;

	/* dst ip */
	if ((delim = strchr(cur, '/')) == NULL)
		goto parse_failed;
	*delim = 0;
	ipv6 = netpoll_parse_ip_addr(cur, &np->remote_ip);
	if (ipv6 < 0)
		goto parse_failed;
	else if (np->ipv6 != (bool)ipv6)
		goto parse_failed;
	else
		np->ipv6 = (bool)ipv6;
	cur = delim + 1;

	if (*cur != 0) {
		/* MAC address */
		if (!mac_pton(cur, np->remote_mac))
			goto parse_failed;
	}

	netpoll_print_options(np);

	return 0;

 parse_failed:
	np_info(np, "couldn't parse config at '%s'!\n", cur);
	return -1;
}
EXPORT_SYMBOL(netpoll_parse_options);

int __netpoll_setup(struct netpoll *np, struct net_device *ndev, gfp_t gfp)
{
	struct netpoll_info *npinfo;
	const struct net_device_ops *ops;
	unsigned long flags;
	int err;

	np->dev = ndev;
	strlcpy(np->dev_name, ndev->name, IFNAMSIZ);

	if ((ndev->priv_flags & IFF_DISABLE_NETPOLL) ||
	    !ndev->netdev_ops->ndo_poll_controller) {
		np_err(np, "%s doesn't support polling, aborting\n",
		       np->dev_name);
		err = -ENOTSUPP;
		goto out;
	}

	if (!ndev->npinfo) {
		npinfo = kmalloc(sizeof(*npinfo), gfp);
		if (!npinfo) {
			err = -ENOMEM;
			goto out;
		}

		npinfo->rx_flags = 0;
		INIT_LIST_HEAD(&npinfo->rx_np);

		spin_lock_init(&npinfo->rx_lock);
		mutex_init(&npinfo->dev_lock);
		skb_queue_head_init(&npinfo->neigh_tx);
		skb_queue_head_init(&npinfo->txq);
		INIT_DELAYED_WORK(&npinfo->tx_work, queue_process);

		atomic_set(&npinfo->refcnt, 1);

		ops = np->dev->netdev_ops;
		if (ops->ndo_netpoll_setup) {
			err = ops->ndo_netpoll_setup(ndev, npinfo, gfp);
			if (err)
				goto free_npinfo;
		}
	} else {
		npinfo = ndev->npinfo;
		atomic_inc(&npinfo->refcnt);
	}

	npinfo->netpoll = np;

	if (np->rx_hook) {
		spin_lock_irqsave(&npinfo->rx_lock, flags);
		npinfo->rx_flags |= NETPOLL_RX_ENABLED;
		list_add_tail(&np->rx, &npinfo->rx_np);
		spin_unlock_irqrestore(&npinfo->rx_lock, flags);
	}

	/* last thing to do is link it to the net device structure */
	rcu_assign_pointer(ndev->npinfo, npinfo);

	return 0;

free_npinfo:
	kfree(npinfo);
out:
	return err;
}
EXPORT_SYMBOL_GPL(__netpoll_setup);

int netpoll_setup(struct netpoll *np)
{
	struct net_device *ndev = NULL;
	struct in_device *in_dev;
	int err;

	rtnl_lock();
	if (np->dev_name) {
		struct net *net = current->nsproxy->net_ns;
		ndev = __dev_get_by_name(net, np->dev_name);
	}
	if (!ndev) {
		np_err(np, "%s doesn't exist, aborting\n", np->dev_name);
		err = -ENODEV;
		goto unlock;
	}
	dev_hold(ndev);

	if (netdev_master_upper_dev_get(ndev)) {
		np_err(np, "%s is a slave device, aborting\n", np->dev_name);
		err = -EBUSY;
		goto put;
	}

	if (!netif_running(ndev)) {
		unsigned long atmost, atleast;

		np_info(np, "device %s not up yet, forcing it\n", np->dev_name);

		err = dev_open(ndev);

		if (err) {
			np_err(np, "failed to open %s\n", ndev->name);
			goto put;
		}

		rtnl_unlock();
		atleast = jiffies + HZ/10;
		atmost = jiffies + carrier_timeout * HZ;
		while (!netif_carrier_ok(ndev)) {
			if (time_after(jiffies, atmost)) {
				np_notice(np, "timeout waiting for carrier\n");
				break;
			}
			msleep(1);
		}

		/* If carrier appears to come up instantly, we don't
		 * trust it and pause so that we don't pump all our
		 * queued console messages into the bitbucket.
		 */

		if (time_before(jiffies, atleast)) {
			np_notice(np, "carrier detect appears untrustworthy, waiting 4 seconds\n");
			msleep(4000);
		}
		rtnl_lock();
	}

	if (!np->local_ip.ip) {
		if (!np->ipv6) {
			in_dev = __in_dev_get_rtnl(ndev);

			if (!in_dev || !in_dev->ifa_list) {
				np_err(np, "no IP address for %s, aborting\n",
				       np->dev_name);
				err = -EDESTADDRREQ;
				goto put;
			}

			np->local_ip.ip = in_dev->ifa_list->ifa_local;
			np_info(np, "local IP %pI4\n", &np->local_ip.ip);
		} else {
#if IS_ENABLED(CONFIG_IPV6)
			struct inet6_dev *idev;

			err = -EDESTADDRREQ;
			idev = __in6_dev_get(ndev);
			if (idev) {
				struct inet6_ifaddr *ifp;

				read_lock_bh(&idev->lock);
				list_for_each_entry(ifp, &idev->addr_list, if_list) {
					if (ipv6_addr_type(&ifp->addr) & IPV6_ADDR_LINKLOCAL)
						continue;
					np->local_ip.in6 = ifp->addr;
					err = 0;
					break;
				}
				read_unlock_bh(&idev->lock);
			}
			if (err) {
				np_err(np, "no IPv6 address for %s, aborting\n",
				       np->dev_name);
				goto put;
			} else
				np_info(np, "local IPv6 %pI6c\n", &np->local_ip.in6);
#else
			np_err(np, "IPv6 is not supported %s, aborting\n",
			       np->dev_name);
			err = -EINVAL;
			goto put;
#endif
		}
	}

	/* fill up the skb queue */
	refill_skbs();

	err = __netpoll_setup(np, ndev, GFP_KERNEL);
	if (err)
		goto put;

	rtnl_unlock();
	return 0;

put:
	dev_put(ndev);
unlock:
	rtnl_unlock();
	return err;
}
EXPORT_SYMBOL(netpoll_setup);

static int __init netpoll_init(void)
{
	skb_queue_head_init(&skb_pool);
	init_srcu_struct(&netpoll_srcu);
	return 0;
}
core_initcall(netpoll_init);

static void rcu_cleanup_netpoll_info(struct rcu_head *rcu_head)
{
	struct netpoll_info *npinfo =
			container_of(rcu_head, struct netpoll_info, rcu);

	skb_queue_purge(&npinfo->neigh_tx);
	skb_queue_purge(&npinfo->txq);

	/* we can't call cancel_delayed_work_sync here, as we are in softirq */
	cancel_delayed_work(&npinfo->tx_work);

	/* clean after last, unfinished work */
	__skb_queue_purge(&npinfo->txq);
	/* now cancel it again */
	cancel_delayed_work(&npinfo->tx_work);
	kfree(npinfo);
}

void __netpoll_cleanup(struct netpoll *np)
{
	struct netpoll_info *npinfo;
	unsigned long flags;

	npinfo = np->dev->npinfo;
	if (!npinfo)
		return;

	if (!list_empty(&npinfo->rx_np)) {
		spin_lock_irqsave(&npinfo->rx_lock, flags);
		list_del(&np->rx);
		if (list_empty(&npinfo->rx_np))
			npinfo->rx_flags &= ~NETPOLL_RX_ENABLED;
		spin_unlock_irqrestore(&npinfo->rx_lock, flags);
	}

	synchronize_srcu(&netpoll_srcu);

	if (atomic_dec_and_test(&npinfo->refcnt)) {
		const struct net_device_ops *ops;

		ops = np->dev->netdev_ops;
		if (ops->ndo_netpoll_cleanup)
			ops->ndo_netpoll_cleanup(np->dev);

		RCU_INIT_POINTER(np->dev->npinfo, NULL);
		call_rcu_bh(&npinfo->rcu, rcu_cleanup_netpoll_info);
	}
}
EXPORT_SYMBOL_GPL(__netpoll_cleanup);

static void rcu_cleanup_netpoll(struct rcu_head *rcu_head)
{
	struct netpoll *np = container_of(rcu_head, struct netpoll, rcu);

	__netpoll_cleanup(np);
	kfree(np);
}

void __netpoll_free_rcu(struct netpoll *np)
{
	call_rcu_bh(&np->rcu, rcu_cleanup_netpoll);
}
EXPORT_SYMBOL_GPL(__netpoll_free_rcu);

void netpoll_cleanup(struct netpoll *np)
{
	if (!np->dev)
		return;

	rtnl_lock();
	__netpoll_cleanup(np);
	rtnl_unlock();

	dev_put(np->dev);
	np->dev = NULL;
}
EXPORT_SYMBOL(netpoll_cleanup);

int netpoll_trap(void)
{
	return atomic_read(&trapped);
}
EXPORT_SYMBOL(netpoll_trap);

void netpoll_set_trap(int trap)
{
	if (trap)
		atomic_inc(&trapped);
	else
		atomic_dec(&trapped);
}
EXPORT_SYMBOL(netpoll_set_trap);