#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/if_vlan.h>
#include <linux/netpoll.h>
#include "vlan.h"
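
/*
 * vlan_do_receive - deliver a VLAN-tagged skb to the matching VLAN device
 * @skbp: pointer to the skb pointer, so the caller observes any reallocation
 *	  done by skb_share_check()
 *
 * Looks up the VLAN device configured on skb->dev for the VID carried in
 * skb->vlan_tci, retargets the skb to that device, applies the ingress
 * priority mapping and updates the per-cpu receive statistics.  Returns
 * true if a matching VLAN device was found, false otherwise.
 */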
bool vlan_do_receive(struct sk_buff **skbp)
{
	struct sk_buff *skb = *skbp;
	u16 vlan_id = skb->vlan_tci & VLAN_VID_MASK;
	struct net_device *vlan_dev;
	struct vlan_pcpu_stats *rx_stats;

	vlan_dev = vlan_find_dev(skb->dev, vlan_id);
	if (!vlan_dev) {
		/* No VLAN device configured for this tag: leave the skb to
		 * the caller, but mark it as not destined for this host. */
		if (vlan_id)
			skb->pkt_type = PACKET_OTHERHOST;
		return false;
	}

	skb = *skbp = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		return false;

	skb->dev = vlan_dev;
	skb->priority = vlan_get_ingress_priority(vlan_dev, skb->vlan_tci);
	skb->vlan_tci = 0;

	rx_stats = this_cpu_ptr(vlan_dev_info(vlan_dev)->vlan_pcpu_stats);

	u64_stats_update_begin(&rx_stats->syncp);
	rx_stats->rx_packets++;
	rx_stats->rx_bytes += skb->len;

	switch (skb->pkt_type) {
	case PACKET_BROADCAST:
		break;
	case PACKET_MULTICAST:
		rx_stats->rx_multicast++;
		break;
	case PACKET_OTHERHOST:
		/* Our lower layer thinks this is not local, let's make sure.
		 * This allows the VLAN to have a different MAC than the
		 * underlying device, and still route correctly. */
		if (!compare_ether_addr(eth_hdr(skb)->h_dest,
					vlan_dev->dev_addr))
			skb->pkt_type = PACKET_HOST;
		break;
	}
	u64_stats_update_end(&rx_stats->syncp);

	return true;
}
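
/* The two accessors below are exported for code outside the VLAN layer that
 * holds a VLAN net_device and needs to map it back to the underlying real
 * device or to its 802.1Q VLAN ID; both expect @dev to be a VLAN device. */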
struct net_device *vlan_dev_real_dev(const struct net_device *dev)
{
	return vlan_dev_info(dev)->real_dev;
}
EXPORT_SYMBOL(vlan_dev_real_dev);

u16 vlan_dev_vlan_id(const struct net_device *dev)
{
	return vlan_dev_info(dev)->vlan_id;
}
EXPORT_SYMBOL(vlan_dev_vlan_id);

/* VLAN rx hw acceleration helper.  This acts like netif_{rx,receive_skb}(). */
int __vlan_hwaccel_rx(struct sk_buff *skb, struct vlan_group *grp,
		      u16 vlan_tci, int polling)
{
	__vlan_hwaccel_put_tag(skb, vlan_tci);
	return polling ? netif_receive_skb(skb) : netif_rx(skb);
}
EXPORT_SYMBOL(__vlan_hwaccel_rx);
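
/*
 * Illustrative driver-side usage (a sketch, not taken from this file; the
 * descriptor and adapter names are hypothetical): a NIC that extracts the
 * VLAN tag in hardware hands the stripped frame plus the TCI to the stack
 * via the vlan_hwaccel_rx()/vlan_hwaccel_receive_skb() wrappers in
 * <linux/if_vlan.h>, which call __vlan_hwaccel_rx() above:
 *
 *	if (rx_desc_has_vlan(desc))
 *		vlan_hwaccel_receive_skb(skb, adapter->vlgrp,
 *					 le16_to_cpu(desc->vlan_tci));
 *	else
 *		netif_receive_skb(skb);
 */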

gro_result_t vlan_gro_receive(struct napi_struct *napi, struct vlan_group *grp,
			      unsigned int vlan_tci, struct sk_buff *skb)
{
	__vlan_hwaccel_put_tag(skb, vlan_tci);
	return napi_gro_receive(napi, skb);
}
EXPORT_SYMBOL(vlan_gro_receive);

gro_result_t vlan_gro_frags(struct napi_struct *napi, struct vlan_group *grp,
			    unsigned int vlan_tci)
{
	__vlan_hwaccel_put_tag(napi->skb, vlan_tci);
	return napi_gro_frags(napi);
}
EXPORT_SYMBOL(vlan_gro_frags);
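
/*
 * Illustrative NAPI usage of the two GRO variants above (a sketch; the
 * descriptor and adapter names are hypothetical): drivers whose hardware
 * strips the tag call vlan_gro_receive()/vlan_gro_frags() from their poll
 * routine in place of napi_gro_receive()/napi_gro_frags(), passing the
 * extracted TCI along:
 *
 *	if (rx_desc_has_vlan(desc))
 *		vlan_gro_receive(&adapter->napi, adapter->vlgrp,
 *				 le16_to_cpu(desc->vlan_tci), skb);
 *	else
 *		napi_gro_receive(&adapter->napi, skb);
 */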

/* If the VLAN device has VLAN_FLAG_REORDER_HDR set, rebuild the frame so
 * that the Ethernet header directly precedes the payload, as if no VLAN tag
 * had ever been present.  Returns NULL if expanding the headroom fails. */
static struct sk_buff *vlan_check_reorder_header(struct sk_buff *skb)
{
	if (vlan_dev_info(skb->dev)->flags & VLAN_FLAG_REORDER_HDR) {
		if (skb_cow(skb, skb_headroom(skb)) < 0)
			skb = NULL;
		if (skb) {
			/* Lifted from Gleb's VLAN code...
			 * Move the 12 bytes of destination + source MAC
			 * forward over the VLAN tag, so the addresses are
			 * immediately followed by the encapsulated EtherType,
			 * then point mac_header at the new location. */
			memmove(skb->data - ETH_HLEN,
				skb->data - VLAN_ETH_HLEN, 12);
			skb->mac_header += VLAN_HLEN;
		}
	}
	return skb;
}
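
/* Decide what skb->protocol should become once the 802.1Q header has been
 * pulled, based on the encapsulated protocol field of the VLAN header. */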
static void vlan_set_encap_proto(struct sk_buff *skb, struct vlan_hdr *vhdr)
{
	__be16 proto;
	unsigned char *rawp;

	/*
	 * Was a VLAN packet, grab the encapsulated protocol, which the layer
	 * three protocols care about.
	 */

	proto = vhdr->h_vlan_encapsulated_proto;
	/* Values of 1536 (0x0600) and above are real EtherTypes; smaller
	 * values are 802.3 frame lengths. */
	if (ntohs(proto) >= 1536) {
		skb->protocol = proto;
		return;
	}

	rawp = skb->data;
	if (*(unsigned short *) rawp == 0xFFFF)
		/*
		 * This is a magic hack to spot IPX packets. Older Novell
		 * breaks the protocol design and runs IPX over 802.3 without
		 * an 802.2 LLC layer. We look for FFFF which isn't a used
		 * 802.2 SSAP/DSAP. This won't work for fault tolerant netware
		 * but does for the rest.
		 */
		skb->protocol = htons(ETH_P_802_3);
	else
		/*
		 * Real 802.2 LLC
		 */
		skb->protocol = htons(ETH_P_802_2);
}
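
/*
 * Worked example for the type/length test above (values are illustrative):
 * an encapsulated field of 0x0800 is >= 1536, so it is a real EtherType
 * (IPv4) and is copied to skb->protocol unchanged.  A field of 0x002E (46)
 * is an 802.3 length instead; the first payload bytes are then examined,
 * where 0xFFFF means raw IPX-in-802.3 (ETH_P_802_3) and anything else is
 * treated as 802.2 LLC (ETH_P_802_2).
 */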

/* Software untagging: pull the 802.1Q header out of the frame and store the
 * tag in skb->vlan_tci, so that the rest of the receive path sees the same
 * thing it would have seen had the NIC stripped the tag in hardware.  Used
 * for ETH_P_8021Q frames whose tag was not removed by the driver.  Returns
 * NULL on failure. */
struct sk_buff *vlan_untag(struct sk_buff *skb)
{
	struct vlan_hdr *vhdr;
	u16 vlan_tci;

	if (unlikely(vlan_tx_tag_present(skb))) {
		/* vlan_tci is already set-up so leave this for another time */
		return skb;
	}

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		goto err_free;

	if (unlikely(!pskb_may_pull(skb, VLAN_HLEN)))
		goto err_free;

	vhdr = (struct vlan_hdr *) skb->data;
	vlan_tci = ntohs(vhdr->h_vlan_TCI);
	__vlan_hwaccel_put_tag(skb, vlan_tci);

	skb_pull_rcsum(skb, VLAN_HLEN);
	vlan_set_encap_proto(skb, vhdr);

	skb = vlan_check_reorder_header(skb);
	if (unlikely(!skb))
		goto err_free;

	return skb;

err_free:
	kfree_skb(skb);
	return NULL;
}