#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/if_vlan.h>
#include <linux/netpoll.h>
#include <linux/export.h>
#include "vlan.h"

bool vlan_do_receive(struct sk_buff **skbp, bool last_handler)
{
	struct sk_buff *skb = *skbp;
	u16 vlan_id = skb->vlan_tci & VLAN_VID_MASK;
	struct net_device *vlan_dev;
	struct vlan_pcpu_stats *rx_stats;

	vlan_dev = vlan_find_dev(skb->dev, vlan_id);
	if (!vlan_dev) {
		/* Only the last call to vlan_do_receive() should change
		 * pkt_type to PACKET_OTHERHOST
		 */
		if (vlan_id && last_handler)
			skb->pkt_type = PACKET_OTHERHOST;
		return false;
	}

	skb = *skbp = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		return false;

	skb->dev = vlan_dev;
	if (skb->pkt_type == PACKET_OTHERHOST) {
		/* Our lower layer thinks this is not local, let's make sure.
		 * This allows the VLAN to have a different MAC than the
		 * underlying device, and still route correctly. */
		if (!compare_ether_addr(eth_hdr(skb)->h_dest,
					vlan_dev->dev_addr))
			skb->pkt_type = PACKET_HOST;
	}

	if (!(vlan_dev_priv(vlan_dev)->flags & VLAN_FLAG_REORDER_HDR)) {
		unsigned int offset = skb->data - skb_mac_header(skb);

		/*
		 * vlan_insert_tag expects skb->data pointing to mac header.
		 * So change skb->data before calling it and change back to
		 * original position later
		 */
		skb_push(skb, offset);
		skb = *skbp = vlan_insert_tag(skb, skb->vlan_tci);
		if (!skb)
			return false;
		skb_pull(skb, offset + VLAN_HLEN);
		skb_reset_mac_len(skb);
	}

	skb->priority = vlan_get_ingress_priority(vlan_dev, skb->vlan_tci);
	skb->vlan_tci = 0;

	rx_stats = this_cpu_ptr(vlan_dev_priv(vlan_dev)->vlan_pcpu_stats);

	u64_stats_update_begin(&rx_stats->syncp);
	rx_stats->rx_packets++;
	rx_stats->rx_bytes += skb->len;
	if (skb->pkt_type == PACKET_MULTICAST)
		rx_stats->rx_multicast++;
	u64_stats_update_end(&rx_stats->syncp);

	return true;
}
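
/*
 * For orientation, a hedged sketch of the expected caller in the RX path
 * (modelled on __netif_receive_skb(); the labels and last_handler value
 * are assumptions, not part of this file). On a true return the skb has
 * been retargeted at the VLAN device and should go through the RX path
 * again; a NULL *skbp after a false return means the skb was consumed:
 *
 *	if (vlan_tx_tag_present(skb)) {
 *		if (vlan_do_receive(&skb, last_handler))
 *			goto another_round;
 *		else if (unlikely(!skb))
 *			goto out;
 *	}
 */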

/* Must be invoked with rcu_read_lock or with RTNL. */
struct net_device *__vlan_find_dev_deep(struct net_device *real_dev,
					u16 vlan_id)
{
	struct vlan_info *vlan_info = rcu_dereference_rtnl(real_dev->vlan_info);

	if (vlan_info) {
		return vlan_group_get_device(&vlan_info->grp, vlan_id);
	} else {
		/*
		 * Bonding slaves do not have grp assigned to themselves.
		 * Grp is assigned to bonding master instead.
		 */
		if (netif_is_bond_slave(real_dev))
			return __vlan_find_dev_deep(real_dev->master, vlan_id);
	}

	return NULL;
}
EXPORT_SYMBOL(__vlan_find_dev_deep);
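
/*
 * Example (hypothetical caller, under the locking rule above):
 *
 *	rcu_read_lock();
 *	vlan_dev = __vlan_find_dev_deep(bond_slave_dev, vlan_id);
 *	...
 *	rcu_read_unlock();
 *
 * The "deep" part is the recursion above: a bond slave carries no
 * vlan_info of its own, so the lookup is retried on its master.
 */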

struct net_device *vlan_dev_real_dev(const struct net_device *dev)
{
	return vlan_dev_priv(dev)->real_dev;
}
EXPORT_SYMBOL(vlan_dev_real_dev);

u16 vlan_dev_vlan_id(const struct net_device *dev)
{
	return vlan_dev_priv(dev)->vlan_id;
}
EXPORT_SYMBOL(vlan_dev_vlan_id);
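
/*
 * Example (hypothetical): mapping a VLAN netdev back to the port and tag
 * it rides on, using the two accessors above:
 *
 *	struct net_device *lower = vlan_dev_real_dev(vlan_dev);
 *	u16 vid = vlan_dev_vlan_id(vlan_dev);
 */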

static struct sk_buff *vlan_reorder_header(struct sk_buff *skb)
{
	if (skb_cow(skb, skb_headroom(skb)) < 0)
		return NULL;
	/* Move the two MAC addresses forward by VLAN_HLEN, overwriting
	 * the VLAN tag so the addresses abut the encapsulated proto field.
	 */
	memmove(skb->data - ETH_HLEN, skb->data - VLAN_ETH_HLEN, 2 * ETH_ALEN);
	skb->mac_header += VLAN_HLEN;
	skb_reset_mac_len(skb);
	return skb;
}

struct sk_buff *vlan_untag(struct sk_buff *skb)
{
	struct vlan_hdr *vhdr;
	u16 vlan_tci;

	if (unlikely(vlan_tx_tag_present(skb))) {
		/* vlan_tci is already set-up so leave this for another time */
		return skb;
	}

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		goto err_free;

	if (unlikely(!pskb_may_pull(skb, VLAN_HLEN)))
		goto err_free;

	vhdr = (struct vlan_hdr *) skb->data;
	vlan_tci = ntohs(vhdr->h_vlan_TCI);
	__vlan_hwaccel_put_tag(skb, vlan_tci);

	skb_pull_rcsum(skb, VLAN_HLEN);
	vlan_set_encap_proto(skb, vhdr);

	skb = vlan_reorder_header(skb);
	if (unlikely(!skb))
		goto err_free;

	skb_reset_network_header(skb);
	skb_reset_transport_header(skb);
	return skb;

err_free:
	kfree_skb(skb);
	return NULL;
}
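
/*
 * The net effect of vlan_untag() on the frame layout (standard 802.1Q,
 * shown here for reference only):
 *
 *   before: | dest MAC | src MAC | 0x8100 | TCI | encap proto | payload |
 *   after:  | dest MAC | src MAC | encap proto | payload |
 *
 * The TCI survives in skb->vlan_tci via __vlan_hwaccel_put_tag(), so the
 * rest of the stack sees the frame as if hardware had untagged it.
 */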

/*
 * vlan info and vid list
 */

static void vlan_group_free(struct vlan_group *grp)
{
	int i;

	for (i = 0; i < VLAN_GROUP_ARRAY_SPLIT_PARTS; i++)
		kfree(grp->vlan_devices_arrays[i]);
}

static void vlan_info_free(struct vlan_info *vlan_info)
{
	vlan_group_free(&vlan_info->grp);
	kfree(vlan_info);
}

static void vlan_info_rcu_free(struct rcu_head *rcu)
{
	vlan_info_free(container_of(rcu, struct vlan_info, rcu));
}

static struct vlan_info *vlan_info_alloc(struct net_device *dev)
{
	struct vlan_info *vlan_info;

	vlan_info = kzalloc(sizeof(struct vlan_info), GFP_KERNEL);
	if (!vlan_info)
		return NULL;

	vlan_info->real_dev = dev;
	INIT_LIST_HEAD(&vlan_info->vid_list);
	return vlan_info;
}

struct vlan_vid_info {
	struct list_head list;
	unsigned short vid;
	int refcount;
};

static struct vlan_vid_info *vlan_vid_info_get(struct vlan_info *vlan_info,
					       unsigned short vid)
{
	struct vlan_vid_info *vid_info;

	list_for_each_entry(vid_info, &vlan_info->vid_list, list) {
		if (vid_info->vid == vid)
			return vid_info;
	}
	return NULL;
}

static struct vlan_vid_info *vlan_vid_info_alloc(unsigned short vid)
{
	struct vlan_vid_info *vid_info;

	vid_info = kzalloc(sizeof(struct vlan_vid_info), GFP_KERNEL);
	if (!vid_info)
		return NULL;
	vid_info->vid = vid;

	return vid_info;
}

static int __vlan_vid_add(struct vlan_info *vlan_info, unsigned short vid,
			  struct vlan_vid_info **pvid_info)
{
	struct net_device *dev = vlan_info->real_dev;
	const struct net_device_ops *ops = dev->netdev_ops;
	struct vlan_vid_info *vid_info;
	int err;

	vid_info = vlan_vid_info_alloc(vid);
	if (!vid_info)
		return -ENOMEM;

	if ((dev->features & NETIF_F_HW_VLAN_FILTER) &&
	    ops->ndo_vlan_rx_add_vid) {
		err = ops->ndo_vlan_rx_add_vid(dev, vid);
		if (err) {
			kfree(vid_info);
			return err;
		}
	}
	list_add(&vid_info->list, &vlan_info->vid_list);
	vlan_info->nr_vids++;
	*pvid_info = vid_info;
	return 0;
}

int vlan_vid_add(struct net_device *dev, unsigned short vid)
{
	struct vlan_info *vlan_info;
	struct vlan_vid_info *vid_info;
	bool vlan_info_created = false;
	int err;

	ASSERT_RTNL();

	vlan_info = rtnl_dereference(dev->vlan_info);
	if (!vlan_info) {
		vlan_info = vlan_info_alloc(dev);
		if (!vlan_info)
			return -ENOMEM;
		vlan_info_created = true;
	}
	vid_info = vlan_vid_info_get(vlan_info, vid);
	if (!vid_info) {
		err = __vlan_vid_add(vlan_info, vid, &vid_info);
		if (err)
			goto out_free_vlan_info;
	}
	vid_info->refcount++;

	if (vlan_info_created)
		rcu_assign_pointer(dev->vlan_info, vlan_info);

	return 0;

out_free_vlan_info:
	if (vlan_info_created)
		kfree(vlan_info);

	return err;
}
EXPORT_SYMBOL(vlan_vid_add);
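
/*
 * Usage sketch (hypothetical caller, names assumed): adding a filter
 * entry for a VLAN under RTNL, as required by the ASSERT_RTNL() above:
 *
 *	rtnl_lock();
 *	err = vlan_vid_add(real_dev, vid);
 *	rtnl_unlock();
 *	if (err)
 *		return err;
 */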

static void __vlan_vid_del(struct vlan_info *vlan_info,
			   struct vlan_vid_info *vid_info)
{
	struct net_device *dev = vlan_info->real_dev;
	const struct net_device_ops *ops = dev->netdev_ops;
	unsigned short vid = vid_info->vid;
	int err;

	if ((dev->features & NETIF_F_HW_VLAN_FILTER) &&
	    ops->ndo_vlan_rx_kill_vid) {
		err = ops->ndo_vlan_rx_kill_vid(dev, vid);
		if (err) {
			pr_warn("failed to kill vid %d for device %s\n",
				vid, dev->name);
		}
	}
	list_del(&vid_info->list);
	kfree(vid_info);
	vlan_info->nr_vids--;
}

void vlan_vid_del(struct net_device *dev, unsigned short vid)
{
	struct vlan_info *vlan_info;
	struct vlan_vid_info *vid_info;

	ASSERT_RTNL();

	vlan_info = rtnl_dereference(dev->vlan_info);
	if (!vlan_info)
		return;

	vid_info = vlan_vid_info_get(vlan_info, vid);
	if (!vid_info)
		return;
	vid_info->refcount--;
	if (vid_info->refcount == 0) {
		__vlan_vid_del(vlan_info, vid_info);
		if (vlan_info->nr_vids == 0) {
			RCU_INIT_POINTER(dev->vlan_info, NULL);
			call_rcu(&vlan_info->rcu, vlan_info_rcu_free);
		}
	}
}
EXPORT_SYMBOL(vlan_vid_del);
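
/*
 * Refcount semantics, derived from the code above: each vlan_vid_add()
 * must be balanced by one vlan_vid_del(); the hardware filter and the
 * vid_info entry are only torn down on the last reference:
 *
 *	vlan_vid_add(dev, 10);	refcount 1, ndo_vlan_rx_add_vid() called
 *	vlan_vid_add(dev, 10);	refcount 2, no hardware call
 *	vlan_vid_del(dev, 10);	refcount 1, entry kept
 *	vlan_vid_del(dev, 10);	refcount 0, ndo_vlan_rx_kill_vid() called
 */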

int vlan_vids_add_by_dev(struct net_device *dev,
			 const struct net_device *by_dev)
{
	struct vlan_vid_info *vid_info;
	struct vlan_info *vlan_info;
	int err;

	ASSERT_RTNL();

	vlan_info = rtnl_dereference(by_dev->vlan_info);
	if (!vlan_info)
		return 0;

	list_for_each_entry(vid_info, &vlan_info->vid_list, list) {
		err = vlan_vid_add(dev, vid_info->vid);
		if (err)
			goto unwind;
	}
	return 0;

unwind:
	/* Undo the additions made so far, walking back from vid_info. */
	list_for_each_entry_continue_reverse(vid_info,
					     &vlan_info->vid_list,
					     list) {
		vlan_vid_del(dev, vid_info->vid);
	}

	return err;
}
EXPORT_SYMBOL(vlan_vids_add_by_dev);

void vlan_vids_del_by_dev(struct net_device *dev,
			  const struct net_device *by_dev)
{
	struct vlan_vid_info *vid_info;
	struct vlan_info *vlan_info;

	ASSERT_RTNL();

	vlan_info = rtnl_dereference(by_dev->vlan_info);
	if (!vlan_info)
		return;

	list_for_each_entry(vid_info, &vlan_info->vid_list, list)
		vlan_vid_del(dev, vid_info->vid);
}
EXPORT_SYMBOL(vlan_vids_del_by_dev);
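
/*
 * Hypothetical use of the two helpers above, modelled on stacked devices
 * such as bonding: when dev starts carrying by_dev's traffic, mirror the
 * whole vid list onto it, and drop the references again on teardown:
 *
 *	err = vlan_vids_add_by_dev(slave_dev, bond_dev);
 *	if (err)
 *		goto err_enslave;
 *	...
 *	vlan_vids_del_by_dev(slave_dev, bond_dev);
 */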