/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/atomic.h>
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/slab.h>

#include <net/route.h>
#include <net/pkt_sched.h>

#include "hyperv_net.h"
struct net_device_context {
	/* point back to our device context */
	struct hv_device *device_ctx;
	struct delayed_work dwork;
	struct work_struct work;
};

#define RING_SIZE_MIN 64
static int ring_size = 128;
module_param(ring_size, int, S_IRUGO);
MODULE_PARM_DESC(ring_size, "Ring buffer size (# of pages)");
static void do_set_multicast(struct work_struct *w)
{
	struct net_device_context *ndevctx =
		container_of(w, struct net_device_context, work);
	struct netvsc_device *nvdev;
	struct rndis_device *rdev;

	nvdev = hv_get_drvdata(ndevctx->device_ctx);
	if (nvdev == NULL || nvdev->ndev == NULL)
		return;

	rdev = nvdev->extension;
	if (rdev == NULL)
		return;

	if (nvdev->ndev->flags & IFF_PROMISC)
		rndis_filter_set_packet_filter(rdev,
			NDIS_PACKET_TYPE_PROMISCUOUS);
	else
		rndis_filter_set_packet_filter(rdev,
			NDIS_PACKET_TYPE_BROADCAST |
			NDIS_PACKET_TYPE_ALL_MULTICAST |
			NDIS_PACKET_TYPE_DIRECTED);
}

static void netvsc_set_multicast_list(struct net_device *net)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);

	schedule_work(&net_device_ctx->work);
}

static int netvsc_open(struct net_device *net)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct hv_device *device_obj = net_device_ctx->device_ctx;
	int ret = 0;

	/* Open up the device */
	ret = rndis_filter_open(device_obj);
	if (ret != 0) {
		netdev_err(net, "unable to open device (ret %d).\n", ret);
		return ret;
	}

	netif_start_queue(net);

	return ret;
}

static int netvsc_close(struct net_device *net)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct hv_device *device_obj = net_device_ctx->device_ctx;
	int ret;

	netif_tx_disable(net);

	/* Make sure netvsc_set_multicast_list doesn't re-enable filter! */
	cancel_work_sync(&net_device_ctx->work);
	ret = rndis_filter_close(device_obj);
	if (ret != 0)
		netdev_err(net, "unable to close device (ret %d).\n", ret);

	return ret;
}

static void netvsc_xmit_completion(void *context)
{
	struct hv_netvsc_packet *packet = (struct hv_netvsc_packet *)context;
	struct sk_buff *skb = (struct sk_buff *)
		(unsigned long)packet->completion.send.send_completion_tid;

	kfree(packet);

	if (skb)
		dev_kfree_skb_any(skb);
}
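
/*
 * Transmit path.  The packet is handed to the host as an array of page
 * buffers: slot 0 is reserved for the RNDIS message (filled in later by the
 * RNDIS filter code), slots 1..npg_data describe the linear skb data, and
 * the remaining slots describe the paged fragments.
 */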
static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct hv_netvsc_packet *packet;
	int ret;
	unsigned int i, num_pages, npg_data;

	/* Add multipages for skb->data and additional 2 for RNDIS */
	npg_data = (((unsigned long)skb->data + skb_headlen(skb) - 1)
		>> PAGE_SHIFT) - ((unsigned long)skb->data >> PAGE_SHIFT) + 1;
	num_pages = skb_shinfo(skb)->nr_frags + npg_data + 2;
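
	/*
	 * npg_data is the number of pages the linear data spans: the page
	 * index of its last byte minus the page index of its first byte,
	 * plus one.  The extra 2 slots cover the RNDIS message page and a
	 * spare in case the RNDIS message crosses a page boundary (see the
	 * page_buf_cnt adjustment below).
	 */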

	/* Allocate a netvsc packet based on # of frags. */
	packet = kzalloc(sizeof(struct hv_netvsc_packet) +
			 (num_pages * sizeof(struct hv_page_buffer)) +
			 sizeof(struct rndis_filter_packet) +
			 NDIS_VLAN_PPI_SIZE, GFP_ATOMIC);
	if (!packet) {
		/* out of memory, drop packet */
		netdev_err(net, "unable to allocate hv_netvsc_packet\n");

		dev_kfree_skb(skb);
		net->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}
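
	/*
	 * The single allocation above holds, in order: the hv_netvsc_packet
	 * itself, the page-buffer array, and scratch space for the RNDIS
	 * filter packet plus a possible VLAN per-packet-info; ->extension
	 * below points at that RNDIS scratch area.
	 */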

	packet->vlan_tci = skb->vlan_tci;

	packet->extension = (void *)(unsigned long)packet +
				sizeof(struct hv_netvsc_packet) +
				(num_pages * sizeof(struct hv_page_buffer));

	/* If the rndis msg goes beyond 1 page, we will add 1 later */
	packet->page_buf_cnt = num_pages - 1;

	/* Initialize it from the skb */
	packet->total_data_buflen = skb->len;

	/* Start filling in the page buffers starting after RNDIS buffer. */
	packet->page_buf[1].pfn = virt_to_phys(skb->data) >> PAGE_SHIFT;
	packet->page_buf[1].offset
		= (unsigned long)skb->data & (PAGE_SIZE - 1);
	if (npg_data == 1)
		packet->page_buf[1].len = skb_headlen(skb);
	else
		packet->page_buf[1].len = PAGE_SIZE
			- packet->page_buf[1].offset;
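
	/*
	 * Any middle pages of the linear area are mapped whole; the length
	 * of the last one is then trimmed back to where the data actually
	 * ends.
	 */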
	for (i = 2; i <= npg_data; i++) {
		packet->page_buf[i].pfn = virt_to_phys(skb->data
			+ PAGE_SIZE * (i-1)) >> PAGE_SHIFT;
		packet->page_buf[i].offset = 0;
		packet->page_buf[i].len = PAGE_SIZE;
	}
	if (npg_data > 1)
		packet->page_buf[npg_data].len = (((unsigned long)skb->data
			+ skb_headlen(skb) - 1) & (PAGE_SIZE - 1)) + 1;

	/* Additional fragments are after SKB data */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const skb_frag_t *f = &skb_shinfo(skb)->frags[i];

		packet->page_buf[i+npg_data+1].pfn =
			page_to_pfn(skb_frag_page(f));
		packet->page_buf[i+npg_data+1].offset = f->page_offset;
		packet->page_buf[i+npg_data+1].len = skb_frag_size(f);
	}

	/* Set the completion routine */
	packet->completion.send.send_completion = netvsc_xmit_completion;
	packet->completion.send.send_completion_ctx = packet;
	packet->completion.send.send_completion_tid = (unsigned long)skb;

	ret = rndis_filter_send(net_device_ctx->device_ctx,
				packet);
	if (ret == 0) {
		net->stats.tx_bytes += skb->len;
		net->stats.tx_packets++;
	} else {
		kfree(packet);
		if (ret != -EAGAIN) {
			dev_kfree_skb_any(skb);
			net->stats.tx_dropped++;
		}
	}
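
	/*
	 * -EAGAIN means the vmbus ring buffer was full; returning
	 * NETDEV_TX_BUSY makes the stack requeue the skb, which is why it
	 * was not freed above in that case.
	 */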
	return (ret == -EAGAIN) ? NETDEV_TX_BUSY : NETDEV_TX_OK;
}

/*
 * netvsc_linkstatus_callback - Link up/down notification
 */
void netvsc_linkstatus_callback(struct hv_device *device_obj,
				unsigned int status)
{
	struct net_device *net;
	struct net_device_context *ndev_ctx;
	struct netvsc_device *net_device;

	net_device = hv_get_drvdata(device_obj);
	net = net_device->ndev;

	if (!net) {
		pr_err("got link status but net device not initialized yet\n");
		return;
	}

	if (status == 1) {
		netif_carrier_on(net);
		netif_wake_queue(net);
		ndev_ctx = netdev_priv(net);
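
		/*
		 * Schedule the peer-notification work twice: once right away
		 * and once again after 20ms.  Presumably the first GARP can
		 * fire before the link is usable after a quick migration, so
		 * the second pass makes sure one gets through (see
		 * netvsc_send_garp below).
		 */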
		schedule_delayed_work(&ndev_ctx->dwork, 0);
		schedule_delayed_work(&ndev_ctx->dwork, msecs_to_jiffies(20));
	} else {
		netif_carrier_off(net);
		netif_tx_disable(net);
	}
}

/*
 * netvsc_recv_callback -  Callback when we receive a packet from the
 * "wire" on the specified device.
 */
int netvsc_recv_callback(struct hv_device *device_obj,
				struct hv_netvsc_packet *packet)
{
	struct net_device *net;
	struct sk_buff *skb;

	net = ((struct netvsc_device *)hv_get_drvdata(device_obj))->ndev;
	if (!net) {
		pr_err("got receive callback but net device not initialized yet\n");
		packet->status = NVSP_STAT_FAIL;
		return 0;
	}

	/* Allocate a skb - TODO direct I/O to pages? */
	skb = netdev_alloc_skb_ip_align(net, packet->total_data_buflen);
	if (unlikely(!skb)) {
		++net->stats.rx_dropped;
		packet->status = NVSP_STAT_FAIL;
		return 0;
	}

	/*
	 * Copy to skb. This copy is needed here since the memory pointed by
	 * hv_netvsc_packet cannot be deallocated
	 */
	memcpy(skb_put(skb, packet->total_data_buflen), packet->data,
		packet->total_data_buflen);

	skb->protocol = eth_type_trans(skb, net);
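
	/*
	 * No checksum information is passed up with the packet here, so
	 * mark it CHECKSUM_NONE and let the stack verify checksums itself.
	 */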
	skb->ip_summed = CHECKSUM_NONE;
	skb->vlan_tci = packet->vlan_tci;

	net->stats.rx_packets++;
	net->stats.rx_bytes += packet->total_data_buflen;

	/*
	 * Pass the skb back up. Network stack will deallocate the skb when it
	 * is done.
	 * TODO - use NAPI?
	 */
	netif_rx(skb);

	return 0;
}

static void netvsc_get_drvinfo(struct net_device *net,
			       struct ethtool_drvinfo *info)
{
	strcpy(info->driver, KBUILD_MODNAME);
	strcpy(info->version, HV_DRV_VERSION);
	strcpy(info->fw_version, "N/A");
}
static int netvsc_change_mtu(struct net_device *ndev, int mtu)
{
	struct net_device_context *ndevctx = netdev_priv(ndev);
	struct hv_device *hdev = ndevctx->device_ctx;
	struct netvsc_device *nvdev = hv_get_drvdata(hdev);
	struct netvsc_device_info device_info;
	int limit = ETH_DATA_LEN;

	if (nvdev == NULL || nvdev->destroy)
		return -ENODEV;

	if (nvdev->nvsp_version == NVSP_PROTOCOL_VERSION_2)
		limit = NETVSC_MTU;

	if (mtu < 68 || mtu > limit)
		return -EINVAL;

	nvdev->start_remove = true;
	cancel_delayed_work_sync(&ndevctx->dwork);
	cancel_work_sync(&ndevctx->work);
	netif_tx_disable(ndev);
	rndis_filter_device_remove(hdev);

	ndev->mtu = mtu;

	ndevctx->device_ctx = hdev;
	hv_set_drvdata(hdev, ndev);
	device_info.ring_size = ring_size;
	rndis_filter_device_add(hdev, &device_info);
	netif_wake_queue(ndev);

	return 0;
}

static int netvsc_set_mac_addr(struct net_device *ndev, void *p)
{
	struct net_device_context *ndevctx = netdev_priv(ndev);
	struct hv_device *hdev = ndevctx->device_ctx;
	struct sockaddr *addr = p;
	char save_adr[ETH_ALEN];
	unsigned char save_aatype;
	int err;

	memcpy(save_adr, ndev->dev_addr, ETH_ALEN);
	save_aatype = ndev->addr_assign_type;

	err = eth_mac_addr(ndev, p);
	if (err != 0)
		return err;

	err = rndis_filter_set_device_mac(hdev, addr->sa_data);
	if (err != 0) {
		/* roll back to saved MAC */
		memcpy(ndev->dev_addr, save_adr, ETH_ALEN);
		ndev->addr_assign_type = save_aatype;
	}

	return err;
}

static const struct ethtool_ops ethtool_ops = {
	.get_drvinfo	= netvsc_get_drvinfo,
	.get_link	= ethtool_op_get_link,
};

static const struct net_device_ops device_ops = {
	.ndo_open =			netvsc_open,
	.ndo_stop =			netvsc_close,
	.ndo_start_xmit =		netvsc_start_xmit,
	.ndo_set_rx_mode =		netvsc_set_multicast_list,
	.ndo_change_mtu =		netvsc_change_mtu,
	.ndo_validate_addr =		eth_validate_addr,
	.ndo_set_mac_address =		netvsc_set_mac_addr,
};

/*
 * Send GARP packet to network peers after migrations.
 * After Quick Migration, the network is not immediately operational in the
 * current context when receiving RNDIS_STATUS_MEDIA_CONNECT event. So, add
 * another netdev_notify_peers() into a delayed work, otherwise GARP packet
 * will not be sent after quick migration, and cause network disconnection.
 */
static void netvsc_send_garp(struct work_struct *w)
{
	struct net_device_context *ndev_ctx;
	struct net_device *net;
	struct netvsc_device *net_device;

	ndev_ctx = container_of(w, struct net_device_context, dwork.work);
	net_device = hv_get_drvdata(ndev_ctx->device_ctx);
	net = net_device->ndev;
	netdev_notify_peers(net);
}
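
/*
 * Note the ordering below: the netdev is registered before the RNDIS
 * device is added, so the interface is briefly visible before its MAC
 * address has been read from the host.
 */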
static int netvsc_probe(struct hv_device *dev,
			const struct hv_vmbus_device_id *dev_id)
{
	struct net_device *net = NULL;
	struct net_device_context *net_device_ctx;
	struct netvsc_device_info device_info;
	int ret;

	net = alloc_etherdev(sizeof(struct net_device_context));
	if (!net)
		return -ENOMEM;

	/* Set initial state */
	netif_carrier_off(net);

	net_device_ctx = netdev_priv(net);
	net_device_ctx->device_ctx = dev;
	hv_set_drvdata(dev, net);
	INIT_DELAYED_WORK(&net_device_ctx->dwork, netvsc_send_garp);
	INIT_WORK(&net_device_ctx->work, do_set_multicast);

	net->netdev_ops = &device_ops;

	/* TODO: Add GSO and Checksum offload */
	net->hw_features = NETIF_F_SG;
	net->features = NETIF_F_SG | NETIF_F_HW_VLAN_TX;

	SET_ETHTOOL_OPS(net, &ethtool_ops);
	SET_NETDEV_DEV(net, &dev->device);

	ret = register_netdev(net);
	if (ret != 0) {
		pr_err("Unable to register netdev.\n");
		free_netdev(net);
		goto out;
	}

	/* Notify the netvsc driver of the new device */
	device_info.ring_size = ring_size;
	ret = rndis_filter_device_add(dev, &device_info);
	if (ret != 0) {
		netdev_err(net, "unable to add netvsc device (ret %d)\n", ret);
		unregister_netdev(net);
		free_netdev(net);
		hv_set_drvdata(dev, NULL);
		return ret;
	}
	memcpy(net->dev_addr, device_info.mac_adr, ETH_ALEN);

	netif_carrier_on(net);

out:
	return ret;
}

static int netvsc_remove(struct hv_device *dev)
{
	struct net_device *net;
	struct net_device_context *ndev_ctx;
	struct netvsc_device *net_device;

	net_device = hv_get_drvdata(dev);
	net = net_device->ndev;

	if (net == NULL) {
		dev_err(&dev->device, "No net device to remove\n");
		return 0;
	}

	net_device->start_remove = true;

	ndev_ctx = netdev_priv(net);
	cancel_delayed_work_sync(&ndev_ctx->dwork);
	cancel_work_sync(&ndev_ctx->work);

	/* Stop outbound asap */
	netif_tx_disable(net);

	unregister_netdev(net);

	/*
	 * Call to the vsc driver to let it know that the device is being
	 * removed
	 */
	rndis_filter_device_remove(dev);

	free_netdev(net);
	return 0;
}

static const struct hv_vmbus_device_id id_table[] = {
	/* Network guid */
	{ VMBUS_DEVICE(0x63, 0x51, 0x61, 0xF8, 0x3E, 0xDF, 0xc5, 0x46,
		       0x91, 0x3F, 0xF2, 0xD2, 0xF9, 0x65, 0xED, 0x0E) },
	{ },
};

MODULE_DEVICE_TABLE(vmbus, id_table);

/* The one and only one */
static struct hv_driver netvsc_drv = {
	.name = KBUILD_MODNAME,
	.id_table = id_table,
	.probe = netvsc_probe,
	.remove = netvsc_remove,
};

static void __exit netvsc_drv_exit(void)
{
	vmbus_driver_unregister(&netvsc_drv);
}

static int __init netvsc_drv_init(void)
{
	if (ring_size < RING_SIZE_MIN) {
		ring_size = RING_SIZE_MIN;
		pr_info("Increased ring_size to %d (min allowed)\n",
			ring_size);
	}
	return vmbus_driver_register(&netvsc_drv);
}

MODULE_LICENSE("GPL");
MODULE_VERSION(HV_DRV_VERSION);
MODULE_DESCRIPTION("Microsoft Hyper-V network driver");

module_init(netvsc_drv_init);
module_exit(netvsc_drv_exit);