/* A simple network driver using virtio.
 *
 * Copyright 2007 Rusty Russell <rusty@rustcorp.com.au> IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
20 #include <linux/netdevice.h>
21 #include <linux/etherdevice.h>
22 #include <linux/module.h>
23 #include <linux/virtio.h>
24 #include <linux/virtio_net.h>
25 #include <linux/scatterlist.h>
27 /* FIXME: MTU in config. */
28 #define MAX_PACKET_LEN (ETH_HLEN+ETH_DATA_LEN)
32 struct virtio_device
*vdev
;
33 struct virtqueue
*rvq
, *svq
;
34 struct net_device
*dev
;
35 struct napi_struct napi
;
37 /* Number of input buffers, and max we've ever had. */
38 unsigned int num
, max
;
40 /* Receive & send queues. */
41 struct sk_buff_head recv
;
42 struct sk_buff_head send
;
45 static inline struct virtio_net_hdr
*skb_vnet_hdr(struct sk_buff
*skb
)
47 return (struct virtio_net_hdr
*)skb
->cb
;
50 static inline void vnet_hdr_to_sg(struct scatterlist
*sg
, struct sk_buff
*skb
)
52 sg_init_one(sg
, skb_vnet_hdr(skb
), sizeof(struct virtio_net_hdr
));
55 static bool skb_xmit_done(struct virtqueue
*rvq
)
57 struct virtnet_info
*vi
= rvq
->vdev
->priv
;
59 /* In case we were waiting for output buffers. */
60 netif_wake_queue(vi
->dev
);
64 static void receive_skb(struct net_device
*dev
, struct sk_buff
*skb
,
67 struct virtio_net_hdr
*hdr
= skb_vnet_hdr(skb
);
69 if (unlikely(len
< sizeof(struct virtio_net_hdr
) + ETH_HLEN
)) {
70 pr_debug("%s: short packet %i\n", dev
->name
, len
);
71 dev
->stats
.rx_length_errors
++;
74 len
-= sizeof(struct virtio_net_hdr
);
75 BUG_ON(len
> MAX_PACKET_LEN
);
78 skb
->protocol
= eth_type_trans(skb
, dev
);
79 pr_debug("Receiving skb proto 0x%04x len %i type %i\n",
80 ntohs(skb
->protocol
), skb
->len
, skb
->pkt_type
);
81 dev
->stats
.rx_bytes
+= skb
->len
;
82 dev
->stats
.rx_packets
++;
84 if (hdr
->flags
& VIRTIO_NET_HDR_F_NEEDS_CSUM
) {
85 pr_debug("Needs csum!\n");
86 if (!skb_partial_csum_set(skb
,hdr
->csum_start
,hdr
->csum_offset
))
90 if (hdr
->gso_type
!= VIRTIO_NET_HDR_GSO_NONE
) {
92 switch (hdr
->gso_type
) {
93 case VIRTIO_NET_HDR_GSO_TCPV4
:
94 skb_shinfo(skb
)->gso_type
= SKB_GSO_TCPV4
;
96 case VIRTIO_NET_HDR_GSO_TCPV4_ECN
:
97 skb_shinfo(skb
)->gso_type
= SKB_GSO_TCP_ECN
;
99 case VIRTIO_NET_HDR_GSO_UDP
:
100 skb_shinfo(skb
)->gso_type
= SKB_GSO_UDP
;
102 case VIRTIO_NET_HDR_GSO_TCPV6
:
103 skb_shinfo(skb
)->gso_type
= SKB_GSO_TCPV6
;
107 printk(KERN_WARNING
"%s: bad gso type %u.\n",
108 dev
->name
, hdr
->gso_type
);
112 skb_shinfo(skb
)->gso_size
= hdr
->gso_size
;
113 if (skb_shinfo(skb
)->gso_size
== 0) {
115 printk(KERN_WARNING
"%s: zero gso size.\n",
120 /* Header must be checked, and gso_segs computed. */
121 skb_shinfo(skb
)->gso_type
|= SKB_GSO_DODGY
;
122 skb_shinfo(skb
)->gso_segs
= 0;
125 netif_receive_skb(skb
);
129 dev
->stats
.rx_frame_errors
++;
134 static void try_fill_recv(struct virtnet_info
*vi
)
137 struct scatterlist sg
[1+MAX_SKB_FRAGS
];
140 sg_init_table(sg
, 1+MAX_SKB_FRAGS
);
142 skb
= netdev_alloc_skb(vi
->dev
, MAX_PACKET_LEN
);
146 skb_put(skb
, MAX_PACKET_LEN
);
147 vnet_hdr_to_sg(sg
, skb
);
148 num
= skb_to_sgvec(skb
, sg
+1, 0, skb
->len
) + 1;
149 skb_queue_head(&vi
->recv
, skb
);
151 err
= vi
->rvq
->vq_ops
->add_buf(vi
->rvq
, sg
, 0, num
, skb
);
153 skb_unlink(skb
, &vi
->recv
);
159 if (unlikely(vi
->num
> vi
->max
))
161 vi
->rvq
->vq_ops
->kick(vi
->rvq
);
164 static bool skb_recv_done(struct virtqueue
*rvq
)
166 struct virtnet_info
*vi
= rvq
->vdev
->priv
;
167 netif_rx_schedule(vi
->dev
, &vi
->napi
);
168 /* Suppress further interrupts. */
172 static int virtnet_poll(struct napi_struct
*napi
, int budget
)
174 struct virtnet_info
*vi
= container_of(napi
, struct virtnet_info
, napi
);
175 struct sk_buff
*skb
= NULL
;
176 unsigned int len
, received
= 0;
179 while (received
< budget
&&
180 (skb
= vi
->rvq
->vq_ops
->get_buf(vi
->rvq
, &len
)) != NULL
) {
181 __skb_unlink(skb
, &vi
->recv
);
182 receive_skb(vi
->dev
, skb
, len
);
187 /* FIXME: If we oom and completely run out of inbufs, we need
188 * to start a timer trying to fill more. */
189 if (vi
->num
< vi
->max
/ 2)
192 /* Out of packets? */
193 if (received
< budget
) {
194 netif_rx_complete(vi
->dev
, napi
);
195 if (unlikely(!vi
->rvq
->vq_ops
->restart(vi
->rvq
))
196 && netif_rx_reschedule(vi
->dev
, napi
))
203 static void free_old_xmit_skbs(struct virtnet_info
*vi
)
208 while ((skb
= vi
->svq
->vq_ops
->get_buf(vi
->svq
, &len
)) != NULL
) {
209 pr_debug("Sent skb %p\n", skb
);
210 __skb_unlink(skb
, &vi
->send
);
211 vi
->dev
->stats
.tx_bytes
+= len
;
212 vi
->dev
->stats
.tx_packets
++;
217 static int start_xmit(struct sk_buff
*skb
, struct net_device
*dev
)
219 struct virtnet_info
*vi
= netdev_priv(dev
);
221 struct scatterlist sg
[1+MAX_SKB_FRAGS
];
222 struct virtio_net_hdr
*hdr
;
223 const unsigned char *dest
= ((struct ethhdr
*)skb
->data
)->h_dest
;
224 DECLARE_MAC_BUF(mac
);
226 sg_init_table(sg
, 1+MAX_SKB_FRAGS
);
228 pr_debug("%s: xmit %p %s\n", dev
->name
, skb
, print_mac(mac
, dest
));
230 free_old_xmit_skbs(vi
);
232 /* Encode metadata header at front. */
233 hdr
= skb_vnet_hdr(skb
);
234 if (skb
->ip_summed
== CHECKSUM_PARTIAL
) {
235 hdr
->flags
= VIRTIO_NET_HDR_F_NEEDS_CSUM
;
236 hdr
->csum_start
= skb
->csum_start
- skb_headroom(skb
);
237 hdr
->csum_offset
= skb
->csum_offset
;
240 hdr
->csum_offset
= hdr
->csum_start
= 0;
243 if (skb_is_gso(skb
)) {
244 hdr
->gso_size
= skb_shinfo(skb
)->gso_size
;
245 if (skb_shinfo(skb
)->gso_type
& SKB_GSO_TCP_ECN
)
246 hdr
->gso_type
= VIRTIO_NET_HDR_GSO_TCPV4_ECN
;
247 else if (skb_shinfo(skb
)->gso_type
& SKB_GSO_TCPV4
)
248 hdr
->gso_type
= VIRTIO_NET_HDR_GSO_TCPV4
;
249 else if (skb_shinfo(skb
)->gso_type
& SKB_GSO_TCPV6
)
250 hdr
->gso_type
= VIRTIO_NET_HDR_GSO_TCPV6
;
251 else if (skb_shinfo(skb
)->gso_type
& SKB_GSO_UDP
)
252 hdr
->gso_type
= VIRTIO_NET_HDR_GSO_UDP
;
256 hdr
->gso_type
= VIRTIO_NET_HDR_GSO_NONE
;
260 vnet_hdr_to_sg(sg
, skb
);
261 num
= skb_to_sgvec(skb
, sg
+1, 0, skb
->len
) + 1;
262 __skb_queue_head(&vi
->send
, skb
);
263 err
= vi
->svq
->vq_ops
->add_buf(vi
->svq
, sg
, num
, 0, skb
);
265 pr_debug("%s: virtio not prepared to send\n", dev
->name
);
266 skb_unlink(skb
, &vi
->send
);
267 netif_stop_queue(dev
);
268 return NETDEV_TX_BUSY
;
270 vi
->svq
->vq_ops
->kick(vi
->svq
);
275 static int virtnet_open(struct net_device
*dev
)
277 struct virtnet_info
*vi
= netdev_priv(dev
);
281 /* If we didn't even get one input buffer, we're useless. */
285 napi_enable(&vi
->napi
);
289 static int virtnet_close(struct net_device
*dev
)
291 struct virtnet_info
*vi
= netdev_priv(dev
);
294 napi_disable(&vi
->napi
);
296 /* networking core has neutered skb_xmit_done/skb_recv_done, so don't
297 * worry about races vs. get(). */
298 vi
->rvq
->vq_ops
->shutdown(vi
->rvq
);
299 while ((skb
= __skb_dequeue(&vi
->recv
)) != NULL
) {
303 vi
->svq
->vq_ops
->shutdown(vi
->svq
);
304 while ((skb
= __skb_dequeue(&vi
->send
)) != NULL
)
307 BUG_ON(vi
->num
!= 0);
311 static int virtnet_probe(struct virtio_device
*vdev
)
315 struct net_device
*dev
;
316 struct virtnet_info
*vi
;
319 /* Allocate ourselves a network device with room for our info */
320 dev
= alloc_etherdev(sizeof(struct virtnet_info
));
324 /* Set up network device as normal. */
326 dev
->open
= virtnet_open
;
327 dev
->stop
= virtnet_close
;
328 dev
->hard_start_xmit
= start_xmit
;
329 dev
->features
= NETIF_F_HIGHDMA
;
330 SET_NETDEV_DEV(dev
, &vdev
->dev
);
332 /* Do we support "hardware" checksums? */
333 token
= vdev
->config
->find(vdev
, VIRTIO_CONFIG_NET_F
, &len
);
334 if (virtio_use_bit(vdev
, token
, len
, VIRTIO_NET_F_NO_CSUM
)) {
335 /* This opens up the world of extra features. */
336 dev
->features
|= NETIF_F_HW_CSUM
|NETIF_F_SG
|NETIF_F_FRAGLIST
;
337 if (virtio_use_bit(vdev
, token
, len
, VIRTIO_NET_F_TSO4
))
338 dev
->features
|= NETIF_F_TSO
;
339 if (virtio_use_bit(vdev
, token
, len
, VIRTIO_NET_F_UFO
))
340 dev
->features
|= NETIF_F_UFO
;
341 if (virtio_use_bit(vdev
, token
, len
, VIRTIO_NET_F_TSO4_ECN
))
342 dev
->features
|= NETIF_F_TSO_ECN
;
343 if (virtio_use_bit(vdev
, token
, len
, VIRTIO_NET_F_TSO6
))
344 dev
->features
|= NETIF_F_TSO6
;
347 /* Configuration may specify what MAC to use. Otherwise random. */
348 token
= vdev
->config
->find(vdev
, VIRTIO_CONFIG_NET_MAC_F
, &len
);
351 vdev
->config
->get(vdev
, token
, dev
->dev_addr
, len
);
353 random_ether_addr(dev
->dev_addr
);
355 /* Set up our device-specific information */
356 vi
= netdev_priv(dev
);
357 netif_napi_add(dev
, &vi
->napi
, virtnet_poll
, 16);
361 /* We expect two virtqueues, receive then send. */
362 vi
->rvq
= vdev
->config
->find_vq(vdev
, skb_recv_done
);
363 if (IS_ERR(vi
->rvq
)) {
364 err
= PTR_ERR(vi
->rvq
);
368 vi
->svq
= vdev
->config
->find_vq(vdev
, skb_xmit_done
);
369 if (IS_ERR(vi
->svq
)) {
370 err
= PTR_ERR(vi
->svq
);
374 /* Initialize our empty receive and send queues. */
375 skb_queue_head_init(&vi
->recv
);
376 skb_queue_head_init(&vi
->send
);
378 err
= register_netdev(dev
);
380 pr_debug("virtio_net: registering device failed\n");
383 pr_debug("virtnet: registered device %s\n", dev
->name
);
388 vdev
->config
->del_vq(vi
->svq
);
390 vdev
->config
->del_vq(vi
->rvq
);
396 static void virtnet_remove(struct virtio_device
*vdev
)
398 struct virtnet_info
*vi
= vdev
->priv
;
400 vdev
->config
->del_vq(vi
->svq
);
401 vdev
->config
->del_vq(vi
->rvq
);
402 unregister_netdev(vi
->dev
);
403 free_netdev(vi
->dev
);
406 static struct virtio_device_id id_table
[] = {
407 { VIRTIO_ID_NET
, VIRTIO_DEV_ANY_ID
},
411 static struct virtio_driver virtio_net
= {
412 .driver
.name
= KBUILD_MODNAME
,
413 .driver
.owner
= THIS_MODULE
,
414 .id_table
= id_table
,
415 .probe
= virtnet_probe
,
416 .remove
= __devexit_p(virtnet_remove
),
419 static int __init
init(void)
421 return register_virtio_driver(&virtio_net
);
424 static void __exit
fini(void)
426 unregister_virtio_driver(&virtio_net
);
431 MODULE_DEVICE_TABLE(virtio
, id_table
);
432 MODULE_DESCRIPTION("Virtio network driver");
433 MODULE_LICENSE("GPL");