lguest: the net driver
[linux-2.6/btrfs-unstable.git] / drivers/net/lguest_net.c
blob 112778652f7d8986345a439ab5e7f46815af1369
/* A simple network driver for lguest.
 *
 * Copyright 2006 Rusty Russell <rusty@rustcorp.com.au> IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
//#define DEBUG
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/module.h>
#include <linux/mm_types.h>
#include <linux/io.h>
#include <linux/lguest_bus.h>

#define SHARED_SIZE	PAGE_SIZE
#define MAX_LANS	4
#define NUM_SKBS	8
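
/*
 * Per-device state.  The shared mapping is an array of struct lguest_net,
 * one entry per peer on this "LAN"; each entry holds that peer's MAC
 * address, and byte 0 of the MAC doubles as a flag byte (see PROMISC_BIT
 * below).  Each lguest_dma in the receive queue describes one pre-posted
 * skb that the Host may fill in.
 */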
struct lguestnet_info
{
	/* The shared page(s). */
	struct lguest_net *peer;
	unsigned long peer_phys;
	unsigned long mapsize;

	/* The lguest_device I come from */
	struct lguest_device *lgdev;

	/* My peerid. */
	unsigned int me;

	/* Receive queue. */
	struct sk_buff *skb[NUM_SKBS];
	struct lguest_dma dma[NUM_SKBS];
};
/* How many bytes left in this page. */
static unsigned int rest_of_page(void *data)
{
	return PAGE_SIZE - ((unsigned long)data % PAGE_SIZE);
}
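
/*
 * Each peer is addressed by a distinct physical address ("key") inside the
 * shared mapping; lguest_bind_dma() and lguest_send_dma() below both use it
 * to find each other.
 */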
/* Simple convention: offset 4 * peernum. */
static unsigned long peer_key(struct lguestnet_info *info, unsigned peernum)
{
	return info->peer_phys + 4 * peernum;
}
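
/*
 * Convert an skb into an lguest_dma scatter-gather list: the linear head is
 * split at page boundaries, then any paged fragments are appended.  A zero
 * length terminates the list if there is room for a terminator.
 */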
static void skb_to_dma(const struct sk_buff *skb, unsigned int headlen,
		       struct lguest_dma *dma)
{
	unsigned int i, seg;

	for (i = seg = 0; i < headlen; seg++, i += rest_of_page(skb->data+i)) {
		dma->addr[seg] = virt_to_phys(skb->data + i);
		dma->len[seg] = min((unsigned)(headlen - i),
				    rest_of_page(skb->data + i));
	}
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++, seg++) {
		const skb_frag_t *f = &skb_shinfo(skb)->frags[i];
		/* Should not happen with MTU less than 64k - 2 * PAGE_SIZE. */
		if (seg == LGUEST_MAX_DMA_SECTIONS) {
			printk("Woah dude!  Megapacket!\n");
			break;
		}
		dma->addr[seg] = page_to_phys(f->page) + f->page_offset;
		dma->len[seg] = f->size;
	}
	if (seg < LGUEST_MAX_DMA_SECTIONS)
		dma->len[seg] = 0;
}
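
/*
 * Promiscuous/allmulti handling: rather than keep a separate flag in the
 * shared page, we piggyback on bit 0 of our advertised MAC (the multicast
 * bit, which no valid source address ever sets).
 */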
/* We overload multicast bit to show promiscuous mode. */
#define PROMISC_BIT	0x01

static void lguestnet_set_multicast(struct net_device *dev)
{
	struct lguestnet_info *info = netdev_priv(dev);

	if ((dev->flags & (IFF_PROMISC|IFF_ALLMULTI)) || dev->mc_count)
		info->peer[info->me].mac[0] |= PROMISC_BIT;
	else
		info->peer[info->me].mac[0] &= ~PROMISC_BIT;
}
static int promisc(struct lguestnet_info *info, unsigned int peer)
{
	return info->peer[peer].mac[0] & PROMISC_BIT;
}
static int mac_eq(const unsigned char mac[ETH_ALEN],
		  struct lguestnet_info *info, unsigned int peer)
{
	/* Ignore multicast bit, which peer turns on to mean promisc. */
	if ((info->peer[peer].mac[0] & (~PROMISC_BIT)) != mac[0])
		return 0;
	return memcmp(mac+1, info->peer[peer].mac+1, ETH_ALEN-1) == 0;
}
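
/*
 * Hand one packet to one peer: build the scatter-gather list and ask the
 * Host to copy it into whatever receive buffer that peer has bound under
 * its key.  If fewer bytes than we sent were consumed (presumably the peer
 * had no, or too-small, buffers bound), count it as a carrier error.
 */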
static void transfer_packet(struct net_device *dev,
			    struct sk_buff *skb,
			    unsigned int peernum)
{
	struct lguestnet_info *info = netdev_priv(dev);
	struct lguest_dma dma;

	skb_to_dma(skb, skb_headlen(skb), &dma);
	pr_debug("xfer length %04x (%u)\n", htons(skb->len), skb->len);

	lguest_send_dma(peer_key(info, peernum), &dma);
	if (dma.used_len != skb->len) {
		dev->stats.tx_carrier_errors++;
		pr_debug("Bad xfer to peer %i: %i of %i (dma %p/%i)\n",
			 peernum, dma.used_len, skb->len,
			 (void *)dma.addr[0], dma.len[0]);
	} else {
		dev->stats.tx_bytes += skb->len;
		dev->stats.tx_packets++;
	}
}
static int unused_peer(const struct lguest_net peer[], unsigned int num)
{
	return peer[num].mac[0] == 0;
}
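
/*
 * Transmit path.  There is no real wire: we walk every slot in the shared
 * mapping and copy the packet to each peer that should see it (everyone for
 * broadcast/multicast, plus promiscuous peers, plus the peer whose MAC
 * matches), then free the skb; the copy has already happened by the time
 * lguest_send_dma() returns.
 */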
static int lguestnet_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	unsigned int i;
	int broadcast;
	struct lguestnet_info *info = netdev_priv(dev);
	const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest;

	pr_debug("%s: xmit %02x:%02x:%02x:%02x:%02x:%02x\n",
		 dev->name, dest[0], dest[1], dest[2], dest[3], dest[4], dest[5]);

	broadcast = is_multicast_ether_addr(dest);
	for (i = 0; i < info->mapsize/sizeof(struct lguest_net); i++) {
		if (i == info->me || unused_peer(info->peer, i))
			continue;

		if (!broadcast && !promisc(info, i) && !mac_eq(dest, info, i))
			continue;

		pr_debug("lguestnet %s: sending from %i to %i\n",
			 dev->name, info->me, i);
		transfer_packet(dev, skb, i);
	}
	dev_kfree_skb(skb);
	return 0;
}
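
/*
 * Receive buffers: each of the NUM_SKBS slots is a max-sized skb described
 * by one lguest_dma entry.  The write barrier below makes sure the Host sees
 * the completed scatter-gather list before used_len = 0 marks the slot free.
 */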
/* Find a new skb to put in this slot in shared mem. */
static int fill_slot(struct net_device *dev, unsigned int slot)
{
	struct lguestnet_info *info = netdev_priv(dev);

	/* Try to create and register a new one. */
	info->skb[slot] = netdev_alloc_skb(dev, ETH_HLEN + ETH_DATA_LEN);
	if (!info->skb[slot]) {
		printk("%s: could not fill slot %i\n", dev->name, slot);
		return -ENOMEM;
	}

	skb_to_dma(info->skb[slot], ETH_HLEN + ETH_DATA_LEN, &info->dma[slot]);
	wmb();
	/* Now we tell hypervisor it can use the slot. */
	info->dma[slot].used_len = 0;
	return 0;
}
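
/*
 * Interrupt handler: the Host raised our irq because at least one bound
 * receive slot has been filled in.  Scan them all, replace each used skb
 * with a fresh one, and feed anything of a sane length to the stack.
 */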
static irqreturn_t lguestnet_rcv(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct lguestnet_info *info = netdev_priv(dev);
	unsigned int i, done = 0;

	for (i = 0; i < ARRAY_SIZE(info->dma); i++) {
		unsigned int length;
		struct sk_buff *skb;

		length = info->dma[i].used_len;
		if (length == 0)
			continue;

		done++;
		skb = info->skb[i];
		fill_slot(dev, i);

		if (length < ETH_HLEN || length > ETH_HLEN + ETH_DATA_LEN) {
			pr_debug("%s: unbelievable skb len: %i\n",
				 dev->name, length);
			dev_kfree_skb(skb);
			continue;
		}

		skb_put(skb, length);
		skb->protocol = eth_type_trans(skb, dev);
		/* This is a reliable transport. */
		if (dev->features & NETIF_F_NO_CSUM)
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		pr_debug("Receiving skb proto 0x%04x len %i type %i\n",
			 ntohs(skb->protocol), skb->len, skb->pkt_type);

		dev->stats.rx_bytes += skb->len;
		dev->stats.rx_packets++;
		netif_rx(skb);
	}
	return done ? IRQ_HANDLED : IRQ_NONE;
}
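
/*
 * Bring the interface up: advertise our MAC (and promisc bit) in the shared
 * page, fill all receive slots, and bind them under our key so the Host has
 * somewhere to put incoming packets.
 */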
static int lguestnet_open(struct net_device *dev)
{
	int i;
	struct lguestnet_info *info = netdev_priv(dev);

	/* Set up our MAC address */
	memcpy(info->peer[info->me].mac, dev->dev_addr, ETH_ALEN);

	/* Turn on promisc mode if needed */
	lguestnet_set_multicast(dev);

	for (i = 0; i < ARRAY_SIZE(info->dma); i++) {
		if (fill_slot(dev, i) != 0)
			goto cleanup;
	}
	if (lguest_bind_dma(peer_key(info, info->me), info->dma,
			    NUM_SKBS, lgdev_irq(info->lgdev)) != 0)
		goto cleanup;
	return 0;

cleanup:
	while (--i >= 0)
		dev_kfree_skb(info->skb[i]);
	return -ENOMEM;
}
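
/*
 * Bring the interface down: wipe our entry in the shared page so peers stop
 * sending to us (late packets are simply ignored), then unbind the receive
 * buffers and free them.
 */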
static int lguestnet_close(struct net_device *dev)
{
	unsigned int i;
	struct lguestnet_info *info = netdev_priv(dev);

	/* Clear all trace: others might deliver packets, we'll ignore it. */
	memset(&info->peer[info->me], 0, sizeof(info->peer[info->me]));

	/* Deregister sg lists. */
	lguest_unbind_dma(peer_key(info, info->me), info->dma);
	for (i = 0; i < ARRAY_SIZE(info->dma); i++)
		dev_kfree_skb(info->skb[i]);
	return 0;
}
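
/*
 * Probe: called by the lguest bus for each network device description the
 * launcher set up.  We map the shared peer pages, derive a locally
 * administered MAC from our guest id, register the net_device and grab the
 * device's interrupt for receive notifications.
 */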
static int lguestnet_probe(struct lguest_device *lgdev)
{
	int err, irqf = IRQF_SHARED;
	struct net_device *dev;
	struct lguestnet_info *info;
	struct lguest_device_desc *desc = &lguest_devices[lgdev->index];

	pr_debug("lguest_net: probing for device %i\n", lgdev->index);

	dev = alloc_etherdev(sizeof(struct lguestnet_info));
	if (!dev)
		return -ENOMEM;

	SET_MODULE_OWNER(dev);

	/* Ethernet defaults with some changes */
	ether_setup(dev);
	dev->set_mac_address = NULL;

	dev->dev_addr[0] = 0x02; /* set local assignment bit (IEEE802) */
	dev->dev_addr[1] = 0x00;
	memcpy(&dev->dev_addr[2], &lguest_data.guestid, 2);
	dev->dev_addr[4] = 0x00;
	dev->dev_addr[5] = 0x00;

	dev->open = lguestnet_open;
	dev->stop = lguestnet_close;
	dev->hard_start_xmit = lguestnet_start_xmit;

	/* Turning on/off promisc will call dev->set_multicast_list.
	 * We don't actually support multicast yet */
	dev->set_multicast_list = lguestnet_set_multicast;
	SET_NETDEV_DEV(dev, &lgdev->dev);
	if (desc->features & LGUEST_NET_F_NOCSUM)
		dev->features = NETIF_F_SG|NETIF_F_NO_CSUM;

	info = netdev_priv(dev);
	info->mapsize = PAGE_SIZE * desc->num_pages;
	info->peer_phys = ((unsigned long)desc->pfn << PAGE_SHIFT);
	info->lgdev = lgdev;
	info->peer = lguest_map(info->peer_phys, desc->num_pages);
	if (!info->peer) {
		err = -ENOMEM;
		goto free;
	}

	/* This stores our peerid (upper bits reserved for future). */
	info->me = (desc->features & (info->mapsize-1));

	err = register_netdev(dev);
	if (err) {
		pr_debug("lguestnet: registering device failed\n");
		goto unmap;
	}

	if (lguest_devices[lgdev->index].features & LGUEST_DEVICE_F_RANDOMNESS)
		irqf |= IRQF_SAMPLE_RANDOM;
	if (request_irq(lgdev_irq(lgdev), lguestnet_rcv, irqf, "lguestnet",
			dev) != 0) {
		pr_debug("lguestnet: cannot get irq %i\n", lgdev_irq(lgdev));
		goto unregister;
	}

	pr_debug("lguestnet: registered device %s\n", dev->name);
	lgdev->private = dev;
	return 0;

unregister:
	unregister_netdev(dev);
unmap:
	lguest_unmap(info->peer);
free:
	free_netdev(dev);
	return err;
}
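
/* Glue to the lguest bus: probe us once per LGUEST_DEVICE_T_NET device. */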
static struct lguest_driver lguestnet_drv = {
	.name = "lguestnet",
	.owner = THIS_MODULE,
	.device_type = LGUEST_DEVICE_T_NET,
	.probe = lguestnet_probe,
};
static __init int lguestnet_init(void)
{
	return register_lguest_driver(&lguestnet_drv);
}
module_init(lguestnet_init);
MODULE_DESCRIPTION("Lguest network driver");
MODULE_LICENSE("GPL");