/*
 * rionet - Ethernet driver over RapidIO messaging services
 *
 * Copyright 2005 MontaVista Software, Inc.
 * Matt Porter <mporter@kernel.crashing.org>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/rio.h>
#include <linux/rio_drv.h>
#include <linux/rio_ids.h>

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/crc32.h>
#include <linux/ethtool.h>
#define DRV_NAME	"rionet"
#define DRV_VERSION	"0.2"
#define DRV_AUTHOR	"Matt Porter <mporter@kernel.crashing.org>"
#define DRV_DESC	"Ethernet over RapidIO"
MODULE_AUTHOR(DRV_AUTHOR);
MODULE_DESCRIPTION(DRV_DESC);
MODULE_LICENSE("GPL");
#define RIONET_DEFAULT_MSGLEVEL \
			(NETIF_MSG_DRV		| \
			 NETIF_MSG_LINK		| \
			 NETIF_MSG_RX_ERR	| \
			 NETIF_MSG_TX_ERR)

#define RIONET_DOORBELL_JOIN	0x1000
#define RIONET_DOORBELL_LEAVE	0x1001

#define RIONET_MAILBOX		0

#define RIONET_TX_RING_SIZE	CONFIG_RIONET_TX_SIZE
#define RIONET_RX_RING_SIZE	CONFIG_RIONET_RX_SIZE
static LIST_HEAD(rionet_peers);
struct rionet_private {
	struct rio_mport *mport;
	struct sk_buff *rx_skb[RIONET_RX_RING_SIZE];
	struct sk_buff *tx_skb[RIONET_TX_RING_SIZE];
	spinlock_t lock;
	spinlock_t tx_lock;
	u32 msg_enable;
	int rx_slot;
	int tx_slot;
	int tx_cnt;
	int ack_slot;
};

struct rionet_peer {
	struct list_head node;
	struct rio_dev *rdev;
	struct resource *res;
};
static int rionet_check = 0;
static int rionet_capable = 1;
/*
 * This is a fast lookup table for translating TX
 * Ethernet packets into a destination RIO device. It
 * could be made into a hash table to save memory depending
 * on system trade-offs.
 */
static struct rio_dev **rionet_active;
#define is_rionet_capable(pef, src_ops, dst_ops)		\
			((pef & RIO_PEF_INB_MBOX) &&		\
			 (pef & RIO_PEF_INB_DOORBELL) &&	\
			 (src_ops & RIO_SRC_OPS_DOORBELL) &&	\
			 (dst_ops & RIO_DST_OPS_DOORBELL))
#define dev_rionet_capable(dev) \
	is_rionet_capable(dev->pef, dev->src_ops, dev->dst_ops)
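
/*
 * rionet interfaces encode the 16-bit RapidIO destination ID in the last
 * two bytes of a 00:01:00:01:xx:xx MAC address (see rionet_setup_netdev()).
 * The macros below detect that prefix and extract the destid from a
 * destination MAC address.
 */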
#define RIONET_MAC_MATCH(x)	(*(u32 *)x == 0x00010001)
#define RIONET_GET_DESTID(x)	(*(u16 *)(x + 4))
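
/*
 * rionet_rx_clean - pull completed inbound messages out of the mailbox,
 * hand the corresponding skbs to the network stack, and return the index
 * of the next ring slot to be refilled.
 */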
static int rionet_rx_clean(struct net_device *ndev)
{
	int i;
	int error = 0;
	struct rionet_private *rnet = ndev->priv;
	void *data;

	i = rnet->rx_slot;

	do {
		if (!rnet->rx_skb[i])
			continue;

		if (!(data = rio_get_inb_message(rnet->mport, RIONET_MAILBOX)))
			break;

		rnet->rx_skb[i]->data = data;
		skb_put(rnet->rx_skb[i], RIO_MAX_MSG_SIZE);
		rnet->rx_skb[i]->protocol =
		    eth_type_trans(rnet->rx_skb[i], ndev);
		error = netif_rx(rnet->rx_skb[i]);

		if (error == NET_RX_DROP) {
			ndev->stats.rx_dropped++;
		} else if (error == NET_RX_BAD) {
			if (netif_msg_rx_err(rnet))
				printk(KERN_WARNING "%s: bad rx packet\n",
				       DRV_NAME);
			ndev->stats.rx_errors++;
		} else {
			ndev->stats.rx_packets++;
			ndev->stats.rx_bytes += RIO_MAX_MSG_SIZE;
		}

	} while ((i = (i + 1) % RIONET_RX_RING_SIZE) != rnet->rx_slot);

	return i;
}
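
/*
 * rionet_rx_fill - allocate fresh receive skbs for ring slots up to (but
 * not including) @end and post their buffers to the inbound mailbox.
 */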
static void rionet_rx_fill(struct net_device *ndev, int end)
{
	int i;
	struct rionet_private *rnet = ndev->priv;

	i = rnet->rx_slot;
	do {
		rnet->rx_skb[i] = dev_alloc_skb(RIO_MAX_MSG_SIZE);

		if (!rnet->rx_skb[i])
			break;

		rio_add_inb_buffer(rnet->mport, RIONET_MAILBOX,
				   rnet->rx_skb[i]->data);
	} while ((i = (i + 1) % RIONET_RX_RING_SIZE) != end);

	rnet->rx_slot = i;
}
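
/*
 * rionet_queue_tx_msg - post one skb to the outbound mailbox for @rdev,
 * account for it in the TX ring, and stop the queue when the ring fills.
 */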
static int rionet_queue_tx_msg(struct sk_buff *skb, struct net_device *ndev,
			       struct rio_dev *rdev)
{
	struct rionet_private *rnet = ndev->priv;

	rio_add_outb_message(rnet->mport, rdev, 0, skb->data, skb->len);
	rnet->tx_skb[rnet->tx_slot] = skb;

	ndev->stats.tx_packets++;
	ndev->stats.tx_bytes += skb->len;

	if (++rnet->tx_cnt == RIONET_TX_RING_SIZE)
		netif_stop_queue(ndev);

	++rnet->tx_slot;
	rnet->tx_slot &= (RIONET_TX_RING_SIZE - 1);

	if (netif_msg_tx_queued(rnet))
		printk(KERN_INFO "%s: queued skb %8.8x len %8.8x\n", DRV_NAME,
		       (u32) skb, skb->len);

	return 0;
}
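
/*
 * rionet_start_xmit - hard_start_xmit handler; broadcast/multicast frames
 * are replicated to every active peer, while unicast frames go to the peer
 * whose destid is encoded in the destination MAC address.
 */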
static int rionet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	int i;
	struct rionet_private *rnet = ndev->priv;
	struct ethhdr *eth = (struct ethhdr *)skb->data;
	u16 destid;
	unsigned long flags;

	local_irq_save(flags);
	if (!spin_trylock(&rnet->tx_lock)) {
		local_irq_restore(flags);
		return NETDEV_TX_LOCKED;
	}

	if ((rnet->tx_cnt + 1) > RIONET_TX_RING_SIZE) {
		netif_stop_queue(ndev);
		spin_unlock_irqrestore(&rnet->tx_lock, flags);
		printk(KERN_ERR "%s: BUG! Tx Ring full when queue awake!\n",
		       ndev->name);
		return NETDEV_TX_BUSY;
	}

	if (eth->h_dest[0] & 0x01) {
		for (i = 0; i < RIO_MAX_ROUTE_ENTRIES(rnet->mport->sys_size);
				i++)
			if (rionet_active[i])
				rionet_queue_tx_msg(skb, ndev,
						    rionet_active[i]);
	} else if (RIONET_MAC_MATCH(eth->h_dest)) {
		destid = RIONET_GET_DESTID(eth->h_dest);
		if (rionet_active[destid])
			rionet_queue_tx_msg(skb, ndev, rionet_active[destid]);
	}

	spin_unlock_irqrestore(&rnet->tx_lock, flags);

	return 0;
}
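
/*
 * rionet_dbell_event - inbound doorbell handler; JOIN doorbells activate
 * the sending peer (and are echoed back), LEAVE doorbells deactivate it.
 */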
static void rionet_dbell_event(struct rio_mport *mport, void *dev_id, u16 sid, u16 tid,
			       u16 info)
{
	struct net_device *ndev = dev_id;
	struct rionet_private *rnet = ndev->priv;
	struct rionet_peer *peer;

	if (netif_msg_intr(rnet))
		printk(KERN_INFO "%s: doorbell sid %4.4x tid %4.4x info %4.4x",
		       DRV_NAME, sid, tid, info);
	if (info == RIONET_DOORBELL_JOIN) {
		if (!rionet_active[sid]) {
			list_for_each_entry(peer, &rionet_peers, node) {
				if (peer->rdev->destid == sid)
					rionet_active[sid] = peer->rdev;
			}
			rio_mport_send_doorbell(mport, sid,
						RIONET_DOORBELL_JOIN);
		}
	} else if (info == RIONET_DOORBELL_LEAVE) {
		rionet_active[sid] = NULL;
	} else {
		if (netif_msg_intr(rnet))
			printk(KERN_WARNING "%s: unhandled doorbell\n",
			       DRV_NAME);
	}
}
static void rionet_inb_msg_event(struct rio_mport *mport, void *dev_id, int mbox, int slot)
{
	int n;
	struct net_device *ndev = dev_id;
	struct rionet_private *rnet = (struct rionet_private *)ndev->priv;

	if (netif_msg_intr(rnet))
		printk(KERN_INFO "%s: inbound message event, mbox %d slot %d\n",
		       DRV_NAME, mbox, slot);

	spin_lock(&rnet->lock);
	if ((n = rionet_rx_clean(ndev)) != rnet->rx_slot)
		rionet_rx_fill(ndev, n);
	spin_unlock(&rnet->lock);
}
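
/*
 * rionet_outb_msg_event - outbound mailbox callback; frees acknowledged
 * TX skbs and wakes the queue once ring space is available.
 */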
static void rionet_outb_msg_event(struct rio_mport *mport, void *dev_id, int mbox, int slot)
{
	struct net_device *ndev = dev_id;
	struct rionet_private *rnet = ndev->priv;

	spin_lock(&rnet->lock);

	if (netif_msg_intr(rnet))
		printk(KERN_INFO
		       "%s: outbound message event, mbox %d slot %d\n",
		       DRV_NAME, mbox, slot);

	while (rnet->tx_cnt && (rnet->ack_slot != slot)) {
		/* dma unmap single */
		dev_kfree_skb_irq(rnet->tx_skb[rnet->ack_slot]);
		rnet->tx_skb[rnet->ack_slot] = NULL;
		++rnet->ack_slot;
		rnet->ack_slot &= (RIONET_TX_RING_SIZE - 1);
		rnet->tx_cnt--;
	}

	if (rnet->tx_cnt < RIONET_TX_RING_SIZE)
		netif_wake_queue(ndev);

	spin_unlock(&rnet->lock);
}
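
/*
 * rionet_open - ndo open handler; claims the inbound doorbell range and
 * both mailboxes, primes the RX ring, and sends JOIN doorbells to peers
 * whose inbound doorbells are initialized.
 */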
static int rionet_open(struct net_device *ndev)
{
	int i, rc = 0;
	struct rionet_peer *peer, *tmp;
	u32 pwdcsr;
	struct rionet_private *rnet = ndev->priv;

	if (netif_msg_ifup(rnet))
		printk(KERN_INFO "%s: open\n", DRV_NAME);

	if ((rc = rio_request_inb_dbell(rnet->mport,
					(void *)ndev,
					RIONET_DOORBELL_JOIN,
					RIONET_DOORBELL_LEAVE,
					rionet_dbell_event)) < 0)
		goto out;

	if ((rc = rio_request_inb_mbox(rnet->mport,
				       (void *)ndev,
				       RIONET_MAILBOX,
				       RIONET_RX_RING_SIZE,
				       rionet_inb_msg_event)) < 0)
		goto out;

	if ((rc = rio_request_outb_mbox(rnet->mport,
					(void *)ndev,
					RIONET_MAILBOX,
					RIONET_TX_RING_SIZE,
					rionet_outb_msg_event)) < 0)
		goto out;

	/* Initialize inbound message ring */
	for (i = 0; i < RIONET_RX_RING_SIZE; i++)
		rnet->rx_skb[i] = NULL;
	rnet->rx_slot = 0;
	rionet_rx_fill(ndev, 0);

	rnet->tx_slot = 0;
	rnet->tx_cnt = 0;
	rnet->ack_slot = 0;

	netif_carrier_on(ndev);
	netif_start_queue(ndev);

	list_for_each_entry_safe(peer, tmp, &rionet_peers, node) {
		if (!(peer->res = rio_request_outb_dbell(peer->rdev,
							 RIONET_DOORBELL_JOIN,
							 RIONET_DOORBELL_LEAVE))) {
			printk(KERN_ERR "%s: error requesting doorbells\n",
			       DRV_NAME);
			continue;
		}

		/*
		 * If device has initialized inbound doorbells,
		 * send a join message
		 */
		rio_read_config_32(peer->rdev, RIO_WRITE_PORT_CSR, &pwdcsr);
		if (pwdcsr & RIO_DOORBELL_AVAIL)
			rio_send_doorbell(peer->rdev, RIONET_DOORBELL_JOIN);
	}

      out:
	return rc;
}
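
/*
 * rionet_close - ndo stop handler; sends LEAVE doorbells to active peers
 * and releases all doorbell and mailbox resources.
 */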
static int rionet_close(struct net_device *ndev)
{
	struct rionet_private *rnet = (struct rionet_private *)ndev->priv;
	struct rionet_peer *peer, *tmp;
	int i;

	if (netif_msg_ifup(rnet))
		printk(KERN_INFO "%s: close\n", DRV_NAME);

	netif_stop_queue(ndev);
	netif_carrier_off(ndev);

	for (i = 0; i < RIONET_RX_RING_SIZE; i++)
		kfree_skb(rnet->rx_skb[i]);

	list_for_each_entry_safe(peer, tmp, &rionet_peers, node) {
		if (rionet_active[peer->rdev->destid]) {
			rio_send_doorbell(peer->rdev, RIONET_DOORBELL_LEAVE);
			rionet_active[peer->rdev->destid] = NULL;
		}
		rio_release_outb_dbell(peer->rdev, peer->res);
	}

	rio_release_inb_dbell(rnet->mport, RIONET_DOORBELL_JOIN,
			      RIONET_DOORBELL_LEAVE);
	rio_release_inb_mbox(rnet->mport, RIONET_MAILBOX);
	rio_release_outb_mbox(rnet->mport, RIONET_MAILBOX);

	return 0;
}
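
/*
 * rionet_remove - driver remove callback; frees the destid lookup table,
 * unregisters the netdev, and tears down the peer list.
 */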
static void rionet_remove(struct rio_dev *rdev)
{
	struct net_device *ndev = NULL;
	struct rionet_peer *peer, *tmp;

	free_pages((unsigned long)rionet_active, rdev->net->hport->sys_size ?
		   __ilog2(sizeof(void *)) + 4 : 0);
	unregister_netdev(ndev);

	list_for_each_entry_safe(peer, tmp, &rionet_peers, node) {
		list_del(&peer->node);
		kfree(peer);
	}
}
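
/* ethtool handlers: driver identification and message-level control */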
static void rionet_get_drvinfo(struct net_device *ndev,
			       struct ethtool_drvinfo *info)
{
	struct rionet_private *rnet = ndev->priv;

	strcpy(info->driver, DRV_NAME);
	strcpy(info->version, DRV_VERSION);
	strcpy(info->fw_version, "n/a");
	strcpy(info->bus_info, rnet->mport->name);
}
static u32 rionet_get_msglevel(struct net_device *ndev)
{
	struct rionet_private *rnet = ndev->priv;

	return rnet->msg_enable;
}
static void rionet_set_msglevel(struct net_device *ndev, u32 value)
{
	struct rionet_private *rnet = ndev->priv;

	rnet->msg_enable = value;
}
static const struct ethtool_ops rionet_ethtool_ops = {
	.get_drvinfo = rionet_get_drvinfo,
	.get_msglevel = rionet_get_msglevel,
	.set_msglevel = rionet_set_msglevel,
	.get_link = ethtool_op_get_link,
};
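
/*
 * rionet_setup_netdev - allocate and register the rionet netdev for
 * @mport: allocates the destid lookup table, derives the MAC address from
 * the local device ID, and wires up the net_device callbacks.
 */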
static int rionet_setup_netdev(struct rio_mport *mport)
{
	int rc = 0;
	struct net_device *ndev = NULL;
	struct rionet_private *rnet;
	u16 device_id;
	DECLARE_MAC_BUF(mac);

	/* Allocate our net_device structure */
	ndev = alloc_etherdev(sizeof(struct rionet_private));
	if (ndev == NULL) {
		printk(KERN_INFO "%s: could not allocate ethernet device.\n",
		       DRV_NAME);
		rc = -ENOMEM;
		goto out;
	}

	rionet_active = (struct rio_dev **)__get_free_pages(GFP_KERNEL,
			mport->sys_size ? __ilog2(sizeof(void *)) + 4 : 0);
	if (!rionet_active) {
		rc = -ENOMEM;
		goto out;
	}
	memset((void *)rionet_active, 0, sizeof(void *) *
	       RIO_MAX_ROUTE_ENTRIES(mport->sys_size));

	/* Set up private area */
	rnet = (struct rionet_private *)ndev->priv;
	rnet->mport = mport;

	/* Set the default MAC address */
	device_id = rio_local_get_device_id(mport);
	ndev->dev_addr[0] = 0x00;
	ndev->dev_addr[1] = 0x01;
	ndev->dev_addr[2] = 0x00;
	ndev->dev_addr[3] = 0x01;
	ndev->dev_addr[4] = device_id >> 8;
	ndev->dev_addr[5] = device_id & 0xff;

	/* Fill in the driver function table */
	ndev->open = &rionet_open;
	ndev->hard_start_xmit = &rionet_start_xmit;
	ndev->stop = &rionet_close;
	ndev->mtu = RIO_MAX_MSG_SIZE - 14;
	ndev->features = NETIF_F_LLTX;
	SET_ETHTOOL_OPS(ndev, &rionet_ethtool_ops);

	spin_lock_init(&rnet->lock);
	spin_lock_init(&rnet->tx_lock);

	rnet->msg_enable = RIONET_DEFAULT_MSGLEVEL;

	rc = register_netdev(ndev);
	if (rc != 0)
		goto out;

	printk("%s: %s %s Version %s, MAC %s\n",
	       ndev->name,
	       DRV_NAME,
	       DRV_DESC,
	       DRV_VERSION,
	       print_mac(mac, ndev->dev_addr));

      out:
	return rc;
}
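
/*
 * rionet_probe - called once per discovered RapidIO device; the first call
 * validates local mailbox/doorbell capability and registers the netdev,
 * and every call adds a capable remote device to the peer list.
 */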
/*
 * XXX Make multi-net safe
 */
static int rionet_probe(struct rio_dev *rdev, const struct rio_device_id *id)
{
	int rc = -ENODEV;
	u32 lpef, lsrc_ops, ldst_ops;
	struct rionet_peer *peer;

	/* If local device is not rionet capable, give up quickly */
	if (!rionet_capable)
		goto out;

	/*
	 * First time through, make sure local device is rionet
	 * capable, setup netdev, and set flags so this is skipped
	 * on later probes
	 */
	if (!rionet_check) {
		rio_local_read_config_32(rdev->net->hport, RIO_PEF_CAR, &lpef);
		rio_local_read_config_32(rdev->net->hport, RIO_SRC_OPS_CAR,
					 &lsrc_ops);
		rio_local_read_config_32(rdev->net->hport, RIO_DST_OPS_CAR,
					 &ldst_ops);
		if (!is_rionet_capable(lpef, lsrc_ops, ldst_ops)) {
			printk(KERN_ERR
			       "%s: local device is not network capable\n",
			       DRV_NAME);
			rionet_check = 1;
			rionet_capable = 0;
			goto out;
		}

		rc = rionet_setup_netdev(rdev->net->hport);
		rionet_check = 1;
	}

	/*
	 * If the remote device has mailbox/doorbell capabilities,
	 * add it to the peer list.
	 */
	if (dev_rionet_capable(rdev)) {
		if (!(peer = kmalloc(sizeof(struct rionet_peer), GFP_KERNEL))) {
			rc = -ENOMEM;
			goto out;
		}
		peer->rdev = rdev;
		list_add_tail(&peer->node, &rionet_peers);
	}

      out:
	return rc;
}
static struct rio_device_id rionet_id_table[] = {
	{RIO_DEVICE(RIO_ANY_ID, RIO_ANY_ID)}
};

static struct rio_driver rionet_driver = {
	.name = "rionet",
	.id_table = rionet_id_table,
	.probe = rionet_probe,
	.remove = rionet_remove,
};
static int __init rionet_init(void)
{
	return rio_register_driver(&rionet_driver);
}

static void __exit rionet_exit(void)
{
	rio_unregister_driver(&rionet_driver);
}

module_init(rionet_init);
module_exit(rionet_exit);