/*
 * rionet - Ethernet driver over RapidIO messaging services
 *
 * Copyright 2005 MontaVista Software, Inc.
 * Matt Porter <mporter@kernel.crashing.org>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */
13 #include <linux/module.h>
14 #include <linux/kernel.h>
15 #include <linux/dma-mapping.h>
16 #include <linux/delay.h>
17 #include <linux/rio.h>
18 #include <linux/rio_drv.h>
19 #include <linux/slab.h>
20 #include <linux/rio_ids.h>
22 #include <linux/netdevice.h>
23 #include <linux/etherdevice.h>
24 #include <linux/skbuff.h>
25 #include <linux/crc32.h>
26 #include <linux/ethtool.h>
28 #define DRV_NAME "rionet"
29 #define DRV_VERSION "0.2"
30 #define DRV_AUTHOR "Matt Porter <mporter@kernel.crashing.org>"
31 #define DRV_DESC "Ethernet over RapidIO"
33 MODULE_AUTHOR(DRV_AUTHOR
);
34 MODULE_DESCRIPTION(DRV_DESC
);
35 MODULE_LICENSE("GPL");
37 #define RIONET_DEFAULT_MSGLEVEL \
43 #define RIONET_DOORBELL_JOIN 0x1000
44 #define RIONET_DOORBELL_LEAVE 0x1001
46 #define RIONET_MAILBOX 0
48 #define RIONET_TX_RING_SIZE CONFIG_RIONET_TX_SIZE
49 #define RIONET_RX_RING_SIZE CONFIG_RIONET_RX_SIZE
51 static LIST_HEAD(rionet_peers
);
53 struct rionet_private
{
54 struct rio_mport
*mport
;
55 struct sk_buff
*rx_skb
[RIONET_RX_RING_SIZE
];
56 struct sk_buff
*tx_skb
[RIONET_TX_RING_SIZE
];
67 struct list_head node
;
72 static int rionet_check
= 0;
73 static int rionet_capable
= 1;
76 * This is a fast lookup table for translating TX
77 * Ethernet packets into a destination RIO device. It
78 * could be made into a hash table to save memory depending
79 * on system trade-offs.
81 static struct rio_dev
**rionet_active
;
/*
 * A device can participate in rionet only if it supports inbound
 * mailboxes and inbound doorbells, and can send doorbells (both the
 * source- and destination-operations CARs advertise doorbell support).
 */
#define is_rionet_capable(pef, src_ops, dst_ops)		\
			((pef & RIO_PEF_INB_MBOX) &&		\
			 (pef & RIO_PEF_INB_DOORBELL) &&	\
			 (src_ops & RIO_SRC_OPS_DOORBELL) &&	\
			 (dst_ops & RIO_DST_OPS_DOORBELL))
#define dev_rionet_capable(dev) \
	is_rionet_capable(dev->pef, dev->src_ops, dev->dst_ops)

/*
 * rionet MAC addresses are 00:01:00:01:<destid>; match on the fixed
 * 4-byte prefix and pull the 16-bit destination ID out of bytes 4-5.
 */
#define RIONET_MAC_MATCH(x)	(*(u32 *)x == 0x00010001)
#define RIONET_GET_DESTID(x)	(*(u16 *)(x + 4))
94 static int rionet_rx_clean(struct net_device
*ndev
)
98 struct rionet_private
*rnet
= netdev_priv(ndev
);
104 if (!rnet
->rx_skb
[i
])
107 if (!(data
= rio_get_inb_message(rnet
->mport
, RIONET_MAILBOX
)))
110 rnet
->rx_skb
[i
]->data
= data
;
111 skb_put(rnet
->rx_skb
[i
], RIO_MAX_MSG_SIZE
);
112 rnet
->rx_skb
[i
]->protocol
=
113 eth_type_trans(rnet
->rx_skb
[i
], ndev
);
114 error
= netif_rx(rnet
->rx_skb
[i
]);
116 if (error
== NET_RX_DROP
) {
117 ndev
->stats
.rx_dropped
++;
119 ndev
->stats
.rx_packets
++;
120 ndev
->stats
.rx_bytes
+= RIO_MAX_MSG_SIZE
;
123 } while ((i
= (i
+ 1) % RIONET_RX_RING_SIZE
) != rnet
->rx_slot
);
128 static void rionet_rx_fill(struct net_device
*ndev
, int end
)
131 struct rionet_private
*rnet
= netdev_priv(ndev
);
135 rnet
->rx_skb
[i
] = dev_alloc_skb(RIO_MAX_MSG_SIZE
);
137 if (!rnet
->rx_skb
[i
])
140 rio_add_inb_buffer(rnet
->mport
, RIONET_MAILBOX
,
141 rnet
->rx_skb
[i
]->data
);
142 } while ((i
= (i
+ 1) % RIONET_RX_RING_SIZE
) != end
);
147 static int rionet_queue_tx_msg(struct sk_buff
*skb
, struct net_device
*ndev
,
148 struct rio_dev
*rdev
)
150 struct rionet_private
*rnet
= netdev_priv(ndev
);
152 rio_add_outb_message(rnet
->mport
, rdev
, 0, skb
->data
, skb
->len
);
153 rnet
->tx_skb
[rnet
->tx_slot
] = skb
;
155 ndev
->stats
.tx_packets
++;
156 ndev
->stats
.tx_bytes
+= skb
->len
;
158 if (++rnet
->tx_cnt
== RIONET_TX_RING_SIZE
)
159 netif_stop_queue(ndev
);
162 rnet
->tx_slot
&= (RIONET_TX_RING_SIZE
- 1);
164 if (netif_msg_tx_queued(rnet
))
165 printk(KERN_INFO
"%s: queued skb %8.8x len %8.8x\n", DRV_NAME
,
166 (u32
) skb
, skb
->len
);
171 static int rionet_start_xmit(struct sk_buff
*skb
, struct net_device
*ndev
)
174 struct rionet_private
*rnet
= netdev_priv(ndev
);
175 struct ethhdr
*eth
= (struct ethhdr
*)skb
->data
;
179 local_irq_save(flags
);
180 if (!spin_trylock(&rnet
->tx_lock
)) {
181 local_irq_restore(flags
);
182 return NETDEV_TX_LOCKED
;
185 if ((rnet
->tx_cnt
+ 1) > RIONET_TX_RING_SIZE
) {
186 netif_stop_queue(ndev
);
187 spin_unlock_irqrestore(&rnet
->tx_lock
, flags
);
188 printk(KERN_ERR
"%s: BUG! Tx Ring full when queue awake!\n",
190 return NETDEV_TX_BUSY
;
193 if (eth
->h_dest
[0] & 0x01) {
194 for (i
= 0; i
< RIO_MAX_ROUTE_ENTRIES(rnet
->mport
->sys_size
);
196 if (rionet_active
[i
])
197 rionet_queue_tx_msg(skb
, ndev
,
199 } else if (RIONET_MAC_MATCH(eth
->h_dest
)) {
200 destid
= RIONET_GET_DESTID(eth
->h_dest
);
201 if (rionet_active
[destid
])
202 rionet_queue_tx_msg(skb
, ndev
, rionet_active
[destid
]);
205 spin_unlock_irqrestore(&rnet
->tx_lock
, flags
);
210 static void rionet_dbell_event(struct rio_mport
*mport
, void *dev_id
, u16 sid
, u16 tid
,
213 struct net_device
*ndev
= dev_id
;
214 struct rionet_private
*rnet
= netdev_priv(ndev
);
215 struct rionet_peer
*peer
;
217 if (netif_msg_intr(rnet
))
218 printk(KERN_INFO
"%s: doorbell sid %4.4x tid %4.4x info %4.4x",
219 DRV_NAME
, sid
, tid
, info
);
220 if (info
== RIONET_DOORBELL_JOIN
) {
221 if (!rionet_active
[sid
]) {
222 list_for_each_entry(peer
, &rionet_peers
, node
) {
223 if (peer
->rdev
->destid
== sid
)
224 rionet_active
[sid
] = peer
->rdev
;
226 rio_mport_send_doorbell(mport
, sid
,
227 RIONET_DOORBELL_JOIN
);
229 } else if (info
== RIONET_DOORBELL_LEAVE
) {
230 rionet_active
[sid
] = NULL
;
232 if (netif_msg_intr(rnet
))
233 printk(KERN_WARNING
"%s: unhandled doorbell\n",
238 static void rionet_inb_msg_event(struct rio_mport
*mport
, void *dev_id
, int mbox
, int slot
)
241 struct net_device
*ndev
= dev_id
;
242 struct rionet_private
*rnet
= netdev_priv(ndev
);
244 if (netif_msg_intr(rnet
))
245 printk(KERN_INFO
"%s: inbound message event, mbox %d slot %d\n",
246 DRV_NAME
, mbox
, slot
);
248 spin_lock(&rnet
->lock
);
249 if ((n
= rionet_rx_clean(ndev
)) != rnet
->rx_slot
)
250 rionet_rx_fill(ndev
, n
);
251 spin_unlock(&rnet
->lock
);
254 static void rionet_outb_msg_event(struct rio_mport
*mport
, void *dev_id
, int mbox
, int slot
)
256 struct net_device
*ndev
= dev_id
;
257 struct rionet_private
*rnet
= netdev_priv(ndev
);
259 spin_lock(&rnet
->lock
);
261 if (netif_msg_intr(rnet
))
263 "%s: outbound message event, mbox %d slot %d\n",
264 DRV_NAME
, mbox
, slot
);
266 while (rnet
->tx_cnt
&& (rnet
->ack_slot
!= slot
)) {
267 /* dma unmap single */
268 dev_kfree_skb_irq(rnet
->tx_skb
[rnet
->ack_slot
]);
269 rnet
->tx_skb
[rnet
->ack_slot
] = NULL
;
271 rnet
->ack_slot
&= (RIONET_TX_RING_SIZE
- 1);
275 if (rnet
->tx_cnt
< RIONET_TX_RING_SIZE
)
276 netif_wake_queue(ndev
);
278 spin_unlock(&rnet
->lock
);
281 static int rionet_open(struct net_device
*ndev
)
284 struct rionet_peer
*peer
, *tmp
;
286 struct rionet_private
*rnet
= netdev_priv(ndev
);
288 if (netif_msg_ifup(rnet
))
289 printk(KERN_INFO
"%s: open\n", DRV_NAME
);
291 if ((rc
= rio_request_inb_dbell(rnet
->mport
,
293 RIONET_DOORBELL_JOIN
,
294 RIONET_DOORBELL_LEAVE
,
295 rionet_dbell_event
)) < 0)
298 if ((rc
= rio_request_inb_mbox(rnet
->mport
,
302 rionet_inb_msg_event
)) < 0)
305 if ((rc
= rio_request_outb_mbox(rnet
->mport
,
309 rionet_outb_msg_event
)) < 0)
312 /* Initialize inbound message ring */
313 for (i
= 0; i
< RIONET_RX_RING_SIZE
; i
++)
314 rnet
->rx_skb
[i
] = NULL
;
316 rionet_rx_fill(ndev
, 0);
322 netif_carrier_on(ndev
);
323 netif_start_queue(ndev
);
325 list_for_each_entry_safe(peer
, tmp
, &rionet_peers
, node
) {
326 if (!(peer
->res
= rio_request_outb_dbell(peer
->rdev
,
327 RIONET_DOORBELL_JOIN
,
328 RIONET_DOORBELL_LEAVE
)))
330 printk(KERN_ERR
"%s: error requesting doorbells\n",
336 * If device has initialized inbound doorbells,
337 * send a join message
339 rio_read_config_32(peer
->rdev
, RIO_WRITE_PORT_CSR
, &pwdcsr
);
340 if (pwdcsr
& RIO_DOORBELL_AVAIL
)
341 rio_send_doorbell(peer
->rdev
, RIONET_DOORBELL_JOIN
);
348 static int rionet_close(struct net_device
*ndev
)
350 struct rionet_private
*rnet
= netdev_priv(ndev
);
351 struct rionet_peer
*peer
, *tmp
;
354 if (netif_msg_ifup(rnet
))
355 printk(KERN_INFO
"%s: close\n", DRV_NAME
);
357 netif_stop_queue(ndev
);
358 netif_carrier_off(ndev
);
360 for (i
= 0; i
< RIONET_RX_RING_SIZE
; i
++)
361 kfree_skb(rnet
->rx_skb
[i
]);
363 list_for_each_entry_safe(peer
, tmp
, &rionet_peers
, node
) {
364 if (rionet_active
[peer
->rdev
->destid
]) {
365 rio_send_doorbell(peer
->rdev
, RIONET_DOORBELL_LEAVE
);
366 rionet_active
[peer
->rdev
->destid
] = NULL
;
368 rio_release_outb_dbell(peer
->rdev
, peer
->res
);
371 rio_release_inb_dbell(rnet
->mport
, RIONET_DOORBELL_JOIN
,
372 RIONET_DOORBELL_LEAVE
);
373 rio_release_inb_mbox(rnet
->mport
, RIONET_MAILBOX
);
374 rio_release_outb_mbox(rnet
->mport
, RIONET_MAILBOX
);
379 static void rionet_remove(struct rio_dev
*rdev
)
381 struct net_device
*ndev
= NULL
;
382 struct rionet_peer
*peer
, *tmp
;
384 free_pages((unsigned long)rionet_active
, rdev
->net
->hport
->sys_size
?
385 __ilog2(sizeof(void *)) + 4 : 0);
386 unregister_netdev(ndev
);
389 list_for_each_entry_safe(peer
, tmp
, &rionet_peers
, node
) {
390 list_del(&peer
->node
);
395 static void rionet_get_drvinfo(struct net_device
*ndev
,
396 struct ethtool_drvinfo
*info
)
398 struct rionet_private
*rnet
= netdev_priv(ndev
);
400 strcpy(info
->driver
, DRV_NAME
);
401 strcpy(info
->version
, DRV_VERSION
);
402 strcpy(info
->fw_version
, "n/a");
403 strcpy(info
->bus_info
, rnet
->mport
->name
);
406 static u32
rionet_get_msglevel(struct net_device
*ndev
)
408 struct rionet_private
*rnet
= netdev_priv(ndev
);
410 return rnet
->msg_enable
;
413 static void rionet_set_msglevel(struct net_device
*ndev
, u32 value
)
415 struct rionet_private
*rnet
= netdev_priv(ndev
);
417 rnet
->msg_enable
= value
;
420 static const struct ethtool_ops rionet_ethtool_ops
= {
421 .get_drvinfo
= rionet_get_drvinfo
,
422 .get_msglevel
= rionet_get_msglevel
,
423 .set_msglevel
= rionet_set_msglevel
,
424 .get_link
= ethtool_op_get_link
,
427 static const struct net_device_ops rionet_netdev_ops
= {
428 .ndo_open
= rionet_open
,
429 .ndo_stop
= rionet_close
,
430 .ndo_start_xmit
= rionet_start_xmit
,
431 .ndo_change_mtu
= eth_change_mtu
,
432 .ndo_validate_addr
= eth_validate_addr
,
433 .ndo_set_mac_address
= eth_mac_addr
,
436 static int rionet_setup_netdev(struct rio_mport
*mport
)
439 struct net_device
*ndev
= NULL
;
440 struct rionet_private
*rnet
;
443 /* Allocate our net_device structure */
444 ndev
= alloc_etherdev(sizeof(struct rionet_private
));
446 printk(KERN_INFO
"%s: could not allocate ethernet device.\n",
452 rionet_active
= (struct rio_dev
**)__get_free_pages(GFP_KERNEL
,
453 mport
->sys_size
? __ilog2(sizeof(void *)) + 4 : 0);
454 if (!rionet_active
) {
458 memset((void *)rionet_active
, 0, sizeof(void *) *
459 RIO_MAX_ROUTE_ENTRIES(mport
->sys_size
));
461 /* Set up private area */
462 rnet
= netdev_priv(ndev
);
465 /* Set the default MAC address */
466 device_id
= rio_local_get_device_id(mport
);
467 ndev
->dev_addr
[0] = 0x00;
468 ndev
->dev_addr
[1] = 0x01;
469 ndev
->dev_addr
[2] = 0x00;
470 ndev
->dev_addr
[3] = 0x01;
471 ndev
->dev_addr
[4] = device_id
>> 8;
472 ndev
->dev_addr
[5] = device_id
& 0xff;
474 ndev
->netdev_ops
= &rionet_netdev_ops
;
475 ndev
->mtu
= RIO_MAX_MSG_SIZE
- 14;
476 ndev
->features
= NETIF_F_LLTX
;
477 SET_ETHTOOL_OPS(ndev
, &rionet_ethtool_ops
);
479 spin_lock_init(&rnet
->lock
);
480 spin_lock_init(&rnet
->tx_lock
);
482 rnet
->msg_enable
= RIONET_DEFAULT_MSGLEVEL
;
484 rc
= register_netdev(ndev
);
488 printk("%s: %s %s Version %s, MAC %pM\n",
500 * XXX Make multi-net safe
502 static int rionet_probe(struct rio_dev
*rdev
, const struct rio_device_id
*id
)
505 u32 lpef
, lsrc_ops
, ldst_ops
;
506 struct rionet_peer
*peer
;
508 /* If local device is not rionet capable, give up quickly */
513 * First time through, make sure local device is rionet
514 * capable, setup netdev, and set flags so this is skipped
518 rio_local_read_config_32(rdev
->net
->hport
, RIO_PEF_CAR
, &lpef
);
519 rio_local_read_config_32(rdev
->net
->hport
, RIO_SRC_OPS_CAR
,
521 rio_local_read_config_32(rdev
->net
->hport
, RIO_DST_OPS_CAR
,
523 if (!is_rionet_capable(lpef
, lsrc_ops
, ldst_ops
)) {
525 "%s: local device is not network capable\n",
532 rc
= rionet_setup_netdev(rdev
->net
->hport
);
537 * If the remote device has mailbox/doorbell capabilities,
538 * add it to the peer list.
540 if (dev_rionet_capable(rdev
)) {
541 if (!(peer
= kmalloc(sizeof(struct rionet_peer
), GFP_KERNEL
))) {
546 list_add_tail(&peer
->node
, &rionet_peers
);
553 static struct rio_device_id rionet_id_table
[] = {
554 {RIO_DEVICE(RIO_ANY_ID
, RIO_ANY_ID
)}
557 static struct rio_driver rionet_driver
= {
559 .id_table
= rionet_id_table
,
560 .probe
= rionet_probe
,
561 .remove
= rionet_remove
,
564 static int __init
rionet_init(void)
566 return rio_register_driver(&rionet_driver
);
569 static void __exit
rionet_exit(void)
571 rio_unregister_driver(&rionet_driver
);
/* Register the module entry/exit hooks with the kernel. */
module_init(rionet_init);
module_exit(rionet_exit);