/* sunvnet.c: Sun LDOM Virtual Network Driver.
 *
 * Copyright (C) 2007, 2008 David S. Miller <davem@davemloft.net>
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/etherdevice.h>
#include <linux/mutex.h>

#include <asm/vio.h>
#include <asm/ldc.h>

#include "sunvnet.h"
#define DRV_MODULE_NAME		"sunvnet"
#define PFX			DRV_MODULE_NAME ": "
#define DRV_MODULE_VERSION	"1.0"
#define DRV_MODULE_RELDATE	"June 25, 2007"

static char version[] __devinitdata =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
MODULE_DESCRIPTION("Sun LDOM virtual network driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
/* Ordered from largest major to lowest */
static struct vio_version vnet_versions[] = {
	{ .major = 1, .minor = 0 },
};
static inline u32 vnet_tx_dring_avail(struct vio_dring_state *dr)
{
	return vio_dring_avail(dr, VNET_TX_RING_SIZE);
}
static int vnet_handle_unknown(struct vnet_port *port, void *arg)
{
	struct vio_msg_tag *pkt = arg;

	printk(KERN_ERR PFX "Received unknown msg [%02x:%02x:%04x:%08x]\n",
	       pkt->type, pkt->stype, pkt->stype_env, pkt->sid);
	printk(KERN_ERR PFX "Resetting connection.\n");

	ldc_disconnect(port->vio.lp);

	return -ECONNRESET;
}
static int vnet_send_attr(struct vio_driver_state *vio)
{
	struct vnet_port *port = to_vnet_port(vio);
	struct net_device *dev = port->vp->dev;
	struct vio_net_attr_info pkt;
	int i;

	memset(&pkt, 0, sizeof(pkt));
	pkt.tag.type = VIO_TYPE_CTRL;
	pkt.tag.stype = VIO_SUBTYPE_INFO;
	pkt.tag.stype_env = VIO_ATTR_INFO;
	pkt.tag.sid = vio_send_sid(vio);
	pkt.xfer_mode = VIO_DRING_MODE;
	pkt.addr_type = VNET_ADDR_ETHERMAC;
	pkt.ack_freq = 0;
	for (i = 0; i < 6; i++)
		pkt.addr |= (u64)dev->dev_addr[i] << ((5 - i) * 8);
	pkt.mtu = ETH_FRAME_LEN;

	viodbg(HS, "SEND NET ATTR xmode[0x%x] atype[0x%x] addr[%llx] "
	       "ackfreq[%u] mtu[%llu]\n",
	       pkt.xfer_mode, pkt.addr_type,
	       (unsigned long long) pkt.addr,
	       pkt.ack_freq,
	       (unsigned long long) pkt.mtu);

	return vio_ldc_send(vio, &pkt, sizeof(pkt));
}
static int handle_attr_info(struct vio_driver_state *vio,
			    struct vio_net_attr_info *pkt)
{
	viodbg(HS, "GOT NET ATTR INFO xmode[0x%x] atype[0x%x] addr[%llx] "
	       "ackfreq[%u] mtu[%llu]\n",
	       pkt->xfer_mode, pkt->addr_type,
	       (unsigned long long) pkt->addr,
	       pkt->ack_freq,
	       (unsigned long long) pkt->mtu);

	pkt->tag.sid = vio_send_sid(vio);

	if (pkt->xfer_mode != VIO_DRING_MODE ||
	    pkt->addr_type != VNET_ADDR_ETHERMAC ||
	    pkt->mtu != ETH_FRAME_LEN) {
		viodbg(HS, "SEND NET ATTR NACK\n");

		pkt->tag.stype = VIO_SUBTYPE_NACK;

		(void) vio_ldc_send(vio, pkt, sizeof(*pkt));

		return -ECONNRESET;
	} else {
		viodbg(HS, "SEND NET ATTR ACK\n");

		pkt->tag.stype = VIO_SUBTYPE_ACK;

		return vio_ldc_send(vio, pkt, sizeof(*pkt));
	}
}
static int handle_attr_ack(struct vio_driver_state *vio,
			   struct vio_net_attr_info *pkt)
{
	viodbg(HS, "GOT NET ATTR ACK\n");

	return 0;
}

static int handle_attr_nack(struct vio_driver_state *vio,
			    struct vio_net_attr_info *pkt)
{
	viodbg(HS, "GOT NET ATTR NACK\n");

	return -ECONNRESET;
}
static int vnet_handle_attr(struct vio_driver_state *vio, void *arg)
{
	struct vio_net_attr_info *pkt = arg;

	switch (pkt->tag.stype) {
	case VIO_SUBTYPE_INFO:
		return handle_attr_info(vio, pkt);

	case VIO_SUBTYPE_ACK:
		return handle_attr_ack(vio, pkt);

	case VIO_SUBTYPE_NACK:
		return handle_attr_nack(vio, pkt);

	default:
		return -ECONNRESET;
	}
}
static void vnet_handshake_complete(struct vio_driver_state *vio)
{
	struct vio_dring_state *dr;

	dr = &vio->drings[VIO_DRIVER_RX_RING];
	dr->snd_nxt = dr->rcv_nxt = 1;

	dr = &vio->drings[VIO_DRIVER_TX_RING];
	dr->snd_nxt = dr->rcv_nxt = 1;
}
/* The hypervisor interface that implements copying to/from imported
 * memory from another domain requires that copies are done to 8-byte
 * aligned buffers, and that the lengths of such copies are also 8-byte
 * multiples.
 *
 * So we align skb->data to an 8-byte multiple and pad-out the data
 * area so we can round the copy length up to the next multiple of
 * 8 for the copy.
 *
 * The transmitter puts the actual start of the packet 6 bytes into
 * the buffer it sends over, so that the IP headers after the ethernet
 * header are aligned properly.  These 6 bytes are not in the descriptor
 * length, they are simply implied.  This offset is represented using
 * the VNET_PACKET_SKIP macro.
 */
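/* A worked example of the alignment math used below (the address is
 * illustrative, not from the source): if netdev_alloc_skb() hands back
 * skb->data ending in 0x0d, then
 *
 *	off = ((0x0d + 7UL) & ~7UL) - 0x0d = 0x10 - 0x0d = 3
 *
 * and skb_reserve(skb, 3) advances skb->data to the next 8-byte
 * boundary.  The 8+8 slop requested from netdev_alloc_skb() leaves
 * room both for this shift and for rounding the copy length up to an
 * 8-byte multiple in vnet_rx_one().
 */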
static struct sk_buff *alloc_and_align_skb(struct net_device *dev,
					   unsigned int len)
{
	struct sk_buff *skb = netdev_alloc_skb(dev, len+VNET_PACKET_SKIP+8+8);
	unsigned long addr, off;

	if (unlikely(!skb))
		return NULL;

	addr = (unsigned long) skb->data;
	off = ((addr + 7UL) & ~7UL) - addr;
	if (off)
		skb_reserve(skb, off);

	return skb;
}
static int vnet_rx_one(struct vnet_port *port, unsigned int len,
		       struct ldc_trans_cookie *cookies, int ncookies)
{
	struct net_device *dev = port->vp->dev;
	unsigned int copy_len;
	struct sk_buff *skb;
	int err;

	err = -EMSGSIZE;
	if (unlikely(len < ETH_ZLEN || len > ETH_FRAME_LEN)) {
		dev->stats.rx_length_errors++;
		goto out_dropped;
	}

	skb = alloc_and_align_skb(dev, len);
	err = -ENOMEM;
	if (unlikely(!skb)) {
		dev->stats.rx_missed_errors++;
		goto out_dropped;
	}

	copy_len = (len + VNET_PACKET_SKIP + 7U) & ~7U;
	skb_put(skb, copy_len);
	err = ldc_copy(port->vio.lp, LDC_COPY_IN,
		       skb->data, copy_len, 0,
		       cookies, ncookies);
	if (unlikely(err < 0)) {
		dev->stats.rx_frame_errors++;
		goto out_free_skb;
	}

	skb_pull(skb, VNET_PACKET_SKIP);
	skb_trim(skb, len);
	skb->protocol = eth_type_trans(skb, dev);

	dev->stats.rx_packets++;
	dev->stats.rx_bytes += len;

	netif_rx(skb);

	return 0;

out_free_skb:
	kfree_skb(skb);

out_dropped:
	dev->stats.rx_dropped++;
	return err;
}
static int vnet_send_ack(struct vnet_port *port, struct vio_dring_state *dr,
			 u32 start, u32 end, u8 vio_dring_state)
{
	struct vio_dring_data hdr = {
		.tag = {
			.type		= VIO_TYPE_DATA,
			.stype		= VIO_SUBTYPE_ACK,
			.stype_env	= VIO_DRING_DATA,
			.sid		= vio_send_sid(&port->vio),
		},
		.dring_ident		= dr->ident,
		.start_idx		= start,
		.end_idx		= end,
		.state			= vio_dring_state,
	};
	int err, delay;

	hdr.seq = dr->snd_nxt;
	delay = 1;
	do {
		err = vio_ldc_send(&port->vio, &hdr, sizeof(hdr));
		if (err > 0) {
			dr->snd_nxt++;
			break;
		}
		udelay(delay);
		if ((delay <<= 1) > 128)
			delay = 128;
	} while (err == -EAGAIN);

	return err;
}
static u32 next_idx(u32 idx, struct vio_dring_state *dr)
{
	if (++idx == dr->num_entries)
		idx = 0;
	return idx;
}

static u32 prev_idx(u32 idx, struct vio_dring_state *dr)
{
	if (idx == 0)
		idx = dr->num_entries - 1;
	else
		idx--;

	return idx;
}
static struct vio_net_desc *get_rx_desc(struct vnet_port *port,
					struct vio_dring_state *dr,
					u32 index)
{
	struct vio_net_desc *desc = port->vio.desc_buf;
	int err;

	err = ldc_get_dring_entry(port->vio.lp, desc, dr->entry_size,
				  (index * dr->entry_size),
				  dr->cookies, dr->ncookies);
	if (err < 0)
		return ERR_PTR(err);

	return desc;
}
static int put_rx_desc(struct vnet_port *port,
		       struct vio_dring_state *dr,
		       struct vio_net_desc *desc,
		       u32 index)
{
	int err;

	err = ldc_put_dring_entry(port->vio.lp, desc, dr->entry_size,
				  (index * dr->entry_size),
				  dr->cookies, dr->ncookies);
	if (err < 0)
		return err;

	return 0;
}
static int vnet_walk_rx_one(struct vnet_port *port,
			    struct vio_dring_state *dr,
			    u32 index, int *needs_ack)
{
	struct vio_net_desc *desc = get_rx_desc(port, dr, index);
	struct vio_driver_state *vio = &port->vio;
	int err;

	if (IS_ERR(desc))
		return PTR_ERR(desc);

	viodbg(DATA, "vio_walk_rx_one desc[%02x:%02x:%08x:%08x:%llx:%llx]\n",
	       desc->hdr.state, desc->hdr.ack,
	       desc->size, desc->ncookies,
	       desc->cookies[0].cookie_addr,
	       desc->cookies[0].cookie_size);

	if (desc->hdr.state != VIO_DESC_READY)
		return 1;
	err = vnet_rx_one(port, desc->size, desc->cookies, desc->ncookies);
	if (err == -ECONNRESET)
		return err;
	desc->hdr.state = VIO_DESC_DONE;
	err = put_rx_desc(port, dr, desc, index);
	if (err < 0)
		return err;
	*needs_ack = desc->hdr.ack;
	return 0;
}
static int vnet_walk_rx(struct vnet_port *port, struct vio_dring_state *dr,
			u32 start, u32 end)
{
	struct vio_driver_state *vio = &port->vio;
	int ack_start = -1, ack_end = -1;

	end = (end == (u32) -1) ? prev_idx(start, dr) : next_idx(end, dr);

	viodbg(DATA, "vnet_walk_rx start[%08x] end[%08x]\n", start, end);

	while (start != end) {
		int ack = 0, err = vnet_walk_rx_one(port, dr, start, &ack);
		if (err == -ECONNRESET)
			return err;
		if (err != 0)
			break;
		if (ack_start == -1)
			ack_start = start;
		ack_end = start;
		start = next_idx(start, dr);
		if (ack && start != end) {
			err = vnet_send_ack(port, dr, ack_start, ack_end,
					    VIO_DRING_ACTIVE);
			if (err == -ECONNRESET)
				return err;
			ack_start = -1;
		}
	}
	if (unlikely(ack_start == -1))
		ack_start = ack_end = prev_idx(start, dr);
	return vnet_send_ack(port, dr, ack_start, ack_end, VIO_DRING_STOPPED);
}
static int vnet_rx(struct vnet_port *port, void *msgbuf)
{
	struct vio_dring_data *pkt = msgbuf;
	struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_RX_RING];
	struct vio_driver_state *vio = &port->vio;

	viodbg(DATA, "vnet_rx stype_env[%04x] seq[%016llx] rcv_nxt[%016llx]\n",
	       pkt->tag.stype_env, pkt->seq, dr->rcv_nxt);

	if (unlikely(pkt->tag.stype_env != VIO_DRING_DATA))
		return 0;
	if (unlikely(pkt->seq != dr->rcv_nxt)) {
		printk(KERN_ERR PFX "RX out of sequence seq[0x%llx] "
		       "rcv_nxt[0x%llx]\n", pkt->seq, dr->rcv_nxt);
		return 0;
	}

	dr->rcv_nxt++;

	/* XXX Validate pkt->start_idx and pkt->end_idx XXX */

	return vnet_walk_rx(port, dr, pkt->start_idx, pkt->end_idx);
}
static int idx_is_pending(struct vio_dring_state *dr, u32 end)
{
	u32 idx = dr->cons;
	int found = 0;

	while (idx != dr->prod) {
		if (idx == end) {
			found = 1;
			break;
		}
		idx = next_idx(idx, dr);
	}
	return found;
}
static int vnet_ack(struct vnet_port *port, void *msgbuf)
{
	struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
	struct vio_dring_data *pkt = msgbuf;
	struct net_device *dev;
	struct vnet *vp;
	u32 end;

	if (unlikely(pkt->tag.stype_env != VIO_DRING_DATA))
		return 0;

	end = pkt->end_idx;
	if (unlikely(!idx_is_pending(dr, end)))
		return 0;

	dr->cons = next_idx(end, dr);

	vp = port->vp;
	dev = vp->dev;
	if (unlikely(netif_queue_stopped(dev) &&
		     vnet_tx_dring_avail(dr) >= VNET_TX_WAKEUP_THRESH(dr)))
		return 1;

	return 0;
}
static int vnet_nack(struct vnet_port *port, void *msgbuf)
{
	/* XXX just reset or similar XXX */
	return 0;
}
static int handle_mcast(struct vnet_port *port, void *msgbuf)
{
	struct vio_net_mcast_info *pkt = msgbuf;

	if (pkt->tag.stype != VIO_SUBTYPE_ACK)
		printk(KERN_ERR PFX "%s: Got unexpected MCAST reply "
		       "[%02x:%02x:%04x:%08x]\n",
		       port->vp->dev->name,
		       pkt->tag.type,
		       pkt->tag.stype,
		       pkt->tag.stype_env,
		       pkt->tag.sid);

	return 0;
}
static void maybe_tx_wakeup(struct vnet *vp)
{
	struct net_device *dev = vp->dev;

	netif_tx_lock(dev);
	if (likely(netif_queue_stopped(dev))) {
		struct vnet_port *port;
		int wake = 1;

		list_for_each_entry(port, &vp->port_list, list) {
			struct vio_dring_state *dr;

			dr = &port->vio.drings[VIO_DRIVER_TX_RING];
			if (vnet_tx_dring_avail(dr) <
			    VNET_TX_WAKEUP_THRESH(dr)) {
				wake = 0;
				break;
			}
		}
		if (wake)
			netif_wake_queue(dev);
	}
	netif_tx_unlock(dev);
}
static void vnet_event(void *arg, int event)
{
	struct vnet_port *port = arg;
	struct vio_driver_state *vio = &port->vio;
	unsigned long flags;
	int tx_wakeup, err;

	spin_lock_irqsave(&vio->lock, flags);

	if (unlikely(event == LDC_EVENT_RESET ||
		     event == LDC_EVENT_UP)) {
		vio_link_state_change(vio, event);
		spin_unlock_irqrestore(&vio->lock, flags);

		if (event == LDC_EVENT_RESET)
			vio_port_up(vio);
		return;
	}

	if (unlikely(event != LDC_EVENT_DATA_READY)) {
		printk(KERN_WARNING PFX "Unexpected LDC event %d\n", event);
		spin_unlock_irqrestore(&vio->lock, flags);
		return;
	}

	tx_wakeup = err = 0;
	while (1) {
		union {
			struct vio_msg_tag tag;
			u64 raw[8];
		} msgbuf;

		err = ldc_read(vio->lp, &msgbuf, sizeof(msgbuf));
		if (unlikely(err < 0)) {
			if (err == -ECONNRESET)
				vio_conn_reset(vio);
			break;
		}
		if (err == 0)
			break;
		viodbg(DATA, "TAG [%02x:%02x:%04x:%08x]\n",
		       msgbuf.tag.type,
		       msgbuf.tag.stype,
		       msgbuf.tag.stype_env,
		       msgbuf.tag.sid);
		err = vio_validate_sid(vio, &msgbuf.tag);
		if (err < 0)
			break;

		if (likely(msgbuf.tag.type == VIO_TYPE_DATA)) {
			if (msgbuf.tag.stype == VIO_SUBTYPE_INFO) {
				err = vnet_rx(port, &msgbuf);
			} else if (msgbuf.tag.stype == VIO_SUBTYPE_ACK) {
				err = vnet_ack(port, &msgbuf);
				if (err > 0)
					tx_wakeup |= err;
			} else if (msgbuf.tag.stype == VIO_SUBTYPE_NACK) {
				err = vnet_nack(port, &msgbuf);
			}
		} else if (msgbuf.tag.type == VIO_TYPE_CTRL) {
			if (msgbuf.tag.stype_env == VNET_MCAST_INFO)
				err = handle_mcast(port, &msgbuf);
			else
				err = vio_control_pkt_engine(vio, &msgbuf);
		} else {
			err = vnet_handle_unknown(port, &msgbuf);
		}
		if (err == -ECONNRESET)
			break;
	}
	spin_unlock(&vio->lock);
	if (unlikely(tx_wakeup && err != -ECONNRESET))
		maybe_tx_wakeup(port->vp);
	local_irq_restore(flags);
}
static int __vnet_tx_trigger(struct vnet_port *port)
{
	struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
	struct vio_dring_data hdr = {
		.tag = {
			.type		= VIO_TYPE_DATA,
			.stype		= VIO_SUBTYPE_INFO,
			.stype_env	= VIO_DRING_DATA,
			.sid		= vio_send_sid(&port->vio),
		},
		.dring_ident		= dr->ident,
		.start_idx		= dr->prod,
		.end_idx		= (u32) -1,
	};
	int err, delay;

	hdr.seq = dr->snd_nxt;
	delay = 1;
	do {
		err = vio_ldc_send(&port->vio, &hdr, sizeof(hdr));
		if (err > 0) {
			dr->snd_nxt++;
			break;
		}
		udelay(delay);
		if ((delay <<= 1) > 128)
			delay = 128;
	} while (err == -EAGAIN);

	return err;
}
struct vnet_port *__tx_port_find(struct vnet *vp, struct sk_buff *skb)
{
	unsigned int hash = vnet_hashfn(skb->data);
	struct hlist_head *hp = &vp->port_hash[hash];
	struct hlist_node *n;
	struct vnet_port *port;

	hlist_for_each_entry(port, n, hp, hash) {
		if (!compare_ether_addr(port->raddr, skb->data))
			return port;
	}
	port = NULL;
	if (!list_empty(&vp->port_list))
		port = list_entry(vp->port_list.next, struct vnet_port, list);

	return port;
}
struct vnet_port *tx_port_find(struct vnet *vp, struct sk_buff *skb)
{
	struct vnet_port *ret;
	unsigned long flags;

	spin_lock_irqsave(&vp->lock, flags);
	ret = __tx_port_find(vp, skb);
	spin_unlock_irqrestore(&vp->lock, flags);

	return ret;
}
static int vnet_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct vnet *vp = netdev_priv(dev);
	struct vnet_port *port = tx_port_find(vp, skb);
	struct vio_dring_state *dr;
	struct vio_net_desc *d;
	unsigned long flags;
	unsigned int len;
	void *tx_buf;
	int i, err;

	if (unlikely(!port))
		goto out_dropped;

	spin_lock_irqsave(&port->vio.lock, flags);

	dr = &port->vio.drings[VIO_DRIVER_TX_RING];
	if (unlikely(vnet_tx_dring_avail(dr) < 2)) {
		if (!netif_queue_stopped(dev)) {
			netif_stop_queue(dev);

			/* This is a hard error, log it. */
			printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
			       "queue awake!\n", dev->name);
			dev->stats.tx_errors++;
		}
		spin_unlock_irqrestore(&port->vio.lock, flags);
		return NETDEV_TX_BUSY;
	}

	d = vio_dring_cur(dr);

	tx_buf = port->tx_bufs[dr->prod].buf;
	skb_copy_from_linear_data(skb, tx_buf + VNET_PACKET_SKIP, skb->len);

	len = skb->len;
	if (len < ETH_ZLEN) {
		len = ETH_ZLEN;
		memset(tx_buf+VNET_PACKET_SKIP+skb->len, 0, len - skb->len);
	}

	d->hdr.ack = VIO_ACK_ENABLE;
	d->size = len;
	d->ncookies = port->tx_bufs[dr->prod].ncookies;
	for (i = 0; i < d->ncookies; i++)
		d->cookies[i] = port->tx_bufs[dr->prod].cookies[i];

	/* This has to be a non-SMP write barrier because we are writing
	 * to memory which is shared with the peer LDOM.
	 */
	wmb();

	d->hdr.state = VIO_DESC_READY;

	err = __vnet_tx_trigger(port);
	if (unlikely(err < 0)) {
		printk(KERN_INFO PFX "%s: TX trigger error %d\n",
		       dev->name, err);
		d->hdr.state = VIO_DESC_FREE;
		dev->stats.tx_carrier_errors++;
		goto out_dropped_unlock;
	}

	dev->stats.tx_packets++;
	dev->stats.tx_bytes += skb->len;

	dr->prod = (dr->prod + 1) & (VNET_TX_RING_SIZE - 1);
	if (unlikely(vnet_tx_dring_avail(dr) < 2)) {
		netif_stop_queue(dev);
		if (vnet_tx_dring_avail(dr) > VNET_TX_WAKEUP_THRESH(dr))
			netif_wake_queue(dev);
	}

	spin_unlock_irqrestore(&port->vio.lock, flags);

	dev_kfree_skb(skb);

	dev->trans_start = jiffies;
	return NETDEV_TX_OK;

out_dropped_unlock:
	spin_unlock_irqrestore(&port->vio.lock, flags);

out_dropped:
	if (skb)
		dev_kfree_skb(skb);
	dev->stats.tx_dropped++;
	return NETDEV_TX_OK;
}
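/* Flow control in vnet_start_xmit() above is deliberately asymmetric:
 * the queue is stopped as soon as fewer than two free descriptors
 * remain, but it is only woken, here or from vnet_ack(), once
 * vnet_tx_dring_avail() climbs back above VNET_TX_WAKEUP_THRESH(dr).
 * The gap presumably avoids stop/wake thrashing while the ring hovers
 * near full.
 */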
static void vnet_tx_timeout(struct net_device *dev)
{
	/* XXX Implement me XXX */
}
static int vnet_open(struct net_device *dev)
{
	netif_carrier_on(dev);
	netif_start_queue(dev);

	return 0;
}

static int vnet_close(struct net_device *dev)
{
	netif_stop_queue(dev);
	netif_carrier_off(dev);

	return 0;
}
static struct vnet_mcast_entry *__vnet_mc_find(struct vnet *vp, u8 *addr)
{
	struct vnet_mcast_entry *m;

	for (m = vp->mcast_list; m; m = m->next) {
		if (!memcmp(m->addr, addr, ETH_ALEN))
			return m;
	}
	return NULL;
}
static void __update_mc_list(struct vnet *vp, struct net_device *dev)
{
	struct dev_addr_list *p;

	for (p = dev->mc_list; p; p = p->next) {
		struct vnet_mcast_entry *m;

		m = __vnet_mc_find(vp, p->dmi_addr);
		if (m) {
			m->hit = 1;
			continue;
		}

		if (!m) {
			m = kzalloc(sizeof(*m), GFP_ATOMIC);
			if (!m)
				continue;
			memcpy(m->addr, p->dmi_addr, ETH_ALEN);
			m->hit = 1;

			m->next = vp->mcast_list;
			vp->mcast_list = m;
		}
	}
}
static void __send_mc_list(struct vnet *vp, struct vnet_port *port)
{
	struct vio_net_mcast_info info;
	struct vnet_mcast_entry *m, **pp;
	int n_addrs;

	memset(&info, 0, sizeof(info));

	info.tag.type = VIO_TYPE_CTRL;
	info.tag.stype = VIO_SUBTYPE_INFO;
	info.tag.stype_env = VNET_MCAST_INFO;
	info.tag.sid = vio_send_sid(&port->vio);
	info.set = 1;

	n_addrs = 0;
	for (m = vp->mcast_list; m; m = m->next) {
		if (m->sent)
			continue;
		m->sent = 1;
		memcpy(&info.mcast_addr[n_addrs * ETH_ALEN],
		       m->addr, ETH_ALEN);
		if (++n_addrs == VNET_NUM_MCAST) {
			info.count = n_addrs;

			(void) vio_ldc_send(&port->vio, &info,
					    sizeof(info));
			n_addrs = 0;
		}
	}
	if (n_addrs) {
		info.count = n_addrs;
		(void) vio_ldc_send(&port->vio, &info, sizeof(info));
		n_addrs = 0;
	}

	info.set = 0;

	pp = &vp->mcast_list;
	while ((m = *pp) != NULL) {
		if (m->hit) {
			m->hit = 0;
			pp = &m->next;
			continue;
		}

		memcpy(&info.mcast_addr[n_addrs * ETH_ALEN],
		       m->addr, ETH_ALEN);
		if (++n_addrs == VNET_NUM_MCAST) {
			info.count = n_addrs;
			(void) vio_ldc_send(&port->vio, &info,
					    sizeof(info));
			n_addrs = 0;
		}

		*pp = m->next;
		kfree(m);
	}
	if (n_addrs) {
		info.count = n_addrs;
		(void) vio_ldc_send(&port->vio, &info, sizeof(info));
	}
}
static void vnet_set_rx_mode(struct net_device *dev)
{
	struct vnet *vp = netdev_priv(dev);
	struct vnet_port *port;
	unsigned long flags;

	spin_lock_irqsave(&vp->lock, flags);
	if (!list_empty(&vp->port_list)) {
		port = list_entry(vp->port_list.next, struct vnet_port, list);

		if (port->switch_port) {
			__update_mc_list(vp, dev);
			__send_mc_list(vp, port);
		}
	}
	spin_unlock_irqrestore(&vp->lock, flags);
}
static int vnet_change_mtu(struct net_device *dev, int new_mtu)
{
	if (new_mtu != ETH_DATA_LEN)
		return -EINVAL;

	dev->mtu = new_mtu;
	return 0;
}
static int vnet_set_mac_addr(struct net_device *dev, void *p)
{
	return -EINVAL;
}
static void vnet_get_drvinfo(struct net_device *dev,
			     struct ethtool_drvinfo *info)
{
	strcpy(info->driver, DRV_MODULE_NAME);
	strcpy(info->version, DRV_MODULE_VERSION);
}

static u32 vnet_get_msglevel(struct net_device *dev)
{
	struct vnet *vp = netdev_priv(dev);
	return vp->msg_enable;
}

static void vnet_set_msglevel(struct net_device *dev, u32 value)
{
	struct vnet *vp = netdev_priv(dev);
	vp->msg_enable = value;
}

static const struct ethtool_ops vnet_ethtool_ops = {
	.get_drvinfo		= vnet_get_drvinfo,
	.get_msglevel		= vnet_get_msglevel,
	.set_msglevel		= vnet_set_msglevel,
	.get_link		= ethtool_op_get_link,
};
static void vnet_port_free_tx_bufs(struct vnet_port *port)
{
	struct vio_dring_state *dr;
	int i;

	dr = &port->vio.drings[VIO_DRIVER_TX_RING];
	if (dr->base) {
		ldc_free_exp_dring(port->vio.lp, dr->base,
				   (dr->entry_size * dr->num_entries),
				   dr->cookies, dr->ncookies);
		dr->base = NULL;
		dr->entry_size = 0;
		dr->num_entries = 0;
		dr->pending = 0;
		dr->ncookies = 0;
	}

	for (i = 0; i < VNET_TX_RING_SIZE; i++) {
		void *buf = port->tx_bufs[i].buf;

		if (!buf)
			continue;

		ldc_unmap(port->vio.lp,
			  port->tx_bufs[i].cookies,
			  port->tx_bufs[i].ncookies);

		kfree(buf);
		port->tx_bufs[i].buf = NULL;
	}
}
static int __devinit vnet_port_alloc_tx_bufs(struct vnet_port *port)
{
	struct vio_dring_state *dr;
	unsigned long len;
	int i, err, ncookies;
	void *dring;

	for (i = 0; i < VNET_TX_RING_SIZE; i++) {
		void *buf = kzalloc(ETH_FRAME_LEN + 8, GFP_KERNEL);
		int map_len = (ETH_FRAME_LEN + 7) & ~7;

		err = -ENOMEM;
		if (!buf) {
			printk(KERN_ERR "TX buffer allocation failure\n");
			goto err_out;
		}
		err = -EFAULT;
		if ((unsigned long)buf & (8UL - 1)) {
			printk(KERN_ERR "TX buffer misaligned\n");
			kfree(buf);
			goto err_out;
		}

		err = ldc_map_single(port->vio.lp, buf, map_len,
				     port->tx_bufs[i].cookies, 2,
				     (LDC_MAP_SHADOW |
				      LDC_MAP_DIRECT |
				      LDC_MAP_RW));
		if (err < 0) {
			kfree(buf);
			goto err_out;
		}
		port->tx_bufs[i].buf = buf;
		port->tx_bufs[i].ncookies = err;
	}

	dr = &port->vio.drings[VIO_DRIVER_TX_RING];

	len = (VNET_TX_RING_SIZE *
	       (sizeof(struct vio_net_desc) +
		(sizeof(struct ldc_trans_cookie) * 2)));

	ncookies = VIO_MAX_RING_COOKIES;
	dring = ldc_alloc_exp_dring(port->vio.lp, len,
				    dr->cookies, &ncookies,
				    (LDC_MAP_SHADOW |
				     LDC_MAP_DIRECT |
				     LDC_MAP_RW));
	if (IS_ERR(dring)) {
		err = PTR_ERR(dring);
		goto err_out;
	}

	dr->base = dring;
	dr->entry_size = (sizeof(struct vio_net_desc) +
			  (sizeof(struct ldc_trans_cookie) * 2));
	dr->num_entries = VNET_TX_RING_SIZE;
	dr->prod = dr->cons = 0;
	dr->pending = VNET_TX_RING_SIZE;
	dr->ncookies = ncookies;

	return 0;

err_out:
	vnet_port_free_tx_bufs(port);

	return err;
}
static LIST_HEAD(vnet_list);
static DEFINE_MUTEX(vnet_list_mutex);
static const struct net_device_ops vnet_ops = {
	.ndo_open		= vnet_open,
	.ndo_stop		= vnet_close,
	.ndo_set_multicast_list	= vnet_set_rx_mode,
	.ndo_set_mac_address	= vnet_set_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_tx_timeout		= vnet_tx_timeout,
	.ndo_change_mtu		= vnet_change_mtu,
	.ndo_start_xmit		= vnet_start_xmit,
};
static struct vnet * __devinit vnet_new(const u64 *local_mac)
{
	struct net_device *dev;
	struct vnet *vp;
	int err, i;

	dev = alloc_etherdev(sizeof(*vp));
	if (!dev) {
		printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
		return ERR_PTR(-ENOMEM);
	}

	for (i = 0; i < ETH_ALEN; i++)
		dev->dev_addr[i] = (*local_mac >> (5 - i) * 8) & 0xff;

	memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);

	vp = netdev_priv(dev);

	spin_lock_init(&vp->lock);
	vp->dev = dev;

	INIT_LIST_HEAD(&vp->port_list);
	for (i = 0; i < VNET_PORT_HASH_SIZE; i++)
		INIT_HLIST_HEAD(&vp->port_hash[i]);
	INIT_LIST_HEAD(&vp->list);
	vp->local_mac = *local_mac;

	dev->netdev_ops = &vnet_ops;
	dev->ethtool_ops = &vnet_ethtool_ops;
	dev->watchdog_timeo = VNET_TX_TIMEOUT;

	err = register_netdev(dev);
	if (err) {
		printk(KERN_ERR PFX "Cannot register net device, "
		       "aborting.\n");
		goto err_out_free_dev;
	}

	printk(KERN_INFO "%s: Sun LDOM vnet ", dev->name);

	for (i = 0; i < 6; i++)
		printk("%2.2x%c", dev->dev_addr[i], i == 5 ? '\n' : ':');

	list_add(&vp->list, &vnet_list);

	return vp;

err_out_free_dev:
	free_netdev(dev);

	return ERR_PTR(err);
}
static struct vnet * __devinit vnet_find_or_create(const u64 *local_mac)
{
	struct vnet *iter, *vp;

	mutex_lock(&vnet_list_mutex);
	vp = NULL;
	list_for_each_entry(iter, &vnet_list, list) {
		if (iter->local_mac == *local_mac) {
			vp = iter;
			break;
		}
	}
	if (!vp)
		vp = vnet_new(local_mac);
	mutex_unlock(&vnet_list_mutex);
	return vp;
}
static const char *local_mac_prop = "local-mac-address";

static struct vnet * __devinit vnet_find_parent(struct mdesc_handle *hp,
						u64 port_node)
{
	const u64 *local_mac = NULL;
	u64 a;

	mdesc_for_each_arc(a, hp, port_node, MDESC_ARC_TYPE_BACK) {
		u64 target = mdesc_arc_target(hp, a);
		const char *name;

		name = mdesc_get_property(hp, target, "name", NULL);
		if (!name || strcmp(name, "network"))
			continue;

		local_mac = mdesc_get_property(hp, target,
					       local_mac_prop, NULL);
		if (local_mac)
			break;
	}
	if (!local_mac)
		return ERR_PTR(-ENODEV);

	return vnet_find_or_create(local_mac);
}
static struct ldc_channel_config vnet_ldc_cfg = {
	.event		= vnet_event,
	.mtu		= 64,
	.mode		= LDC_MODE_UNRELIABLE,
};
static struct vio_driver_ops vnet_vio_ops = {
	.send_attr		= vnet_send_attr,
	.handle_attr		= vnet_handle_attr,
	.handshake_complete	= vnet_handshake_complete,
};
static void __devinit print_version(void)
{
	static int version_printed;

	if (version_printed++ == 0)
		printk(KERN_INFO "%s", version);
}
const char *remote_macaddr_prop = "remote-mac-address";
static int __devinit vnet_port_probe(struct vio_dev *vdev,
				     const struct vio_device_id *id)
{
	struct mdesc_handle *hp;
	struct vnet_port *port;
	unsigned long flags;
	struct vnet *vp;
	const u64 *rmac;
	int len, i, err, switch_port;

	print_version();

	hp = mdesc_grab();

	vp = vnet_find_parent(hp, vdev->mp);
	if (IS_ERR(vp)) {
		printk(KERN_ERR PFX "Cannot find port parent vnet.\n");
		err = PTR_ERR(vp);
		goto err_out_put_mdesc;
	}

	rmac = mdesc_get_property(hp, vdev->mp, remote_macaddr_prop, &len);
	err = -ENODEV;
	if (!rmac) {
		printk(KERN_ERR PFX "Port lacks %s property.\n",
		       remote_macaddr_prop);
		goto err_out_put_mdesc;
	}

	port = kzalloc(sizeof(*port), GFP_KERNEL);
	err = -ENOMEM;
	if (!port) {
		printk(KERN_ERR PFX "Cannot allocate vnet_port.\n");
		goto err_out_put_mdesc;
	}

	for (i = 0; i < ETH_ALEN; i++)
		port->raddr[i] = (*rmac >> (5 - i) * 8) & 0xff;

	port->vp = vp;

	err = vio_driver_init(&port->vio, vdev, VDEV_NETWORK,
			      vnet_versions, ARRAY_SIZE(vnet_versions),
			      &vnet_vio_ops, vp->dev->name);
	if (err)
		goto err_out_free_port;

	err = vio_ldc_alloc(&port->vio, &vnet_ldc_cfg, port);
	if (err)
		goto err_out_free_port;

	err = vnet_port_alloc_tx_bufs(port);
	if (err)
		goto err_out_free_ldc;

	INIT_HLIST_NODE(&port->hash);
	INIT_LIST_HEAD(&port->list);

	switch_port = 0;
	if (mdesc_get_property(hp, vdev->mp, "switch-port", NULL) != NULL)
		switch_port = 1;
	port->switch_port = switch_port;

	spin_lock_irqsave(&vp->lock, flags);
	if (switch_port)
		list_add(&port->list, &vp->port_list);
	else
		list_add_tail(&port->list, &vp->port_list);
	hlist_add_head(&port->hash, &vp->port_hash[vnet_hashfn(port->raddr)]);
	spin_unlock_irqrestore(&vp->lock, flags);

	dev_set_drvdata(&vdev->dev, port);

	printk(KERN_INFO "%s: PORT ( remote-mac %pM%s )\n",
	       vp->dev->name, port->raddr,
	       switch_port ? " switch-port" : "");

	vio_port_up(&port->vio);

	mdesc_release(hp);

	return 0;

err_out_free_ldc:
	vio_ldc_free(&port->vio);

err_out_free_port:
	kfree(port);

err_out_put_mdesc:
	mdesc_release(hp);
	return err;
}
static int vnet_port_remove(struct vio_dev *vdev)
{
	struct vnet_port *port = dev_get_drvdata(&vdev->dev);

	if (port) {
		struct vnet *vp = port->vp;
		unsigned long flags;

		del_timer_sync(&port->vio.timer);

		spin_lock_irqsave(&vp->lock, flags);
		list_del(&port->list);
		hlist_del(&port->hash);
		spin_unlock_irqrestore(&vp->lock, flags);

		vnet_port_free_tx_bufs(port);
		vio_ldc_free(&port->vio);

		dev_set_drvdata(&vdev->dev, NULL);

		kfree(port);
	}
	return 0;
}
static const struct vio_device_id vnet_port_match[] = {
	{
		.type = "vnet-port",
	},
	{},
};
MODULE_DEVICE_TABLE(vio, vnet_port_match);
static struct vio_driver vnet_port_driver = {
	.id_table	= vnet_port_match,
	.probe		= vnet_port_probe,
	.remove		= vnet_port_remove,
	.driver		= {
		.name	= "vnet_port",
		.owner	= THIS_MODULE,
	}
};
static int __init vnet_init(void)
{
	return vio_register_driver(&vnet_port_driver);
}

static void __exit vnet_exit(void)
{
	vio_unregister_driver(&vnet_port_driver);
}

module_init(vnet_init);
module_exit(vnet_exit);