/* sunvnet.c: Sun LDOM Virtual Network Driver.
 *
 * Copyright (C) 2007, 2008 David S. Miller <davem@davemloft.net>
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/etherdevice.h>
#include <linux/mutex.h>

/* NOTE(review): the source text was truncated here; the driver references
 * vio_*, ldc_* and mdesc_* APIs and its own tx_bufs structures, so the
 * platform headers below must have been included — verify against the tree.
 */
#include <asm/vio.h>
#include <asm/ldc.h>

#include "sunvnet.h"
22 #define DRV_MODULE_NAME "sunvnet"
23 #define PFX DRV_MODULE_NAME ": "
24 #define DRV_MODULE_VERSION "1.0"
25 #define DRV_MODULE_RELDATE "June 25, 2007"
27 static char version
[] __devinitdata
=
28 DRV_MODULE_NAME
".c:v" DRV_MODULE_VERSION
" (" DRV_MODULE_RELDATE
")\n";
29 MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
30 MODULE_DESCRIPTION("Sun LDOM virtual network driver");
31 MODULE_LICENSE("GPL");
32 MODULE_VERSION(DRV_MODULE_VERSION
);
34 /* Ordered from largest major to lowest */
35 static struct vio_version vnet_versions
[] = {
36 { .major
= 1, .minor
= 0 },
39 static inline u32
vnet_tx_dring_avail(struct vio_dring_state
*dr
)
41 return vio_dring_avail(dr
, VNET_TX_RING_SIZE
);
44 static int vnet_handle_unknown(struct vnet_port
*port
, void *arg
)
46 struct vio_msg_tag
*pkt
= arg
;
48 printk(KERN_ERR PFX
"Received unknown msg [%02x:%02x:%04x:%08x]\n",
49 pkt
->type
, pkt
->stype
, pkt
->stype_env
, pkt
->sid
);
50 printk(KERN_ERR PFX
"Resetting connection.\n");
52 ldc_disconnect(port
->vio
.lp
);
57 static int vnet_send_attr(struct vio_driver_state
*vio
)
59 struct vnet_port
*port
= to_vnet_port(vio
);
60 struct net_device
*dev
= port
->vp
->dev
;
61 struct vio_net_attr_info pkt
;
64 memset(&pkt
, 0, sizeof(pkt
));
65 pkt
.tag
.type
= VIO_TYPE_CTRL
;
66 pkt
.tag
.stype
= VIO_SUBTYPE_INFO
;
67 pkt
.tag
.stype_env
= VIO_ATTR_INFO
;
68 pkt
.tag
.sid
= vio_send_sid(vio
);
69 pkt
.xfer_mode
= VIO_DRING_MODE
;
70 pkt
.addr_type
= VNET_ADDR_ETHERMAC
;
72 for (i
= 0; i
< 6; i
++)
73 pkt
.addr
|= (u64
)dev
->dev_addr
[i
] << ((5 - i
) * 8);
74 pkt
.mtu
= ETH_FRAME_LEN
;
76 viodbg(HS
, "SEND NET ATTR xmode[0x%x] atype[0x%x] addr[%llx] "
77 "ackfreq[%u] mtu[%llu]\n",
78 pkt
.xfer_mode
, pkt
.addr_type
,
79 (unsigned long long) pkt
.addr
,
81 (unsigned long long) pkt
.mtu
);
83 return vio_ldc_send(vio
, &pkt
, sizeof(pkt
));
86 static int handle_attr_info(struct vio_driver_state
*vio
,
87 struct vio_net_attr_info
*pkt
)
89 viodbg(HS
, "GOT NET ATTR INFO xmode[0x%x] atype[0x%x] addr[%llx] "
90 "ackfreq[%u] mtu[%llu]\n",
91 pkt
->xfer_mode
, pkt
->addr_type
,
92 (unsigned long long) pkt
->addr
,
94 (unsigned long long) pkt
->mtu
);
96 pkt
->tag
.sid
= vio_send_sid(vio
);
98 if (pkt
->xfer_mode
!= VIO_DRING_MODE
||
99 pkt
->addr_type
!= VNET_ADDR_ETHERMAC
||
100 pkt
->mtu
!= ETH_FRAME_LEN
) {
101 viodbg(HS
, "SEND NET ATTR NACK\n");
103 pkt
->tag
.stype
= VIO_SUBTYPE_NACK
;
105 (void) vio_ldc_send(vio
, pkt
, sizeof(*pkt
));
109 viodbg(HS
, "SEND NET ATTR ACK\n");
111 pkt
->tag
.stype
= VIO_SUBTYPE_ACK
;
113 return vio_ldc_send(vio
, pkt
, sizeof(*pkt
));
118 static int handle_attr_ack(struct vio_driver_state
*vio
,
119 struct vio_net_attr_info
*pkt
)
121 viodbg(HS
, "GOT NET ATTR ACK\n");
126 static int handle_attr_nack(struct vio_driver_state
*vio
,
127 struct vio_net_attr_info
*pkt
)
129 viodbg(HS
, "GOT NET ATTR NACK\n");
134 static int vnet_handle_attr(struct vio_driver_state
*vio
, void *arg
)
136 struct vio_net_attr_info
*pkt
= arg
;
138 switch (pkt
->tag
.stype
) {
139 case VIO_SUBTYPE_INFO
:
140 return handle_attr_info(vio
, pkt
);
142 case VIO_SUBTYPE_ACK
:
143 return handle_attr_ack(vio
, pkt
);
145 case VIO_SUBTYPE_NACK
:
146 return handle_attr_nack(vio
, pkt
);
153 static void vnet_handshake_complete(struct vio_driver_state
*vio
)
155 struct vio_dring_state
*dr
;
157 dr
= &vio
->drings
[VIO_DRIVER_RX_RING
];
158 dr
->snd_nxt
= dr
->rcv_nxt
= 1;
160 dr
= &vio
->drings
[VIO_DRIVER_TX_RING
];
161 dr
->snd_nxt
= dr
->rcv_nxt
= 1;
/* The hypervisor interface that implements copying to/from imported
 * memory from another domain requires that copies are done to 8-byte
 * aligned buffers, and that the lengths of such copies are also 8-byte
 * multiples.
 *
 * So we align skb->data to an 8-byte multiple and pad-out the data
 * area so we can round the copy length up to the next multiple of
 * 8 for the copy.
 *
 * The transmitter puts the actual start of the packet 6 bytes into
 * the buffer it sends over, so that the IP headers after the ethernet
 * header are aligned properly.  These 6 bytes are not in the descriptor
 * length, they are simply implied.  This offset is represented using
 * the VNET_PACKET_SKIP macro.
 */
179 static struct sk_buff
*alloc_and_align_skb(struct net_device
*dev
,
182 struct sk_buff
*skb
= netdev_alloc_skb(dev
, len
+VNET_PACKET_SKIP
+8+8);
183 unsigned long addr
, off
;
188 addr
= (unsigned long) skb
->data
;
189 off
= ((addr
+ 7UL) & ~7UL) - addr
;
191 skb_reserve(skb
, off
);
196 static int vnet_rx_one(struct vnet_port
*port
, unsigned int len
,
197 struct ldc_trans_cookie
*cookies
, int ncookies
)
199 struct net_device
*dev
= port
->vp
->dev
;
200 unsigned int copy_len
;
205 if (unlikely(len
< ETH_ZLEN
|| len
> ETH_FRAME_LEN
)) {
206 dev
->stats
.rx_length_errors
++;
210 skb
= alloc_and_align_skb(dev
, len
);
212 if (unlikely(!skb
)) {
213 dev
->stats
.rx_missed_errors
++;
217 copy_len
= (len
+ VNET_PACKET_SKIP
+ 7U) & ~7U;
218 skb_put(skb
, copy_len
);
219 err
= ldc_copy(port
->vio
.lp
, LDC_COPY_IN
,
220 skb
->data
, copy_len
, 0,
222 if (unlikely(err
< 0)) {
223 dev
->stats
.rx_frame_errors
++;
227 skb_pull(skb
, VNET_PACKET_SKIP
);
229 skb
->protocol
= eth_type_trans(skb
, dev
);
231 dev
->stats
.rx_packets
++;
232 dev
->stats
.rx_bytes
+= len
;
242 dev
->stats
.rx_dropped
++;
246 static int vnet_send_ack(struct vnet_port
*port
, struct vio_dring_state
*dr
,
247 u32 start
, u32 end
, u8 vio_dring_state
)
249 struct vio_dring_data hdr
= {
251 .type
= VIO_TYPE_DATA
,
252 .stype
= VIO_SUBTYPE_ACK
,
253 .stype_env
= VIO_DRING_DATA
,
254 .sid
= vio_send_sid(&port
->vio
),
256 .dring_ident
= dr
->ident
,
259 .state
= vio_dring_state
,
263 hdr
.seq
= dr
->snd_nxt
;
266 err
= vio_ldc_send(&port
->vio
, &hdr
, sizeof(hdr
));
272 if ((delay
<<= 1) > 128)
274 } while (err
== -EAGAIN
);
279 static u32
next_idx(u32 idx
, struct vio_dring_state
*dr
)
281 if (++idx
== dr
->num_entries
)
286 static u32
prev_idx(u32 idx
, struct vio_dring_state
*dr
)
289 idx
= dr
->num_entries
- 1;
296 static struct vio_net_desc
*get_rx_desc(struct vnet_port
*port
,
297 struct vio_dring_state
*dr
,
300 struct vio_net_desc
*desc
= port
->vio
.desc_buf
;
303 err
= ldc_get_dring_entry(port
->vio
.lp
, desc
, dr
->entry_size
,
304 (index
* dr
->entry_size
),
305 dr
->cookies
, dr
->ncookies
);
312 static int put_rx_desc(struct vnet_port
*port
,
313 struct vio_dring_state
*dr
,
314 struct vio_net_desc
*desc
,
319 err
= ldc_put_dring_entry(port
->vio
.lp
, desc
, dr
->entry_size
,
320 (index
* dr
->entry_size
),
321 dr
->cookies
, dr
->ncookies
);
328 static int vnet_walk_rx_one(struct vnet_port
*port
,
329 struct vio_dring_state
*dr
,
330 u32 index
, int *needs_ack
)
332 struct vio_net_desc
*desc
= get_rx_desc(port
, dr
, index
);
333 struct vio_driver_state
*vio
= &port
->vio
;
337 return PTR_ERR(desc
);
339 viodbg(DATA
, "vio_walk_rx_one desc[%02x:%02x:%08x:%08x:%llx:%llx]\n",
340 desc
->hdr
.state
, desc
->hdr
.ack
,
341 desc
->size
, desc
->ncookies
,
342 desc
->cookies
[0].cookie_addr
,
343 desc
->cookies
[0].cookie_size
);
345 if (desc
->hdr
.state
!= VIO_DESC_READY
)
347 err
= vnet_rx_one(port
, desc
->size
, desc
->cookies
, desc
->ncookies
);
348 if (err
== -ECONNRESET
)
350 desc
->hdr
.state
= VIO_DESC_DONE
;
351 err
= put_rx_desc(port
, dr
, desc
, index
);
354 *needs_ack
= desc
->hdr
.ack
;
358 static int vnet_walk_rx(struct vnet_port
*port
, struct vio_dring_state
*dr
,
361 struct vio_driver_state
*vio
= &port
->vio
;
362 int ack_start
= -1, ack_end
= -1;
364 end
= (end
== (u32
) -1) ? prev_idx(start
, dr
) : next_idx(end
, dr
);
366 viodbg(DATA
, "vnet_walk_rx start[%08x] end[%08x]\n", start
, end
);
368 while (start
!= end
) {
369 int ack
= 0, err
= vnet_walk_rx_one(port
, dr
, start
, &ack
);
370 if (err
== -ECONNRESET
)
377 start
= next_idx(start
, dr
);
378 if (ack
&& start
!= end
) {
379 err
= vnet_send_ack(port
, dr
, ack_start
, ack_end
,
381 if (err
== -ECONNRESET
)
386 if (unlikely(ack_start
== -1))
387 ack_start
= ack_end
= prev_idx(start
, dr
);
388 return vnet_send_ack(port
, dr
, ack_start
, ack_end
, VIO_DRING_STOPPED
);
391 static int vnet_rx(struct vnet_port
*port
, void *msgbuf
)
393 struct vio_dring_data
*pkt
= msgbuf
;
394 struct vio_dring_state
*dr
= &port
->vio
.drings
[VIO_DRIVER_RX_RING
];
395 struct vio_driver_state
*vio
= &port
->vio
;
397 viodbg(DATA
, "vnet_rx stype_env[%04x] seq[%016llx] rcv_nxt[%016llx]\n",
398 pkt
->tag
.stype_env
, pkt
->seq
, dr
->rcv_nxt
);
400 if (unlikely(pkt
->tag
.stype_env
!= VIO_DRING_DATA
))
402 if (unlikely(pkt
->seq
!= dr
->rcv_nxt
)) {
403 printk(KERN_ERR PFX
"RX out of sequence seq[0x%llx] "
404 "rcv_nxt[0x%llx]\n", pkt
->seq
, dr
->rcv_nxt
);
411 return vnet_walk_rx(port
, dr
, pkt
->start_idx
, pkt
->end_idx
);
414 static int idx_is_pending(struct vio_dring_state
*dr
, u32 end
)
419 while (idx
!= dr
->prod
) {
424 idx
= next_idx(idx
, dr
);
429 static int vnet_ack(struct vnet_port
*port
, void *msgbuf
)
431 struct vio_dring_state
*dr
= &port
->vio
.drings
[VIO_DRIVER_TX_RING
];
432 struct vio_dring_data
*pkt
= msgbuf
;
433 struct net_device
*dev
;
437 if (unlikely(pkt
->tag
.stype_env
!= VIO_DRING_DATA
))
441 if (unlikely(!idx_is_pending(dr
, end
)))
444 dr
->cons
= next_idx(end
, dr
);
448 if (unlikely(netif_queue_stopped(dev
) &&
449 vnet_tx_dring_avail(dr
) >= VNET_TX_WAKEUP_THRESH(dr
)))
/* TX dring NACK handler.
 * NOTE(review): the body was missing from the truncated source; a no-op
 * return 0 is assumed — verify against the original driver.
 */
static int vnet_nack(struct vnet_port *port, void *msgbuf)
{
	/* XXX just reset or similar XXX */
	return 0;
}
460 static int handle_mcast(struct vnet_port
*port
, void *msgbuf
)
462 struct vio_net_mcast_info
*pkt
= msgbuf
;
464 if (pkt
->tag
.stype
!= VIO_SUBTYPE_ACK
)
465 printk(KERN_ERR PFX
"%s: Got unexpected MCAST reply "
466 "[%02x:%02x:%04x:%08x]\n",
476 static void maybe_tx_wakeup(struct vnet
*vp
)
478 struct net_device
*dev
= vp
->dev
;
481 if (likely(netif_queue_stopped(dev
))) {
482 struct vnet_port
*port
;
485 list_for_each_entry(port
, &vp
->port_list
, list
) {
486 struct vio_dring_state
*dr
;
488 dr
= &port
->vio
.drings
[VIO_DRIVER_TX_RING
];
489 if (vnet_tx_dring_avail(dr
) <
490 VNET_TX_WAKEUP_THRESH(dr
)) {
496 netif_wake_queue(dev
);
498 netif_tx_unlock(dev
);
501 static void vnet_event(void *arg
, int event
)
503 struct vnet_port
*port
= arg
;
504 struct vio_driver_state
*vio
= &port
->vio
;
508 spin_lock_irqsave(&vio
->lock
, flags
);
510 if (unlikely(event
== LDC_EVENT_RESET
||
511 event
== LDC_EVENT_UP
)) {
512 vio_link_state_change(vio
, event
);
513 spin_unlock_irqrestore(&vio
->lock
, flags
);
515 if (event
== LDC_EVENT_RESET
)
520 if (unlikely(event
!= LDC_EVENT_DATA_READY
)) {
521 printk(KERN_WARNING PFX
"Unexpected LDC event %d\n", event
);
522 spin_unlock_irqrestore(&vio
->lock
, flags
);
529 struct vio_msg_tag tag
;
533 err
= ldc_read(vio
->lp
, &msgbuf
, sizeof(msgbuf
));
534 if (unlikely(err
< 0)) {
535 if (err
== -ECONNRESET
)
541 viodbg(DATA
, "TAG [%02x:%02x:%04x:%08x]\n",
544 msgbuf
.tag
.stype_env
,
546 err
= vio_validate_sid(vio
, &msgbuf
.tag
);
550 if (likely(msgbuf
.tag
.type
== VIO_TYPE_DATA
)) {
551 if (msgbuf
.tag
.stype
== VIO_SUBTYPE_INFO
) {
552 err
= vnet_rx(port
, &msgbuf
);
553 } else if (msgbuf
.tag
.stype
== VIO_SUBTYPE_ACK
) {
554 err
= vnet_ack(port
, &msgbuf
);
557 } else if (msgbuf
.tag
.stype
== VIO_SUBTYPE_NACK
) {
558 err
= vnet_nack(port
, &msgbuf
);
560 } else if (msgbuf
.tag
.type
== VIO_TYPE_CTRL
) {
561 if (msgbuf
.tag
.stype_env
== VNET_MCAST_INFO
)
562 err
= handle_mcast(port
, &msgbuf
);
564 err
= vio_control_pkt_engine(vio
, &msgbuf
);
568 err
= vnet_handle_unknown(port
, &msgbuf
);
570 if (err
== -ECONNRESET
)
573 spin_unlock(&vio
->lock
);
574 if (unlikely(tx_wakeup
&& err
!= -ECONNRESET
))
575 maybe_tx_wakeup(port
->vp
);
576 local_irq_restore(flags
);
579 static int __vnet_tx_trigger(struct vnet_port
*port
)
581 struct vio_dring_state
*dr
= &port
->vio
.drings
[VIO_DRIVER_TX_RING
];
582 struct vio_dring_data hdr
= {
584 .type
= VIO_TYPE_DATA
,
585 .stype
= VIO_SUBTYPE_INFO
,
586 .stype_env
= VIO_DRING_DATA
,
587 .sid
= vio_send_sid(&port
->vio
),
589 .dring_ident
= dr
->ident
,
590 .start_idx
= dr
->prod
,
595 hdr
.seq
= dr
->snd_nxt
;
598 err
= vio_ldc_send(&port
->vio
, &hdr
, sizeof(hdr
));
604 if ((delay
<<= 1) > 128)
606 } while (err
== -EAGAIN
);
611 struct vnet_port
*__tx_port_find(struct vnet
*vp
, struct sk_buff
*skb
)
613 unsigned int hash
= vnet_hashfn(skb
->data
);
614 struct hlist_head
*hp
= &vp
->port_hash
[hash
];
615 struct hlist_node
*n
;
616 struct vnet_port
*port
;
618 hlist_for_each_entry(port
, n
, hp
, hash
) {
619 if (!compare_ether_addr(port
->raddr
, skb
->data
))
623 if (!list_empty(&vp
->port_list
))
624 port
= list_entry(vp
->port_list
.next
, struct vnet_port
, list
);
629 struct vnet_port
*tx_port_find(struct vnet
*vp
, struct sk_buff
*skb
)
631 struct vnet_port
*ret
;
634 spin_lock_irqsave(&vp
->lock
, flags
);
635 ret
= __tx_port_find(vp
, skb
);
636 spin_unlock_irqrestore(&vp
->lock
, flags
);
641 static int vnet_start_xmit(struct sk_buff
*skb
, struct net_device
*dev
)
643 struct vnet
*vp
= netdev_priv(dev
);
644 struct vnet_port
*port
= tx_port_find(vp
, skb
);
645 struct vio_dring_state
*dr
;
646 struct vio_net_desc
*d
;
655 spin_lock_irqsave(&port
->vio
.lock
, flags
);
657 dr
= &port
->vio
.drings
[VIO_DRIVER_TX_RING
];
658 if (unlikely(vnet_tx_dring_avail(dr
) < 2)) {
659 if (!netif_queue_stopped(dev
)) {
660 netif_stop_queue(dev
);
662 /* This is a hard error, log it. */
663 printk(KERN_ERR PFX
"%s: BUG! Tx Ring full when "
664 "queue awake!\n", dev
->name
);
665 dev
->stats
.tx_errors
++;
667 spin_unlock_irqrestore(&port
->vio
.lock
, flags
);
668 return NETDEV_TX_BUSY
;
671 d
= vio_dring_cur(dr
);
673 tx_buf
= port
->tx_bufs
[dr
->prod
].buf
;
674 skb_copy_from_linear_data(skb
, tx_buf
+ VNET_PACKET_SKIP
, skb
->len
);
677 if (len
< ETH_ZLEN
) {
679 memset(tx_buf
+VNET_PACKET_SKIP
+skb
->len
, 0, len
- skb
->len
);
682 d
->hdr
.ack
= VIO_ACK_ENABLE
;
684 d
->ncookies
= port
->tx_bufs
[dr
->prod
].ncookies
;
685 for (i
= 0; i
< d
->ncookies
; i
++)
686 d
->cookies
[i
] = port
->tx_bufs
[dr
->prod
].cookies
[i
];
688 /* This has to be a non-SMP write barrier because we are writing
689 * to memory which is shared with the peer LDOM.
693 d
->hdr
.state
= VIO_DESC_READY
;
695 err
= __vnet_tx_trigger(port
);
696 if (unlikely(err
< 0)) {
697 printk(KERN_INFO PFX
"%s: TX trigger error %d\n",
699 d
->hdr
.state
= VIO_DESC_FREE
;
700 dev
->stats
.tx_carrier_errors
++;
701 goto out_dropped_unlock
;
704 dev
->stats
.tx_packets
++;
705 dev
->stats
.tx_bytes
+= skb
->len
;
707 dr
->prod
= (dr
->prod
+ 1) & (VNET_TX_RING_SIZE
- 1);
708 if (unlikely(vnet_tx_dring_avail(dr
) < 2)) {
709 netif_stop_queue(dev
);
710 if (vnet_tx_dring_avail(dr
) > VNET_TX_WAKEUP_THRESH(dr
))
711 netif_wake_queue(dev
);
714 spin_unlock_irqrestore(&port
->vio
.lock
, flags
);
721 spin_unlock_irqrestore(&port
->vio
.lock
, flags
);
725 dev
->stats
.tx_dropped
++;
/* Watchdog timeout callback — intentionally unimplemented in this driver. */
static void vnet_tx_timeout(struct net_device *dev)
{
	/* XXX Implement me XXX */
}
/* Bring the interface up: carrier on and queue running. */
static int vnet_open(struct net_device *dev)
{
	netif_carrier_on(dev);
	netif_start_queue(dev);

	return 0;
}
/* Bring the interface down: stop the queue and drop carrier. */
static int vnet_close(struct net_device *dev)
{
	netif_stop_queue(dev);
	netif_carrier_off(dev);

	return 0;
}
749 static struct vnet_mcast_entry
*__vnet_mc_find(struct vnet
*vp
, u8
*addr
)
751 struct vnet_mcast_entry
*m
;
753 for (m
= vp
->mcast_list
; m
; m
= m
->next
) {
754 if (!memcmp(m
->addr
, addr
, ETH_ALEN
))
760 static void __update_mc_list(struct vnet
*vp
, struct net_device
*dev
)
762 struct netdev_hw_addr
*ha
;
764 netdev_for_each_mc_addr(ha
, dev
) {
765 struct vnet_mcast_entry
*m
;
767 m
= __vnet_mc_find(vp
, ha
->addr
);
774 m
= kzalloc(sizeof(*m
), GFP_ATOMIC
);
777 memcpy(m
->addr
, ha
->addr
, ETH_ALEN
);
780 m
->next
= vp
->mcast_list
;
786 static void __send_mc_list(struct vnet
*vp
, struct vnet_port
*port
)
788 struct vio_net_mcast_info info
;
789 struct vnet_mcast_entry
*m
, **pp
;
792 memset(&info
, 0, sizeof(info
));
794 info
.tag
.type
= VIO_TYPE_CTRL
;
795 info
.tag
.stype
= VIO_SUBTYPE_INFO
;
796 info
.tag
.stype_env
= VNET_MCAST_INFO
;
797 info
.tag
.sid
= vio_send_sid(&port
->vio
);
801 for (m
= vp
->mcast_list
; m
; m
= m
->next
) {
805 memcpy(&info
.mcast_addr
[n_addrs
* ETH_ALEN
],
807 if (++n_addrs
== VNET_NUM_MCAST
) {
808 info
.count
= n_addrs
;
810 (void) vio_ldc_send(&port
->vio
, &info
,
816 info
.count
= n_addrs
;
817 (void) vio_ldc_send(&port
->vio
, &info
, sizeof(info
));
823 pp
= &vp
->mcast_list
;
824 while ((m
= *pp
) != NULL
) {
831 memcpy(&info
.mcast_addr
[n_addrs
* ETH_ALEN
],
833 if (++n_addrs
== VNET_NUM_MCAST
) {
834 info
.count
= n_addrs
;
835 (void) vio_ldc_send(&port
->vio
, &info
,
844 info
.count
= n_addrs
;
845 (void) vio_ldc_send(&port
->vio
, &info
, sizeof(info
));
849 static void vnet_set_rx_mode(struct net_device
*dev
)
851 struct vnet
*vp
= netdev_priv(dev
);
852 struct vnet_port
*port
;
855 spin_lock_irqsave(&vp
->lock
, flags
);
856 if (!list_empty(&vp
->port_list
)) {
857 port
= list_entry(vp
->port_list
.next
, struct vnet_port
, list
);
859 if (port
->switch_port
) {
860 __update_mc_list(vp
, dev
);
861 __send_mc_list(vp
, port
);
864 spin_unlock_irqrestore(&vp
->lock
, flags
);
867 static int vnet_change_mtu(struct net_device
*dev
, int new_mtu
)
869 if (new_mtu
!= ETH_DATA_LEN
)
876 static int vnet_set_mac_addr(struct net_device
*dev
, void *p
)
881 static void vnet_get_drvinfo(struct net_device
*dev
,
882 struct ethtool_drvinfo
*info
)
884 strcpy(info
->driver
, DRV_MODULE_NAME
);
885 strcpy(info
->version
, DRV_MODULE_VERSION
);
888 static u32
vnet_get_msglevel(struct net_device
*dev
)
890 struct vnet
*vp
= netdev_priv(dev
);
891 return vp
->msg_enable
;
894 static void vnet_set_msglevel(struct net_device
*dev
, u32 value
)
896 struct vnet
*vp
= netdev_priv(dev
);
897 vp
->msg_enable
= value
;
900 static const struct ethtool_ops vnet_ethtool_ops
= {
901 .get_drvinfo
= vnet_get_drvinfo
,
902 .get_msglevel
= vnet_get_msglevel
,
903 .set_msglevel
= vnet_set_msglevel
,
904 .get_link
= ethtool_op_get_link
,
907 static void vnet_port_free_tx_bufs(struct vnet_port
*port
)
909 struct vio_dring_state
*dr
;
912 dr
= &port
->vio
.drings
[VIO_DRIVER_TX_RING
];
914 ldc_free_exp_dring(port
->vio
.lp
, dr
->base
,
915 (dr
->entry_size
* dr
->num_entries
),
916 dr
->cookies
, dr
->ncookies
);
924 for (i
= 0; i
< VNET_TX_RING_SIZE
; i
++) {
925 void *buf
= port
->tx_bufs
[i
].buf
;
930 ldc_unmap(port
->vio
.lp
,
931 port
->tx_bufs
[i
].cookies
,
932 port
->tx_bufs
[i
].ncookies
);
935 port
->tx_bufs
[i
].buf
= NULL
;
939 static int __devinit
vnet_port_alloc_tx_bufs(struct vnet_port
*port
)
941 struct vio_dring_state
*dr
;
943 int i
, err
, ncookies
;
946 for (i
= 0; i
< VNET_TX_RING_SIZE
; i
++) {
947 void *buf
= kzalloc(ETH_FRAME_LEN
+ 8, GFP_KERNEL
);
948 int map_len
= (ETH_FRAME_LEN
+ 7) & ~7;
952 printk(KERN_ERR
"TX buffer allocation failure\n");
956 if ((unsigned long)buf
& (8UL - 1)) {
957 printk(KERN_ERR
"TX buffer misaligned\n");
962 err
= ldc_map_single(port
->vio
.lp
, buf
, map_len
,
963 port
->tx_bufs
[i
].cookies
, 2,
971 port
->tx_bufs
[i
].buf
= buf
;
972 port
->tx_bufs
[i
].ncookies
= err
;
975 dr
= &port
->vio
.drings
[VIO_DRIVER_TX_RING
];
977 len
= (VNET_TX_RING_SIZE
*
978 (sizeof(struct vio_net_desc
) +
979 (sizeof(struct ldc_trans_cookie
) * 2)));
981 ncookies
= VIO_MAX_RING_COOKIES
;
982 dring
= ldc_alloc_exp_dring(port
->vio
.lp
, len
,
983 dr
->cookies
, &ncookies
,
988 err
= PTR_ERR(dring
);
993 dr
->entry_size
= (sizeof(struct vio_net_desc
) +
994 (sizeof(struct ldc_trans_cookie
) * 2));
995 dr
->num_entries
= VNET_TX_RING_SIZE
;
996 dr
->prod
= dr
->cons
= 0;
997 dr
->pending
= VNET_TX_RING_SIZE
;
998 dr
->ncookies
= ncookies
;
1003 vnet_port_free_tx_bufs(port
);
/* All vnet devices in the system, guarded by vnet_list_mutex. */
static LIST_HEAD(vnet_list);
static DEFINE_MUTEX(vnet_list_mutex);
1011 static const struct net_device_ops vnet_ops
= {
1012 .ndo_open
= vnet_open
,
1013 .ndo_stop
= vnet_close
,
1014 .ndo_set_multicast_list
= vnet_set_rx_mode
,
1015 .ndo_set_mac_address
= vnet_set_mac_addr
,
1016 .ndo_validate_addr
= eth_validate_addr
,
1017 .ndo_tx_timeout
= vnet_tx_timeout
,
1018 .ndo_change_mtu
= vnet_change_mtu
,
1019 .ndo_start_xmit
= vnet_start_xmit
,
1022 static struct vnet
* __devinit
vnet_new(const u64
*local_mac
)
1024 struct net_device
*dev
;
1028 dev
= alloc_etherdev(sizeof(*vp
));
1030 printk(KERN_ERR PFX
"Etherdev alloc failed, aborting.\n");
1031 return ERR_PTR(-ENOMEM
);
1034 for (i
= 0; i
< ETH_ALEN
; i
++)
1035 dev
->dev_addr
[i
] = (*local_mac
>> (5 - i
) * 8) & 0xff;
1037 memcpy(dev
->perm_addr
, dev
->dev_addr
, dev
->addr_len
);
1039 vp
= netdev_priv(dev
);
1041 spin_lock_init(&vp
->lock
);
1044 INIT_LIST_HEAD(&vp
->port_list
);
1045 for (i
= 0; i
< VNET_PORT_HASH_SIZE
; i
++)
1046 INIT_HLIST_HEAD(&vp
->port_hash
[i
]);
1047 INIT_LIST_HEAD(&vp
->list
);
1048 vp
->local_mac
= *local_mac
;
1050 dev
->netdev_ops
= &vnet_ops
;
1051 dev
->ethtool_ops
= &vnet_ethtool_ops
;
1052 dev
->watchdog_timeo
= VNET_TX_TIMEOUT
;
1054 err
= register_netdev(dev
);
1056 printk(KERN_ERR PFX
"Cannot register net device, "
1058 goto err_out_free_dev
;
1061 printk(KERN_INFO
"%s: Sun LDOM vnet %pM\n", dev
->name
, dev
->dev_addr
);
1063 list_add(&vp
->list
, &vnet_list
);
1070 return ERR_PTR(err
);
1073 static struct vnet
* __devinit
vnet_find_or_create(const u64
*local_mac
)
1075 struct vnet
*iter
, *vp
;
1077 mutex_lock(&vnet_list_mutex
);
1079 list_for_each_entry(iter
, &vnet_list
, list
) {
1080 if (iter
->local_mac
== *local_mac
) {
1086 vp
= vnet_new(local_mac
);
1087 mutex_unlock(&vnet_list_mutex
);
static const char *local_mac_prop = "local-mac-address";
1094 static struct vnet
* __devinit
vnet_find_parent(struct mdesc_handle
*hp
,
1097 const u64
*local_mac
= NULL
;
1100 mdesc_for_each_arc(a
, hp
, port_node
, MDESC_ARC_TYPE_BACK
) {
1101 u64 target
= mdesc_arc_target(hp
, a
);
1104 name
= mdesc_get_property(hp
, target
, "name", NULL
);
1105 if (!name
|| strcmp(name
, "network"))
1108 local_mac
= mdesc_get_property(hp
, target
,
1109 local_mac_prop
, NULL
);
1114 return ERR_PTR(-ENODEV
);
1116 return vnet_find_or_create(local_mac
);
1119 static struct ldc_channel_config vnet_ldc_cfg
= {
1120 .event
= vnet_event
,
1122 .mode
= LDC_MODE_UNRELIABLE
,
1125 static struct vio_driver_ops vnet_vio_ops
= {
1126 .send_attr
= vnet_send_attr
,
1127 .handle_attr
= vnet_handle_attr
,
1128 .handshake_complete
= vnet_handshake_complete
,
1131 static void __devinit
print_version(void)
1133 static int version_printed
;
1135 if (version_printed
++ == 0)
1136 printk(KERN_INFO
"%s", version
);
const char *remote_macaddr_prop = "remote-mac-address";
1141 static int __devinit
vnet_port_probe(struct vio_dev
*vdev
,
1142 const struct vio_device_id
*id
)
1144 struct mdesc_handle
*hp
;
1145 struct vnet_port
*port
;
1146 unsigned long flags
;
1149 int len
, i
, err
, switch_port
;
1155 vp
= vnet_find_parent(hp
, vdev
->mp
);
1157 printk(KERN_ERR PFX
"Cannot find port parent vnet.\n");
1159 goto err_out_put_mdesc
;
1162 rmac
= mdesc_get_property(hp
, vdev
->mp
, remote_macaddr_prop
, &len
);
1165 printk(KERN_ERR PFX
"Port lacks %s property.\n",
1166 remote_macaddr_prop
);
1167 goto err_out_put_mdesc
;
1170 port
= kzalloc(sizeof(*port
), GFP_KERNEL
);
1173 printk(KERN_ERR PFX
"Cannot allocate vnet_port.\n");
1174 goto err_out_put_mdesc
;
1177 for (i
= 0; i
< ETH_ALEN
; i
++)
1178 port
->raddr
[i
] = (*rmac
>> (5 - i
) * 8) & 0xff;
1182 err
= vio_driver_init(&port
->vio
, vdev
, VDEV_NETWORK
,
1183 vnet_versions
, ARRAY_SIZE(vnet_versions
),
1184 &vnet_vio_ops
, vp
->dev
->name
);
1186 goto err_out_free_port
;
1188 err
= vio_ldc_alloc(&port
->vio
, &vnet_ldc_cfg
, port
);
1190 goto err_out_free_port
;
1192 err
= vnet_port_alloc_tx_bufs(port
);
1194 goto err_out_free_ldc
;
1196 INIT_HLIST_NODE(&port
->hash
);
1197 INIT_LIST_HEAD(&port
->list
);
1200 if (mdesc_get_property(hp
, vdev
->mp
, "switch-port", NULL
) != NULL
)
1202 port
->switch_port
= switch_port
;
1204 spin_lock_irqsave(&vp
->lock
, flags
);
1206 list_add(&port
->list
, &vp
->port_list
);
1208 list_add_tail(&port
->list
, &vp
->port_list
);
1209 hlist_add_head(&port
->hash
, &vp
->port_hash
[vnet_hashfn(port
->raddr
)]);
1210 spin_unlock_irqrestore(&vp
->lock
, flags
);
1212 dev_set_drvdata(&vdev
->dev
, port
);
1214 printk(KERN_INFO
"%s: PORT ( remote-mac %pM%s )\n",
1215 vp
->dev
->name
, port
->raddr
,
1216 switch_port
? " switch-port" : "");
1218 vio_port_up(&port
->vio
);
1225 vio_ldc_free(&port
->vio
);
1235 static int vnet_port_remove(struct vio_dev
*vdev
)
1237 struct vnet_port
*port
= dev_get_drvdata(&vdev
->dev
);
1240 struct vnet
*vp
= port
->vp
;
1241 unsigned long flags
;
1243 del_timer_sync(&port
->vio
.timer
);
1245 spin_lock_irqsave(&vp
->lock
, flags
);
1246 list_del(&port
->list
);
1247 hlist_del(&port
->hash
);
1248 spin_unlock_irqrestore(&vp
->lock
, flags
);
1250 vnet_port_free_tx_bufs(port
);
1251 vio_ldc_free(&port
->vio
);
1253 dev_set_drvdata(&vdev
->dev
, NULL
);
1260 static const struct vio_device_id vnet_port_match
[] = {
1262 .type
= "vnet-port",
1266 MODULE_DEVICE_TABLE(vio
, vnet_port_match
);
1268 static struct vio_driver vnet_port_driver
= {
1269 .id_table
= vnet_port_match
,
1270 .probe
= vnet_port_probe
,
1271 .remove
= vnet_port_remove
,
1273 .name
= "vnet_port",
1274 .owner
= THIS_MODULE
,
1278 static int __init
vnet_init(void)
1280 return vio_register_driver(&vnet_port_driver
);
1283 static void __exit
vnet_exit(void)
1285 vio_unregister_driver(&vnet_port_driver
);
module_init(vnet_init);
module_exit(vnet_exit);