/* sunvnet.c: Sun LDOM Virtual Network Driver.
 *
 * Copyright (C) 2007, 2008 David S. Miller <davem@davemloft.net>
 */
6 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
8 #include <linux/module.h>
9 #include <linux/kernel.h>
10 #include <linux/types.h>
11 #include <linux/slab.h>
12 #include <linux/delay.h>
13 #include <linux/init.h>
14 #include <linux/netdevice.h>
15 #include <linux/ethtool.h>
16 #include <linux/etherdevice.h>
17 #include <linux/mutex.h>
24 #define DRV_MODULE_NAME "sunvnet"
25 #define DRV_MODULE_VERSION "1.0"
26 #define DRV_MODULE_RELDATE "June 25, 2007"
28 static char version
[] __devinitdata
=
29 DRV_MODULE_NAME
".c:v" DRV_MODULE_VERSION
" (" DRV_MODULE_RELDATE
")\n";
30 MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
31 MODULE_DESCRIPTION("Sun LDOM virtual network driver");
32 MODULE_LICENSE("GPL");
33 MODULE_VERSION(DRV_MODULE_VERSION
);
35 /* Ordered from largest major to lowest */
36 static struct vio_version vnet_versions
[] = {
37 { .major
= 1, .minor
= 0 },
40 static inline u32
vnet_tx_dring_avail(struct vio_dring_state
*dr
)
42 return vio_dring_avail(dr
, VNET_TX_RING_SIZE
);
45 static int vnet_handle_unknown(struct vnet_port
*port
, void *arg
)
47 struct vio_msg_tag
*pkt
= arg
;
49 pr_err("Received unknown msg [%02x:%02x:%04x:%08x]\n",
50 pkt
->type
, pkt
->stype
, pkt
->stype_env
, pkt
->sid
);
51 pr_err("Resetting connection\n");
53 ldc_disconnect(port
->vio
.lp
);
58 static int vnet_send_attr(struct vio_driver_state
*vio
)
60 struct vnet_port
*port
= to_vnet_port(vio
);
61 struct net_device
*dev
= port
->vp
->dev
;
62 struct vio_net_attr_info pkt
;
65 memset(&pkt
, 0, sizeof(pkt
));
66 pkt
.tag
.type
= VIO_TYPE_CTRL
;
67 pkt
.tag
.stype
= VIO_SUBTYPE_INFO
;
68 pkt
.tag
.stype_env
= VIO_ATTR_INFO
;
69 pkt
.tag
.sid
= vio_send_sid(vio
);
70 pkt
.xfer_mode
= VIO_DRING_MODE
;
71 pkt
.addr_type
= VNET_ADDR_ETHERMAC
;
73 for (i
= 0; i
< 6; i
++)
74 pkt
.addr
|= (u64
)dev
->dev_addr
[i
] << ((5 - i
) * 8);
75 pkt
.mtu
= ETH_FRAME_LEN
;
77 viodbg(HS
, "SEND NET ATTR xmode[0x%x] atype[0x%x] addr[%llx] "
78 "ackfreq[%u] mtu[%llu]\n",
79 pkt
.xfer_mode
, pkt
.addr_type
,
80 (unsigned long long) pkt
.addr
,
82 (unsigned long long) pkt
.mtu
);
84 return vio_ldc_send(vio
, &pkt
, sizeof(pkt
));
87 static int handle_attr_info(struct vio_driver_state
*vio
,
88 struct vio_net_attr_info
*pkt
)
90 viodbg(HS
, "GOT NET ATTR INFO xmode[0x%x] atype[0x%x] addr[%llx] "
91 "ackfreq[%u] mtu[%llu]\n",
92 pkt
->xfer_mode
, pkt
->addr_type
,
93 (unsigned long long) pkt
->addr
,
95 (unsigned long long) pkt
->mtu
);
97 pkt
->tag
.sid
= vio_send_sid(vio
);
99 if (pkt
->xfer_mode
!= VIO_DRING_MODE
||
100 pkt
->addr_type
!= VNET_ADDR_ETHERMAC
||
101 pkt
->mtu
!= ETH_FRAME_LEN
) {
102 viodbg(HS
, "SEND NET ATTR NACK\n");
104 pkt
->tag
.stype
= VIO_SUBTYPE_NACK
;
106 (void) vio_ldc_send(vio
, pkt
, sizeof(*pkt
));
110 viodbg(HS
, "SEND NET ATTR ACK\n");
112 pkt
->tag
.stype
= VIO_SUBTYPE_ACK
;
114 return vio_ldc_send(vio
, pkt
, sizeof(*pkt
));
119 static int handle_attr_ack(struct vio_driver_state
*vio
,
120 struct vio_net_attr_info
*pkt
)
122 viodbg(HS
, "GOT NET ATTR ACK\n");
127 static int handle_attr_nack(struct vio_driver_state
*vio
,
128 struct vio_net_attr_info
*pkt
)
130 viodbg(HS
, "GOT NET ATTR NACK\n");
135 static int vnet_handle_attr(struct vio_driver_state
*vio
, void *arg
)
137 struct vio_net_attr_info
*pkt
= arg
;
139 switch (pkt
->tag
.stype
) {
140 case VIO_SUBTYPE_INFO
:
141 return handle_attr_info(vio
, pkt
);
143 case VIO_SUBTYPE_ACK
:
144 return handle_attr_ack(vio
, pkt
);
146 case VIO_SUBTYPE_NACK
:
147 return handle_attr_nack(vio
, pkt
);
154 static void vnet_handshake_complete(struct vio_driver_state
*vio
)
156 struct vio_dring_state
*dr
;
158 dr
= &vio
->drings
[VIO_DRIVER_RX_RING
];
159 dr
->snd_nxt
= dr
->rcv_nxt
= 1;
161 dr
= &vio
->drings
[VIO_DRIVER_TX_RING
];
162 dr
->snd_nxt
= dr
->rcv_nxt
= 1;
/* The hypervisor interface that implements copying to/from imported
 * memory from another domain requires that copies are done to 8-byte
 * aligned buffers, and that the lengths of such copies are also 8-byte
 * multiples.
 *
 * So we align skb->data to an 8-byte multiple and pad-out the data
 * area so we can round the copy length up to the next multiple of
 * 8 for the copy.
 *
 * The transmitter puts the actual start of the packet 6 bytes into
 * the buffer it sends over, so that the IP headers after the ethernet
 * header are aligned properly.  These 6 bytes are not in the descriptor
 * length, they are simply implied.  This offset is represented using
 * the VNET_PACKET_SKIP macro.
 */
180 static struct sk_buff
*alloc_and_align_skb(struct net_device
*dev
,
183 struct sk_buff
*skb
= netdev_alloc_skb(dev
, len
+VNET_PACKET_SKIP
+8+8);
184 unsigned long addr
, off
;
189 addr
= (unsigned long) skb
->data
;
190 off
= ((addr
+ 7UL) & ~7UL) - addr
;
192 skb_reserve(skb
, off
);
197 static int vnet_rx_one(struct vnet_port
*port
, unsigned int len
,
198 struct ldc_trans_cookie
*cookies
, int ncookies
)
200 struct net_device
*dev
= port
->vp
->dev
;
201 unsigned int copy_len
;
206 if (unlikely(len
< ETH_ZLEN
|| len
> ETH_FRAME_LEN
)) {
207 dev
->stats
.rx_length_errors
++;
211 skb
= alloc_and_align_skb(dev
, len
);
213 if (unlikely(!skb
)) {
214 dev
->stats
.rx_missed_errors
++;
218 copy_len
= (len
+ VNET_PACKET_SKIP
+ 7U) & ~7U;
219 skb_put(skb
, copy_len
);
220 err
= ldc_copy(port
->vio
.lp
, LDC_COPY_IN
,
221 skb
->data
, copy_len
, 0,
223 if (unlikely(err
< 0)) {
224 dev
->stats
.rx_frame_errors
++;
228 skb_pull(skb
, VNET_PACKET_SKIP
);
230 skb
->protocol
= eth_type_trans(skb
, dev
);
232 dev
->stats
.rx_packets
++;
233 dev
->stats
.rx_bytes
+= len
;
243 dev
->stats
.rx_dropped
++;
247 static int vnet_send_ack(struct vnet_port
*port
, struct vio_dring_state
*dr
,
248 u32 start
, u32 end
, u8 vio_dring_state
)
250 struct vio_dring_data hdr
= {
252 .type
= VIO_TYPE_DATA
,
253 .stype
= VIO_SUBTYPE_ACK
,
254 .stype_env
= VIO_DRING_DATA
,
255 .sid
= vio_send_sid(&port
->vio
),
257 .dring_ident
= dr
->ident
,
260 .state
= vio_dring_state
,
264 hdr
.seq
= dr
->snd_nxt
;
267 err
= vio_ldc_send(&port
->vio
, &hdr
, sizeof(hdr
));
273 if ((delay
<<= 1) > 128)
275 } while (err
== -EAGAIN
);
280 static u32
next_idx(u32 idx
, struct vio_dring_state
*dr
)
282 if (++idx
== dr
->num_entries
)
287 static u32
prev_idx(u32 idx
, struct vio_dring_state
*dr
)
290 idx
= dr
->num_entries
- 1;
297 static struct vio_net_desc
*get_rx_desc(struct vnet_port
*port
,
298 struct vio_dring_state
*dr
,
301 struct vio_net_desc
*desc
= port
->vio
.desc_buf
;
304 err
= ldc_get_dring_entry(port
->vio
.lp
, desc
, dr
->entry_size
,
305 (index
* dr
->entry_size
),
306 dr
->cookies
, dr
->ncookies
);
313 static int put_rx_desc(struct vnet_port
*port
,
314 struct vio_dring_state
*dr
,
315 struct vio_net_desc
*desc
,
320 err
= ldc_put_dring_entry(port
->vio
.lp
, desc
, dr
->entry_size
,
321 (index
* dr
->entry_size
),
322 dr
->cookies
, dr
->ncookies
);
329 static int vnet_walk_rx_one(struct vnet_port
*port
,
330 struct vio_dring_state
*dr
,
331 u32 index
, int *needs_ack
)
333 struct vio_net_desc
*desc
= get_rx_desc(port
, dr
, index
);
334 struct vio_driver_state
*vio
= &port
->vio
;
338 return PTR_ERR(desc
);
340 viodbg(DATA
, "vio_walk_rx_one desc[%02x:%02x:%08x:%08x:%llx:%llx]\n",
341 desc
->hdr
.state
, desc
->hdr
.ack
,
342 desc
->size
, desc
->ncookies
,
343 desc
->cookies
[0].cookie_addr
,
344 desc
->cookies
[0].cookie_size
);
346 if (desc
->hdr
.state
!= VIO_DESC_READY
)
348 err
= vnet_rx_one(port
, desc
->size
, desc
->cookies
, desc
->ncookies
);
349 if (err
== -ECONNRESET
)
351 desc
->hdr
.state
= VIO_DESC_DONE
;
352 err
= put_rx_desc(port
, dr
, desc
, index
);
355 *needs_ack
= desc
->hdr
.ack
;
359 static int vnet_walk_rx(struct vnet_port
*port
, struct vio_dring_state
*dr
,
362 struct vio_driver_state
*vio
= &port
->vio
;
363 int ack_start
= -1, ack_end
= -1;
365 end
= (end
== (u32
) -1) ? prev_idx(start
, dr
) : next_idx(end
, dr
);
367 viodbg(DATA
, "vnet_walk_rx start[%08x] end[%08x]\n", start
, end
);
369 while (start
!= end
) {
370 int ack
= 0, err
= vnet_walk_rx_one(port
, dr
, start
, &ack
);
371 if (err
== -ECONNRESET
)
378 start
= next_idx(start
, dr
);
379 if (ack
&& start
!= end
) {
380 err
= vnet_send_ack(port
, dr
, ack_start
, ack_end
,
382 if (err
== -ECONNRESET
)
387 if (unlikely(ack_start
== -1))
388 ack_start
= ack_end
= prev_idx(start
, dr
);
389 return vnet_send_ack(port
, dr
, ack_start
, ack_end
, VIO_DRING_STOPPED
);
392 static int vnet_rx(struct vnet_port
*port
, void *msgbuf
)
394 struct vio_dring_data
*pkt
= msgbuf
;
395 struct vio_dring_state
*dr
= &port
->vio
.drings
[VIO_DRIVER_RX_RING
];
396 struct vio_driver_state
*vio
= &port
->vio
;
398 viodbg(DATA
, "vnet_rx stype_env[%04x] seq[%016llx] rcv_nxt[%016llx]\n",
399 pkt
->tag
.stype_env
, pkt
->seq
, dr
->rcv_nxt
);
401 if (unlikely(pkt
->tag
.stype_env
!= VIO_DRING_DATA
))
403 if (unlikely(pkt
->seq
!= dr
->rcv_nxt
)) {
404 pr_err("RX out of sequence seq[0x%llx] rcv_nxt[0x%llx]\n",
405 pkt
->seq
, dr
->rcv_nxt
);
411 /* XXX Validate pkt->start_idx and pkt->end_idx XXX */
413 return vnet_walk_rx(port
, dr
, pkt
->start_idx
, pkt
->end_idx
);
416 static int idx_is_pending(struct vio_dring_state
*dr
, u32 end
)
421 while (idx
!= dr
->prod
) {
426 idx
= next_idx(idx
, dr
);
431 static int vnet_ack(struct vnet_port
*port
, void *msgbuf
)
433 struct vio_dring_state
*dr
= &port
->vio
.drings
[VIO_DRIVER_TX_RING
];
434 struct vio_dring_data
*pkt
= msgbuf
;
435 struct net_device
*dev
;
439 if (unlikely(pkt
->tag
.stype_env
!= VIO_DRING_DATA
))
443 if (unlikely(!idx_is_pending(dr
, end
)))
446 dr
->cons
= next_idx(end
, dr
);
450 if (unlikely(netif_queue_stopped(dev
) &&
451 vnet_tx_dring_avail(dr
) >= VNET_TX_WAKEUP_THRESH(dr
)))
static int vnet_nack(struct vnet_port *port, void *msgbuf)
{
	/* XXX just reset or similar XXX */
	return 0;
}
463 static int handle_mcast(struct vnet_port
*port
, void *msgbuf
)
465 struct vio_net_mcast_info
*pkt
= msgbuf
;
467 if (pkt
->tag
.stype
!= VIO_SUBTYPE_ACK
)
468 pr_err("%s: Got unexpected MCAST reply [%02x:%02x:%04x:%08x]\n",
478 static void maybe_tx_wakeup(struct vnet
*vp
)
480 struct net_device
*dev
= vp
->dev
;
483 if (likely(netif_queue_stopped(dev
))) {
484 struct vnet_port
*port
;
487 list_for_each_entry(port
, &vp
->port_list
, list
) {
488 struct vio_dring_state
*dr
;
490 dr
= &port
->vio
.drings
[VIO_DRIVER_TX_RING
];
491 if (vnet_tx_dring_avail(dr
) <
492 VNET_TX_WAKEUP_THRESH(dr
)) {
498 netif_wake_queue(dev
);
500 netif_tx_unlock(dev
);
503 static void vnet_event(void *arg
, int event
)
505 struct vnet_port
*port
= arg
;
506 struct vio_driver_state
*vio
= &port
->vio
;
510 spin_lock_irqsave(&vio
->lock
, flags
);
512 if (unlikely(event
== LDC_EVENT_RESET
||
513 event
== LDC_EVENT_UP
)) {
514 vio_link_state_change(vio
, event
);
515 spin_unlock_irqrestore(&vio
->lock
, flags
);
517 if (event
== LDC_EVENT_RESET
)
522 if (unlikely(event
!= LDC_EVENT_DATA_READY
)) {
523 pr_warning("Unexpected LDC event %d\n", event
);
524 spin_unlock_irqrestore(&vio
->lock
, flags
);
531 struct vio_msg_tag tag
;
535 err
= ldc_read(vio
->lp
, &msgbuf
, sizeof(msgbuf
));
536 if (unlikely(err
< 0)) {
537 if (err
== -ECONNRESET
)
543 viodbg(DATA
, "TAG [%02x:%02x:%04x:%08x]\n",
546 msgbuf
.tag
.stype_env
,
548 err
= vio_validate_sid(vio
, &msgbuf
.tag
);
552 if (likely(msgbuf
.tag
.type
== VIO_TYPE_DATA
)) {
553 if (msgbuf
.tag
.stype
== VIO_SUBTYPE_INFO
) {
554 err
= vnet_rx(port
, &msgbuf
);
555 } else if (msgbuf
.tag
.stype
== VIO_SUBTYPE_ACK
) {
556 err
= vnet_ack(port
, &msgbuf
);
559 } else if (msgbuf
.tag
.stype
== VIO_SUBTYPE_NACK
) {
560 err
= vnet_nack(port
, &msgbuf
);
562 } else if (msgbuf
.tag
.type
== VIO_TYPE_CTRL
) {
563 if (msgbuf
.tag
.stype_env
== VNET_MCAST_INFO
)
564 err
= handle_mcast(port
, &msgbuf
);
566 err
= vio_control_pkt_engine(vio
, &msgbuf
);
570 err
= vnet_handle_unknown(port
, &msgbuf
);
572 if (err
== -ECONNRESET
)
575 spin_unlock(&vio
->lock
);
576 if (unlikely(tx_wakeup
&& err
!= -ECONNRESET
))
577 maybe_tx_wakeup(port
->vp
);
578 local_irq_restore(flags
);
581 static int __vnet_tx_trigger(struct vnet_port
*port
)
583 struct vio_dring_state
*dr
= &port
->vio
.drings
[VIO_DRIVER_TX_RING
];
584 struct vio_dring_data hdr
= {
586 .type
= VIO_TYPE_DATA
,
587 .stype
= VIO_SUBTYPE_INFO
,
588 .stype_env
= VIO_DRING_DATA
,
589 .sid
= vio_send_sid(&port
->vio
),
591 .dring_ident
= dr
->ident
,
592 .start_idx
= dr
->prod
,
597 hdr
.seq
= dr
->snd_nxt
;
600 err
= vio_ldc_send(&port
->vio
, &hdr
, sizeof(hdr
));
606 if ((delay
<<= 1) > 128)
608 } while (err
== -EAGAIN
);
613 struct vnet_port
*__tx_port_find(struct vnet
*vp
, struct sk_buff
*skb
)
615 unsigned int hash
= vnet_hashfn(skb
->data
);
616 struct hlist_head
*hp
= &vp
->port_hash
[hash
];
617 struct hlist_node
*n
;
618 struct vnet_port
*port
;
620 hlist_for_each_entry(port
, n
, hp
, hash
) {
621 if (!compare_ether_addr(port
->raddr
, skb
->data
))
625 if (!list_empty(&vp
->port_list
))
626 port
= list_entry(vp
->port_list
.next
, struct vnet_port
, list
);
631 struct vnet_port
*tx_port_find(struct vnet
*vp
, struct sk_buff
*skb
)
633 struct vnet_port
*ret
;
636 spin_lock_irqsave(&vp
->lock
, flags
);
637 ret
= __tx_port_find(vp
, skb
);
638 spin_unlock_irqrestore(&vp
->lock
, flags
);
643 static int vnet_start_xmit(struct sk_buff
*skb
, struct net_device
*dev
)
645 struct vnet
*vp
= netdev_priv(dev
);
646 struct vnet_port
*port
= tx_port_find(vp
, skb
);
647 struct vio_dring_state
*dr
;
648 struct vio_net_desc
*d
;
657 spin_lock_irqsave(&port
->vio
.lock
, flags
);
659 dr
= &port
->vio
.drings
[VIO_DRIVER_TX_RING
];
660 if (unlikely(vnet_tx_dring_avail(dr
) < 2)) {
661 if (!netif_queue_stopped(dev
)) {
662 netif_stop_queue(dev
);
664 /* This is a hard error, log it. */
665 netdev_err(dev
, "BUG! Tx Ring full when queue awake!\n");
666 dev
->stats
.tx_errors
++;
668 spin_unlock_irqrestore(&port
->vio
.lock
, flags
);
669 return NETDEV_TX_BUSY
;
672 d
= vio_dring_cur(dr
);
674 tx_buf
= port
->tx_bufs
[dr
->prod
].buf
;
675 skb_copy_from_linear_data(skb
, tx_buf
+ VNET_PACKET_SKIP
, skb
->len
);
678 if (len
< ETH_ZLEN
) {
680 memset(tx_buf
+VNET_PACKET_SKIP
+skb
->len
, 0, len
- skb
->len
);
683 d
->hdr
.ack
= VIO_ACK_ENABLE
;
685 d
->ncookies
= port
->tx_bufs
[dr
->prod
].ncookies
;
686 for (i
= 0; i
< d
->ncookies
; i
++)
687 d
->cookies
[i
] = port
->tx_bufs
[dr
->prod
].cookies
[i
];
689 /* This has to be a non-SMP write barrier because we are writing
690 * to memory which is shared with the peer LDOM.
694 d
->hdr
.state
= VIO_DESC_READY
;
696 err
= __vnet_tx_trigger(port
);
697 if (unlikely(err
< 0)) {
698 netdev_info(dev
, "TX trigger error %d\n", err
);
699 d
->hdr
.state
= VIO_DESC_FREE
;
700 dev
->stats
.tx_carrier_errors
++;
701 goto out_dropped_unlock
;
704 dev
->stats
.tx_packets
++;
705 dev
->stats
.tx_bytes
+= skb
->len
;
707 dr
->prod
= (dr
->prod
+ 1) & (VNET_TX_RING_SIZE
- 1);
708 if (unlikely(vnet_tx_dring_avail(dr
) < 2)) {
709 netif_stop_queue(dev
);
710 if (vnet_tx_dring_avail(dr
) > VNET_TX_WAKEUP_THRESH(dr
))
711 netif_wake_queue(dev
);
714 spin_unlock_irqrestore(&port
->vio
.lock
, flags
);
721 spin_unlock_irqrestore(&port
->vio
.lock
, flags
);
725 dev
->stats
.tx_dropped
++;
static void vnet_tx_timeout(struct net_device *dev)
{
	/* XXX Implement me XXX */
}
/* ndo_open: nothing hardware-side to do, just enable the stack's view. */
static int vnet_open(struct net_device *dev)
{
	netif_carrier_on(dev);
	netif_start_queue(dev);

	return 0;
}
/* ndo_stop: mirror of vnet_open(). */
static int vnet_close(struct net_device *dev)
{
	netif_stop_queue(dev);
	netif_carrier_off(dev);

	return 0;
}
750 static struct vnet_mcast_entry
*__vnet_mc_find(struct vnet
*vp
, u8
*addr
)
752 struct vnet_mcast_entry
*m
;
754 for (m
= vp
->mcast_list
; m
; m
= m
->next
) {
755 if (!memcmp(m
->addr
, addr
, ETH_ALEN
))
761 static void __update_mc_list(struct vnet
*vp
, struct net_device
*dev
)
763 struct netdev_hw_addr
*ha
;
765 netdev_for_each_mc_addr(ha
, dev
) {
766 struct vnet_mcast_entry
*m
;
768 m
= __vnet_mc_find(vp
, ha
->addr
);
775 m
= kzalloc(sizeof(*m
), GFP_ATOMIC
);
778 memcpy(m
->addr
, ha
->addr
, ETH_ALEN
);
781 m
->next
= vp
->mcast_list
;
787 static void __send_mc_list(struct vnet
*vp
, struct vnet_port
*port
)
789 struct vio_net_mcast_info info
;
790 struct vnet_mcast_entry
*m
, **pp
;
793 memset(&info
, 0, sizeof(info
));
795 info
.tag
.type
= VIO_TYPE_CTRL
;
796 info
.tag
.stype
= VIO_SUBTYPE_INFO
;
797 info
.tag
.stype_env
= VNET_MCAST_INFO
;
798 info
.tag
.sid
= vio_send_sid(&port
->vio
);
802 for (m
= vp
->mcast_list
; m
; m
= m
->next
) {
806 memcpy(&info
.mcast_addr
[n_addrs
* ETH_ALEN
],
808 if (++n_addrs
== VNET_NUM_MCAST
) {
809 info
.count
= n_addrs
;
811 (void) vio_ldc_send(&port
->vio
, &info
,
817 info
.count
= n_addrs
;
818 (void) vio_ldc_send(&port
->vio
, &info
, sizeof(info
));
824 pp
= &vp
->mcast_list
;
825 while ((m
= *pp
) != NULL
) {
832 memcpy(&info
.mcast_addr
[n_addrs
* ETH_ALEN
],
834 if (++n_addrs
== VNET_NUM_MCAST
) {
835 info
.count
= n_addrs
;
836 (void) vio_ldc_send(&port
->vio
, &info
,
845 info
.count
= n_addrs
;
846 (void) vio_ldc_send(&port
->vio
, &info
, sizeof(info
));
850 static void vnet_set_rx_mode(struct net_device
*dev
)
852 struct vnet
*vp
= netdev_priv(dev
);
853 struct vnet_port
*port
;
856 spin_lock_irqsave(&vp
->lock
, flags
);
857 if (!list_empty(&vp
->port_list
)) {
858 port
= list_entry(vp
->port_list
.next
, struct vnet_port
, list
);
860 if (port
->switch_port
) {
861 __update_mc_list(vp
, dev
);
862 __send_mc_list(vp
, port
);
865 spin_unlock_irqrestore(&vp
->lock
, flags
);
868 static int vnet_change_mtu(struct net_device
*dev
, int new_mtu
)
870 if (new_mtu
!= ETH_DATA_LEN
)
877 static int vnet_set_mac_addr(struct net_device
*dev
, void *p
)
882 static void vnet_get_drvinfo(struct net_device
*dev
,
883 struct ethtool_drvinfo
*info
)
885 strcpy(info
->driver
, DRV_MODULE_NAME
);
886 strcpy(info
->version
, DRV_MODULE_VERSION
);
889 static u32
vnet_get_msglevel(struct net_device
*dev
)
891 struct vnet
*vp
= netdev_priv(dev
);
892 return vp
->msg_enable
;
895 static void vnet_set_msglevel(struct net_device
*dev
, u32 value
)
897 struct vnet
*vp
= netdev_priv(dev
);
898 vp
->msg_enable
= value
;
901 static const struct ethtool_ops vnet_ethtool_ops
= {
902 .get_drvinfo
= vnet_get_drvinfo
,
903 .get_msglevel
= vnet_get_msglevel
,
904 .set_msglevel
= vnet_set_msglevel
,
905 .get_link
= ethtool_op_get_link
,
908 static void vnet_port_free_tx_bufs(struct vnet_port
*port
)
910 struct vio_dring_state
*dr
;
913 dr
= &port
->vio
.drings
[VIO_DRIVER_TX_RING
];
915 ldc_free_exp_dring(port
->vio
.lp
, dr
->base
,
916 (dr
->entry_size
* dr
->num_entries
),
917 dr
->cookies
, dr
->ncookies
);
925 for (i
= 0; i
< VNET_TX_RING_SIZE
; i
++) {
926 void *buf
= port
->tx_bufs
[i
].buf
;
931 ldc_unmap(port
->vio
.lp
,
932 port
->tx_bufs
[i
].cookies
,
933 port
->tx_bufs
[i
].ncookies
);
936 port
->tx_bufs
[i
].buf
= NULL
;
940 static int __devinit
vnet_port_alloc_tx_bufs(struct vnet_port
*port
)
942 struct vio_dring_state
*dr
;
944 int i
, err
, ncookies
;
947 for (i
= 0; i
< VNET_TX_RING_SIZE
; i
++) {
948 void *buf
= kzalloc(ETH_FRAME_LEN
+ 8, GFP_KERNEL
);
949 int map_len
= (ETH_FRAME_LEN
+ 7) & ~7;
953 pr_err("TX buffer allocation failure\n");
957 if ((unsigned long)buf
& (8UL - 1)) {
958 pr_err("TX buffer misaligned\n");
963 err
= ldc_map_single(port
->vio
.lp
, buf
, map_len
,
964 port
->tx_bufs
[i
].cookies
, 2,
972 port
->tx_bufs
[i
].buf
= buf
;
973 port
->tx_bufs
[i
].ncookies
= err
;
976 dr
= &port
->vio
.drings
[VIO_DRIVER_TX_RING
];
978 len
= (VNET_TX_RING_SIZE
*
979 (sizeof(struct vio_net_desc
) +
980 (sizeof(struct ldc_trans_cookie
) * 2)));
982 ncookies
= VIO_MAX_RING_COOKIES
;
983 dring
= ldc_alloc_exp_dring(port
->vio
.lp
, len
,
984 dr
->cookies
, &ncookies
,
989 err
= PTR_ERR(dring
);
994 dr
->entry_size
= (sizeof(struct vio_net_desc
) +
995 (sizeof(struct ldc_trans_cookie
) * 2));
996 dr
->num_entries
= VNET_TX_RING_SIZE
;
997 dr
->prod
= dr
->cons
= 0;
998 dr
->pending
= VNET_TX_RING_SIZE
;
999 dr
->ncookies
= ncookies
;
1004 vnet_port_free_tx_bufs(port
);
/* All vnet devices in the system, protected by vnet_list_mutex. */
static LIST_HEAD(vnet_list);
static DEFINE_MUTEX(vnet_list_mutex);
1012 static const struct net_device_ops vnet_ops
= {
1013 .ndo_open
= vnet_open
,
1014 .ndo_stop
= vnet_close
,
1015 .ndo_set_multicast_list
= vnet_set_rx_mode
,
1016 .ndo_set_mac_address
= vnet_set_mac_addr
,
1017 .ndo_validate_addr
= eth_validate_addr
,
1018 .ndo_tx_timeout
= vnet_tx_timeout
,
1019 .ndo_change_mtu
= vnet_change_mtu
,
1020 .ndo_start_xmit
= vnet_start_xmit
,
1023 static struct vnet
* __devinit
vnet_new(const u64
*local_mac
)
1025 struct net_device
*dev
;
1029 dev
= alloc_etherdev(sizeof(*vp
));
1031 pr_err("Etherdev alloc failed, aborting\n");
1032 return ERR_PTR(-ENOMEM
);
1035 for (i
= 0; i
< ETH_ALEN
; i
++)
1036 dev
->dev_addr
[i
] = (*local_mac
>> (5 - i
) * 8) & 0xff;
1038 memcpy(dev
->perm_addr
, dev
->dev_addr
, dev
->addr_len
);
1040 vp
= netdev_priv(dev
);
1042 spin_lock_init(&vp
->lock
);
1045 INIT_LIST_HEAD(&vp
->port_list
);
1046 for (i
= 0; i
< VNET_PORT_HASH_SIZE
; i
++)
1047 INIT_HLIST_HEAD(&vp
->port_hash
[i
]);
1048 INIT_LIST_HEAD(&vp
->list
);
1049 vp
->local_mac
= *local_mac
;
1051 dev
->netdev_ops
= &vnet_ops
;
1052 dev
->ethtool_ops
= &vnet_ethtool_ops
;
1053 dev
->watchdog_timeo
= VNET_TX_TIMEOUT
;
1055 err
= register_netdev(dev
);
1057 pr_err("Cannot register net device, aborting\n");
1058 goto err_out_free_dev
;
1061 netdev_info(dev
, "Sun LDOM vnet %pM\n", dev
->dev_addr
);
1063 list_add(&vp
->list
, &vnet_list
);
1070 return ERR_PTR(err
);
1073 static struct vnet
* __devinit
vnet_find_or_create(const u64
*local_mac
)
1075 struct vnet
*iter
, *vp
;
1077 mutex_lock(&vnet_list_mutex
);
1079 list_for_each_entry(iter
, &vnet_list
, list
) {
1080 if (iter
->local_mac
== *local_mac
) {
1086 vp
= vnet_new(local_mac
);
1087 mutex_unlock(&vnet_list_mutex
);
static const char *local_mac_prop = "local-mac-address";
1094 static struct vnet
* __devinit
vnet_find_parent(struct mdesc_handle
*hp
,
1097 const u64
*local_mac
= NULL
;
1100 mdesc_for_each_arc(a
, hp
, port_node
, MDESC_ARC_TYPE_BACK
) {
1101 u64 target
= mdesc_arc_target(hp
, a
);
1104 name
= mdesc_get_property(hp
, target
, "name", NULL
);
1105 if (!name
|| strcmp(name
, "network"))
1108 local_mac
= mdesc_get_property(hp
, target
,
1109 local_mac_prop
, NULL
);
1114 return ERR_PTR(-ENODEV
);
1116 return vnet_find_or_create(local_mac
);
1119 static struct ldc_channel_config vnet_ldc_cfg
= {
1120 .event
= vnet_event
,
1122 .mode
= LDC_MODE_UNRELIABLE
,
1125 static struct vio_driver_ops vnet_vio_ops
= {
1126 .send_attr
= vnet_send_attr
,
1127 .handle_attr
= vnet_handle_attr
,
1128 .handshake_complete
= vnet_handshake_complete
,
1131 static void __devinit
print_version(void)
1133 printk_once(KERN_INFO
"%s", version
);
const char *remote_macaddr_prop = "remote-mac-address";
1138 static int __devinit
vnet_port_probe(struct vio_dev
*vdev
,
1139 const struct vio_device_id
*id
)
1141 struct mdesc_handle
*hp
;
1142 struct vnet_port
*port
;
1143 unsigned long flags
;
1146 int len
, i
, err
, switch_port
;
1152 vp
= vnet_find_parent(hp
, vdev
->mp
);
1154 pr_err("Cannot find port parent vnet\n");
1156 goto err_out_put_mdesc
;
1159 rmac
= mdesc_get_property(hp
, vdev
->mp
, remote_macaddr_prop
, &len
);
1162 pr_err("Port lacks %s property\n", remote_macaddr_prop
);
1163 goto err_out_put_mdesc
;
1166 port
= kzalloc(sizeof(*port
), GFP_KERNEL
);
1169 pr_err("Cannot allocate vnet_port\n");
1170 goto err_out_put_mdesc
;
1173 for (i
= 0; i
< ETH_ALEN
; i
++)
1174 port
->raddr
[i
] = (*rmac
>> (5 - i
) * 8) & 0xff;
1178 err
= vio_driver_init(&port
->vio
, vdev
, VDEV_NETWORK
,
1179 vnet_versions
, ARRAY_SIZE(vnet_versions
),
1180 &vnet_vio_ops
, vp
->dev
->name
);
1182 goto err_out_free_port
;
1184 err
= vio_ldc_alloc(&port
->vio
, &vnet_ldc_cfg
, port
);
1186 goto err_out_free_port
;
1188 err
= vnet_port_alloc_tx_bufs(port
);
1190 goto err_out_free_ldc
;
1192 INIT_HLIST_NODE(&port
->hash
);
1193 INIT_LIST_HEAD(&port
->list
);
1196 if (mdesc_get_property(hp
, vdev
->mp
, "switch-port", NULL
) != NULL
)
1198 port
->switch_port
= switch_port
;
1200 spin_lock_irqsave(&vp
->lock
, flags
);
1202 list_add(&port
->list
, &vp
->port_list
);
1204 list_add_tail(&port
->list
, &vp
->port_list
);
1205 hlist_add_head(&port
->hash
, &vp
->port_hash
[vnet_hashfn(port
->raddr
)]);
1206 spin_unlock_irqrestore(&vp
->lock
, flags
);
1208 dev_set_drvdata(&vdev
->dev
, port
);
1210 pr_info("%s: PORT ( remote-mac %pM%s )\n",
1211 vp
->dev
->name
, port
->raddr
, switch_port
? " switch-port" : "");
1213 vio_port_up(&port
->vio
);
1220 vio_ldc_free(&port
->vio
);
1230 static int vnet_port_remove(struct vio_dev
*vdev
)
1232 struct vnet_port
*port
= dev_get_drvdata(&vdev
->dev
);
1235 struct vnet
*vp
= port
->vp
;
1236 unsigned long flags
;
1238 del_timer_sync(&port
->vio
.timer
);
1240 spin_lock_irqsave(&vp
->lock
, flags
);
1241 list_del(&port
->list
);
1242 hlist_del(&port
->hash
);
1243 spin_unlock_irqrestore(&vp
->lock
, flags
);
1245 vnet_port_free_tx_bufs(port
);
1246 vio_ldc_free(&port
->vio
);
1248 dev_set_drvdata(&vdev
->dev
, NULL
);
1255 static const struct vio_device_id vnet_port_match
[] = {
1257 .type
= "vnet-port",
1261 MODULE_DEVICE_TABLE(vio
, vnet_port_match
);
1263 static struct vio_driver vnet_port_driver
= {
1264 .id_table
= vnet_port_match
,
1265 .probe
= vnet_port_probe
,
1266 .remove
= vnet_port_remove
,
1268 .name
= "vnet_port",
1269 .owner
= THIS_MODULE
,
1273 static int __init
vnet_init(void)
1275 return vio_register_driver(&vnet_port_driver
);
1278 static void __exit
vnet_exit(void)
1280 vio_unregister_driver(&vnet_port_driver
);
module_init(vnet_init);
module_exit(vnet_exit);