/*
 * Linux driver for VMware's vmxnet3 ethernet NIC.
 *
 * Copyright (C) 2008-2009, VMware, Inc. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; version 2 of the License and no later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Maintained by: Shreyas Bhatewara <pv-drivers@vmware.com>
 *
 */

#include <net/ip6_checksum.h>

#include "vmxnet3_int.h"
char vmxnet3_driver_name[] = "vmxnet3";
#define VMXNET3_DRIVER_DESC "VMware vmxnet3 virtual NIC driver"

/*
 * Last entry must be all 0s
 */
static DEFINE_PCI_DEVICE_TABLE(vmxnet3_pciid_table) = {
	{PCI_VDEVICE(VMWARE, PCI_DEVICE_ID_VMWARE_VMXNET3)},
	{0}
};

MODULE_DEVICE_TABLE(pci, vmxnet3_pciid_table);

static atomic_t devices_found;
/*
 *    Enable/Disable the given intr
 */
static void
vmxnet3_enable_intr(struct vmxnet3_adapter *adapter, unsigned intr_idx)
{
	VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_IMR + intr_idx * 8, 0);
}


static void
vmxnet3_disable_intr(struct vmxnet3_adapter *adapter, unsigned intr_idx)
{
	VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_IMR + intr_idx * 8, 1);
}
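
/*
 * Illustrative sketch (not part of the original driver): each interrupt
 * index owns an 8-byte-strided IMR slot in BAR0, so the register offset
 * written above can be computed as below. The helper name is made up for
 * illustration only.
 */
static inline u32 example_imr_offset(unsigned intr_idx)
{
	/* slot i lives at VMXNET3_REG_IMR + i * 8 */
	return VMXNET3_REG_IMR + intr_idx * 8;
}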
/*
 *    Enable/Disable all intrs used by the device
 */
static void
vmxnet3_enable_all_intrs(struct vmxnet3_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->intr.num_intrs; i++)
		vmxnet3_enable_intr(adapter, i);
	adapter->shared->devRead.intrConf.intrCtrl &=
				cpu_to_le32(~VMXNET3_IC_DISABLE_ALL);
}


static void
vmxnet3_disable_all_intrs(struct vmxnet3_adapter *adapter)
{
	int i;

	adapter->shared->devRead.intrConf.intrCtrl |=
				cpu_to_le32(VMXNET3_IC_DISABLE_ALL);
	for (i = 0; i < adapter->intr.num_intrs; i++)
		vmxnet3_disable_intr(adapter, i);
}
static void
vmxnet3_ack_events(struct vmxnet3_adapter *adapter, u32 events)
{
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_ECR, events);
}
static bool
vmxnet3_tq_stopped(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
{
	return netif_queue_stopped(adapter->netdev);
}


static void
vmxnet3_tq_start(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
{
	tq->stopped = false;
	netif_start_queue(adapter->netdev);
}


static void
vmxnet3_tq_wake(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
{
	tq->stopped = false;
	netif_wake_queue(adapter->netdev);
}


static void
vmxnet3_tq_stop(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
{
	tq->stopped = true;
	netif_stop_queue(adapter->netdev);
}
/*
 * Check the link state. This may start or stop the tx queue.
 */
static void
vmxnet3_check_link(struct vmxnet3_adapter *adapter, bool affectTxQueue)
{
	u32 ret;

	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_LINK);
	ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
	adapter->link_speed = ret >> 16;
	if (ret & 1) { /* Link is up. */
		printk(KERN_INFO "%s: NIC Link is Up %d Mbps\n",
		       adapter->netdev->name, adapter->link_speed);
		if (!netif_carrier_ok(adapter->netdev))
			netif_carrier_on(adapter->netdev);

		if (affectTxQueue)
			vmxnet3_tq_start(&adapter->tx_queue, adapter);
	} else {
		printk(KERN_INFO "%s: NIC Link is Down\n",
		       adapter->netdev->name);
		if (netif_carrier_ok(adapter->netdev))
			netif_carrier_off(adapter->netdev);

		if (affectTxQueue)
			vmxnet3_tq_stop(&adapter->tx_queue, adapter);
	}
}
static void
vmxnet3_process_events(struct vmxnet3_adapter *adapter)
{
	u32 events = le32_to_cpu(adapter->shared->ecr);

	if (!events)
		return;

	vmxnet3_ack_events(adapter, events);

	/* Check if link state has changed */
	if (events & VMXNET3_ECR_LINK)
		vmxnet3_check_link(adapter, true);

	/* Check if there is an error on xmit/recv queues */
	if (events & (VMXNET3_ECR_TQERR | VMXNET3_ECR_RQERR)) {
		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
				       VMXNET3_CMD_GET_QUEUE_STATUS);

		if (adapter->tqd_start->status.stopped) {
			printk(KERN_ERR "%s: tq error 0x%x\n",
			       adapter->netdev->name,
			       le32_to_cpu(adapter->tqd_start->status.error));
		}
		if (adapter->rqd_start->status.stopped) {
			printk(KERN_ERR "%s: rq error 0x%x\n",
			       adapter->netdev->name,
			       adapter->rqd_start->status.error);
		}

		schedule_work(&adapter->work);
	}
}
#ifdef __BIG_ENDIAN_BITFIELD
/*
 * The device expects the bitfields in shared structures to be written in
 * little endian. When CPU is big endian, the following routines are used to
 * correctly read and write into ABI.
 * The general technique used here is : double word bitfields are defined in
 * opposite order for big endian architecture. Then before reading them in
 * driver the complete double word is translated using le32_to_cpu. Similarly
 * after the driver writes into bitfields, cpu_to_le32 is used to translate the
 * double words into required format.
 * In order to avoid touching bits in shared structure more than once,
 * temporary descriptors are used. These are passed as srcDesc to following
 * functions.
 */
static void vmxnet3_RxDescToCPU(const struct Vmxnet3_RxDesc *srcDesc,
				struct Vmxnet3_RxDesc *dstDesc)
{
	u32 *src = (u32 *)srcDesc + 2;
	u32 *dst = (u32 *)dstDesc + 2;
	dstDesc->addr = le64_to_cpu(srcDesc->addr);
	*dst = le32_to_cpu(*src);
	dstDesc->ext1 = le32_to_cpu(srcDesc->ext1);
}


static void vmxnet3_TxDescToLe(const struct Vmxnet3_TxDesc *srcDesc,
			       struct Vmxnet3_TxDesc *dstDesc)
{
	int i;
	u32 *src = (u32 *)(srcDesc + 1);
	u32 *dst = (u32 *)(dstDesc + 1);

	/* Working backwards so that the gen bit is set at the end. */
	for (i = 2; i > 0; i--) {
		src--;
		dst--;
		*dst = cpu_to_le32(*src);
	}
}


static void vmxnet3_RxCompToCPU(const struct Vmxnet3_RxCompDesc *srcDesc,
				struct Vmxnet3_RxCompDesc *dstDesc)
{
	int i;
	u32 *src = (u32 *)srcDesc;
	u32 *dst = (u32 *)dstDesc;
	for (i = 0; i < sizeof(struct Vmxnet3_RxCompDesc) / sizeof(u32); i++) {
		*dst = le32_to_cpu(*src);
		src++;
		dst++;
	}
}


/* Used to read bitfield values from double words. */
static u32 get_bitfield32(const __le32 *bitfield, u32 pos, u32 size)
{
	u32 temp = le32_to_cpu(*bitfield);
	u32 mask = ((1 << size) - 1) << pos;
	temp &= mask;
	temp >>= pos;
	return temp;
}


#endif  /* __BIG_ENDIAN_BITFIELD */
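
/*
 * Illustrative sketch (not part of the original driver): extracting a
 * hypothetical 1-bit flag at bit position 7 of a little-endian double word
 * with get_bitfield32(). The position and size here are made-up example
 * values, not the real VMXNET3_TXD_* constants.
 */
#ifdef __BIG_ENDIAN_BITFIELD
static inline u32 example_get_flag(const __le32 *dword)
{
	/* mask = ((1 << 1) - 1) << 7 selects bit 7 after the byte swap */
	return get_bitfield32(dword, 7, 1);
}
#endif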
#ifdef __BIG_ENDIAN_BITFIELD

#  define VMXNET3_TXDESC_GET_GEN(txdesc) get_bitfield32(((const __le32 *) \
			txdesc) + VMXNET3_TXD_GEN_DWORD_SHIFT, \
			VMXNET3_TXD_GEN_SHIFT, VMXNET3_TXD_GEN_SIZE)
#  define VMXNET3_TXDESC_GET_EOP(txdesc) get_bitfield32(((const __le32 *) \
			txdesc) + VMXNET3_TXD_EOP_DWORD_SHIFT, \
			VMXNET3_TXD_EOP_SHIFT, VMXNET3_TXD_EOP_SIZE)
#  define VMXNET3_TCD_GET_GEN(tcd) get_bitfield32(((const __le32 *)tcd) + \
			VMXNET3_TCD_GEN_DWORD_SHIFT, VMXNET3_TCD_GEN_SHIFT, \
			VMXNET3_TCD_GEN_SIZE)
#  define VMXNET3_TCD_GET_TXIDX(tcd) get_bitfield32((const __le32 *)tcd, \
			   VMXNET3_TCD_TXIDX_SHIFT, VMXNET3_TCD_TXIDX_SIZE)
#  define vmxnet3_getRxComp(dstrcd, rcd, tmp) do { \
			(dstrcd) = (tmp); \
			vmxnet3_RxCompToCPU((rcd), (tmp)); \
		} while (0)
#  define vmxnet3_getRxDesc(dstrxd, rxd, tmp) do { \
			(dstrxd) = (tmp); \
			vmxnet3_RxDescToCPU((rxd), (tmp)); \
		} while (0)

#else

#  define VMXNET3_TXDESC_GET_GEN(txdesc) ((txdesc)->gen)
#  define VMXNET3_TXDESC_GET_EOP(txdesc) ((txdesc)->eop)
#  define VMXNET3_TCD_GET_GEN(tcd) ((tcd)->gen)
#  define VMXNET3_TCD_GET_TXIDX(tcd) ((tcd)->txdIdx)
#  define vmxnet3_getRxComp(dstrcd, rcd, tmp) (dstrcd) = (rcd)
#  define vmxnet3_getRxDesc(dstrxd, rxd, tmp) (dstrxd) = (rxd)

#endif /* __BIG_ENDIAN_BITFIELD */
static void
vmxnet3_unmap_tx_buf(struct vmxnet3_tx_buf_info *tbi,
		     struct pci_dev *pdev)
{
	if (tbi->map_type == VMXNET3_MAP_SINGLE)
		pci_unmap_single(pdev, tbi->dma_addr, tbi->len,
				 PCI_DMA_TODEVICE);
	else if (tbi->map_type == VMXNET3_MAP_PAGE)
		pci_unmap_page(pdev, tbi->dma_addr, tbi->len,
			       PCI_DMA_TODEVICE);
	else
		BUG_ON(tbi->map_type != VMXNET3_MAP_NONE);

	tbi->map_type = VMXNET3_MAP_NONE; /* to help debugging */
}
static int
vmxnet3_unmap_pkt(u32 eop_idx, struct vmxnet3_tx_queue *tq,
		  struct pci_dev *pdev, struct vmxnet3_adapter *adapter)
{
	struct sk_buff *skb;
	int entries = 0;

	/* no out of order completion */
	BUG_ON(tq->buf_info[eop_idx].sop_idx != tq->tx_ring.next2comp);
	BUG_ON(VMXNET3_TXDESC_GET_EOP(&(tq->tx_ring.base[eop_idx].txd)) != 1);

	skb = tq->buf_info[eop_idx].skb;
	BUG_ON(skb == NULL);
	tq->buf_info[eop_idx].skb = NULL;

	VMXNET3_INC_RING_IDX_ONLY(eop_idx, tq->tx_ring.size);

	while (tq->tx_ring.next2comp != eop_idx) {
		vmxnet3_unmap_tx_buf(tq->buf_info + tq->tx_ring.next2comp,
				     pdev);

		/* update next2comp w/o tx_lock. Since we are marking more,
		 * instead of less, tx ring entries avail, the worst case is
		 * that the tx routine incorrectly re-queues a pkt due to
		 * insufficient tx ring entries.
		 */
		vmxnet3_cmd_ring_adv_next2comp(&tq->tx_ring);
		entries++;
	}

	dev_kfree_skb_any(skb);
	return entries;
}
static int
vmxnet3_tq_tx_complete(struct vmxnet3_tx_queue *tq,
		       struct vmxnet3_adapter *adapter)
{
	int completed = 0;
	union Vmxnet3_GenericDesc *gdesc;

	gdesc = tq->comp_ring.base + tq->comp_ring.next2proc;
	while (VMXNET3_TCD_GET_GEN(&gdesc->tcd) == tq->comp_ring.gen) {
		completed += vmxnet3_unmap_pkt(VMXNET3_TCD_GET_TXIDX(
					       &gdesc->tcd), tq, adapter->pdev,
					       adapter);

		vmxnet3_comp_ring_adv_next2proc(&tq->comp_ring);
		gdesc = tq->comp_ring.base + tq->comp_ring.next2proc;
	}

	if (completed) {
		spin_lock(&tq->tx_lock);
		if (unlikely(vmxnet3_tq_stopped(tq, adapter) &&
			     vmxnet3_cmd_ring_desc_avail(&tq->tx_ring) >
			     VMXNET3_WAKE_QUEUE_THRESHOLD(tq) &&
			     netif_carrier_ok(adapter->netdev))) {
			vmxnet3_tq_wake(tq, adapter);
		}
		spin_unlock(&tq->tx_lock);
	}
	return completed;
}
static void
vmxnet3_tq_cleanup(struct vmxnet3_tx_queue *tq,
		   struct vmxnet3_adapter *adapter)
{
	int i;

	while (tq->tx_ring.next2comp != tq->tx_ring.next2fill) {
		struct vmxnet3_tx_buf_info *tbi;
		union Vmxnet3_GenericDesc *gdesc;

		tbi = tq->buf_info + tq->tx_ring.next2comp;
		gdesc = tq->tx_ring.base + tq->tx_ring.next2comp;

		vmxnet3_unmap_tx_buf(tbi, adapter->pdev);
		if (tbi->skb) {
			dev_kfree_skb_any(tbi->skb);
			tbi->skb = NULL;
		}
		vmxnet3_cmd_ring_adv_next2comp(&tq->tx_ring);
	}

	/* sanity check, verify all buffers are indeed unmapped and freed */
	for (i = 0; i < tq->tx_ring.size; i++) {
		BUG_ON(tq->buf_info[i].skb != NULL ||
		       tq->buf_info[i].map_type != VMXNET3_MAP_NONE);
	}

	tq->tx_ring.gen = VMXNET3_INIT_GEN;
	tq->tx_ring.next2fill = tq->tx_ring.next2comp = 0;

	tq->comp_ring.gen = VMXNET3_INIT_GEN;
	tq->comp_ring.next2proc = 0;
}
static void
vmxnet3_tq_destroy(struct vmxnet3_tx_queue *tq,
		   struct vmxnet3_adapter *adapter)
{
	if (tq->tx_ring.base) {
		pci_free_consistent(adapter->pdev, tq->tx_ring.size *
				    sizeof(struct Vmxnet3_TxDesc),
				    tq->tx_ring.base, tq->tx_ring.basePA);
		tq->tx_ring.base = NULL;
	}
	if (tq->data_ring.base) {
		pci_free_consistent(adapter->pdev, tq->data_ring.size *
				    sizeof(struct Vmxnet3_TxDataDesc),
				    tq->data_ring.base, tq->data_ring.basePA);
		tq->data_ring.base = NULL;
	}
	if (tq->comp_ring.base) {
		pci_free_consistent(adapter->pdev, tq->comp_ring.size *
				    sizeof(struct Vmxnet3_TxCompDesc),
				    tq->comp_ring.base, tq->comp_ring.basePA);
		tq->comp_ring.base = NULL;
	}
	kfree(tq->buf_info);
	tq->buf_info = NULL;
}
static void
vmxnet3_tq_init(struct vmxnet3_tx_queue *tq,
		struct vmxnet3_adapter *adapter)
{
	int i;

	/* reset the tx ring contents to 0 and reset the tx ring states */
	memset(tq->tx_ring.base, 0, tq->tx_ring.size *
	       sizeof(struct Vmxnet3_TxDesc));
	tq->tx_ring.next2fill = tq->tx_ring.next2comp = 0;
	tq->tx_ring.gen = VMXNET3_INIT_GEN;

	memset(tq->data_ring.base, 0, tq->data_ring.size *
	       sizeof(struct Vmxnet3_TxDataDesc));

	/* reset the tx comp ring contents to 0 and reset comp ring states */
	memset(tq->comp_ring.base, 0, tq->comp_ring.size *
	       sizeof(struct Vmxnet3_TxCompDesc));
	tq->comp_ring.next2proc = 0;
	tq->comp_ring.gen = VMXNET3_INIT_GEN;

	/* reset the bookkeeping data */
	memset(tq->buf_info, 0, sizeof(tq->buf_info[0]) * tq->tx_ring.size);
	for (i = 0; i < tq->tx_ring.size; i++)
		tq->buf_info[i].map_type = VMXNET3_MAP_NONE;

	/* stats are not reset */
}
static int
vmxnet3_tq_create(struct vmxnet3_tx_queue *tq,
		  struct vmxnet3_adapter *adapter)
{
	BUG_ON(tq->tx_ring.base || tq->data_ring.base ||
	       tq->comp_ring.base || tq->buf_info);

	tq->tx_ring.base = pci_alloc_consistent(adapter->pdev, tq->tx_ring.size
			   * sizeof(struct Vmxnet3_TxDesc),
			   &tq->tx_ring.basePA);
	if (!tq->tx_ring.base) {
		printk(KERN_ERR "%s: failed to allocate tx ring\n",
		       adapter->netdev->name);
		goto err;
	}

	tq->data_ring.base = pci_alloc_consistent(adapter->pdev,
			     tq->data_ring.size *
			     sizeof(struct Vmxnet3_TxDataDesc),
			     &tq->data_ring.basePA);
	if (!tq->data_ring.base) {
		printk(KERN_ERR "%s: failed to allocate data ring\n",
		       adapter->netdev->name);
		goto err;
	}

	tq->comp_ring.base = pci_alloc_consistent(adapter->pdev,
			     tq->comp_ring.size *
			     sizeof(struct Vmxnet3_TxCompDesc),
			     &tq->comp_ring.basePA);
	if (!tq->comp_ring.base) {
		printk(KERN_ERR "%s: failed to allocate tx comp ring\n",
		       adapter->netdev->name);
		goto err;
	}

	tq->buf_info = kcalloc(tq->tx_ring.size, sizeof(tq->buf_info[0]),
			       GFP_KERNEL);
	if (!tq->buf_info) {
		printk(KERN_ERR "%s: failed to allocate tx bufinfo\n",
		       adapter->netdev->name);
		goto err;
	}

	return 0;

err:
	vmxnet3_tq_destroy(tq, adapter);
	return -ENOMEM;
}
/*
 *    starting from ring->next2fill, allocate rx buffers for the given ring
 *    of the rx queue and update the rx desc. stop after @num_to_alloc buffers
 *    are allocated or allocation fails
 */
static int
vmxnet3_rq_alloc_rx_buf(struct vmxnet3_rx_queue *rq, u32 ring_idx,
			int num_to_alloc, struct vmxnet3_adapter *adapter)
{
	int num_allocated = 0;
	struct vmxnet3_rx_buf_info *rbi_base = rq->buf_info[ring_idx];
	struct vmxnet3_cmd_ring *ring = &rq->rx_ring[ring_idx];
	u32 val;

	while (num_allocated < num_to_alloc) {
		struct vmxnet3_rx_buf_info *rbi;
		union Vmxnet3_GenericDesc *gd;

		rbi = rbi_base + ring->next2fill;
		gd = ring->base + ring->next2fill;

		if (rbi->buf_type == VMXNET3_RX_BUF_SKB) {
			if (rbi->skb == NULL) {
				rbi->skb = dev_alloc_skb(rbi->len +
							 NET_IP_ALIGN);
				if (unlikely(rbi->skb == NULL)) {
					rq->stats.rx_buf_alloc_failure++;
					break;
				}
				rbi->skb->dev = adapter->netdev;

				skb_reserve(rbi->skb, NET_IP_ALIGN);
				rbi->dma_addr = pci_map_single(adapter->pdev,
						rbi->skb->data, rbi->len,
						PCI_DMA_FROMDEVICE);
			} else {
				/* rx buffer skipped by the device */
			}
			val = VMXNET3_RXD_BTYPE_HEAD << VMXNET3_RXD_BTYPE_SHIFT;
		} else {
			BUG_ON(rbi->buf_type != VMXNET3_RX_BUF_PAGE ||
			       rbi->len != PAGE_SIZE);

			if (rbi->page == NULL) {
				rbi->page = alloc_page(GFP_ATOMIC);
				if (unlikely(rbi->page == NULL)) {
					rq->stats.rx_buf_alloc_failure++;
					break;
				}
				rbi->dma_addr = pci_map_page(adapter->pdev,
						rbi->page, 0, PAGE_SIZE,
						PCI_DMA_FROMDEVICE);
			} else {
				/* rx buffers skipped by the device */
			}
			val = VMXNET3_RXD_BTYPE_BODY << VMXNET3_RXD_BTYPE_SHIFT;
		}

		BUG_ON(rbi->dma_addr == 0);
		gd->rxd.addr = cpu_to_le64(rbi->dma_addr);
		gd->dword[2] = cpu_to_le32((ring->gen << VMXNET3_RXD_GEN_SHIFT)
					   | val | rbi->len);

		num_allocated++;
		vmxnet3_cmd_ring_adv_next2fill(ring);
	}
	rq->uncommitted[ring_idx] += num_allocated;

	dev_dbg(&adapter->netdev->dev,
		"alloc_rx_buf: %d allocated, next2fill %u, next2comp "
		"%u, uncommitted %u\n", num_allocated, ring->next2fill,
		ring->next2comp, rq->uncommitted[ring_idx]);

	/* so that the device can distinguish a full ring and an empty ring */
	BUG_ON(num_allocated != 0 && ring->next2fill == ring->next2comp);

	return num_allocated;
}
static void
vmxnet3_append_frag(struct sk_buff *skb, struct Vmxnet3_RxCompDesc *rcd,
		    struct vmxnet3_rx_buf_info *rbi)
{
	struct skb_frag_struct *frag = skb_shinfo(skb)->frags +
		skb_shinfo(skb)->nr_frags;

	BUG_ON(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS);

	frag->page = rbi->page;
	frag->page_offset = 0;
	frag->size = rcd->len;
	skb->data_len += frag->size;
	skb_shinfo(skb)->nr_frags++;
}
static void
vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx,
		struct vmxnet3_tx_queue *tq, struct pci_dev *pdev,
		struct vmxnet3_adapter *adapter)
{
	u32 dw2, len;
	unsigned long buf_offset;
	int i;
	union Vmxnet3_GenericDesc *gdesc;
	struct vmxnet3_tx_buf_info *tbi = NULL;

	BUG_ON(ctx->copy_size > skb_headlen(skb));

	/* use the previous gen bit for the SOP desc */
	dw2 = (tq->tx_ring.gen ^ 0x1) << VMXNET3_TXD_GEN_SHIFT;

	ctx->sop_txd = tq->tx_ring.base + tq->tx_ring.next2fill;
	gdesc = ctx->sop_txd; /* both loops below can be skipped */

	/* no need to map the buffer if headers are copied */
	if (ctx->copy_size) {
		ctx->sop_txd->txd.addr = cpu_to_le64(tq->data_ring.basePA +
					tq->tx_ring.next2fill *
					sizeof(struct Vmxnet3_TxDataDesc));
		ctx->sop_txd->dword[2] = cpu_to_le32(dw2 | ctx->copy_size);
		ctx->sop_txd->dword[3] = 0;

		tbi = tq->buf_info + tq->tx_ring.next2fill;
		tbi->map_type = VMXNET3_MAP_NONE;

		dev_dbg(&adapter->netdev->dev,
			"txd[%u]: 0x%Lx 0x%x 0x%x\n",
			tq->tx_ring.next2fill,
			le64_to_cpu(ctx->sop_txd->txd.addr),
			ctx->sop_txd->dword[2], ctx->sop_txd->dword[3]);
		vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);

		/* use the right gen for non-SOP desc */
		dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT;
	}

	/* linear part can use multiple tx desc if it's big */
	len = skb_headlen(skb) - ctx->copy_size;
	buf_offset = ctx->copy_size;
	while (len) {
		u32 buf_size;

		if (len < VMXNET3_MAX_TX_BUF_SIZE) {
			buf_size = len;
			dw2 |= len;
		} else {
			buf_size = VMXNET3_MAX_TX_BUF_SIZE;
			/* spec says that for TxDesc.len, 0 == 2^14 */
		}

		tbi = tq->buf_info + tq->tx_ring.next2fill;
		tbi->map_type = VMXNET3_MAP_SINGLE;
		tbi->dma_addr = pci_map_single(adapter->pdev,
				skb->data + buf_offset, buf_size,
				PCI_DMA_TODEVICE);

		tbi->len = buf_size;

		gdesc = tq->tx_ring.base + tq->tx_ring.next2fill;
		BUG_ON(gdesc->txd.gen == tq->tx_ring.gen);

		gdesc->txd.addr = cpu_to_le64(tbi->dma_addr);
		gdesc->dword[2] = cpu_to_le32(dw2);
		gdesc->dword[3] = 0;

		dev_dbg(&adapter->netdev->dev,
			"txd[%u]: 0x%Lx 0x%x 0x%x\n",
			tq->tx_ring.next2fill, le64_to_cpu(gdesc->txd.addr),
			le32_to_cpu(gdesc->dword[2]), gdesc->dword[3]);
		vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);
		dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT;

		len -= buf_size;
		buf_offset += buf_size;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];

		tbi = tq->buf_info + tq->tx_ring.next2fill;
		tbi->map_type = VMXNET3_MAP_PAGE;
		tbi->dma_addr = pci_map_page(adapter->pdev, frag->page,
					     frag->page_offset, frag->size,
					     PCI_DMA_TODEVICE);

		tbi->len = frag->size;

		gdesc = tq->tx_ring.base + tq->tx_ring.next2fill;
		BUG_ON(gdesc->txd.gen == tq->tx_ring.gen);

		gdesc->txd.addr = cpu_to_le64(tbi->dma_addr);
		gdesc->dword[2] = cpu_to_le32(dw2 | frag->size);
		gdesc->dword[3] = 0;

		dev_dbg(&adapter->netdev->dev,
			"txd[%u]: 0x%llu %u %u\n",
			tq->tx_ring.next2fill, le64_to_cpu(gdesc->txd.addr),
			le32_to_cpu(gdesc->dword[2]), gdesc->dword[3]);
		vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);
		dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT;
	}

	ctx->eop_txd = gdesc;

	/* set the last buf_info for the pkt */
	tbi->skb = skb;
	tbi->sop_idx = ctx->sop_txd - tq->tx_ring.base;
}
/*
 *    parse and copy relevant protocol headers:
 *      For a tso pkt, relevant headers are L2/3/4 including options
 *      For a pkt requesting csum offloading, they are L2/3 and may include L4
 *      if it's a TCP/UDP pkt
 *
 *    Returns:
 *       -1:  error happens during parsing
 *        0:  protocol headers parsed, but too big to be copied
 *        1:  protocol headers parsed and copied
 *
 *    Other effects:
 *       1. related *ctx fields are updated.
 *       2. ctx->copy_size is # of bytes copied
 *       3. the portion copied is guaranteed to be in the linear part
 *
 */
static int
vmxnet3_parse_and_copy_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
			   struct vmxnet3_tx_ctx *ctx,
			   struct vmxnet3_adapter *adapter)
{
	struct Vmxnet3_TxDataDesc *tdd;

	if (ctx->mss) {
		ctx->eth_ip_hdr_size = skb_transport_offset(skb);
		ctx->l4_hdr_size = ((struct tcphdr *)
				   skb_transport_header(skb))->doff * 4;
		ctx->copy_size = ctx->eth_ip_hdr_size + ctx->l4_hdr_size;
	} else {
		unsigned int pull_size;

		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			ctx->eth_ip_hdr_size = skb_transport_offset(skb);

			if (ctx->ipv4) {
				struct iphdr *iph = (struct iphdr *)
						    skb_network_header(skb);
				if (iph->protocol == IPPROTO_TCP) {
					pull_size = ctx->eth_ip_hdr_size +
						    sizeof(struct tcphdr);

					if (unlikely(!pskb_may_pull(skb,
								pull_size))) {
						goto err;
					}
					ctx->l4_hdr_size = ((struct tcphdr *)
					   skb_transport_header(skb))->doff * 4;
				} else if (iph->protocol == IPPROTO_UDP) {
					ctx->l4_hdr_size =
							sizeof(struct udphdr);
				} else {
					ctx->l4_hdr_size = 0;
				}
			} else {
				/* for simplicity, don't copy L4 headers */
				ctx->l4_hdr_size = 0;
			}
			ctx->copy_size = ctx->eth_ip_hdr_size +
					 ctx->l4_hdr_size;
		} else {
			ctx->eth_ip_hdr_size = 0;
			ctx->l4_hdr_size = 0;
			/* copy as much as allowed */
			ctx->copy_size = min((unsigned int)VMXNET3_HDR_COPY_SIZE
					     , skb_headlen(skb));
		}

		/* make sure headers are accessible directly */
		if (unlikely(!pskb_may_pull(skb, ctx->copy_size)))
			goto err;
	}

	if (unlikely(ctx->copy_size > VMXNET3_HDR_COPY_SIZE)) {
		tq->stats.oversized_hdr++;
		ctx->copy_size = 0;
		return 0;
	}

	tdd = tq->data_ring.base + tq->tx_ring.next2fill;

	memcpy(tdd->data, skb->data, ctx->copy_size);
	dev_dbg(&adapter->netdev->dev,
		"copy %u bytes to dataRing[%u]\n",
		ctx->copy_size, tq->tx_ring.next2fill);
	return 1;

err:
	return -1;
}
static void
vmxnet3_prepare_tso(struct sk_buff *skb,
		    struct vmxnet3_tx_ctx *ctx)
{
	struct tcphdr *tcph = (struct tcphdr *)skb_transport_header(skb);
	if (ctx->ipv4) {
		struct iphdr *iph = (struct iphdr *)skb_network_header(skb);
		iph->check = 0;
		tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, 0,
						 IPPROTO_TCP, 0);
	} else {
		struct ipv6hdr *iph = (struct ipv6hdr *)skb_network_header(skb);
		tcph->check = ~csum_ipv6_magic(&iph->saddr, &iph->daddr, 0,
					       IPPROTO_TCP, 0);
	}
}
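
/*
 * Illustrative note (not part of the original driver): for TSO the TCP
 * checksum is seeded with a pseudo-header checksum computed over a zero
 * length, as done above; the device then completes the checksum for each
 * segment it emits. The sketch isolates the IPv4 seeding step.
 */
static inline void example_seed_tso_csum(struct iphdr *iph,
					 struct tcphdr *tcph)
{
	/* the length argument is 0: the device adds the real segment size */
	tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, 0,
					 IPPROTO_TCP, 0);
}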
/*
 * Transmits a pkt thru a given tq
 * Returns:
 *    NETDEV_TX_OK:      descriptors are setup successfully
 *    NETDEV_TX_OK:      error occurred, the pkt is dropped
 *    NETDEV_TX_BUSY:    tx ring is full, queue is stopped
 *
 * Side-effects:
 *    1. tx ring may be changed
 *    2. tq stats may be updated accordingly
 *    3. shared->txNumDeferred may be updated
 */
static int
vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
		struct vmxnet3_adapter *adapter, struct net_device *netdev)
{
	int ret;
	u32 count;
	unsigned long flags;
	struct vmxnet3_tx_ctx ctx;
	union Vmxnet3_GenericDesc *gdesc;
#ifdef __BIG_ENDIAN_BITFIELD
	/* Use temporary descriptor to avoid touching bits multiple times */
	union Vmxnet3_GenericDesc tempTxDesc;
#endif

	/* conservatively estimate # of descriptors to use */
	count = VMXNET3_TXD_NEEDED(skb_headlen(skb)) +
		skb_shinfo(skb)->nr_frags + 1;

	ctx.ipv4 = (skb->protocol == cpu_to_be16(ETH_P_IP));

	ctx.mss = skb_shinfo(skb)->gso_size;
	if (ctx.mss) {
		if (skb_header_cloned(skb)) {
			if (unlikely(pskb_expand_head(skb, 0, 0,
						      GFP_ATOMIC) != 0)) {
				tq->stats.drop_tso++;
				goto drop_pkt;
			}
			tq->stats.copy_skb_header++;
		}
		vmxnet3_prepare_tso(skb, &ctx);
	} else {
		if (unlikely(count > VMXNET3_MAX_TXD_PER_PKT)) {

			/* non-tso pkts must not use more than
			 * VMXNET3_MAX_TXD_PER_PKT entries
			 */
			if (skb_linearize(skb) != 0) {
				tq->stats.drop_too_many_frags++;
				goto drop_pkt;
			}
			tq->stats.linearized++;

			/* recalculate the # of descriptors to use */
			count = VMXNET3_TXD_NEEDED(skb_headlen(skb)) + 1;
		}
	}

	ret = vmxnet3_parse_and_copy_hdr(skb, tq, &ctx, adapter);
	if (ret >= 0) {
		BUG_ON(ret <= 0 && ctx.copy_size != 0);
		/* hdrs parsed, check against other limits */
		if (ctx.mss) {
			if (unlikely(ctx.eth_ip_hdr_size + ctx.l4_hdr_size >
				     VMXNET3_MAX_TX_BUF_SIZE)) {
				goto hdr_too_big;
			}
		} else {
			if (skb->ip_summed == CHECKSUM_PARTIAL) {
				if (unlikely(ctx.eth_ip_hdr_size +
					     skb->csum_offset >
					     VMXNET3_MAX_CSUM_OFFSET)) {
					goto hdr_too_big;
				}
			}
		}
	} else {
		tq->stats.drop_hdr_inspect_err++;
		goto drop_pkt;
	}

	spin_lock_irqsave(&tq->tx_lock, flags);

	if (count > vmxnet3_cmd_ring_desc_avail(&tq->tx_ring)) {
		tq->stats.tx_ring_full++;
		dev_dbg(&adapter->netdev->dev,
			"tx queue stopped on %s, next2comp %u"
			" next2fill %u\n", adapter->netdev->name,
			tq->tx_ring.next2comp, tq->tx_ring.next2fill);

		vmxnet3_tq_stop(tq, adapter);
		spin_unlock_irqrestore(&tq->tx_lock, flags);
		return NETDEV_TX_BUSY;
	}

	/* fill tx descs related to addr & len */
	vmxnet3_map_pkt(skb, &ctx, tq, adapter->pdev, adapter);

	/* setup the EOP desc */
	ctx.eop_txd->dword[3] = cpu_to_le32(VMXNET3_TXD_CQ | VMXNET3_TXD_EOP);

	/* setup the SOP desc */
#ifdef __BIG_ENDIAN_BITFIELD
	gdesc = &tempTxDesc;
	gdesc->dword[2] = ctx.sop_txd->dword[2];
	gdesc->dword[3] = ctx.sop_txd->dword[3];
#else
	gdesc = ctx.sop_txd;
#endif
	if (ctx.mss) {
		gdesc->txd.hlen = ctx.eth_ip_hdr_size + ctx.l4_hdr_size;
		gdesc->txd.om = VMXNET3_OM_TSO;
		gdesc->txd.msscof = ctx.mss;
		le32_add_cpu(&tq->shared->txNumDeferred, (skb->len -
			     gdesc->txd.hlen + ctx.mss - 1) / ctx.mss);
	} else {
		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			gdesc->txd.hlen = ctx.eth_ip_hdr_size;
			gdesc->txd.om = VMXNET3_OM_CSUM;
			gdesc->txd.msscof = ctx.eth_ip_hdr_size +
					    skb->csum_offset;
		} else {
			gdesc->txd.om = 0;
			gdesc->txd.msscof = 0;
		}
		le32_add_cpu(&tq->shared->txNumDeferred, 1);
	}

	if (vlan_tx_tag_present(skb)) {
		gdesc->txd.ti = 1;
		gdesc->txd.tci = vlan_tx_tag_get(skb);
	}

	/* finally flips the GEN bit of the SOP desc. */
	gdesc->dword[2] = cpu_to_le32(le32_to_cpu(gdesc->dword[2]) ^
						  VMXNET3_TXD_GEN);
#ifdef __BIG_ENDIAN_BITFIELD
	/* Finished updating in bitfields of Tx Desc, so write them in original
	 * place.
	 */
	vmxnet3_TxDescToLe((struct Vmxnet3_TxDesc *)gdesc,
			   (struct Vmxnet3_TxDesc *)ctx.sop_txd);
	gdesc = ctx.sop_txd;
#endif
	dev_dbg(&adapter->netdev->dev,
		"txd[%u]: SOP 0x%Lx 0x%x 0x%x\n",
		(u32)((union Vmxnet3_GenericDesc *)ctx.sop_txd -
		tq->tx_ring.base), le64_to_cpu(gdesc->txd.addr),
		le32_to_cpu(gdesc->dword[2]), le32_to_cpu(gdesc->dword[3]));

	spin_unlock_irqrestore(&tq->tx_lock, flags);

	if (le32_to_cpu(tq->shared->txNumDeferred) >=
					le32_to_cpu(tq->shared->txThreshold)) {
		tq->shared->txNumDeferred = 0;
		VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_TXPROD,
				       tq->tx_ring.next2fill);
	}

	return NETDEV_TX_OK;

hdr_too_big:
	tq->stats.drop_oversized_hdr++;
drop_pkt:
	tq->stats.drop_total++;
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}
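
/*
 * Illustrative sketch (not part of the original driver): the conservative
 * descriptor estimate used at the top of vmxnet3_tq_xmit(). Each page frag
 * takes one descriptor, the linear part may take several, plus one spare.
 */
static inline u32 example_txd_estimate(struct sk_buff *skb)
{
	return VMXNET3_TXD_NEEDED(skb_headlen(skb)) +
	       skb_shinfo(skb)->nr_frags + 1;
}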
static netdev_tx_t
vmxnet3_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);

	return vmxnet3_tq_xmit(skb, &adapter->tx_queue, adapter, netdev);
}
static void
vmxnet3_rx_csum(struct vmxnet3_adapter *adapter,
		struct sk_buff *skb,
		union Vmxnet3_GenericDesc *gdesc)
{
	if (!gdesc->rcd.cnc && adapter->rxcsum) {
		/* typical case: TCP/UDP over IP and both csums are correct */
		if ((le32_to_cpu(gdesc->dword[3]) & VMXNET3_RCD_CSUM_OK) ==
							VMXNET3_RCD_CSUM_OK) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			BUG_ON(!(gdesc->rcd.tcp || gdesc->rcd.udp));
			BUG_ON(!(gdesc->rcd.v4  || gdesc->rcd.v6));
			BUG_ON(gdesc->rcd.frg);
		} else {
			if (gdesc->rcd.csum) {
				skb->csum = htons(gdesc->rcd.csum);
				skb->ip_summed = CHECKSUM_PARTIAL;
			} else {
				skb_checksum_none_assert(skb);
			}
		}
	} else {
		skb_checksum_none_assert(skb);
	}
}
static void
vmxnet3_rx_error(struct vmxnet3_rx_queue *rq, struct Vmxnet3_RxCompDesc *rcd,
		 struct vmxnet3_rx_ctx *ctx, struct vmxnet3_adapter *adapter)
{
	rq->stats.drop_err++;
	if (!rcd->fcs)
		rq->stats.drop_fcs++;

	rq->stats.drop_total++;

	/*
	 * We do not unmap and chain the rx buffer to the skb.
	 * We basically pretend this buffer is not used and will be recycled
	 * by vmxnet3_rq_alloc_rx_buf()
	 */

	/*
	 * ctx->skb may be NULL if this is the first and the only one
	 * desc for the pkt
	 */
	if (ctx->skb)
		dev_kfree_skb_irq(ctx->skb);

	ctx->skb = NULL;
}
static int
vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
		       struct vmxnet3_adapter *adapter, int quota)
{
	static u32 rxprod_reg[2] = {VMXNET3_REG_RXPROD, VMXNET3_REG_RXPROD2};
	u32 num_rxd = 0;
	struct Vmxnet3_RxCompDesc *rcd;
	struct vmxnet3_rx_ctx *ctx = &rq->rx_ctx;
#ifdef __BIG_ENDIAN_BITFIELD
	struct Vmxnet3_RxDesc rxCmdDesc;
	struct Vmxnet3_RxCompDesc rxComp;
#endif
	vmxnet3_getRxComp(rcd, &rq->comp_ring.base[rq->comp_ring.next2proc].rcd,
			  &rxComp);
	while (rcd->gen == rq->comp_ring.gen) {
		struct vmxnet3_rx_buf_info *rbi;
		struct sk_buff *skb;
		int num_to_alloc;
		struct Vmxnet3_RxDesc *rxd;
		u32 idx, ring_idx;

		if (num_rxd >= quota) {
			/* we may stop even before we see the EOP desc of
			 * the current pkt
			 */
			break;
		}
		num_rxd++;

		idx = rcd->rxdIdx;
		ring_idx = rcd->rqID == rq->qid ? 0 : 1;
		vmxnet3_getRxDesc(rxd, &rq->rx_ring[ring_idx].base[idx].rxd,
				  &rxCmdDesc);
		rbi = rq->buf_info[ring_idx] + idx;

		BUG_ON(rxd->addr != rbi->dma_addr ||
		       rxd->len != rbi->len);

		if (unlikely(rcd->eop && rcd->err)) {
			vmxnet3_rx_error(rq, rcd, ctx, adapter);
			goto rcd_done;
		}

		if (rcd->sop) { /* first buf of the pkt */
			BUG_ON(rxd->btype != VMXNET3_RXD_BTYPE_HEAD ||
			       rcd->rqID != rq->qid);

			BUG_ON(rbi->buf_type != VMXNET3_RX_BUF_SKB);
			BUG_ON(ctx->skb != NULL || rbi->skb == NULL);

			if (unlikely(rcd->len == 0)) {
				/* Pretend the rx buffer is skipped. */
				BUG_ON(!(rcd->sop && rcd->eop));
				dev_dbg(&adapter->netdev->dev,
					"rxRing[%u][%u] 0 length\n",
					ring_idx, idx);
				goto rcd_done;
			}

			ctx->skb = rbi->skb;
			rbi->skb = NULL;

			pci_unmap_single(adapter->pdev, rbi->dma_addr, rbi->len,
					 PCI_DMA_FROMDEVICE);

			skb_put(ctx->skb, rcd->len);
		} else {
			BUG_ON(ctx->skb == NULL);
			/* non SOP buffer must be type 1 in most cases */
			if (rbi->buf_type == VMXNET3_RX_BUF_PAGE) {
				BUG_ON(rxd->btype != VMXNET3_RXD_BTYPE_BODY);

				if (rcd->len) {
					pci_unmap_page(adapter->pdev,
						       rbi->dma_addr, rbi->len,
						       PCI_DMA_FROMDEVICE);

					vmxnet3_append_frag(ctx->skb, rcd, rbi);
					rbi->page = NULL;
				}
			} else {
				/*
				 * The only time a non-SOP buffer is type 0 is
				 * when it's EOP and error flag is raised, which
				 * has already been handled.
				 */
				BUG_ON(true);
			}
		}

		skb = ctx->skb;
		if (rcd->eop) {
			skb->len += skb->data_len;
			skb->truesize += skb->data_len;

			vmxnet3_rx_csum(adapter, skb,
					(union Vmxnet3_GenericDesc *)rcd);
			skb->protocol = eth_type_trans(skb, adapter->netdev);

			if (unlikely(adapter->vlan_grp && rcd->ts)) {
				vlan_hwaccel_receive_skb(skb,
						adapter->vlan_grp, rcd->tci);
			} else {
				netif_receive_skb(skb);
			}

			ctx->skb = NULL;
		}

rcd_done:
		/* device may skip some rx descs */
		rq->rx_ring[ring_idx].next2comp = idx;
		VMXNET3_INC_RING_IDX_ONLY(rq->rx_ring[ring_idx].next2comp,
					  rq->rx_ring[ring_idx].size);

		/* refill rx buffers frequently to avoid starving the h/w */
		num_to_alloc = vmxnet3_cmd_ring_desc_avail(rq->rx_ring +
							   ring_idx);
		if (unlikely(num_to_alloc > VMXNET3_RX_ALLOC_THRESHOLD(rq,
							ring_idx, adapter))) {
			vmxnet3_rq_alloc_rx_buf(rq, ring_idx, num_to_alloc,
						adapter);

			/* if needed, update the register */
			if (unlikely(rq->shared->updateRxProd)) {
				VMXNET3_WRITE_BAR0_REG(adapter,
					rxprod_reg[ring_idx] + rq->qid * 8,
					rq->rx_ring[ring_idx].next2fill);
				rq->uncommitted[ring_idx] = 0;
			}
		}

		vmxnet3_comp_ring_adv_next2proc(&rq->comp_ring);
		vmxnet3_getRxComp(rcd,
		     &rq->comp_ring.base[rq->comp_ring.next2proc].rcd, &rxComp);
	}

	return num_rxd;
}
static void
vmxnet3_rq_cleanup(struct vmxnet3_rx_queue *rq,
		   struct vmxnet3_adapter *adapter)
{
	u32 i, ring_idx;
	struct Vmxnet3_RxDesc *rxd;

	for (ring_idx = 0; ring_idx < 2; ring_idx++) {
		for (i = 0; i < rq->rx_ring[ring_idx].size; i++) {
#ifdef __BIG_ENDIAN_BITFIELD
			struct Vmxnet3_RxDesc rxDesc;
#endif
			vmxnet3_getRxDesc(rxd,
				&rq->rx_ring[ring_idx].base[i].rxd, &rxDesc);

			if (rxd->btype == VMXNET3_RXD_BTYPE_HEAD &&
					rq->buf_info[ring_idx][i].skb) {
				pci_unmap_single(adapter->pdev, rxd->addr,
						 rxd->len, PCI_DMA_FROMDEVICE);
				dev_kfree_skb(rq->buf_info[ring_idx][i].skb);
				rq->buf_info[ring_idx][i].skb = NULL;
			} else if (rxd->btype == VMXNET3_RXD_BTYPE_BODY &&
					rq->buf_info[ring_idx][i].page) {
				pci_unmap_page(adapter->pdev, rxd->addr,
					       rxd->len, PCI_DMA_FROMDEVICE);
				put_page(rq->buf_info[ring_idx][i].page);
				rq->buf_info[ring_idx][i].page = NULL;
			}
		}

		rq->rx_ring[ring_idx].gen = VMXNET3_INIT_GEN;
		rq->rx_ring[ring_idx].next2fill =
					rq->rx_ring[ring_idx].next2comp = 0;
		rq->uncommitted[ring_idx] = 0;
	}

	rq->comp_ring.gen = VMXNET3_INIT_GEN;
	rq->comp_ring.next2proc = 0;
}
void vmxnet3_rq_destroy(struct vmxnet3_rx_queue *rq,
			struct vmxnet3_adapter *adapter)
{
	int i;
	int j;

	/* all rx buffers must have already been freed */
	for (i = 0; i < 2; i++) {
		if (rq->buf_info[i]) {
			for (j = 0; j < rq->rx_ring[i].size; j++)
				BUG_ON(rq->buf_info[i][j].page != NULL);
		}
	}


	kfree(rq->buf_info[0]);

	for (i = 0; i < 2; i++) {
		if (rq->rx_ring[i].base) {
			pci_free_consistent(adapter->pdev, rq->rx_ring[i].size
					    * sizeof(struct Vmxnet3_RxDesc),
					    rq->rx_ring[i].base,
					    rq->rx_ring[i].basePA);
			rq->rx_ring[i].base = NULL;
		}
		rq->buf_info[i] = NULL;
	}

	if (rq->comp_ring.base) {
		pci_free_consistent(adapter->pdev, rq->comp_ring.size *
				    sizeof(struct Vmxnet3_RxCompDesc),
				    rq->comp_ring.base, rq->comp_ring.basePA);
		rq->comp_ring.base = NULL;
	}
}
static int
vmxnet3_rq_init(struct vmxnet3_rx_queue *rq,
		struct vmxnet3_adapter *adapter)
{
	int i;

	/* initialize buf_info */
	for (i = 0; i < rq->rx_ring[0].size; i++) {

		/* 1st buf for a pkt is skbuff */
		if (i % adapter->rx_buf_per_pkt == 0) {
			rq->buf_info[0][i].buf_type = VMXNET3_RX_BUF_SKB;
			rq->buf_info[0][i].len = adapter->skb_buf_size;
		} else { /* subsequent bufs for a pkt is frag */
			rq->buf_info[0][i].buf_type = VMXNET3_RX_BUF_PAGE;
			rq->buf_info[0][i].len = PAGE_SIZE;
		}
	}
	for (i = 0; i < rq->rx_ring[1].size; i++) {
		rq->buf_info[1][i].buf_type = VMXNET3_RX_BUF_PAGE;
		rq->buf_info[1][i].len = PAGE_SIZE;
	}

	/* reset internal state and allocate buffers for both rings */
	for (i = 0; i < 2; i++) {
		rq->rx_ring[i].next2fill = rq->rx_ring[i].next2comp = 0;
		rq->uncommitted[i] = 0;

		memset(rq->rx_ring[i].base, 0, rq->rx_ring[i].size *
		       sizeof(struct Vmxnet3_RxDesc));
		rq->rx_ring[i].gen = VMXNET3_INIT_GEN;
	}
	if (vmxnet3_rq_alloc_rx_buf(rq, 0, rq->rx_ring[0].size - 1,
				    adapter) == 0) {
		/* at least has 1 rx buffer for the 1st ring */
		return -ENOMEM;
	}
	vmxnet3_rq_alloc_rx_buf(rq, 1, rq->rx_ring[1].size - 1, adapter);

	/* reset the comp ring */
	rq->comp_ring.next2proc = 0;
	memset(rq->comp_ring.base, 0, rq->comp_ring.size *
	       sizeof(struct Vmxnet3_RxCompDesc));
	rq->comp_ring.gen = VMXNET3_INIT_GEN;

	/* reset rxctx */
	rq->rx_ctx.skb = NULL;

	/* stats are not reset */
	return 0;
}
static int
vmxnet3_rq_create(struct vmxnet3_rx_queue *rq, struct vmxnet3_adapter *adapter)
{
	int i;
	size_t sz;
	struct vmxnet3_rx_buf_info *bi;

	for (i = 0; i < 2; i++) {

		sz = rq->rx_ring[i].size * sizeof(struct Vmxnet3_RxDesc);
		rq->rx_ring[i].base = pci_alloc_consistent(adapter->pdev, sz,
							&rq->rx_ring[i].basePA);
		if (!rq->rx_ring[i].base) {
			printk(KERN_ERR "%s: failed to allocate rx ring %d\n",
			       adapter->netdev->name, i);
			goto err;
		}
	}

	sz = rq->comp_ring.size * sizeof(struct Vmxnet3_RxCompDesc);
	rq->comp_ring.base = pci_alloc_consistent(adapter->pdev, sz,
						  &rq->comp_ring.basePA);
	if (!rq->comp_ring.base) {
		printk(KERN_ERR "%s: failed to allocate rx comp ring\n",
		       adapter->netdev->name);
		goto err;
	}

	sz = sizeof(struct vmxnet3_rx_buf_info) * (rq->rx_ring[0].size +
						   rq->rx_ring[1].size);
	bi = kzalloc(sz, GFP_KERNEL);
	if (!bi) {
		printk(KERN_ERR "%s: failed to allocate rx bufinfo\n",
		       adapter->netdev->name);
		goto err;
	}
	rq->buf_info[0] = bi;
	rq->buf_info[1] = bi + rq->rx_ring[0].size;

	return 0;

err:
	vmxnet3_rq_destroy(rq, adapter);
	return -ENOMEM;
}
static int
vmxnet3_do_poll(struct vmxnet3_adapter *adapter, int budget)
{
	if (unlikely(adapter->shared->ecr))
		vmxnet3_process_events(adapter);

	vmxnet3_tq_tx_complete(&adapter->tx_queue, adapter);
	return vmxnet3_rq_rx_complete(&adapter->rx_queue, adapter, budget);
}
static int
vmxnet3_poll(struct napi_struct *napi, int budget)
{
	struct vmxnet3_adapter *adapter = container_of(napi,
					  struct vmxnet3_adapter, napi);
	int rxd_done;

	rxd_done = vmxnet3_do_poll(adapter, budget);

	if (rxd_done < budget) {
		napi_complete(napi);
		vmxnet3_enable_intr(adapter, 0);
	}
	return rxd_done;
}
/* Interrupt handler for vmxnet3 */
static irqreturn_t
vmxnet3_intr(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct vmxnet3_adapter *adapter = netdev_priv(dev);

	if (unlikely(adapter->intr.type == VMXNET3_IT_INTX)) {
		u32 icr = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_ICR);
		if (unlikely(icr == 0))
			/* not ours */
			return IRQ_NONE;
	}

	/* disable intr if needed */
	if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
		vmxnet3_disable_intr(adapter, 0);

	napi_schedule(&adapter->napi);

	return IRQ_HANDLED;
}
#ifdef CONFIG_NET_POLL_CONTROLLER


/* netpoll callback. */
static void
vmxnet3_netpoll(struct net_device *netdev)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
	int irq;

#ifdef CONFIG_PCI_MSI
	if (adapter->intr.type == VMXNET3_IT_MSIX)
		irq = adapter->intr.msix_entries[0].vector;
	else
#endif
		irq = adapter->pdev->irq;

	disable_irq(irq);
	vmxnet3_intr(irq, netdev);
	enable_irq(irq);
}
#endif
static int
vmxnet3_request_irqs(struct vmxnet3_adapter *adapter)
{
	int err;

#ifdef CONFIG_PCI_MSI
	if (adapter->intr.type == VMXNET3_IT_MSIX) {
		/* we only use 1 MSI-X vector */
		err = request_irq(adapter->intr.msix_entries[0].vector,
				  vmxnet3_intr, 0, adapter->netdev->name,
				  adapter->netdev);
	} else if (adapter->intr.type == VMXNET3_IT_MSI) {
		err = request_irq(adapter->pdev->irq, vmxnet3_intr, 0,
				  adapter->netdev->name, adapter->netdev);
	} else
#endif
	{
		err = request_irq(adapter->pdev->irq, vmxnet3_intr,
				  IRQF_SHARED, adapter->netdev->name,
				  adapter->netdev);
	}

	if (err)
		printk(KERN_ERR "Failed to request irq %s (intr type:%d), error"
		       ":%d\n", adapter->netdev->name, adapter->intr.type, err);


	if (!err) {
		int i;
		/* init our intr settings */
		for (i = 0; i < adapter->intr.num_intrs; i++)
			adapter->intr.mod_levels[i] = UPT1_IML_ADAPTIVE;

		/* next setup intr index for all intr sources */
		adapter->tx_queue.comp_ring.intr_idx = 0;
		adapter->rx_queue.comp_ring.intr_idx = 0;
		adapter->intr.event_intr_idx = 0;

		printk(KERN_INFO "%s: intr type %u, mode %u, %u vectors "
		       "allocated\n", adapter->netdev->name, adapter->intr.type,
		       adapter->intr.mask_mode, adapter->intr.num_intrs);
	}

	return err;
}
static void
vmxnet3_free_irqs(struct vmxnet3_adapter *adapter)
{
	BUG_ON(adapter->intr.type == VMXNET3_IT_AUTO ||
	       adapter->intr.num_intrs <= 0);

	switch (adapter->intr.type) {
#ifdef CONFIG_PCI_MSI
	case VMXNET3_IT_MSIX:
	{
		int i;

		for (i = 0; i < adapter->intr.num_intrs; i++)
			free_irq(adapter->intr.msix_entries[i].vector,
				 adapter->netdev);
		break;
	}
#endif
	case VMXNET3_IT_MSI:
		free_irq(adapter->pdev->irq, adapter->netdev);
		break;
	case VMXNET3_IT_INTX:
		free_irq(adapter->pdev->irq, adapter->netdev);
		break;
	default:
		BUG_ON(true);
	}
}
static void
vmxnet3_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
	struct Vmxnet3_DriverShared *shared = adapter->shared;
	u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;

	if (grp) {
		/* add vlan rx stripping. */
		if (adapter->netdev->features & NETIF_F_HW_VLAN_RX) {
			int i;
			struct Vmxnet3_DSDevRead *devRead = &shared->devRead;
			adapter->vlan_grp = grp;

			/* update FEATURES to device */
			devRead->misc.uptFeatures |= UPT1_F_RXVLAN;
			VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
					       VMXNET3_CMD_UPDATE_FEATURE);
			/*
			 *  Clear entire vfTable; then enable untagged pkts.
			 *  Note: setting one entry in vfTable to non-zero turns
			 *  on VLAN rx filtering.
			 */
			for (i = 0; i < VMXNET3_VFT_SIZE; i++)
				vfTable[i] = 0;

			VMXNET3_SET_VFTABLE_ENTRY(vfTable, 0);
			VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
					       VMXNET3_CMD_UPDATE_VLAN_FILTERS);
		} else {
			printk(KERN_ERR "%s: vlan_rx_register when device has "
			       "no NETIF_F_HW_VLAN_RX\n", netdev->name);
		}
	} else {
		/* remove vlan rx stripping. */
		struct Vmxnet3_DSDevRead *devRead = &shared->devRead;
		adapter->vlan_grp = NULL;

		if (devRead->misc.uptFeatures & UPT1_F_RXVLAN) {
			int i;

			for (i = 0; i < VMXNET3_VFT_SIZE; i++) {
				/* clear entire vfTable; this also disables
				 * VLAN rx filtering
				 */
				vfTable[i] = 0;
			}
			VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
					       VMXNET3_CMD_UPDATE_VLAN_FILTERS);

			/* update FEATURES to device */
			devRead->misc.uptFeatures &= ~UPT1_F_RXVLAN;
			VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
					       VMXNET3_CMD_UPDATE_FEATURE);
		}
	}
}
static void
vmxnet3_restore_vlan(struct vmxnet3_adapter *adapter)
{
	if (adapter->vlan_grp) {
		u16 vid;
		u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
		bool activeVlan = false;

		for (vid = 0; vid < VLAN_N_VID; vid++) {
			if (vlan_group_get_device(adapter->vlan_grp, vid)) {
				VMXNET3_SET_VFTABLE_ENTRY(vfTable, vid);
				activeVlan = true;
			}
		}
		if (activeVlan) {
			/* continue to allow untagged pkts */
			VMXNET3_SET_VFTABLE_ENTRY(vfTable, 0);
		}
	}
}
static void
vmxnet3_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
	u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;

	VMXNET3_SET_VFTABLE_ENTRY(vfTable, vid);
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
			       VMXNET3_CMD_UPDATE_VLAN_FILTERS);
}
static void
vmxnet3_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
	u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;

	VMXNET3_CLEAR_VFTABLE_ENTRY(vfTable, vid);
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
			       VMXNET3_CMD_UPDATE_VLAN_FILTERS);
}
static u8 *
vmxnet3_copy_mc(struct net_device *netdev)
{
	u8 *buf = NULL;
	u32 sz = netdev_mc_count(netdev) * ETH_ALEN;

	/* struct Vmxnet3_RxFilterConf.mfTableLen is u16. */
	if (sz <= 0xffff) {
		/* We may be called with BH disabled */
		buf = kmalloc(sz, GFP_ATOMIC);
		if (buf) {
			struct netdev_hw_addr *ha;
			int i = 0;

			netdev_for_each_mc_addr(ha, netdev)
				memcpy(buf + i++ * ETH_ALEN, ha->addr,
				       ETH_ALEN);
		}
	}
	return buf;
}
static void
vmxnet3_set_mc(struct net_device *netdev)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
	struct Vmxnet3_RxFilterConf *rxConf =
					&adapter->shared->devRead.rxFilterConf;
	u8 *new_table = NULL;
	u32 new_mode = VMXNET3_RXM_UCAST;

	if (netdev->flags & IFF_PROMISC)
		new_mode |= VMXNET3_RXM_PROMISC;

	if (netdev->flags & IFF_BROADCAST)
		new_mode |= VMXNET3_RXM_BCAST;

	if (netdev->flags & IFF_ALLMULTI)
		new_mode |= VMXNET3_RXM_ALL_MULTI;
	else if (!netdev_mc_empty(netdev)) {
		new_table = vmxnet3_copy_mc(netdev);
		if (new_table) {
			new_mode |= VMXNET3_RXM_MCAST;
			rxConf->mfTableLen = cpu_to_le16(
				netdev_mc_count(netdev) * ETH_ALEN);
			rxConf->mfTablePA = cpu_to_le64(virt_to_phys(
					    new_table));
		} else {
			printk(KERN_INFO "%s: failed to copy mcast list"
			       ", setting ALL_MULTI\n", netdev->name);
			new_mode |= VMXNET3_RXM_ALL_MULTI;
		}
	}


	if (!(new_mode & VMXNET3_RXM_MCAST)) {
		rxConf->mfTableLen = 0;
		rxConf->mfTablePA = 0;
	}

	if (new_mode != rxConf->rxMode) {
		rxConf->rxMode = cpu_to_le32(new_mode);
		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
				       VMXNET3_CMD_UPDATE_RX_MODE);
	}

	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
			       VMXNET3_CMD_UPDATE_MAC_FILTERS);

	kfree(new_table);
}
/*
 *   Set up driver_shared based on settings in adapter.
 */

static void
vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter)
{
	struct Vmxnet3_DriverShared *shared = adapter->shared;
	struct Vmxnet3_DSDevRead *devRead = &shared->devRead;
	struct Vmxnet3_TxQueueConf *tqc;
	struct Vmxnet3_RxQueueConf *rqc;
	int i;

	memset(shared, 0, sizeof(*shared));

	/* driver settings */
	shared->magic = cpu_to_le32(VMXNET3_REV1_MAGIC);
	devRead->misc.driverInfo.version = cpu_to_le32(
						VMXNET3_DRIVER_VERSION_NUM);
	devRead->misc.driverInfo.gos.gosBits = (sizeof(void *) == 4 ?
				VMXNET3_GOS_BITS_32 : VMXNET3_GOS_BITS_64);
	devRead->misc.driverInfo.gos.gosType = VMXNET3_GOS_TYPE_LINUX;
	*((u32 *)&devRead->misc.driverInfo.gos) = cpu_to_le32(
				*((u32 *)&devRead->misc.driverInfo.gos));
	devRead->misc.driverInfo.vmxnet3RevSpt = cpu_to_le32(1);
	devRead->misc.driverInfo.uptVerSpt = cpu_to_le32(1);

	devRead->misc.ddPA = cpu_to_le64(virt_to_phys(adapter));
	devRead->misc.ddLen = cpu_to_le32(sizeof(struct vmxnet3_adapter));

	/* set up feature flags */
	if (adapter->rxcsum)
		devRead->misc.uptFeatures |= UPT1_F_RXCSUM;

	if (adapter->lro) {
		devRead->misc.uptFeatures |= UPT1_F_LRO;
		devRead->misc.maxNumRxSG = cpu_to_le16(1 + MAX_SKB_FRAGS);
	}
	if ((adapter->netdev->features & NETIF_F_HW_VLAN_RX) &&
	    adapter->vlan_grp) {
		devRead->misc.uptFeatures |= UPT1_F_RXVLAN;
	}

	devRead->misc.mtu = cpu_to_le32(adapter->netdev->mtu);
	devRead->misc.queueDescPA = cpu_to_le64(adapter->queue_desc_pa);
	devRead->misc.queueDescLen = cpu_to_le32(
				     sizeof(struct Vmxnet3_TxQueueDesc) +
				     sizeof(struct Vmxnet3_RxQueueDesc));

	/* tx queue settings */
	BUG_ON(adapter->tx_queue.tx_ring.base == NULL);

	devRead->misc.numTxQueues = 1;
	tqc = &adapter->tqd_start->conf;
	tqc->txRingBasePA   = cpu_to_le64(adapter->tx_queue.tx_ring.basePA);
	tqc->dataRingBasePA = cpu_to_le64(adapter->tx_queue.data_ring.basePA);
	tqc->compRingBasePA = cpu_to_le64(adapter->tx_queue.comp_ring.basePA);
	tqc->ddPA           = cpu_to_le64(virt_to_phys(
						adapter->tx_queue.buf_info));
	tqc->txRingSize     = cpu_to_le32(adapter->tx_queue.tx_ring.size);
	tqc->dataRingSize   = cpu_to_le32(adapter->tx_queue.data_ring.size);
	tqc->compRingSize   = cpu_to_le32(adapter->tx_queue.comp_ring.size);
	tqc->ddLen          = cpu_to_le32(sizeof(struct vmxnet3_tx_buf_info) *
					  tqc->txRingSize);
	tqc->intrIdx        = adapter->tx_queue.comp_ring.intr_idx;

	/* rx queue settings */
	devRead->misc.numRxQueues = 1;
	rqc = &adapter->rqd_start->conf;
	rqc->rxRingBasePA[0] = cpu_to_le64(adapter->rx_queue.rx_ring[0].basePA);
	rqc->rxRingBasePA[1] = cpu_to_le64(adapter->rx_queue.rx_ring[1].basePA);
	rqc->compRingBasePA  = cpu_to_le64(adapter->rx_queue.comp_ring.basePA);
	rqc->ddPA            = cpu_to_le64(virt_to_phys(
						adapter->rx_queue.buf_info));
	rqc->rxRingSize[0]   = cpu_to_le32(adapter->rx_queue.rx_ring[0].size);
	rqc->rxRingSize[1]   = cpu_to_le32(adapter->rx_queue.rx_ring[1].size);
	rqc->compRingSize    = cpu_to_le32(adapter->rx_queue.comp_ring.size);
	rqc->ddLen           = cpu_to_le32(sizeof(struct vmxnet3_rx_buf_info) *
					   (rqc->rxRingSize[0] +
					    rqc->rxRingSize[1]));
	rqc->intrIdx         = adapter->rx_queue.comp_ring.intr_idx;

	/* intr settings */
	devRead->intrConf.autoMask = adapter->intr.mask_mode ==
				     VMXNET3_IMM_AUTO;
	devRead->intrConf.numIntrs = adapter->intr.num_intrs;
	for (i = 0; i < adapter->intr.num_intrs; i++)
		devRead->intrConf.modLevels[i] = adapter->intr.mod_levels[i];

	devRead->intrConf.eventIntrIdx = adapter->intr.event_intr_idx;
	devRead->intrConf.intrCtrl |= cpu_to_le32(VMXNET3_IC_DISABLE_ALL);

	/* rx filter settings */
	devRead->rxFilterConf.rxMode = 0;
	vmxnet3_restore_vlan(adapter);

	/* the rest are already zeroed */
}
int
vmxnet3_activate_dev(struct vmxnet3_adapter *adapter)
{
	int err;
	u32 ret;

	dev_dbg(&adapter->netdev->dev,
		"%s: skb_buf_size %d, rx_buf_per_pkt %d, ring sizes"
		" %u %u %u\n", adapter->netdev->name, adapter->skb_buf_size,
		adapter->rx_buf_per_pkt, adapter->tx_queue.tx_ring.size,
		adapter->rx_queue.rx_ring[0].size,
		adapter->rx_queue.rx_ring[1].size);

	vmxnet3_tq_init(&adapter->tx_queue, adapter);
	err = vmxnet3_rq_init(&adapter->rx_queue, adapter);
	if (err) {
		printk(KERN_ERR "Failed to init rx queue for %s: error %d\n",
		       adapter->netdev->name, err);
		goto rq_err;
	}

	err = vmxnet3_request_irqs(adapter);
	if (err) {
		printk(KERN_ERR "Failed to setup irq for %s: error %d\n",
		       adapter->netdev->name, err);
		goto irq_err;
	}

	vmxnet3_setup_driver_shared(adapter);

	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAL, VMXNET3_GET_ADDR_LO(
			       adapter->shared_pa));
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAH, VMXNET3_GET_ADDR_HI(
			       adapter->shared_pa));
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
			       VMXNET3_CMD_ACTIVATE_DEV);
	ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);

	if (ret != 0) {
		printk(KERN_ERR "Failed to activate dev %s: error %u\n",
		       adapter->netdev->name, ret);
		err = -EINVAL;
		goto activate_err;
	}
	VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_RXPROD,
			       adapter->rx_queue.rx_ring[0].next2fill);
	VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_RXPROD2,
			       adapter->rx_queue.rx_ring[1].next2fill);

	/* Apply the rx filter settings last. */
	vmxnet3_set_mc(adapter->netdev);

	/*
	 * Check link state when first activating device. It will start the
	 * tx queue if the link is up.
	 */
	vmxnet3_check_link(adapter, true);

	napi_enable(&adapter->napi);
	vmxnet3_enable_all_intrs(adapter);
	clear_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state);
	return 0;

activate_err:
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAL, 0);
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAH, 0);
	vmxnet3_free_irqs(adapter);
irq_err:
rq_err:
	/* free up buffers we allocated */
	vmxnet3_rq_cleanup(&adapter->rx_queue, adapter);
	return err;
}
void
vmxnet3_reset_dev(struct vmxnet3_adapter *adapter)
{
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_RESET_DEV);
}
int
vmxnet3_quiesce_dev(struct vmxnet3_adapter *adapter)
{
	if (test_and_set_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state))
		return 0;


	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
			       VMXNET3_CMD_QUIESCE_DEV);
	vmxnet3_disable_all_intrs(adapter);

	napi_disable(&adapter->napi);
	netif_tx_disable(adapter->netdev);
	adapter->link_speed = 0;
	netif_carrier_off(adapter->netdev);

	vmxnet3_tq_cleanup(&adapter->tx_queue, adapter);
	vmxnet3_rq_cleanup(&adapter->rx_queue, adapter);
	vmxnet3_free_irqs(adapter);
	return 0;
}
static void
vmxnet3_write_mac_addr(struct vmxnet3_adapter *adapter, u8 *mac)
{
	u32 tmp;

	tmp = *(u32 *)mac;
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_MACL, tmp);

	tmp = (mac[5] << 8) | mac[4];
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_MACH, tmp);
}
static int
vmxnet3_set_mac_addr(struct net_device *netdev, void *p)
{
	struct sockaddr *addr = p;
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	vmxnet3_write_mac_addr(adapter, addr->sa_data);

	return 0;
}
/* ==================== initialization and cleanup routines ============ */

static int
vmxnet3_alloc_pci_resources(struct vmxnet3_adapter *adapter, bool *dma64)
{
	int err;
	unsigned long mmio_start, mmio_len;
	struct pci_dev *pdev = adapter->pdev;

	err = pci_enable_device(pdev);
	if (err) {
		printk(KERN_ERR "Failed to enable adapter %s: error %d\n",
		       pci_name(pdev), err);
		return err;
	}

	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
		if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
			printk(KERN_ERR "pci_set_consistent_dma_mask failed "
			       "for adapter %s\n", pci_name(pdev));
			err = -EIO;
			goto err_set_mask;
		}
		*dma64 = true;
	} else {
		if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
			printk(KERN_ERR "pci_set_dma_mask failed for adapter "
			       "%s\n", pci_name(pdev));
			err = -EIO;
			goto err_set_mask;
		}
		*dma64 = false;
	}

	err = pci_request_selected_regions(pdev, (1 << 2) - 1,
					   vmxnet3_driver_name);
	if (err) {
		printk(KERN_ERR "Failed to request region for adapter %s: "
		       "error %d\n", pci_name(pdev), err);
		goto err_set_mask;
	}

	pci_set_master(pdev);

	mmio_start = pci_resource_start(pdev, 0);
	mmio_len = pci_resource_len(pdev, 0);
	adapter->hw_addr0 = ioremap(mmio_start, mmio_len);
	if (!adapter->hw_addr0) {
		printk(KERN_ERR "Failed to map bar0 for adapter %s\n",
		       pci_name(pdev));
		err = -EIO;
		goto err_ioremap;
	}

	mmio_start = pci_resource_start(pdev, 1);
	mmio_len = pci_resource_len(pdev, 1);
	adapter->hw_addr1 = ioremap(mmio_start, mmio_len);
	if (!adapter->hw_addr1) {
		printk(KERN_ERR "Failed to map bar1 for adapter %s\n",
		       pci_name(pdev));
		err = -EIO;
		goto err_bar1;
	}
	return 0;

err_bar1:
	iounmap(adapter->hw_addr0);
err_ioremap:
	pci_release_selected_regions(pdev, (1 << 2) - 1);
err_set_mask:
	pci_disable_device(pdev);
	return err;
}
static void
vmxnet3_free_pci_resources(struct vmxnet3_adapter *adapter)
{
	BUG_ON(!adapter->pdev);

	iounmap(adapter->hw_addr0);
	iounmap(adapter->hw_addr1);
	pci_release_selected_regions(adapter->pdev, (1 << 2) - 1);
	pci_disable_device(adapter->pdev);
}
static void
vmxnet3_adjust_rx_ring_size(struct vmxnet3_adapter *adapter)
{
	size_t sz;

	if (adapter->netdev->mtu <= VMXNET3_MAX_SKB_BUF_SIZE -
				    VMXNET3_MAX_ETH_HDR_SIZE) {
		adapter->skb_buf_size = adapter->netdev->mtu +
					VMXNET3_MAX_ETH_HDR_SIZE;
		if (adapter->skb_buf_size < VMXNET3_MIN_T0_BUF_SIZE)
			adapter->skb_buf_size = VMXNET3_MIN_T0_BUF_SIZE;

		adapter->rx_buf_per_pkt = 1;
	} else {
		adapter->skb_buf_size = VMXNET3_MAX_SKB_BUF_SIZE;
		sz = adapter->netdev->mtu - VMXNET3_MAX_SKB_BUF_SIZE +
					    VMXNET3_MAX_ETH_HDR_SIZE;
		adapter->rx_buf_per_pkt = 1 + (sz + PAGE_SIZE - 1) / PAGE_SIZE;
	}

	/*
	 * for simplicity, force the ring0 size to be a multiple of
	 * rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN
	 */
	sz = adapter->rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN;
	adapter->rx_queue.rx_ring[0].size = (adapter->rx_queue.rx_ring[0].size +
					     sz - 1) / sz * sz;
	adapter->rx_queue.rx_ring[0].size = min_t(u32,
					    adapter->rx_queue.rx_ring[0].size,
					    VMXNET3_RX_RING_MAX_SIZE / sz * sz);
}
static int
vmxnet3_create_queues(struct vmxnet3_adapter *adapter, u32 tx_ring_size,
		      u32 rx_ring_size, u32 rx_ring2_size)
{
	int err;

	adapter->tx_queue.tx_ring.size   = tx_ring_size;
	adapter->tx_queue.data_ring.size = tx_ring_size;
	adapter->tx_queue.comp_ring.size = tx_ring_size;
	adapter->tx_queue.shared = &adapter->tqd_start->ctrl;
	adapter->tx_queue.stopped = true;
	err = vmxnet3_tq_create(&adapter->tx_queue, adapter);
	if (err)
		return err;

	adapter->rx_queue.rx_ring[0].size = rx_ring_size;
	adapter->rx_queue.rx_ring[1].size = rx_ring2_size;
	vmxnet3_adjust_rx_ring_size(adapter);
	adapter->rx_queue.comp_ring.size  = adapter->rx_queue.rx_ring[0].size +
					    adapter->rx_queue.rx_ring[1].size;
	adapter->rx_queue.qid  = 0;
	adapter->rx_queue.qid2 = 1;
	adapter->rx_queue.shared = &adapter->rqd_start->ctrl;
	err = vmxnet3_rq_create(&adapter->rx_queue, adapter);
	if (err)
		vmxnet3_tq_destroy(&adapter->tx_queue, adapter);

	return err;
}
static int
vmxnet3_open(struct net_device *netdev)
{
	struct vmxnet3_adapter *adapter;
	int err;

	adapter = netdev_priv(netdev);

	spin_lock_init(&adapter->tx_queue.tx_lock);

	err = vmxnet3_create_queues(adapter, VMXNET3_DEF_TX_RING_SIZE,
				    VMXNET3_DEF_RX_RING_SIZE,
				    VMXNET3_DEF_RX_RING_SIZE);
	if (err)
		goto queue_err;

	err = vmxnet3_activate_dev(adapter);
	if (err)
		goto activate_err;

	return 0;

activate_err:
	vmxnet3_rq_destroy(&adapter->rx_queue, adapter);
	vmxnet3_tq_destroy(&adapter->tx_queue, adapter);
queue_err:
	return err;
}
static int
vmxnet3_close(struct net_device *netdev)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);

	/*
	 * Reset_work may be in the middle of resetting the device, wait for its
	 * completion.
	 */
	while (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state))
		msleep(1);

	vmxnet3_quiesce_dev(adapter);

	vmxnet3_rq_destroy(&adapter->rx_queue, adapter);
	vmxnet3_tq_destroy(&adapter->tx_queue, adapter);

	clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);

	return 0;
}
void
vmxnet3_force_close(struct vmxnet3_adapter *adapter)
{
	/*
	 * we must clear VMXNET3_STATE_BIT_RESETTING, otherwise
	 * vmxnet3_close() will deadlock.
	 */
	BUG_ON(test_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state));

	/* we need to enable NAPI, otherwise dev_close will deadlock */
	napi_enable(&adapter->napi);
	dev_close(adapter->netdev);
}
static int
vmxnet3_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
	int err = 0;

	if (new_mtu < VMXNET3_MIN_MTU || new_mtu > VMXNET3_MAX_MTU)
		return -EINVAL;

	if (new_mtu > 1500 && !adapter->jumbo_frame)
		return -EINVAL;

	netdev->mtu = new_mtu;

	/*
	 * Reset_work may be in the middle of resetting the device, wait for
	 * its completion.
	 */
	while (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state))
		msleep(1);

	if (netif_running(netdev)) {
		vmxnet3_quiesce_dev(adapter);
		vmxnet3_reset_dev(adapter);

		/* we need to re-create the rx queue based on the new mtu */
		vmxnet3_rq_destroy(&adapter->rx_queue, adapter);
		vmxnet3_adjust_rx_ring_size(adapter);
		adapter->rx_queue.comp_ring.size =
					adapter->rx_queue.rx_ring[0].size +
					adapter->rx_queue.rx_ring[1].size;
		err = vmxnet3_rq_create(&adapter->rx_queue, adapter);
		if (err) {
			printk(KERN_ERR "%s: failed to re-create rx queue,"
			       " error %d. Closing it.\n", netdev->name, err);
			goto out;
		}

		err = vmxnet3_activate_dev(adapter);
		if (err) {
			printk(KERN_ERR "%s: failed to re-activate, error %d. "
			       "Closing it\n", netdev->name, err);
			goto out;
		}
	}

out:
	clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
	if (err)
		vmxnet3_force_close(adapter);

	return err;
}
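
/*
 * Advertise scatter-gather, checksum offload, VLAN offload, TSO and LRO,
 * plus NETIF_F_HIGHDMA when 64-bit DMA is available.
 */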
static void
vmxnet3_declare_features(struct vmxnet3_adapter *adapter, bool dma64)
{
	struct net_device *netdev = adapter->netdev;

	netdev->features = NETIF_F_SG |
		NETIF_F_HW_CSUM |
		NETIF_F_HW_VLAN_TX |
		NETIF_F_HW_VLAN_RX |
		NETIF_F_HW_VLAN_FILTER |
		NETIF_F_TSO |
		NETIF_F_TSO6 |
		NETIF_F_LRO;

	printk(KERN_INFO "features: sg csum vlan jf tso tsoIPv6 lro");

	adapter->rxcsum = true;
	adapter->jumbo_frame = true;
	adapter->lro = true;

	if (dma64) {
		netdev->features |= NETIF_F_HIGHDMA;
		printk(" highDMA");
	}

	netdev->vlan_features = netdev->features;
	printk("\n");
}

static void
vmxnet3_read_mac_addr(struct vmxnet3_adapter *adapter, u8 *mac)
{
	u32 tmp;

	tmp = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_MACL);
	*(u32 *)mac = tmp;

	tmp = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_MACH);
	mac[4] = tmp & 0xff;
	mac[5] = (tmp >> 8) & 0xff;
}
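
/*
 * Read the interrupt configuration recommended by the device, then try
 * MSI-X first, fall back to MSI, and finally to INTx.
 */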
static void
vmxnet3_alloc_intr_resources(struct vmxnet3_adapter *adapter)
{
	u32 cfg;

	/* intr settings */
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
			       VMXNET3_CMD_GET_CONF_INTR);
	cfg = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
	adapter->intr.type = cfg & 0x3;
	adapter->intr.mask_mode = (cfg >> 2) & 0x3;

	if (adapter->intr.type == VMXNET3_IT_AUTO) {
		adapter->intr.type = VMXNET3_IT_MSIX;
	}

#ifdef CONFIG_PCI_MSI
	if (adapter->intr.type == VMXNET3_IT_MSIX) {
		int err;

		adapter->intr.msix_entries[0].entry = 0;
		err = pci_enable_msix(adapter->pdev, adapter->intr.msix_entries,
				      VMXNET3_LINUX_MAX_MSIX_VECT);
		if (!err) {
			adapter->intr.num_intrs = 1;
			adapter->intr.type = VMXNET3_IT_MSIX;
			return;
		}

		adapter->intr.type = VMXNET3_IT_MSI;
	}

	if (adapter->intr.type == VMXNET3_IT_MSI) {
		int err;
		err = pci_enable_msi(adapter->pdev);
		if (!err) {
			adapter->intr.num_intrs = 1;
			return;
		}
	}
#endif /* CONFIG_PCI_MSI */

	adapter->intr.type = VMXNET3_IT_INTX;

	/* INT-X related setting */
	adapter->intr.num_intrs = 1;
}

static void
vmxnet3_free_intr_resources(struct vmxnet3_adapter *adapter)
{
	if (adapter->intr.type == VMXNET3_IT_MSIX)
		pci_disable_msix(adapter->pdev);
	else if (adapter->intr.type == VMXNET3_IT_MSI)
		pci_disable_msi(adapter->pdev);
	else
		BUG_ON(adapter->intr.type != VMXNET3_IT_INTX);
}

static void
vmxnet3_tx_timeout(struct net_device *netdev)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
	adapter->tx_timeout_count++;

	printk(KERN_ERR "%s: tx hang\n", adapter->netdev->name);
	schedule_work(&adapter->work);
}
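
/*
 * Work item scheduled by vmxnet3_tx_timeout(): quiesce, reset and
 * re-activate the device, unless it was closed in the meantime.
 */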
static void
vmxnet3_reset_work(struct work_struct *data)
{
	struct vmxnet3_adapter *adapter;

	adapter = container_of(data, struct vmxnet3_adapter, work);

	/* if another thread is resetting the device, no need to proceed */
	if (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state))
		return;

	/* if the device is closed, we must leave it alone */
	if (netif_running(adapter->netdev)) {
		printk(KERN_INFO "%s: resetting\n", adapter->netdev->name);
		vmxnet3_quiesce_dev(adapter);
		vmxnet3_reset_dev(adapter);
		vmxnet3_activate_dev(adapter);
	} else {
		printk(KERN_INFO "%s: already closed\n", adapter->netdev->name);
	}

	clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
}
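
/*
 * PCI probe: allocate the net_device and the DMA-able regions shared with
 * the device, verify hardware/UPT version compatibility, and register the
 * netdev.
 */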
static int __devinit
vmxnet3_probe_device(struct pci_dev *pdev,
		     const struct pci_device_id *id)
{
	static const struct net_device_ops vmxnet3_netdev_ops = {
		.ndo_open = vmxnet3_open,
		.ndo_stop = vmxnet3_close,
		.ndo_start_xmit = vmxnet3_xmit_frame,
		.ndo_set_mac_address = vmxnet3_set_mac_addr,
		.ndo_change_mtu = vmxnet3_change_mtu,
		.ndo_get_stats = vmxnet3_get_stats,
		.ndo_tx_timeout = vmxnet3_tx_timeout,
		.ndo_set_multicast_list = vmxnet3_set_mc,
		.ndo_vlan_rx_register = vmxnet3_vlan_rx_register,
		.ndo_vlan_rx_add_vid = vmxnet3_vlan_rx_add_vid,
		.ndo_vlan_rx_kill_vid = vmxnet3_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
		.ndo_poll_controller = vmxnet3_netpoll,
#endif
	};
	int err;
	bool dma64 = false; /* stupid gcc */
	u32 ver;
	struct net_device *netdev;
	struct vmxnet3_adapter *adapter;
	u8 mac[ETH_ALEN];

	netdev = alloc_etherdev(sizeof(struct vmxnet3_adapter));
	if (!netdev) {
		printk(KERN_ERR "Failed to alloc ethernet device for adapter "
		       "%s\n", pci_name(pdev));
		return -ENOMEM;
	}

	pci_set_drvdata(pdev, netdev);
	adapter = netdev_priv(netdev);
	adapter->netdev = netdev;
	adapter->pdev = pdev;

	adapter->shared = pci_alloc_consistent(adapter->pdev,
					       sizeof(struct Vmxnet3_DriverShared),
					       &adapter->shared_pa);
	if (!adapter->shared) {
		printk(KERN_ERR "Failed to allocate memory for %s\n",
		       pci_name(pdev));
		err = -ENOMEM;
		goto err_alloc_shared;
	}

	adapter->tqd_start = pci_alloc_consistent(adapter->pdev,
						  sizeof(struct Vmxnet3_TxQueueDesc) +
						  sizeof(struct Vmxnet3_RxQueueDesc),
						  &adapter->queue_desc_pa);

	if (!adapter->tqd_start) {
		printk(KERN_ERR "Failed to allocate memory for %s\n",
		       pci_name(pdev));
		err = -ENOMEM;
		goto err_alloc_queue_desc;
	}
	adapter->rqd_start = (struct Vmxnet3_RxQueueDesc *)(adapter->tqd_start
							    + 1);

	adapter->pm_conf = kmalloc(sizeof(struct Vmxnet3_PMConf), GFP_KERNEL);
	if (adapter->pm_conf == NULL) {
		printk(KERN_ERR "Failed to allocate memory for %s\n",
		       pci_name(pdev));
		err = -ENOMEM;
		goto err_alloc_pm;
	}

	err = vmxnet3_alloc_pci_resources(adapter, &dma64);
	if (err < 0)
		goto err_alloc_pci;

	ver = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_VRRS);
	if (ver & 1) {
		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_VRRS, 1);
	} else {
		printk(KERN_ERR "Incompatible h/w version (0x%x) for adapter"
		       " %s\n", ver, pci_name(pdev));
		err = -EBUSY;
		goto err_ver;
	}

	ver = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_UVRS);
	if (ver & 1) {
		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_UVRS, 1);
	} else {
		printk(KERN_ERR "Incompatible upt version (0x%x) for "
		       "adapter %s\n", ver, pci_name(pdev));
		err = -EBUSY;
		goto err_ver;
	}

	vmxnet3_declare_features(adapter, dma64);

	adapter->dev_number = atomic_read(&devices_found);
	vmxnet3_alloc_intr_resources(adapter);

	vmxnet3_read_mac_addr(adapter, mac);
	memcpy(netdev->dev_addr, mac, netdev->addr_len);

	netdev->netdev_ops = &vmxnet3_netdev_ops;
	netdev->watchdog_timeo = 5 * HZ;
	vmxnet3_set_ethtool_ops(netdev);

	INIT_WORK(&adapter->work, vmxnet3_reset_work);

	netif_napi_add(netdev, &adapter->napi, vmxnet3_poll, 64);
	SET_NETDEV_DEV(netdev, &pdev->dev);
	err = register_netdev(netdev);

	if (err) {
		printk(KERN_ERR "Failed to register adapter %s\n",
		       pci_name(pdev));
		goto err_register;
	}

	set_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state);
	vmxnet3_check_link(adapter, false);
	atomic_inc(&devices_found);
	return 0;

err_register:
	vmxnet3_free_intr_resources(adapter);
err_ver:
	vmxnet3_free_pci_resources(adapter);
err_alloc_pci:
	kfree(adapter->pm_conf);
err_alloc_pm:
	pci_free_consistent(adapter->pdev, sizeof(struct Vmxnet3_TxQueueDesc) +
			    sizeof(struct Vmxnet3_RxQueueDesc),
			    adapter->tqd_start, adapter->queue_desc_pa);
err_alloc_queue_desc:
	pci_free_consistent(adapter->pdev, sizeof(struct Vmxnet3_DriverShared),
			    adapter->shared, adapter->shared_pa);
err_alloc_shared:
	pci_set_drvdata(pdev, NULL);
	free_netdev(netdev);
	return err;
}
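
/*
 * PCI remove: undo vmxnet3_probe_device() in reverse order.
 */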
static void __devexit
vmxnet3_remove_device(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);

	flush_scheduled_work();

	unregister_netdev(netdev);

	vmxnet3_free_intr_resources(adapter);
	vmxnet3_free_pci_resources(adapter);
	kfree(adapter->pm_conf);
	pci_free_consistent(adapter->pdev, sizeof(struct Vmxnet3_TxQueueDesc) +
			    sizeof(struct Vmxnet3_RxQueueDesc),
			    adapter->tqd_start, adapter->queue_desc_pa);
	pci_free_consistent(adapter->pdev, sizeof(struct Vmxnet3_DriverShared),
			    adapter->shared, adapter->shared_pa);
	free_netdev(netdev);
}
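
/*
 * Power management: program the device's wake-up filters (unicast, ARP,
 * magic packet) before powering down; resume clears them again.
 */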
#ifdef CONFIG_PM

static int
vmxnet3_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
	struct Vmxnet3_PMConf *pmConf;
	struct ethhdr *ehdr;
	struct arphdr *ahdr;
	u8 *arpreq;
	struct in_device *in_dev;
	struct in_ifaddr *ifa;
	int i = 0;

	if (!netif_running(netdev))
		return 0;

	vmxnet3_disable_all_intrs(adapter);
	vmxnet3_free_irqs(adapter);
	vmxnet3_free_intr_resources(adapter);

	netif_device_detach(netdev);
	netif_stop_queue(netdev);

	/* Create wake-up filters. */
	pmConf = adapter->pm_conf;
	memset(pmConf, 0, sizeof(*pmConf));

	if (adapter->wol & WAKE_UCAST) {
		pmConf->filters[i].patternSize = ETH_ALEN;
		pmConf->filters[i].maskSize = 1;
		memcpy(pmConf->filters[i].pattern, netdev->dev_addr, ETH_ALEN);
		pmConf->filters[i].mask[0] = 0x3F; /* LSB ETH_ALEN bits */

		pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_FILTER;
		i++;
	}

	if (adapter->wol & WAKE_ARP) {
		in_dev = in_dev_get(netdev);
		if (!in_dev)
			goto skip_arp;

		ifa = (struct in_ifaddr *)in_dev->ifa_list;
		if (!ifa)
			goto skip_arp;

		pmConf->filters[i].patternSize = ETH_HLEN + /* Ethernet header*/
			sizeof(struct arphdr) +		/* ARP header */
			2 * ETH_ALEN +		/* 2 Ethernet addresses*/
			2 * sizeof(u32);	/*2 IPv4 addresses */
		pmConf->filters[i].maskSize =
			(pmConf->filters[i].patternSize - 1) / 8 + 1;

		/* ETH_P_ARP in Ethernet header. */
		ehdr = (struct ethhdr *)pmConf->filters[i].pattern;
		ehdr->h_proto = htons(ETH_P_ARP);

		/* ARPOP_REQUEST in ARP header. */
		ahdr = (struct arphdr *)&pmConf->filters[i].pattern[ETH_HLEN];
		ahdr->ar_op = htons(ARPOP_REQUEST);
		arpreq = (u8 *)(ahdr + 1);

		/* The Unicast IPv4 address in 'tip' field. */
		arpreq += 2 * ETH_ALEN + sizeof(u32);
		*(u32 *)arpreq = ifa->ifa_address;

		/* The mask for the relevant bits. */
		pmConf->filters[i].mask[0] = 0x00;
		pmConf->filters[i].mask[1] = 0x30; /* ETH_P_ARP */
		pmConf->filters[i].mask[2] = 0x30; /* ARPOP_REQUEST */
		pmConf->filters[i].mask[3] = 0x00;
		pmConf->filters[i].mask[4] = 0xC0; /* IPv4 TIP */
		pmConf->filters[i].mask[5] = 0x03; /* IPv4 TIP */
		in_dev_put(in_dev);

		pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_FILTER;
		i++;
	}

skip_arp:
	if (adapter->wol & WAKE_MAGIC)
		pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_MAGIC;

	pmConf->numFilters = i;

	adapter->shared->devRead.pmConfDesc.confVer = cpu_to_le32(1);
	adapter->shared->devRead.pmConfDesc.confLen = cpu_to_le32(sizeof(
								  *pmConf));
	adapter->shared->devRead.pmConfDesc.confPA = cpu_to_le64(virt_to_phys(
								 pmConf));

	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
			       VMXNET3_CMD_UPDATE_PMCFG);

	pci_save_state(pdev);
	pci_enable_wake(pdev, pci_choose_state(pdev, PMSG_SUSPEND),
			adapter->wol);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, PMSG_SUSPEND));

	return 0;
}
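
/*
 * Clear the wake-up filters set at suspend time, restore PCI state and
 * re-enable interrupts.
 */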
static int
vmxnet3_resume(struct device *device)
{
	int err;
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
	struct Vmxnet3_PMConf *pmConf;

	if (!netif_running(netdev))
		return 0;

	/* Destroy wake-up filters. */
	pmConf = adapter->pm_conf;
	memset(pmConf, 0, sizeof(*pmConf));

	adapter->shared->devRead.pmConfDesc.confVer = cpu_to_le32(1);
	adapter->shared->devRead.pmConfDesc.confLen = cpu_to_le32(sizeof(
								  *pmConf));
	adapter->shared->devRead.pmConfDesc.confPA = cpu_to_le64(virt_to_phys(
								 pmConf));

	netif_device_attach(netdev);
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	err = pci_enable_device_mem(pdev);
	if (err != 0)
		return err;

	pci_enable_wake(pdev, PCI_D0, 0);

	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
			       VMXNET3_CMD_UPDATE_PMCFG);
	vmxnet3_alloc_intr_resources(adapter);
	vmxnet3_request_irqs(adapter);
	vmxnet3_enable_all_intrs(adapter);

	return 0;
}

static const struct dev_pm_ops vmxnet3_pm_ops = {
	.suspend = vmxnet3_suspend,
	.resume = vmxnet3_resume,
};
#endif

static struct pci_driver vmxnet3_driver = {
	.name		= vmxnet3_driver_name,
	.id_table	= vmxnet3_pciid_table,
	.probe		= vmxnet3_probe_device,
	.remove		= __devexit_p(vmxnet3_remove_device),
#ifdef CONFIG_PM
	.driver.pm	= &vmxnet3_pm_ops,
#endif
};

static int __init
vmxnet3_init_module(void)
{
	printk(KERN_INFO "%s - version %s\n", VMXNET3_DRIVER_DESC,
	       VMXNET3_DRIVER_VERSION_REPORT);
	return pci_register_driver(&vmxnet3_driver);
}

module_init(vmxnet3_init_module);

static void
vmxnet3_exit_module(void)
{
	pci_unregister_driver(&vmxnet3_driver);
}

module_exit(vmxnet3_exit_module);

MODULE_AUTHOR("VMware, Inc.");
MODULE_DESCRIPTION(VMXNET3_DRIVER_DESC);
MODULE_LICENSE("GPL v2");
MODULE_VERSION(VMXNET3_DRIVER_VERSION_STRING);