/*
 * Linux driver for VMware's vmxnet3 ethernet NIC.
 *
 * Copyright (C) 2008-2009, VMware, Inc. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; version 2 of the License and no later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Maintained by: Shreyas Bhatewara <pv-drivers@vmware.com>
 *
 */

#include <net/ip6_checksum.h>

#include "vmxnet3_int.h"
char vmxnet3_driver_name[] = "vmxnet3";
#define VMXNET3_DRIVER_DESC "VMware vmxnet3 virtual NIC driver"

/*
 * PCI Device ID Table
 * Last entry must be all 0s
 */
static DEFINE_PCI_DEVICE_TABLE(vmxnet3_pciid_table) = {
	{PCI_VDEVICE(VMWARE, PCI_DEVICE_ID_VMWARE_VMXNET3)},
	{0}
};

MODULE_DEVICE_TABLE(pci, vmxnet3_pciid_table);

static atomic_t devices_found;

#define VMXNET3_MAX_DEVICES 10
static int enable_mq = 1;
static int irq_share_mode;
static void
vmxnet3_write_mac_addr(struct vmxnet3_adapter *adapter, u8 *mac);
/*
 *    Enable/Disable the given intr
 */
static void
vmxnet3_enable_intr(struct vmxnet3_adapter *adapter, unsigned intr_idx)
{
	VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_IMR + intr_idx * 8, 0);
}


static void
vmxnet3_disable_intr(struct vmxnet3_adapter *adapter, unsigned intr_idx)
{
	VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_IMR + intr_idx * 8, 1);
}
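
/*
 * For example, masking vector 2 through vmxnet3_disable_intr() writes 1 to
 * BAR0 offset VMXNET3_REG_IMR + 2 * 8; each intr owns an 8-byte-strided
 * IMR register, so vectors never share a mask word.
 */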
/*
 *    Enable/Disable all intrs used by the device
 */
static void
vmxnet3_enable_all_intrs(struct vmxnet3_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->intr.num_intrs; i++)
		vmxnet3_enable_intr(adapter, i);
	adapter->shared->devRead.intrConf.intrCtrl &=
				cpu_to_le32(~VMXNET3_IC_DISABLE_ALL);
}


static void
vmxnet3_disable_all_intrs(struct vmxnet3_adapter *adapter)
{
	int i;

	adapter->shared->devRead.intrConf.intrCtrl |=
				cpu_to_le32(VMXNET3_IC_DISABLE_ALL);
	for (i = 0; i < adapter->intr.num_intrs; i++)
		vmxnet3_disable_intr(adapter, i);
}
static void
vmxnet3_ack_events(struct vmxnet3_adapter *adapter, u32 events)
{
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_ECR, events);
}
static bool
vmxnet3_tq_stopped(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
{
	return tq->stopped;
}


static void
vmxnet3_tq_start(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
{
	tq->stopped = false;
	netif_start_subqueue(adapter->netdev, tq - adapter->tx_queue);
}


static void
vmxnet3_tq_wake(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
{
	tq->stopped = false;
	netif_wake_subqueue(adapter->netdev, (tq - adapter->tx_queue));
}


static void
vmxnet3_tq_stop(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
{
	tq->stopped = true;
	tq->num_stop++;
	netif_stop_subqueue(adapter->netdev, (tq - adapter->tx_queue));
}
/*
 * Check the link state. This may start or stop the tx queue.
 */
static void
vmxnet3_check_link(struct vmxnet3_adapter *adapter, bool affectTxQueue)
{
	u32 ret;
	int i;
	unsigned long flags;

	spin_lock_irqsave(&adapter->cmd_lock, flags);
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_LINK);
	ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
	spin_unlock_irqrestore(&adapter->cmd_lock, flags);

	adapter->link_speed = ret >> 16;
	if (ret & 1) { /* Link is up. */
		printk(KERN_INFO "%s: NIC Link is Up %d Mbps\n",
		       adapter->netdev->name, adapter->link_speed);
		if (!netif_carrier_ok(adapter->netdev))
			netif_carrier_on(adapter->netdev);

		if (affectTxQueue) {
			for (i = 0; i < adapter->num_tx_queues; i++)
				vmxnet3_tq_start(&adapter->tx_queue[i],
						 adapter);
		}
	} else {
		printk(KERN_INFO "%s: NIC Link is Down\n",
		       adapter->netdev->name);
		if (netif_carrier_ok(adapter->netdev))
			netif_carrier_off(adapter->netdev);

		if (affectTxQueue) {
			for (i = 0; i < adapter->num_tx_queues; i++)
				vmxnet3_tq_stop(&adapter->tx_queue[i], adapter);
		}
	}
}
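
/*
 * The GET_LINK result packs the state into one u32: bit 0 is link-up and
 * the top 16 bits carry the speed in Mbps. For example, ret == 0x27100001
 * decodes as link up at 0x2710 (10000) Mbps, while ret == 0 reports the
 * link as down.
 */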
static void
vmxnet3_process_events(struct vmxnet3_adapter *adapter)
{
	int i;
	unsigned long flags;
	u32 events = le32_to_cpu(adapter->shared->ecr);
	if (!events)
		return;

	vmxnet3_ack_events(adapter, events);

	/* Check if link state has changed */
	if (events & VMXNET3_ECR_LINK)
		vmxnet3_check_link(adapter, true);

	/* Check if there is an error on xmit/recv queues */
	if (events & (VMXNET3_ECR_TQERR | VMXNET3_ECR_RQERR)) {
		spin_lock_irqsave(&adapter->cmd_lock, flags);
		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
				       VMXNET3_CMD_GET_QUEUE_STATUS);
		spin_unlock_irqrestore(&adapter->cmd_lock, flags);

		for (i = 0; i < adapter->num_tx_queues; i++)
			if (adapter->tqd_start[i].status.stopped)
				dev_err(&adapter->netdev->dev,
					"%s: tq[%d] error 0x%x\n",
					adapter->netdev->name, i, le32_to_cpu(
					adapter->tqd_start[i].status.error));
		for (i = 0; i < adapter->num_rx_queues; i++)
			if (adapter->rqd_start[i].status.stopped)
				dev_err(&adapter->netdev->dev,
					"%s: rq[%d] error 0x%x\n",
					adapter->netdev->name, i,
					adapter->rqd_start[i].status.error);

		schedule_work(&adapter->work);
	}
}
#ifdef __BIG_ENDIAN_BITFIELD
/*
 * The device expects the bitfields in shared structures to be written in
 * little endian. When CPU is big endian, the following routines are used to
 * correctly read and write into ABI.
 * The general technique used here is: double word bitfields are defined in
 * opposite order for big endian architecture. Then before reading them in
 * driver the complete double word is translated using le32_to_cpu. Similarly,
 * after the driver writes into bitfields, cpu_to_le32 is used to translate
 * the double words into the required format.
 * In order to avoid touching bits in shared structure more than once,
 * temporary descriptors are used. These are passed as srcDesc to the
 * following functions.
 */
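
/*
 * Concretely (a sketch of the technique described above): on a big-endian
 * CPU a tx completion's gen bit is never read through the bitfield itself;
 * the containing double word is byte-swapped first and the bit extracted
 * by position:
 *
 *	gen = get_bitfield32((const __le32 *)tcd + VMXNET3_TCD_GEN_DWORD_SHIFT,
 *			     VMXNET3_TCD_GEN_SHIFT, VMXNET3_TCD_GEN_SIZE);
 *
 * which is what the VMXNET3_TCD_GET_GEN() wrapper below expands to.
 */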
static void vmxnet3_RxDescToCPU(const struct Vmxnet3_RxDesc *srcDesc,
				struct Vmxnet3_RxDesc *dstDesc)
{
	u32 *src = (u32 *)srcDesc + 2;
	u32 *dst = (u32 *)dstDesc + 2;
	dstDesc->addr = le64_to_cpu(srcDesc->addr);
	*dst = le32_to_cpu(*src);
	dstDesc->ext1 = le32_to_cpu(srcDesc->ext1);
}
static void vmxnet3_TxDescToLe(const struct Vmxnet3_TxDesc *srcDesc,
			       struct Vmxnet3_TxDesc *dstDesc)
{
	int i;
	u32 *src = (u32 *)(srcDesc + 1);
	u32 *dst = (u32 *)(dstDesc + 1);

	/* Working backwards so that the gen bit is set at the end. */
	for (i = 2; i > 0; i--) {
		src--;
		dst--;
		*dst = cpu_to_le32(*src);
	}
}
static void vmxnet3_RxCompToCPU(const struct Vmxnet3_RxCompDesc *srcDesc,
				struct Vmxnet3_RxCompDesc *dstDesc)
{
	int i;
	u32 *src = (u32 *)srcDesc;
	u32 *dst = (u32 *)dstDesc;
	for (i = 0; i < sizeof(struct Vmxnet3_RxCompDesc) / sizeof(u32); i++) {
		*dst = le32_to_cpu(*src);
		src++;
		dst++;
	}
}
/* Used to read bitfield values from double words. */
static u32 get_bitfield32(const __le32 *bitfield, u32 pos, u32 size)
{
	u32 temp = le32_to_cpu(*bitfield);
	u32 mask = ((1 << size) - 1) << pos;
	temp &= mask;
	temp >>= pos;
	return temp;
}


#endif  /* __BIG_ENDIAN_BITFIELD */
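
/*
 * Worked example: for a one-bit field at position 14, get_bitfield32()
 * computes mask = ((1 << 1) - 1) << 14 = 0x4000, so (temp & mask) >> 14
 * yields 0 or 1 regardless of the surrounding bits in the double word.
 */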
#ifdef __BIG_ENDIAN_BITFIELD

#  define VMXNET3_TXDESC_GET_GEN(txdesc) get_bitfield32(((const __le32 *) \
			txdesc) + VMXNET3_TXD_GEN_DWORD_SHIFT, \
			VMXNET3_TXD_GEN_SHIFT, VMXNET3_TXD_GEN_SIZE)
#  define VMXNET3_TXDESC_GET_EOP(txdesc) get_bitfield32(((const __le32 *) \
			txdesc) + VMXNET3_TXD_EOP_DWORD_SHIFT, \
			VMXNET3_TXD_EOP_SHIFT, VMXNET3_TXD_EOP_SIZE)
#  define VMXNET3_TCD_GET_GEN(tcd) get_bitfield32(((const __le32 *)tcd) + \
			VMXNET3_TCD_GEN_DWORD_SHIFT, VMXNET3_TCD_GEN_SHIFT, \
			VMXNET3_TCD_GEN_SIZE)
#  define VMXNET3_TCD_GET_TXIDX(tcd) get_bitfield32((const __le32 *)tcd, \
			VMXNET3_TCD_TXIDX_SHIFT, VMXNET3_TCD_TXIDX_SIZE)
#  define vmxnet3_getRxComp(dstrcd, rcd, tmp) do { \
			(dstrcd) = (tmp); \
			vmxnet3_RxCompToCPU((rcd), (tmp)); \
		} while (0)
#  define vmxnet3_getRxDesc(dstrxd, rxd, tmp) do { \
			(dstrxd) = (tmp); \
			vmxnet3_RxDescToCPU((rxd), (tmp)); \
		} while (0)

#else

#  define VMXNET3_TXDESC_GET_GEN(txdesc) ((txdesc)->gen)
#  define VMXNET3_TXDESC_GET_EOP(txdesc) ((txdesc)->eop)
#  define VMXNET3_TCD_GET_GEN(tcd) ((tcd)->gen)
#  define VMXNET3_TCD_GET_TXIDX(tcd) ((tcd)->txdIdx)
#  define vmxnet3_getRxComp(dstrcd, rcd, tmp) (dstrcd) = (rcd)
#  define vmxnet3_getRxDesc(dstrxd, rxd, tmp) (dstrxd) = (rxd)

#endif /* __BIG_ENDIAN_BITFIELD */
static void
vmxnet3_unmap_tx_buf(struct vmxnet3_tx_buf_info *tbi,
		     struct pci_dev *pdev)
{
	if (tbi->map_type == VMXNET3_MAP_SINGLE)
		pci_unmap_single(pdev, tbi->dma_addr, tbi->len,
				 PCI_DMA_TODEVICE);
	else if (tbi->map_type == VMXNET3_MAP_PAGE)
		pci_unmap_page(pdev, tbi->dma_addr, tbi->len,
			       PCI_DMA_TODEVICE);
	else
		BUG_ON(tbi->map_type != VMXNET3_MAP_NONE);

	tbi->map_type = VMXNET3_MAP_NONE; /* to help debugging */
}
static int
vmxnet3_unmap_pkt(u32 eop_idx, struct vmxnet3_tx_queue *tq,
		  struct pci_dev *pdev, struct vmxnet3_adapter *adapter)
{
	struct sk_buff *skb;
	int entries = 0;

	/* no out of order completion */
	BUG_ON(tq->buf_info[eop_idx].sop_idx != tq->tx_ring.next2comp);
	BUG_ON(VMXNET3_TXDESC_GET_EOP(&(tq->tx_ring.base[eop_idx].txd)) != 1);

	skb = tq->buf_info[eop_idx].skb;
	BUG_ON(skb == NULL);
	tq->buf_info[eop_idx].skb = NULL;

	VMXNET3_INC_RING_IDX_ONLY(eop_idx, tq->tx_ring.size);

	while (tq->tx_ring.next2comp != eop_idx) {
		vmxnet3_unmap_tx_buf(tq->buf_info + tq->tx_ring.next2comp,
				     pdev);

		/* update next2comp w/o tx_lock. Since we are marking more,
		 * instead of less, tx ring entries avail, the worst case is
		 * that the tx routine incorrectly re-queues a pkt due to
		 * insufficient tx ring entries.
		 */
		vmxnet3_cmd_ring_adv_next2comp(&tq->tx_ring);
		entries++;
	}

	dev_kfree_skb_any(skb);
	return entries;
}
static int
vmxnet3_tq_tx_complete(struct vmxnet3_tx_queue *tq,
		       struct vmxnet3_adapter *adapter)
{
	int completed = 0;
	union Vmxnet3_GenericDesc *gdesc;

	gdesc = tq->comp_ring.base + tq->comp_ring.next2proc;
	while (VMXNET3_TCD_GET_GEN(&gdesc->tcd) == tq->comp_ring.gen) {
		completed += vmxnet3_unmap_pkt(VMXNET3_TCD_GET_TXIDX(
					       &gdesc->tcd), tq, adapter->pdev,
					       adapter);

		vmxnet3_comp_ring_adv_next2proc(&tq->comp_ring);
		gdesc = tq->comp_ring.base + tq->comp_ring.next2proc;
	}

	if (completed) {
		spin_lock(&tq->tx_lock);
		if (unlikely(vmxnet3_tq_stopped(tq, adapter) &&
			     vmxnet3_cmd_ring_desc_avail(&tq->tx_ring) >
			     VMXNET3_WAKE_QUEUE_THRESHOLD(tq) &&
			     netif_carrier_ok(adapter->netdev))) {
			vmxnet3_tq_wake(tq, adapter);
		}
		spin_unlock(&tq->tx_lock);
	}
	return completed;
}
static void
vmxnet3_tq_cleanup(struct vmxnet3_tx_queue *tq,
		   struct vmxnet3_adapter *adapter)
{
	int i;

	while (tq->tx_ring.next2comp != tq->tx_ring.next2fill) {
		struct vmxnet3_tx_buf_info *tbi;

		tbi = tq->buf_info + tq->tx_ring.next2comp;

		vmxnet3_unmap_tx_buf(tbi, adapter->pdev);
		if (tbi->skb) {
			dev_kfree_skb_any(tbi->skb);
			tbi->skb = NULL;
		}
		vmxnet3_cmd_ring_adv_next2comp(&tq->tx_ring);
	}

	/* sanity check, verify all buffers are indeed unmapped and freed */
	for (i = 0; i < tq->tx_ring.size; i++) {
		BUG_ON(tq->buf_info[i].skb != NULL ||
		       tq->buf_info[i].map_type != VMXNET3_MAP_NONE);
	}

	tq->tx_ring.gen = VMXNET3_INIT_GEN;
	tq->tx_ring.next2fill = tq->tx_ring.next2comp = 0;

	tq->comp_ring.gen = VMXNET3_INIT_GEN;
	tq->comp_ring.next2proc = 0;
}
static void
vmxnet3_tq_destroy(struct vmxnet3_tx_queue *tq,
		   struct vmxnet3_adapter *adapter)
{
	if (tq->tx_ring.base) {
		pci_free_consistent(adapter->pdev, tq->tx_ring.size *
				    sizeof(struct Vmxnet3_TxDesc),
				    tq->tx_ring.base, tq->tx_ring.basePA);
		tq->tx_ring.base = NULL;
	}
	if (tq->data_ring.base) {
		pci_free_consistent(adapter->pdev, tq->data_ring.size *
				    sizeof(struct Vmxnet3_TxDataDesc),
				    tq->data_ring.base, tq->data_ring.basePA);
		tq->data_ring.base = NULL;
	}
	if (tq->comp_ring.base) {
		pci_free_consistent(adapter->pdev, tq->comp_ring.size *
				    sizeof(struct Vmxnet3_TxCompDesc),
				    tq->comp_ring.base, tq->comp_ring.basePA);
		tq->comp_ring.base = NULL;
	}
	kfree(tq->buf_info);
	tq->buf_info = NULL;
}
/* Destroy all tx queues */
void
vmxnet3_tq_destroy_all(struct vmxnet3_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		vmxnet3_tq_destroy(&adapter->tx_queue[i], adapter);
}
static void
vmxnet3_tq_init(struct vmxnet3_tx_queue *tq,
		struct vmxnet3_adapter *adapter)
{
	int i;

	/* reset the tx ring contents to 0 and reset the tx ring states */
	memset(tq->tx_ring.base, 0, tq->tx_ring.size *
	       sizeof(struct Vmxnet3_TxDesc));
	tq->tx_ring.next2fill = tq->tx_ring.next2comp = 0;
	tq->tx_ring.gen = VMXNET3_INIT_GEN;

	memset(tq->data_ring.base, 0, tq->data_ring.size *
	       sizeof(struct Vmxnet3_TxDataDesc));

	/* reset the tx comp ring contents to 0 and reset comp ring states */
	memset(tq->comp_ring.base, 0, tq->comp_ring.size *
	       sizeof(struct Vmxnet3_TxCompDesc));
	tq->comp_ring.next2proc = 0;
	tq->comp_ring.gen = VMXNET3_INIT_GEN;

	/* reset the bookkeeping data */
	memset(tq->buf_info, 0, sizeof(tq->buf_info[0]) * tq->tx_ring.size);
	for (i = 0; i < tq->tx_ring.size; i++)
		tq->buf_info[i].map_type = VMXNET3_MAP_NONE;

	/* stats are not reset */
}
static int
vmxnet3_tq_create(struct vmxnet3_tx_queue *tq,
		  struct vmxnet3_adapter *adapter)
{
	BUG_ON(tq->tx_ring.base || tq->data_ring.base ||
	       tq->comp_ring.base || tq->buf_info);

	tq->tx_ring.base = pci_alloc_consistent(adapter->pdev, tq->tx_ring.size
			   * sizeof(struct Vmxnet3_TxDesc),
			   &tq->tx_ring.basePA);
	if (!tq->tx_ring.base) {
		printk(KERN_ERR "%s: failed to allocate tx ring\n",
		       adapter->netdev->name);
		goto err;
	}

	tq->data_ring.base = pci_alloc_consistent(adapter->pdev,
			     tq->data_ring.size *
			     sizeof(struct Vmxnet3_TxDataDesc),
			     &tq->data_ring.basePA);
	if (!tq->data_ring.base) {
		printk(KERN_ERR "%s: failed to allocate data ring\n",
		       adapter->netdev->name);
		goto err;
	}

	tq->comp_ring.base = pci_alloc_consistent(adapter->pdev,
			     tq->comp_ring.size *
			     sizeof(struct Vmxnet3_TxCompDesc),
			     &tq->comp_ring.basePA);
	if (!tq->comp_ring.base) {
		printk(KERN_ERR "%s: failed to allocate tx comp ring\n",
		       adapter->netdev->name);
		goto err;
	}

	tq->buf_info = kcalloc(tq->tx_ring.size, sizeof(tq->buf_info[0]),
			       GFP_KERNEL);
	if (!tq->buf_info) {
		printk(KERN_ERR "%s: failed to allocate tx bufinfo\n",
		       adapter->netdev->name);
		goto err;
	}

	return 0;

err:
	vmxnet3_tq_destroy(tq, adapter);
	return -ENOMEM;
}
static void
vmxnet3_tq_cleanup_all(struct vmxnet3_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		vmxnet3_tq_cleanup(&adapter->tx_queue[i], adapter);
}
/*
 *    starting from ring->next2fill, allocate rx buffers for the given ring
 *    of the rx queue and update the rx desc. stop after @num_to_alloc buffers
 *    are allocated or allocation fails
 */

static int
vmxnet3_rq_alloc_rx_buf(struct vmxnet3_rx_queue *rq, u32 ring_idx,
			int num_to_alloc, struct vmxnet3_adapter *adapter)
{
	int num_allocated = 0;
	struct vmxnet3_rx_buf_info *rbi_base = rq->buf_info[ring_idx];
	struct vmxnet3_cmd_ring *ring = &rq->rx_ring[ring_idx];
	u32 val;

	while (num_allocated < num_to_alloc) {
		struct vmxnet3_rx_buf_info *rbi;
		union Vmxnet3_GenericDesc *gd;

		rbi = rbi_base + ring->next2fill;
		gd = ring->base + ring->next2fill;

		if (rbi->buf_type == VMXNET3_RX_BUF_SKB) {
			if (rbi->skb == NULL) {
				rbi->skb = dev_alloc_skb(rbi->len +
							 NET_IP_ALIGN);
				if (unlikely(rbi->skb == NULL)) {
					rq->stats.rx_buf_alloc_failure++;
					break;
				}
				rbi->skb->dev = adapter->netdev;

				skb_reserve(rbi->skb, NET_IP_ALIGN);
				rbi->dma_addr = pci_map_single(adapter->pdev,
						rbi->skb->data, rbi->len,
						PCI_DMA_FROMDEVICE);
			} else {
				/* rx buffer skipped by the device */
			}
			val = VMXNET3_RXD_BTYPE_HEAD << VMXNET3_RXD_BTYPE_SHIFT;
		} else {
			BUG_ON(rbi->buf_type != VMXNET3_RX_BUF_PAGE ||
			       rbi->len != PAGE_SIZE);

			if (rbi->page == NULL) {
				rbi->page = alloc_page(GFP_ATOMIC);
				if (unlikely(rbi->page == NULL)) {
					rq->stats.rx_buf_alloc_failure++;
					break;
				}
				rbi->dma_addr = pci_map_page(adapter->pdev,
						rbi->page, 0, PAGE_SIZE,
						PCI_DMA_FROMDEVICE);
			} else {
				/* rx buffers skipped by the device */
			}
			val = VMXNET3_RXD_BTYPE_BODY << VMXNET3_RXD_BTYPE_SHIFT;
		}

		BUG_ON(rbi->dma_addr == 0);
		gd->rxd.addr = cpu_to_le64(rbi->dma_addr);
		gd->dword[2] = cpu_to_le32((ring->gen << VMXNET3_RXD_GEN_SHIFT)
					   | val | rbi->len);

		num_allocated++;
		vmxnet3_cmd_ring_adv_next2fill(ring);
	}
	rq->uncommitted[ring_idx] += num_allocated;

	dev_dbg(&adapter->netdev->dev,
		"alloc_rx_buf: %d allocated, next2fill %u, next2comp "
		"%u, uncommitted %u\n", num_allocated, ring->next2fill,
		ring->next2comp, rq->uncommitted[ring_idx]);

	/* so that the device can distinguish a full ring and an empty ring */
	BUG_ON(num_allocated != 0 && ring->next2fill == ring->next2comp);

	return num_allocated;
}
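
/*
 * The full/empty invariant above matters because next2fill == next2comp is
 * the driver's only "ring is empty" marker. Filling every last slot would
 * make a completely full ring look identical to an empty one, which is why
 * the init path below only ever allocates size - 1 buffers per ring.
 */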
static void
vmxnet3_append_frag(struct sk_buff *skb, struct Vmxnet3_RxCompDesc *rcd,
		    struct vmxnet3_rx_buf_info *rbi)
{
	struct skb_frag_struct *frag = skb_shinfo(skb)->frags +
		skb_shinfo(skb)->nr_frags;

	BUG_ON(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS);

	frag->page = rbi->page;
	frag->page_offset = 0;
	frag->size = rcd->len;
	skb->data_len += frag->size;
	skb_shinfo(skb)->nr_frags++;
}
static void
vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx,
		struct vmxnet3_tx_queue *tq, struct pci_dev *pdev,
		struct vmxnet3_adapter *adapter)
{
	u32 dw2, len;
	unsigned long buf_offset;
	int i;
	union Vmxnet3_GenericDesc *gdesc;
	struct vmxnet3_tx_buf_info *tbi = NULL;

	BUG_ON(ctx->copy_size > skb_headlen(skb));

	/* use the previous gen bit for the SOP desc */
	dw2 = (tq->tx_ring.gen ^ 0x1) << VMXNET3_TXD_GEN_SHIFT;

	ctx->sop_txd = tq->tx_ring.base + tq->tx_ring.next2fill;
	gdesc = ctx->sop_txd; /* both loops below can be skipped */

	/* no need to map the buffer if headers are copied */
	if (ctx->copy_size) {
		ctx->sop_txd->txd.addr = cpu_to_le64(tq->data_ring.basePA +
					tq->tx_ring.next2fill *
					sizeof(struct Vmxnet3_TxDataDesc));
		ctx->sop_txd->dword[2] = cpu_to_le32(dw2 | ctx->copy_size);
		ctx->sop_txd->dword[3] = 0;

		tbi = tq->buf_info + tq->tx_ring.next2fill;
		tbi->map_type = VMXNET3_MAP_NONE;

		dev_dbg(&adapter->netdev->dev,
			"txd[%u]: 0x%Lx 0x%x 0x%x\n",
			tq->tx_ring.next2fill,
			le64_to_cpu(ctx->sop_txd->txd.addr),
			ctx->sop_txd->dword[2], ctx->sop_txd->dword[3]);
		vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);

		/* use the right gen for non-SOP desc */
		dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT;
	}

	/* linear part can use multiple tx desc if it's big */
	len = skb_headlen(skb) - ctx->copy_size;
	buf_offset = ctx->copy_size;
	while (len) {
		u32 buf_size;

		if (len < VMXNET3_MAX_TX_BUF_SIZE) {
			buf_size = len;
			dw2 |= len;
		} else {
			buf_size = VMXNET3_MAX_TX_BUF_SIZE;
			/* spec says that for TxDesc.len, 0 == 2^14 */
		}

		tbi = tq->buf_info + tq->tx_ring.next2fill;
		tbi->map_type = VMXNET3_MAP_SINGLE;
		tbi->dma_addr = pci_map_single(adapter->pdev,
				skb->data + buf_offset, buf_size,
				PCI_DMA_TODEVICE);

		tbi->len = buf_size;

		gdesc = tq->tx_ring.base + tq->tx_ring.next2fill;
		BUG_ON(gdesc->txd.gen == tq->tx_ring.gen);

		gdesc->txd.addr = cpu_to_le64(tbi->dma_addr);
		gdesc->dword[2] = cpu_to_le32(dw2);
		gdesc->dword[3] = 0;

		dev_dbg(&adapter->netdev->dev,
			"txd[%u]: 0x%Lx 0x%x 0x%x\n",
			tq->tx_ring.next2fill, le64_to_cpu(gdesc->txd.addr),
			le32_to_cpu(gdesc->dword[2]), gdesc->dword[3]);
		vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);
		dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT;

		len -= buf_size;
		buf_offset += buf_size;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];

		tbi = tq->buf_info + tq->tx_ring.next2fill;
		tbi->map_type = VMXNET3_MAP_PAGE;
		tbi->dma_addr = pci_map_page(adapter->pdev, frag->page,
					     frag->page_offset, frag->size,
					     PCI_DMA_TODEVICE);

		tbi->len = frag->size;

		gdesc = tq->tx_ring.base + tq->tx_ring.next2fill;
		BUG_ON(gdesc->txd.gen == tq->tx_ring.gen);

		gdesc->txd.addr = cpu_to_le64(tbi->dma_addr);
		gdesc->dword[2] = cpu_to_le32(dw2 | frag->size);
		gdesc->dword[3] = 0;

		dev_dbg(&adapter->netdev->dev,
			"txd[%u]: 0x%llu %u %u\n",
			tq->tx_ring.next2fill, le64_to_cpu(gdesc->txd.addr),
			le32_to_cpu(gdesc->dword[2]), gdesc->dword[3]);
		vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);
		dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT;
	}

	ctx->eop_txd = gdesc;

	/* set the last buf_info for the pkt */
	tbi->skb = skb;
	tbi->sop_idx = ctx->sop_txd - tq->tx_ring.base;
}
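
/*
 * Note on the gen-bit dance above: the SOP descriptor is deliberately
 * written with the *previous* generation value, so the device ignores the
 * whole chain while the body and EOP descriptors are being filled in. Only
 * when vmxnet3_tq_xmit() later XORs VMXNET3_TXD_GEN into the SOP dword does
 * the packet become visible, which makes the multi-descriptor update appear
 * atomic to the device.
 */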
/* Init all tx queues */
static void
vmxnet3_tq_init_all(struct vmxnet3_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		vmxnet3_tq_init(&adapter->tx_queue[i], adapter);
}
/*
 *    parse and copy relevant protocol headers:
 *      For a tso pkt, relevant headers are L2/3/4 including options
 *      For a pkt requesting csum offloading, they are L2/3 and may include L4
 *      if it's a TCP/UDP pkt
 *
 *    Returns:
 *       -1:  error happens during parsing
 *        0:  protocol headers parsed, but too big to be copied
 *        1:  protocol headers parsed and copied
 *
 *    Other effects:
 *       1. related *ctx fields are updated.
 *       2. ctx->copy_size is # of bytes copied
 *       3. the portion copied is guaranteed to be in the linear part
 *
 */
static int
vmxnet3_parse_and_copy_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
			   struct vmxnet3_tx_ctx *ctx,
			   struct vmxnet3_adapter *adapter)
{
	struct Vmxnet3_TxDataDesc *tdd;

	if (ctx->mss) {	/* TSO */
		ctx->eth_ip_hdr_size = skb_transport_offset(skb);
		ctx->l4_hdr_size = ((struct tcphdr *)
				   skb_transport_header(skb))->doff * 4;
		ctx->copy_size = ctx->eth_ip_hdr_size + ctx->l4_hdr_size;
	} else {
		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			ctx->eth_ip_hdr_size = skb_checksum_start_offset(skb);

			if (ctx->ipv4) {
				struct iphdr *iph = (struct iphdr *)
						    skb_network_header(skb);
				if (iph->protocol == IPPROTO_TCP)
					ctx->l4_hdr_size = ((struct tcphdr *)
					   skb_transport_header(skb))->doff * 4;
				else if (iph->protocol == IPPROTO_UDP)
					/*
					 * Use tcp header size so that bytes to
					 * be copied are more than required by
					 * the device.
					 */
					ctx->l4_hdr_size =
							sizeof(struct tcphdr);
				else
					ctx->l4_hdr_size = 0;
			} else {
				/* for simplicity, don't copy L4 headers */
				ctx->l4_hdr_size = 0;
			}
			ctx->copy_size = ctx->eth_ip_hdr_size +
					 ctx->l4_hdr_size;
		} else {
			ctx->eth_ip_hdr_size = 0;
			ctx->l4_hdr_size = 0;
			/* copy as much as allowed */
			ctx->copy_size = min((unsigned int)VMXNET3_HDR_COPY_SIZE
					     , skb_headlen(skb));
		}

		/* make sure headers are accessible directly */
		if (unlikely(!pskb_may_pull(skb, ctx->copy_size)))
			goto err;
	}

	if (unlikely(ctx->copy_size > VMXNET3_HDR_COPY_SIZE)) {
		tq->stats.oversized_hdr++;
		ctx->copy_size = 0;
		return 0;
	}

	tdd = tq->data_ring.base + tq->tx_ring.next2fill;

	memcpy(tdd->data, skb->data, ctx->copy_size);
	dev_dbg(&adapter->netdev->dev,
		"copy %u bytes to dataRing[%u]\n",
		ctx->copy_size, tq->tx_ring.next2fill);
	return 1;

err:
	return -1;
}
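
/*
 * As a concrete example of the copy sizing above: for a TSO TCP/IPv4 pkt
 * with no IP or TCP options, eth_ip_hdr_size is 14 + 20 = 34 and
 * l4_hdr_size is doff * 4 = 20, so copy_size = 54 bytes land in the data
 * ring and the device can read the headers without an extra DMA fetch.
 */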
static void
vmxnet3_prepare_tso(struct sk_buff *skb,
		    struct vmxnet3_tx_ctx *ctx)
{
	struct tcphdr *tcph = (struct tcphdr *)skb_transport_header(skb);
	if (ctx->ipv4) {
		struct iphdr *iph = (struct iphdr *)skb_network_header(skb);
		iph->check = 0;
		tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, 0,
						 IPPROTO_TCP, 0);
	} else {
		struct ipv6hdr *iph = (struct ipv6hdr *)skb_network_header(skb);
		tcph->check = ~csum_ipv6_magic(&iph->saddr, &iph->daddr, 0,
					       IPPROTO_TCP, 0);
	}
}
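
/*
 * The seeding above follows the usual TSO contract: tcph->check is primed
 * with the complement of the pseudo-header checksum computed with a zero
 * length field, and the device folds in each segment's payload length as
 * it cuts segments, so every emitted segment carries a correct checksum.
 */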
/*
 * Transmits a pkt thru a given tq
 * Returns:
 *    NETDEV_TX_OK:      descriptors are setup successfully
 *    NETDEV_TX_OK:      error occurred, the pkt is dropped
 *    NETDEV_TX_BUSY:    tx ring is full, queue is stopped
 *
 * Side-effects:
 *    1. tx ring may be changed
 *    2. tq stats may be updated accordingly
 *    3. shared->txNumDeferred may be updated
 */

static int
vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
		struct vmxnet3_adapter *adapter, struct net_device *netdev)
{
	int ret;
	u32 count;
	unsigned long flags;
	struct vmxnet3_tx_ctx ctx;
	union Vmxnet3_GenericDesc *gdesc;
#ifdef __BIG_ENDIAN_BITFIELD
	/* Use temporary descriptor to avoid touching bits multiple times */
	union Vmxnet3_GenericDesc tempTxDesc;
#endif

	/* conservatively estimate # of descriptors to use */
	count = VMXNET3_TXD_NEEDED(skb_headlen(skb)) +
		skb_shinfo(skb)->nr_frags + 1;

	ctx.ipv4 = (skb->protocol == cpu_to_be16(ETH_P_IP));

	ctx.mss = skb_shinfo(skb)->gso_size;
	if (ctx.mss) {
		if (skb_header_cloned(skb)) {
			if (unlikely(pskb_expand_head(skb, 0, 0,
						      GFP_ATOMIC) != 0)) {
				tq->stats.drop_tso++;
				goto drop_pkt;
			}
			tq->stats.copy_skb_header++;
		}
		vmxnet3_prepare_tso(skb, &ctx);
	} else {
		if (unlikely(count > VMXNET3_MAX_TXD_PER_PKT)) {

			/* non-tso pkts must not use more than
			 * VMXNET3_MAX_TXD_PER_PKT entries
			 */
			if (skb_linearize(skb) != 0) {
				tq->stats.drop_too_many_frags++;
				goto drop_pkt;
			}
			tq->stats.linearized++;

			/* recalculate the # of descriptors to use */
			count = VMXNET3_TXD_NEEDED(skb_headlen(skb)) + 1;
		}
	}

	spin_lock_irqsave(&tq->tx_lock, flags);

	if (count > vmxnet3_cmd_ring_desc_avail(&tq->tx_ring)) {
		tq->stats.tx_ring_full++;
		dev_dbg(&adapter->netdev->dev,
			"tx queue stopped on %s, next2comp %u"
			" next2fill %u\n", adapter->netdev->name,
			tq->tx_ring.next2comp, tq->tx_ring.next2fill);

		vmxnet3_tq_stop(tq, adapter);
		spin_unlock_irqrestore(&tq->tx_lock, flags);
		return NETDEV_TX_BUSY;
	}


	ret = vmxnet3_parse_and_copy_hdr(skb, tq, &ctx, adapter);
	if (ret >= 0) {
		BUG_ON(ret <= 0 && ctx.copy_size != 0);
		/* hdrs parsed, check against other limits */
		if (ctx.mss) {
			if (unlikely(ctx.eth_ip_hdr_size + ctx.l4_hdr_size >
				     VMXNET3_MAX_TX_BUF_SIZE)) {
				goto hdr_too_big;
			}
		} else {
			if (skb->ip_summed == CHECKSUM_PARTIAL) {
				if (unlikely(ctx.eth_ip_hdr_size +
					     skb->csum_offset >
					     VMXNET3_MAX_CSUM_OFFSET)) {
					goto hdr_too_big;
				}
			}
		}
	} else {
		tq->stats.drop_hdr_inspect_err++;
		goto unlock_drop_pkt;
	}

	/* fill tx descs related to addr & len */
	vmxnet3_map_pkt(skb, &ctx, tq, adapter->pdev, adapter);

	/* setup the EOP desc */
	ctx.eop_txd->dword[3] = cpu_to_le32(VMXNET3_TXD_CQ | VMXNET3_TXD_EOP);

	/* setup the SOP desc */
#ifdef __BIG_ENDIAN_BITFIELD
	gdesc = &tempTxDesc;
	gdesc->dword[2] = ctx.sop_txd->dword[2];
	gdesc->dword[3] = ctx.sop_txd->dword[3];
#else
	gdesc = ctx.sop_txd;
#endif
	if (ctx.mss) {
		gdesc->txd.hlen = ctx.eth_ip_hdr_size + ctx.l4_hdr_size;
		gdesc->txd.om = VMXNET3_OM_TSO;
		gdesc->txd.msscof = ctx.mss;
		le32_add_cpu(&tq->shared->txNumDeferred, (skb->len -
			     gdesc->txd.hlen + ctx.mss - 1) / ctx.mss);
	} else {
		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			gdesc->txd.hlen = ctx.eth_ip_hdr_size;
			gdesc->txd.om = VMXNET3_OM_CSUM;
			gdesc->txd.msscof = ctx.eth_ip_hdr_size +
					    skb->csum_offset;
		} else {
			gdesc->txd.om = 0;
			gdesc->txd.msscof = 0;
		}
		le32_add_cpu(&tq->shared->txNumDeferred, 1);
	}

	if (vlan_tx_tag_present(skb)) {
		gdesc->txd.ti = 1;
		gdesc->txd.tci = vlan_tx_tag_get(skb);
	}

	/* finally flips the GEN bit of the SOP desc. */
	gdesc->dword[2] = cpu_to_le32(le32_to_cpu(gdesc->dword[2]) ^
						  VMXNET3_TXD_GEN);
#ifdef __BIG_ENDIAN_BITFIELD
	/* Finished updating in bitfields of Tx Desc, so write them in original
	 * place.
	 */
	vmxnet3_TxDescToLe((struct Vmxnet3_TxDesc *)gdesc,
			   (struct Vmxnet3_TxDesc *)ctx.sop_txd);
	gdesc = ctx.sop_txd;
#endif
	dev_dbg(&adapter->netdev->dev,
		"txd[%u]: SOP 0x%Lx 0x%x 0x%x\n",
		(u32)((union Vmxnet3_GenericDesc *)ctx.sop_txd -
		tq->tx_ring.base), le64_to_cpu(gdesc->txd.addr),
		le32_to_cpu(gdesc->dword[2]), le32_to_cpu(gdesc->dword[3]));

	spin_unlock_irqrestore(&tq->tx_lock, flags);

	if (le32_to_cpu(tq->shared->txNumDeferred) >=
					le32_to_cpu(tq->shared->txThreshold)) {
		tq->shared->txNumDeferred = 0;
		VMXNET3_WRITE_BAR0_REG(adapter,
				       VMXNET3_REG_TXPROD + tq->qid * 8,
				       tq->tx_ring.next2fill);
	}

	return NETDEV_TX_OK;

hdr_too_big:
	tq->stats.drop_oversized_hdr++;
unlock_drop_pkt:
	spin_unlock_irqrestore(&tq->tx_lock, flags);
drop_pkt:
	tq->stats.drop_total++;
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}
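
/*
 * The deferral accounting above is worth a worked example: a 64240-byte
 * TSO pkt with mss 1460 and 54 bytes of headers adds
 * (64240 - 54 + 1460 - 1) / 1460 = 44 to txNumDeferred (one per resulting
 * segment), while a plain pkt adds 1. The TXPROD doorbell is only rung
 * once the running total crosses the device-supplied txThreshold, which
 * batches register writes across packets.
 */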
static netdev_tx_t
vmxnet3_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);

	BUG_ON(skb->queue_mapping > adapter->num_tx_queues);
	return vmxnet3_tq_xmit(skb,
			       &adapter->tx_queue[skb->queue_mapping],
			       adapter, netdev);
}
*adapter
,
1081 struct sk_buff
*skb
,
1082 union Vmxnet3_GenericDesc
*gdesc
)
1084 if (!gdesc
->rcd
.cnc
&& adapter
->netdev
->features
& NETIF_F_RXCSUM
) {
1085 /* typical case: TCP/UDP over IP and both csums are correct */
1086 if ((le32_to_cpu(gdesc
->dword
[3]) & VMXNET3_RCD_CSUM_OK
) ==
1087 VMXNET3_RCD_CSUM_OK
) {
1088 skb
->ip_summed
= CHECKSUM_UNNECESSARY
;
1089 BUG_ON(!(gdesc
->rcd
.tcp
|| gdesc
->rcd
.udp
));
1090 BUG_ON(!(gdesc
->rcd
.v4
|| gdesc
->rcd
.v6
));
1091 BUG_ON(gdesc
->rcd
.frg
);
1093 if (gdesc
->rcd
.csum
) {
1094 skb
->csum
= htons(gdesc
->rcd
.csum
);
1095 skb
->ip_summed
= CHECKSUM_PARTIAL
;
1097 skb_checksum_none_assert(skb
);
1101 skb_checksum_none_assert(skb
);
static void
vmxnet3_rx_error(struct vmxnet3_rx_queue *rq, struct Vmxnet3_RxCompDesc *rcd,
		 struct vmxnet3_rx_ctx *ctx, struct vmxnet3_adapter *adapter)
{
	rq->stats.drop_err++;
	if (!rcd->fcs)
		rq->stats.drop_fcs++;

	rq->stats.drop_total++;

	/*
	 * We do not unmap and chain the rx buffer to the skb.
	 * We basically pretend this buffer is not used and will be recycled
	 * by vmxnet3_rq_alloc_rx_buf()
	 */

	/*
	 * ctx->skb may be NULL if this is the first and the only one
	 * desc for the pkt
	 */
	if (ctx->skb)
		dev_kfree_skb_irq(ctx->skb);

	ctx->skb = NULL;
}
static int
vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
		       struct vmxnet3_adapter *adapter, int quota)
{
	static const u32 rxprod_reg[2] = {
		VMXNET3_REG_RXPROD, VMXNET3_REG_RXPROD2
	};
	u32 num_rxd = 0;
	struct Vmxnet3_RxCompDesc *rcd;
	struct vmxnet3_rx_ctx *ctx = &rq->rx_ctx;
#ifdef __BIG_ENDIAN_BITFIELD
	struct Vmxnet3_RxDesc rxCmdDesc;
	struct Vmxnet3_RxCompDesc rxComp;
#endif
	vmxnet3_getRxComp(rcd, &rq->comp_ring.base[rq->comp_ring.next2proc].rcd,
			  &rxComp);
	while (rcd->gen == rq->comp_ring.gen) {
		struct vmxnet3_rx_buf_info *rbi;
		struct sk_buff *skb;
		int num_to_alloc;
		struct Vmxnet3_RxDesc *rxd;
		u32 idx, ring_idx;

		if (num_rxd >= quota) {
			/* we may stop even before we see the EOP desc of
			 * the current pkt
			 */
			break;
		}
		num_rxd++;
		BUG_ON(rcd->rqID != rq->qid && rcd->rqID != rq->qid2);
		idx = rcd->rxdIdx;
		ring_idx = rcd->rqID < adapter->num_rx_queues ? 0 : 1;
		vmxnet3_getRxDesc(rxd, &rq->rx_ring[ring_idx].base[idx].rxd,
				  &rxCmdDesc);
		rbi = rq->buf_info[ring_idx] + idx;

		BUG_ON(rxd->addr != rbi->dma_addr ||
		       rxd->len != rbi->len);

		if (unlikely(rcd->eop && rcd->err)) {
			vmxnet3_rx_error(rq, rcd, ctx, adapter);
			goto rcd_done;
		}

		if (rcd->sop) { /* first buf of the pkt */
			BUG_ON(rxd->btype != VMXNET3_RXD_BTYPE_HEAD ||
			       rcd->rqID != rq->qid);

			BUG_ON(rbi->buf_type != VMXNET3_RX_BUF_SKB);
			BUG_ON(ctx->skb != NULL || rbi->skb == NULL);

			if (unlikely(rcd->len == 0)) {
				/* Pretend the rx buffer is skipped. */
				BUG_ON(!(rcd->sop && rcd->eop));
				dev_dbg(&adapter->netdev->dev,
					"rxRing[%u][%u] 0 length\n",
					ring_idx, idx);
				goto rcd_done;
			}

			ctx->skb = rbi->skb;
			rbi->skb = NULL;

			pci_unmap_single(adapter->pdev, rbi->dma_addr, rbi->len,
					 PCI_DMA_FROMDEVICE);

			skb_put(ctx->skb, rcd->len);
		} else {
			BUG_ON(ctx->skb == NULL);
			/* non SOP buffer must be type 1 in most cases */
			if (rbi->buf_type == VMXNET3_RX_BUF_PAGE) {
				BUG_ON(rxd->btype != VMXNET3_RXD_BTYPE_BODY);

				if (rcd->len) {
					pci_unmap_page(adapter->pdev,
						       rbi->dma_addr, rbi->len,
						       PCI_DMA_FROMDEVICE);

					vmxnet3_append_frag(ctx->skb, rcd, rbi);
					rbi->page = NULL;
				}
			} else {
				/*
				 * The only time a non-SOP buffer is type 0 is
				 * when it's EOP and error flag is raised, which
				 * has already been handled.
				 */
				BUG_ON(true);
			}
		}

		skb = ctx->skb;
		if (rcd->eop) {
			skb->len += skb->data_len;
			skb->truesize += skb->data_len;

			vmxnet3_rx_csum(adapter, skb,
					(union Vmxnet3_GenericDesc *)rcd);
			skb->protocol = eth_type_trans(skb, adapter->netdev);

			if (unlikely(adapter->vlan_grp && rcd->ts)) {
				vlan_hwaccel_receive_skb(skb,
						adapter->vlan_grp, rcd->tci);
			} else {
				netif_receive_skb(skb);
			}

			ctx->skb = NULL;
		}

rcd_done:
		/* device may skip some rx descs */
		rq->rx_ring[ring_idx].next2comp = idx;
		VMXNET3_INC_RING_IDX_ONLY(rq->rx_ring[ring_idx].next2comp,
					  rq->rx_ring[ring_idx].size);

		/* refill rx buffers frequently to avoid starving the h/w */
		num_to_alloc = vmxnet3_cmd_ring_desc_avail(rq->rx_ring +
							   ring_idx);
		if (unlikely(num_to_alloc > VMXNET3_RX_ALLOC_THRESHOLD(rq,
							ring_idx, adapter))) {
			vmxnet3_rq_alloc_rx_buf(rq, ring_idx, num_to_alloc,
						adapter);

			/* if needed, update the register */
			if (unlikely(rq->shared->updateRxProd)) {
				VMXNET3_WRITE_BAR0_REG(adapter,
					rxprod_reg[ring_idx] + rq->qid * 8,
					rq->rx_ring[ring_idx].next2fill);
				rq->uncommitted[ring_idx] = 0;
			}
		}

		vmxnet3_comp_ring_adv_next2proc(&rq->comp_ring);
		vmxnet3_getRxComp(rcd,
		     &rq->comp_ring.base[rq->comp_ring.next2proc].rcd, &rxComp);
	}

	return num_rxd;
}
static void
vmxnet3_rq_cleanup(struct vmxnet3_rx_queue *rq,
		   struct vmxnet3_adapter *adapter)
{
	u32 i, ring_idx;
	struct Vmxnet3_RxDesc *rxd;

	for (ring_idx = 0; ring_idx < 2; ring_idx++) {
		for (i = 0; i < rq->rx_ring[ring_idx].size; i++) {
#ifdef __BIG_ENDIAN_BITFIELD
			struct Vmxnet3_RxDesc rxDesc;
#endif
			vmxnet3_getRxDesc(rxd,
				&rq->rx_ring[ring_idx].base[i].rxd, &rxDesc);

			if (rxd->btype == VMXNET3_RXD_BTYPE_HEAD &&
					rq->buf_info[ring_idx][i].skb) {
				pci_unmap_single(adapter->pdev, rxd->addr,
						 rxd->len, PCI_DMA_FROMDEVICE);
				dev_kfree_skb(rq->buf_info[ring_idx][i].skb);
				rq->buf_info[ring_idx][i].skb = NULL;
			} else if (rxd->btype == VMXNET3_RXD_BTYPE_BODY &&
					rq->buf_info[ring_idx][i].page) {
				pci_unmap_page(adapter->pdev, rxd->addr,
					       rxd->len, PCI_DMA_FROMDEVICE);
				put_page(rq->buf_info[ring_idx][i].page);
				rq->buf_info[ring_idx][i].page = NULL;
			}
		}

		rq->rx_ring[ring_idx].gen = VMXNET3_INIT_GEN;
		rq->rx_ring[ring_idx].next2fill =
					rq->rx_ring[ring_idx].next2comp = 0;
		rq->uncommitted[ring_idx] = 0;
	}

	rq->comp_ring.gen = VMXNET3_INIT_GEN;
	rq->comp_ring.next2proc = 0;
}
static void
vmxnet3_rq_cleanup_all(struct vmxnet3_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		vmxnet3_rq_cleanup(&adapter->rx_queue[i], adapter);
}
void vmxnet3_rq_destroy(struct vmxnet3_rx_queue *rq,
			struct vmxnet3_adapter *adapter)
{
	int i;
	int j;

	/* all rx buffers must have already been freed */
	for (i = 0; i < 2; i++) {
		if (rq->buf_info[i]) {
			for (j = 0; j < rq->rx_ring[i].size; j++)
				BUG_ON(rq->buf_info[i][j].page != NULL);
		}
	}


	kfree(rq->buf_info[0]);

	for (i = 0; i < 2; i++) {
		if (rq->rx_ring[i].base) {
			pci_free_consistent(adapter->pdev, rq->rx_ring[i].size
					    * sizeof(struct Vmxnet3_RxDesc),
					    rq->rx_ring[i].base,
					    rq->rx_ring[i].basePA);
			rq->rx_ring[i].base = NULL;
		}
		rq->buf_info[i] = NULL;
	}

	if (rq->comp_ring.base) {
		pci_free_consistent(adapter->pdev, rq->comp_ring.size *
				    sizeof(struct Vmxnet3_RxCompDesc),
				    rq->comp_ring.base, rq->comp_ring.basePA);
		rq->comp_ring.base = NULL;
	}
}
static int
vmxnet3_rq_init(struct vmxnet3_rx_queue *rq,
		struct vmxnet3_adapter *adapter)
{
	int i;

	/* initialize buf_info */
	for (i = 0; i < rq->rx_ring[0].size; i++) {

		/* 1st buf for a pkt is skbuff */
		if (i % adapter->rx_buf_per_pkt == 0) {
			rq->buf_info[0][i].buf_type = VMXNET3_RX_BUF_SKB;
			rq->buf_info[0][i].len = adapter->skb_buf_size;
		} else { /* subsequent bufs for a pkt is frag */
			rq->buf_info[0][i].buf_type = VMXNET3_RX_BUF_PAGE;
			rq->buf_info[0][i].len = PAGE_SIZE;
		}
	}
	for (i = 0; i < rq->rx_ring[1].size; i++) {
		rq->buf_info[1][i].buf_type = VMXNET3_RX_BUF_PAGE;
		rq->buf_info[1][i].len = PAGE_SIZE;
	}

	/* reset internal state and allocate buffers for both rings */
	for (i = 0; i < 2; i++) {
		rq->rx_ring[i].next2fill = rq->rx_ring[i].next2comp = 0;
		rq->uncommitted[i] = 0;

		memset(rq->rx_ring[i].base, 0, rq->rx_ring[i].size *
		       sizeof(struct Vmxnet3_RxDesc));
		rq->rx_ring[i].gen = VMXNET3_INIT_GEN;
	}
	if (vmxnet3_rq_alloc_rx_buf(rq, 0, rq->rx_ring[0].size - 1,
				    adapter) == 0) {
		/* at least has 1 rx buffer for the 1st ring */
		return -ENOMEM;
	}
	vmxnet3_rq_alloc_rx_buf(rq, 1, rq->rx_ring[1].size - 1, adapter);

	/* reset the comp ring */
	rq->comp_ring.next2proc = 0;
	memset(rq->comp_ring.base, 0, rq->comp_ring.size *
	       sizeof(struct Vmxnet3_RxCompDesc));
	rq->comp_ring.gen = VMXNET3_INIT_GEN;

	/* reset rxctx */
	rq->rx_ctx.skb = NULL;

	/* stats are not reset */
	return 0;
}
static int
vmxnet3_rq_init_all(struct vmxnet3_adapter *adapter)
{
	int i, err = 0;

	for (i = 0; i < adapter->num_rx_queues; i++) {
		err = vmxnet3_rq_init(&adapter->rx_queue[i], adapter);
		if (unlikely(err)) {
			dev_err(&adapter->netdev->dev, "%s: failed to "
				"initialize rx queue%i\n",
				adapter->netdev->name, i);
			break;
		}
	}
	return err;

}
static int
vmxnet3_rq_create(struct vmxnet3_rx_queue *rq, struct vmxnet3_adapter *adapter)
{
	int i;
	size_t sz;
	struct vmxnet3_rx_buf_info *bi;

	for (i = 0; i < 2; i++) {

		sz = rq->rx_ring[i].size * sizeof(struct Vmxnet3_RxDesc);
		rq->rx_ring[i].base = pci_alloc_consistent(adapter->pdev, sz,
							&rq->rx_ring[i].basePA);
		if (!rq->rx_ring[i].base) {
			printk(KERN_ERR "%s: failed to allocate rx ring %d\n",
			       adapter->netdev->name, i);
			goto err;
		}
	}

	sz = rq->comp_ring.size * sizeof(struct Vmxnet3_RxCompDesc);
	rq->comp_ring.base = pci_alloc_consistent(adapter->pdev, sz,
						  &rq->comp_ring.basePA);
	if (!rq->comp_ring.base) {
		printk(KERN_ERR "%s: failed to allocate rx comp ring\n",
		       adapter->netdev->name);
		goto err;
	}

	sz = sizeof(struct vmxnet3_rx_buf_info) * (rq->rx_ring[0].size +
						   rq->rx_ring[1].size);
	bi = kzalloc(sz, GFP_KERNEL);
	if (!bi) {
		printk(KERN_ERR "%s: failed to allocate rx bufinfo\n",
		       adapter->netdev->name);
		goto err;
	}
	rq->buf_info[0] = bi;
	rq->buf_info[1] = bi + rq->rx_ring[0].size;

	return 0;

err:
	vmxnet3_rq_destroy(rq, adapter);
	return -ENOMEM;
}
static int
vmxnet3_rq_create_all(struct vmxnet3_adapter *adapter)
{
	int i, err = 0;

	for (i = 0; i < adapter->num_rx_queues; i++) {
		err = vmxnet3_rq_create(&adapter->rx_queue[i], adapter);
		if (unlikely(err)) {
			dev_err(&adapter->netdev->dev,
				"%s: failed to create rx queue%i\n",
				adapter->netdev->name, i);
			goto err_out;
		}
	}
	return err;
err_out:
	vmxnet3_rq_destroy_all(adapter);
	return err;

}
/* Multiple queue aware polling function for tx and rx */

static int
vmxnet3_do_poll(struct vmxnet3_adapter *adapter, int budget)
{
	int rcd_done = 0, i;
	if (unlikely(adapter->shared->ecr))
		vmxnet3_process_events(adapter);
	for (i = 0; i < adapter->num_tx_queues; i++)
		vmxnet3_tq_tx_complete(&adapter->tx_queue[i], adapter);

	for (i = 0; i < adapter->num_rx_queues; i++)
		rcd_done += vmxnet3_rq_rx_complete(&adapter->rx_queue[i],
						   adapter, budget);
	return rcd_done;
}


static int
vmxnet3_poll(struct napi_struct *napi, int budget)
{
	struct vmxnet3_rx_queue *rx_queue = container_of(napi,
					  struct vmxnet3_rx_queue, napi);
	int rxd_done;

	rxd_done = vmxnet3_do_poll(rx_queue->adapter, budget);

	if (rxd_done < budget) {
		napi_complete(napi);
		vmxnet3_enable_all_intrs(rx_queue->adapter);
	}
	return rxd_done;
}
/*
 * NAPI polling function for MSI-X mode with multiple Rx queues
 * Returns the # of the NAPI credit consumed (# of rx descriptors processed)
 */

static int
vmxnet3_poll_rx_only(struct napi_struct *napi, int budget)
{
	struct vmxnet3_rx_queue *rq = container_of(napi,
						struct vmxnet3_rx_queue, napi);
	struct vmxnet3_adapter *adapter = rq->adapter;
	int rxd_done;

	/* When sharing interrupt with corresponding tx queue, process
	 * tx completions in that queue as well
	 */
	if (adapter->share_intr == VMXNET3_INTR_BUDDYSHARE) {
		struct vmxnet3_tx_queue *tq =
				&adapter->tx_queue[rq - adapter->rx_queue];
		vmxnet3_tq_tx_complete(tq, adapter);
	}

	rxd_done = vmxnet3_rq_rx_complete(rq, adapter, budget);

	if (rxd_done < budget) {
		napi_complete(napi);
		vmxnet3_enable_intr(adapter, rq->comp_ring.intr_idx);
	}
	return rxd_done;
}
#ifdef CONFIG_PCI_MSI

/*
 * Handle completion interrupts on tx queues
 * Returns whether or not the intr is handled
 */

static irqreturn_t
vmxnet3_msix_tx(int irq, void *data)
{
	struct vmxnet3_tx_queue *tq = data;
	struct vmxnet3_adapter *adapter = tq->adapter;

	if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
		vmxnet3_disable_intr(adapter, tq->comp_ring.intr_idx);

	/* Handle the case where only one irq is allocated for all tx queues */
	if (adapter->share_intr == VMXNET3_INTR_TXSHARE) {
		int i;
		for (i = 0; i < adapter->num_tx_queues; i++) {
			struct vmxnet3_tx_queue *txq = &adapter->tx_queue[i];
			vmxnet3_tq_tx_complete(txq, adapter);
		}
	} else {
		vmxnet3_tq_tx_complete(tq, adapter);
	}
	vmxnet3_enable_intr(adapter, tq->comp_ring.intr_idx);

	return IRQ_HANDLED;
}
/*
 * Handle completion interrupts on rx queues. Returns whether or not the
 * intr is handled
 */

static irqreturn_t
vmxnet3_msix_rx(int irq, void *data)
{
	struct vmxnet3_rx_queue *rq = data;
	struct vmxnet3_adapter *adapter = rq->adapter;

	/* disable intr if needed */
	if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
		vmxnet3_disable_intr(adapter, rq->comp_ring.intr_idx);
	napi_schedule(&rq->napi);

	return IRQ_HANDLED;
}
/*
 *----------------------------------------------------------------------------
 *
 * vmxnet3_msix_event --
 *
 *    vmxnet3 msix event intr handler
 *
 * Result:
 *    whether or not the intr is handled
 *
 *----------------------------------------------------------------------------
 */

static irqreturn_t
vmxnet3_msix_event(int irq, void *data)
{
	struct net_device *dev = data;
	struct vmxnet3_adapter *adapter = netdev_priv(dev);

	/* disable intr if needed */
	if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
		vmxnet3_disable_intr(adapter, adapter->intr.event_intr_idx);

	if (adapter->shared->ecr)
		vmxnet3_process_events(adapter);

	vmxnet3_enable_intr(adapter, adapter->intr.event_intr_idx);

	return IRQ_HANDLED;
}

#endif /* CONFIG_PCI_MSI */
/* Interrupt handler for vmxnet3 */
static irqreturn_t
vmxnet3_intr(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct vmxnet3_adapter *adapter = netdev_priv(dev);

	if (adapter->intr.type == VMXNET3_IT_INTX) {
		u32 icr = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_ICR);
		if (unlikely(icr == 0))
			/* not ours */
			return IRQ_NONE;
	}


	/* disable intr if needed */
	if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
		vmxnet3_disable_all_intrs(adapter);

	napi_schedule(&adapter->rx_queue[0].napi);

	return IRQ_HANDLED;
}
#ifdef CONFIG_NET_POLL_CONTROLLER

/* netpoll callback. */
static void
vmxnet3_netpoll(struct net_device *netdev)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);

	if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
		vmxnet3_disable_all_intrs(adapter);

	vmxnet3_do_poll(adapter, adapter->rx_queue[0].rx_ring[0].size);
	vmxnet3_enable_all_intrs(adapter);

}
#endif	/* CONFIG_NET_POLL_CONTROLLER */
static int
vmxnet3_request_irqs(struct vmxnet3_adapter *adapter)
{
	struct vmxnet3_intr *intr = &adapter->intr;
	int err = 0, i;
	int vector = 0;

#ifdef CONFIG_PCI_MSI
	if (adapter->intr.type == VMXNET3_IT_MSIX) {
		for (i = 0; i < adapter->num_tx_queues; i++) {
			if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE) {
				sprintf(adapter->tx_queue[i].name, "%s-tx-%d",
					adapter->netdev->name, vector);
				err = request_irq(
					      intr->msix_entries[vector].vector,
					      vmxnet3_msix_tx, 0,
					      adapter->tx_queue[i].name,
					      &adapter->tx_queue[i]);
			} else {
				sprintf(adapter->tx_queue[i].name, "%s-rxtx-%d",
					adapter->netdev->name, vector);
			}
			if (err) {
				dev_err(&adapter->netdev->dev,
					"Failed to request irq for MSIX, %s, "
					"error %d\n",
					adapter->tx_queue[i].name, err);
				return err;
			}

			/* Handle the case where only 1 MSIx was allocated for
			 * all tx queues */
			if (adapter->share_intr == VMXNET3_INTR_TXSHARE) {
				for (; i < adapter->num_tx_queues; i++)
					adapter->tx_queue[i].comp_ring.intr_idx
								= vector;
				vector++;
				break;
			} else {
				adapter->tx_queue[i].comp_ring.intr_idx
								= vector++;
			}
		}
		if (adapter->share_intr == VMXNET3_INTR_BUDDYSHARE)
			vector = 0;

		for (i = 0; i < adapter->num_rx_queues; i++) {
			if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE)
				sprintf(adapter->rx_queue[i].name, "%s-rx-%d",
					adapter->netdev->name, vector);
			else
				sprintf(adapter->rx_queue[i].name, "%s-rxtx-%d",
					adapter->netdev->name, vector);
			err = request_irq(intr->msix_entries[vector].vector,
					  vmxnet3_msix_rx, 0,
					  adapter->rx_queue[i].name,
					  &(adapter->rx_queue[i]));
			if (err) {
				printk(KERN_ERR "Failed to request irq for MSIX"
				       ", %s, error %d\n",
				       adapter->rx_queue[i].name, err);
				return err;
			}

			adapter->rx_queue[i].comp_ring.intr_idx = vector++;
		}

		sprintf(intr->event_msi_vector_name, "%s-event-%d",
			adapter->netdev->name, vector);
		err = request_irq(intr->msix_entries[vector].vector,
				  vmxnet3_msix_event, 0,
				  intr->event_msi_vector_name, adapter->netdev);
		intr->event_intr_idx = vector;

	} else if (intr->type == VMXNET3_IT_MSI) {
		adapter->num_rx_queues = 1;
		err = request_irq(adapter->pdev->irq, vmxnet3_intr, 0,
				  adapter->netdev->name, adapter->netdev);
	} else {
#endif
		adapter->num_rx_queues = 1;
		err = request_irq(adapter->pdev->irq, vmxnet3_intr,
				  IRQF_SHARED, adapter->netdev->name,
				  adapter->netdev);
#ifdef CONFIG_PCI_MSI
	}
#endif
	intr->num_intrs = vector + 1;
	if (err) {
		printk(KERN_ERR "Failed to request irq %s (intr type:%d), error"
		       ":%d\n", adapter->netdev->name, intr->type, err);
	} else {
		/* Number of rx queues will not change after this */
		for (i = 0; i < adapter->num_rx_queues; i++) {
			struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];
			rq->qid = i;
			rq->qid2 = i + adapter->num_rx_queues;
		}



		/* init our intr settings */
		for (i = 0; i < intr->num_intrs; i++)
			intr->mod_levels[i] = UPT1_IML_ADAPTIVE;
		if (adapter->intr.type != VMXNET3_IT_MSIX) {
			adapter->intr.event_intr_idx = 0;
			for (i = 0; i < adapter->num_tx_queues; i++)
				adapter->tx_queue[i].comp_ring.intr_idx = 0;
			adapter->rx_queue[0].comp_ring.intr_idx = 0;
		}

		printk(KERN_INFO "%s: intr type %u, mode %u, %u vectors "
		       "allocated\n", adapter->netdev->name, intr->type,
		       intr->mask_mode, intr->num_intrs);
	}

	return err;
}
static void
vmxnet3_free_irqs(struct vmxnet3_adapter *adapter)
{
	struct vmxnet3_intr *intr = &adapter->intr;
	BUG_ON(intr->type == VMXNET3_IT_AUTO || intr->num_intrs <= 0);

	switch (intr->type) {
#ifdef CONFIG_PCI_MSI
	case VMXNET3_IT_MSIX:
	{
		int i, vector = 0;

		if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE) {
			for (i = 0; i < adapter->num_tx_queues; i++) {
				free_irq(intr->msix_entries[vector++].vector,
					 &(adapter->tx_queue[i]));
				if (adapter->share_intr == VMXNET3_INTR_TXSHARE)
					break;
			}
		}

		for (i = 0; i < adapter->num_rx_queues; i++) {
			free_irq(intr->msix_entries[vector++].vector,
				 &(adapter->rx_queue[i]));
		}

		free_irq(intr->msix_entries[vector].vector,
			 adapter->netdev);
		BUG_ON(vector >= intr->num_intrs);
		break;
	}
#endif
	case VMXNET3_IT_MSI:
		free_irq(adapter->pdev->irq, adapter->netdev);
		break;
	case VMXNET3_IT_INTX:
		free_irq(adapter->pdev->irq, adapter->netdev);
		break;
	default:
		BUG_ON(true);
	}
}
static void
vmxnet3_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
	struct Vmxnet3_DriverShared *shared = adapter->shared;
	u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
	unsigned long flags;

	if (grp) {
		/* add vlan rx stripping. */
		if (adapter->netdev->features & NETIF_F_HW_VLAN_RX) {
			int i;
			adapter->vlan_grp = grp;

			/*
			 *  Clear entire vfTable; then enable untagged pkts.
			 *  Note: setting one entry in vfTable to non-zero turns
			 *  on VLAN rx filtering.
			 */
			for (i = 0; i < VMXNET3_VFT_SIZE; i++)
				vfTable[i] = 0;

			VMXNET3_SET_VFTABLE_ENTRY(vfTable, 0);
			spin_lock_irqsave(&adapter->cmd_lock, flags);
			VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
					       VMXNET3_CMD_UPDATE_VLAN_FILTERS);
			spin_unlock_irqrestore(&adapter->cmd_lock, flags);
		} else {
			printk(KERN_ERR "%s: vlan_rx_register when device has "
			       "no NETIF_F_HW_VLAN_RX\n", netdev->name);
		}
	} else {
		/* remove vlan rx stripping. */
		struct Vmxnet3_DSDevRead *devRead = &shared->devRead;
		adapter->vlan_grp = NULL;

		if (devRead->misc.uptFeatures & UPT1_F_RXVLAN) {
			int i;

			for (i = 0; i < VMXNET3_VFT_SIZE; i++) {
				/* clear entire vfTable; this also disables
				 * VLAN rx filtering
				 */
				vfTable[i] = 0;
			}
			spin_lock_irqsave(&adapter->cmd_lock, flags);
			VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
					       VMXNET3_CMD_UPDATE_VLAN_FILTERS);
			spin_unlock_irqrestore(&adapter->cmd_lock, flags);
		}
	}
}
static void
vmxnet3_restore_vlan(struct vmxnet3_adapter *adapter)
{
	if (adapter->vlan_grp) {
		u16 vid;
		u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
		bool activeVlan = false;

		for (vid = 0; vid < VLAN_N_VID; vid++) {
			if (vlan_group_get_device(adapter->vlan_grp, vid)) {
				VMXNET3_SET_VFTABLE_ENTRY(vfTable, vid);
				activeVlan = true;
			}
		}
		if (activeVlan) {
			/* continue to allow untagged pkts */
			VMXNET3_SET_VFTABLE_ENTRY(vfTable, 0);
		}
	}
}
static void
vmxnet3_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
	u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
	unsigned long flags;

	VMXNET3_SET_VFTABLE_ENTRY(vfTable, vid);
	spin_lock_irqsave(&adapter->cmd_lock, flags);
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
			       VMXNET3_CMD_UPDATE_VLAN_FILTERS);
	spin_unlock_irqrestore(&adapter->cmd_lock, flags);
}
static void
vmxnet3_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
	u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
	unsigned long flags;

	VMXNET3_CLEAR_VFTABLE_ENTRY(vfTable, vid);
	spin_lock_irqsave(&adapter->cmd_lock, flags);
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
			       VMXNET3_CMD_UPDATE_VLAN_FILTERS);
	spin_unlock_irqrestore(&adapter->cmd_lock, flags);
}
static u8 *
vmxnet3_copy_mc(struct net_device *netdev)
{
	u8 *buf = NULL;
	u32 sz = netdev_mc_count(netdev) * ETH_ALEN;

	/* struct Vmxnet3_RxFilterConf.mfTableLen is u16. */
	if (sz <= 0xffff) {
		/* We may be called with BH disabled */
		buf = kmalloc(sz, GFP_ATOMIC);
		if (buf) {
			struct netdev_hw_addr *ha;
			int i = 0;

			netdev_for_each_mc_addr(ha, netdev)
				memcpy(buf + i++ * ETH_ALEN, ha->addr,
				       ETH_ALEN);
		}
	}
	return buf;
}
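
/*
 * The u16 mfTableLen field caps the table at 65535 bytes, i.e. at most
 * 65535 / ETH_ALEN = 10922 multicast addresses; beyond that the copy is
 * skipped and the caller in vmxnet3_set_mc() falls back to ALL_MULTI mode.
 */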
static void
vmxnet3_set_mc(struct net_device *netdev)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
	unsigned long flags;
	struct Vmxnet3_RxFilterConf *rxConf =
					&adapter->shared->devRead.rxFilterConf;
	u8 *new_table = NULL;
	u32 new_mode = VMXNET3_RXM_UCAST;

	if (netdev->flags & IFF_PROMISC)
		new_mode |= VMXNET3_RXM_PROMISC;

	if (netdev->flags & IFF_BROADCAST)
		new_mode |= VMXNET3_RXM_BCAST;

	if (netdev->flags & IFF_ALLMULTI)
		new_mode |= VMXNET3_RXM_ALL_MULTI;
	else
		if (!netdev_mc_empty(netdev)) {
			new_table = vmxnet3_copy_mc(netdev);
			if (new_table) {
				new_mode |= VMXNET3_RXM_MCAST;
				rxConf->mfTableLen = cpu_to_le16(
					netdev_mc_count(netdev) * ETH_ALEN);
				rxConf->mfTablePA = cpu_to_le64(virt_to_phys(
						    new_table));
			} else {
				printk(KERN_INFO "%s: failed to copy mcast list"
				       ", setting ALL_MULTI\n", netdev->name);
				new_mode |= VMXNET3_RXM_ALL_MULTI;
			}
		}


	if (!(new_mode & VMXNET3_RXM_MCAST)) {
		rxConf->mfTableLen = 0;
		rxConf->mfTablePA = 0;
	}

	spin_lock_irqsave(&adapter->cmd_lock, flags);
	if (new_mode != rxConf->rxMode) {
		rxConf->rxMode = cpu_to_le32(new_mode);
		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
				       VMXNET3_CMD_UPDATE_RX_MODE);
	}

	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
			       VMXNET3_CMD_UPDATE_MAC_FILTERS);
	spin_unlock_irqrestore(&adapter->cmd_lock, flags);

	kfree(new_table);
}
void
vmxnet3_rq_destroy_all(struct vmxnet3_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		vmxnet3_rq_destroy(&adapter->rx_queue[i], adapter);
}
2053 * Set up driver_shared based on settings in adapter.
2057 vmxnet3_setup_driver_shared(struct vmxnet3_adapter
*adapter
)
2059 struct Vmxnet3_DriverShared
*shared
= adapter
->shared
;
2060 struct Vmxnet3_DSDevRead
*devRead
= &shared
->devRead
;
2061 struct Vmxnet3_TxQueueConf
*tqc
;
2062 struct Vmxnet3_RxQueueConf
*rqc
;
2065 memset(shared
, 0, sizeof(*shared
));
2067 /* driver settings */
2068 shared
->magic
= cpu_to_le32(VMXNET3_REV1_MAGIC
);
2069 devRead
->misc
.driverInfo
.version
= cpu_to_le32(
2070 VMXNET3_DRIVER_VERSION_NUM
);
2071 devRead
->misc
.driverInfo
.gos
.gosBits
= (sizeof(void *) == 4 ?
2072 VMXNET3_GOS_BITS_32
: VMXNET3_GOS_BITS_64
);
2073 devRead
->misc
.driverInfo
.gos
.gosType
= VMXNET3_GOS_TYPE_LINUX
;
2074 *((u32
*)&devRead
->misc
.driverInfo
.gos
) = cpu_to_le32(
2075 *((u32
*)&devRead
->misc
.driverInfo
.gos
));
2076 devRead
->misc
.driverInfo
.vmxnet3RevSpt
= cpu_to_le32(1);
2077 devRead
->misc
.driverInfo
.uptVerSpt
= cpu_to_le32(1);
2079 devRead
->misc
.ddPA
= cpu_to_le64(virt_to_phys(adapter
));
2080 devRead
->misc
.ddLen
= cpu_to_le32(sizeof(struct vmxnet3_adapter
));
2082 /* set up feature flags */
2083 if (adapter
->netdev
->features
& NETIF_F_RXCSUM
)
2084 devRead
->misc
.uptFeatures
|= UPT1_F_RXCSUM
;
2086 if (adapter
->netdev
->features
& NETIF_F_LRO
) {
2087 devRead
->misc
.uptFeatures
|= UPT1_F_LRO
;
2088 devRead
->misc
.maxNumRxSG
= cpu_to_le16(1 + MAX_SKB_FRAGS
);
2090 if (adapter
->netdev
->features
& NETIF_F_HW_VLAN_RX
)
2091 devRead
->misc
.uptFeatures
|= UPT1_F_RXVLAN
;
2093 devRead
->misc
.mtu
= cpu_to_le32(adapter
->netdev
->mtu
);
2094 devRead
->misc
.queueDescPA
= cpu_to_le64(adapter
->queue_desc_pa
);
2095 devRead
->misc
.queueDescLen
= cpu_to_le32(
2096 adapter
->num_tx_queues
* sizeof(struct Vmxnet3_TxQueueDesc
) +
2097 adapter
->num_rx_queues
* sizeof(struct Vmxnet3_RxQueueDesc
));
2099 /* tx queue settings */
2100 devRead
->misc
.numTxQueues
= adapter
->num_tx_queues
;
2101 for (i
= 0; i
< adapter
->num_tx_queues
; i
++) {
2102 struct vmxnet3_tx_queue
*tq
= &adapter
->tx_queue
[i
];
2103 BUG_ON(adapter
->tx_queue
[i
].tx_ring
.base
== NULL
);
2104 tqc
= &adapter
->tqd_start
[i
].conf
;
2105 tqc
->txRingBasePA
= cpu_to_le64(tq
->tx_ring
.basePA
);
2106 tqc
->dataRingBasePA
= cpu_to_le64(tq
->data_ring
.basePA
);
2107 tqc
->compRingBasePA
= cpu_to_le64(tq
->comp_ring
.basePA
);
2108 tqc
->ddPA
= cpu_to_le64(virt_to_phys(tq
->buf_info
));
2109 tqc
->txRingSize
= cpu_to_le32(tq
->tx_ring
.size
);
2110 tqc
->dataRingSize
= cpu_to_le32(tq
->data_ring
.size
);
2111 tqc
->compRingSize
= cpu_to_le32(tq
->comp_ring
.size
);
2112 tqc
->ddLen
= cpu_to_le32(
2113 sizeof(struct vmxnet3_tx_buf_info
) *
2115 tqc
->intrIdx
= tq
->comp_ring
.intr_idx
;
2118 /* rx queue settings */
2119 devRead
->misc
.numRxQueues
= adapter
->num_rx_queues
;
2120 for (i
= 0; i
< adapter
->num_rx_queues
; i
++) {
2121 struct vmxnet3_rx_queue
*rq
= &adapter
->rx_queue
[i
];
2122 rqc
= &adapter
->rqd_start
[i
].conf
;
2123 rqc
->rxRingBasePA
[0] = cpu_to_le64(rq
->rx_ring
[0].basePA
);
2124 rqc
->rxRingBasePA
[1] = cpu_to_le64(rq
->rx_ring
[1].basePA
);
2125 rqc
->compRingBasePA
= cpu_to_le64(rq
->comp_ring
.basePA
);
2126 rqc
->ddPA
= cpu_to_le64(virt_to_phys(
2128 rqc
->rxRingSize
[0] = cpu_to_le32(rq
->rx_ring
[0].size
);
2129 rqc
->rxRingSize
[1] = cpu_to_le32(rq
->rx_ring
[1].size
);
2130 rqc
->compRingSize
= cpu_to_le32(rq
->comp_ring
.size
);
2131 rqc
->ddLen
= cpu_to_le32(
2132 sizeof(struct vmxnet3_rx_buf_info
) *
2133 (rqc
->rxRingSize
[0] +
2134 rqc
->rxRingSize
[1]));
2135 rqc
->intrIdx
= rq
->comp_ring
.intr_idx
;
2139 memset(adapter
->rss_conf
, 0, sizeof(*adapter
->rss_conf
));
2142 struct UPT1_RSSConf
*rssConf
= adapter
->rss_conf
;
2143 devRead
->misc
.uptFeatures
|= UPT1_F_RSS
;
2144 devRead
->misc
.numRxQueues
= adapter
->num_rx_queues
;
2145 rssConf
->hashType
= UPT1_RSS_HASH_TYPE_TCP_IPV4
|
2146 UPT1_RSS_HASH_TYPE_IPV4
|
2147 UPT1_RSS_HASH_TYPE_TCP_IPV6
|
2148 UPT1_RSS_HASH_TYPE_IPV6
;
2149 rssConf
->hashFunc
= UPT1_RSS_HASH_FUNC_TOEPLITZ
;
2150 rssConf
->hashKeySize
= UPT1_RSS_MAX_KEY_SIZE
;
2151 rssConf
->indTableSize
= VMXNET3_RSS_IND_TABLE_SIZE
;
2152 get_random_bytes(&rssConf
->hashKey
[0], rssConf
->hashKeySize
);
2153 for (i
= 0; i
< rssConf
->indTableSize
; i
++)
2154 rssConf
->indTable
[i
] = i
% adapter
->num_rx_queues
;
2156 devRead
->rssConfDesc
.confVer
= 1;
2157 devRead
->rssConfDesc
.confLen
= sizeof(*rssConf
);
2158 devRead
->rssConfDesc
.confPA
= virt_to_phys(rssConf
);
2161 #endif /* VMXNET3_RSS */
	devRead->intrConf.autoMask = adapter->intr.mask_mode ==
				     VMXNET3_IMM_AUTO;
	devRead->intrConf.numIntrs = adapter->intr.num_intrs;
	for (i = 0; i < adapter->intr.num_intrs; i++)
		devRead->intrConf.modLevels[i] = adapter->intr.mod_levels[i];

	devRead->intrConf.eventIntrIdx = adapter->intr.event_intr_idx;
	devRead->intrConf.intrCtrl |= cpu_to_le32(VMXNET3_IC_DISABLE_ALL);

	/* rx filter settings */
	devRead->rxFilterConf.rxMode = 0;
	vmxnet3_restore_vlan(adapter);
	vmxnet3_write_mac_addr(adapter, adapter->netdev->dev_addr);

	/* the rest are already zeroed */
}
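/*
 * Activation sequence: initialize the rings, claim the interrupt vectors,
 * publish the shared area (DSAL/DSAH take the low/high halves of its
 * physical address), then issue VMXNET3_CMD_ACTIVATE_DEV. A non-zero
 * result read back from the command register means the device rejected
 * the configuration, and everything is unwound.
 */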
int
vmxnet3_activate_dev(struct vmxnet3_adapter *adapter)
{
	int err, i;
	u32 ret;
	unsigned long flags;

	dev_dbg(&adapter->netdev->dev, "%s: skb_buf_size %d, rx_buf_per_pkt %d,"
		" ring sizes %u %u %u\n", adapter->netdev->name,
		adapter->skb_buf_size, adapter->rx_buf_per_pkt,
		adapter->tx_queue[0].tx_ring.size,
		adapter->rx_queue[0].rx_ring[0].size,
		adapter->rx_queue[0].rx_ring[1].size);

	vmxnet3_tq_init_all(adapter);
	err = vmxnet3_rq_init_all(adapter);
	if (err) {
		printk(KERN_ERR "Failed to init rx queue for %s: error %d\n",
		       adapter->netdev->name, err);
		goto rq_err;
	}

	err = vmxnet3_request_irqs(adapter);
	if (err) {
		printk(KERN_ERR "Failed to setup irq for %s: error %d\n",
		       adapter->netdev->name, err);
		goto irq_err;
	}

	vmxnet3_setup_driver_shared(adapter);

	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAL, VMXNET3_GET_ADDR_LO(
			       adapter->shared_pa));
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAH, VMXNET3_GET_ADDR_HI(
			       adapter->shared_pa));
	spin_lock_irqsave(&adapter->cmd_lock, flags);
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
			       VMXNET3_CMD_ACTIVATE_DEV);
	ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
	spin_unlock_irqrestore(&adapter->cmd_lock, flags);

	if (ret != 0) {
		printk(KERN_ERR "Failed to activate dev %s: error %u\n",
		       adapter->netdev->name, ret);
		err = -EINVAL;
		goto activate_err;
	}

	for (i = 0; i < adapter->num_rx_queues; i++) {
		VMXNET3_WRITE_BAR0_REG(adapter,
				VMXNET3_REG_RXPROD + i * VMXNET3_REG_ALIGN,
				adapter->rx_queue[i].rx_ring[0].next2fill);
		VMXNET3_WRITE_BAR0_REG(adapter, (VMXNET3_REG_RXPROD2 +
				(i * VMXNET3_REG_ALIGN)),
				adapter->rx_queue[i].rx_ring[1].next2fill);
	}

	/* Apply the rx filter settings last. */
	vmxnet3_set_mc(adapter->netdev);

	/*
	 * Check link state when first activating device. It will start the
	 * tx queue if the link is up.
	 */
	vmxnet3_check_link(adapter, true);
	for (i = 0; i < adapter->num_rx_queues; i++)
		napi_enable(&adapter->rx_queue[i].napi);
	vmxnet3_enable_all_intrs(adapter);
	clear_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state);
	return 0;

activate_err:
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAL, 0);
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAH, 0);
	vmxnet3_free_irqs(adapter);
irq_err:
rq_err:
	/* free up buffers we allocated */
	vmxnet3_rq_cleanup_all(adapter);
	return err;
}
void
vmxnet3_reset_dev(struct vmxnet3_adapter *adapter)
{
	unsigned long flags;
	spin_lock_irqsave(&adapter->cmd_lock, flags);
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_RESET_DEV);
	spin_unlock_irqrestore(&adapter->cmd_lock, flags);
}
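/*
 * Quiesce the device: stop DMA and interrupt delivery on the device side
 * first (QUIESCE_DEV command, then interrupt disable), then stop the host
 * side (NAPI, tx queues) and release rings and irqs. The QUIESCED state
 * bit makes repeated calls a no-op.
 */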
int
vmxnet3_quiesce_dev(struct vmxnet3_adapter *adapter)
{
	int i;
	unsigned long flags;
	if (test_and_set_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state))
		return 0;

	spin_lock_irqsave(&adapter->cmd_lock, flags);
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
			       VMXNET3_CMD_QUIESCE_DEV);
	spin_unlock_irqrestore(&adapter->cmd_lock, flags);
	vmxnet3_disable_all_intrs(adapter);

	for (i = 0; i < adapter->num_rx_queues; i++)
		napi_disable(&adapter->rx_queue[i].napi);
	netif_tx_disable(adapter->netdev);
	adapter->link_speed = 0;
	netif_carrier_off(adapter->netdev);

	vmxnet3_tq_cleanup_all(adapter);
	vmxnet3_rq_cleanup_all(adapter);
	vmxnet3_free_irqs(adapter);
	return 0;
}
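/*
 * The device keeps the MAC address in two registers: MACL holds the low
 * four bytes, MACH the remaining two (in its low 16 bits), as the
 * read/write helpers below assume.
 */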
static void
vmxnet3_write_mac_addr(struct vmxnet3_adapter *adapter, u8 *mac)
{
	u32 tmp;

	tmp = *(u32 *)mac;
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_MACL, tmp);

	tmp = (mac[5] << 8) | mac[4];
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_MACH, tmp);
}
static int
vmxnet3_set_mac_addr(struct net_device *netdev, void *p)
{
	struct sockaddr *addr = p;
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	vmxnet3_write_mac_addr(adapter, addr->sa_data);

	return 0;
}
/* ==================== initialization and cleanup routines ============ */

static int
vmxnet3_alloc_pci_resources(struct vmxnet3_adapter *adapter, bool *dma64)
{
	int err;
	unsigned long mmio_start, mmio_len;
	struct pci_dev *pdev = adapter->pdev;

	err = pci_enable_device(pdev);
	if (err) {
		printk(KERN_ERR "Failed to enable adapter %s: error %d\n",
		       pci_name(pdev), err);
		return err;
	}

	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
		if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
			printk(KERN_ERR "pci_set_consistent_dma_mask failed "
			       "for adapter %s\n", pci_name(pdev));
			err = -EIO;
			goto err_set_mask;
		}
		*dma64 = true;
	} else {
		if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
			printk(KERN_ERR "pci_set_dma_mask failed for adapter "
			       "%s\n", pci_name(pdev));
			err = -EIO;
			goto err_set_mask;
		}
		*dma64 = false;
	}

	err = pci_request_selected_regions(pdev, (1 << 2) - 1,
					   vmxnet3_driver_name);
	if (err) {
		printk(KERN_ERR "Failed to request region for adapter %s: "
		       "error %d\n", pci_name(pdev), err);
		goto err_set_mask;
	}

	pci_set_master(pdev);

	mmio_start = pci_resource_start(pdev, 0);
	mmio_len = pci_resource_len(pdev, 0);
	adapter->hw_addr0 = ioremap(mmio_start, mmio_len);
	if (!adapter->hw_addr0) {
		printk(KERN_ERR "Failed to map bar0 for adapter %s\n",
		       pci_name(pdev));
		err = -EIO;
		goto err_ioremap;
	}

	mmio_start = pci_resource_start(pdev, 1);
	mmio_len = pci_resource_len(pdev, 1);
	adapter->hw_addr1 = ioremap(mmio_start, mmio_len);
	if (!adapter->hw_addr1) {
		printk(KERN_ERR "Failed to map bar1 for adapter %s\n",
		       pci_name(pdev));
		err = -EIO;
		goto err_bar1;
	}
	return 0;

err_bar1:
	iounmap(adapter->hw_addr0);
err_ioremap:
	pci_release_selected_regions(pdev, (1 << 2) - 1);
err_set_mask:
	pci_disable_device(pdev);
	return err;
}
static void
vmxnet3_free_pci_resources(struct vmxnet3_adapter *adapter)
{
	BUG_ON(!adapter->pdev);

	iounmap(adapter->hw_addr0);
	iounmap(adapter->hw_addr1);
	pci_release_selected_regions(adapter->pdev, (1 << 2) - 1);
	pci_disable_device(adapter->pdev);
}
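/*
 * Size the rx buffers for the current MTU. Packets that fit in a single
 * skb buffer (up to VMXNET3_MAX_SKB_BUF_SIZE including headers) need one
 * buffer each; anything larger spills into additional page-sized body
 * buffers, which is what rx_buf_per_pkt accounts for below.
 */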
static void
vmxnet3_adjust_rx_ring_size(struct vmxnet3_adapter *adapter)
{
	size_t sz, i, ring0_size, ring1_size, comp_size;
	struct vmxnet3_rx_queue	*rq = &adapter->rx_queue[0];

	if (adapter->netdev->mtu <= VMXNET3_MAX_SKB_BUF_SIZE -
				    VMXNET3_MAX_ETH_HDR_SIZE) {
		adapter->skb_buf_size = adapter->netdev->mtu +
					VMXNET3_MAX_ETH_HDR_SIZE;
		if (adapter->skb_buf_size < VMXNET3_MIN_T0_BUF_SIZE)
			adapter->skb_buf_size = VMXNET3_MIN_T0_BUF_SIZE;

		adapter->rx_buf_per_pkt = 1;
	} else {
		adapter->skb_buf_size = VMXNET3_MAX_SKB_BUF_SIZE;
		sz = adapter->netdev->mtu - VMXNET3_MAX_SKB_BUF_SIZE +
					    VMXNET3_MAX_ETH_HDR_SIZE;
		adapter->rx_buf_per_pkt = 1 + (sz + PAGE_SIZE - 1) / PAGE_SIZE;
	}

	/*
	 * for simplicity, force the ring0 size to be a multiple of
	 * rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN
	 */
	sz = adapter->rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN;
	ring0_size = adapter->rx_queue[0].rx_ring[0].size;
	ring0_size = (ring0_size + sz - 1) / sz * sz;
	ring0_size = min_t(u32, ring0_size, VMXNET3_RX_RING_MAX_SIZE /
			   sz * sz);
	ring1_size = adapter->rx_queue[0].rx_ring[1].size;
	comp_size = ring0_size + ring1_size;

	for (i = 0; i < adapter->num_rx_queues; i++) {
		rq = &adapter->rx_queue[i];
		rq->rx_ring[0].size = ring0_size;
		rq->rx_ring[1].size = ring1_size;
		rq->comp_ring.size = comp_size;
	}
}
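/*
 * Create tx/rx queues with the requested ring sizes. Tx queues are
 * mandatory; failing any rx queue after the first simply trims
 * num_rx_queues to what succeeded instead of failing the whole call.
 */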
int
vmxnet3_create_queues(struct vmxnet3_adapter *adapter, u32 tx_ring_size,
		      u32 rx_ring_size, u32 rx_ring2_size)
{
	int err = 0, i;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		struct vmxnet3_tx_queue	*tq = &adapter->tx_queue[i];
		tq->tx_ring.size   = tx_ring_size;
		tq->data_ring.size = tx_ring_size;
		tq->comp_ring.size = tx_ring_size;
		tq->shared = &adapter->tqd_start[i].ctrl;
		tq->stopped = true;
		tq->adapter = adapter;
		tq->qid = i;
		err = vmxnet3_tq_create(tq, adapter);
		/*
		 * Too late to change num_tx_queues. We cannot do away with
		 * lesser number of queues than what we asked for
		 */
		if (err)
			goto queue_err;
	}

	adapter->rx_queue[0].rx_ring[0].size = rx_ring_size;
	adapter->rx_queue[0].rx_ring[1].size = rx_ring2_size;
	vmxnet3_adjust_rx_ring_size(adapter);
	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];
		/* qid and qid2 for rx queues will be assigned later when num
		 * of rx queues is finalized after allocating intrs */
		rq->shared = &adapter->rqd_start[i].ctrl;
		rq->adapter = adapter;
		err = vmxnet3_rq_create(rq, adapter);
		if (err) {
			if (i == 0) {
				printk(KERN_ERR "Could not allocate any rx "
				       "queues. Aborting.\n");
				goto queue_err;
			}
			printk(KERN_INFO "Number of rx queues changed "
			       "to : %d.\n", i);
			adapter->num_rx_queues = i;
			err = 0;
			break;
		}
	}
	return err;
queue_err:
	vmxnet3_tq_destroy_all(adapter);
	return err;
}
static int
vmxnet3_open(struct net_device *netdev)
{
	struct vmxnet3_adapter *adapter;
	int err, i;

	adapter = netdev_priv(netdev);

	for (i = 0; i < adapter->num_tx_queues; i++)
		spin_lock_init(&adapter->tx_queue[i].tx_lock);

	err = vmxnet3_create_queues(adapter, VMXNET3_DEF_TX_RING_SIZE,
				    VMXNET3_DEF_RX_RING_SIZE,
				    VMXNET3_DEF_RX_RING_SIZE);
	if (err)
		goto queue_err;

	err = vmxnet3_activate_dev(adapter);
	if (err)
		goto activate_err;

	return 0;

activate_err:
	vmxnet3_rq_destroy_all(adapter);
	vmxnet3_tq_destroy_all(adapter);
queue_err:
	return err;
}
static int
vmxnet3_close(struct net_device *netdev)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);

	/*
	 * Reset_work may be in the middle of resetting the device, wait for its
	 * completion.
	 */
	while (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state))
		msleep(1);

	vmxnet3_quiesce_dev(adapter);

	vmxnet3_rq_destroy_all(adapter);
	vmxnet3_tq_destroy_all(adapter);

	clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);

	return 0;
}
void
vmxnet3_force_close(struct vmxnet3_adapter *adapter)
{
	int i;

	/*
	 * we must clear VMXNET3_STATE_BIT_RESETTING, otherwise
	 * vmxnet3_close() will deadlock.
	 */
	BUG_ON(test_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state));

	/* we need to enable NAPI, otherwise dev_close will deadlock */
	for (i = 0; i < adapter->num_rx_queues; i++)
		napi_enable(&adapter->rx_queue[i].napi);
	dev_close(adapter->netdev);
}
static int
vmxnet3_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
	int err = 0;

	if (new_mtu < VMXNET3_MIN_MTU || new_mtu > VMXNET3_MAX_MTU)
		return -EINVAL;

	netdev->mtu = new_mtu;

	/*
	 * Reset_work may be in the middle of resetting the device, wait for its
	 * completion.
	 */
	while (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state))
		msleep(1);

	if (netif_running(netdev)) {
		vmxnet3_quiesce_dev(adapter);
		vmxnet3_reset_dev(adapter);

		/* we need to re-create the rx queue based on the new mtu */
		vmxnet3_rq_destroy_all(adapter);
		vmxnet3_adjust_rx_ring_size(adapter);
		err = vmxnet3_rq_create_all(adapter);
		if (err) {
			printk(KERN_ERR "%s: failed to re-create rx queues,"
			       " error %d. Closing it.\n", netdev->name, err);
			goto out;
		}

		err = vmxnet3_activate_dev(adapter);
		if (err) {
			printk(KERN_ERR "%s: failed to re-activate, error %d. "
			       "Closing it\n", netdev->name, err);
			goto out;
		}
	}

out:
	clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
	if (err)
		vmxnet3_force_close(adapter);

	return err;
}
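/*
 * Feature advertisement: hw_features is the set the user may toggle at
 * runtime, while VLAN rx acceleration and filtering are added only to the
 * fixed features set and so stay always on; tx VLAN tagging is masked out
 * of vlan_features for devices stacked on top of this one.
 */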
static void
vmxnet3_declare_features(struct vmxnet3_adapter *adapter, bool dma64)
{
	struct net_device *netdev = adapter->netdev;

	netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM |
		NETIF_F_HW_CSUM | NETIF_F_HW_VLAN_TX |
		NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_LRO;
	if (dma64)
		netdev->hw_features |= NETIF_F_HIGHDMA;
	netdev->vlan_features = netdev->hw_features & ~NETIF_F_HW_VLAN_TX;
	netdev->features = netdev->hw_features |
		NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;

	netdev_info(adapter->netdev,
		"features: sg csum vlan jf tso tsoIPv6 lro%s\n",
		dma64 ? " highDMA" : "");
}
static void
vmxnet3_read_mac_addr(struct vmxnet3_adapter *adapter, u8 *mac)
{
	u32 tmp;

	tmp = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_MACL);
	*(u32 *)mac = tmp;

	tmp = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_MACH);
	mac[4] = tmp & 0xff;
	mac[5] = (tmp >> 8) & 0xff;
}
#ifdef CONFIG_PCI_MSI

/*
 * Enable MSIx vectors.
 * Returns :
 *	0 on successful enabling of required vectors,
 *	VMXNET3_LINUX_MIN_MSIX_VECT when only minimum number of vectors required
 *	 could be enabled, and
 *	number of vectors which can be enabled otherwise (this number is smaller
 *	 than VMXNET3_LINUX_MIN_MSIX_VECT)
 */

static int
vmxnet3_acquire_msix_vectors(struct vmxnet3_adapter *adapter,
			     int vectors)
{
	int err = 0, vector_threshold;
	vector_threshold = VMXNET3_LINUX_MIN_MSIX_VECT;

	while (vectors >= vector_threshold) {
		err = pci_enable_msix(adapter->pdev, adapter->intr.msix_entries,
				      vectors);
		if (!err) {
			adapter->intr.num_intrs = vectors;
			return 0;
		} else if (err < 0) {
			printk(KERN_ERR "Failed to enable MSI-X for %s, error"
			       " %d\n", adapter->netdev->name, err);
			vectors = 0;
		} else if (err < vector_threshold) {
			break;
		} else {
			/* If fails to enable required number of MSI-x vectors
			 * try enabling minimum number of vectors required.
			 */
			printk(KERN_ERR "Failed to enable %d MSI-X for %s, try"
			       " %d instead\n", vectors, adapter->netdev->name,
			       vector_threshold);
			vectors = vector_threshold;
		}
	}

	printk(KERN_INFO "Number of MSI-X interrupts which can be allocated"
	       " is lower than min threshold required.\n");
	return err;
}

#endif /* CONFIG_PCI_MSI */
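/*
 * Vector budget below: one vector per tx queue (or a single shared one
 * with VMXNET3_INTR_TXSHARE), one per rx queue unless rx queues buddy up
 * with tx vectors (VMXNET3_INTR_BUDDYSHARE), plus one for link events,
 * but never fewer than VMXNET3_LINUX_MIN_MSIX_VECT.
 */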
static void
vmxnet3_alloc_intr_resources(struct vmxnet3_adapter *adapter)
{
	u32 cfg;
	unsigned long flags;

	/* intr settings */
	spin_lock_irqsave(&adapter->cmd_lock, flags);
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
			       VMXNET3_CMD_GET_CONF_INTR);
	cfg = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
	spin_unlock_irqrestore(&adapter->cmd_lock, flags);
	adapter->intr.type = cfg & 0x3;
	adapter->intr.mask_mode = (cfg >> 2) & 0x3;

	if (adapter->intr.type == VMXNET3_IT_AUTO) {
		adapter->intr.type = VMXNET3_IT_MSIX;
	}

#ifdef CONFIG_PCI_MSI
	if (adapter->intr.type == VMXNET3_IT_MSIX) {
		int vector, err = 0;

		adapter->intr.num_intrs = (adapter->share_intr ==
					   VMXNET3_INTR_TXSHARE) ? 1 :
					   adapter->num_tx_queues;
		adapter->intr.num_intrs += (adapter->share_intr ==
					   VMXNET3_INTR_BUDDYSHARE) ? 0 :
					   adapter->num_rx_queues;
		adapter->intr.num_intrs += 1;		/* for link event */

		adapter->intr.num_intrs = (adapter->intr.num_intrs >
					   VMXNET3_LINUX_MIN_MSIX_VECT
					   ? adapter->intr.num_intrs :
					   VMXNET3_LINUX_MIN_MSIX_VECT);

		for (vector = 0; vector < adapter->intr.num_intrs; vector++)
			adapter->intr.msix_entries[vector].entry = vector;

		err = vmxnet3_acquire_msix_vectors(adapter,
						   adapter->intr.num_intrs);
		/* If we cannot allocate one MSIx vector per queue
		 * then limit the number of rx queues to 1
		 */
		if (err == VMXNET3_LINUX_MIN_MSIX_VECT) {
			if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE
			    || adapter->num_rx_queues != 1) {
				adapter->share_intr = VMXNET3_INTR_TXSHARE;
				printk(KERN_ERR "Number of rx queues : 1\n");
				adapter->num_rx_queues = 1;
				adapter->intr.num_intrs =
						VMXNET3_LINUX_MIN_MSIX_VECT;
			}
			return;
		}
		if (!err)
			return;

		/* If we cannot allocate MSIx vectors use only one rx queue */
		printk(KERN_INFO "Failed to enable MSI-X for %s, error %d. "
		       "#rx queues : 1, try MSI\n", adapter->netdev->name, err);

		adapter->intr.type = VMXNET3_IT_MSI;
	}

	if (adapter->intr.type == VMXNET3_IT_MSI) {
		int err;
		err = pci_enable_msi(adapter->pdev);
		if (!err) {
			adapter->num_rx_queues = 1;
			adapter->intr.num_intrs = 1;
			return;
		}
	}
#endif /* CONFIG_PCI_MSI */

	adapter->num_rx_queues = 1;
	printk(KERN_INFO "Using INTx interrupt, #Rx queues: 1.\n");
	adapter->intr.type = VMXNET3_IT_INTX;

	/* INT-X related setting */
	adapter->intr.num_intrs = 1;
}
static void
vmxnet3_free_intr_resources(struct vmxnet3_adapter *adapter)
{
	if (adapter->intr.type == VMXNET3_IT_MSIX)
		pci_disable_msix(adapter->pdev);
	else if (adapter->intr.type == VMXNET3_IT_MSI)
		pci_disable_msi(adapter->pdev);
	else
		BUG_ON(adapter->intr.type != VMXNET3_IT_INTX);
}
static void
vmxnet3_tx_timeout(struct net_device *netdev)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
	adapter->tx_timeout_count++;

	printk(KERN_ERR "%s: tx hang\n", adapter->netdev->name);
	schedule_work(&adapter->work);
	netif_wake_queue(adapter->netdev);
}
static void
vmxnet3_reset_work(struct work_struct *data)
{
	struct vmxnet3_adapter *adapter;

	adapter = container_of(data, struct vmxnet3_adapter, work);

	/* if another thread is resetting the device, no need to proceed */
	if (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state))
		return;

	/* if the device is closed, we must leave it alone */
	rtnl_lock();
	if (netif_running(adapter->netdev)) {
		printk(KERN_INFO "%s: resetting\n", adapter->netdev->name);
		vmxnet3_quiesce_dev(adapter);
		vmxnet3_reset_dev(adapter);
		vmxnet3_activate_dev(adapter);
	} else {
		printk(KERN_INFO "%s: already closed\n", adapter->netdev->name);
	}
	rtnl_unlock();

	clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
}
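/*
 * Probe: allocate the netdev plus the DMA-visible shared and
 * queue-descriptor areas, check the hardware and UPT version registers,
 * pick an interrupt scheme, then register the netdev. The error paths
 * unwind in strict reverse order of the allocations above them.
 */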
static int __devinit
vmxnet3_probe_device(struct pci_dev *pdev,
		     const struct pci_device_id *id)
{
	static const struct net_device_ops vmxnet3_netdev_ops = {
		.ndo_open = vmxnet3_open,
		.ndo_stop = vmxnet3_close,
		.ndo_start_xmit = vmxnet3_xmit_frame,
		.ndo_set_mac_address = vmxnet3_set_mac_addr,
		.ndo_change_mtu = vmxnet3_change_mtu,
		.ndo_set_features = vmxnet3_set_features,
		.ndo_get_stats64 = vmxnet3_get_stats64,
		.ndo_tx_timeout = vmxnet3_tx_timeout,
		.ndo_set_multicast_list = vmxnet3_set_mc,
		.ndo_vlan_rx_register = vmxnet3_vlan_rx_register,
		.ndo_vlan_rx_add_vid = vmxnet3_vlan_rx_add_vid,
		.ndo_vlan_rx_kill_vid = vmxnet3_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
		.ndo_poll_controller = vmxnet3_netpoll,
#endif
	};
	int err;
	bool dma64 = false; /* stupid gcc */
	u32 ver;
	struct net_device *netdev;
	struct vmxnet3_adapter *adapter;
	u8 mac[ETH_ALEN];
	int size;
	int num_tx_queues;
	int num_rx_queues;

	if (!pci_msi_enabled())
		enable_mq = 0;

#ifdef VMXNET3_RSS
	if (enable_mq)
		num_rx_queues = min(VMXNET3_DEVICE_MAX_RX_QUEUES,
				    (int)num_online_cpus());
	else
#endif
		num_rx_queues = 1;

	if (enable_mq)
		num_tx_queues = min(VMXNET3_DEVICE_MAX_TX_QUEUES,
				    (int)num_online_cpus());
	else
		num_tx_queues = 1;

	netdev = alloc_etherdev_mq(sizeof(struct vmxnet3_adapter),
				   max(num_tx_queues, num_rx_queues));
	printk(KERN_INFO "# of Tx queues : %d, # of Rx queues : %d\n",
	       num_tx_queues, num_rx_queues);

	if (!netdev) {
		printk(KERN_ERR "Failed to alloc ethernet device for adapter "
		       "%s\n", pci_name(pdev));
		return -ENOMEM;
	}

	pci_set_drvdata(pdev, netdev);
	adapter = netdev_priv(netdev);
	adapter->netdev = netdev;
	adapter->pdev = pdev;

	spin_lock_init(&adapter->cmd_lock);
	adapter->shared = pci_alloc_consistent(adapter->pdev,
			  sizeof(struct Vmxnet3_DriverShared),
			  &adapter->shared_pa);
	if (!adapter->shared) {
		printk(KERN_ERR "Failed to allocate memory for %s\n",
		       pci_name(pdev));
		err = -ENOMEM;
		goto err_alloc_shared;
	}

	adapter->num_rx_queues = num_rx_queues;
	adapter->num_tx_queues = num_tx_queues;

	size = sizeof(struct Vmxnet3_TxQueueDesc) * adapter->num_tx_queues;
	size += sizeof(struct Vmxnet3_RxQueueDesc) * adapter->num_rx_queues;
	adapter->tqd_start = pci_alloc_consistent(adapter->pdev, size,
			     &adapter->queue_desc_pa);

	if (!adapter->tqd_start) {
		printk(KERN_ERR "Failed to allocate memory for %s\n",
		       pci_name(pdev));
		err = -ENOMEM;
		goto err_alloc_queue_desc;
	}
	adapter->rqd_start = (struct Vmxnet3_RxQueueDesc *)(adapter->tqd_start +
							adapter->num_tx_queues);

	adapter->pm_conf = kmalloc(sizeof(struct Vmxnet3_PMConf), GFP_KERNEL);
	if (adapter->pm_conf == NULL) {
		printk(KERN_ERR "Failed to allocate memory for %s\n",
		       pci_name(pdev));
		err = -ENOMEM;
		goto err_alloc_pm;
	}

#ifdef VMXNET3_RSS

	adapter->rss_conf = kmalloc(sizeof(struct UPT1_RSSConf), GFP_KERNEL);
	if (adapter->rss_conf == NULL) {
		printk(KERN_ERR "Failed to allocate memory for %s\n",
		       pci_name(pdev));
		err = -ENOMEM;
		goto err_alloc_rss;
	}
#endif /* VMXNET3_RSS */

	err = vmxnet3_alloc_pci_resources(adapter, &dma64);
	if (err < 0)
		goto err_alloc_pci;

	ver = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_VRRS);
	if (ver & 1) {
		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_VRRS, 1);
	} else {
		printk(KERN_ERR "Incompatible h/w version (0x%x) for adapter"
		       " %s\n", ver, pci_name(pdev));
		err = -EBUSY;
		goto err_ver;
	}

	ver = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_UVRS);
	if (ver & 1) {
		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_UVRS, 1);
	} else {
		printk(KERN_ERR "Incompatible upt version (0x%x) for "
		       "adapter %s\n", ver, pci_name(pdev));
		err = -EBUSY;
		goto err_ver;
	}

	vmxnet3_declare_features(adapter, dma64);

	adapter->dev_number = atomic_read(&devices_found);

	adapter->share_intr = irq_share_mode;
	if (adapter->share_intr == VMXNET3_INTR_BUDDYSHARE &&
	    adapter->num_tx_queues != adapter->num_rx_queues)
		adapter->share_intr = VMXNET3_INTR_DONTSHARE;

	vmxnet3_alloc_intr_resources(adapter);

#ifdef VMXNET3_RSS
	if (adapter->num_rx_queues > 1 &&
	    adapter->intr.type == VMXNET3_IT_MSIX) {
		adapter->rss = true;
		printk(KERN_INFO "RSS is enabled.\n");
	} else {
		adapter->rss = false;
	}
#endif

	vmxnet3_read_mac_addr(adapter, mac);
	memcpy(netdev->dev_addr, mac, netdev->addr_len);

	netdev->netdev_ops = &vmxnet3_netdev_ops;
	vmxnet3_set_ethtool_ops(netdev);
	netdev->watchdog_timeo = 5 * HZ;

	INIT_WORK(&adapter->work, vmxnet3_reset_work);

	if (adapter->intr.type == VMXNET3_IT_MSIX) {
		int i;
		for (i = 0; i < adapter->num_rx_queues; i++) {
			netif_napi_add(adapter->netdev,
				       &adapter->rx_queue[i].napi,
				       vmxnet3_poll_rx_only, 64);
		}
	} else {
		netif_napi_add(adapter->netdev, &adapter->rx_queue[0].napi,
			       vmxnet3_poll, 64);
	}

	netif_set_real_num_tx_queues(adapter->netdev, adapter->num_tx_queues);
	netif_set_real_num_rx_queues(adapter->netdev, adapter->num_rx_queues);

	SET_NETDEV_DEV(netdev, &pdev->dev);
	err = register_netdev(netdev);

	if (err) {
		printk(KERN_ERR "Failed to register adapter %s\n",
		       pci_name(pdev));
		goto err_register;
	}

	set_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state);
	vmxnet3_check_link(adapter, false);
	atomic_inc(&devices_found);
	return 0;

err_register:
	vmxnet3_free_intr_resources(adapter);
err_ver:
	vmxnet3_free_pci_resources(adapter);
err_alloc_pci:
#ifdef VMXNET3_RSS
	kfree(adapter->rss_conf);
err_alloc_rss:
#endif
	kfree(adapter->pm_conf);
err_alloc_pm:
	pci_free_consistent(adapter->pdev, size, adapter->tqd_start,
			    adapter->queue_desc_pa);
err_alloc_queue_desc:
	pci_free_consistent(adapter->pdev, sizeof(struct Vmxnet3_DriverShared),
			    adapter->shared, adapter->shared_pa);
err_alloc_shared:
	pci_set_drvdata(pdev, NULL);
	free_netdev(netdev);
	return err;
}
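/*
 * Note: remove recomputes the worst-case rx queue count the same way
 * probe did, because vmxnet3_alloc_intr_resources() may have shrunk
 * adapter->num_rx_queues after the queue descriptor area was sized;
 * the free must cover the full original allocation.
 */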
static void __devexit
vmxnet3_remove_device(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
	int size = 0;
	int num_rx_queues;

#ifdef VMXNET3_RSS
	if (enable_mq)
		num_rx_queues = min(VMXNET3_DEVICE_MAX_RX_QUEUES,
				    (int)num_online_cpus());
	else
#endif
		num_rx_queues = 1;

	cancel_work_sync(&adapter->work);

	unregister_netdev(netdev);

	vmxnet3_free_intr_resources(adapter);
	vmxnet3_free_pci_resources(adapter);
#ifdef VMXNET3_RSS
	kfree(adapter->rss_conf);
#endif
	kfree(adapter->pm_conf);

	size = sizeof(struct Vmxnet3_TxQueueDesc) * adapter->num_tx_queues;
	size += sizeof(struct Vmxnet3_RxQueueDesc) * num_rx_queues;
	pci_free_consistent(adapter->pdev, size, adapter->tqd_start,
			    adapter->queue_desc_pa);
	pci_free_consistent(adapter->pdev, sizeof(struct Vmxnet3_DriverShared),
			    adapter->shared, adapter->shared_pa);
	free_netdev(netdev);
}
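/*
 * Power management. On suspend the driver programs the device with
 * pattern-match wake-up filters (for the unicast MAC and/or ARP requests
 * targeting the primary IPv4 address, depending on adapter->wol) before
 * handing the PCI device to the PM core; resume clears the filters and
 * rebuilds the interrupt setup.
 */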
static int
vmxnet3_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
	struct Vmxnet3_PMConf *pmConf;
	struct ethhdr *ehdr;
	struct arphdr *ahdr;
	u8 *arpreq;
	struct in_device *in_dev;
	struct in_ifaddr *ifa;
	unsigned long flags;
	int i = 0;

	if (!netif_running(netdev))
		return 0;

	for (i = 0; i < adapter->num_rx_queues; i++)
		napi_disable(&adapter->rx_queue[i].napi);

	vmxnet3_disable_all_intrs(adapter);
	vmxnet3_free_irqs(adapter);
	vmxnet3_free_intr_resources(adapter);

	netif_device_detach(netdev);
	netif_tx_stop_all_queues(netdev);

	/* Create wake-up filters. */
	pmConf = adapter->pm_conf;
	memset(pmConf, 0, sizeof(*pmConf));

	if (adapter->wol & WAKE_UCAST) {
		pmConf->filters[i].patternSize = ETH_ALEN;
		pmConf->filters[i].maskSize = 1;
		memcpy(pmConf->filters[i].pattern, netdev->dev_addr, ETH_ALEN);
		pmConf->filters[i].mask[0] = 0x3F; /* LSB ETH_ALEN bits */

		pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_FILTER;
		i++;
	}

	if (adapter->wol & WAKE_ARP) {
		in_dev = in_dev_get(netdev);
		if (!in_dev)
			goto skip_arp;

		ifa = (struct in_ifaddr *)in_dev->ifa_list;
		if (!ifa)
			goto skip_arp;

		pmConf->filters[i].patternSize = ETH_HLEN + /* Ethernet header*/
			sizeof(struct arphdr) +		/* ARP header */
			2 * ETH_ALEN +		/* 2 Ethernet addresses*/
			2 * sizeof(u32);	/*2 IPv4 addresses */
		pmConf->filters[i].maskSize =
			(pmConf->filters[i].patternSize - 1) / 8 + 1;

		/* ETH_P_ARP in Ethernet header. */
		ehdr = (struct ethhdr *)pmConf->filters[i].pattern;
		ehdr->h_proto = htons(ETH_P_ARP);

		/* ARPOP_REQUEST in ARP header. */
		ahdr = (struct arphdr *)&pmConf->filters[i].pattern[ETH_HLEN];
		ahdr->ar_op = htons(ARPOP_REQUEST);
		arpreq = (u8 *)(ahdr + 1);

		/* The Unicast IPv4 address in 'tip' field. */
		arpreq += 2 * ETH_ALEN + sizeof(u32);
		*(u32 *)arpreq = ifa->ifa_address;

		/* The mask for the relevant bits. */
		pmConf->filters[i].mask[0] = 0x00;
		pmConf->filters[i].mask[1] = 0x30; /* ETH_P_ARP */
		pmConf->filters[i].mask[2] = 0x30; /* ARPOP_REQUEST */
		pmConf->filters[i].mask[3] = 0x00;
		pmConf->filters[i].mask[4] = 0xC0; /* IPv4 TIP */
		pmConf->filters[i].mask[5] = 0x03; /* IPv4 TIP */
		in_dev_put(in_dev);

		pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_FILTER;
		i++;
	}

skip_arp:
	if (adapter->wol & WAKE_MAGIC)
		pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_MAGIC;

	pmConf->numFilters = i;

	adapter->shared->devRead.pmConfDesc.confVer = cpu_to_le32(1);
	adapter->shared->devRead.pmConfDesc.confLen = cpu_to_le32(sizeof(
								  *pmConf));
	adapter->shared->devRead.pmConfDesc.confPA = cpu_to_le64(virt_to_phys(
								 pmConf));

	spin_lock_irqsave(&adapter->cmd_lock, flags);
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
			       VMXNET3_CMD_UPDATE_PMCFG);
	spin_unlock_irqrestore(&adapter->cmd_lock, flags);

	pci_save_state(pdev);
	pci_enable_wake(pdev, pci_choose_state(pdev, PMSG_SUSPEND),
			adapter->wol);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, PMSG_SUSPEND));

	return 0;
}
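/*
 * Resume mirrors suspend: the wake-up filter config is zeroed and pushed
 * back to the device with another UPDATE_PMCFG before interrupts and
 * NAPI are brought back up.
 */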
static int
vmxnet3_resume(struct device *device)
{
	int err, i = 0;
	unsigned long flags;
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
	struct Vmxnet3_PMConf *pmConf;

	if (!netif_running(netdev))
		return 0;

	/* Destroy wake-up filters. */
	pmConf = adapter->pm_conf;
	memset(pmConf, 0, sizeof(*pmConf));

	adapter->shared->devRead.pmConfDesc.confVer = cpu_to_le32(1);
	adapter->shared->devRead.pmConfDesc.confLen = cpu_to_le32(sizeof(
								  *pmConf));
	adapter->shared->devRead.pmConfDesc.confPA = cpu_to_le64(virt_to_phys(
								 pmConf));

	netif_device_attach(netdev);
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	err = pci_enable_device_mem(pdev);
	if (err != 0)
		return err;

	pci_enable_wake(pdev, PCI_D0, 0);

	spin_lock_irqsave(&adapter->cmd_lock, flags);
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
			       VMXNET3_CMD_UPDATE_PMCFG);
	spin_unlock_irqrestore(&adapter->cmd_lock, flags);
	vmxnet3_alloc_intr_resources(adapter);
	vmxnet3_request_irqs(adapter);
	for (i = 0; i < adapter->num_rx_queues; i++)
		napi_enable(&adapter->rx_queue[i].napi);
	vmxnet3_enable_all_intrs(adapter);

	return 0;
}
static const struct dev_pm_ops vmxnet3_pm_ops = {
	.suspend = vmxnet3_suspend,
	.resume = vmxnet3_resume,
};

static struct pci_driver vmxnet3_driver = {
	.name		= vmxnet3_driver_name,
	.id_table	= vmxnet3_pciid_table,
	.probe		= vmxnet3_probe_device,
	.remove		= __devexit_p(vmxnet3_remove_device),
	.driver.pm	= &vmxnet3_pm_ops,
};
static int __init
vmxnet3_init_module(void)
{
	printk(KERN_INFO "%s - version %s\n", VMXNET3_DRIVER_DESC,
	       VMXNET3_DRIVER_VERSION_REPORT);
	return pci_register_driver(&vmxnet3_driver);
}

module_init(vmxnet3_init_module);
static void
vmxnet3_exit_module(void)
{
	pci_unregister_driver(&vmxnet3_driver);
}

module_exit(vmxnet3_exit_module);
MODULE_AUTHOR("VMware, Inc.");
MODULE_DESCRIPTION(VMXNET3_DRIVER_DESC);
MODULE_LICENSE("GPL v2");
MODULE_VERSION(VMXNET3_DRIVER_VERSION_STRING);