qlcnic: 83xx data path routines
drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
/*
 * QLogic qlcnic NIC Driver
 * Copyright (c) 2009-2010 QLogic Corporation
 *
 * See LICENSE.qlcnic for copyright and licensing details.
 */

#include "qlcnic.h"
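
/*
 * Mailbox command metadata: {command code, number of request arguments,
 * number of response arguments}, as consumed by qlcnic_82xx_alloc_mbx_args().
 */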
static const struct qlcnic_mailbox_metadata qlcnic_mbx_tbl[] = {
	{QLCNIC_CMD_CREATE_RX_CTX, 4, 1},
	{QLCNIC_CMD_DESTROY_RX_CTX, 2, 1},
	{QLCNIC_CMD_CREATE_TX_CTX, 4, 1},
	{QLCNIC_CMD_DESTROY_TX_CTX, 2, 1},
	{QLCNIC_CMD_INTRPT_TEST, 4, 1},
	{QLCNIC_CMD_SET_MTU, 4, 1},
	{QLCNIC_CMD_READ_PHY, 4, 2},
	{QLCNIC_CMD_WRITE_PHY, 5, 1},
	{QLCNIC_CMD_READ_HW_REG, 4, 1},
	{QLCNIC_CMD_GET_FLOW_CTL, 4, 2},
	{QLCNIC_CMD_SET_FLOW_CTL, 4, 1},
	{QLCNIC_CMD_READ_MAX_MTU, 4, 2},
	{QLCNIC_CMD_READ_MAX_LRO, 4, 2},
	{QLCNIC_CMD_MAC_ADDRESS, 4, 3},
	{QLCNIC_CMD_GET_PCI_INFO, 4, 1},
	{QLCNIC_CMD_GET_NIC_INFO, 4, 1},
	{QLCNIC_CMD_SET_NIC_INFO, 4, 1},
	{QLCNIC_CMD_GET_ESWITCH_CAPABILITY, 4, 3},
	{QLCNIC_CMD_TOGGLE_ESWITCH, 4, 1},
	{QLCNIC_CMD_GET_ESWITCH_STATUS, 4, 3},
	{QLCNIC_CMD_SET_PORTMIRRORING, 4, 1},
	{QLCNIC_CMD_CONFIGURE_ESWITCH, 4, 1},
	{QLCNIC_CMD_GET_MAC_STATS, 4, 1},
	{QLCNIC_CMD_GET_ESWITCH_PORT_CONFIG, 4, 3},
	{QLCNIC_CMD_GET_ESWITCH_STATS, 5, 1},
	{QLCNIC_CMD_CONFIG_PORT, 4, 1},
	{QLCNIC_CMD_TEMP_SIZE, 4, 4},
	{QLCNIC_CMD_GET_TEMP_HDR, 4, 1},
	{QLCNIC_CMD_SET_DRV_VER, 4, 1},
};
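
/* Build the command signature (PCI function, FW HAL version and a 0xcafe tag)
 * that is written to the signature CRB before each mailbox command.
 */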
static inline u32 qlcnic_get_cmd_signature(struct qlcnic_hardware_context *ahw)
{
	return (ahw->pci_func & 0xff) | ((ahw->fw_hal_version & 0xff) << 8) |
	       (0xcafe << 16);
}

/* Allocate mailbox registers */
int qlcnic_82xx_alloc_mbx_args(struct qlcnic_cmd_args *mbx,
			       struct qlcnic_adapter *adapter, u32 type)
{
	int i, size;
	const struct qlcnic_mailbox_metadata *mbx_tbl;

	mbx_tbl = qlcnic_mbx_tbl;
	size = ARRAY_SIZE(qlcnic_mbx_tbl);
	for (i = 0; i < size; i++) {
		if (type == mbx_tbl[i].cmd) {
			mbx->req.num = mbx_tbl[i].in_args;
			mbx->rsp.num = mbx_tbl[i].out_args;
			mbx->req.arg = kcalloc(mbx->req.num,
					       sizeof(u32), GFP_ATOMIC);
			if (!mbx->req.arg)
				return -ENOMEM;
			mbx->rsp.arg = kcalloc(mbx->rsp.num,
					       sizeof(u32), GFP_ATOMIC);
			if (!mbx->rsp.arg) {
				kfree(mbx->req.arg);
				mbx->req.arg = NULL;
				return -ENOMEM;
			}
			memset(mbx->req.arg, 0, sizeof(u32) * mbx->req.num);
			memset(mbx->rsp.arg, 0, sizeof(u32) * mbx->rsp.num);
			mbx->req.arg[0] = type;
			break;
		}
	}
	return 0;
}

/* Free up mailbox registers */
void qlcnic_free_mbx_args(struct qlcnic_cmd_args *cmd)
{
	kfree(cmd->req.arg);
	cmd->req.arg = NULL;
	kfree(cmd->rsp.arg);
	cmd->rsp.arg = NULL;
}
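
/* Map a PCI function to its index in the NPAR table; -1 if it is not an
 * active NIC function.
 */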
static int qlcnic_is_valid_nic_func(struct qlcnic_adapter *adapter, u8 pci_func)
{
	int i;

	for (i = 0; i < adapter->ahw->act_pci_func; i++) {
		if (adapter->npars[i].pci_func == pci_func)
			return i;
	}

	return -1;
}
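
/* Poll the CDRP CRB until firmware posts a response, or give up after
 * QLCNIC_OS_CRB_RETRY_COUNT milliseconds.
 */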
static u32
qlcnic_poll_rsp(struct qlcnic_adapter *adapter)
{
	u32 rsp;
	int timeout = 0;

	do {
		/* give at least 1ms for firmware to respond */
		mdelay(1);

		if (++timeout > QLCNIC_OS_CRB_RETRY_COUNT)
			return QLCNIC_CDRP_RSP_TIMEOUT;

		rsp = QLCRD32(adapter, QLCNIC_CDRP_CRB_OFFSET);
	} while (!QLCNIC_CDRP_IS_RSP(rsp));

	return rsp;
}
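
/*
 * Issue a CDRP command: write the signature and request arguments to the
 * CRB mailbox, kick the command, then read back the response arguments.
 */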
int qlcnic_82xx_issue_cmd(struct qlcnic_adapter *adapter,
			  struct qlcnic_cmd_args *cmd)
{
	int i;
	u32 rsp;
	u32 signature;
	struct pci_dev *pdev = adapter->pdev;
	struct qlcnic_hardware_context *ahw = adapter->ahw;

	signature = qlcnic_get_cmd_signature(ahw);

	/* Acquire semaphore before accessing CRB */
	if (qlcnic_api_lock(adapter)) {
		cmd->rsp.arg[0] = QLCNIC_RCODE_TIMEOUT;
		return cmd->rsp.arg[0];
	}

	QLCWR32(adapter, QLCNIC_SIGN_CRB_OFFSET, signature);
	for (i = 1; i < QLCNIC_CDRP_MAX_ARGS; i++)
		QLCWR32(adapter, QLCNIC_CDRP_ARG(i), cmd->req.arg[i]);
	QLCWR32(adapter, QLCNIC_CDRP_CRB_OFFSET,
		QLCNIC_CDRP_FORM_CMD(cmd->req.arg[0]));
	rsp = qlcnic_poll_rsp(adapter);

	if (rsp == QLCNIC_CDRP_RSP_TIMEOUT) {
		dev_err(&pdev->dev, "card response timeout.\n");
		cmd->rsp.arg[0] = QLCNIC_RCODE_TIMEOUT;
	} else if (rsp == QLCNIC_CDRP_RSP_FAIL) {
		cmd->rsp.arg[0] = QLCRD32(adapter, QLCNIC_CDRP_ARG(1));
		dev_err(&pdev->dev, "failed card response code:0x%x\n",
			cmd->rsp.arg[0]);
	} else if (rsp == QLCNIC_CDRP_RSP_OK)
		cmd->rsp.arg[0] = QLCNIC_RCODE_SUCCESS;

	for (i = 1; i < cmd->rsp.num; i++)
		cmd->rsp.arg[i] = QLCRD32(adapter, QLCNIC_CDRP_ARG(i));

	/* Release semaphore */
	qlcnic_api_unlock(adapter);
	return cmd->rsp.arg[0];
}
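
/* Fetch the firmware dump template header (up to 0x1000 bytes) into a
 * DMA-coherent buffer.
 */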
int qlcnic_fw_cmd_get_minidump_temp(struct qlcnic_adapter *adapter)
{
	int err = 0;
	void *tmp_addr;
	struct qlcnic_cmd_args cmd;
	dma_addr_t tmp_addr_t = 0;

	tmp_addr = dma_alloc_coherent(&adapter->pdev->dev, 0x1000,
				      &tmp_addr_t, GFP_KERNEL);
	if (!tmp_addr) {
		dev_err(&adapter->pdev->dev,
			"Can't get memory for FW dump template\n");
		return -ENOMEM;
	}

	if (qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_TEMP_HDR)) {
		err = -ENOMEM;
		goto free_mem;
	}

	cmd.req.arg[1] = LSD(tmp_addr_t);
	cmd.req.arg[2] = MSD(tmp_addr_t);
	cmd.req.arg[3] = 0x1000;
	err = qlcnic_issue_cmd(adapter, &cmd);

	qlcnic_free_mbx_args(&cmd);

free_mem:
	dma_free_coherent(&adapter->pdev->dev, 0x1000, tmp_addr, tmp_addr_t);

	return err;
}

int
qlcnic_fw_cmd_set_mtu(struct qlcnic_adapter *adapter, int mtu)
{
	int err = 0;
	struct qlcnic_cmd_args cmd;
	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;

	if (recv_ctx->state != QLCNIC_HOST_CTX_STATE_ACTIVE)
		return err;
	qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_SET_MTU);
	cmd.req.arg[1] = recv_ctx->context_id;
	cmd.req.arg[2] = mtu;

	err = qlcnic_issue_cmd(adapter, &cmd);
	if (err) {
		dev_err(&adapter->pdev->dev, "Failed to set mtu\n");
		err = -EIO;
	}
	qlcnic_free_mbx_args(&cmd);
	return err;
}
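
/*
 * Build a host request describing all RDS/SDS rings and ask firmware to
 * create the receive context; record the CRB addresses it returns.
 */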
int qlcnic_82xx_fw_cmd_create_rx_ctx(struct qlcnic_adapter *adapter)
{
	void *addr;
	struct qlcnic_hostrq_rx_ctx *prq;
	struct qlcnic_cardrsp_rx_ctx *prsp;
	struct qlcnic_hostrq_rds_ring *prq_rds;
	struct qlcnic_hostrq_sds_ring *prq_sds;
	struct qlcnic_cardrsp_rds_ring *prsp_rds;
	struct qlcnic_cardrsp_sds_ring *prsp_sds;
	struct qlcnic_host_rds_ring *rds_ring;
	struct qlcnic_host_sds_ring *sds_ring;
	struct qlcnic_cmd_args cmd;

	dma_addr_t hostrq_phys_addr, cardrsp_phys_addr;
	u64 phys_addr;

	u8 i, nrds_rings, nsds_rings;
	u16 temp_u16;
	size_t rq_size, rsp_size;
	u32 cap, reg, val, reg2;
	int err;

	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;

	nrds_rings = adapter->max_rds_rings;
	nsds_rings = adapter->max_sds_rings;

	rq_size =
		SIZEOF_HOSTRQ_RX(struct qlcnic_hostrq_rx_ctx, nrds_rings,
				 nsds_rings);
	rsp_size =
		SIZEOF_CARDRSP_RX(struct qlcnic_cardrsp_rx_ctx, nrds_rings,
				  nsds_rings);

	addr = dma_alloc_coherent(&adapter->pdev->dev, rq_size,
				  &hostrq_phys_addr, GFP_KERNEL);
	if (addr == NULL)
		return -ENOMEM;
	prq = addr;

	addr = dma_alloc_coherent(&adapter->pdev->dev, rsp_size,
				  &cardrsp_phys_addr, GFP_KERNEL);
	if (addr == NULL) {
		err = -ENOMEM;
		goto out_free_rq;
	}
	prsp = addr;

	prq->host_rsp_dma_addr = cpu_to_le64(cardrsp_phys_addr);

	cap = (QLCNIC_CAP0_LEGACY_CONTEXT | QLCNIC_CAP0_LEGACY_MN
	       | QLCNIC_CAP0_VALIDOFF);
	cap |= (QLCNIC_CAP0_JUMBO_CONTIGUOUS | QLCNIC_CAP0_LRO_CONTIGUOUS);

	temp_u16 = offsetof(struct qlcnic_hostrq_rx_ctx, msix_handler);
	prq->valid_field_offset = cpu_to_le16(temp_u16);
	prq->txrx_sds_binding = nsds_rings - 1;

	prq->capabilities[0] = cpu_to_le32(cap);
	prq->host_int_crb_mode =
		cpu_to_le32(QLCNIC_HOST_INT_CRB_MODE_SHARED);
	prq->host_rds_crb_mode =
		cpu_to_le32(QLCNIC_HOST_RDS_CRB_MODE_UNIQUE);

	prq->num_rds_rings = cpu_to_le16(nrds_rings);
	prq->num_sds_rings = cpu_to_le16(nsds_rings);
	prq->rds_ring_offset = 0;

	val = le32_to_cpu(prq->rds_ring_offset) +
	      (sizeof(struct qlcnic_hostrq_rds_ring) * nrds_rings);
	prq->sds_ring_offset = cpu_to_le32(val);

	prq_rds = (struct qlcnic_hostrq_rds_ring *)(prq->data +
			le32_to_cpu(prq->rds_ring_offset));

	for (i = 0; i < nrds_rings; i++) {
		rds_ring = &recv_ctx->rds_rings[i];
		rds_ring->producer = 0;

		prq_rds[i].host_phys_addr = cpu_to_le64(rds_ring->phys_addr);
		prq_rds[i].ring_size = cpu_to_le32(rds_ring->num_desc);
		prq_rds[i].ring_kind = cpu_to_le32(i);
		prq_rds[i].buff_size = cpu_to_le64(rds_ring->dma_size);
	}

	prq_sds = (struct qlcnic_hostrq_sds_ring *)(prq->data +
			le32_to_cpu(prq->sds_ring_offset));

	for (i = 0; i < nsds_rings; i++) {
		sds_ring = &recv_ctx->sds_rings[i];
		sds_ring->consumer = 0;
		memset(sds_ring->desc_head, 0, STATUS_DESC_RINGSIZE(sds_ring));

		prq_sds[i].host_phys_addr = cpu_to_le64(sds_ring->phys_addr);
		prq_sds[i].ring_size = cpu_to_le32(sds_ring->num_desc);
		prq_sds[i].msi_index = cpu_to_le16(i);
	}

	phys_addr = hostrq_phys_addr;
	qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CREATE_RX_CTX);
	cmd.req.arg[1] = MSD(phys_addr);
	cmd.req.arg[2] = LSD(phys_addr);
	cmd.req.arg[3] = rq_size;
	err = qlcnic_issue_cmd(adapter, &cmd);
	if (err) {
		dev_err(&adapter->pdev->dev,
			"Failed to create rx ctx in firmware%d\n", err);
		goto out_free_rsp;
	}

	prsp_rds = ((struct qlcnic_cardrsp_rds_ring *)
			&prsp->data[le32_to_cpu(prsp->rds_ring_offset)]);

	for (i = 0; i < le16_to_cpu(prsp->num_rds_rings); i++) {
		rds_ring = &recv_ctx->rds_rings[i];

		reg = le32_to_cpu(prsp_rds[i].host_producer_crb);
		rds_ring->crb_rcv_producer = adapter->ahw->pci_base0 + reg;
	}

	prsp_sds = ((struct qlcnic_cardrsp_sds_ring *)
			&prsp->data[le32_to_cpu(prsp->sds_ring_offset)]);

	for (i = 0; i < le16_to_cpu(prsp->num_sds_rings); i++) {
		sds_ring = &recv_ctx->sds_rings[i];

		reg = le32_to_cpu(prsp_sds[i].host_consumer_crb);
		reg2 = le32_to_cpu(prsp_sds[i].interrupt_crb);

		sds_ring->crb_sts_consumer = adapter->ahw->pci_base0 + reg;
		sds_ring->crb_intr_mask = adapter->ahw->pci_base0 + reg2;
	}

	recv_ctx->state = le32_to_cpu(prsp->host_ctx_state);
	recv_ctx->context_id = le16_to_cpu(prsp->context_id);
	recv_ctx->virt_port = prsp->virt_port;

out_free_rsp:
	dma_free_coherent(&adapter->pdev->dev, rsp_size, prsp,
			  cardrsp_phys_addr);
	qlcnic_free_mbx_args(&cmd);
out_free_rq:
	dma_free_coherent(&adapter->pdev->dev, rq_size, prq, hostrq_phys_addr);
	return err;
}

static void
qlcnic_fw_cmd_destroy_rx_ctx(struct qlcnic_adapter *adapter)
{
	int err;
	struct qlcnic_cmd_args cmd;
	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;

	qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_DESTROY_RX_CTX);
	cmd.req.arg[1] = recv_ctx->context_id;
	err = qlcnic_issue_cmd(adapter, &cmd);
	if (err)
		dev_err(&adapter->pdev->dev,
			"Failed to destroy rx ctx in firmware\n");

	recv_ctx->state = QLCNIC_HOST_CTX_STATE_FREED;
	qlcnic_free_mbx_args(&cmd);
}
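
/*
 * Build a host request for one Tx (CDS) ring and ask firmware to create the
 * transmit context; record the returned producer CRB and context id.
 */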
int qlcnic_82xx_fw_cmd_create_tx_ctx(struct qlcnic_adapter *adapter,
				     struct qlcnic_host_tx_ring *tx_ring,
				     int ring)
{
	struct qlcnic_hostrq_tx_ctx *prq;
	struct qlcnic_hostrq_cds_ring *prq_cds;
	struct qlcnic_cardrsp_tx_ctx *prsp;
	void *rq_addr, *rsp_addr;
	size_t rq_size, rsp_size;
	u32 temp;
	struct qlcnic_cmd_args cmd;
	int err;
	u64 phys_addr;
	dma_addr_t rq_phys_addr, rsp_phys_addr;

	/* reset host resources */
	tx_ring->producer = 0;
	tx_ring->sw_consumer = 0;
	*(tx_ring->hw_consumer) = 0;

	rq_size = SIZEOF_HOSTRQ_TX(struct qlcnic_hostrq_tx_ctx);
	rq_addr = dma_alloc_coherent(&adapter->pdev->dev, rq_size,
				     &rq_phys_addr, GFP_KERNEL);
	if (!rq_addr)
		return -ENOMEM;

	rsp_size = SIZEOF_CARDRSP_TX(struct qlcnic_cardrsp_tx_ctx);
	rsp_addr = dma_alloc_coherent(&adapter->pdev->dev, rsp_size,
				      &rsp_phys_addr, GFP_KERNEL);
	if (!rsp_addr) {
		err = -ENOMEM;
		goto out_free_rq;
	}

	memset(rq_addr, 0, rq_size);
	prq = rq_addr;

	memset(rsp_addr, 0, rsp_size);
	prsp = rsp_addr;

	prq->host_rsp_dma_addr = cpu_to_le64(rsp_phys_addr);

	temp = (QLCNIC_CAP0_LEGACY_CONTEXT | QLCNIC_CAP0_LEGACY_MN |
		QLCNIC_CAP0_LSO);
	prq->capabilities[0] = cpu_to_le32(temp);

	prq->host_int_crb_mode =
		cpu_to_le32(QLCNIC_HOST_INT_CRB_MODE_SHARED);
	prq->msi_index = 0;

	prq->interrupt_ctl = 0;
	prq->cmd_cons_dma_addr = cpu_to_le64(tx_ring->hw_cons_phys_addr);

	prq_cds = &prq->cds_ring;

	prq_cds->host_phys_addr = cpu_to_le64(tx_ring->phys_addr);
	prq_cds->ring_size = cpu_to_le32(tx_ring->num_desc);

	phys_addr = rq_phys_addr;

	qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CREATE_TX_CTX);
	cmd.req.arg[1] = MSD(phys_addr);
	cmd.req.arg[2] = LSD(phys_addr);
	cmd.req.arg[3] = rq_size;
	err = qlcnic_issue_cmd(adapter, &cmd);

	if (err == QLCNIC_RCODE_SUCCESS) {
		temp = le32_to_cpu(prsp->cds_ring.host_producer_crb);
		tx_ring->crb_cmd_producer = adapter->ahw->pci_base0 + temp;
		tx_ring->ctx_id = le16_to_cpu(prsp->context_id);
	} else {
		dev_err(&adapter->pdev->dev,
			"Failed to create tx ctx in firmware%d\n", err);
		err = -EIO;
	}

	dma_free_coherent(&adapter->pdev->dev, rsp_size, rsp_addr,
			  rsp_phys_addr);

out_free_rq:
	dma_free_coherent(&adapter->pdev->dev, rq_size, rq_addr, rq_phys_addr);
	qlcnic_free_mbx_args(&cmd);

	return err;
}

static void
qlcnic_fw_cmd_destroy_tx_ctx(struct qlcnic_adapter *adapter,
			     struct qlcnic_host_tx_ring *tx_ring)
{
	struct qlcnic_cmd_args cmd;

	qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_DESTROY_TX_CTX);
	cmd.req.arg[1] = tx_ring->ctx_id;
	if (qlcnic_issue_cmd(adapter, &cmd))
		dev_err(&adapter->pdev->dev,
			"Failed to destroy tx ctx in firmware\n");
	qlcnic_free_mbx_args(&cmd);
}

int
qlcnic_fw_cmd_set_port(struct qlcnic_adapter *adapter, u32 config)
{
	int err;
	struct qlcnic_cmd_args cmd;

	qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIG_PORT);
	cmd.req.arg[1] = config;
	err = qlcnic_issue_cmd(adapter, &cmd);
	qlcnic_free_mbx_args(&cmd);
	return err;
}
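
/*
 * Allocate DMA-coherent descriptor rings and hardware consumer indices for
 * all Tx, RDS and SDS rings owned by this adapter.
 */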
int qlcnic_alloc_hw_resources(struct qlcnic_adapter *adapter)
{
	void *addr;
	int err, ring;
	struct qlcnic_recv_context *recv_ctx;
	struct qlcnic_host_rds_ring *rds_ring;
	struct qlcnic_host_sds_ring *sds_ring;
	struct qlcnic_host_tx_ring *tx_ring;
	__le32 *ptr;

	struct pci_dev *pdev = adapter->pdev;

	recv_ctx = adapter->recv_ctx;

	for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) {
		tx_ring = &adapter->tx_ring[ring];
		ptr = (__le32 *)dma_alloc_coherent(&pdev->dev, sizeof(u32),
						   &tx_ring->hw_cons_phys_addr,
						   GFP_KERNEL);

		if (ptr == NULL) {
			dev_err(&pdev->dev, "failed to allocate tx consumer\n");
			return -ENOMEM;
		}
		tx_ring->hw_consumer = ptr;
		/* cmd desc ring */
		addr = dma_alloc_coherent(&pdev->dev, TX_DESC_RINGSIZE(tx_ring),
					  &tx_ring->phys_addr,
					  GFP_KERNEL);

		if (addr == NULL) {
			dev_err(&pdev->dev,
				"failed to allocate tx desc ring\n");
			err = -ENOMEM;
			goto err_out_free;
		}

		tx_ring->desc_head = addr;
	}

	for (ring = 0; ring < adapter->max_rds_rings; ring++) {
		rds_ring = &recv_ctx->rds_rings[ring];
		addr = dma_alloc_coherent(&adapter->pdev->dev,
					  RCV_DESC_RINGSIZE(rds_ring),
					  &rds_ring->phys_addr, GFP_KERNEL);
		if (addr == NULL) {
			dev_err(&pdev->dev,
				"failed to allocate rds ring [%d]\n", ring);
			err = -ENOMEM;
			goto err_out_free;
		}
		rds_ring->desc_head = addr;

	}

	for (ring = 0; ring < adapter->max_sds_rings; ring++) {
		sds_ring = &recv_ctx->sds_rings[ring];

		addr = dma_alloc_coherent(&adapter->pdev->dev,
					  STATUS_DESC_RINGSIZE(sds_ring),
					  &sds_ring->phys_addr, GFP_KERNEL);
		if (addr == NULL) {
			dev_err(&pdev->dev,
				"failed to allocate sds ring [%d]\n", ring);
			err = -ENOMEM;
			goto err_out_free;
		}
		sds_ring->desc_head = addr;
	}

	return 0;

err_out_free:
	qlcnic_free_hw_resources(adapter);
	return err;
}
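
/*
 * Create the receive context and all transmit contexts in firmware,
 * performing a function-level reset first if one is pending; tear the
 * contexts down again if any Tx context creation fails.
 */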
int qlcnic_fw_create_ctx(struct qlcnic_adapter *dev)
{
	int i, err, ring;

	if (dev->flags & QLCNIC_NEED_FLR) {
		pci_reset_function(dev->pdev);
		dev->flags &= ~QLCNIC_NEED_FLR;
	}

	err = qlcnic_fw_cmd_create_rx_ctx(dev);
	if (err)
		return err;

	for (ring = 0; ring < dev->max_drv_tx_rings; ring++) {
		err = qlcnic_fw_cmd_create_tx_ctx(dev,
						  &dev->tx_ring[ring],
						  ring);
		if (err) {
			qlcnic_fw_cmd_destroy_rx_ctx(dev);
			if (ring == 0)
				return err;

			for (i = 0; i < ring; i++)
				qlcnic_fw_cmd_destroy_tx_ctx(dev,
							     &dev->tx_ring[i]);

			return err;
		}
	}

	set_bit(__QLCNIC_FW_ATTACHED, &dev->state);
	return 0;
}

void qlcnic_fw_destroy_ctx(struct qlcnic_adapter *adapter)
{
	int ring;

	if (test_and_clear_bit(__QLCNIC_FW_ATTACHED, &adapter->state)) {
		qlcnic_fw_cmd_destroy_rx_ctx(adapter);
		for (ring = 0; ring < adapter->max_drv_tx_rings; ring++)
			qlcnic_fw_cmd_destroy_tx_ctx(adapter,
						     &adapter->tx_ring[ring]);
		/* Allow dma queues to drain after context reset */
		mdelay(20);
	}
}

void qlcnic_free_hw_resources(struct qlcnic_adapter *adapter)
{
	struct qlcnic_recv_context *recv_ctx;
	struct qlcnic_host_rds_ring *rds_ring;
	struct qlcnic_host_sds_ring *sds_ring;
	struct qlcnic_host_tx_ring *tx_ring;
	int ring;

	recv_ctx = adapter->recv_ctx;

	for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) {
		tx_ring = &adapter->tx_ring[ring];
		if (tx_ring->hw_consumer != NULL) {
			dma_free_coherent(&adapter->pdev->dev, sizeof(u32),
					  tx_ring->hw_consumer,
					  tx_ring->hw_cons_phys_addr);

			tx_ring->hw_consumer = NULL;
		}

		if (tx_ring->desc_head != NULL) {
			dma_free_coherent(&adapter->pdev->dev,
					  TX_DESC_RINGSIZE(tx_ring),
					  tx_ring->desc_head,
					  tx_ring->phys_addr);
			tx_ring->desc_head = NULL;
		}
	}

	for (ring = 0; ring < adapter->max_rds_rings; ring++) {
		rds_ring = &recv_ctx->rds_rings[ring];

		if (rds_ring->desc_head != NULL) {
			dma_free_coherent(&adapter->pdev->dev,
					  RCV_DESC_RINGSIZE(rds_ring),
					  rds_ring->desc_head,
					  rds_ring->phys_addr);
			rds_ring->desc_head = NULL;
		}
	}

	for (ring = 0; ring < adapter->max_sds_rings; ring++) {
		sds_ring = &recv_ctx->sds_rings[ring];

		if (sds_ring->desc_head != NULL) {
			dma_free_coherent(&adapter->pdev->dev,
					  STATUS_DESC_RINGSIZE(sds_ring),
					  sds_ring->desc_head,
					  sds_ring->phys_addr);
			sds_ring->desc_head = NULL;
		}
	}
}
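
/* Read the MAC address assigned to this PCI function from firmware */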
int qlcnic_82xx_get_mac_address(struct qlcnic_adapter *adapter, u8 *mac)
{
	int err, i;
	struct qlcnic_cmd_args cmd;
	u32 mac_low, mac_high;

	qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_MAC_ADDRESS);
	cmd.req.arg[1] = adapter->ahw->pci_func | BIT_8;
	err = qlcnic_issue_cmd(adapter, &cmd);

	if (err == QLCNIC_RCODE_SUCCESS) {
		mac_low = cmd.rsp.arg[1];
		mac_high = cmd.rsp.arg[2];

		for (i = 0; i < 2; i++)
			mac[i] = (u8) (mac_high >> ((1 - i) * 8));
		for (i = 2; i < 6; i++)
			mac[i] = (u8) (mac_low >> ((5 - i) * 8));
	} else {
		dev_err(&adapter->pdev->dev,
			"Failed to get mac address%d\n", err);
		err = -EIO;
	}
	qlcnic_free_mbx_args(&cmd);
	return err;
}

/* Get info of a NIC partition */
int qlcnic_82xx_get_nic_info(struct qlcnic_adapter *adapter,
			     struct qlcnic_info *npar_info, u8 func_id)
{
	int err;
	dma_addr_t nic_dma_t;
	const struct qlcnic_info_le *nic_info;
	void *nic_info_addr;
	struct qlcnic_cmd_args cmd;
	size_t nic_size = sizeof(struct qlcnic_info_le);

	nic_info_addr = dma_alloc_coherent(&adapter->pdev->dev, nic_size,
					   &nic_dma_t, GFP_KERNEL);
	if (!nic_info_addr)
		return -ENOMEM;
	memset(nic_info_addr, 0, nic_size);

	nic_info = nic_info_addr;

	qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_NIC_INFO);
	cmd.req.arg[1] = MSD(nic_dma_t);
	cmd.req.arg[2] = LSD(nic_dma_t);
	cmd.req.arg[3] = (func_id << 16 | nic_size);
	err = qlcnic_issue_cmd(adapter, &cmd);
	if (err != QLCNIC_RCODE_SUCCESS) {
		dev_err(&adapter->pdev->dev,
			"Failed to get nic info%d\n", err);
		err = -EIO;
	} else {
		npar_info->pci_func = le16_to_cpu(nic_info->pci_func);
		npar_info->op_mode = le16_to_cpu(nic_info->op_mode);
		npar_info->min_tx_bw = le16_to_cpu(nic_info->min_tx_bw);
		npar_info->max_tx_bw = le16_to_cpu(nic_info->max_tx_bw);
		npar_info->phys_port = le16_to_cpu(nic_info->phys_port);
		npar_info->switch_mode = le16_to_cpu(nic_info->switch_mode);
		npar_info->max_tx_ques = le16_to_cpu(nic_info->max_tx_ques);
		npar_info->max_rx_ques = le16_to_cpu(nic_info->max_rx_ques);
		npar_info->capabilities = le32_to_cpu(nic_info->capabilities);
		npar_info->max_mtu = le16_to_cpu(nic_info->max_mtu);
	}

	dma_free_coherent(&adapter->pdev->dev, nic_size, nic_info_addr,
			  nic_dma_t);
	qlcnic_free_mbx_args(&cmd);

	return err;
}

/* Configure a NIC partition */
int qlcnic_82xx_set_nic_info(struct qlcnic_adapter *adapter,
			     struct qlcnic_info *nic)
{
	int err = -EIO;
	dma_addr_t nic_dma_t;
	void *nic_info_addr;
	struct qlcnic_cmd_args cmd;
	struct qlcnic_info_le *nic_info;
	size_t nic_size = sizeof(struct qlcnic_info_le);

	if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC)
		return err;

	nic_info_addr = dma_alloc_coherent(&adapter->pdev->dev, nic_size,
					   &nic_dma_t, GFP_KERNEL);
	if (!nic_info_addr)
		return -ENOMEM;

	memset(nic_info_addr, 0, nic_size);
	nic_info = nic_info_addr;

	nic_info->pci_func = cpu_to_le16(nic->pci_func);
	nic_info->op_mode = cpu_to_le16(nic->op_mode);
	nic_info->phys_port = cpu_to_le16(nic->phys_port);
	nic_info->switch_mode = cpu_to_le16(nic->switch_mode);
	nic_info->capabilities = cpu_to_le32(nic->capabilities);
	nic_info->max_mac_filters = nic->max_mac_filters;
	nic_info->max_tx_ques = cpu_to_le16(nic->max_tx_ques);
	nic_info->max_rx_ques = cpu_to_le16(nic->max_rx_ques);
	nic_info->min_tx_bw = cpu_to_le16(nic->min_tx_bw);
	nic_info->max_tx_bw = cpu_to_le16(nic->max_tx_bw);

	qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_SET_NIC_INFO);
	cmd.req.arg[1] = MSD(nic_dma_t);
	cmd.req.arg[2] = LSD(nic_dma_t);
	cmd.req.arg[3] = ((nic->pci_func << 16) | nic_size);
	err = qlcnic_issue_cmd(adapter, &cmd);

	if (err != QLCNIC_RCODE_SUCCESS) {
		dev_err(&adapter->pdev->dev,
			"Failed to set nic info%d\n", err);
		err = -EIO;
	}

	dma_free_coherent(&adapter->pdev->dev, nic_size, nic_info_addr,
			  nic_dma_t);
	qlcnic_free_mbx_args(&cmd);

	return err;
}

/* Get PCI Info of a partition */
int qlcnic_82xx_get_pci_info(struct qlcnic_adapter *adapter,
			     struct qlcnic_pci_info *pci_info)
{
	int err = 0, i;
	struct qlcnic_cmd_args cmd;
	dma_addr_t pci_info_dma_t;
	struct qlcnic_pci_info_le *npar;
	void *pci_info_addr;
	size_t npar_size = sizeof(struct qlcnic_pci_info_le);
	size_t pci_size = npar_size * QLCNIC_MAX_PCI_FUNC;

	pci_info_addr = dma_alloc_coherent(&adapter->pdev->dev, pci_size,
					   &pci_info_dma_t, GFP_KERNEL);
	if (!pci_info_addr)
		return -ENOMEM;
	memset(pci_info_addr, 0, pci_size);

	npar = pci_info_addr;
	qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_PCI_INFO);
	cmd.req.arg[1] = MSD(pci_info_dma_t);
	cmd.req.arg[2] = LSD(pci_info_dma_t);
	cmd.req.arg[3] = pci_size;
	err = qlcnic_issue_cmd(adapter, &cmd);

	adapter->ahw->act_pci_func = 0;
	if (err == QLCNIC_RCODE_SUCCESS) {
		for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++, npar++, pci_info++) {
			pci_info->id = le16_to_cpu(npar->id);
			pci_info->active = le16_to_cpu(npar->active);
			pci_info->type = le16_to_cpu(npar->type);
			if (pci_info->type == QLCNIC_TYPE_NIC)
				adapter->ahw->act_pci_func++;
			pci_info->default_port =
				le16_to_cpu(npar->default_port);
			pci_info->tx_min_bw =
				le16_to_cpu(npar->tx_min_bw);
			pci_info->tx_max_bw =
				le16_to_cpu(npar->tx_max_bw);
			memcpy(pci_info->mac, npar->mac, ETH_ALEN);
		}
	} else {
		dev_err(&adapter->pdev->dev,
			"Failed to get PCI Info%d\n", err);
		err = -EIO;
	}

	dma_free_coherent(&adapter->pdev->dev, pci_size, pci_info_addr,
			  pci_info_dma_t);
	qlcnic_free_mbx_args(&cmd);

	return err;
}

/* Configure eSwitch for port mirroring */
int qlcnic_config_port_mirroring(struct qlcnic_adapter *adapter, u8 id,
				 u8 enable_mirroring, u8 pci_func)
{
	int err = -EIO;
	u32 arg1;
	struct qlcnic_cmd_args cmd;

	if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC ||
	    !(adapter->eswitch[id].flags & QLCNIC_SWITCH_ENABLE))
		return err;

	arg1 = id | (enable_mirroring ? BIT_4 : 0);
	arg1 |= pci_func << 8;

	qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_SET_PORTMIRRORING);
	cmd.req.arg[1] = arg1;
	err = qlcnic_issue_cmd(adapter, &cmd);

	if (err != QLCNIC_RCODE_SUCCESS)
		dev_err(&adapter->pdev->dev,
			"Failed to configure port mirroring%d on eswitch:%d\n",
			pci_func, id);
	else
		dev_info(&adapter->pdev->dev,
			 "Configured eSwitch %d for port mirroring:%d\n",
			 id, pci_func);
	qlcnic_free_mbx_args(&cmd);

	return err;
}
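
/*
 * Query Tx or Rx eSwitch statistics for a single PCI function; firmware
 * fills a DMA buffer which is copied into esw_stats.
 */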
int qlcnic_get_port_stats(struct qlcnic_adapter *adapter, const u8 func,
		const u8 rx_tx, struct __qlcnic_esw_statistics *esw_stats) {

	size_t stats_size = sizeof(struct qlcnic_esw_stats_le);
	struct qlcnic_esw_stats_le *stats;
	dma_addr_t stats_dma_t;
	void *stats_addr;
	u32 arg1;
	struct qlcnic_cmd_args cmd;
	int err;

	if (esw_stats == NULL)
		return -ENOMEM;

	if ((adapter->ahw->op_mode != QLCNIC_MGMT_FUNC) &&
	    (func != adapter->ahw->pci_func)) {
		dev_err(&adapter->pdev->dev,
			"Not privilege to query stats for func=%d", func);
		return -EIO;
	}

	stats_addr = dma_alloc_coherent(&adapter->pdev->dev, stats_size,
					&stats_dma_t, GFP_KERNEL);
	if (!stats_addr) {
		dev_err(&adapter->pdev->dev, "Unable to allocate memory\n");
		return -ENOMEM;
	}
	memset(stats_addr, 0, stats_size);

	arg1 = func | QLCNIC_STATS_VERSION << 8 | QLCNIC_STATS_PORT << 12;
	arg1 |= rx_tx << 15 | stats_size << 16;

	qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_ESWITCH_STATS);
	cmd.req.arg[1] = arg1;
	cmd.req.arg[2] = MSD(stats_dma_t);
	cmd.req.arg[3] = LSD(stats_dma_t);
	err = qlcnic_issue_cmd(adapter, &cmd);

	if (!err) {
		stats = stats_addr;
		esw_stats->context_id = le16_to_cpu(stats->context_id);
		esw_stats->version = le16_to_cpu(stats->version);
		esw_stats->size = le16_to_cpu(stats->size);
		esw_stats->multicast_frames =
			le64_to_cpu(stats->multicast_frames);
		esw_stats->broadcast_frames =
			le64_to_cpu(stats->broadcast_frames);
		esw_stats->unicast_frames = le64_to_cpu(stats->unicast_frames);
		esw_stats->dropped_frames = le64_to_cpu(stats->dropped_frames);
		esw_stats->local_frames = le64_to_cpu(stats->local_frames);
		esw_stats->errors = le64_to_cpu(stats->errors);
		esw_stats->numbytes = le64_to_cpu(stats->numbytes);
	}

	dma_free_coherent(&adapter->pdev->dev, stats_size, stats_addr,
			  stats_dma_t);
	qlcnic_free_mbx_args(&cmd);

	return err;
}

/* This routine will retrieve the MAC statistics from firmware */
int qlcnic_get_mac_stats(struct qlcnic_adapter *adapter,
		struct qlcnic_mac_statistics *mac_stats)
{
	struct qlcnic_mac_statistics_le *stats;
	struct qlcnic_cmd_args cmd;
	size_t stats_size = sizeof(struct qlcnic_mac_statistics_le);
	dma_addr_t stats_dma_t;
	void *stats_addr;
	int err;

	if (mac_stats == NULL)
		return -ENOMEM;

	stats_addr = dma_alloc_coherent(&adapter->pdev->dev, stats_size,
					&stats_dma_t, GFP_KERNEL);
	if (!stats_addr) {
		dev_err(&adapter->pdev->dev,
			"%s: Unable to allocate memory.\n", __func__);
		return -ENOMEM;
	}
	memset(stats_addr, 0, stats_size);
	qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_MAC_STATS);
	cmd.req.arg[1] = stats_size << 16;
	cmd.req.arg[2] = MSD(stats_dma_t);
	cmd.req.arg[3] = LSD(stats_dma_t);
	err = qlcnic_issue_cmd(adapter, &cmd);
	if (!err) {
		stats = stats_addr;
		mac_stats->mac_tx_frames = le64_to_cpu(stats->mac_tx_frames);
		mac_stats->mac_tx_bytes = le64_to_cpu(stats->mac_tx_bytes);
		mac_stats->mac_tx_mcast_pkts =
			le64_to_cpu(stats->mac_tx_mcast_pkts);
		mac_stats->mac_tx_bcast_pkts =
			le64_to_cpu(stats->mac_tx_bcast_pkts);
		mac_stats->mac_rx_frames = le64_to_cpu(stats->mac_rx_frames);
		mac_stats->mac_rx_bytes = le64_to_cpu(stats->mac_rx_bytes);
		mac_stats->mac_rx_mcast_pkts =
			le64_to_cpu(stats->mac_rx_mcast_pkts);
		mac_stats->mac_rx_length_error =
			le64_to_cpu(stats->mac_rx_length_error);
		mac_stats->mac_rx_length_small =
			le64_to_cpu(stats->mac_rx_length_small);
		mac_stats->mac_rx_length_large =
			le64_to_cpu(stats->mac_rx_length_large);
		mac_stats->mac_rx_jabber = le64_to_cpu(stats->mac_rx_jabber);
		mac_stats->mac_rx_dropped = le64_to_cpu(stats->mac_rx_dropped);
		mac_stats->mac_rx_crc_error = le64_to_cpu(stats->mac_rx_crc_error);
	} else {
		dev_err(&adapter->pdev->dev,
			"%s: Get mac stats failed, err=%d.\n", __func__, err);
	}

	dma_free_coherent(&adapter->pdev->dev, stats_size, stats_addr,
			  stats_dma_t);

	qlcnic_free_mbx_args(&cmd);

	return err;
}
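
/*
 * Aggregate Rx or Tx statistics for every active NIC function that sits
 * behind the given eSwitch.
 */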
int qlcnic_get_eswitch_stats(struct qlcnic_adapter *adapter, const u8 eswitch,
		const u8 rx_tx, struct __qlcnic_esw_statistics *esw_stats) {

	struct __qlcnic_esw_statistics port_stats;
	u8 i;
	int ret = -EIO;

	if (esw_stats == NULL)
		return -ENOMEM;
	if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC)
		return -EIO;
	if (adapter->npars == NULL)
		return -EIO;

	memset(esw_stats, 0, sizeof(u64));
	esw_stats->unicast_frames = QLCNIC_STATS_NOT_AVAIL;
	esw_stats->multicast_frames = QLCNIC_STATS_NOT_AVAIL;
	esw_stats->broadcast_frames = QLCNIC_STATS_NOT_AVAIL;
	esw_stats->dropped_frames = QLCNIC_STATS_NOT_AVAIL;
	esw_stats->errors = QLCNIC_STATS_NOT_AVAIL;
	esw_stats->local_frames = QLCNIC_STATS_NOT_AVAIL;
	esw_stats->numbytes = QLCNIC_STATS_NOT_AVAIL;
	esw_stats->context_id = eswitch;

	for (i = 0; i < adapter->ahw->act_pci_func; i++) {
		if (adapter->npars[i].phy_port != eswitch)
			continue;

		memset(&port_stats, 0, sizeof(struct __qlcnic_esw_statistics));
		if (qlcnic_get_port_stats(adapter, adapter->npars[i].pci_func,
					  rx_tx, &port_stats))
			continue;

		esw_stats->size = port_stats.size;
		esw_stats->version = port_stats.version;
		QLCNIC_ADD_ESW_STATS(esw_stats->unicast_frames,
				     port_stats.unicast_frames);
		QLCNIC_ADD_ESW_STATS(esw_stats->multicast_frames,
				     port_stats.multicast_frames);
		QLCNIC_ADD_ESW_STATS(esw_stats->broadcast_frames,
				     port_stats.broadcast_frames);
		QLCNIC_ADD_ESW_STATS(esw_stats->dropped_frames,
				     port_stats.dropped_frames);
		QLCNIC_ADD_ESW_STATS(esw_stats->errors,
				     port_stats.errors);
		QLCNIC_ADD_ESW_STATS(esw_stats->local_frames,
				     port_stats.local_frames);
		QLCNIC_ADD_ESW_STATS(esw_stats->numbytes,
				     port_stats.numbytes);
		ret = 0;
	}
	return ret;
}
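
/* Ask firmware to clear the Tx or Rx counters for a port or an eSwitch */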
int qlcnic_clear_esw_stats(struct qlcnic_adapter *adapter, const u8 func_esw,
		const u8 port, const u8 rx_tx)
{
	int err;
	u32 arg1;
	struct qlcnic_cmd_args cmd;

	if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC)
		return -EIO;

	if (func_esw == QLCNIC_STATS_PORT) {
		if (port >= QLCNIC_MAX_PCI_FUNC)
			goto err_ret;
	} else if (func_esw == QLCNIC_STATS_ESWITCH) {
		if (port >= QLCNIC_NIU_MAX_XG_PORTS)
			goto err_ret;
	} else {
		goto err_ret;
	}

	if (rx_tx > QLCNIC_QUERY_TX_COUNTER)
		goto err_ret;

	arg1 = port | QLCNIC_STATS_VERSION << 8 | func_esw << 12;
	arg1 |= BIT_14 | rx_tx << 15;

	qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_ESWITCH_STATS);
	cmd.req.arg[1] = arg1;
	err = qlcnic_issue_cmd(adapter, &cmd);
	qlcnic_free_mbx_args(&cmd);
	return err;

err_ret:
	dev_err(&adapter->pdev->dev,
		"Invalid args func_esw %d port %d rx_ctx %d\n",
		func_esw, port, rx_tx);
	return -EIO;
}

static int
__qlcnic_get_eswitch_port_config(struct qlcnic_adapter *adapter,
				 u32 *arg1, u32 *arg2)
{
	int err = -EIO;
	struct qlcnic_cmd_args cmd;
	u8 pci_func;
	pci_func = (*arg1 >> 8);

	qlcnic_alloc_mbx_args(&cmd, adapter,
			      QLCNIC_CMD_GET_ESWITCH_PORT_CONFIG);
	cmd.req.arg[1] = *arg1;
	err = qlcnic_issue_cmd(adapter, &cmd);
	*arg1 = cmd.rsp.arg[1];
	*arg2 = cmd.rsp.arg[2];
	qlcnic_free_mbx_args(&cmd);

	if (err == QLCNIC_RCODE_SUCCESS)
		dev_info(&adapter->pdev->dev,
			 "eSwitch port config for pci func %d\n", pci_func);
	else
		dev_err(&adapter->pdev->dev,
			"Failed to get eswitch port config for pci func %d\n",
			pci_func);
	return err;
}

/* Configure eSwitch port
op_mode = 0 for setting default port behavior
op_mode = 1 for setting vlan id
op_mode = 2 for deleting vlan id
op_type = 0 for vlan_id
op_type = 1 for port vlan_id
*/
int qlcnic_config_switch_port(struct qlcnic_adapter *adapter,
		struct qlcnic_esw_func_cfg *esw_cfg)
{
	int err = -EIO, index;
	u32 arg1, arg2 = 0;
	struct qlcnic_cmd_args cmd;
	u8 pci_func;

	if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC)
		return err;
	pci_func = esw_cfg->pci_func;
	index = qlcnic_is_valid_nic_func(adapter, pci_func);
	if (index < 0)
		return err;
	arg1 = (adapter->npars[index].phy_port & BIT_0);
	arg1 |= (pci_func << 8);

	if (__qlcnic_get_eswitch_port_config(adapter, &arg1, &arg2))
		return err;
	arg1 &= ~(0x0ff << 8);
	arg1 |= (pci_func << 8);
	arg1 &= ~(BIT_2 | BIT_3);
	switch (esw_cfg->op_mode) {
	case QLCNIC_PORT_DEFAULTS:
		arg1 |= (BIT_4 | BIT_6 | BIT_7);
		arg2 |= (BIT_0 | BIT_1);
		if (adapter->ahw->capabilities & QLCNIC_FW_CAPABILITY_TSO)
			arg2 |= (BIT_2 | BIT_3);
		if (!(esw_cfg->discard_tagged))
			arg1 &= ~BIT_4;
		if (!(esw_cfg->promisc_mode))
			arg1 &= ~BIT_6;
		if (!(esw_cfg->mac_override))
			arg1 &= ~BIT_7;
		if (!(esw_cfg->mac_anti_spoof))
			arg2 &= ~BIT_0;
		if (!(esw_cfg->offload_flags & BIT_0))
			arg2 &= ~(BIT_1 | BIT_2 | BIT_3);
		if (!(esw_cfg->offload_flags & BIT_1))
			arg2 &= ~BIT_2;
		if (!(esw_cfg->offload_flags & BIT_2))
			arg2 &= ~BIT_3;
		break;
	case QLCNIC_ADD_VLAN:
		arg1 |= (BIT_2 | BIT_5);
		arg1 |= (esw_cfg->vlan_id << 16);
		break;
	case QLCNIC_DEL_VLAN:
		arg1 |= (BIT_3 | BIT_5);
		arg1 &= ~(0x0ffff << 16);
		break;
	default:
		return err;
	}

	qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIGURE_ESWITCH);
	cmd.req.arg[1] = arg1;
	cmd.req.arg[2] = arg2;
	err = qlcnic_issue_cmd(adapter, &cmd);
	qlcnic_free_mbx_args(&cmd);

	if (err != QLCNIC_RCODE_SUCCESS)
		dev_err(&adapter->pdev->dev,
			"Failed to configure eswitch pci func %d\n", pci_func);
	else
		dev_info(&adapter->pdev->dev,
			 "Configured eSwitch for pci func %d\n", pci_func);

	return err;
}

int
qlcnic_get_eswitch_port_config(struct qlcnic_adapter *adapter,
			       struct qlcnic_esw_func_cfg *esw_cfg)
{
	u32 arg1, arg2;
	int index;
	u8 phy_port;

	if (adapter->ahw->op_mode == QLCNIC_MGMT_FUNC) {
		index = qlcnic_is_valid_nic_func(adapter, esw_cfg->pci_func);
		if (index < 0)
			return -EIO;
		phy_port = adapter->npars[index].phy_port;
	} else {
		phy_port = adapter->ahw->physical_port;
	}
	arg1 = phy_port;
	arg1 |= (esw_cfg->pci_func << 8);
	if (__qlcnic_get_eswitch_port_config(adapter, &arg1, &arg2))
		return -EIO;

	esw_cfg->discard_tagged = !!(arg1 & BIT_4);
	esw_cfg->host_vlan_tag = !!(arg1 & BIT_5);
	esw_cfg->promisc_mode = !!(arg1 & BIT_6);
	esw_cfg->mac_override = !!(arg1 & BIT_7);
	esw_cfg->vlan_id = LSW(arg1 >> 16);
	esw_cfg->mac_anti_spoof = (arg2 & 0x1);
	esw_cfg->offload_flags = ((arg2 >> 1) & 0x7);

	return 0;
}