2 * Copyright (C) 2005 - 2011 Emulex
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
10 * Contact Information:
11 * linux-drivers@emulex.com
15 * Costa Mesa, CA 92626
18 #include <linux/module.h>
22 static inline void *embedded_payload(struct be_mcc_wrb
*wrb
)
24 return wrb
->payload
.embedded_payload
;
27 static void be_mcc_notify(struct be_adapter
*adapter
)
29 struct be_queue_info
*mccq
= &adapter
->mcc_obj
.q
;
32 if (be_error(adapter
))
35 val
|= mccq
->id
& DB_MCCQ_RING_ID_MASK
;
36 val
|= 1 << DB_MCCQ_NUM_POSTED_SHIFT
;
39 iowrite32(val
, adapter
->db
+ DB_MCCQ_OFFSET
);
/* To check if valid bit is set, check the entire word as we don't know
 * the endianness of the data (old entry is host endian while a new entry is
 * little endian)
 */
45 static inline bool be_mcc_compl_is_new(struct be_mcc_compl
*compl)
47 if (compl->flags
!= 0) {
48 compl->flags
= le32_to_cpu(compl->flags
);
49 BUG_ON((compl->flags
& CQE_FLAGS_VALID_MASK
) == 0);
56 /* Need to reset the entire word that houses the valid bit */
57 static inline void be_mcc_compl_use(struct be_mcc_compl
*compl)
62 static struct be_cmd_resp_hdr
*be_decode_resp_hdr(u32 tag0
, u32 tag1
)
67 addr
= ((addr
<< 16) << 16) | tag0
;
71 static int be_mcc_compl_process(struct be_adapter
*adapter
,
72 struct be_mcc_compl
*compl)
74 u16 compl_status
, extd_status
;
75 struct be_cmd_resp_hdr
*resp_hdr
;
76 u8 opcode
= 0, subsystem
= 0;
78 /* Just swap the status to host endian; mcc tag is opaquely copied
80 be_dws_le_to_cpu(compl, 4);
82 compl_status
= (compl->status
>> CQE_STATUS_COMPL_SHIFT
) &
83 CQE_STATUS_COMPL_MASK
;
85 resp_hdr
= be_decode_resp_hdr(compl->tag0
, compl->tag1
);
88 opcode
= resp_hdr
->opcode
;
89 subsystem
= resp_hdr
->subsystem
;
92 if (((opcode
== OPCODE_COMMON_WRITE_FLASHROM
) ||
93 (opcode
== OPCODE_COMMON_WRITE_OBJECT
)) &&
94 (subsystem
== CMD_SUBSYSTEM_COMMON
)) {
95 adapter
->flash_status
= compl_status
;
96 complete(&adapter
->flash_compl
);
99 if (compl_status
== MCC_STATUS_SUCCESS
) {
100 if (((opcode
== OPCODE_ETH_GET_STATISTICS
) ||
101 (opcode
== OPCODE_ETH_GET_PPORT_STATS
)) &&
102 (subsystem
== CMD_SUBSYSTEM_ETH
)) {
103 be_parse_stats(adapter
);
104 adapter
->stats_cmd_sent
= false;
106 if (opcode
== OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES
&&
107 subsystem
== CMD_SUBSYSTEM_COMMON
) {
108 struct be_cmd_resp_get_cntl_addnl_attribs
*resp
=
110 adapter
->drv_stats
.be_on_die_temperature
=
111 resp
->on_die_temperature
;
114 if (opcode
== OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES
)
115 adapter
->be_get_temp_freq
= 0;
117 if (compl_status
== MCC_STATUS_NOT_SUPPORTED
||
118 compl_status
== MCC_STATUS_ILLEGAL_REQUEST
)
121 if (compl_status
== MCC_STATUS_UNAUTHORIZED_REQUEST
) {
122 dev_warn(&adapter
->pdev
->dev
,
123 "VF is not privileged to issue opcode %d-%d\n",
126 extd_status
= (compl->status
>> CQE_STATUS_EXTD_SHIFT
) &
127 CQE_STATUS_EXTD_MASK
;
128 dev_err(&adapter
->pdev
->dev
,
129 "opcode %d-%d failed:status %d-%d\n",
130 opcode
, subsystem
, compl_status
, extd_status
);
137 /* Link state evt is a string of bytes; no need for endian swapping */
138 static void be_async_link_state_process(struct be_adapter
*adapter
,
139 struct be_async_event_link_state
*evt
)
141 /* When link status changes, link speed must be re-queried from FW */
142 adapter
->phy
.link_speed
= -1;
144 /* Ignore physical link event */
145 if (lancer_chip(adapter
) &&
146 !(evt
->port_link_status
& LOGICAL_LINK_STATUS_MASK
))
149 /* For the initial link status do not rely on the ASYNC event as
150 * it may not be received in some cases.
152 if (adapter
->flags
& BE_FLAGS_LINK_STATUS_INIT
)
153 be_link_status_update(adapter
, evt
->port_link_status
);
156 /* Grp5 CoS Priority evt */
157 static void be_async_grp5_cos_priority_process(struct be_adapter
*adapter
,
158 struct be_async_event_grp5_cos_priority
*evt
)
161 adapter
->vlan_prio_bmap
= evt
->available_priority_bmap
;
162 adapter
->recommended_prio
&= ~VLAN_PRIO_MASK
;
163 adapter
->recommended_prio
=
164 evt
->reco_default_priority
<< VLAN_PRIO_SHIFT
;
168 /* Grp5 QOS Speed evt: qos_link_speed is in units of 10 Mbps */
169 static void be_async_grp5_qos_speed_process(struct be_adapter
*adapter
,
170 struct be_async_event_grp5_qos_link_speed
*evt
)
172 if (adapter
->phy
.link_speed
>= 0 &&
173 evt
->physical_port
== adapter
->port_num
)
174 adapter
->phy
.link_speed
= le16_to_cpu(evt
->qos_link_speed
) * 10;
178 static void be_async_grp5_pvid_state_process(struct be_adapter
*adapter
,
179 struct be_async_event_grp5_pvid_state
*evt
)
182 adapter
->pvid
= le16_to_cpu(evt
->tag
) & VLAN_VID_MASK
;
187 static void be_async_grp5_evt_process(struct be_adapter
*adapter
,
188 u32 trailer
, struct be_mcc_compl
*evt
)
192 event_type
= (trailer
>> ASYNC_TRAILER_EVENT_TYPE_SHIFT
) &
193 ASYNC_TRAILER_EVENT_TYPE_MASK
;
195 switch (event_type
) {
196 case ASYNC_EVENT_COS_PRIORITY
:
197 be_async_grp5_cos_priority_process(adapter
,
198 (struct be_async_event_grp5_cos_priority
*)evt
);
200 case ASYNC_EVENT_QOS_SPEED
:
201 be_async_grp5_qos_speed_process(adapter
,
202 (struct be_async_event_grp5_qos_link_speed
*)evt
);
204 case ASYNC_EVENT_PVID_STATE
:
205 be_async_grp5_pvid_state_process(adapter
,
206 (struct be_async_event_grp5_pvid_state
*)evt
);
209 dev_warn(&adapter
->pdev
->dev
, "Unknown grp5 event!\n");
214 static inline bool is_link_state_evt(u32 trailer
)
216 return ((trailer
>> ASYNC_TRAILER_EVENT_CODE_SHIFT
) &
217 ASYNC_TRAILER_EVENT_CODE_MASK
) ==
218 ASYNC_EVENT_CODE_LINK_STATE
;
221 static inline bool is_grp5_evt(u32 trailer
)
223 return (((trailer
>> ASYNC_TRAILER_EVENT_CODE_SHIFT
) &
224 ASYNC_TRAILER_EVENT_CODE_MASK
) ==
225 ASYNC_EVENT_CODE_GRP_5
);
228 static struct be_mcc_compl
*be_mcc_compl_get(struct be_adapter
*adapter
)
230 struct be_queue_info
*mcc_cq
= &adapter
->mcc_obj
.cq
;
231 struct be_mcc_compl
*compl = queue_tail_node(mcc_cq
);
233 if (be_mcc_compl_is_new(compl)) {
234 queue_tail_inc(mcc_cq
);
240 void be_async_mcc_enable(struct be_adapter
*adapter
)
242 spin_lock_bh(&adapter
->mcc_cq_lock
);
244 be_cq_notify(adapter
, adapter
->mcc_obj
.cq
.id
, true, 0);
245 adapter
->mcc_obj
.rearm_cq
= true;
247 spin_unlock_bh(&adapter
->mcc_cq_lock
);
250 void be_async_mcc_disable(struct be_adapter
*adapter
)
252 adapter
->mcc_obj
.rearm_cq
= false;
255 int be_process_mcc(struct be_adapter
*adapter
)
257 struct be_mcc_compl
*compl;
258 int num
= 0, status
= 0;
259 struct be_mcc_obj
*mcc_obj
= &adapter
->mcc_obj
;
261 spin_lock(&adapter
->mcc_cq_lock
);
262 while ((compl = be_mcc_compl_get(adapter
))) {
263 if (compl->flags
& CQE_FLAGS_ASYNC_MASK
) {
264 /* Interpret flags as an async trailer */
265 if (is_link_state_evt(compl->flags
))
266 be_async_link_state_process(adapter
,
267 (struct be_async_event_link_state
*) compl);
268 else if (is_grp5_evt(compl->flags
))
269 be_async_grp5_evt_process(adapter
,
270 compl->flags
, compl);
271 } else if (compl->flags
& CQE_FLAGS_COMPLETED_MASK
) {
272 status
= be_mcc_compl_process(adapter
, compl);
273 atomic_dec(&mcc_obj
->q
.used
);
275 be_mcc_compl_use(compl);
280 be_cq_notify(adapter
, mcc_obj
->cq
.id
, mcc_obj
->rearm_cq
, num
);
282 spin_unlock(&adapter
->mcc_cq_lock
);
286 /* Wait till no more pending mcc requests are present */
287 static int be_mcc_wait_compl(struct be_adapter
*adapter
)
289 #define mcc_timeout 120000 /* 12s timeout */
291 struct be_mcc_obj
*mcc_obj
= &adapter
->mcc_obj
;
293 for (i
= 0; i
< mcc_timeout
; i
++) {
294 if (be_error(adapter
))
298 status
= be_process_mcc(adapter
);
301 if (atomic_read(&mcc_obj
->q
.used
) == 0)
305 if (i
== mcc_timeout
) {
306 dev_err(&adapter
->pdev
->dev
, "FW not responding\n");
307 adapter
->fw_timeout
= true;
313 /* Notify MCC requests and wait for completion */
314 static int be_mcc_notify_wait(struct be_adapter
*adapter
)
317 struct be_mcc_wrb
*wrb
;
318 struct be_mcc_obj
*mcc_obj
= &adapter
->mcc_obj
;
319 u16 index
= mcc_obj
->q
.head
;
320 struct be_cmd_resp_hdr
*resp
;
322 index_dec(&index
, mcc_obj
->q
.len
);
323 wrb
= queue_index_node(&mcc_obj
->q
, index
);
325 resp
= be_decode_resp_hdr(wrb
->tag0
, wrb
->tag1
);
327 be_mcc_notify(adapter
);
329 status
= be_mcc_wait_compl(adapter
);
333 status
= resp
->status
;
338 static int be_mbox_db_ready_wait(struct be_adapter
*adapter
, void __iomem
*db
)
344 if (be_error(adapter
))
347 ready
= ioread32(db
);
348 if (ready
== 0xffffffff)
351 ready
&= MPU_MAILBOX_DB_RDY_MASK
;
356 dev_err(&adapter
->pdev
->dev
, "FW not responding\n");
357 adapter
->fw_timeout
= true;
358 be_detect_error(adapter
);
370 * Insert the mailbox address into the doorbell in two steps
371 * Polls on the mbox doorbell till a command completion (or a timeout) occurs
373 static int be_mbox_notify_wait(struct be_adapter
*adapter
)
377 void __iomem
*db
= adapter
->db
+ MPU_MAILBOX_DB_OFFSET
;
378 struct be_dma_mem
*mbox_mem
= &adapter
->mbox_mem
;
379 struct be_mcc_mailbox
*mbox
= mbox_mem
->va
;
380 struct be_mcc_compl
*compl = &mbox
->compl;
382 /* wait for ready to be set */
383 status
= be_mbox_db_ready_wait(adapter
, db
);
387 val
|= MPU_MAILBOX_DB_HI_MASK
;
388 /* at bits 2 - 31 place mbox dma addr msb bits 34 - 63 */
389 val
|= (upper_32_bits(mbox_mem
->dma
) >> 2) << 2;
392 /* wait for ready to be set */
393 status
= be_mbox_db_ready_wait(adapter
, db
);
398 /* at bits 2 - 31 place mbox dma addr lsb bits 4 - 33 */
399 val
|= (u32
)(mbox_mem
->dma
>> 4) << 2;
402 status
= be_mbox_db_ready_wait(adapter
, db
);
406 /* A cq entry has been made now */
407 if (be_mcc_compl_is_new(compl)) {
408 status
= be_mcc_compl_process(adapter
, &mbox
->compl);
409 be_mcc_compl_use(compl);
413 dev_err(&adapter
->pdev
->dev
, "invalid mailbox completion\n");
419 static int be_POST_stage_get(struct be_adapter
*adapter
, u16
*stage
)
423 if (lancer_chip(adapter
))
424 sem
= ioread32(adapter
->db
+ MPU_EP_SEMAPHORE_IF_TYPE2_OFFSET
);
426 sem
= ioread32(adapter
->csr
+ MPU_EP_SEMAPHORE_OFFSET
);
428 *stage
= sem
& EP_SEMAPHORE_POST_STAGE_MASK
;
429 if ((sem
>> EP_SEMAPHORE_POST_ERR_SHIFT
) & EP_SEMAPHORE_POST_ERR_MASK
)
435 int lancer_wait_ready(struct be_adapter
*adapter
)
437 #define SLIPORT_READY_TIMEOUT 30
441 for (i
= 0; i
< SLIPORT_READY_TIMEOUT
; i
++) {
442 sliport_status
= ioread32(adapter
->db
+ SLIPORT_STATUS_OFFSET
);
443 if (sliport_status
& SLIPORT_STATUS_RDY_MASK
)
449 if (i
== SLIPORT_READY_TIMEOUT
)
455 int lancer_test_and_set_rdy_state(struct be_adapter
*adapter
)
458 u32 sliport_status
, err
, reset_needed
;
459 status
= lancer_wait_ready(adapter
);
461 sliport_status
= ioread32(adapter
->db
+ SLIPORT_STATUS_OFFSET
);
462 err
= sliport_status
& SLIPORT_STATUS_ERR_MASK
;
463 reset_needed
= sliport_status
& SLIPORT_STATUS_RN_MASK
;
464 if (err
&& reset_needed
) {
465 iowrite32(SLI_PORT_CONTROL_IP_MASK
,
466 adapter
->db
+ SLIPORT_CONTROL_OFFSET
);
468 /* check adapter has corrected the error */
469 status
= lancer_wait_ready(adapter
);
470 sliport_status
= ioread32(adapter
->db
+
471 SLIPORT_STATUS_OFFSET
);
472 sliport_status
&= (SLIPORT_STATUS_ERR_MASK
|
473 SLIPORT_STATUS_RN_MASK
);
474 if (status
|| sliport_status
)
476 } else if (err
|| reset_needed
) {
483 int be_fw_wait_ready(struct be_adapter
*adapter
)
486 int status
, timeout
= 0;
487 struct device
*dev
= &adapter
->pdev
->dev
;
489 if (lancer_chip(adapter
)) {
490 status
= lancer_wait_ready(adapter
);
495 status
= be_POST_stage_get(adapter
, &stage
);
497 dev_err(dev
, "POST error; stage=0x%x\n", stage
);
499 } else if (stage
!= POST_STAGE_ARMFW_RDY
) {
500 if (msleep_interruptible(2000)) {
501 dev_err(dev
, "Waiting for POST aborted\n");
508 } while (timeout
< 60);
510 dev_err(dev
, "POST timeout; stage=0x%x\n", stage
);
515 static inline struct be_sge
*nonembedded_sgl(struct be_mcc_wrb
*wrb
)
517 return &wrb
->payload
.sgl
[0];
521 /* Don't touch the hdr after it's prepared */
522 /* mem will be NULL for embedded commands */
523 static void be_wrb_cmd_hdr_prepare(struct be_cmd_req_hdr
*req_hdr
,
524 u8 subsystem
, u8 opcode
, int cmd_len
,
525 struct be_mcc_wrb
*wrb
, struct be_dma_mem
*mem
)
528 unsigned long addr
= (unsigned long)req_hdr
;
531 req_hdr
->opcode
= opcode
;
532 req_hdr
->subsystem
= subsystem
;
533 req_hdr
->request_length
= cpu_to_le32(cmd_len
- sizeof(*req_hdr
));
534 req_hdr
->version
= 0;
536 wrb
->tag0
= req_addr
& 0xFFFFFFFF;
537 wrb
->tag1
= upper_32_bits(req_addr
);
539 wrb
->payload_length
= cmd_len
;
541 wrb
->embedded
|= (1 & MCC_WRB_SGE_CNT_MASK
) <<
542 MCC_WRB_SGE_CNT_SHIFT
;
543 sge
= nonembedded_sgl(wrb
);
544 sge
->pa_hi
= cpu_to_le32(upper_32_bits(mem
->dma
));
545 sge
->pa_lo
= cpu_to_le32(mem
->dma
& 0xFFFFFFFF);
546 sge
->len
= cpu_to_le32(mem
->size
);
548 wrb
->embedded
|= MCC_WRB_EMBEDDED_MASK
;
549 be_dws_cpu_to_le(wrb
, 8);
552 static void be_cmd_page_addrs_prepare(struct phys_addr
*pages
, u32 max_pages
,
553 struct be_dma_mem
*mem
)
555 int i
, buf_pages
= min(PAGES_4K_SPANNED(mem
->va
, mem
->size
), max_pages
);
556 u64 dma
= (u64
)mem
->dma
;
558 for (i
= 0; i
< buf_pages
; i
++) {
559 pages
[i
].lo
= cpu_to_le32(dma
& 0xFFFFFFFF);
560 pages
[i
].hi
= cpu_to_le32(upper_32_bits(dma
));
565 /* Converts interrupt delay in microseconds to multiplier value */
566 static u32
eq_delay_to_mult(u32 usec_delay
)
568 #define MAX_INTR_RATE 651042
569 const u32 round
= 10;
575 u32 interrupt_rate
= 1000000 / usec_delay
;
576 /* Max delay, corresponding to the lowest interrupt rate */
577 if (interrupt_rate
== 0)
580 multiplier
= (MAX_INTR_RATE
- interrupt_rate
) * round
;
581 multiplier
/= interrupt_rate
;
582 /* Round the multiplier to the closest value.*/
583 multiplier
= (multiplier
+ round
/2) / round
;
584 multiplier
= min(multiplier
, (u32
)1023);
590 static inline struct be_mcc_wrb
*wrb_from_mbox(struct be_adapter
*adapter
)
592 struct be_dma_mem
*mbox_mem
= &adapter
->mbox_mem
;
593 struct be_mcc_wrb
*wrb
594 = &((struct be_mcc_mailbox
*)(mbox_mem
->va
))->wrb
;
595 memset(wrb
, 0, sizeof(*wrb
));
599 static struct be_mcc_wrb
*wrb_from_mccq(struct be_adapter
*adapter
)
601 struct be_queue_info
*mccq
= &adapter
->mcc_obj
.q
;
602 struct be_mcc_wrb
*wrb
;
604 if (atomic_read(&mccq
->used
) >= mccq
->len
) {
605 dev_err(&adapter
->pdev
->dev
, "Out of MCCQ wrbs\n");
609 wrb
= queue_head_node(mccq
);
610 queue_head_inc(mccq
);
611 atomic_inc(&mccq
->used
);
612 memset(wrb
, 0, sizeof(*wrb
));
616 /* Tell fw we're about to start firing cmds by writing a
617 * special pattern across the wrb hdr; uses mbox
619 int be_cmd_fw_init(struct be_adapter
*adapter
)
624 if (lancer_chip(adapter
))
627 if (mutex_lock_interruptible(&adapter
->mbox_lock
))
630 wrb
= (u8
*)wrb_from_mbox(adapter
);
640 status
= be_mbox_notify_wait(adapter
);
642 mutex_unlock(&adapter
->mbox_lock
);
646 /* Tell fw we're done with firing cmds by writing a
647 * special pattern across the wrb hdr; uses mbox
649 int be_cmd_fw_clean(struct be_adapter
*adapter
)
654 if (lancer_chip(adapter
))
657 if (mutex_lock_interruptible(&adapter
->mbox_lock
))
660 wrb
= (u8
*)wrb_from_mbox(adapter
);
670 status
= be_mbox_notify_wait(adapter
);
672 mutex_unlock(&adapter
->mbox_lock
);
676 int be_cmd_eq_create(struct be_adapter
*adapter
,
677 struct be_queue_info
*eq
, int eq_delay
)
679 struct be_mcc_wrb
*wrb
;
680 struct be_cmd_req_eq_create
*req
;
681 struct be_dma_mem
*q_mem
= &eq
->dma_mem
;
684 if (mutex_lock_interruptible(&adapter
->mbox_lock
))
687 wrb
= wrb_from_mbox(adapter
);
688 req
= embedded_payload(wrb
);
690 be_wrb_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_COMMON
,
691 OPCODE_COMMON_EQ_CREATE
, sizeof(*req
), wrb
, NULL
);
693 req
->num_pages
= cpu_to_le16(PAGES_4K_SPANNED(q_mem
->va
, q_mem
->size
));
695 AMAP_SET_BITS(struct amap_eq_context
, valid
, req
->context
, 1);
697 AMAP_SET_BITS(struct amap_eq_context
, size
, req
->context
, 0);
698 AMAP_SET_BITS(struct amap_eq_context
, count
, req
->context
,
699 __ilog2_u32(eq
->len
/256));
700 AMAP_SET_BITS(struct amap_eq_context
, delaymult
, req
->context
,
701 eq_delay_to_mult(eq_delay
));
702 be_dws_cpu_to_le(req
->context
, sizeof(req
->context
));
704 be_cmd_page_addrs_prepare(req
->pages
, ARRAY_SIZE(req
->pages
), q_mem
);
706 status
= be_mbox_notify_wait(adapter
);
708 struct be_cmd_resp_eq_create
*resp
= embedded_payload(wrb
);
709 eq
->id
= le16_to_cpu(resp
->eq_id
);
713 mutex_unlock(&adapter
->mbox_lock
);
718 int be_cmd_mac_addr_query(struct be_adapter
*adapter
, u8
*mac_addr
,
719 bool permanent
, u32 if_handle
, u32 pmac_id
)
721 struct be_mcc_wrb
*wrb
;
722 struct be_cmd_req_mac_query
*req
;
725 spin_lock_bh(&adapter
->mcc_lock
);
727 wrb
= wrb_from_mccq(adapter
);
732 req
= embedded_payload(wrb
);
734 be_wrb_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_COMMON
,
735 OPCODE_COMMON_NTWK_MAC_QUERY
, sizeof(*req
), wrb
, NULL
);
736 req
->type
= MAC_ADDRESS_TYPE_NETWORK
;
740 req
->if_id
= cpu_to_le16((u16
) if_handle
);
741 req
->pmac_id
= cpu_to_le32(pmac_id
);
745 status
= be_mcc_notify_wait(adapter
);
747 struct be_cmd_resp_mac_query
*resp
= embedded_payload(wrb
);
748 memcpy(mac_addr
, resp
->mac
.addr
, ETH_ALEN
);
752 spin_unlock_bh(&adapter
->mcc_lock
);
756 /* Uses synchronous MCCQ */
757 int be_cmd_pmac_add(struct be_adapter
*adapter
, u8
*mac_addr
,
758 u32 if_id
, u32
*pmac_id
, u32 domain
)
760 struct be_mcc_wrb
*wrb
;
761 struct be_cmd_req_pmac_add
*req
;
764 spin_lock_bh(&adapter
->mcc_lock
);
766 wrb
= wrb_from_mccq(adapter
);
771 req
= embedded_payload(wrb
);
773 be_wrb_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_COMMON
,
774 OPCODE_COMMON_NTWK_PMAC_ADD
, sizeof(*req
), wrb
, NULL
);
776 req
->hdr
.domain
= domain
;
777 req
->if_id
= cpu_to_le32(if_id
);
778 memcpy(req
->mac_address
, mac_addr
, ETH_ALEN
);
780 status
= be_mcc_notify_wait(adapter
);
782 struct be_cmd_resp_pmac_add
*resp
= embedded_payload(wrb
);
783 *pmac_id
= le32_to_cpu(resp
->pmac_id
);
787 spin_unlock_bh(&adapter
->mcc_lock
);
789 if (status
== MCC_STATUS_UNAUTHORIZED_REQUEST
)
795 /* Uses synchronous MCCQ */
796 int be_cmd_pmac_del(struct be_adapter
*adapter
, u32 if_id
, int pmac_id
, u32 dom
)
798 struct be_mcc_wrb
*wrb
;
799 struct be_cmd_req_pmac_del
*req
;
805 spin_lock_bh(&adapter
->mcc_lock
);
807 wrb
= wrb_from_mccq(adapter
);
812 req
= embedded_payload(wrb
);
814 be_wrb_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_COMMON
,
815 OPCODE_COMMON_NTWK_PMAC_DEL
, sizeof(*req
), wrb
, NULL
);
817 req
->hdr
.domain
= dom
;
818 req
->if_id
= cpu_to_le32(if_id
);
819 req
->pmac_id
= cpu_to_le32(pmac_id
);
821 status
= be_mcc_notify_wait(adapter
);
824 spin_unlock_bh(&adapter
->mcc_lock
);
829 int be_cmd_cq_create(struct be_adapter
*adapter
, struct be_queue_info
*cq
,
830 struct be_queue_info
*eq
, bool no_delay
, int coalesce_wm
)
832 struct be_mcc_wrb
*wrb
;
833 struct be_cmd_req_cq_create
*req
;
834 struct be_dma_mem
*q_mem
= &cq
->dma_mem
;
838 if (mutex_lock_interruptible(&adapter
->mbox_lock
))
841 wrb
= wrb_from_mbox(adapter
);
842 req
= embedded_payload(wrb
);
843 ctxt
= &req
->context
;
845 be_wrb_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_COMMON
,
846 OPCODE_COMMON_CQ_CREATE
, sizeof(*req
), wrb
, NULL
);
848 req
->num_pages
= cpu_to_le16(PAGES_4K_SPANNED(q_mem
->va
, q_mem
->size
));
849 if (lancer_chip(adapter
)) {
850 req
->hdr
.version
= 2;
851 req
->page_size
= 1; /* 1 for 4K */
852 AMAP_SET_BITS(struct amap_cq_context_lancer
, nodelay
, ctxt
,
854 AMAP_SET_BITS(struct amap_cq_context_lancer
, count
, ctxt
,
855 __ilog2_u32(cq
->len
/256));
856 AMAP_SET_BITS(struct amap_cq_context_lancer
, valid
, ctxt
, 1);
857 AMAP_SET_BITS(struct amap_cq_context_lancer
, eventable
,
859 AMAP_SET_BITS(struct amap_cq_context_lancer
, eqid
,
862 AMAP_SET_BITS(struct amap_cq_context_be
, coalescwm
, ctxt
,
864 AMAP_SET_BITS(struct amap_cq_context_be
, nodelay
,
866 AMAP_SET_BITS(struct amap_cq_context_be
, count
, ctxt
,
867 __ilog2_u32(cq
->len
/256));
868 AMAP_SET_BITS(struct amap_cq_context_be
, valid
, ctxt
, 1);
869 AMAP_SET_BITS(struct amap_cq_context_be
, eventable
, ctxt
, 1);
870 AMAP_SET_BITS(struct amap_cq_context_be
, eqid
, ctxt
, eq
->id
);
873 be_dws_cpu_to_le(ctxt
, sizeof(req
->context
));
875 be_cmd_page_addrs_prepare(req
->pages
, ARRAY_SIZE(req
->pages
), q_mem
);
877 status
= be_mbox_notify_wait(adapter
);
879 struct be_cmd_resp_cq_create
*resp
= embedded_payload(wrb
);
880 cq
->id
= le16_to_cpu(resp
->cq_id
);
884 mutex_unlock(&adapter
->mbox_lock
);
889 static u32
be_encoded_q_len(int q_len
)
891 u32 len_encoded
= fls(q_len
); /* log2(len) + 1 */
892 if (len_encoded
== 16)
897 int be_cmd_mccq_ext_create(struct be_adapter
*adapter
,
898 struct be_queue_info
*mccq
,
899 struct be_queue_info
*cq
)
901 struct be_mcc_wrb
*wrb
;
902 struct be_cmd_req_mcc_ext_create
*req
;
903 struct be_dma_mem
*q_mem
= &mccq
->dma_mem
;
907 if (mutex_lock_interruptible(&adapter
->mbox_lock
))
910 wrb
= wrb_from_mbox(adapter
);
911 req
= embedded_payload(wrb
);
912 ctxt
= &req
->context
;
914 be_wrb_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_COMMON
,
915 OPCODE_COMMON_MCC_CREATE_EXT
, sizeof(*req
), wrb
, NULL
);
917 req
->num_pages
= cpu_to_le16(PAGES_4K_SPANNED(q_mem
->va
, q_mem
->size
));
918 if (lancer_chip(adapter
)) {
919 req
->hdr
.version
= 1;
920 req
->cq_id
= cpu_to_le16(cq
->id
);
922 AMAP_SET_BITS(struct amap_mcc_context_lancer
, ring_size
, ctxt
,
923 be_encoded_q_len(mccq
->len
));
924 AMAP_SET_BITS(struct amap_mcc_context_lancer
, valid
, ctxt
, 1);
925 AMAP_SET_BITS(struct amap_mcc_context_lancer
, async_cq_id
,
927 AMAP_SET_BITS(struct amap_mcc_context_lancer
, async_cq_valid
,
931 AMAP_SET_BITS(struct amap_mcc_context_be
, valid
, ctxt
, 1);
932 AMAP_SET_BITS(struct amap_mcc_context_be
, ring_size
, ctxt
,
933 be_encoded_q_len(mccq
->len
));
934 AMAP_SET_BITS(struct amap_mcc_context_be
, cq_id
, ctxt
, cq
->id
);
937 /* Subscribe to Link State and Group 5 Events(bits 1 and 5 set) */
938 req
->async_event_bitmap
[0] = cpu_to_le32(0x00000022);
939 be_dws_cpu_to_le(ctxt
, sizeof(req
->context
));
941 be_cmd_page_addrs_prepare(req
->pages
, ARRAY_SIZE(req
->pages
), q_mem
);
943 status
= be_mbox_notify_wait(adapter
);
945 struct be_cmd_resp_mcc_create
*resp
= embedded_payload(wrb
);
946 mccq
->id
= le16_to_cpu(resp
->id
);
947 mccq
->created
= true;
949 mutex_unlock(&adapter
->mbox_lock
);
954 int be_cmd_mccq_org_create(struct be_adapter
*adapter
,
955 struct be_queue_info
*mccq
,
956 struct be_queue_info
*cq
)
958 struct be_mcc_wrb
*wrb
;
959 struct be_cmd_req_mcc_create
*req
;
960 struct be_dma_mem
*q_mem
= &mccq
->dma_mem
;
964 if (mutex_lock_interruptible(&adapter
->mbox_lock
))
967 wrb
= wrb_from_mbox(adapter
);
968 req
= embedded_payload(wrb
);
969 ctxt
= &req
->context
;
971 be_wrb_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_COMMON
,
972 OPCODE_COMMON_MCC_CREATE
, sizeof(*req
), wrb
, NULL
);
974 req
->num_pages
= cpu_to_le16(PAGES_4K_SPANNED(q_mem
->va
, q_mem
->size
));
976 AMAP_SET_BITS(struct amap_mcc_context_be
, valid
, ctxt
, 1);
977 AMAP_SET_BITS(struct amap_mcc_context_be
, ring_size
, ctxt
,
978 be_encoded_q_len(mccq
->len
));
979 AMAP_SET_BITS(struct amap_mcc_context_be
, cq_id
, ctxt
, cq
->id
);
981 be_dws_cpu_to_le(ctxt
, sizeof(req
->context
));
983 be_cmd_page_addrs_prepare(req
->pages
, ARRAY_SIZE(req
->pages
), q_mem
);
985 status
= be_mbox_notify_wait(adapter
);
987 struct be_cmd_resp_mcc_create
*resp
= embedded_payload(wrb
);
988 mccq
->id
= le16_to_cpu(resp
->id
);
989 mccq
->created
= true;
992 mutex_unlock(&adapter
->mbox_lock
);
996 int be_cmd_mccq_create(struct be_adapter
*adapter
,
997 struct be_queue_info
*mccq
,
998 struct be_queue_info
*cq
)
1002 status
= be_cmd_mccq_ext_create(adapter
, mccq
, cq
);
1003 if (status
&& !lancer_chip(adapter
)) {
1004 dev_warn(&adapter
->pdev
->dev
, "Upgrade to F/W ver 2.102.235.0 "
1005 "or newer to avoid conflicting priorities between NIC "
1006 "and FCoE traffic");
1007 status
= be_cmd_mccq_org_create(adapter
, mccq
, cq
);
1012 int be_cmd_txq_create(struct be_adapter
*adapter
,
1013 struct be_queue_info
*txq
,
1014 struct be_queue_info
*cq
)
1016 struct be_mcc_wrb
*wrb
;
1017 struct be_cmd_req_eth_tx_create
*req
;
1018 struct be_dma_mem
*q_mem
= &txq
->dma_mem
;
1022 spin_lock_bh(&adapter
->mcc_lock
);
1024 wrb
= wrb_from_mccq(adapter
);
1030 req
= embedded_payload(wrb
);
1031 ctxt
= &req
->context
;
1033 be_wrb_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_ETH
,
1034 OPCODE_ETH_TX_CREATE
, sizeof(*req
), wrb
, NULL
);
1036 if (lancer_chip(adapter
)) {
1037 req
->hdr
.version
= 1;
1038 AMAP_SET_BITS(struct amap_tx_context
, if_id
, ctxt
,
1039 adapter
->if_handle
);
1042 req
->num_pages
= PAGES_4K_SPANNED(q_mem
->va
, q_mem
->size
);
1043 req
->ulp_num
= BE_ULP1_NUM
;
1044 req
->type
= BE_ETH_TX_RING_TYPE_STANDARD
;
1046 AMAP_SET_BITS(struct amap_tx_context
, tx_ring_size
, ctxt
,
1047 be_encoded_q_len(txq
->len
));
1048 AMAP_SET_BITS(struct amap_tx_context
, ctx_valid
, ctxt
, 1);
1049 AMAP_SET_BITS(struct amap_tx_context
, cq_id_send
, ctxt
, cq
->id
);
1051 be_dws_cpu_to_le(ctxt
, sizeof(req
->context
));
1053 be_cmd_page_addrs_prepare(req
->pages
, ARRAY_SIZE(req
->pages
), q_mem
);
1055 status
= be_mcc_notify_wait(adapter
);
1057 struct be_cmd_resp_eth_tx_create
*resp
= embedded_payload(wrb
);
1058 txq
->id
= le16_to_cpu(resp
->cid
);
1059 txq
->created
= true;
1063 spin_unlock_bh(&adapter
->mcc_lock
);
1069 int be_cmd_rxq_create(struct be_adapter
*adapter
,
1070 struct be_queue_info
*rxq
, u16 cq_id
, u16 frag_size
,
1071 u32 if_id
, u32 rss
, u8
*rss_id
)
1073 struct be_mcc_wrb
*wrb
;
1074 struct be_cmd_req_eth_rx_create
*req
;
1075 struct be_dma_mem
*q_mem
= &rxq
->dma_mem
;
1078 spin_lock_bh(&adapter
->mcc_lock
);
1080 wrb
= wrb_from_mccq(adapter
);
1085 req
= embedded_payload(wrb
);
1087 be_wrb_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_ETH
,
1088 OPCODE_ETH_RX_CREATE
, sizeof(*req
), wrb
, NULL
);
1090 req
->cq_id
= cpu_to_le16(cq_id
);
1091 req
->frag_size
= fls(frag_size
) - 1;
1093 be_cmd_page_addrs_prepare(req
->pages
, ARRAY_SIZE(req
->pages
), q_mem
);
1094 req
->interface_id
= cpu_to_le32(if_id
);
1095 req
->max_frame_size
= cpu_to_le16(BE_MAX_JUMBO_FRAME_SIZE
);
1096 req
->rss_queue
= cpu_to_le32(rss
);
1098 status
= be_mcc_notify_wait(adapter
);
1100 struct be_cmd_resp_eth_rx_create
*resp
= embedded_payload(wrb
);
1101 rxq
->id
= le16_to_cpu(resp
->id
);
1102 rxq
->created
= true;
1103 *rss_id
= resp
->rss_id
;
1107 spin_unlock_bh(&adapter
->mcc_lock
);
1111 /* Generic destroyer function for all types of queues
1114 int be_cmd_q_destroy(struct be_adapter
*adapter
, struct be_queue_info
*q
,
1117 struct be_mcc_wrb
*wrb
;
1118 struct be_cmd_req_q_destroy
*req
;
1119 u8 subsys
= 0, opcode
= 0;
1122 if (mutex_lock_interruptible(&adapter
->mbox_lock
))
1125 wrb
= wrb_from_mbox(adapter
);
1126 req
= embedded_payload(wrb
);
1128 switch (queue_type
) {
1130 subsys
= CMD_SUBSYSTEM_COMMON
;
1131 opcode
= OPCODE_COMMON_EQ_DESTROY
;
1134 subsys
= CMD_SUBSYSTEM_COMMON
;
1135 opcode
= OPCODE_COMMON_CQ_DESTROY
;
1138 subsys
= CMD_SUBSYSTEM_ETH
;
1139 opcode
= OPCODE_ETH_TX_DESTROY
;
1142 subsys
= CMD_SUBSYSTEM_ETH
;
1143 opcode
= OPCODE_ETH_RX_DESTROY
;
1146 subsys
= CMD_SUBSYSTEM_COMMON
;
1147 opcode
= OPCODE_COMMON_MCC_DESTROY
;
1153 be_wrb_cmd_hdr_prepare(&req
->hdr
, subsys
, opcode
, sizeof(*req
), wrb
,
1155 req
->id
= cpu_to_le16(q
->id
);
1157 status
= be_mbox_notify_wait(adapter
);
1161 mutex_unlock(&adapter
->mbox_lock
);
1166 int be_cmd_rxq_destroy(struct be_adapter
*adapter
, struct be_queue_info
*q
)
1168 struct be_mcc_wrb
*wrb
;
1169 struct be_cmd_req_q_destroy
*req
;
1172 spin_lock_bh(&adapter
->mcc_lock
);
1174 wrb
= wrb_from_mccq(adapter
);
1179 req
= embedded_payload(wrb
);
1181 be_wrb_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_ETH
,
1182 OPCODE_ETH_RX_DESTROY
, sizeof(*req
), wrb
, NULL
);
1183 req
->id
= cpu_to_le16(q
->id
);
1185 status
= be_mcc_notify_wait(adapter
);
1190 spin_unlock_bh(&adapter
->mcc_lock
);
1194 /* Create an rx filtering policy configuration on an i/f
1197 int be_cmd_if_create(struct be_adapter
*adapter
, u32 cap_flags
, u32 en_flags
,
1198 u32
*if_handle
, u32 domain
)
1200 struct be_mcc_wrb
*wrb
;
1201 struct be_cmd_req_if_create
*req
;
1204 spin_lock_bh(&adapter
->mcc_lock
);
1206 wrb
= wrb_from_mccq(adapter
);
1211 req
= embedded_payload(wrb
);
1213 be_wrb_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_COMMON
,
1214 OPCODE_COMMON_NTWK_INTERFACE_CREATE
, sizeof(*req
), wrb
, NULL
);
1215 req
->hdr
.domain
= domain
;
1216 req
->capability_flags
= cpu_to_le32(cap_flags
);
1217 req
->enable_flags
= cpu_to_le32(en_flags
);
1219 req
->pmac_invalid
= true;
1221 status
= be_mcc_notify_wait(adapter
);
1223 struct be_cmd_resp_if_create
*resp
= embedded_payload(wrb
);
1224 *if_handle
= le32_to_cpu(resp
->interface_id
);
1228 spin_unlock_bh(&adapter
->mcc_lock
);
1233 int be_cmd_if_destroy(struct be_adapter
*adapter
, int interface_id
, u32 domain
)
1235 struct be_mcc_wrb
*wrb
;
1236 struct be_cmd_req_if_destroy
*req
;
1239 if (interface_id
== -1)
1242 spin_lock_bh(&adapter
->mcc_lock
);
1244 wrb
= wrb_from_mccq(adapter
);
1249 req
= embedded_payload(wrb
);
1251 be_wrb_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_COMMON
,
1252 OPCODE_COMMON_NTWK_INTERFACE_DESTROY
, sizeof(*req
), wrb
, NULL
);
1253 req
->hdr
.domain
= domain
;
1254 req
->interface_id
= cpu_to_le32(interface_id
);
1256 status
= be_mcc_notify_wait(adapter
);
1258 spin_unlock_bh(&adapter
->mcc_lock
);
1262 /* Get stats is a non embedded command: the request is not embedded inside
1263 * WRB but is a separate dma memory block
1264 * Uses asynchronous MCC
1266 int be_cmd_get_stats(struct be_adapter
*adapter
, struct be_dma_mem
*nonemb_cmd
)
1268 struct be_mcc_wrb
*wrb
;
1269 struct be_cmd_req_hdr
*hdr
;
1272 spin_lock_bh(&adapter
->mcc_lock
);
1274 wrb
= wrb_from_mccq(adapter
);
1279 hdr
= nonemb_cmd
->va
;
1281 be_wrb_cmd_hdr_prepare(hdr
, CMD_SUBSYSTEM_ETH
,
1282 OPCODE_ETH_GET_STATISTICS
, nonemb_cmd
->size
, wrb
, nonemb_cmd
);
1284 if (adapter
->generation
== BE_GEN3
)
1287 be_mcc_notify(adapter
);
1288 adapter
->stats_cmd_sent
= true;
1291 spin_unlock_bh(&adapter
->mcc_lock
);
1296 int lancer_cmd_get_pport_stats(struct be_adapter
*adapter
,
1297 struct be_dma_mem
*nonemb_cmd
)
1300 struct be_mcc_wrb
*wrb
;
1301 struct lancer_cmd_req_pport_stats
*req
;
1304 spin_lock_bh(&adapter
->mcc_lock
);
1306 wrb
= wrb_from_mccq(adapter
);
1311 req
= nonemb_cmd
->va
;
1313 be_wrb_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_ETH
,
1314 OPCODE_ETH_GET_PPORT_STATS
, nonemb_cmd
->size
, wrb
,
1317 req
->cmd_params
.params
.pport_num
= cpu_to_le16(adapter
->hba_port_num
);
1318 req
->cmd_params
.params
.reset_stats
= 0;
1320 be_mcc_notify(adapter
);
1321 adapter
->stats_cmd_sent
= true;
1324 spin_unlock_bh(&adapter
->mcc_lock
);
1328 static int be_mac_to_link_speed(int mac_speed
)
1330 switch (mac_speed
) {
1331 case PHY_LINK_SPEED_ZERO
:
1333 case PHY_LINK_SPEED_10MBPS
:
1335 case PHY_LINK_SPEED_100MBPS
:
1337 case PHY_LINK_SPEED_1GBPS
:
1339 case PHY_LINK_SPEED_10GBPS
:
1345 /* Uses synchronous mcc
1346 * Returns link_speed in Mbps
1348 int be_cmd_link_status_query(struct be_adapter
*adapter
, u16
*link_speed
,
1349 u8
*link_status
, u32 dom
)
1351 struct be_mcc_wrb
*wrb
;
1352 struct be_cmd_req_link_status
*req
;
1355 spin_lock_bh(&adapter
->mcc_lock
);
1358 *link_status
= LINK_DOWN
;
1360 wrb
= wrb_from_mccq(adapter
);
1365 req
= embedded_payload(wrb
);
1367 be_wrb_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_COMMON
,
1368 OPCODE_COMMON_NTWK_LINK_STATUS_QUERY
, sizeof(*req
), wrb
, NULL
);
1370 if (adapter
->generation
== BE_GEN3
|| lancer_chip(adapter
))
1371 req
->hdr
.version
= 1;
1373 req
->hdr
.domain
= dom
;
1375 status
= be_mcc_notify_wait(adapter
);
1377 struct be_cmd_resp_link_status
*resp
= embedded_payload(wrb
);
1379 *link_speed
= resp
->link_speed
?
1380 le16_to_cpu(resp
->link_speed
) * 10 :
1381 be_mac_to_link_speed(resp
->mac_speed
);
1383 if (!resp
->logical_link_status
)
1387 *link_status
= resp
->logical_link_status
;
1391 spin_unlock_bh(&adapter
->mcc_lock
);
1395 /* Uses synchronous mcc */
1396 int be_cmd_get_die_temperature(struct be_adapter
*adapter
)
1398 struct be_mcc_wrb
*wrb
;
1399 struct be_cmd_req_get_cntl_addnl_attribs
*req
;
1402 spin_lock_bh(&adapter
->mcc_lock
);
1404 wrb
= wrb_from_mccq(adapter
);
1409 req
= embedded_payload(wrb
);
1411 be_wrb_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_COMMON
,
1412 OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES
, sizeof(*req
),
1415 be_mcc_notify(adapter
);
1418 spin_unlock_bh(&adapter
->mcc_lock
);
1422 /* Uses synchronous mcc */
1423 int be_cmd_get_reg_len(struct be_adapter
*adapter
, u32
*log_size
)
1425 struct be_mcc_wrb
*wrb
;
1426 struct be_cmd_req_get_fat
*req
;
1429 spin_lock_bh(&adapter
->mcc_lock
);
1431 wrb
= wrb_from_mccq(adapter
);
1436 req
= embedded_payload(wrb
);
1438 be_wrb_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_COMMON
,
1439 OPCODE_COMMON_MANAGE_FAT
, sizeof(*req
), wrb
, NULL
);
1440 req
->fat_operation
= cpu_to_le32(QUERY_FAT
);
1441 status
= be_mcc_notify_wait(adapter
);
1443 struct be_cmd_resp_get_fat
*resp
= embedded_payload(wrb
);
1444 if (log_size
&& resp
->log_size
)
1445 *log_size
= le32_to_cpu(resp
->log_size
) -
1449 spin_unlock_bh(&adapter
->mcc_lock
);
1453 void be_cmd_get_regs(struct be_adapter
*adapter
, u32 buf_len
, void *buf
)
1455 struct be_dma_mem get_fat_cmd
;
1456 struct be_mcc_wrb
*wrb
;
1457 struct be_cmd_req_get_fat
*req
;
1458 u32 offset
= 0, total_size
, buf_size
,
1459 log_offset
= sizeof(u32
), payload_len
;
1465 total_size
= buf_len
;
1467 get_fat_cmd
.size
= sizeof(struct be_cmd_req_get_fat
) + 60*1024;
1468 get_fat_cmd
.va
= pci_alloc_consistent(adapter
->pdev
,
1471 if (!get_fat_cmd
.va
) {
1473 dev_err(&adapter
->pdev
->dev
,
1474 "Memory allocation failure while retrieving FAT data\n");
1478 spin_lock_bh(&adapter
->mcc_lock
);
1480 while (total_size
) {
1481 buf_size
= min(total_size
, (u32
)60*1024);
1482 total_size
-= buf_size
;
1484 wrb
= wrb_from_mccq(adapter
);
1489 req
= get_fat_cmd
.va
;
1491 payload_len
= sizeof(struct be_cmd_req_get_fat
) + buf_size
;
1492 be_wrb_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_COMMON
,
1493 OPCODE_COMMON_MANAGE_FAT
, payload_len
, wrb
,
1496 req
->fat_operation
= cpu_to_le32(RETRIEVE_FAT
);
1497 req
->read_log_offset
= cpu_to_le32(log_offset
);
1498 req
->read_log_length
= cpu_to_le32(buf_size
);
1499 req
->data_buffer_size
= cpu_to_le32(buf_size
);
1501 status
= be_mcc_notify_wait(adapter
);
1503 struct be_cmd_resp_get_fat
*resp
= get_fat_cmd
.va
;
1504 memcpy(buf
+ offset
,
1506 le32_to_cpu(resp
->read_log_length
));
1508 dev_err(&adapter
->pdev
->dev
, "FAT Table Retrieve error\n");
1512 log_offset
+= buf_size
;
1515 pci_free_consistent(adapter
->pdev
, get_fat_cmd
.size
,
1518 spin_unlock_bh(&adapter
->mcc_lock
);
1521 /* Uses synchronous mcc */
1522 int be_cmd_get_fw_ver(struct be_adapter
*adapter
, char *fw_ver
,
1525 struct be_mcc_wrb
*wrb
;
1526 struct be_cmd_req_get_fw_version
*req
;
1529 spin_lock_bh(&adapter
->mcc_lock
);
1531 wrb
= wrb_from_mccq(adapter
);
1537 req
= embedded_payload(wrb
);
1539 be_wrb_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_COMMON
,
1540 OPCODE_COMMON_GET_FW_VERSION
, sizeof(*req
), wrb
, NULL
);
1541 status
= be_mcc_notify_wait(adapter
);
1543 struct be_cmd_resp_get_fw_version
*resp
= embedded_payload(wrb
);
1544 strcpy(fw_ver
, resp
->firmware_version_string
);
1546 strcpy(fw_on_flash
, resp
->fw_on_flash_version_string
);
1549 spin_unlock_bh(&adapter
->mcc_lock
);
1553 /* set the EQ delay interval of an EQ to specified value
1556 int be_cmd_modify_eqd(struct be_adapter
*adapter
, u32 eq_id
, u32 eqd
)
1558 struct be_mcc_wrb
*wrb
;
1559 struct be_cmd_req_modify_eq_delay
*req
;
1562 spin_lock_bh(&adapter
->mcc_lock
);
1564 wrb
= wrb_from_mccq(adapter
);
1569 req
= embedded_payload(wrb
);
1571 be_wrb_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_COMMON
,
1572 OPCODE_COMMON_MODIFY_EQ_DELAY
, sizeof(*req
), wrb
, NULL
);
1574 req
->num_eq
= cpu_to_le32(1);
1575 req
->delay
[0].eq_id
= cpu_to_le32(eq_id
);
1576 req
->delay
[0].phase
= 0;
1577 req
->delay
[0].delay_multiplier
= cpu_to_le32(eqd
);
1579 be_mcc_notify(adapter
);
1582 spin_unlock_bh(&adapter
->mcc_lock
);
1586 /* Uses sycnhronous mcc */
1587 int be_cmd_vlan_config(struct be_adapter
*adapter
, u32 if_id
, u16
*vtag_array
,
1588 u32 num
, bool untagged
, bool promiscuous
)
1590 struct be_mcc_wrb
*wrb
;
1591 struct be_cmd_req_vlan_config
*req
;
1594 spin_lock_bh(&adapter
->mcc_lock
);
1596 wrb
= wrb_from_mccq(adapter
);
1601 req
= embedded_payload(wrb
);
1603 be_wrb_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_COMMON
,
1604 OPCODE_COMMON_NTWK_VLAN_CONFIG
, sizeof(*req
), wrb
, NULL
);
1606 req
->interface_id
= if_id
;
1607 req
->promiscuous
= promiscuous
;
1608 req
->untagged
= untagged
;
1609 req
->num_vlan
= num
;
1611 memcpy(req
->normal_vlan
, vtag_array
,
1612 req
->num_vlan
* sizeof(vtag_array
[0]));
1615 status
= be_mcc_notify_wait(adapter
);
1618 spin_unlock_bh(&adapter
->mcc_lock
);
1622 int be_cmd_rx_filter(struct be_adapter
*adapter
, u32 flags
, u32 value
)
1624 struct be_mcc_wrb
*wrb
;
1625 struct be_dma_mem
*mem
= &adapter
->rx_filter
;
1626 struct be_cmd_req_rx_filter
*req
= mem
->va
;
1629 spin_lock_bh(&adapter
->mcc_lock
);
1631 wrb
= wrb_from_mccq(adapter
);
1636 memset(req
, 0, sizeof(*req
));
1637 be_wrb_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_COMMON
,
1638 OPCODE_COMMON_NTWK_RX_FILTER
, sizeof(*req
),
1641 req
->if_id
= cpu_to_le32(adapter
->if_handle
);
1642 if (flags
& IFF_PROMISC
) {
1643 req
->if_flags_mask
= cpu_to_le32(BE_IF_FLAGS_PROMISCUOUS
|
1644 BE_IF_FLAGS_VLAN_PROMISCUOUS
);
1646 req
->if_flags
= cpu_to_le32(BE_IF_FLAGS_PROMISCUOUS
|
1647 BE_IF_FLAGS_VLAN_PROMISCUOUS
);
1648 } else if (flags
& IFF_ALLMULTI
) {
1649 req
->if_flags_mask
= req
->if_flags
=
1650 cpu_to_le32(BE_IF_FLAGS_MCAST_PROMISCUOUS
);
1652 struct netdev_hw_addr
*ha
;
1655 req
->if_flags_mask
= req
->if_flags
=
1656 cpu_to_le32(BE_IF_FLAGS_MULTICAST
);
1658 /* Reset mcast promisc mode if already set by setting mask
1659 * and not setting flags field
1661 req
->if_flags_mask
|=
1662 cpu_to_le32(BE_IF_FLAGS_MCAST_PROMISCUOUS
&
1663 adapter
->if_cap_flags
);
1665 req
->mcast_num
= cpu_to_le32(netdev_mc_count(adapter
->netdev
));
1666 netdev_for_each_mc_addr(ha
, adapter
->netdev
)
1667 memcpy(req
->mcast_mac
[i
++].byte
, ha
->addr
, ETH_ALEN
);
1670 status
= be_mcc_notify_wait(adapter
);
1672 spin_unlock_bh(&adapter
->mcc_lock
);
1676 /* Uses synchrounous mcc */
1677 int be_cmd_set_flow_control(struct be_adapter
*adapter
, u32 tx_fc
, u32 rx_fc
)
1679 struct be_mcc_wrb
*wrb
;
1680 struct be_cmd_req_set_flow_control
*req
;
1683 spin_lock_bh(&adapter
->mcc_lock
);
1685 wrb
= wrb_from_mccq(adapter
);
1690 req
= embedded_payload(wrb
);
1692 be_wrb_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_COMMON
,
1693 OPCODE_COMMON_SET_FLOW_CONTROL
, sizeof(*req
), wrb
, NULL
);
1695 req
->tx_flow_control
= cpu_to_le16((u16
)tx_fc
);
1696 req
->rx_flow_control
= cpu_to_le16((u16
)rx_fc
);
1698 status
= be_mcc_notify_wait(adapter
);
1701 spin_unlock_bh(&adapter
->mcc_lock
);
1706 int be_cmd_get_flow_control(struct be_adapter
*adapter
, u32
*tx_fc
, u32
*rx_fc
)
1708 struct be_mcc_wrb
*wrb
;
1709 struct be_cmd_req_get_flow_control
*req
;
1712 spin_lock_bh(&adapter
->mcc_lock
);
1714 wrb
= wrb_from_mccq(adapter
);
1719 req
= embedded_payload(wrb
);
1721 be_wrb_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_COMMON
,
1722 OPCODE_COMMON_GET_FLOW_CONTROL
, sizeof(*req
), wrb
, NULL
);
1724 status
= be_mcc_notify_wait(adapter
);
1726 struct be_cmd_resp_get_flow_control
*resp
=
1727 embedded_payload(wrb
);
1728 *tx_fc
= le16_to_cpu(resp
->tx_flow_control
);
1729 *rx_fc
= le16_to_cpu(resp
->rx_flow_control
);
1733 spin_unlock_bh(&adapter
->mcc_lock
);
1738 int be_cmd_query_fw_cfg(struct be_adapter
*adapter
, u32
*port_num
,
1739 u32
*mode
, u32
*caps
)
1741 struct be_mcc_wrb
*wrb
;
1742 struct be_cmd_req_query_fw_cfg
*req
;
1745 if (mutex_lock_interruptible(&adapter
->mbox_lock
))
1748 wrb
= wrb_from_mbox(adapter
);
1749 req
= embedded_payload(wrb
);
1751 be_wrb_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_COMMON
,
1752 OPCODE_COMMON_QUERY_FIRMWARE_CONFIG
, sizeof(*req
), wrb
, NULL
);
1754 status
= be_mbox_notify_wait(adapter
);
1756 struct be_cmd_resp_query_fw_cfg
*resp
= embedded_payload(wrb
);
1757 *port_num
= le32_to_cpu(resp
->phys_port
);
1758 *mode
= le32_to_cpu(resp
->function_mode
);
1759 *caps
= le32_to_cpu(resp
->function_caps
);
1762 mutex_unlock(&adapter
->mbox_lock
);
1767 int be_cmd_reset_function(struct be_adapter
*adapter
)
1769 struct be_mcc_wrb
*wrb
;
1770 struct be_cmd_req_hdr
*req
;
1773 if (lancer_chip(adapter
)) {
1774 status
= lancer_wait_ready(adapter
);
1776 iowrite32(SLI_PORT_CONTROL_IP_MASK
,
1777 adapter
->db
+ SLIPORT_CONTROL_OFFSET
);
1778 status
= lancer_test_and_set_rdy_state(adapter
);
1781 dev_err(&adapter
->pdev
->dev
,
1782 "Adapter in non recoverable error\n");
1787 if (mutex_lock_interruptible(&adapter
->mbox_lock
))
1790 wrb
= wrb_from_mbox(adapter
);
1791 req
= embedded_payload(wrb
);
1793 be_wrb_cmd_hdr_prepare(req
, CMD_SUBSYSTEM_COMMON
,
1794 OPCODE_COMMON_FUNCTION_RESET
, sizeof(*req
), wrb
, NULL
);
1796 status
= be_mbox_notify_wait(adapter
);
1798 mutex_unlock(&adapter
->mbox_lock
);
1802 int be_cmd_rss_config(struct be_adapter
*adapter
, u8
*rsstable
, u16 table_size
)
1804 struct be_mcc_wrb
*wrb
;
1805 struct be_cmd_req_rss_config
*req
;
1806 u32 myhash
[10] = {0x15d43fa5, 0x2534685a, 0x5f87693a, 0x5668494e,
1807 0x33cf6a53, 0x383334c6, 0x76ac4257, 0x59b242b2,
1808 0x3ea83c02, 0x4a110304};
1811 if (mutex_lock_interruptible(&adapter
->mbox_lock
))
1814 wrb
= wrb_from_mbox(adapter
);
1815 req
= embedded_payload(wrb
);
1817 be_wrb_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_ETH
,
1818 OPCODE_ETH_RSS_CONFIG
, sizeof(*req
), wrb
, NULL
);
1820 req
->if_id
= cpu_to_le32(adapter
->if_handle
);
1821 req
->enable_rss
= cpu_to_le16(RSS_ENABLE_TCP_IPV4
| RSS_ENABLE_IPV4
|
1822 RSS_ENABLE_TCP_IPV6
| RSS_ENABLE_IPV6
);
1824 if (lancer_chip(adapter
) || skyhawk_chip(adapter
)) {
1825 req
->hdr
.version
= 1;
1826 req
->enable_rss
|= cpu_to_le16(RSS_ENABLE_UDP_IPV4
|
1827 RSS_ENABLE_UDP_IPV6
);
1830 req
->cpu_table_size_log2
= cpu_to_le16(fls(table_size
) - 1);
1831 memcpy(req
->cpu_table
, rsstable
, table_size
);
1832 memcpy(req
->hash
, myhash
, sizeof(myhash
));
1833 be_dws_cpu_to_le(req
->hash
, sizeof(req
->hash
));
1835 status
= be_mbox_notify_wait(adapter
);
1837 mutex_unlock(&adapter
->mbox_lock
);
1842 int be_cmd_set_beacon_state(struct be_adapter
*adapter
, u8 port_num
,
1843 u8 bcn
, u8 sts
, u8 state
)
1845 struct be_mcc_wrb
*wrb
;
1846 struct be_cmd_req_enable_disable_beacon
*req
;
1849 spin_lock_bh(&adapter
->mcc_lock
);
1851 wrb
= wrb_from_mccq(adapter
);
1856 req
= embedded_payload(wrb
);
1858 be_wrb_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_COMMON
,
1859 OPCODE_COMMON_ENABLE_DISABLE_BEACON
, sizeof(*req
), wrb
, NULL
);
1861 req
->port_num
= port_num
;
1862 req
->beacon_state
= state
;
1863 req
->beacon_duration
= bcn
;
1864 req
->status_duration
= sts
;
1866 status
= be_mcc_notify_wait(adapter
);
1869 spin_unlock_bh(&adapter
->mcc_lock
);
1874 int be_cmd_get_beacon_state(struct be_adapter
*adapter
, u8 port_num
, u32
*state
)
1876 struct be_mcc_wrb
*wrb
;
1877 struct be_cmd_req_get_beacon_state
*req
;
1880 spin_lock_bh(&adapter
->mcc_lock
);
1882 wrb
= wrb_from_mccq(adapter
);
1887 req
= embedded_payload(wrb
);
1889 be_wrb_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_COMMON
,
1890 OPCODE_COMMON_GET_BEACON_STATE
, sizeof(*req
), wrb
, NULL
);
1892 req
->port_num
= port_num
;
1894 status
= be_mcc_notify_wait(adapter
);
1896 struct be_cmd_resp_get_beacon_state
*resp
=
1897 embedded_payload(wrb
);
1898 *state
= resp
->beacon_state
;
1902 spin_unlock_bh(&adapter
->mcc_lock
);
1906 int lancer_cmd_write_object(struct be_adapter
*adapter
, struct be_dma_mem
*cmd
,
1907 u32 data_size
, u32 data_offset
,
1908 const char *obj_name
, u32
*data_written
,
1909 u8
*change_status
, u8
*addn_status
)
1911 struct be_mcc_wrb
*wrb
;
1912 struct lancer_cmd_req_write_object
*req
;
1913 struct lancer_cmd_resp_write_object
*resp
;
1917 spin_lock_bh(&adapter
->mcc_lock
);
1918 adapter
->flash_status
= 0;
1920 wrb
= wrb_from_mccq(adapter
);
1926 req
= embedded_payload(wrb
);
1928 be_wrb_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_COMMON
,
1929 OPCODE_COMMON_WRITE_OBJECT
,
1930 sizeof(struct lancer_cmd_req_write_object
), wrb
,
1933 ctxt
= &req
->context
;
1934 AMAP_SET_BITS(struct amap_lancer_write_obj_context
,
1935 write_length
, ctxt
, data_size
);
1938 AMAP_SET_BITS(struct amap_lancer_write_obj_context
,
1941 AMAP_SET_BITS(struct amap_lancer_write_obj_context
,
1944 be_dws_cpu_to_le(ctxt
, sizeof(req
->context
));
1945 req
->write_offset
= cpu_to_le32(data_offset
);
1946 strcpy(req
->object_name
, obj_name
);
1947 req
->descriptor_count
= cpu_to_le32(1);
1948 req
->buf_len
= cpu_to_le32(data_size
);
1949 req
->addr_low
= cpu_to_le32((cmd
->dma
+
1950 sizeof(struct lancer_cmd_req_write_object
))
1952 req
->addr_high
= cpu_to_le32(upper_32_bits(cmd
->dma
+
1953 sizeof(struct lancer_cmd_req_write_object
)));
1955 be_mcc_notify(adapter
);
1956 spin_unlock_bh(&adapter
->mcc_lock
);
1958 if (!wait_for_completion_timeout(&adapter
->flash_compl
,
1959 msecs_to_jiffies(30000)))
1962 status
= adapter
->flash_status
;
1964 resp
= embedded_payload(wrb
);
1966 *data_written
= le32_to_cpu(resp
->actual_write_len
);
1967 *change_status
= resp
->change_status
;
1969 *addn_status
= resp
->additional_status
;
1975 spin_unlock_bh(&adapter
->mcc_lock
);
1979 int lancer_cmd_read_object(struct be_adapter
*adapter
, struct be_dma_mem
*cmd
,
1980 u32 data_size
, u32 data_offset
, const char *obj_name
,
1981 u32
*data_read
, u32
*eof
, u8
*addn_status
)
1983 struct be_mcc_wrb
*wrb
;
1984 struct lancer_cmd_req_read_object
*req
;
1985 struct lancer_cmd_resp_read_object
*resp
;
1988 spin_lock_bh(&adapter
->mcc_lock
);
1990 wrb
= wrb_from_mccq(adapter
);
1996 req
= embedded_payload(wrb
);
1998 be_wrb_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_COMMON
,
1999 OPCODE_COMMON_READ_OBJECT
,
2000 sizeof(struct lancer_cmd_req_read_object
), wrb
,
2003 req
->desired_read_len
= cpu_to_le32(data_size
);
2004 req
->read_offset
= cpu_to_le32(data_offset
);
2005 strcpy(req
->object_name
, obj_name
);
2006 req
->descriptor_count
= cpu_to_le32(1);
2007 req
->buf_len
= cpu_to_le32(data_size
);
2008 req
->addr_low
= cpu_to_le32((cmd
->dma
& 0xFFFFFFFF));
2009 req
->addr_high
= cpu_to_le32(upper_32_bits(cmd
->dma
));
2011 status
= be_mcc_notify_wait(adapter
);
2013 resp
= embedded_payload(wrb
);
2015 *data_read
= le32_to_cpu(resp
->actual_read_len
);
2016 *eof
= le32_to_cpu(resp
->eof
);
2018 *addn_status
= resp
->additional_status
;
2022 spin_unlock_bh(&adapter
->mcc_lock
);
2026 int be_cmd_write_flashrom(struct be_adapter
*adapter
, struct be_dma_mem
*cmd
,
2027 u32 flash_type
, u32 flash_opcode
, u32 buf_size
)
2029 struct be_mcc_wrb
*wrb
;
2030 struct be_cmd_write_flashrom
*req
;
2033 spin_lock_bh(&adapter
->mcc_lock
);
2034 adapter
->flash_status
= 0;
2036 wrb
= wrb_from_mccq(adapter
);
2043 be_wrb_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_COMMON
,
2044 OPCODE_COMMON_WRITE_FLASHROM
, cmd
->size
, wrb
, cmd
);
2046 req
->params
.op_type
= cpu_to_le32(flash_type
);
2047 req
->params
.op_code
= cpu_to_le32(flash_opcode
);
2048 req
->params
.data_buf_size
= cpu_to_le32(buf_size
);
2050 be_mcc_notify(adapter
);
2051 spin_unlock_bh(&adapter
->mcc_lock
);
2053 if (!wait_for_completion_timeout(&adapter
->flash_compl
,
2054 msecs_to_jiffies(40000)))
2057 status
= adapter
->flash_status
;
2062 spin_unlock_bh(&adapter
->mcc_lock
);
2066 int be_cmd_get_flash_crc(struct be_adapter
*adapter
, u8
*flashed_crc
,
2069 struct be_mcc_wrb
*wrb
;
2070 struct be_cmd_write_flashrom
*req
;
2073 spin_lock_bh(&adapter
->mcc_lock
);
2075 wrb
= wrb_from_mccq(adapter
);
2080 req
= embedded_payload(wrb
);
2082 be_wrb_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_COMMON
,
2083 OPCODE_COMMON_READ_FLASHROM
, sizeof(*req
)+4, wrb
, NULL
);
2085 req
->params
.op_type
= cpu_to_le32(OPTYPE_REDBOOT
);
2086 req
->params
.op_code
= cpu_to_le32(FLASHROM_OPER_REPORT
);
2087 req
->params
.offset
= cpu_to_le32(offset
);
2088 req
->params
.data_buf_size
= cpu_to_le32(0x4);
2090 status
= be_mcc_notify_wait(adapter
);
2092 memcpy(flashed_crc
, req
->params
.data_buf
, 4);
2095 spin_unlock_bh(&adapter
->mcc_lock
);
2099 int be_cmd_enable_magic_wol(struct be_adapter
*adapter
, u8
*mac
,
2100 struct be_dma_mem
*nonemb_cmd
)
2102 struct be_mcc_wrb
*wrb
;
2103 struct be_cmd_req_acpi_wol_magic_config
*req
;
2106 spin_lock_bh(&adapter
->mcc_lock
);
2108 wrb
= wrb_from_mccq(adapter
);
2113 req
= nonemb_cmd
->va
;
2115 be_wrb_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_ETH
,
2116 OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG
, sizeof(*req
), wrb
,
2118 memcpy(req
->magic_mac
, mac
, ETH_ALEN
);
2120 status
= be_mcc_notify_wait(adapter
);
2123 spin_unlock_bh(&adapter
->mcc_lock
);
2127 int be_cmd_set_loopback(struct be_adapter
*adapter
, u8 port_num
,
2128 u8 loopback_type
, u8 enable
)
2130 struct be_mcc_wrb
*wrb
;
2131 struct be_cmd_req_set_lmode
*req
;
2134 spin_lock_bh(&adapter
->mcc_lock
);
2136 wrb
= wrb_from_mccq(adapter
);
2142 req
= embedded_payload(wrb
);
2144 be_wrb_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_LOWLEVEL
,
2145 OPCODE_LOWLEVEL_SET_LOOPBACK_MODE
, sizeof(*req
), wrb
,
2148 req
->src_port
= port_num
;
2149 req
->dest_port
= port_num
;
2150 req
->loopback_type
= loopback_type
;
2151 req
->loopback_state
= enable
;
2153 status
= be_mcc_notify_wait(adapter
);
2155 spin_unlock_bh(&adapter
->mcc_lock
);
2159 int be_cmd_loopback_test(struct be_adapter
*adapter
, u32 port_num
,
2160 u32 loopback_type
, u32 pkt_size
, u32 num_pkts
, u64 pattern
)
2162 struct be_mcc_wrb
*wrb
;
2163 struct be_cmd_req_loopback_test
*req
;
2166 spin_lock_bh(&adapter
->mcc_lock
);
2168 wrb
= wrb_from_mccq(adapter
);
2174 req
= embedded_payload(wrb
);
2176 be_wrb_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_LOWLEVEL
,
2177 OPCODE_LOWLEVEL_LOOPBACK_TEST
, sizeof(*req
), wrb
, NULL
);
2178 req
->hdr
.timeout
= cpu_to_le32(4);
2180 req
->pattern
= cpu_to_le64(pattern
);
2181 req
->src_port
= cpu_to_le32(port_num
);
2182 req
->dest_port
= cpu_to_le32(port_num
);
2183 req
->pkt_size
= cpu_to_le32(pkt_size
);
2184 req
->num_pkts
= cpu_to_le32(num_pkts
);
2185 req
->loopback_type
= cpu_to_le32(loopback_type
);
2187 status
= be_mcc_notify_wait(adapter
);
2189 struct be_cmd_resp_loopback_test
*resp
= embedded_payload(wrb
);
2190 status
= le32_to_cpu(resp
->status
);
2194 spin_unlock_bh(&adapter
->mcc_lock
);
2198 int be_cmd_ddr_dma_test(struct be_adapter
*adapter
, u64 pattern
,
2199 u32 byte_cnt
, struct be_dma_mem
*cmd
)
2201 struct be_mcc_wrb
*wrb
;
2202 struct be_cmd_req_ddrdma_test
*req
;
2206 spin_lock_bh(&adapter
->mcc_lock
);
2208 wrb
= wrb_from_mccq(adapter
);
2214 be_wrb_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_LOWLEVEL
,
2215 OPCODE_LOWLEVEL_HOST_DDR_DMA
, cmd
->size
, wrb
, cmd
);
2217 req
->pattern
= cpu_to_le64(pattern
);
2218 req
->byte_count
= cpu_to_le32(byte_cnt
);
2219 for (i
= 0; i
< byte_cnt
; i
++) {
2220 req
->snd_buff
[i
] = (u8
)(pattern
>> (j
*8));
2226 status
= be_mcc_notify_wait(adapter
);
2229 struct be_cmd_resp_ddrdma_test
*resp
;
2231 if ((memcmp(resp
->rcv_buff
, req
->snd_buff
, byte_cnt
) != 0) ||
2238 spin_unlock_bh(&adapter
->mcc_lock
);
2242 int be_cmd_get_seeprom_data(struct be_adapter
*adapter
,
2243 struct be_dma_mem
*nonemb_cmd
)
2245 struct be_mcc_wrb
*wrb
;
2246 struct be_cmd_req_seeprom_read
*req
;
2250 spin_lock_bh(&adapter
->mcc_lock
);
2252 wrb
= wrb_from_mccq(adapter
);
2257 req
= nonemb_cmd
->va
;
2258 sge
= nonembedded_sgl(wrb
);
2260 be_wrb_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_COMMON
,
2261 OPCODE_COMMON_SEEPROM_READ
, sizeof(*req
), wrb
,
2264 status
= be_mcc_notify_wait(adapter
);
2267 spin_unlock_bh(&adapter
->mcc_lock
);
2271 int be_cmd_get_phy_info(struct be_adapter
*adapter
)
2273 struct be_mcc_wrb
*wrb
;
2274 struct be_cmd_req_get_phy_info
*req
;
2275 struct be_dma_mem cmd
;
2278 spin_lock_bh(&adapter
->mcc_lock
);
2280 wrb
= wrb_from_mccq(adapter
);
2285 cmd
.size
= sizeof(struct be_cmd_req_get_phy_info
);
2286 cmd
.va
= pci_alloc_consistent(adapter
->pdev
, cmd
.size
,
2289 dev_err(&adapter
->pdev
->dev
, "Memory alloc failure\n");
2296 be_wrb_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_COMMON
,
2297 OPCODE_COMMON_GET_PHY_DETAILS
, sizeof(*req
),
2300 status
= be_mcc_notify_wait(adapter
);
2302 struct be_phy_info
*resp_phy_info
=
2303 cmd
.va
+ sizeof(struct be_cmd_req_hdr
);
2304 adapter
->phy
.phy_type
= le16_to_cpu(resp_phy_info
->phy_type
);
2305 adapter
->phy
.interface_type
=
2306 le16_to_cpu(resp_phy_info
->interface_type
);
2307 adapter
->phy
.auto_speeds_supported
=
2308 le16_to_cpu(resp_phy_info
->auto_speeds_supported
);
2309 adapter
->phy
.fixed_speeds_supported
=
2310 le16_to_cpu(resp_phy_info
->fixed_speeds_supported
);
2311 adapter
->phy
.misc_params
=
2312 le32_to_cpu(resp_phy_info
->misc_params
);
2314 pci_free_consistent(adapter
->pdev
, cmd
.size
,
2317 spin_unlock_bh(&adapter
->mcc_lock
);
2321 int be_cmd_set_qos(struct be_adapter
*adapter
, u32 bps
, u32 domain
)
2323 struct be_mcc_wrb
*wrb
;
2324 struct be_cmd_req_set_qos
*req
;
2327 spin_lock_bh(&adapter
->mcc_lock
);
2329 wrb
= wrb_from_mccq(adapter
);
2335 req
= embedded_payload(wrb
);
2337 be_wrb_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_COMMON
,
2338 OPCODE_COMMON_SET_QOS
, sizeof(*req
), wrb
, NULL
);
2340 req
->hdr
.domain
= domain
;
2341 req
->valid_bits
= cpu_to_le32(BE_QOS_BITS_NIC
);
2342 req
->max_bps_nic
= cpu_to_le32(bps
);
2344 status
= be_mcc_notify_wait(adapter
);
2347 spin_unlock_bh(&adapter
->mcc_lock
);
2351 int be_cmd_get_cntl_attributes(struct be_adapter
*adapter
)
2353 struct be_mcc_wrb
*wrb
;
2354 struct be_cmd_req_cntl_attribs
*req
;
2355 struct be_cmd_resp_cntl_attribs
*resp
;
2357 int payload_len
= max(sizeof(*req
), sizeof(*resp
));
2358 struct mgmt_controller_attrib
*attribs
;
2359 struct be_dma_mem attribs_cmd
;
2361 memset(&attribs_cmd
, 0, sizeof(struct be_dma_mem
));
2362 attribs_cmd
.size
= sizeof(struct be_cmd_resp_cntl_attribs
);
2363 attribs_cmd
.va
= pci_alloc_consistent(adapter
->pdev
, attribs_cmd
.size
,
2365 if (!attribs_cmd
.va
) {
2366 dev_err(&adapter
->pdev
->dev
,
2367 "Memory allocation failure\n");
2371 if (mutex_lock_interruptible(&adapter
->mbox_lock
))
2374 wrb
= wrb_from_mbox(adapter
);
2379 req
= attribs_cmd
.va
;
2381 be_wrb_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_COMMON
,
2382 OPCODE_COMMON_GET_CNTL_ATTRIBUTES
, payload_len
, wrb
,
2385 status
= be_mbox_notify_wait(adapter
);
2387 attribs
= attribs_cmd
.va
+ sizeof(struct be_cmd_resp_hdr
);
2388 adapter
->hba_port_num
= attribs
->hba_attribs
.phy_port
;
2392 mutex_unlock(&adapter
->mbox_lock
);
2393 pci_free_consistent(adapter
->pdev
, attribs_cmd
.size
, attribs_cmd
.va
,
2399 int be_cmd_req_native_mode(struct be_adapter
*adapter
)
2401 struct be_mcc_wrb
*wrb
;
2402 struct be_cmd_req_set_func_cap
*req
;
2405 if (mutex_lock_interruptible(&adapter
->mbox_lock
))
2408 wrb
= wrb_from_mbox(adapter
);
2414 req
= embedded_payload(wrb
);
2416 be_wrb_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_COMMON
,
2417 OPCODE_COMMON_SET_DRIVER_FUNCTION_CAP
, sizeof(*req
), wrb
, NULL
);
2419 req
->valid_cap_flags
= cpu_to_le32(CAPABILITY_SW_TIMESTAMPS
|
2420 CAPABILITY_BE3_NATIVE_ERX_API
);
2421 req
->cap_flags
= cpu_to_le32(CAPABILITY_BE3_NATIVE_ERX_API
);
2423 status
= be_mbox_notify_wait(adapter
);
2425 struct be_cmd_resp_set_func_cap
*resp
= embedded_payload(wrb
);
2426 adapter
->be3_native
= le32_to_cpu(resp
->cap_flags
) &
2427 CAPABILITY_BE3_NATIVE_ERX_API
;
2428 if (!adapter
->be3_native
)
2429 dev_warn(&adapter
->pdev
->dev
,
2430 "adapter not in advanced mode\n");
2433 mutex_unlock(&adapter
->mbox_lock
);
2437 /* Uses synchronous MCCQ */
2438 int be_cmd_get_mac_from_list(struct be_adapter
*adapter
, u8
*mac
,
2439 bool *pmac_id_active
, u32
*pmac_id
, u8 domain
)
2441 struct be_mcc_wrb
*wrb
;
2442 struct be_cmd_req_get_mac_list
*req
;
2445 struct be_dma_mem get_mac_list_cmd
;
2448 memset(&get_mac_list_cmd
, 0, sizeof(struct be_dma_mem
));
2449 get_mac_list_cmd
.size
= sizeof(struct be_cmd_resp_get_mac_list
);
2450 get_mac_list_cmd
.va
= pci_alloc_consistent(adapter
->pdev
,
2451 get_mac_list_cmd
.size
,
2452 &get_mac_list_cmd
.dma
);
2454 if (!get_mac_list_cmd
.va
) {
2455 dev_err(&adapter
->pdev
->dev
,
2456 "Memory allocation failure during GET_MAC_LIST\n");
2460 spin_lock_bh(&adapter
->mcc_lock
);
2462 wrb
= wrb_from_mccq(adapter
);
2468 req
= get_mac_list_cmd
.va
;
2470 be_wrb_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_COMMON
,
2471 OPCODE_COMMON_GET_MAC_LIST
, sizeof(*req
),
2472 wrb
, &get_mac_list_cmd
);
2474 req
->hdr
.domain
= domain
;
2475 req
->mac_type
= MAC_ADDRESS_TYPE_NETWORK
;
2476 req
->perm_override
= 1;
2478 status
= be_mcc_notify_wait(adapter
);
2480 struct be_cmd_resp_get_mac_list
*resp
=
2481 get_mac_list_cmd
.va
;
2482 mac_count
= resp
->true_mac_count
+ resp
->pseudo_mac_count
;
2483 /* Mac list returned could contain one or more active mac_ids
2484 * or one or more true or pseudo permanant mac addresses.
2485 * If an active mac_id is present, return first active mac_id
2488 for (i
= 0; i
< mac_count
; i
++) {
2489 struct get_list_macaddr
*mac_entry
;
2493 mac_entry
= &resp
->macaddr_list
[i
];
2494 mac_addr_size
= le16_to_cpu(mac_entry
->mac_addr_size
);
2495 /* mac_id is a 32 bit value and mac_addr size
2498 if (mac_addr_size
== sizeof(u32
)) {
2499 *pmac_id_active
= true;
2500 mac_id
= mac_entry
->mac_addr_id
.s_mac_id
.mac_id
;
2501 *pmac_id
= le32_to_cpu(mac_id
);
2505 /* If no active mac_id found, return first mac addr */
2506 *pmac_id_active
= false;
2507 memcpy(mac
, resp
->macaddr_list
[0].mac_addr_id
.macaddr
,
2512 spin_unlock_bh(&adapter
->mcc_lock
);
2513 pci_free_consistent(adapter
->pdev
, get_mac_list_cmd
.size
,
2514 get_mac_list_cmd
.va
, get_mac_list_cmd
.dma
);
2518 /* Uses synchronous MCCQ */
2519 int be_cmd_set_mac_list(struct be_adapter
*adapter
, u8
*mac_array
,
2520 u8 mac_count
, u32 domain
)
2522 struct be_mcc_wrb
*wrb
;
2523 struct be_cmd_req_set_mac_list
*req
;
2525 struct be_dma_mem cmd
;
2527 memset(&cmd
, 0, sizeof(struct be_dma_mem
));
2528 cmd
.size
= sizeof(struct be_cmd_req_set_mac_list
);
2529 cmd
.va
= dma_alloc_coherent(&adapter
->pdev
->dev
, cmd
.size
,
2530 &cmd
.dma
, GFP_KERNEL
);
2532 dev_err(&adapter
->pdev
->dev
, "Memory alloc failure\n");
2536 spin_lock_bh(&adapter
->mcc_lock
);
2538 wrb
= wrb_from_mccq(adapter
);
2545 be_wrb_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_COMMON
,
2546 OPCODE_COMMON_SET_MAC_LIST
, sizeof(*req
),
2549 req
->hdr
.domain
= domain
;
2550 req
->mac_count
= mac_count
;
2552 memcpy(req
->mac
, mac_array
, ETH_ALEN
*mac_count
);
2554 status
= be_mcc_notify_wait(adapter
);
2557 dma_free_coherent(&adapter
->pdev
->dev
, cmd
.size
,
2559 spin_unlock_bh(&adapter
->mcc_lock
);
2563 int be_cmd_set_hsw_config(struct be_adapter
*adapter
, u16 pvid
,
2564 u32 domain
, u16 intf_id
)
2566 struct be_mcc_wrb
*wrb
;
2567 struct be_cmd_req_set_hsw_config
*req
;
2571 spin_lock_bh(&adapter
->mcc_lock
);
2573 wrb
= wrb_from_mccq(adapter
);
2579 req
= embedded_payload(wrb
);
2580 ctxt
= &req
->context
;
2582 be_wrb_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_COMMON
,
2583 OPCODE_COMMON_SET_HSW_CONFIG
, sizeof(*req
), wrb
, NULL
);
2585 req
->hdr
.domain
= domain
;
2586 AMAP_SET_BITS(struct amap_set_hsw_context
, interface_id
, ctxt
, intf_id
);
2588 AMAP_SET_BITS(struct amap_set_hsw_context
, pvid_valid
, ctxt
, 1);
2589 AMAP_SET_BITS(struct amap_set_hsw_context
, pvid
, ctxt
, pvid
);
2592 be_dws_cpu_to_le(req
->context
, sizeof(req
->context
));
2593 status
= be_mcc_notify_wait(adapter
);
2596 spin_unlock_bh(&adapter
->mcc_lock
);
2600 /* Get Hyper switch config */
2601 int be_cmd_get_hsw_config(struct be_adapter
*adapter
, u16
*pvid
,
2602 u32 domain
, u16 intf_id
)
2604 struct be_mcc_wrb
*wrb
;
2605 struct be_cmd_req_get_hsw_config
*req
;
2610 spin_lock_bh(&adapter
->mcc_lock
);
2612 wrb
= wrb_from_mccq(adapter
);
2618 req
= embedded_payload(wrb
);
2619 ctxt
= &req
->context
;
2621 be_wrb_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_COMMON
,
2622 OPCODE_COMMON_GET_HSW_CONFIG
, sizeof(*req
), wrb
, NULL
);
2624 req
->hdr
.domain
= domain
;
2625 AMAP_SET_BITS(struct amap_get_hsw_req_context
, interface_id
, ctxt
,
2627 AMAP_SET_BITS(struct amap_get_hsw_req_context
, pvid_valid
, ctxt
, 1);
2628 be_dws_cpu_to_le(req
->context
, sizeof(req
->context
));
2630 status
= be_mcc_notify_wait(adapter
);
2632 struct be_cmd_resp_get_hsw_config
*resp
=
2633 embedded_payload(wrb
);
2634 be_dws_le_to_cpu(&resp
->context
,
2635 sizeof(resp
->context
));
2636 vid
= AMAP_GET_BITS(struct amap_get_hsw_resp_context
,
2637 pvid
, &resp
->context
);
2638 *pvid
= le16_to_cpu(vid
);
2642 spin_unlock_bh(&adapter
->mcc_lock
);
2646 int be_cmd_get_acpi_wol_cap(struct be_adapter
*adapter
)
2648 struct be_mcc_wrb
*wrb
;
2649 struct be_cmd_req_acpi_wol_magic_config_v1
*req
;
2651 int payload_len
= sizeof(*req
);
2652 struct be_dma_mem cmd
;
2654 memset(&cmd
, 0, sizeof(struct be_dma_mem
));
2655 cmd
.size
= sizeof(struct be_cmd_resp_acpi_wol_magic_config_v1
);
2656 cmd
.va
= pci_alloc_consistent(adapter
->pdev
, cmd
.size
,
2659 dev_err(&adapter
->pdev
->dev
,
2660 "Memory allocation failure\n");
2664 if (mutex_lock_interruptible(&adapter
->mbox_lock
))
2667 wrb
= wrb_from_mbox(adapter
);
2675 be_wrb_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_ETH
,
2676 OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG
,
2677 payload_len
, wrb
, &cmd
);
2679 req
->hdr
.version
= 1;
2680 req
->query_options
= BE_GET_WOL_CAP
;
2682 status
= be_mbox_notify_wait(adapter
);
2684 struct be_cmd_resp_acpi_wol_magic_config_v1
*resp
;
2685 resp
= (struct be_cmd_resp_acpi_wol_magic_config_v1
*) cmd
.va
;
2687 /* the command could succeed misleadingly on old f/w
2688 * which is not aware of the V1 version. fake an error. */
2689 if (resp
->hdr
.response_length
< payload_len
) {
2693 adapter
->wol_cap
= resp
->wol_settings
;
2696 mutex_unlock(&adapter
->mbox_lock
);
2697 pci_free_consistent(adapter
->pdev
, cmd
.size
, cmd
.va
, cmd
.dma
);
2701 int be_cmd_get_ext_fat_capabilites(struct be_adapter
*adapter
,
2702 struct be_dma_mem
*cmd
)
2704 struct be_mcc_wrb
*wrb
;
2705 struct be_cmd_req_get_ext_fat_caps
*req
;
2708 if (mutex_lock_interruptible(&adapter
->mbox_lock
))
2711 wrb
= wrb_from_mbox(adapter
);
2718 be_wrb_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_COMMON
,
2719 OPCODE_COMMON_GET_EXT_FAT_CAPABILITES
,
2720 cmd
->size
, wrb
, cmd
);
2721 req
->parameter_type
= cpu_to_le32(1);
2723 status
= be_mbox_notify_wait(adapter
);
2725 mutex_unlock(&adapter
->mbox_lock
);
2729 int be_cmd_set_ext_fat_capabilites(struct be_adapter
*adapter
,
2730 struct be_dma_mem
*cmd
,
2731 struct be_fat_conf_params
*configs
)
2733 struct be_mcc_wrb
*wrb
;
2734 struct be_cmd_req_set_ext_fat_caps
*req
;
2737 spin_lock_bh(&adapter
->mcc_lock
);
2739 wrb
= wrb_from_mccq(adapter
);
2746 memcpy(&req
->set_params
, configs
, sizeof(struct be_fat_conf_params
));
2747 be_wrb_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_COMMON
,
2748 OPCODE_COMMON_SET_EXT_FAT_CAPABILITES
,
2749 cmd
->size
, wrb
, cmd
);
2751 status
= be_mcc_notify_wait(adapter
);
2753 spin_unlock_bh(&adapter
->mcc_lock
);
2757 int be_cmd_query_port_name(struct be_adapter
*adapter
, u8
*port_name
)
2759 struct be_mcc_wrb
*wrb
;
2760 struct be_cmd_req_get_port_name
*req
;
2763 if (!lancer_chip(adapter
)) {
2764 *port_name
= adapter
->hba_port_num
+ '0';
2768 spin_lock_bh(&adapter
->mcc_lock
);
2770 wrb
= wrb_from_mccq(adapter
);
2776 req
= embedded_payload(wrb
);
2778 be_wrb_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_COMMON
,
2779 OPCODE_COMMON_GET_PORT_NAME
, sizeof(*req
), wrb
,
2781 req
->hdr
.version
= 1;
2783 status
= be_mcc_notify_wait(adapter
);
2785 struct be_cmd_resp_get_port_name
*resp
= embedded_payload(wrb
);
2786 *port_name
= resp
->port_name
[adapter
->hba_port_num
];
2788 *port_name
= adapter
->hba_port_num
+ '0';
2791 spin_unlock_bh(&adapter
->mcc_lock
);
2795 static struct be_nic_resource_desc
*be_get_nic_desc(u8
*buf
, u32 desc_count
,
2798 struct be_nic_resource_desc
*desc
= (struct be_nic_resource_desc
*)buf
;
2801 for (i
= 0; i
< desc_count
; i
++) {
2802 desc
->desc_len
= RESOURCE_DESC_SIZE
;
2803 if (((void *)desc
+ desc
->desc_len
) >
2804 (void *)(buf
+ max_buf_size
)) {
2809 if (desc
->desc_type
== NIC_RESOURCE_DESC_TYPE_ID
)
2812 desc
= (void *)desc
+ desc
->desc_len
;
2815 if (!desc
|| i
== MAX_RESOURCE_DESC
)
2822 int be_cmd_get_func_config(struct be_adapter
*adapter
)
2824 struct be_mcc_wrb
*wrb
;
2825 struct be_cmd_req_get_func_config
*req
;
2827 struct be_dma_mem cmd
;
2829 memset(&cmd
, 0, sizeof(struct be_dma_mem
));
2830 cmd
.size
= sizeof(struct be_cmd_resp_get_func_config
);
2831 cmd
.va
= pci_alloc_consistent(adapter
->pdev
, cmd
.size
,
2834 dev_err(&adapter
->pdev
->dev
, "Memory alloc failure\n");
2837 if (mutex_lock_interruptible(&adapter
->mbox_lock
))
2840 wrb
= wrb_from_mbox(adapter
);
2848 be_wrb_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_COMMON
,
2849 OPCODE_COMMON_GET_FUNC_CONFIG
,
2850 cmd
.size
, wrb
, &cmd
);
2852 status
= be_mbox_notify_wait(adapter
);
2854 struct be_cmd_resp_get_func_config
*resp
= cmd
.va
;
2855 u32 desc_count
= le32_to_cpu(resp
->desc_count
);
2856 struct be_nic_resource_desc
*desc
;
2858 desc
= be_get_nic_desc(resp
->func_param
, desc_count
,
2859 sizeof(resp
->func_param
));
2865 adapter
->pf_number
= desc
->pf_num
;
2866 adapter
->max_pmac_cnt
= le16_to_cpu(desc
->unicast_mac_count
);
2867 adapter
->max_vlans
= le16_to_cpu(desc
->vlan_count
);
2868 adapter
->max_mcast_mac
= le16_to_cpu(desc
->mcast_mac_count
);
2869 adapter
->max_tx_queues
= le16_to_cpu(desc
->txq_count
);
2870 adapter
->max_rss_queues
= le16_to_cpu(desc
->rssq_count
);
2871 adapter
->max_rx_queues
= le16_to_cpu(desc
->rq_count
);
2873 adapter
->max_event_queues
= le16_to_cpu(desc
->eq_count
);
2874 adapter
->if_cap_flags
= le32_to_cpu(desc
->cap_flags
);
2877 mutex_unlock(&adapter
->mbox_lock
);
2878 pci_free_consistent(adapter
->pdev
, cmd
.size
,
2884 int be_cmd_get_profile_config(struct be_adapter
*adapter
, u32
*cap_flags
,
2887 struct be_mcc_wrb
*wrb
;
2888 struct be_cmd_req_get_profile_config
*req
;
2890 struct be_dma_mem cmd
;
2892 memset(&cmd
, 0, sizeof(struct be_dma_mem
));
2893 cmd
.size
= sizeof(struct be_cmd_resp_get_profile_config
);
2894 cmd
.va
= pci_alloc_consistent(adapter
->pdev
, cmd
.size
,
2897 dev_err(&adapter
->pdev
->dev
, "Memory alloc failure\n");
2901 spin_lock_bh(&adapter
->mcc_lock
);
2903 wrb
= wrb_from_mccq(adapter
);
2911 be_wrb_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_COMMON
,
2912 OPCODE_COMMON_GET_PROFILE_CONFIG
,
2913 cmd
.size
, wrb
, &cmd
);
2915 req
->type
= ACTIVE_PROFILE_TYPE
;
2916 req
->hdr
.domain
= domain
;
2918 status
= be_mcc_notify_wait(adapter
);
2920 struct be_cmd_resp_get_profile_config
*resp
= cmd
.va
;
2921 u32 desc_count
= le32_to_cpu(resp
->desc_count
);
2922 struct be_nic_resource_desc
*desc
;
2924 desc
= be_get_nic_desc(resp
->func_param
, desc_count
,
2925 sizeof(resp
->func_param
));
2931 *cap_flags
= le32_to_cpu(desc
->cap_flags
);
2934 spin_unlock_bh(&adapter
->mcc_lock
);
2935 pci_free_consistent(adapter
->pdev
, cmd
.size
,
2941 int be_cmd_set_profile_config(struct be_adapter
*adapter
, u32 bps
,
2944 struct be_mcc_wrb
*wrb
;
2945 struct be_cmd_req_set_profile_config
*req
;
2948 spin_lock_bh(&adapter
->mcc_lock
);
2950 wrb
= wrb_from_mccq(adapter
);
2956 req
= embedded_payload(wrb
);
2958 be_wrb_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_COMMON
,
2959 OPCODE_COMMON_SET_PROFILE_CONFIG
, sizeof(*req
),
2962 req
->hdr
.domain
= domain
;
2963 req
->desc_count
= cpu_to_le32(1);
2965 req
->nic_desc
.desc_type
= NIC_RESOURCE_DESC_TYPE_ID
;
2966 req
->nic_desc
.desc_len
= RESOURCE_DESC_SIZE
;
2967 req
->nic_desc
.flags
= (1 << QUN
) | (1 << IMM
) | (1 << NOSV
);
2968 req
->nic_desc
.pf_num
= adapter
->pf_number
;
2969 req
->nic_desc
.vf_num
= domain
;
2971 /* Mark fields invalid */
2972 req
->nic_desc
.unicast_mac_count
= 0xFFFF;
2973 req
->nic_desc
.mcc_count
= 0xFFFF;
2974 req
->nic_desc
.vlan_count
= 0xFFFF;
2975 req
->nic_desc
.mcast_mac_count
= 0xFFFF;
2976 req
->nic_desc
.txq_count
= 0xFFFF;
2977 req
->nic_desc
.rq_count
= 0xFFFF;
2978 req
->nic_desc
.rssq_count
= 0xFFFF;
2979 req
->nic_desc
.lro_count
= 0xFFFF;
2980 req
->nic_desc
.cq_count
= 0xFFFF;
2981 req
->nic_desc
.toe_conn_count
= 0xFFFF;
2982 req
->nic_desc
.eq_count
= 0xFFFF;
2983 req
->nic_desc
.link_param
= 0xFF;
2984 req
->nic_desc
.bw_min
= 0xFFFFFFFF;
2985 req
->nic_desc
.acpi_params
= 0xFF;
2986 req
->nic_desc
.wol_param
= 0x0F;
2989 req
->nic_desc
.bw_min
= cpu_to_le32(bps
);
2990 req
->nic_desc
.bw_max
= cpu_to_le32(bps
);
2991 status
= be_mcc_notify_wait(adapter
);
2993 spin_unlock_bh(&adapter
->mcc_lock
);
2997 int be_roce_mcc_cmd(void *netdev_handle
, void *wrb_payload
,
2998 int wrb_payload_size
, u16
*cmd_status
, u16
*ext_status
)
3000 struct be_adapter
*adapter
= netdev_priv(netdev_handle
);
3001 struct be_mcc_wrb
*wrb
;
3002 struct be_cmd_req_hdr
*hdr
= (struct be_cmd_req_hdr
*) wrb_payload
;
3003 struct be_cmd_req_hdr
*req
;
3004 struct be_cmd_resp_hdr
*resp
;
3007 spin_lock_bh(&adapter
->mcc_lock
);
3009 wrb
= wrb_from_mccq(adapter
);
3014 req
= embedded_payload(wrb
);
3015 resp
= embedded_payload(wrb
);
3017 be_wrb_cmd_hdr_prepare(req
, hdr
->subsystem
,
3018 hdr
->opcode
, wrb_payload_size
, wrb
, NULL
);
3019 memcpy(req
, wrb_payload
, wrb_payload_size
);
3020 be_dws_cpu_to_le(req
, wrb_payload_size
);
3022 status
= be_mcc_notify_wait(adapter
);
3024 *cmd_status
= (status
& 0xffff);
3027 memcpy(wrb_payload
, resp
, sizeof(*resp
) + resp
->response_length
);
3028 be_dws_le_to_cpu(wrb_payload
, sizeof(*resp
) + resp
->response_length
);
3030 spin_unlock_bh(&adapter
->mcc_lock
);
3033 EXPORT_SYMBOL(be_roce_mcc_cmd
);