2 * Copyright (C) 2005 - 2010 ServerEngines
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
10 * Contact Information:
11 * linux-drivers@serverengines.com
14 * 209 N. Fair Oaks Ave
/* How often (in units of the periodic worker's invocations) to poll the
 * die temperature; see the MODULO() use in be_cmd_get_stats().
 * Must be a power of 2 or else MODULO will BUG_ON.
 * Set to 0 at runtime if the temperature IOCTL fails once (see
 * be_cmd_get_die_temperature()). */
static int be_get_temp_freq = 32;
24 static void be_mcc_notify(struct be_adapter
*adapter
)
26 struct be_queue_info
*mccq
= &adapter
->mcc_obj
.q
;
29 if (adapter
->eeh_err
) {
30 dev_info(&adapter
->pdev
->dev
,
31 "Error in Card Detected! Cannot issue commands\n");
35 val
|= mccq
->id
& DB_MCCQ_RING_ID_MASK
;
36 val
|= 1 << DB_MCCQ_NUM_POSTED_SHIFT
;
39 iowrite32(val
, adapter
->db
+ DB_MCCQ_OFFSET
);
42 /* To check if valid bit is set, check the entire word as we don't know
43 * the endianness of the data (old entry is host endian while a new entry is
45 static inline bool be_mcc_compl_is_new(struct be_mcc_compl
*compl)
47 if (compl->flags
!= 0) {
48 compl->flags
= le32_to_cpu(compl->flags
);
49 BUG_ON((compl->flags
& CQE_FLAGS_VALID_MASK
) == 0);
56 /* Need to reset the entire word that houses the valid bit */
57 static inline void be_mcc_compl_use(struct be_mcc_compl
*compl)
62 static int be_mcc_compl_process(struct be_adapter
*adapter
,
63 struct be_mcc_compl
*compl)
65 u16 compl_status
, extd_status
;
67 /* Just swap the status to host endian; mcc tag is opaquely copied
69 be_dws_le_to_cpu(compl, 4);
71 compl_status
= (compl->status
>> CQE_STATUS_COMPL_SHIFT
) &
72 CQE_STATUS_COMPL_MASK
;
74 if ((compl->tag0
== OPCODE_COMMON_WRITE_FLASHROM
) &&
75 (compl->tag1
== CMD_SUBSYSTEM_COMMON
)) {
76 adapter
->flash_status
= compl_status
;
77 complete(&adapter
->flash_compl
);
80 if (compl_status
== MCC_STATUS_SUCCESS
) {
81 if (compl->tag0
== OPCODE_ETH_GET_STATISTICS
) {
82 struct be_cmd_resp_get_stats
*resp
=
83 adapter
->stats_cmd
.va
;
84 be_dws_le_to_cpu(&resp
->hw_stats
,
85 sizeof(resp
->hw_stats
));
86 netdev_stats_update(adapter
);
87 adapter
->stats_cmd_sent
= false;
89 } else if ((compl_status
!= MCC_STATUS_NOT_SUPPORTED
) &&
90 (compl->tag0
!= OPCODE_COMMON_NTWK_MAC_QUERY
)) {
91 extd_status
= (compl->status
>> CQE_STATUS_EXTD_SHIFT
) &
93 dev_warn(&adapter
->pdev
->dev
,
94 "Error in cmd completion - opcode %d, compl %d, extd %d\n",
95 compl->tag0
, compl_status
, extd_status
);
100 /* Link state evt is a string of bytes; no need for endian swapping */
101 static void be_async_link_state_process(struct be_adapter
*adapter
,
102 struct be_async_event_link_state
*evt
)
104 be_link_status_update(adapter
,
105 evt
->port_link_status
== ASYNC_EVENT_LINK_UP
);
108 /* Grp5 CoS Priority evt */
109 static void be_async_grp5_cos_priority_process(struct be_adapter
*adapter
,
110 struct be_async_event_grp5_cos_priority
*evt
)
113 adapter
->vlan_prio_bmap
= evt
->available_priority_bmap
;
114 adapter
->recommended_prio
&= ~VLAN_PRIO_MASK
;
115 adapter
->recommended_prio
=
116 evt
->reco_default_priority
<< VLAN_PRIO_SHIFT
;
120 /* Grp5 QOS Speed evt */
121 static void be_async_grp5_qos_speed_process(struct be_adapter
*adapter
,
122 struct be_async_event_grp5_qos_link_speed
*evt
)
124 if (evt
->physical_port
== adapter
->port_num
) {
125 /* qos_link_speed is in units of 10 Mbps */
126 adapter
->link_speed
= evt
->qos_link_speed
* 10;
131 static void be_async_grp5_pvid_state_process(struct be_adapter
*adapter
,
132 struct be_async_event_grp5_pvid_state
*evt
)
135 adapter
->pvid
= evt
->tag
;
140 static void be_async_grp5_evt_process(struct be_adapter
*adapter
,
141 u32 trailer
, struct be_mcc_compl
*evt
)
145 event_type
= (trailer
>> ASYNC_TRAILER_EVENT_TYPE_SHIFT
) &
146 ASYNC_TRAILER_EVENT_TYPE_MASK
;
148 switch (event_type
) {
149 case ASYNC_EVENT_COS_PRIORITY
:
150 be_async_grp5_cos_priority_process(adapter
,
151 (struct be_async_event_grp5_cos_priority
*)evt
);
153 case ASYNC_EVENT_QOS_SPEED
:
154 be_async_grp5_qos_speed_process(adapter
,
155 (struct be_async_event_grp5_qos_link_speed
*)evt
);
157 case ASYNC_EVENT_PVID_STATE
:
158 be_async_grp5_pvid_state_process(adapter
,
159 (struct be_async_event_grp5_pvid_state
*)evt
);
162 dev_warn(&adapter
->pdev
->dev
, "Unknown grp5 event!\n");
167 static inline bool is_link_state_evt(u32 trailer
)
169 return ((trailer
>> ASYNC_TRAILER_EVENT_CODE_SHIFT
) &
170 ASYNC_TRAILER_EVENT_CODE_MASK
) ==
171 ASYNC_EVENT_CODE_LINK_STATE
;
174 static inline bool is_grp5_evt(u32 trailer
)
176 return (((trailer
>> ASYNC_TRAILER_EVENT_CODE_SHIFT
) &
177 ASYNC_TRAILER_EVENT_CODE_MASK
) ==
178 ASYNC_EVENT_CODE_GRP_5
);
181 static struct be_mcc_compl
*be_mcc_compl_get(struct be_adapter
*adapter
)
183 struct be_queue_info
*mcc_cq
= &adapter
->mcc_obj
.cq
;
184 struct be_mcc_compl
*compl = queue_tail_node(mcc_cq
);
186 if (be_mcc_compl_is_new(compl)) {
187 queue_tail_inc(mcc_cq
);
193 void be_async_mcc_enable(struct be_adapter
*adapter
)
195 spin_lock_bh(&adapter
->mcc_cq_lock
);
197 be_cq_notify(adapter
, adapter
->mcc_obj
.cq
.id
, true, 0);
198 adapter
->mcc_obj
.rearm_cq
= true;
200 spin_unlock_bh(&adapter
->mcc_cq_lock
);
203 void be_async_mcc_disable(struct be_adapter
*adapter
)
205 adapter
->mcc_obj
.rearm_cq
= false;
208 int be_process_mcc(struct be_adapter
*adapter
, int *status
)
210 struct be_mcc_compl
*compl;
212 struct be_mcc_obj
*mcc_obj
= &adapter
->mcc_obj
;
214 spin_lock_bh(&adapter
->mcc_cq_lock
);
215 while ((compl = be_mcc_compl_get(adapter
))) {
216 if (compl->flags
& CQE_FLAGS_ASYNC_MASK
) {
217 /* Interpret flags as an async trailer */
218 if (is_link_state_evt(compl->flags
))
219 be_async_link_state_process(adapter
,
220 (struct be_async_event_link_state
*) compl);
221 else if (is_grp5_evt(compl->flags
))
222 be_async_grp5_evt_process(adapter
,
223 compl->flags
, compl);
224 } else if (compl->flags
& CQE_FLAGS_COMPLETED_MASK
) {
225 *status
= be_mcc_compl_process(adapter
, compl);
226 atomic_dec(&mcc_obj
->q
.used
);
228 be_mcc_compl_use(compl);
232 spin_unlock_bh(&adapter
->mcc_cq_lock
);
236 /* Wait till no more pending mcc requests are present */
237 static int be_mcc_wait_compl(struct be_adapter
*adapter
)
239 #define mcc_timeout 120000 /* 12s timeout */
240 int i
, num
, status
= 0;
241 struct be_mcc_obj
*mcc_obj
= &adapter
->mcc_obj
;
243 if (adapter
->eeh_err
)
246 for (i
= 0; i
< mcc_timeout
; i
++) {
247 num
= be_process_mcc(adapter
, &status
);
249 be_cq_notify(adapter
, mcc_obj
->cq
.id
,
250 mcc_obj
->rearm_cq
, num
);
252 if (atomic_read(&mcc_obj
->q
.used
) == 0)
256 if (i
== mcc_timeout
) {
257 dev_err(&adapter
->pdev
->dev
, "mccq poll timed out\n");
/* Notify MCC requests and wait for completion.
 * Returns the completion status from be_mcc_wait_compl(). */
static int be_mcc_notify_wait(struct be_adapter *adapter)
{
	be_mcc_notify(adapter);
	return be_mcc_wait_compl(adapter);
}
270 static int be_mbox_db_ready_wait(struct be_adapter
*adapter
, void __iomem
*db
)
275 if (adapter
->eeh_err
) {
276 dev_err(&adapter
->pdev
->dev
,
277 "Error detected in card.Cannot issue commands\n");
282 ready
= ioread32(db
);
283 if (ready
== 0xffffffff) {
284 dev_err(&adapter
->pdev
->dev
,
285 "pci slot disconnected\n");
289 ready
&= MPU_MAILBOX_DB_RDY_MASK
;
294 dev_err(&adapter
->pdev
->dev
, "mbox poll timed out\n");
295 be_detect_dump_ue(adapter
);
299 set_current_state(TASK_INTERRUPTIBLE
);
300 schedule_timeout(msecs_to_jiffies(1));
308 * Insert the mailbox address into the doorbell in two steps
309 * Polls on the mbox doorbell till a command completion (or a timeout) occurs
311 static int be_mbox_notify_wait(struct be_adapter
*adapter
)
315 void __iomem
*db
= adapter
->db
+ MPU_MAILBOX_DB_OFFSET
;
316 struct be_dma_mem
*mbox_mem
= &adapter
->mbox_mem
;
317 struct be_mcc_mailbox
*mbox
= mbox_mem
->va
;
318 struct be_mcc_compl
*compl = &mbox
->compl;
320 /* wait for ready to be set */
321 status
= be_mbox_db_ready_wait(adapter
, db
);
325 val
|= MPU_MAILBOX_DB_HI_MASK
;
326 /* at bits 2 - 31 place mbox dma addr msb bits 34 - 63 */
327 val
|= (upper_32_bits(mbox_mem
->dma
) >> 2) << 2;
330 /* wait for ready to be set */
331 status
= be_mbox_db_ready_wait(adapter
, db
);
336 /* at bits 2 - 31 place mbox dma addr lsb bits 4 - 33 */
337 val
|= (u32
)(mbox_mem
->dma
>> 4) << 2;
340 status
= be_mbox_db_ready_wait(adapter
, db
);
344 /* A cq entry has been made now */
345 if (be_mcc_compl_is_new(compl)) {
346 status
= be_mcc_compl_process(adapter
, &mbox
->compl);
347 be_mcc_compl_use(compl);
351 dev_err(&adapter
->pdev
->dev
, "invalid mailbox completion\n");
357 static int be_POST_stage_get(struct be_adapter
*adapter
, u16
*stage
)
361 if (lancer_chip(adapter
))
362 sem
= ioread32(adapter
->db
+ MPU_EP_SEMAPHORE_IF_TYPE2_OFFSET
);
364 sem
= ioread32(adapter
->csr
+ MPU_EP_SEMAPHORE_OFFSET
);
366 *stage
= sem
& EP_SEMAPHORE_POST_STAGE_MASK
;
367 if ((sem
>> EP_SEMAPHORE_POST_ERR_SHIFT
) & EP_SEMAPHORE_POST_ERR_MASK
)
373 int be_cmd_POST(struct be_adapter
*adapter
)
376 int status
, timeout
= 0;
379 status
= be_POST_stage_get(adapter
, &stage
);
381 dev_err(&adapter
->pdev
->dev
, "POST error; stage=0x%x\n",
384 } else if (stage
!= POST_STAGE_ARMFW_RDY
) {
385 set_current_state(TASK_INTERRUPTIBLE
);
386 schedule_timeout(2 * HZ
);
391 } while (timeout
< 40);
393 dev_err(&adapter
->pdev
->dev
, "POST timeout; stage=0x%x\n", stage
);
397 static inline void *embedded_payload(struct be_mcc_wrb
*wrb
)
399 return wrb
->payload
.embedded_payload
;
402 static inline struct be_sge
*nonembedded_sgl(struct be_mcc_wrb
*wrb
)
404 return &wrb
->payload
.sgl
[0];
407 /* Don't touch the hdr after it's prepared */
408 static void be_wrb_hdr_prepare(struct be_mcc_wrb
*wrb
, int payload_len
,
409 bool embedded
, u8 sge_cnt
, u32 opcode
)
412 wrb
->embedded
|= MCC_WRB_EMBEDDED_MASK
;
414 wrb
->embedded
|= (sge_cnt
& MCC_WRB_SGE_CNT_MASK
) <<
415 MCC_WRB_SGE_CNT_SHIFT
;
416 wrb
->payload_length
= payload_len
;
418 be_dws_cpu_to_le(wrb
, 8);
421 /* Don't touch the hdr after it's prepared */
422 static void be_cmd_hdr_prepare(struct be_cmd_req_hdr
*req_hdr
,
423 u8 subsystem
, u8 opcode
, int cmd_len
)
425 req_hdr
->opcode
= opcode
;
426 req_hdr
->subsystem
= subsystem
;
427 req_hdr
->request_length
= cpu_to_le32(cmd_len
- sizeof(*req_hdr
));
428 req_hdr
->version
= 0;
431 static void be_cmd_page_addrs_prepare(struct phys_addr
*pages
, u32 max_pages
,
432 struct be_dma_mem
*mem
)
434 int i
, buf_pages
= min(PAGES_4K_SPANNED(mem
->va
, mem
->size
), max_pages
);
435 u64 dma
= (u64
)mem
->dma
;
437 for (i
= 0; i
< buf_pages
; i
++) {
438 pages
[i
].lo
= cpu_to_le32(dma
& 0xFFFFFFFF);
439 pages
[i
].hi
= cpu_to_le32(upper_32_bits(dma
));
444 /* Converts interrupt delay in microseconds to multiplier value */
445 static u32
eq_delay_to_mult(u32 usec_delay
)
447 #define MAX_INTR_RATE 651042
448 const u32 round
= 10;
454 u32 interrupt_rate
= 1000000 / usec_delay
;
455 /* Max delay, corresponding to the lowest interrupt rate */
456 if (interrupt_rate
== 0)
459 multiplier
= (MAX_INTR_RATE
- interrupt_rate
) * round
;
460 multiplier
/= interrupt_rate
;
461 /* Round the multiplier to the closest value.*/
462 multiplier
= (multiplier
+ round
/2) / round
;
463 multiplier
= min(multiplier
, (u32
)1023);
469 static inline struct be_mcc_wrb
*wrb_from_mbox(struct be_adapter
*adapter
)
471 struct be_dma_mem
*mbox_mem
= &adapter
->mbox_mem
;
472 struct be_mcc_wrb
*wrb
473 = &((struct be_mcc_mailbox
*)(mbox_mem
->va
))->wrb
;
474 memset(wrb
, 0, sizeof(*wrb
));
478 static struct be_mcc_wrb
*wrb_from_mccq(struct be_adapter
*adapter
)
480 struct be_queue_info
*mccq
= &adapter
->mcc_obj
.q
;
481 struct be_mcc_wrb
*wrb
;
483 if (atomic_read(&mccq
->used
) >= mccq
->len
) {
484 dev_err(&adapter
->pdev
->dev
, "Out of MCCQ wrbs\n");
488 wrb
= queue_head_node(mccq
);
489 queue_head_inc(mccq
);
490 atomic_inc(&mccq
->used
);
491 memset(wrb
, 0, sizeof(*wrb
));
495 /* Tell fw we're about to start firing cmds by writing a
496 * special pattern across the wrb hdr; uses mbox
498 int be_cmd_fw_init(struct be_adapter
*adapter
)
503 if (mutex_lock_interruptible(&adapter
->mbox_lock
))
506 wrb
= (u8
*)wrb_from_mbox(adapter
);
516 status
= be_mbox_notify_wait(adapter
);
518 mutex_unlock(&adapter
->mbox_lock
);
522 /* Tell fw we're done with firing cmds by writing a
523 * special pattern across the wrb hdr; uses mbox
525 int be_cmd_fw_clean(struct be_adapter
*adapter
)
530 if (adapter
->eeh_err
)
533 if (mutex_lock_interruptible(&adapter
->mbox_lock
))
536 wrb
= (u8
*)wrb_from_mbox(adapter
);
546 status
= be_mbox_notify_wait(adapter
);
548 mutex_unlock(&adapter
->mbox_lock
);
551 int be_cmd_eq_create(struct be_adapter
*adapter
,
552 struct be_queue_info
*eq
, int eq_delay
)
554 struct be_mcc_wrb
*wrb
;
555 struct be_cmd_req_eq_create
*req
;
556 struct be_dma_mem
*q_mem
= &eq
->dma_mem
;
559 if (mutex_lock_interruptible(&adapter
->mbox_lock
))
562 wrb
= wrb_from_mbox(adapter
);
563 req
= embedded_payload(wrb
);
565 be_wrb_hdr_prepare(wrb
, sizeof(*req
), true, 0, OPCODE_COMMON_EQ_CREATE
);
567 be_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_COMMON
,
568 OPCODE_COMMON_EQ_CREATE
, sizeof(*req
));
570 req
->num_pages
= cpu_to_le16(PAGES_4K_SPANNED(q_mem
->va
, q_mem
->size
));
572 AMAP_SET_BITS(struct amap_eq_context
, valid
, req
->context
, 1);
574 AMAP_SET_BITS(struct amap_eq_context
, size
, req
->context
, 0);
575 AMAP_SET_BITS(struct amap_eq_context
, count
, req
->context
,
576 __ilog2_u32(eq
->len
/256));
577 AMAP_SET_BITS(struct amap_eq_context
, delaymult
, req
->context
,
578 eq_delay_to_mult(eq_delay
));
579 be_dws_cpu_to_le(req
->context
, sizeof(req
->context
));
581 be_cmd_page_addrs_prepare(req
->pages
, ARRAY_SIZE(req
->pages
), q_mem
);
583 status
= be_mbox_notify_wait(adapter
);
585 struct be_cmd_resp_eq_create
*resp
= embedded_payload(wrb
);
586 eq
->id
= le16_to_cpu(resp
->eq_id
);
590 mutex_unlock(&adapter
->mbox_lock
);
595 int be_cmd_mac_addr_query(struct be_adapter
*adapter
, u8
*mac_addr
,
596 u8 type
, bool permanent
, u32 if_handle
)
598 struct be_mcc_wrb
*wrb
;
599 struct be_cmd_req_mac_query
*req
;
602 if (mutex_lock_interruptible(&adapter
->mbox_lock
))
605 wrb
= wrb_from_mbox(adapter
);
606 req
= embedded_payload(wrb
);
608 be_wrb_hdr_prepare(wrb
, sizeof(*req
), true, 0,
609 OPCODE_COMMON_NTWK_MAC_QUERY
);
611 be_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_COMMON
,
612 OPCODE_COMMON_NTWK_MAC_QUERY
, sizeof(*req
));
618 req
->if_id
= cpu_to_le16((u16
) if_handle
);
622 status
= be_mbox_notify_wait(adapter
);
624 struct be_cmd_resp_mac_query
*resp
= embedded_payload(wrb
);
625 memcpy(mac_addr
, resp
->mac
.addr
, ETH_ALEN
);
628 mutex_unlock(&adapter
->mbox_lock
);
632 /* Uses synchronous MCCQ */
633 int be_cmd_pmac_add(struct be_adapter
*adapter
, u8
*mac_addr
,
634 u32 if_id
, u32
*pmac_id
, u32 domain
)
636 struct be_mcc_wrb
*wrb
;
637 struct be_cmd_req_pmac_add
*req
;
640 spin_lock_bh(&adapter
->mcc_lock
);
642 wrb
= wrb_from_mccq(adapter
);
647 req
= embedded_payload(wrb
);
649 be_wrb_hdr_prepare(wrb
, sizeof(*req
), true, 0,
650 OPCODE_COMMON_NTWK_PMAC_ADD
);
652 be_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_COMMON
,
653 OPCODE_COMMON_NTWK_PMAC_ADD
, sizeof(*req
));
655 req
->hdr
.domain
= domain
;
656 req
->if_id
= cpu_to_le32(if_id
);
657 memcpy(req
->mac_address
, mac_addr
, ETH_ALEN
);
659 status
= be_mcc_notify_wait(adapter
);
661 struct be_cmd_resp_pmac_add
*resp
= embedded_payload(wrb
);
662 *pmac_id
= le32_to_cpu(resp
->pmac_id
);
666 spin_unlock_bh(&adapter
->mcc_lock
);
670 /* Uses synchronous MCCQ */
671 int be_cmd_pmac_del(struct be_adapter
*adapter
, u32 if_id
, u32 pmac_id
, u32 dom
)
673 struct be_mcc_wrb
*wrb
;
674 struct be_cmd_req_pmac_del
*req
;
677 spin_lock_bh(&adapter
->mcc_lock
);
679 wrb
= wrb_from_mccq(adapter
);
684 req
= embedded_payload(wrb
);
686 be_wrb_hdr_prepare(wrb
, sizeof(*req
), true, 0,
687 OPCODE_COMMON_NTWK_PMAC_DEL
);
689 be_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_COMMON
,
690 OPCODE_COMMON_NTWK_PMAC_DEL
, sizeof(*req
));
692 req
->hdr
.domain
= dom
;
693 req
->if_id
= cpu_to_le32(if_id
);
694 req
->pmac_id
= cpu_to_le32(pmac_id
);
696 status
= be_mcc_notify_wait(adapter
);
699 spin_unlock_bh(&adapter
->mcc_lock
);
704 int be_cmd_cq_create(struct be_adapter
*adapter
,
705 struct be_queue_info
*cq
, struct be_queue_info
*eq
,
706 bool sol_evts
, bool no_delay
, int coalesce_wm
)
708 struct be_mcc_wrb
*wrb
;
709 struct be_cmd_req_cq_create
*req
;
710 struct be_dma_mem
*q_mem
= &cq
->dma_mem
;
714 if (mutex_lock_interruptible(&adapter
->mbox_lock
))
717 wrb
= wrb_from_mbox(adapter
);
718 req
= embedded_payload(wrb
);
719 ctxt
= &req
->context
;
721 be_wrb_hdr_prepare(wrb
, sizeof(*req
), true, 0,
722 OPCODE_COMMON_CQ_CREATE
);
724 be_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_COMMON
,
725 OPCODE_COMMON_CQ_CREATE
, sizeof(*req
));
727 req
->num_pages
= cpu_to_le16(PAGES_4K_SPANNED(q_mem
->va
, q_mem
->size
));
728 if (lancer_chip(adapter
)) {
729 req
->hdr
.version
= 1;
730 req
->page_size
= 1; /* 1 for 4K */
731 AMAP_SET_BITS(struct amap_cq_context_lancer
, coalescwm
, ctxt
,
733 AMAP_SET_BITS(struct amap_cq_context_lancer
, nodelay
, ctxt
,
735 AMAP_SET_BITS(struct amap_cq_context_lancer
, count
, ctxt
,
736 __ilog2_u32(cq
->len
/256));
737 AMAP_SET_BITS(struct amap_cq_context_lancer
, valid
, ctxt
, 1);
738 AMAP_SET_BITS(struct amap_cq_context_lancer
, eventable
,
740 AMAP_SET_BITS(struct amap_cq_context_lancer
, eqid
,
742 AMAP_SET_BITS(struct amap_cq_context_lancer
, armed
, ctxt
, 1);
744 AMAP_SET_BITS(struct amap_cq_context_be
, coalescwm
, ctxt
,
746 AMAP_SET_BITS(struct amap_cq_context_be
, nodelay
,
748 AMAP_SET_BITS(struct amap_cq_context_be
, count
, ctxt
,
749 __ilog2_u32(cq
->len
/256));
750 AMAP_SET_BITS(struct amap_cq_context_be
, valid
, ctxt
, 1);
751 AMAP_SET_BITS(struct amap_cq_context_be
, solevent
,
753 AMAP_SET_BITS(struct amap_cq_context_be
, eventable
, ctxt
, 1);
754 AMAP_SET_BITS(struct amap_cq_context_be
, eqid
, ctxt
, eq
->id
);
755 AMAP_SET_BITS(struct amap_cq_context_be
, armed
, ctxt
, 1);
758 be_dws_cpu_to_le(ctxt
, sizeof(req
->context
));
760 be_cmd_page_addrs_prepare(req
->pages
, ARRAY_SIZE(req
->pages
), q_mem
);
762 status
= be_mbox_notify_wait(adapter
);
764 struct be_cmd_resp_cq_create
*resp
= embedded_payload(wrb
);
765 cq
->id
= le16_to_cpu(resp
->cq_id
);
769 mutex_unlock(&adapter
->mbox_lock
);
774 static u32
be_encoded_q_len(int q_len
)
776 u32 len_encoded
= fls(q_len
); /* log2(len) + 1 */
777 if (len_encoded
== 16)
782 int be_cmd_mccq_create(struct be_adapter
*adapter
,
783 struct be_queue_info
*mccq
,
784 struct be_queue_info
*cq
)
786 struct be_mcc_wrb
*wrb
;
787 struct be_cmd_req_mcc_create
*req
;
788 struct be_dma_mem
*q_mem
= &mccq
->dma_mem
;
792 if (mutex_lock_interruptible(&adapter
->mbox_lock
))
795 wrb
= wrb_from_mbox(adapter
);
796 req
= embedded_payload(wrb
);
797 ctxt
= &req
->context
;
799 be_wrb_hdr_prepare(wrb
, sizeof(*req
), true, 0,
800 OPCODE_COMMON_MCC_CREATE_EXT
);
802 be_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_COMMON
,
803 OPCODE_COMMON_MCC_CREATE_EXT
, sizeof(*req
));
805 req
->num_pages
= cpu_to_le16(PAGES_4K_SPANNED(q_mem
->va
, q_mem
->size
));
806 if (lancer_chip(adapter
)) {
807 req
->hdr
.version
= 1;
808 req
->cq_id
= cpu_to_le16(cq
->id
);
810 AMAP_SET_BITS(struct amap_mcc_context_lancer
, ring_size
, ctxt
,
811 be_encoded_q_len(mccq
->len
));
812 AMAP_SET_BITS(struct amap_mcc_context_lancer
, valid
, ctxt
, 1);
813 AMAP_SET_BITS(struct amap_mcc_context_lancer
, async_cq_id
,
815 AMAP_SET_BITS(struct amap_mcc_context_lancer
, async_cq_valid
,
819 AMAP_SET_BITS(struct amap_mcc_context_be
, valid
, ctxt
, 1);
820 AMAP_SET_BITS(struct amap_mcc_context_be
, ring_size
, ctxt
,
821 be_encoded_q_len(mccq
->len
));
822 AMAP_SET_BITS(struct amap_mcc_context_be
, cq_id
, ctxt
, cq
->id
);
825 /* Subscribe to Link State and Group 5 Events(bits 1 and 5 set) */
826 req
->async_event_bitmap
[0] = cpu_to_le32(0x00000022);
827 be_dws_cpu_to_le(ctxt
, sizeof(req
->context
));
829 be_cmd_page_addrs_prepare(req
->pages
, ARRAY_SIZE(req
->pages
), q_mem
);
831 status
= be_mbox_notify_wait(adapter
);
833 struct be_cmd_resp_mcc_create
*resp
= embedded_payload(wrb
);
834 mccq
->id
= le16_to_cpu(resp
->id
);
835 mccq
->created
= true;
837 mutex_unlock(&adapter
->mbox_lock
);
842 int be_cmd_txq_create(struct be_adapter
*adapter
,
843 struct be_queue_info
*txq
,
844 struct be_queue_info
*cq
)
846 struct be_mcc_wrb
*wrb
;
847 struct be_cmd_req_eth_tx_create
*req
;
848 struct be_dma_mem
*q_mem
= &txq
->dma_mem
;
852 if (mutex_lock_interruptible(&adapter
->mbox_lock
))
855 wrb
= wrb_from_mbox(adapter
);
856 req
= embedded_payload(wrb
);
857 ctxt
= &req
->context
;
859 be_wrb_hdr_prepare(wrb
, sizeof(*req
), true, 0,
860 OPCODE_ETH_TX_CREATE
);
862 be_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_ETH
, OPCODE_ETH_TX_CREATE
,
865 req
->num_pages
= PAGES_4K_SPANNED(q_mem
->va
, q_mem
->size
);
866 req
->ulp_num
= BE_ULP1_NUM
;
867 req
->type
= BE_ETH_TX_RING_TYPE_STANDARD
;
869 AMAP_SET_BITS(struct amap_tx_context
, tx_ring_size
, ctxt
,
870 be_encoded_q_len(txq
->len
));
871 AMAP_SET_BITS(struct amap_tx_context
, ctx_valid
, ctxt
, 1);
872 AMAP_SET_BITS(struct amap_tx_context
, cq_id_send
, ctxt
, cq
->id
);
874 be_dws_cpu_to_le(ctxt
, sizeof(req
->context
));
876 be_cmd_page_addrs_prepare(req
->pages
, ARRAY_SIZE(req
->pages
), q_mem
);
878 status
= be_mbox_notify_wait(adapter
);
880 struct be_cmd_resp_eth_tx_create
*resp
= embedded_payload(wrb
);
881 txq
->id
= le16_to_cpu(resp
->cid
);
885 mutex_unlock(&adapter
->mbox_lock
);
891 int be_cmd_rxq_create(struct be_adapter
*adapter
,
892 struct be_queue_info
*rxq
, u16 cq_id
, u16 frag_size
,
893 u16 max_frame_size
, u32 if_id
, u32 rss
, u8
*rss_id
)
895 struct be_mcc_wrb
*wrb
;
896 struct be_cmd_req_eth_rx_create
*req
;
897 struct be_dma_mem
*q_mem
= &rxq
->dma_mem
;
900 if (mutex_lock_interruptible(&adapter
->mbox_lock
))
903 wrb
= wrb_from_mbox(adapter
);
904 req
= embedded_payload(wrb
);
906 be_wrb_hdr_prepare(wrb
, sizeof(*req
), true, 0,
907 OPCODE_ETH_RX_CREATE
);
909 be_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_ETH
, OPCODE_ETH_RX_CREATE
,
912 req
->cq_id
= cpu_to_le16(cq_id
);
913 req
->frag_size
= fls(frag_size
) - 1;
915 be_cmd_page_addrs_prepare(req
->pages
, ARRAY_SIZE(req
->pages
), q_mem
);
916 req
->interface_id
= cpu_to_le32(if_id
);
917 req
->max_frame_size
= cpu_to_le16(max_frame_size
);
918 req
->rss_queue
= cpu_to_le32(rss
);
920 status
= be_mbox_notify_wait(adapter
);
922 struct be_cmd_resp_eth_rx_create
*resp
= embedded_payload(wrb
);
923 rxq
->id
= le16_to_cpu(resp
->id
);
925 *rss_id
= resp
->rss_id
;
928 mutex_unlock(&adapter
->mbox_lock
);
933 /* Generic destroyer function for all types of queues
936 int be_cmd_q_destroy(struct be_adapter
*adapter
, struct be_queue_info
*q
,
939 struct be_mcc_wrb
*wrb
;
940 struct be_cmd_req_q_destroy
*req
;
941 u8 subsys
= 0, opcode
= 0;
944 if (adapter
->eeh_err
)
947 if (mutex_lock_interruptible(&adapter
->mbox_lock
))
950 wrb
= wrb_from_mbox(adapter
);
951 req
= embedded_payload(wrb
);
953 switch (queue_type
) {
955 subsys
= CMD_SUBSYSTEM_COMMON
;
956 opcode
= OPCODE_COMMON_EQ_DESTROY
;
959 subsys
= CMD_SUBSYSTEM_COMMON
;
960 opcode
= OPCODE_COMMON_CQ_DESTROY
;
963 subsys
= CMD_SUBSYSTEM_ETH
;
964 opcode
= OPCODE_ETH_TX_DESTROY
;
967 subsys
= CMD_SUBSYSTEM_ETH
;
968 opcode
= OPCODE_ETH_RX_DESTROY
;
971 subsys
= CMD_SUBSYSTEM_COMMON
;
972 opcode
= OPCODE_COMMON_MCC_DESTROY
;
978 be_wrb_hdr_prepare(wrb
, sizeof(*req
), true, 0, opcode
);
980 be_cmd_hdr_prepare(&req
->hdr
, subsys
, opcode
, sizeof(*req
));
981 req
->id
= cpu_to_le16(q
->id
);
983 status
= be_mbox_notify_wait(adapter
);
985 mutex_unlock(&adapter
->mbox_lock
);
990 /* Create an rx filtering policy configuration on an i/f
993 int be_cmd_if_create(struct be_adapter
*adapter
, u32 cap_flags
, u32 en_flags
,
994 u8
*mac
, bool pmac_invalid
, u32
*if_handle
, u32
*pmac_id
,
997 struct be_mcc_wrb
*wrb
;
998 struct be_cmd_req_if_create
*req
;
1001 if (mutex_lock_interruptible(&adapter
->mbox_lock
))
1004 wrb
= wrb_from_mbox(adapter
);
1005 req
= embedded_payload(wrb
);
1007 be_wrb_hdr_prepare(wrb
, sizeof(*req
), true, 0,
1008 OPCODE_COMMON_NTWK_INTERFACE_CREATE
);
1010 be_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_COMMON
,
1011 OPCODE_COMMON_NTWK_INTERFACE_CREATE
, sizeof(*req
));
1013 req
->hdr
.domain
= domain
;
1014 req
->capability_flags
= cpu_to_le32(cap_flags
);
1015 req
->enable_flags
= cpu_to_le32(en_flags
);
1016 req
->pmac_invalid
= pmac_invalid
;
1018 memcpy(req
->mac_addr
, mac
, ETH_ALEN
);
1020 status
= be_mbox_notify_wait(adapter
);
1022 struct be_cmd_resp_if_create
*resp
= embedded_payload(wrb
);
1023 *if_handle
= le32_to_cpu(resp
->interface_id
);
1025 *pmac_id
= le32_to_cpu(resp
->pmac_id
);
1028 mutex_unlock(&adapter
->mbox_lock
);
1033 int be_cmd_if_destroy(struct be_adapter
*adapter
, u32 interface_id
, u32 domain
)
1035 struct be_mcc_wrb
*wrb
;
1036 struct be_cmd_req_if_destroy
*req
;
1039 if (adapter
->eeh_err
)
1042 if (mutex_lock_interruptible(&adapter
->mbox_lock
))
1045 wrb
= wrb_from_mbox(adapter
);
1046 req
= embedded_payload(wrb
);
1048 be_wrb_hdr_prepare(wrb
, sizeof(*req
), true, 0,
1049 OPCODE_COMMON_NTWK_INTERFACE_DESTROY
);
1051 be_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_COMMON
,
1052 OPCODE_COMMON_NTWK_INTERFACE_DESTROY
, sizeof(*req
));
1054 req
->hdr
.domain
= domain
;
1055 req
->interface_id
= cpu_to_le32(interface_id
);
1057 status
= be_mbox_notify_wait(adapter
);
1059 mutex_unlock(&adapter
->mbox_lock
);
1064 /* Get stats is a non embedded command: the request is not embedded inside
1065 * WRB but is a separate dma memory block
1066 * Uses asynchronous MCC
1068 int be_cmd_get_stats(struct be_adapter
*adapter
, struct be_dma_mem
*nonemb_cmd
)
1070 struct be_mcc_wrb
*wrb
;
1071 struct be_cmd_req_get_stats
*req
;
1075 if (MODULO(adapter
->work_counter
, be_get_temp_freq
) == 0)
1076 be_cmd_get_die_temperature(adapter
);
1078 spin_lock_bh(&adapter
->mcc_lock
);
1080 wrb
= wrb_from_mccq(adapter
);
1085 req
= nonemb_cmd
->va
;
1086 sge
= nonembedded_sgl(wrb
);
1088 be_wrb_hdr_prepare(wrb
, sizeof(*req
), false, 1,
1089 OPCODE_ETH_GET_STATISTICS
);
1091 be_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_ETH
,
1092 OPCODE_ETH_GET_STATISTICS
, sizeof(*req
));
1093 sge
->pa_hi
= cpu_to_le32(upper_32_bits(nonemb_cmd
->dma
));
1094 sge
->pa_lo
= cpu_to_le32(nonemb_cmd
->dma
& 0xFFFFFFFF);
1095 sge
->len
= cpu_to_le32(nonemb_cmd
->size
);
1097 be_mcc_notify(adapter
);
1098 adapter
->stats_cmd_sent
= true;
1101 spin_unlock_bh(&adapter
->mcc_lock
);
1105 /* Uses synchronous mcc */
1106 int be_cmd_link_status_query(struct be_adapter
*adapter
,
1107 bool *link_up
, u8
*mac_speed
, u16
*link_speed
)
1109 struct be_mcc_wrb
*wrb
;
1110 struct be_cmd_req_link_status
*req
;
1113 spin_lock_bh(&adapter
->mcc_lock
);
1115 wrb
= wrb_from_mccq(adapter
);
1120 req
= embedded_payload(wrb
);
1124 be_wrb_hdr_prepare(wrb
, sizeof(*req
), true, 0,
1125 OPCODE_COMMON_NTWK_LINK_STATUS_QUERY
);
1127 be_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_COMMON
,
1128 OPCODE_COMMON_NTWK_LINK_STATUS_QUERY
, sizeof(*req
));
1130 status
= be_mcc_notify_wait(adapter
);
1132 struct be_cmd_resp_link_status
*resp
= embedded_payload(wrb
);
1133 if (resp
->mac_speed
!= PHY_LINK_SPEED_ZERO
) {
1135 *link_speed
= le16_to_cpu(resp
->link_speed
);
1136 *mac_speed
= resp
->mac_speed
;
1141 spin_unlock_bh(&adapter
->mcc_lock
);
1145 /* Uses synchronous mcc */
1146 int be_cmd_get_die_temperature(struct be_adapter
*adapter
)
1148 struct be_mcc_wrb
*wrb
;
1149 struct be_cmd_req_get_cntl_addnl_attribs
*req
;
1152 spin_lock_bh(&adapter
->mcc_lock
);
1154 wrb
= wrb_from_mccq(adapter
);
1159 req
= embedded_payload(wrb
);
1161 be_wrb_hdr_prepare(wrb
, sizeof(*req
), true, 0,
1162 OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES
);
1164 be_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_COMMON
,
1165 OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES
, sizeof(*req
));
1167 status
= be_mcc_notify_wait(adapter
);
1169 struct be_cmd_resp_get_cntl_addnl_attribs
*resp
=
1170 embedded_payload(wrb
);
1171 adapter
->drv_stats
.be_on_die_temperature
=
1172 resp
->on_die_temperature
;
1174 /* If IOCTL fails once, do not bother issuing it again */
1176 be_get_temp_freq
= 0;
1179 spin_unlock_bh(&adapter
->mcc_lock
);
1184 int be_cmd_get_fw_ver(struct be_adapter
*adapter
, char *fw_ver
)
1186 struct be_mcc_wrb
*wrb
;
1187 struct be_cmd_req_get_fw_version
*req
;
1190 if (mutex_lock_interruptible(&adapter
->mbox_lock
))
1193 wrb
= wrb_from_mbox(adapter
);
1194 req
= embedded_payload(wrb
);
1196 be_wrb_hdr_prepare(wrb
, sizeof(*req
), true, 0,
1197 OPCODE_COMMON_GET_FW_VERSION
);
1199 be_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_COMMON
,
1200 OPCODE_COMMON_GET_FW_VERSION
, sizeof(*req
));
1202 status
= be_mbox_notify_wait(adapter
);
1204 struct be_cmd_resp_get_fw_version
*resp
= embedded_payload(wrb
);
1205 strncpy(fw_ver
, resp
->firmware_version_string
, FW_VER_LEN
);
1208 mutex_unlock(&adapter
->mbox_lock
);
1212 /* set the EQ delay interval of an EQ to specified value
1215 int be_cmd_modify_eqd(struct be_adapter
*adapter
, u32 eq_id
, u32 eqd
)
1217 struct be_mcc_wrb
*wrb
;
1218 struct be_cmd_req_modify_eq_delay
*req
;
1221 spin_lock_bh(&adapter
->mcc_lock
);
1223 wrb
= wrb_from_mccq(adapter
);
1228 req
= embedded_payload(wrb
);
1230 be_wrb_hdr_prepare(wrb
, sizeof(*req
), true, 0,
1231 OPCODE_COMMON_MODIFY_EQ_DELAY
);
1233 be_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_COMMON
,
1234 OPCODE_COMMON_MODIFY_EQ_DELAY
, sizeof(*req
));
1236 req
->num_eq
= cpu_to_le32(1);
1237 req
->delay
[0].eq_id
= cpu_to_le32(eq_id
);
1238 req
->delay
[0].phase
= 0;
1239 req
->delay
[0].delay_multiplier
= cpu_to_le32(eqd
);
1241 be_mcc_notify(adapter
);
1244 spin_unlock_bh(&adapter
->mcc_lock
);
1248 /* Uses sycnhronous mcc */
1249 int be_cmd_vlan_config(struct be_adapter
*adapter
, u32 if_id
, u16
*vtag_array
,
1250 u32 num
, bool untagged
, bool promiscuous
)
1252 struct be_mcc_wrb
*wrb
;
1253 struct be_cmd_req_vlan_config
*req
;
1256 spin_lock_bh(&adapter
->mcc_lock
);
1258 wrb
= wrb_from_mccq(adapter
);
1263 req
= embedded_payload(wrb
);
1265 be_wrb_hdr_prepare(wrb
, sizeof(*req
), true, 0,
1266 OPCODE_COMMON_NTWK_VLAN_CONFIG
);
1268 be_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_COMMON
,
1269 OPCODE_COMMON_NTWK_VLAN_CONFIG
, sizeof(*req
));
1271 req
->interface_id
= if_id
;
1272 req
->promiscuous
= promiscuous
;
1273 req
->untagged
= untagged
;
1274 req
->num_vlan
= num
;
1276 memcpy(req
->normal_vlan
, vtag_array
,
1277 req
->num_vlan
* sizeof(vtag_array
[0]));
1280 status
= be_mcc_notify_wait(adapter
);
1283 spin_unlock_bh(&adapter
->mcc_lock
);
1287 /* Uses MCC for this command as it may be called in BH context
1288 * Uses synchronous mcc
1290 int be_cmd_promiscuous_config(struct be_adapter
*adapter
, u8 port_num
, bool en
)
1292 struct be_mcc_wrb
*wrb
;
1293 struct be_cmd_req_promiscuous_config
*req
;
1296 spin_lock_bh(&adapter
->mcc_lock
);
1298 wrb
= wrb_from_mccq(adapter
);
1303 req
= embedded_payload(wrb
);
1305 be_wrb_hdr_prepare(wrb
, sizeof(*req
), true, 0, OPCODE_ETH_PROMISCUOUS
);
1307 be_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_ETH
,
1308 OPCODE_ETH_PROMISCUOUS
, sizeof(*req
));
1310 /* In FW versions X.102.149/X.101.487 and later,
1311 * the port setting associated only with the
1312 * issuing pci function will take effect
1315 req
->port1_promiscuous
= en
;
1317 req
->port0_promiscuous
= en
;
1319 status
= be_mcc_notify_wait(adapter
);
1322 spin_unlock_bh(&adapter
->mcc_lock
);
/*
 * be_cmd_multicast_set() - program the interface's multicast MAC filter list
 * from the netdev's mc list, or enable multicast-promiscuous mode.
 * The request is built in caller-supplied DMA memory (@mem) and attached to
 * the WRB as a single non-embedded scatter/gather entry.
 * NOTE(review): extraction drops lines here (numbering jumps, e.g.
 * 1357 -> 1360); the branch choosing between the mc-list path and the
 * promiscuous path, plus the declaration of 'i', appear to be missing
 * -- confirm against the full file.
 */
1327 * Uses MCC for this command as it may be called in BH context
1328 * (mc == NULL) => multicast promiscuous
1330 int be_cmd_multicast_set(struct be_adapter
*adapter
, u32 if_id
,
1331 struct net_device
*netdev
, struct be_dma_mem
*mem
)
1333 struct be_mcc_wrb
*wrb
;
1334 struct be_cmd_req_mcast_mac_config
*req
= mem
->va
;
1338 spin_lock_bh(&adapter
->mcc_lock
);
1340 wrb
= wrb_from_mccq(adapter
);
1345 sge
= nonembedded_sgl(wrb
);
/* Zero the whole request so unused MAC slots/flags are cleared. */
1346 memset(req
, 0, sizeof(*req
));
1348 be_wrb_hdr_prepare(wrb
, sizeof(*req
), false, 1,
1349 OPCODE_COMMON_NTWK_MULTICAST_SET
);
/* Point the SGE at the DMA buffer holding the request. */
1350 sge
->pa_hi
= cpu_to_le32(upper_32_bits(mem
->dma
));
1351 sge
->pa_lo
= cpu_to_le32(mem
->dma
& 0xFFFFFFFF);
1352 sge
->len
= cpu_to_le32(mem
->size
);
1354 be_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_COMMON
,
1355 OPCODE_COMMON_NTWK_MULTICAST_SET
, sizeof(*req
));
1357 req
->interface_id
= if_id
;
1360 struct netdev_hw_addr
*ha
;
1362 req
->num_mac
= cpu_to_le16(netdev_mc_count(netdev
));
/* Copy every multicast address registered on the netdev. */
1365 netdev_for_each_mc_addr(ha
, netdev
)
1366 memcpy(req
->mac
[i
++].byte
, ha
->addr
, ETH_ALEN
);
1368 req
->promiscuous
= 1;
1371 status
= be_mcc_notify_wait(adapter
);
1374 spin_unlock_bh(&adapter
->mcc_lock
);
/*
 * be_cmd_set_flow_control() - set TX/RX pause (flow control) in firmware.
 * Embedded request over the MCC queue; waits synchronously for completion.
 * NOTE(review): extraction drops lines (numbering jump 1387 -> 1392);
 * the !wrb error path is presumably missing -- confirm in full file.
 */
1378 /* Uses synchronous mcc */
1379 int be_cmd_set_flow_control(struct be_adapter
*adapter
, u32 tx_fc
, u32 rx_fc
)
1381 struct be_mcc_wrb
*wrb
;
1382 struct be_cmd_req_set_flow_control
*req
;
1385 spin_lock_bh(&adapter
->mcc_lock
);
1387 wrb
= wrb_from_mccq(adapter
);
1392 req
= embedded_payload(wrb
);
1394 be_wrb_hdr_prepare(wrb
, sizeof(*req
), true, 0,
1395 OPCODE_COMMON_SET_FLOW_CONTROL
);
1397 be_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_COMMON
,
1398 OPCODE_COMMON_SET_FLOW_CONTROL
, sizeof(*req
));
/* Firmware fields are 16-bit little-endian; truncate the u32 inputs. */
1400 req
->tx_flow_control
= cpu_to_le16((u16
)tx_fc
);
1401 req
->rx_flow_control
= cpu_to_le16((u16
)rx_fc
);
1403 status
= be_mcc_notify_wait(adapter
);
1406 spin_unlock_bh(&adapter
->mcc_lock
);
/*
 * be_cmd_get_flow_control() - query current TX/RX pause settings.
 * @tx_fc/@rx_fc: out-params receiving the firmware's settings.
 * Embedded request/response over the MCC queue, synchronous.
 * NOTE(review): numbering jump 1432 -> 1434 suggests a dropped
 * success check; the out-params are presumably written only when the
 * command succeeded -- confirm against the full file.
 */
1411 int be_cmd_get_flow_control(struct be_adapter
*adapter
, u32
*tx_fc
, u32
*rx_fc
)
1413 struct be_mcc_wrb
*wrb
;
1414 struct be_cmd_req_get_flow_control
*req
;
1417 spin_lock_bh(&adapter
->mcc_lock
);
1419 wrb
= wrb_from_mccq(adapter
);
1424 req
= embedded_payload(wrb
);
1426 be_wrb_hdr_prepare(wrb
, sizeof(*req
), true, 0,
1427 OPCODE_COMMON_GET_FLOW_CONTROL
);
1429 be_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_COMMON
,
1430 OPCODE_COMMON_GET_FLOW_CONTROL
, sizeof(*req
));
1432 status
= be_mcc_notify_wait(adapter
);
/* The response overlays the same embedded payload area as the request. */
1434 struct be_cmd_resp_get_flow_control
*resp
=
1435 embedded_payload(wrb
);
1436 *tx_fc
= le16_to_cpu(resp
->tx_flow_control
);
1437 *rx_fc
= le16_to_cpu(resp
->rx_flow_control
);
1441 spin_unlock_bh(&adapter
->mcc_lock
);
/*
 * be_cmd_query_fw_cfg() - read firmware configuration: physical port number,
 * function mode, and function capabilities.
 * Uses the mailbox (not MCC), so it takes mbox_lock; lock acquisition is
 * interruptible (presumably returns early if a signal arrives -- the
 * early-return line is dropped from this extraction, confirm in full file).
 */
1446 int be_cmd_query_fw_cfg(struct be_adapter
*adapter
, u32
*port_num
,
1447 u32
*mode
, u32
*caps
)
1449 struct be_mcc_wrb
*wrb
;
1450 struct be_cmd_req_query_fw_cfg
*req
;
1453 if (mutex_lock_interruptible(&adapter
->mbox_lock
))
1456 wrb
= wrb_from_mbox(adapter
);
1457 req
= embedded_payload(wrb
);
1459 be_wrb_hdr_prepare(wrb
, sizeof(*req
), true, 0,
1460 OPCODE_COMMON_QUERY_FIRMWARE_CONFIG
);
1462 be_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_COMMON
,
1463 OPCODE_COMMON_QUERY_FIRMWARE_CONFIG
, sizeof(*req
));
/* Synchronous mailbox post; response lands in the same embedded payload. */
1465 status
= be_mbox_notify_wait(adapter
);
1467 struct be_cmd_resp_query_fw_cfg
*resp
= embedded_payload(wrb
);
1468 *port_num
= le32_to_cpu(resp
->phys_port
);
1469 *mode
= le32_to_cpu(resp
->function_mode
);
1470 *caps
= le32_to_cpu(resp
->function_caps
);
1473 mutex_unlock(&adapter
->mbox_lock
);
/*
 * be_cmd_reset_function() - ask firmware to reset this PCI function.
 * The request is just the bare command header (no body), sent over the
 * mailbox under mbox_lock.
 */
1478 int be_cmd_reset_function(struct be_adapter
*adapter
)
1480 struct be_mcc_wrb
*wrb
;
1481 struct be_cmd_req_hdr
*req
;
1484 if (mutex_lock_interruptible(&adapter
->mbox_lock
))
1487 wrb
= wrb_from_mbox(adapter
);
1488 req
= embedded_payload(wrb
);
1490 be_wrb_hdr_prepare(wrb
, sizeof(*req
), true, 0,
1491 OPCODE_COMMON_FUNCTION_RESET
);
/* req already points at the header, so it is passed directly (no &req->hdr). */
1493 be_cmd_hdr_prepare(req
, CMD_SUBSYSTEM_COMMON
,
1494 OPCODE_COMMON_FUNCTION_RESET
, sizeof(*req
));
1496 status
= be_mbox_notify_wait(adapter
);
1498 mutex_unlock(&adapter
->mbox_lock
);
/*
 * be_cmd_rss_config() - configure receive-side scaling for the interface.
 * @rsstable:   CPU indirection table (table_size bytes).
 * @table_size: number of entries; encoded to firmware as log2 via fls()-1,
 *              so callers presumably pass a power of two -- confirm.
 * Enables RSS for TCP/IPv4 and IPv4 flows only. 'myhash' is a file-scope
 * hash-key table defined elsewhere in this file (not visible in this chunk).
 * Sent over the mailbox under mbox_lock.
 */
1502 int be_cmd_rss_config(struct be_adapter
*adapter
, u8
*rsstable
, u16 table_size
)
1504 struct be_mcc_wrb
*wrb
;
1505 struct be_cmd_req_rss_config
*req
;
1509 if (mutex_lock_interruptible(&adapter
->mbox_lock
))
1512 wrb
= wrb_from_mbox(adapter
);
1513 req
= embedded_payload(wrb
);
1515 be_wrb_hdr_prepare(wrb
, sizeof(*req
), true, 0,
1516 OPCODE_ETH_RSS_CONFIG
);
1518 be_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_ETH
,
1519 OPCODE_ETH_RSS_CONFIG
, sizeof(*req
));
1521 req
->if_id
= cpu_to_le32(adapter
->if_handle
);
1522 req
->enable_rss
= cpu_to_le16(RSS_ENABLE_TCP_IPV4
| RSS_ENABLE_IPV4
);
/* fls(n) - 1 == floor(log2(n)); firmware takes the table size as log2. */
1523 req
->cpu_table_size_log2
= cpu_to_le16(fls(table_size
) - 1);
1524 memcpy(req
->cpu_table
, rsstable
, table_size
);
1525 memcpy(req
->hash
, myhash
, sizeof(myhash
));
/* Hash key must be little-endian dwords on the wire. */
1526 be_dws_cpu_to_le(req
->hash
, sizeof(req
->hash
));
1528 status
= be_mbox_notify_wait(adapter
);
1530 mutex_unlock(&adapter
->mbox_lock
);
/*
 * be_cmd_set_beacon_state() - drive the port identification beacon LED.
 * @bcn:   beacon duration value passed to firmware.
 * @sts:   status duration value passed to firmware.
 * @state: beacon on/off state.
 * Embedded request over the MCC queue, synchronous.
 */
1535 int be_cmd_set_beacon_state(struct be_adapter
*adapter
, u8 port_num
,
1536 u8 bcn
, u8 sts
, u8 state
)
1538 struct be_mcc_wrb
*wrb
;
1539 struct be_cmd_req_enable_disable_beacon
*req
;
1542 spin_lock_bh(&adapter
->mcc_lock
);
1544 wrb
= wrb_from_mccq(adapter
);
1549 req
= embedded_payload(wrb
);
1551 be_wrb_hdr_prepare(wrb
, sizeof(*req
), true, 0,
1552 OPCODE_COMMON_ENABLE_DISABLE_BEACON
);
1554 be_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_COMMON
,
1555 OPCODE_COMMON_ENABLE_DISABLE_BEACON
, sizeof(*req
));
1557 req
->port_num
= port_num
;
1558 req
->beacon_state
= state
;
1559 req
->beacon_duration
= bcn
;
1560 req
->status_duration
= sts
;
1562 status
= be_mcc_notify_wait(adapter
);
1565 spin_unlock_bh(&adapter
->mcc_lock
);
/*
 * be_cmd_get_beacon_state() - query the current beacon LED state of a port.
 * @state: out-param receiving the raw (not byte-swapped) beacon_state field.
 * Embedded request over the MCC queue, synchronous.
 * NOTE(review): numbering jump 1593 -> 1595 suggests a dropped success
 * check; *state is presumably written only on success -- confirm.
 */
1570 int be_cmd_get_beacon_state(struct be_adapter
*adapter
, u8 port_num
, u32
*state
)
1572 struct be_mcc_wrb
*wrb
;
1573 struct be_cmd_req_get_beacon_state
*req
;
1576 spin_lock_bh(&adapter
->mcc_lock
);
1578 wrb
= wrb_from_mccq(adapter
);
1583 req
= embedded_payload(wrb
);
1585 be_wrb_hdr_prepare(wrb
, sizeof(*req
), true, 0,
1586 OPCODE_COMMON_GET_BEACON_STATE
);
1588 be_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_COMMON
,
1589 OPCODE_COMMON_GET_BEACON_STATE
, sizeof(*req
));
1591 req
->port_num
= port_num
;
1593 status
= be_mcc_notify_wait(adapter
);
1595 struct be_cmd_resp_get_beacon_state
*resp
=
1596 embedded_payload(wrb
);
1597 *state
= resp
->beacon_state
;
1601 spin_unlock_bh(&adapter
->mcc_lock
);
/*
 * be_cmd_write_flashrom() - issue a flash write/operation to firmware.
 * Unlike the other MCC commands here, this one is asynchronous at the MCC
 * level: it posts the WRB with be_mcc_notify(), drops the lock, and then
 * sleeps on adapter->flash_compl with a 12 s timeout. The completion is
 * signalled from be_mcc_compl_process() (see top of file), which matches
 * on tag0 == OPCODE_COMMON_WRITE_FLASHROM and tag1 == CMD_SUBSYSTEM_COMMON
 * and stores the status into adapter->flash_status.
 * NOTE(review): 'req' is used but never assigned in the visible lines --
 * the extraction presumably dropped 'req = cmd->va;' (numbering jump
 * 1616 -> 1622); the final spin_unlock looks like an error-label path
 * (numbering 1650). Confirm both against the full file.
 */
1605 int be_cmd_write_flashrom(struct be_adapter
*adapter
, struct be_dma_mem
*cmd
,
1606 u32 flash_type
, u32 flash_opcode
, u32 buf_size
)
1608 struct be_mcc_wrb
*wrb
;
1609 struct be_cmd_write_flashrom
*req
;
1613 spin_lock_bh(&adapter
->mcc_lock
);
/* Clear stale status before posting; the completion handler will set it. */
1614 adapter
->flash_status
= 0;
1616 wrb
= wrb_from_mccq(adapter
);
1622 sge
= nonembedded_sgl(wrb
);
1624 be_wrb_hdr_prepare(wrb
, cmd
->size
, false, 1,
1625 OPCODE_COMMON_WRITE_FLASHROM
);
/* tag1 lets the MCC completion handler identify this flash command. */
1626 wrb
->tag1
= CMD_SUBSYSTEM_COMMON
;
1628 be_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_COMMON
,
1629 OPCODE_COMMON_WRITE_FLASHROM
, cmd
->size
);
1630 sge
->pa_hi
= cpu_to_le32(upper_32_bits(cmd
->dma
));
1631 sge
->pa_lo
= cpu_to_le32(cmd
->dma
& 0xFFFFFFFF);
1632 sge
->len
= cpu_to_le32(cmd
->size
);
1634 req
->params
.op_type
= cpu_to_le32(flash_type
);
1635 req
->params
.op_code
= cpu_to_le32(flash_opcode
);
1636 req
->params
.data_buf_size
= cpu_to_le32(buf_size
);
/* Post without waiting; unlock before sleeping on the completion. */
1638 be_mcc_notify(adapter
);
1639 spin_unlock_bh(&adapter
->mcc_lock
);
1641 if (!wait_for_completion_timeout(&adapter
->flash_compl
,
1642 msecs_to_jiffies(12000)))
1645 status
= adapter
->flash_status
;
1650 spin_unlock_bh(&adapter
->mcc_lock
);
/*
 * be_cmd_get_flash_crc() - read the 4-byte CRC of the redboot flash region.
 * @flashed_crc: out buffer receiving 4 CRC bytes from the response.
 * Reuses the write_flashrom request layout with FLASHROM_OPER_REPORT.
 * The extra "+4" on the request size reserves room for the 4 returned
 * CRC bytes in the embedded payload.
 * NOTE(review): the parameter list is truncated by the extraction (an
 * 'offset' parameter is referenced below but its declaration line is
 * dropped) -- confirm the full signature in the original file.
 */
1654 int be_cmd_get_flash_crc(struct be_adapter
*adapter
, u8
*flashed_crc
,
1657 struct be_mcc_wrb
*wrb
;
1658 struct be_cmd_write_flashrom
*req
;
1661 spin_lock_bh(&adapter
->mcc_lock
);
1663 wrb
= wrb_from_mccq(adapter
);
1668 req
= embedded_payload(wrb
);
1670 be_wrb_hdr_prepare(wrb
, sizeof(*req
)+4, true, 0,
1671 OPCODE_COMMON_READ_FLASHROM
);
1673 be_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_COMMON
,
1674 OPCODE_COMMON_READ_FLASHROM
, sizeof(*req
)+4);
1676 req
->params
.op_type
= cpu_to_le32(IMG_TYPE_REDBOOT
);
1677 req
->params
.op_code
= cpu_to_le32(FLASHROM_OPER_REPORT
);
1678 req
->params
.offset
= cpu_to_le32(offset
);
1679 req
->params
.data_buf_size
= cpu_to_le32(0x4);
1681 status
= be_mcc_notify_wait(adapter
);
/* Response data overlays the request's data_buf in the embedded payload. */
1683 memcpy(flashed_crc
, req
->params
.data_buf
, 4);
1686 spin_unlock_bh(&adapter
->mcc_lock
);
/*
 * be_cmd_enable_magic_wol() - configure Wake-on-LAN magic packet matching
 * for the given MAC address.
 * The request lives in caller-supplied DMA memory (@nonemb_cmd) attached as
 * one non-embedded SGE. Synchronous over the MCC queue.
 */
1690 int be_cmd_enable_magic_wol(struct be_adapter
*adapter
, u8
*mac
,
1691 struct be_dma_mem
*nonemb_cmd
)
1693 struct be_mcc_wrb
*wrb
;
1694 struct be_cmd_req_acpi_wol_magic_config
*req
;
1698 spin_lock_bh(&adapter
->mcc_lock
);
1700 wrb
= wrb_from_mccq(adapter
);
1705 req
= nonemb_cmd
->va
;
1706 sge
= nonembedded_sgl(wrb
);
1708 be_wrb_hdr_prepare(wrb
, sizeof(*req
), false, 1,
1709 OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG
);
1711 be_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_ETH
,
1712 OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG
, sizeof(*req
));
/* MAC the NIC should match in incoming magic packets. */
1713 memcpy(req
->magic_mac
, mac
, ETH_ALEN
);
1715 sge
->pa_hi
= cpu_to_le32(upper_32_bits(nonemb_cmd
->dma
));
1716 sge
->pa_lo
= cpu_to_le32(nonemb_cmd
->dma
& 0xFFFFFFFF);
1717 sge
->len
= cpu_to_le32(nonemb_cmd
->size
);
1719 status
= be_mcc_notify_wait(adapter
);
1722 spin_unlock_bh(&adapter
->mcc_lock
);
/*
 * be_cmd_set_loopback() - enable/disable a hardware loopback mode on a port
 * (used by the ethtool self-test path).
 * Source and destination port are the same port. Embedded request over the
 * MCC queue, synchronous.
 * NOTE(review): the be_cmd_hdr_prepare() call is missing its final size
 * argument in this extraction (numbering jump 1747 -> 1750; presumably
 * 'sizeof(*req)' was dropped) -- confirm against the full file.
 */
1726 int be_cmd_set_loopback(struct be_adapter
*adapter
, u8 port_num
,
1727 u8 loopback_type
, u8 enable
)
1729 struct be_mcc_wrb
*wrb
;
1730 struct be_cmd_req_set_lmode
*req
;
1733 spin_lock_bh(&adapter
->mcc_lock
);
1735 wrb
= wrb_from_mccq(adapter
);
1741 req
= embedded_payload(wrb
);
1743 be_wrb_hdr_prepare(wrb
, sizeof(*req
), true, 0,
1744 OPCODE_LOWLEVEL_SET_LOOPBACK_MODE
);
1746 be_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_LOWLEVEL
,
1747 OPCODE_LOWLEVEL_SET_LOOPBACK_MODE
,
1750 req
->src_port
= port_num
;
1751 req
->dest_port
= port_num
;
1752 req
->loopback_type
= loopback_type
;
1753 req
->loopback_state
= enable
;
1755 status
= be_mcc_notify_wait(adapter
);
1757 spin_unlock_bh(&adapter
->mcc_lock
);
/*
 * be_cmd_loopback_test() - run a firmware loopback packet test on a port.
 * Sends @num_pkts packets of @pkt_size bytes filled with @pattern through
 * the requested loopback mode and returns the test status reported by
 * firmware (the MCC status is overwritten by the response's status field).
 * Embedded request over the MCC queue, synchronous.
 */
1761 int be_cmd_loopback_test(struct be_adapter
*adapter
, u32 port_num
,
1762 u32 loopback_type
, u32 pkt_size
, u32 num_pkts
, u64 pattern
)
1764 struct be_mcc_wrb
*wrb
;
1765 struct be_cmd_req_loopback_test
*req
;
1768 spin_lock_bh(&adapter
->mcc_lock
);
1770 wrb
= wrb_from_mccq(adapter
);
1776 req
= embedded_payload(wrb
);
1778 be_wrb_hdr_prepare(wrb
, sizeof(*req
), true, 0,
1779 OPCODE_LOWLEVEL_LOOPBACK_TEST
);
1781 be_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_LOWLEVEL
,
1782 OPCODE_LOWLEVEL_LOOPBACK_TEST
, sizeof(*req
));
/* Per-command firmware timeout; unit presumably seconds -- confirm. */
1783 req
->hdr
.timeout
= cpu_to_le32(4);
1785 req
->pattern
= cpu_to_le64(pattern
);
1786 req
->src_port
= cpu_to_le32(port_num
);
1787 req
->dest_port
= cpu_to_le32(port_num
);
1788 req
->pkt_size
= cpu_to_le32(pkt_size
);
1789 req
->num_pkts
= cpu_to_le32(num_pkts
);
1790 req
->loopback_type
= cpu_to_le32(loopback_type
);
1792 status
= be_mcc_notify_wait(adapter
);
/* Return the test result from the response, not the MCC completion status. */
1794 struct be_cmd_resp_loopback_test
*resp
= embedded_payload(wrb
);
1795 status
= le32_to_cpu(resp
->status
);
1799 spin_unlock_bh(&adapter
->mcc_lock
);
/*
 * be_cmd_ddr_dma_test() - DMA a pattern to NIC DDR and back, then verify.
 * Fills a send buffer with @pattern bytes, has firmware loop it through
 * host<->DDR DMA, and compares the received buffer against what was sent.
 * Request/response live in caller-supplied DMA memory (@cmd), attached as
 * one non-embedded SGE.
 * NOTE(review): several lines are dropped by the extraction: 'req = cmd->va;'
 * (req is used unassigned here), the declarations/reset of 'i' and 'j', the
 * j wrap-around inside the fill loop, the resp assignment, and the rest of
 * the verification condition -- confirm all against the full file.
 */
1803 int be_cmd_ddr_dma_test(struct be_adapter
*adapter
, u64 pattern
,
1804 u32 byte_cnt
, struct be_dma_mem
*cmd
)
1806 struct be_mcc_wrb
*wrb
;
1807 struct be_cmd_req_ddrdma_test
*req
;
1812 spin_lock_bh(&adapter
->mcc_lock
);
1814 wrb
= wrb_from_mccq(adapter
);
1820 sge
= nonembedded_sgl(wrb
);
1821 be_wrb_hdr_prepare(wrb
, cmd
->size
, false, 1,
1822 OPCODE_LOWLEVEL_HOST_DDR_DMA
);
1823 be_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_LOWLEVEL
,
1824 OPCODE_LOWLEVEL_HOST_DDR_DMA
, cmd
->size
);
1826 sge
->pa_hi
= cpu_to_le32(upper_32_bits(cmd
->dma
));
1827 sge
->pa_lo
= cpu_to_le32(cmd
->dma
& 0xFFFFFFFF);
1828 sge
->len
= cpu_to_le32(cmd
->size
);
1830 req
->pattern
= cpu_to_le64(pattern
);
1831 req
->byte_count
= cpu_to_le32(byte_cnt
);
/* Replicate the 64-bit pattern byte-by-byte into the send buffer. */
1832 for (i
= 0; i
< byte_cnt
; i
++) {
1833 req
->snd_buff
[i
] = (u8
)(pattern
>> (j
*8));
1839 status
= be_mcc_notify_wait(adapter
);
1842 struct be_cmd_resp_ddrdma_test
*resp
;
/* Fail the test if the echoed buffer differs from what was sent. */
1844 if ((memcmp(resp
->rcv_buff
, req
->snd_buff
, byte_cnt
) != 0) ||
1851 spin_unlock_bh(&adapter
->mcc_lock
);
/*
 * be_cmd_get_seeprom_data() - read the adapter's SEEPROM contents.
 * The request/response buffer is caller-supplied DMA memory (@nonemb_cmd),
 * attached as one non-embedded SGE; firmware writes the SEEPROM data back
 * into it. Synchronous over the MCC queue.
 */
1855 int be_cmd_get_seeprom_data(struct be_adapter
*adapter
,
1856 struct be_dma_mem
*nonemb_cmd
)
1858 struct be_mcc_wrb
*wrb
;
1859 struct be_cmd_req_seeprom_read
*req
;
1863 spin_lock_bh(&adapter
->mcc_lock
);
1865 wrb
= wrb_from_mccq(adapter
);
1870 req
= nonemb_cmd
->va
;
1871 sge
= nonembedded_sgl(wrb
);
1873 be_wrb_hdr_prepare(wrb
, sizeof(*req
), false, 1,
1874 OPCODE_COMMON_SEEPROM_READ
);
1876 be_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_COMMON
,
1877 OPCODE_COMMON_SEEPROM_READ
, sizeof(*req
));
1879 sge
->pa_hi
= cpu_to_le32(upper_32_bits(nonemb_cmd
->dma
));
1880 sge
->pa_lo
= cpu_to_le32(nonemb_cmd
->dma
& 0xFFFFFFFF);
1881 sge
->len
= cpu_to_le32(nonemb_cmd
->size
);
1883 status
= be_mcc_notify_wait(adapter
);
1886 spin_unlock_bh(&adapter
->mcc_lock
);
/*
 * be_cmd_get_phy_info() - query PHY details from firmware.
 * Response is written into caller-supplied DMA memory (@cmd) attached as
 * one non-embedded SGE. Synchronous over the MCC queue.
 * NOTE(review): 'req' is used but never assigned in the visible lines
 * (presumably 'req = cmd->va;' was dropped by the extraction, numbering
 * jump 1899 -> 1906), and the be_cmd_hdr_prepare() size argument is also
 * missing (jump 1912 -> 1915) -- confirm both against the full file.
 */
1890 int be_cmd_get_phy_info(struct be_adapter
*adapter
, struct be_dma_mem
*cmd
)
1892 struct be_mcc_wrb
*wrb
;
1893 struct be_cmd_req_get_phy_info
*req
;
1897 spin_lock_bh(&adapter
->mcc_lock
);
1899 wrb
= wrb_from_mccq(adapter
);
1906 sge
= nonembedded_sgl(wrb
);
1908 be_wrb_hdr_prepare(wrb
, sizeof(*req
), false, 1,
1909 OPCODE_COMMON_GET_PHY_DETAILS
);
1911 be_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_COMMON
,
1912 OPCODE_COMMON_GET_PHY_DETAILS
,
1915 sge
->pa_hi
= cpu_to_le32(upper_32_bits(cmd
->dma
));
1916 sge
->pa_lo
= cpu_to_le32(cmd
->dma
& 0xFFFFFFFF);
1917 sge
->len
= cpu_to_le32(cmd
->size
);
1919 status
= be_mcc_notify_wait(adapter
);
1921 spin_unlock_bh(&adapter
->mcc_lock
);
1925 int be_cmd_set_qos(struct be_adapter
*adapter
, u32 bps
, u32 domain
)
1927 struct be_mcc_wrb
*wrb
;
1928 struct be_cmd_req_set_qos
*req
;
1931 spin_lock_bh(&adapter
->mcc_lock
);
1933 wrb
= wrb_from_mccq(adapter
);
1939 req
= embedded_payload(wrb
);
1941 be_wrb_hdr_prepare(wrb
, sizeof(*req
), true, 0,
1942 OPCODE_COMMON_SET_QOS
);
1944 be_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_COMMON
,
1945 OPCODE_COMMON_SET_QOS
, sizeof(*req
));
1947 req
->hdr
.domain
= domain
;
1948 req
->valid_bits
= cpu_to_le32(BE_QOS_BITS_NIC
);
1949 req
->max_bps_nic
= cpu_to_le32(bps
);
1951 status
= be_mcc_notify_wait(adapter
);
1954 spin_unlock_bh(&adapter
->mcc_lock
);