/******************************************************************************

  Copyright (c) 2013-2015, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

  1. Redistributions of source code must retain the above copyright notice,
     this list of conditions and the following disclaimer.

  2. Redistributions in binary form must reproduce the above copyright
     notice, this list of conditions and the following disclaimer in the
     documentation and/or other materials provided with the distribution.

  3. Neither the name of the Intel Corporation nor the names of its
     contributors may be used to endorse or promote products derived from
     this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*$FreeBSD$*/
#include "i40e_status.h"
#include "i40e_type.h"
#include "i40e_register.h"
#include "i40e_adminq.h"
#include "i40e_prototype.h"
/**
 * i40e_adminq_init_regs - Initialize AdminQ registers
 * @hw: pointer to the hardware structure
 *
 * This assumes the alloc_asq and alloc_arq functions have already been called
 **/
static void i40e_adminq_init_regs(struct i40e_hw *hw)
{
    /* set head and tail registers in our local struct */
    if (i40e_is_vf(hw)) {
        hw->aq.asq.tail = I40E_VF_ATQT1;
        hw->aq.asq.head = I40E_VF_ATQH1;
        hw->aq.asq.len = I40E_VF_ATQLEN1;
        hw->aq.asq.bal = I40E_VF_ATQBAL1;
        hw->aq.asq.bah = I40E_VF_ATQBAH1;
        hw->aq.arq.tail = I40E_VF_ARQT1;
        hw->aq.arq.head = I40E_VF_ARQH1;
        hw->aq.arq.len = I40E_VF_ARQLEN1;
        hw->aq.arq.bal = I40E_VF_ARQBAL1;
        hw->aq.arq.bah = I40E_VF_ARQBAH1;
    } else {
        hw->aq.asq.tail = I40E_PF_ATQT;
        hw->aq.asq.head = I40E_PF_ATQH;
        hw->aq.asq.len = I40E_PF_ATQLEN;
        hw->aq.asq.bal = I40E_PF_ATQBAL;
        hw->aq.asq.bah = I40E_PF_ATQBAH;
        hw->aq.arq.tail = I40E_PF_ARQT;
        hw->aq.arq.head = I40E_PF_ARQH;
        hw->aq.arq.len = I40E_PF_ARQLEN;
        hw->aq.arq.bal = I40E_PF_ARQBAL;
        hw->aq.arq.bah = I40E_PF_ARQBAH;
    }
}
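
/*
 * Usage sketch (illustrative, assuming an initialized hw; not part of the
 * driver): because the register offsets are captured in hw->aq once here,
 * later accesses need no PF/VF branching.  The same two lines below touch
 * the I40E_PF_* registers on a PF and the I40E_VF_*1 registers on a VF:
 *
 *    wr32(hw, hw->aq.asq.tail, hw->aq.asq.next_to_use);
 *    head = rd32(hw, hw->aq.arq.head);
 */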
/**
 * i40e_alloc_adminq_asq_ring - Allocate Admin Queue send rings
 * @hw: pointer to the hardware structure
 **/
enum i40e_status_code i40e_alloc_adminq_asq_ring(struct i40e_hw *hw)
{
    enum i40e_status_code ret_code;

    ret_code = i40e_allocate_dma_mem(hw, &hw->aq.asq.desc_buf,
                                     i40e_mem_atq_ring,
                                     (hw->aq.num_asq_entries *
                                     sizeof(struct i40e_aq_desc)),
                                     I40E_ADMINQ_DESC_ALIGNMENT);
    if (ret_code)
        return ret_code;

    ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.cmd_buf,
                                      (hw->aq.num_asq_entries *
                                      sizeof(struct i40e_asq_cmd_details)));
    if (ret_code) {
        i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
        return ret_code;
    }

    return ret_code;
}
/**
 * i40e_alloc_adminq_arq_ring - Allocate Admin Queue receive rings
 * @hw: pointer to the hardware structure
 **/
enum i40e_status_code i40e_alloc_adminq_arq_ring(struct i40e_hw *hw)
{
    enum i40e_status_code ret_code;

    ret_code = i40e_allocate_dma_mem(hw, &hw->aq.arq.desc_buf,
                                     i40e_mem_arq_ring,
                                     (hw->aq.num_arq_entries *
                                     sizeof(struct i40e_aq_desc)),
                                     I40E_ADMINQ_DESC_ALIGNMENT);

    return ret_code;
}
/**
 * i40e_free_adminq_asq - Free Admin Queue send rings
 * @hw: pointer to the hardware structure
 *
 * This assumes the posted send buffers have already been cleaned
 * and de-allocated
 **/
void i40e_free_adminq_asq(struct i40e_hw *hw)
{
    i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
}
/**
 * i40e_free_adminq_arq - Free Admin Queue receive rings
 * @hw: pointer to the hardware structure
 *
 * This assumes the posted receive buffers have already been cleaned
 * and de-allocated
 **/
void i40e_free_adminq_arq(struct i40e_hw *hw)
{
    i40e_free_dma_mem(hw, &hw->aq.arq.desc_buf);
}
/**
 * i40e_alloc_arq_bufs - Allocate pre-posted buffers for the receive queue
 * @hw: pointer to the hardware structure
 **/
static enum i40e_status_code i40e_alloc_arq_bufs(struct i40e_hw *hw)
{
    enum i40e_status_code ret_code;
    struct i40e_aq_desc *desc;
    struct i40e_dma_mem *bi;
    int i;

    /* We'll be allocating the buffer info memory first, then we can
     * allocate the mapped buffers for the event processing
     */

    /* buffer_info structures do not need alignment */
    ret_code = i40e_allocate_virt_mem(hw, &hw->aq.arq.dma_head,
        (hw->aq.num_arq_entries * sizeof(struct i40e_dma_mem)));
    if (ret_code)
        goto alloc_arq_bufs;
    hw->aq.arq.r.arq_bi = (struct i40e_dma_mem *)hw->aq.arq.dma_head.va;

    /* allocate the mapped buffers */
    for (i = 0; i < hw->aq.num_arq_entries; i++) {
        bi = &hw->aq.arq.r.arq_bi[i];
        ret_code = i40e_allocate_dma_mem(hw, bi,
                                         i40e_mem_arq_buf,
                                         hw->aq.arq_buf_size,
                                         I40E_ADMINQ_DESC_ALIGNMENT);
        if (ret_code)
            goto unwind_alloc_arq_bufs;

        /* now configure the descriptors for use */
        desc = I40E_ADMINQ_DESC(hw->aq.arq, i);

        desc->flags = CPU_TO_LE16(I40E_AQ_FLAG_BUF);
        if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF)
            desc->flags |= CPU_TO_LE16(I40E_AQ_FLAG_LB);
        desc->opcode = 0;
        /* This is in accordance with Admin queue design, there is no
         * register for buffer size configuration
         */
        desc->datalen = CPU_TO_LE16((u16)bi->size);
        desc->retval = 0;
        desc->cookie_high = 0;
        desc->cookie_low = 0;
        desc->params.external.addr_high =
            CPU_TO_LE32(I40E_HI_DWORD(bi->pa));
        desc->params.external.addr_low =
            CPU_TO_LE32(I40E_LO_DWORD(bi->pa));
        desc->params.external.param0 = 0;
        desc->params.external.param1 = 0;
    }

alloc_arq_bufs:
    return ret_code;

unwind_alloc_arq_bufs:
    /* don't try to free the one that failed... */
    i--;
    for (; i >= 0; i--)
        i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
    i40e_free_virt_mem(hw, &hw->aq.arq.dma_head);

    return ret_code;
}
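
/*
 * Illustrative sketch of the address split used above, assuming the usual
 * OS-shim definitions where I40E_HI_DWORD() yields the upper 32 bits of a
 * 64-bit value and I40E_LO_DWORD() the lower 32 bits (the DMA address
 * below is a made-up example value):
 *
 *    u64 pa = 0x0000000123456789ULL;
 *    u32 hi = I40E_HI_DWORD(pa);    becomes 0x00000001
 *    u32 lo = I40E_LO_DWORD(pa);    becomes 0x23456789
 *    desc->params.external.addr_high = CPU_TO_LE32(hi);
 *    desc->params.external.addr_low = CPU_TO_LE32(lo);
 *
 * Firmware reassembles the two halves to locate the pre-posted buffer.
 */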
/**
 * i40e_alloc_asq_bufs - Allocate empty buffer structs for the send queue
 * @hw: pointer to the hardware structure
 **/
static enum i40e_status_code i40e_alloc_asq_bufs(struct i40e_hw *hw)
{
    enum i40e_status_code ret_code;
    struct i40e_dma_mem *bi;
    int i;

    /* No mapped memory needed yet, just the buffer info structures */
    ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.dma_head,
        (hw->aq.num_asq_entries * sizeof(struct i40e_dma_mem)));
    if (ret_code)
        goto alloc_asq_bufs;
    hw->aq.asq.r.asq_bi = (struct i40e_dma_mem *)hw->aq.asq.dma_head.va;

    /* allocate the mapped buffers */
    for (i = 0; i < hw->aq.num_asq_entries; i++) {
        bi = &hw->aq.asq.r.asq_bi[i];
        ret_code = i40e_allocate_dma_mem(hw, bi,
                                         i40e_mem_asq_buf,
                                         hw->aq.asq_buf_size,
                                         I40E_ADMINQ_DESC_ALIGNMENT);
        if (ret_code)
            goto unwind_alloc_asq_bufs;
    }
alloc_asq_bufs:
    return ret_code;

unwind_alloc_asq_bufs:
    /* don't try to free the one that failed... */
    i--;
    for (; i >= 0; i--)
        i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
    i40e_free_virt_mem(hw, &hw->aq.asq.dma_head);

    return ret_code;
}
/**
 * i40e_free_arq_bufs - Free receive queue buffer info elements
 * @hw: pointer to the hardware structure
 **/
static void i40e_free_arq_bufs(struct i40e_hw *hw)
{
    int i;

    /* free descriptors */
    for (i = 0; i < hw->aq.num_arq_entries; i++)
        i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);

    /* free the descriptor memory */
    i40e_free_dma_mem(hw, &hw->aq.arq.desc_buf);

    /* free the dma header */
    i40e_free_virt_mem(hw, &hw->aq.arq.dma_head);
}
/**
 * i40e_free_asq_bufs - Free send queue buffer info elements
 * @hw: pointer to the hardware structure
 **/
static void i40e_free_asq_bufs(struct i40e_hw *hw)
{
    int i;

    /* only unmap if the address is non-NULL */
    for (i = 0; i < hw->aq.num_asq_entries; i++)
        if (hw->aq.asq.r.asq_bi[i].pa)
            i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);

    /* free the buffer info list */
    i40e_free_virt_mem(hw, &hw->aq.asq.cmd_buf);

    /* free the descriptor memory */
    i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);

    /* free the dma header */
    i40e_free_virt_mem(hw, &hw->aq.asq.dma_head);
}
/**
 * i40e_config_asq_regs - configure ASQ registers
 * @hw: pointer to the hardware structure
 *
 * Configure base address and length registers for the transmit queue
 **/
static enum i40e_status_code i40e_config_asq_regs(struct i40e_hw *hw)
{
    enum i40e_status_code ret_code = I40E_SUCCESS;
    u32 reg = 0;

    /* Clear Head and Tail */
    wr32(hw, hw->aq.asq.head, 0);
    wr32(hw, hw->aq.asq.tail, 0);

    /* set starting point */
    wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries |
                              I40E_PF_ATQLEN_ATQENABLE_MASK));
    wr32(hw, hw->aq.asq.bal, I40E_LO_DWORD(hw->aq.asq.desc_buf.pa));
    wr32(hw, hw->aq.asq.bah, I40E_HI_DWORD(hw->aq.asq.desc_buf.pa));

    /* Check one register to verify that config was applied */
    reg = rd32(hw, hw->aq.asq.bal);
    if (reg != I40E_LO_DWORD(hw->aq.asq.desc_buf.pa))
        ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;

    return ret_code;
}
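
/*
 * Note on the len write above (an observation, not new driver logic): the
 * ring size lands in the low-order bits and the queue-enable flag in a
 * high-order bit, so one write both sizes and enables the queue.  The PF
 * mask is used even when hw->aq.asq.len points at the VF register, which
 * assumes the enable bit sits at the same position in the I40E_PF_ATQLEN
 * and I40E_VF_ATQLEN1 layouts.  A sketch of probing the enable state:
 *
 *    u32 reg = rd32(hw, hw->aq.asq.len);
 *    bool enabled = (reg & I40E_PF_ATQLEN_ATQENABLE_MASK) != 0;
 */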
/**
 * i40e_config_arq_regs - ARQ register configuration
 * @hw: pointer to the hardware structure
 *
 * Configure base address and length registers for the receive (event) queue
 **/
static enum i40e_status_code i40e_config_arq_regs(struct i40e_hw *hw)
{
    enum i40e_status_code ret_code = I40E_SUCCESS;
    u32 reg = 0;

    /* Clear Head and Tail */
    wr32(hw, hw->aq.arq.head, 0);
    wr32(hw, hw->aq.arq.tail, 0);

    /* set starting point */
    wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries |
                              I40E_PF_ARQLEN_ARQENABLE_MASK));
    wr32(hw, hw->aq.arq.bal, I40E_LO_DWORD(hw->aq.arq.desc_buf.pa));
    wr32(hw, hw->aq.arq.bah, I40E_HI_DWORD(hw->aq.arq.desc_buf.pa));

    /* Update tail in the HW to post pre-allocated buffers */
    wr32(hw, hw->aq.arq.tail, hw->aq.num_arq_entries - 1);

    /* Check one register to verify that config was applied */
    reg = rd32(hw, hw->aq.arq.bal);
    if (reg != I40E_LO_DWORD(hw->aq.arq.desc_buf.pa))
        ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;

    return ret_code;
}
/**
 * i40e_init_asq - main initialization routine for ASQ
 * @hw: pointer to the hardware structure
 *
 * This is the main initialization routine for the Admin Send Queue
 * Prior to calling this function, drivers *MUST* set the following fields
 * in the hw->aq structure:
 *  - hw->aq.num_asq_entries
 *  - hw->aq.asq_buf_size
 *
 * Do *NOT* hold the lock when calling this as the memory allocation routines
 * called are not going to be atomic context safe
 **/
enum i40e_status_code i40e_init_asq(struct i40e_hw *hw)
{
    enum i40e_status_code ret_code = I40E_SUCCESS;

    if (hw->aq.asq.count > 0) {
        /* queue already initialized */
        ret_code = I40E_ERR_NOT_READY;
        goto init_adminq_exit;
    }

    /* verify input for valid configuration */
    if ((hw->aq.num_asq_entries == 0) ||
        (hw->aq.asq_buf_size == 0)) {
        ret_code = I40E_ERR_CONFIG;
        goto init_adminq_exit;
    }

    hw->aq.asq.next_to_use = 0;
    hw->aq.asq.next_to_clean = 0;

    /* allocate the ring memory */
    ret_code = i40e_alloc_adminq_asq_ring(hw);
    if (ret_code != I40E_SUCCESS)
        goto init_adminq_exit;

    /* allocate buffers in the rings */
    ret_code = i40e_alloc_asq_bufs(hw);
    if (ret_code != I40E_SUCCESS)
        goto init_adminq_free_rings;

    /* initialize base registers */
    ret_code = i40e_config_asq_regs(hw);
    if (ret_code != I40E_SUCCESS)
        goto init_adminq_free_rings;

    /* success! */
    hw->aq.asq.count = hw->aq.num_asq_entries;
    goto init_adminq_exit;

init_adminq_free_rings:
    i40e_free_adminq_asq(hw);

init_adminq_exit:
    return ret_code;
}
/**
 * i40e_init_arq - initialize ARQ
 * @hw: pointer to the hardware structure
 *
 * The main initialization routine for the Admin Receive (Event) Queue.
 * Prior to calling this function, drivers *MUST* set the following fields
 * in the hw->aq structure:
 *  - hw->aq.num_arq_entries
 *  - hw->aq.arq_buf_size
 *
 * Do *NOT* hold the lock when calling this as the memory allocation routines
 * called are not going to be atomic context safe
 **/
enum i40e_status_code i40e_init_arq(struct i40e_hw *hw)
{
    enum i40e_status_code ret_code = I40E_SUCCESS;

    if (hw->aq.arq.count > 0) {
        /* queue already initialized */
        ret_code = I40E_ERR_NOT_READY;
        goto init_adminq_exit;
    }

    /* verify input for valid configuration */
    if ((hw->aq.num_arq_entries == 0) ||
        (hw->aq.arq_buf_size == 0)) {
        ret_code = I40E_ERR_CONFIG;
        goto init_adminq_exit;
    }

    hw->aq.arq.next_to_use = 0;
    hw->aq.arq.next_to_clean = 0;

    /* allocate the ring memory */
    ret_code = i40e_alloc_adminq_arq_ring(hw);
    if (ret_code != I40E_SUCCESS)
        goto init_adminq_exit;

    /* allocate buffers in the rings */
    ret_code = i40e_alloc_arq_bufs(hw);
    if (ret_code != I40E_SUCCESS)
        goto init_adminq_free_rings;

    /* initialize base registers */
    ret_code = i40e_config_arq_regs(hw);
    if (ret_code != I40E_SUCCESS)
        goto init_adminq_free_rings;

    /* success! */
    hw->aq.arq.count = hw->aq.num_arq_entries;
    goto init_adminq_exit;

init_adminq_free_rings:
    i40e_free_adminq_arq(hw);

init_adminq_exit:
    return ret_code;
}
/**
 * i40e_shutdown_asq - shutdown the ASQ
 * @hw: pointer to the hardware structure
 *
 * The main shutdown routine for the Admin Send Queue
 **/
enum i40e_status_code i40e_shutdown_asq(struct i40e_hw *hw)
{
    enum i40e_status_code ret_code = I40E_SUCCESS;

    i40e_acquire_spinlock(&hw->aq.asq_spinlock);

    if (hw->aq.asq.count == 0) {
        ret_code = I40E_ERR_NOT_READY;
        goto shutdown_asq_out;
    }

    /* Stop firmware AdminQ processing */
    wr32(hw, hw->aq.asq.head, 0);
    wr32(hw, hw->aq.asq.tail, 0);
    wr32(hw, hw->aq.asq.len, 0);
    wr32(hw, hw->aq.asq.bal, 0);
    wr32(hw, hw->aq.asq.bah, 0);

    hw->aq.asq.count = 0; /* to indicate uninitialized queue */

    /* free ring buffers */
    i40e_free_asq_bufs(hw);

shutdown_asq_out:
    i40e_release_spinlock(&hw->aq.asq_spinlock);
    return ret_code;
}
/**
 * i40e_shutdown_arq - shutdown ARQ
 * @hw: pointer to the hardware structure
 *
 * The main shutdown routine for the Admin Receive Queue
 **/
enum i40e_status_code i40e_shutdown_arq(struct i40e_hw *hw)
{
    enum i40e_status_code ret_code = I40E_SUCCESS;

    i40e_acquire_spinlock(&hw->aq.arq_spinlock);

    if (hw->aq.arq.count == 0) {
        ret_code = I40E_ERR_NOT_READY;
        goto shutdown_arq_out;
    }

    /* Stop firmware AdminQ processing */
    wr32(hw, hw->aq.arq.head, 0);
    wr32(hw, hw->aq.arq.tail, 0);
    wr32(hw, hw->aq.arq.len, 0);
    wr32(hw, hw->aq.arq.bal, 0);
    wr32(hw, hw->aq.arq.bah, 0);

    hw->aq.arq.count = 0; /* to indicate uninitialized queue */

    /* free ring buffers */
    i40e_free_arq_bufs(hw);

shutdown_arq_out:
    i40e_release_spinlock(&hw->aq.arq_spinlock);
    return ret_code;
}
/**
 * i40e_resume_aq - resume AQ processing from 0
 * @hw: pointer to the hardware structure
 **/
static void i40e_resume_aq(struct i40e_hw *hw)
{
    /* Registers are reset after PF reset */
    hw->aq.asq.next_to_use = 0;
    hw->aq.asq.next_to_clean = 0;

    i40e_config_asq_regs(hw);

    hw->aq.arq.next_to_use = 0;
    hw->aq.arq.next_to_clean = 0;

    i40e_config_arq_regs(hw);
}
/**
 * i40e_init_adminq - main initialization routine for Admin Queue
 * @hw: pointer to the hardware structure
 *
 * Prior to calling this function, drivers *MUST* set the following fields
 * in the hw->aq structure:
 *  - hw->aq.num_asq_entries
 *  - hw->aq.num_arq_entries
 *  - hw->aq.arq_buf_size
 *  - hw->aq.asq_buf_size
 **/
enum i40e_status_code i40e_init_adminq(struct i40e_hw *hw)
{
    u16 cfg_ptr, oem_hi, oem_lo;
    u16 eetrack_lo, eetrack_hi;
    enum i40e_status_code ret_code;
    int retry = 0;

    /* verify input for valid configuration */
    if ((hw->aq.num_arq_entries == 0) ||
        (hw->aq.num_asq_entries == 0) ||
        (hw->aq.arq_buf_size == 0) ||
        (hw->aq.asq_buf_size == 0)) {
        ret_code = I40E_ERR_CONFIG;
        goto init_adminq_exit;
    }

    i40e_init_spinlock(&hw->aq.asq_spinlock);
    i40e_init_spinlock(&hw->aq.arq_spinlock);

    /* Set up register offsets */
    i40e_adminq_init_regs(hw);

    /* setup ASQ command write back timeout */
    hw->aq.asq_cmd_timeout = I40E_ASQ_CMD_TIMEOUT;

    /* allocate the ASQ */
    ret_code = i40e_init_asq(hw);
    if (ret_code != I40E_SUCCESS)
        goto init_adminq_destroy_spinlocks;

    /* allocate the ARQ */
    ret_code = i40e_init_arq(hw);
    if (ret_code != I40E_SUCCESS)
        goto init_adminq_free_asq;

    /* There are some cases where the firmware may not be quite ready
     * for AdminQ operations, so we retry the AdminQ setup a few times
     * if we see timeouts in this first AQ call.
     */
    do {
        ret_code = i40e_aq_get_firmware_version(hw,
                                                &hw->aq.fw_maj_ver,
                                                &hw->aq.fw_min_ver,
                                                &hw->aq.fw_build,
                                                &hw->aq.api_maj_ver,
                                                &hw->aq.api_min_ver,
                                                NULL);
        if (ret_code != I40E_ERR_ADMIN_QUEUE_TIMEOUT)
            break;
        retry++;
        i40e_msec_delay(100);
        i40e_resume_aq(hw);
    } while (retry < 10);
    if (ret_code != I40E_SUCCESS)
        goto init_adminq_free_arq;

    /* get the NVM version info */
    i40e_read_nvm_word(hw, I40E_SR_NVM_DEV_STARTER_VERSION,
                       &hw->nvm.version);
    i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_LO, &eetrack_lo);
    i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_HI, &eetrack_hi);
    hw->nvm.eetrack = (eetrack_hi << 16) | eetrack_lo;
    i40e_read_nvm_word(hw, I40E_SR_BOOT_CONFIG_PTR, &cfg_ptr);
    i40e_read_nvm_word(hw, (cfg_ptr + I40E_NVM_OEM_VER_OFF),
                       &oem_hi);
    i40e_read_nvm_word(hw, (cfg_ptr + (I40E_NVM_OEM_VER_OFF + 1)),
                       &oem_lo);
    hw->nvm.oem_ver = ((u32)oem_hi << 16) | oem_lo;

    if (hw->aq.api_maj_ver > I40E_FW_API_VERSION_MAJOR) {
        ret_code = I40E_ERR_FIRMWARE_API_VERSION;
        goto init_adminq_free_arq;
    }

    /* pre-emptive resource lock release */
    i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL);
    hw->nvm_release_on_done = FALSE;
    hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;

    ret_code = I40E_SUCCESS;

    /* success! */
    goto init_adminq_exit;

init_adminq_free_arq:
    i40e_shutdown_arq(hw);
init_adminq_free_asq:
    i40e_shutdown_asq(hw);
init_adminq_destroy_spinlocks:
    i40e_destroy_spinlock(&hw->aq.asq_spinlock);
    i40e_destroy_spinlock(&hw->aq.arq_spinlock);

init_adminq_exit:
    return ret_code;
}
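
/*
 * Usage sketch (hypothetical consumer, not part of this file): the four
 * hw->aq fields named in the comment above must be set before the call;
 * the ring and buffer sizes below are made-up example values.
 *
 *    hw->aq.num_asq_entries = 128;
 *    hw->aq.num_arq_entries = 128;
 *    hw->aq.asq_buf_size = 4096;
 *    hw->aq.arq_buf_size = 4096;
 *    ret = i40e_init_adminq(hw);
 *    if (ret != I40E_SUCCESS)
 *        return ret;
 *    ...use the AdminQ...
 *    (void) i40e_shutdown_adminq(hw);
 */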
/**
 * i40e_shutdown_adminq - shutdown routine for the Admin Queue
 * @hw: pointer to the hardware structure
 **/
enum i40e_status_code i40e_shutdown_adminq(struct i40e_hw *hw)
{
    enum i40e_status_code ret_code = I40E_SUCCESS;

    if (i40e_check_asq_alive(hw))
        i40e_aq_queue_shutdown(hw, TRUE);

    i40e_shutdown_asq(hw);
    i40e_shutdown_arq(hw);
    i40e_destroy_spinlock(&hw->aq.asq_spinlock);
    i40e_destroy_spinlock(&hw->aq.arq_spinlock);

    if (hw->nvm_buff.va)
        i40e_free_virt_mem(hw, &hw->nvm_buff);

    return ret_code;
}
/**
 * i40e_clean_asq - cleans Admin send queue
 * @hw: pointer to the hardware structure
 *
 * returns the number of free desc
 **/
u16 i40e_clean_asq(struct i40e_hw *hw)
{
    struct i40e_adminq_ring *asq = &(hw->aq.asq);
    struct i40e_asq_cmd_details *details;
    u16 ntc = asq->next_to_clean;
    struct i40e_aq_desc desc_cb;
    struct i40e_aq_desc *desc;

    desc = I40E_ADMINQ_DESC(*asq, ntc);
    details = I40E_ADMINQ_DETAILS(*asq, ntc);
    while (rd32(hw, hw->aq.asq.head) != ntc) {
        i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
                   "ntc %d head %d.\n", ntc, rd32(hw, hw->aq.asq.head));

        if (details->callback) {
            I40E_ADMINQ_CALLBACK cb_func =
                (I40E_ADMINQ_CALLBACK)details->callback;
            i40e_memcpy(&desc_cb, desc, sizeof(struct i40e_aq_desc),
                        I40E_DMA_TO_DMA);
            cb_func(hw, &desc_cb);
        }
        i40e_memset(desc, 0, sizeof(*desc), I40E_DMA_MEM);
        i40e_memset(details, 0, sizeof(*details), I40E_NONDMA_MEM);
        ntc++;
        if (ntc == asq->count)
            ntc = 0;
        desc = I40E_ADMINQ_DESC(*asq, ntc);
        details = I40E_ADMINQ_DETAILS(*asq, ntc);
    }

    asq->next_to_clean = ntc;

    return I40E_DESC_UNUSED(asq);
}
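
/*
 * Worked example of the free-count arithmetic (assuming I40E_DESC_UNUSED
 * in i40e_adminq.h follows the usual ring formula,
 * ((ntc > ntu ? 0 : count) + ntc - ntu - 1)):
 *
 *    count = 128, next_to_clean = 5, next_to_use = 10
 *    unused = 128 + 5 - 10 - 1 = 122
 *
 * One slot is always held back so that a full ring can be distinguished
 * from an empty one.
 */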
/**
 * i40e_asq_done - check if FW has processed the Admin Send Queue
 * @hw: pointer to the hw struct
 *
 * Returns TRUE if the firmware has processed all descriptors on the
 * admin send queue. Returns FALSE if there are still requests pending.
 **/
static bool i40e_asq_done(struct i40e_hw *hw)
{
    /* AQ designers suggest use of head for better
     * timing reliability than DD bit
     */
    return rd32(hw, hw->aq.asq.head) == hw->aq.asq.next_to_use;
}
/**
 * i40e_asq_send_command - send command to Admin Queue
 * @hw: pointer to the hw struct
 * @desc: prefilled descriptor describing the command (non DMA mem)
 * @buff: buffer to use for indirect commands
 * @buff_size: size of buffer for indirect commands
 * @cmd_details: pointer to command details structure
 *
 * This is the main send command driver routine for the Admin Queue send
 * queue. It runs the queue, cleans the queue, etc.
 **/
enum i40e_status_code i40e_asq_send_command(struct i40e_hw *hw,
                                struct i40e_aq_desc *desc,
                                void *buff, /* can be NULL */
                                u16 buff_size,
                                struct i40e_asq_cmd_details *cmd_details)
{
    enum i40e_status_code status = I40E_SUCCESS;
    struct i40e_dma_mem *dma_buff = NULL;
    struct i40e_asq_cmd_details *details;
    struct i40e_aq_desc *desc_on_ring;
    bool cmd_completed = FALSE;
    u16 retval = 0;
    u32 val = 0;

    i40e_acquire_spinlock(&hw->aq.asq_spinlock);

    hw->aq.asq_last_status = I40E_AQ_RC_OK;

    if (hw->aq.asq.count == 0) {
        i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
                   "AQTX: Admin queue not initialized.\n");
        status = I40E_ERR_QUEUE_EMPTY;
        goto asq_send_command_error;
    }

    val = rd32(hw, hw->aq.asq.head);
    if (val >= hw->aq.num_asq_entries) {
        i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
                   "AQTX: head overrun at %d\n", val);
        status = I40E_ERR_QUEUE_EMPTY;
        goto asq_send_command_error;
    }

    details = I40E_ADMINQ_DETAILS(hw->aq.asq, hw->aq.asq.next_to_use);
    if (cmd_details) {
        i40e_memcpy(details,
                    cmd_details,
                    sizeof(struct i40e_asq_cmd_details),
                    I40E_NONDMA_TO_NONDMA);

        /* If the cmd_details are defined copy the cookie. The
         * CPU_TO_LE32 is not needed here because the data is ignored
         * by the FW, only used by the driver
         */
        if (details->cookie) {
            desc->cookie_high =
                CPU_TO_LE32(I40E_HI_DWORD(details->cookie));
            desc->cookie_low =
                CPU_TO_LE32(I40E_LO_DWORD(details->cookie));
        }
    } else {
        i40e_memset(details, 0,
                    sizeof(struct i40e_asq_cmd_details),
                    I40E_NONDMA_MEM);
    }

    /* clear requested flags and then set additional flags if defined */
    desc->flags &= ~CPU_TO_LE16(details->flags_dis);
    desc->flags |= CPU_TO_LE16(details->flags_ena);

    if (buff_size > hw->aq.asq_buf_size) {
        i40e_debug(hw,
                   I40E_DEBUG_AQ_MESSAGE,
                   "AQTX: Invalid buffer size: %d.\n",
                   buff_size);
        status = I40E_ERR_INVALID_SIZE;
        goto asq_send_command_error;
    }

    if (details->postpone && !details->async) {
        i40e_debug(hw,
                   I40E_DEBUG_AQ_MESSAGE,
                   "AQTX: Async flag not set along with postpone flag\n");
        status = I40E_ERR_PARAM;
        goto asq_send_command_error;
    }

    /* call clean and check queue available function to reclaim the
     * descriptors that were processed by FW, the function returns the
     * number of desc available
     */
    /* the clean function called here could be called in a separate thread
     * in case of asynchronous completions
     */
    if (i40e_clean_asq(hw) == 0) {
        i40e_debug(hw,
                   I40E_DEBUG_AQ_MESSAGE,
                   "AQTX: Error queue is full.\n");
        status = I40E_ERR_ADMIN_QUEUE_FULL;
        goto asq_send_command_error;
    }

    /* initialize the temp desc pointer with the right desc */
    desc_on_ring = I40E_ADMINQ_DESC(hw->aq.asq, hw->aq.asq.next_to_use);

    /* if the desc is available copy the temp desc to the right place */
    i40e_memcpy(desc_on_ring, desc, sizeof(struct i40e_aq_desc),
                I40E_NONDMA_TO_DMA);

    /* if buff is not NULL assume indirect command */
    if (buff != NULL) {
        dma_buff = &(hw->aq.asq.r.asq_bi[hw->aq.asq.next_to_use]);
        /* copy the user buff into the respective DMA buff */
        i40e_memcpy(dma_buff->va, buff, buff_size,
                    I40E_NONDMA_TO_DMA);
        desc_on_ring->datalen = CPU_TO_LE16(buff_size);

        /* Update the address values in the desc with the pa value
         * for respective buffer
         */
        desc_on_ring->params.external.addr_high =
                CPU_TO_LE32(I40E_HI_DWORD(dma_buff->pa));
        desc_on_ring->params.external.addr_low =
                CPU_TO_LE32(I40E_LO_DWORD(dma_buff->pa));
    }

    /* bump the tail */
    i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQTX: desc and buffer:\n");
    i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc_on_ring,
                  buff, buff_size);
    (hw->aq.asq.next_to_use)++;
    if (hw->aq.asq.next_to_use == hw->aq.asq.count)
        hw->aq.asq.next_to_use = 0;
    if (!details->postpone)
        wr32(hw, hw->aq.asq.tail, hw->aq.asq.next_to_use);

    /* if cmd_details are not defined or async flag is not set,
     * we need to wait for desc write back
     */
    if (!details->async && !details->postpone) {
        u32 total_delay = 0;

        do {
            /* AQ designers suggest use of head for better
             * timing reliability than DD bit
             */
            if (i40e_asq_done(hw))
                break;
            i40e_msec_delay(1);
            total_delay++;
        } while (total_delay < hw->aq.asq_cmd_timeout);
    }

    /* if ready, copy the desc back to temp */
    if (i40e_asq_done(hw)) {
        i40e_memcpy(desc, desc_on_ring, sizeof(struct i40e_aq_desc),
                    I40E_DMA_TO_NONDMA);
        if (buff != NULL)
            i40e_memcpy(buff, dma_buff->va, buff_size,
                        I40E_DMA_TO_NONDMA);
        retval = LE16_TO_CPU(desc->retval);
        if (retval != 0) {
            i40e_debug(hw,
                       I40E_DEBUG_AQ_MESSAGE,
                       "AQTX: Command completed with error 0x%X.\n",
                       retval);

            /* strip off FW internal code */
            retval &= 0xff;
        }
        cmd_completed = TRUE;
        if ((enum i40e_admin_queue_err)retval == I40E_AQ_RC_OK)
            status = I40E_SUCCESS;
        else
            status = I40E_ERR_ADMIN_QUEUE_ERROR;
        hw->aq.asq_last_status = (enum i40e_admin_queue_err)retval;
    }

    i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
               "AQTX: desc and buffer writeback:\n");
    i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, buff, buff_size);

    /* save writeback aq if requested */
    if (details->wb_desc)
        i40e_memcpy(details->wb_desc, desc_on_ring,
                    sizeof(struct i40e_aq_desc), I40E_DMA_TO_NONDMA);

    /* update the error if time out occurred */
    if ((!cmd_completed) &&
        (!details->async && !details->postpone)) {
        i40e_debug(hw,
                   I40E_DEBUG_AQ_MESSAGE,
                   "AQTX: Writeback timeout.\n");
        status = I40E_ERR_ADMIN_QUEUE_TIMEOUT;
    }

asq_send_command_error:
    i40e_release_spinlock(&hw->aq.asq_spinlock);
    return status;
}
/**
 * i40e_fill_default_direct_cmd_desc - AQ descriptor helper function
 * @desc: pointer to the temp descriptor (non DMA mem)
 * @opcode: the opcode can be used to decide which flags to turn off or on
 *
 * Fill the desc with default values
 **/
void i40e_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc,
                                       u16 opcode)
{
    /* zero out the desc */
    i40e_memset((void *)desc, 0, sizeof(struct i40e_aq_desc),
                I40E_NONDMA_MEM);
    desc->opcode = CPU_TO_LE16(opcode);
    desc->flags = CPU_TO_LE16(I40E_AQ_FLAG_SI);
}
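
/*
 * Usage sketch combining this helper with i40e_asq_send_command() above
 * for a direct (bufferless) command.  The opcode name is a placeholder;
 * real opcodes are defined in i40e_adminq_cmd.h:
 *
 *    struct i40e_aq_desc desc;
 *    enum i40e_status_code status;
 *
 *    i40e_fill_default_direct_cmd_desc(&desc, example_opcode);
 *    status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
 *    if (status == I40E_SUCCESS)
 *        retval = LE16_TO_CPU(desc.retval);
 *
 * An indirect command instead passes a buffer and its size; the routine
 * copies it into the ring's DMA buffer on send and back on completion.
 */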
/**
 * i40e_clean_arq_element
 * @hw: pointer to the hw struct
 * @e: event info from the receive descriptor, includes any buffers
 * @pending: number of events that could be left to process
 *
 * This function cleans one Admin Receive Queue element and returns
 * the contents through e. It can also return how many events are
 * left to process through 'pending'
 **/
enum i40e_status_code i40e_clean_arq_element(struct i40e_hw *hw,
                                             struct i40e_arq_event_info *e,
                                             u16 *pending)
{
    enum i40e_status_code ret_code = I40E_SUCCESS;
    u16 ntc = hw->aq.arq.next_to_clean;
    struct i40e_aq_desc *desc;
    struct i40e_dma_mem *bi;
    u16 desc_idx;
    u16 datalen;
    u16 flags;
    u16 ntu;

    /* pre-clean the event info */
    i40e_memset(&e->desc, 0, sizeof(e->desc), I40E_NONDMA_MEM);

    /* take the lock before we start messing with the ring */
    i40e_acquire_spinlock(&hw->aq.arq_spinlock);

    if (hw->aq.arq.count == 0) {
        i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
                   "AQRX: Admin queue not initialized.\n");
        ret_code = I40E_ERR_QUEUE_EMPTY;
        goto clean_arq_element_err;
    }

    /* set next_to_use to head */
    ntu = (rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK);
    if (ntu == ntc) {
        /* nothing to do - shouldn't need to update ring's values */
        ret_code = I40E_ERR_ADMIN_QUEUE_NO_WORK;
        goto clean_arq_element_out;
    }

    /* now clean the next descriptor */
    desc = I40E_ADMINQ_DESC(hw->aq.arq, ntc);
    desc_idx = ntc;

    hw->aq.arq_last_status =
        (enum i40e_admin_queue_err)LE16_TO_CPU(desc->retval);
    flags = LE16_TO_CPU(desc->flags);
    if (flags & I40E_AQ_FLAG_ERR) {
        ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;
        i40e_debug(hw,
                   I40E_DEBUG_AQ_MESSAGE,
                   "AQRX: Event received with error 0x%X.\n",
                   hw->aq.arq_last_status);
    }

    i40e_memcpy(&e->desc, desc, sizeof(struct i40e_aq_desc),
                I40E_DMA_TO_NONDMA);
    datalen = LE16_TO_CPU(desc->datalen);
    e->msg_len = min(datalen, e->buf_len);
    if (e->msg_buf != NULL && (e->msg_len != 0))
        i40e_memcpy(e->msg_buf,
                    hw->aq.arq.r.arq_bi[desc_idx].va,
                    e->msg_len, I40E_DMA_TO_NONDMA);

    i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQRX: desc and buffer:\n");
    i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, e->msg_buf,
                  hw->aq.arq_buf_size);

    /* Restore the original datalen and buffer address in the desc,
     * FW updates datalen to indicate the event message
     * size
     */
    bi = &hw->aq.arq.r.arq_bi[ntc];
    i40e_memset((void *)desc, 0, sizeof(struct i40e_aq_desc), I40E_DMA_MEM);

    desc->flags = CPU_TO_LE16(I40E_AQ_FLAG_BUF);
    if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF)
        desc->flags |= CPU_TO_LE16(I40E_AQ_FLAG_LB);
    desc->datalen = CPU_TO_LE16((u16)bi->size);
    desc->params.external.addr_high = CPU_TO_LE32(I40E_HI_DWORD(bi->pa));
    desc->params.external.addr_low = CPU_TO_LE32(I40E_LO_DWORD(bi->pa));

    /* set tail = the last cleaned desc index. */
    wr32(hw, hw->aq.arq.tail, ntc);
    /* ntc is updated to tail + 1 */
    ntc++;
    if (ntc == hw->aq.num_arq_entries)
        ntc = 0;
    hw->aq.arq.next_to_clean = ntc;
    hw->aq.arq.next_to_use = ntu;

    i40e_nvmupd_check_wait_event(hw, LE16_TO_CPU(e->desc.opcode));
clean_arq_element_out:
    /* Set pending if needed, unlock and return */
    if (pending != NULL)
        *pending = (ntc > ntu ? hw->aq.arq.count : 0) + (ntu - ntc);
clean_arq_element_err:
    i40e_release_spinlock(&hw->aq.arq_spinlock);

    return ret_code;
}
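
/*
 * Polling sketch (hypothetical caller, assuming the i40e_arq_event_info
 * layout from i40e_adminq.h): drain all pending events.  example_alloc()
 * and example_handle() are placeholders, and a real caller would size the
 * buffer to hw->aq.arq_buf_size rather than the made-up value below.
 *
 *    struct i40e_arq_event_info event;
 *    u16 pending = 0;
 *
 *    event.buf_len = 512;
 *    event.msg_buf = example_alloc(event.buf_len);
 *    do {
 *        if (i40e_clean_arq_element(hw, &event, &pending) != I40E_SUCCESS)
 *            break;
 *        example_handle(hw, &event);
 *    } while (pending != 0);
 */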