1 /*******************************************************************************
3 Copyright (c) 2013 - 2015, Intel Corporation
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
32 ***************************************************************************/
34 #include "i40e_status.h"
35 #include "i40e_type.h"
36 #include "i40e_register.h"
37 #include "i40e_adminq.h"
38 #include "i40e_prototype.h"
41 * i40e_adminq_init_regs - Initialize AdminQ registers
42 * @hw: pointer to the hardware structure
44 * This assumes the alloc_asq and alloc_arq functions have already been called
46 STATIC void i40e_adminq_init_regs(struct i40e_hw *hw)
48 /* set head and tail registers in our local struct */
50 hw->aq.asq.tail = I40E_VF_ATQT1;
51 hw->aq.asq.head = I40E_VF_ATQH1;
52 hw->aq.asq.len = I40E_VF_ATQLEN1;
53 hw->aq.asq.bal = I40E_VF_ATQBAL1;
54 hw->aq.asq.bah = I40E_VF_ATQBAH1;
55 hw->aq.arq.tail = I40E_VF_ARQT1;
56 hw->aq.arq.head = I40E_VF_ARQH1;
57 hw->aq.arq.len = I40E_VF_ARQLEN1;
58 hw->aq.arq.bal = I40E_VF_ARQBAL1;
59 hw->aq.arq.bah = I40E_VF_ARQBAH1;
62 hw->aq.asq.tail = I40E_PF_ATQT;
63 hw->aq.asq.head = I40E_PF_ATQH;
64 hw->aq.asq.len = I40E_PF_ATQLEN;
65 hw->aq.asq.bal = I40E_PF_ATQBAL;
66 hw->aq.asq.bah = I40E_PF_ATQBAH;
67 hw->aq.arq.tail = I40E_PF_ARQT;
68 hw->aq.arq.head = I40E_PF_ARQH;
69 hw->aq.arq.len = I40E_PF_ARQLEN;
70 hw->aq.arq.bal = I40E_PF_ARQBAL;
71 hw->aq.arq.bah = I40E_PF_ARQBAH;
77 * i40e_alloc_adminq_asq_ring - Allocate Admin Queue send rings
78 * @hw: pointer to the hardware structure
80 enum i40e_status_code i40e_alloc_adminq_asq_ring(struct i40e_hw *hw)
82 enum i40e_status_code ret_code;
84 ret_code = i40e_allocate_dma_mem(hw, &hw->aq.asq.desc_buf,
86 (hw->aq.num_asq_entries *
87 sizeof(struct i40e_aq_desc)),
88 I40E_ADMINQ_DESC_ALIGNMENT);
92 ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.cmd_buf,
93 (hw->aq.num_asq_entries *
94 sizeof(struct i40e_asq_cmd_details)));
96 i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
104 * i40e_alloc_adminq_arq_ring - Allocate Admin Queue receive rings
105 * @hw: pointer to the hardware structure
107 enum i40e_status_code i40e_alloc_adminq_arq_ring(struct i40e_hw *hw)
109 enum i40e_status_code ret_code;
111 ret_code = i40e_allocate_dma_mem(hw, &hw->aq.arq.desc_buf,
113 (hw->aq.num_arq_entries *
114 sizeof(struct i40e_aq_desc)),
115 I40E_ADMINQ_DESC_ALIGNMENT);
121 * i40e_free_adminq_asq - Free Admin Queue send rings
122 * @hw: pointer to the hardware structure
124 * This assumes the posted send buffers have already been cleaned
127 void i40e_free_adminq_asq(struct i40e_hw *hw)
129 i40e_free_virt_mem(hw, &hw->aq.asq.cmd_buf);
130 i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
134 * i40e_free_adminq_arq - Free Admin Queue receive rings
135 * @hw: pointer to the hardware structure
137 * This assumes the posted receive buffers have already been cleaned
140 void i40e_free_adminq_arq(struct i40e_hw *hw)
142 i40e_free_dma_mem(hw, &hw->aq.arq.desc_buf);
146 * i40e_alloc_arq_bufs - Allocate pre-posted buffers for the receive queue
147 * @hw: pointer to the hardware structure
149 STATIC enum i40e_status_code i40e_alloc_arq_bufs(struct i40e_hw *hw)
151 enum i40e_status_code ret_code;
152 struct i40e_aq_desc *desc;
153 struct i40e_dma_mem *bi;
156 /* We'll be allocating the buffer info memory first, then we can
157 * allocate the mapped buffers for the event processing
160 /* buffer_info structures do not need alignment */
161 ret_code = i40e_allocate_virt_mem(hw, &hw->aq.arq.dma_head,
162 (hw->aq.num_arq_entries * sizeof(struct i40e_dma_mem)));
165 hw->aq.arq.r.arq_bi = (struct i40e_dma_mem *)hw->aq.arq.dma_head.va;
167 /* allocate the mapped buffers */
168 for (i = 0; i < hw->aq.num_arq_entries; i++) {
169 bi = &hw->aq.arq.r.arq_bi[i];
170 ret_code = i40e_allocate_dma_mem(hw, bi,
173 I40E_ADMINQ_DESC_ALIGNMENT);
175 goto unwind_alloc_arq_bufs;
177 /* now configure the descriptors for use */
178 desc = I40E_ADMINQ_DESC(hw->aq.arq, i);
180 desc->flags = CPU_TO_LE16(I40E_AQ_FLAG_BUF);
181 if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF)
182 desc->flags |= CPU_TO_LE16(I40E_AQ_FLAG_LB);
184 /* This is in accordance with Admin queue design, there is no
185 * register for buffer size configuration
187 desc->datalen = CPU_TO_LE16((u16)bi->size);
189 desc->cookie_high = 0;
190 desc->cookie_low = 0;
191 desc->params.external.addr_high =
192 CPU_TO_LE32(I40E_HI_DWORD(bi->pa));
193 desc->params.external.addr_low =
194 CPU_TO_LE32(I40E_LO_DWORD(bi->pa));
195 desc->params.external.param0 = 0;
196 desc->params.external.param1 = 0;
202 unwind_alloc_arq_bufs:
203 /* don't try to free the one that failed... */
206 i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
207 i40e_free_virt_mem(hw, &hw->aq.arq.dma_head);
213 * i40e_alloc_asq_bufs - Allocate empty buffer structs for the send queue
214 * @hw: pointer to the hardware structure
216 STATIC enum i40e_status_code i40e_alloc_asq_bufs(struct i40e_hw *hw)
218 enum i40e_status_code ret_code;
219 struct i40e_dma_mem *bi;
222 /* No mapped memory needed yet, just the buffer info structures */
223 ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.dma_head,
224 (hw->aq.num_asq_entries * sizeof(struct i40e_dma_mem)));
227 hw->aq.asq.r.asq_bi = (struct i40e_dma_mem *)hw->aq.asq.dma_head.va;
229 /* allocate the mapped buffers */
230 for (i = 0; i < hw->aq.num_asq_entries; i++) {
231 bi = &hw->aq.asq.r.asq_bi[i];
232 ret_code = i40e_allocate_dma_mem(hw, bi,
235 I40E_ADMINQ_DESC_ALIGNMENT);
237 goto unwind_alloc_asq_bufs;
242 unwind_alloc_asq_bufs:
243 /* don't try to free the one that failed... */
246 i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
247 i40e_free_virt_mem(hw, &hw->aq.asq.dma_head);
253 * i40e_free_arq_bufs - Free receive queue buffer info elements
254 * @hw: pointer to the hardware structure
256 STATIC void i40e_free_arq_bufs(struct i40e_hw *hw)
260 /* free descriptors */
261 for (i = 0; i < hw->aq.num_arq_entries; i++)
262 i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
264 /* free the descriptor memory */
265 i40e_free_dma_mem(hw, &hw->aq.arq.desc_buf);
267 /* free the dma header */
268 i40e_free_virt_mem(hw, &hw->aq.arq.dma_head);
272 * i40e_free_asq_bufs - Free send queue buffer info elements
273 * @hw: pointer to the hardware structure
275 STATIC void i40e_free_asq_bufs(struct i40e_hw *hw)
279 /* only unmap if the address is non-NULL */
280 for (i = 0; i < hw->aq.num_asq_entries; i++)
281 if (hw->aq.asq.r.asq_bi[i].pa)
282 i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
284 /* free the buffer info list */
285 i40e_free_virt_mem(hw, &hw->aq.asq.cmd_buf);
287 /* free the descriptor memory */
288 i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
290 /* free the dma header */
291 i40e_free_virt_mem(hw, &hw->aq.asq.dma_head);
295 * i40e_config_asq_regs - configure ASQ registers
296 * @hw: pointer to the hardware structure
298 * Configure base address and length registers for the transmit queue
300 STATIC enum i40e_status_code i40e_config_asq_regs(struct i40e_hw *hw)
302 enum i40e_status_code ret_code = I40E_SUCCESS;
305 /* Clear Head and Tail */
306 wr32(hw, hw->aq.asq.head, 0);
307 wr32(hw, hw->aq.asq.tail, 0);
309 /* set starting point */
313 wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries |
314 I40E_PF_ATQLEN_ATQENABLE_MASK));
316 wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries |
317 I40E_PF_ATQLEN_ATQENABLE_MASK));
318 #endif /* INTEGRATED_VF */
319 #endif /* PF_DRIVER */
323 wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries |
324 I40E_VF_ATQLEN1_ATQENABLE_MASK));
326 wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries |
327 I40E_VF_ATQLEN1_ATQENABLE_MASK));
328 #endif /* INTEGRATED_VF */
329 #endif /* VF_DRIVER */
330 wr32(hw, hw->aq.asq.bal, I40E_LO_DWORD(hw->aq.asq.desc_buf.pa));
331 wr32(hw, hw->aq.asq.bah, I40E_HI_DWORD(hw->aq.asq.desc_buf.pa));
333 /* Check one register to verify that config was applied */
334 reg = rd32(hw, hw->aq.asq.bal);
335 if (reg != I40E_LO_DWORD(hw->aq.asq.desc_buf.pa))
336 ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;
342 * i40e_config_arq_regs - ARQ register configuration
343 * @hw: pointer to the hardware structure
345 * Configure base address and length registers for the receive (event queue)
347 STATIC enum i40e_status_code i40e_config_arq_regs(struct i40e_hw *hw)
349 enum i40e_status_code ret_code = I40E_SUCCESS;
352 /* Clear Head and Tail */
353 wr32(hw, hw->aq.arq.head, 0);
354 wr32(hw, hw->aq.arq.tail, 0);
356 /* set starting point */
360 wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries |
361 I40E_PF_ARQLEN_ARQENABLE_MASK));
363 wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries |
364 I40E_PF_ARQLEN_ARQENABLE_MASK));
365 #endif /* INTEGRATED_VF */
366 #endif /* PF_DRIVER */
370 wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries |
371 I40E_VF_ARQLEN1_ARQENABLE_MASK));
373 wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries |
374 I40E_VF_ARQLEN1_ARQENABLE_MASK));
375 #endif /* INTEGRATED_VF */
376 #endif /* VF_DRIVER */
377 wr32(hw, hw->aq.arq.bal, I40E_LO_DWORD(hw->aq.arq.desc_buf.pa));
378 wr32(hw, hw->aq.arq.bah, I40E_HI_DWORD(hw->aq.arq.desc_buf.pa));
380 /* Update tail in the HW to post pre-allocated buffers */
381 wr32(hw, hw->aq.arq.tail, hw->aq.num_arq_entries - 1);
383 /* Check one register to verify that config was applied */
384 reg = rd32(hw, hw->aq.arq.bal);
385 if (reg != I40E_LO_DWORD(hw->aq.arq.desc_buf.pa))
386 ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;
392 * i40e_init_asq - main initialization routine for ASQ
393 * @hw: pointer to the hardware structure
395 * This is the main initialization routine for the Admin Send Queue
396 * Prior to calling this function, drivers *MUST* set the following fields
397 * in the hw->aq structure:
398 * - hw->aq.num_asq_entries
399 * - hw->aq.arq_buf_size
401 * Do *NOT* hold the lock when calling this as the memory allocation routines
402 * called are not going to be atomic context safe
404 enum i40e_status_code i40e_init_asq(struct i40e_hw *hw)
406 enum i40e_status_code ret_code = I40E_SUCCESS;
408 if (hw->aq.asq.count > 0) {
409 /* queue already initialized */
410 ret_code = I40E_ERR_NOT_READY;
411 goto init_adminq_exit;
414 /* verify input for valid configuration */
415 if ((hw->aq.num_asq_entries == 0) ||
416 (hw->aq.asq_buf_size == 0)) {
417 ret_code = I40E_ERR_CONFIG;
418 goto init_adminq_exit;
421 hw->aq.asq.next_to_use = 0;
422 hw->aq.asq.next_to_clean = 0;
424 /* allocate the ring memory */
425 ret_code = i40e_alloc_adminq_asq_ring(hw);
426 if (ret_code != I40E_SUCCESS)
427 goto init_adminq_exit;
429 /* allocate buffers in the rings */
430 ret_code = i40e_alloc_asq_bufs(hw);
431 if (ret_code != I40E_SUCCESS)
432 goto init_adminq_free_rings;
434 /* initialize base registers */
435 ret_code = i40e_config_asq_regs(hw);
436 if (ret_code != I40E_SUCCESS)
437 goto init_config_regs;
440 hw->aq.asq.count = hw->aq.num_asq_entries;
441 goto init_adminq_exit;
443 init_adminq_free_rings:
444 i40e_free_adminq_asq(hw);
448 i40e_free_asq_bufs(hw);
455 * i40e_init_arq - initialize ARQ
456 * @hw: pointer to the hardware structure
458 * The main initialization routine for the Admin Receive (Event) Queue.
459 * Prior to calling this function, drivers *MUST* set the following fields
460 * in the hw->aq structure:
461 * - hw->aq.num_asq_entries
462 * - hw->aq.arq_buf_size
464 * Do *NOT* hold the lock when calling this as the memory allocation routines
465 * called are not going to be atomic context safe
467 enum i40e_status_code i40e_init_arq(struct i40e_hw *hw)
469 enum i40e_status_code ret_code = I40E_SUCCESS;
471 if (hw->aq.arq.count > 0) {
472 /* queue already initialized */
473 ret_code = I40E_ERR_NOT_READY;
474 goto init_adminq_exit;
477 /* verify input for valid configuration */
478 if ((hw->aq.num_arq_entries == 0) ||
479 (hw->aq.arq_buf_size == 0)) {
480 ret_code = I40E_ERR_CONFIG;
481 goto init_adminq_exit;
484 hw->aq.arq.next_to_use = 0;
485 hw->aq.arq.next_to_clean = 0;
487 /* allocate the ring memory */
488 ret_code = i40e_alloc_adminq_arq_ring(hw);
489 if (ret_code != I40E_SUCCESS)
490 goto init_adminq_exit;
492 /* allocate buffers in the rings */
493 ret_code = i40e_alloc_arq_bufs(hw);
494 if (ret_code != I40E_SUCCESS)
495 goto init_adminq_free_rings;
497 /* initialize base registers */
498 ret_code = i40e_config_arq_regs(hw);
499 if (ret_code != I40E_SUCCESS)
500 goto init_adminq_free_rings;
503 hw->aq.arq.count = hw->aq.num_arq_entries;
504 goto init_adminq_exit;
506 init_adminq_free_rings:
507 i40e_free_adminq_arq(hw);
514 * i40e_shutdown_asq - shutdown the ASQ
515 * @hw: pointer to the hardware structure
517 * The main shutdown routine for the Admin Send Queue
519 enum i40e_status_code i40e_shutdown_asq(struct i40e_hw *hw)
521 enum i40e_status_code ret_code = I40E_SUCCESS;
523 i40e_acquire_spinlock(&hw->aq.asq_spinlock);
525 if (hw->aq.asq.count == 0) {
526 ret_code = I40E_ERR_NOT_READY;
527 goto shutdown_asq_out;
530 /* Stop firmware AdminQ processing */
531 wr32(hw, hw->aq.asq.head, 0);
532 wr32(hw, hw->aq.asq.tail, 0);
533 wr32(hw, hw->aq.asq.len, 0);
534 wr32(hw, hw->aq.asq.bal, 0);
535 wr32(hw, hw->aq.asq.bah, 0);
537 hw->aq.asq.count = 0; /* to indicate uninitialized queue */
539 /* free ring buffers */
540 i40e_free_asq_bufs(hw);
543 i40e_release_spinlock(&hw->aq.asq_spinlock);
548 * i40e_shutdown_arq - shutdown ARQ
549 * @hw: pointer to the hardware structure
551 * The main shutdown routine for the Admin Receive Queue
553 enum i40e_status_code i40e_shutdown_arq(struct i40e_hw *hw)
555 enum i40e_status_code ret_code = I40E_SUCCESS;
557 i40e_acquire_spinlock(&hw->aq.arq_spinlock);
559 if (hw->aq.arq.count == 0) {
560 ret_code = I40E_ERR_NOT_READY;
561 goto shutdown_arq_out;
564 /* Stop firmware AdminQ processing */
565 wr32(hw, hw->aq.arq.head, 0);
566 wr32(hw, hw->aq.arq.tail, 0);
567 wr32(hw, hw->aq.arq.len, 0);
568 wr32(hw, hw->aq.arq.bal, 0);
569 wr32(hw, hw->aq.arq.bah, 0);
571 hw->aq.arq.count = 0; /* to indicate uninitialized queue */
573 /* free ring buffers */
574 i40e_free_arq_bufs(hw);
577 i40e_release_spinlock(&hw->aq.arq_spinlock);
#ifdef PF_DRIVER
/**
 * i40e_resume_aq - resume AQ processing from 0
 * @hw: pointer to the hardware structure
 **/
STATIC void i40e_resume_aq(struct i40e_hw *hw)
{
	/* Registers are reset after PF reset */
	hw->aq.asq.next_to_use = 0;
	hw->aq.asq.next_to_clean = 0;

	i40e_config_asq_regs(hw);

	hw->aq.arq.next_to_use = 0;
	hw->aq.arq.next_to_clean = 0;

	i40e_config_arq_regs(hw);
}
#endif /* PF_DRIVER */
602 * i40e_init_adminq - main initialization routine for Admin Queue
603 * @hw: pointer to the hardware structure
605 * Prior to calling this function, drivers *MUST* set the following fields
606 * in the hw->aq structure:
607 * - hw->aq.num_asq_entries
608 * - hw->aq.num_arq_entries
609 * - hw->aq.arq_buf_size
610 * - hw->aq.asq_buf_size
612 enum i40e_status_code i40e_init_adminq(struct i40e_hw *hw)
615 u16 cfg_ptr, oem_hi, oem_lo;
616 u16 eetrack_lo, eetrack_hi;
618 enum i40e_status_code ret_code;
623 /* verify input for valid configuration */
624 if ((hw->aq.num_arq_entries == 0) ||
625 (hw->aq.num_asq_entries == 0) ||
626 (hw->aq.arq_buf_size == 0) ||
627 (hw->aq.asq_buf_size == 0)) {
628 ret_code = I40E_ERR_CONFIG;
629 goto init_adminq_exit;
631 i40e_init_spinlock(&hw->aq.asq_spinlock);
632 i40e_init_spinlock(&hw->aq.arq_spinlock);
634 /* Set up register offsets */
635 i40e_adminq_init_regs(hw);
637 /* setup ASQ command write back timeout */
638 hw->aq.asq_cmd_timeout = I40E_ASQ_CMD_TIMEOUT;
640 /* allocate the ASQ */
641 ret_code = i40e_init_asq(hw);
642 if (ret_code != I40E_SUCCESS)
643 goto init_adminq_destroy_spinlocks;
645 /* allocate the ARQ */
646 ret_code = i40e_init_arq(hw);
647 if (ret_code != I40E_SUCCESS)
648 goto init_adminq_free_asq;
652 /* VF has no need of firmware */
654 goto init_adminq_exit;
656 /* There are some cases where the firmware may not be quite ready
657 * for AdminQ operations, so we retry the AdminQ setup a few times
658 * if we see timeouts in this first AQ call.
661 ret_code = i40e_aq_get_firmware_version(hw,
668 if (ret_code != I40E_ERR_ADMIN_QUEUE_TIMEOUT)
671 i40e_msec_delay(100);
673 } while (retry < 10);
674 if (ret_code != I40E_SUCCESS)
675 goto init_adminq_free_arq;
677 /* get the NVM version info */
678 i40e_read_nvm_word(hw, I40E_SR_NVM_DEV_STARTER_VERSION,
680 i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_LO, &eetrack_lo);
681 i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_HI, &eetrack_hi);
682 hw->nvm.eetrack = (eetrack_hi << 16) | eetrack_lo;
683 i40e_read_nvm_word(hw, I40E_SR_BOOT_CONFIG_PTR, &cfg_ptr);
684 i40e_read_nvm_word(hw, (cfg_ptr + I40E_NVM_OEM_VER_OFF),
686 i40e_read_nvm_word(hw, (cfg_ptr + (I40E_NVM_OEM_VER_OFF + 1)),
688 hw->nvm.oem_ver = ((u32)oem_hi << 16) | oem_lo;
690 /* The ability to RX (not drop) 802.1ad frames was added in API 1.7 */
691 if ((hw->aq.api_maj_ver > 1) ||
692 ((hw->aq.api_maj_ver == 1) &&
693 (hw->aq.api_min_ver >= 7)))
694 hw->flags |= I40E_HW_FLAG_802_1AD_CAPABLE;
696 if (hw->mac.type == I40E_MAC_XL710 &&
697 hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
698 hw->aq.api_min_ver >= I40E_MINOR_VER_GET_LINK_INFO_XL710) {
699 hw->flags |= I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE;
702 if (hw->aq.api_maj_ver > I40E_FW_API_VERSION_MAJOR) {
703 ret_code = I40E_ERR_FIRMWARE_API_VERSION;
704 goto init_adminq_free_arq;
707 /* pre-emptive resource lock release */
708 i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL);
709 hw->nvm_release_on_done = false;
710 hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
712 #endif /* PF_DRIVER */
713 ret_code = I40E_SUCCESS;
716 goto init_adminq_exit;
719 init_adminq_free_arq:
720 i40e_shutdown_arq(hw);
722 init_adminq_free_asq:
723 i40e_shutdown_asq(hw);
724 init_adminq_destroy_spinlocks:
725 i40e_destroy_spinlock(&hw->aq.asq_spinlock);
726 i40e_destroy_spinlock(&hw->aq.arq_spinlock);
733 * i40e_shutdown_adminq - shutdown routine for the Admin Queue
734 * @hw: pointer to the hardware structure
736 enum i40e_status_code i40e_shutdown_adminq(struct i40e_hw *hw)
738 enum i40e_status_code ret_code = I40E_SUCCESS;
740 if (i40e_check_asq_alive(hw))
741 i40e_aq_queue_shutdown(hw, true);
743 i40e_shutdown_asq(hw);
744 i40e_shutdown_arq(hw);
745 i40e_destroy_spinlock(&hw->aq.asq_spinlock);
746 i40e_destroy_spinlock(&hw->aq.arq_spinlock);
749 i40e_free_virt_mem(hw, &hw->nvm_buff);
755 * i40e_clean_asq - cleans Admin send queue
756 * @hw: pointer to the hardware structure
758 * returns the number of free desc
760 u16 i40e_clean_asq(struct i40e_hw *hw)
762 struct i40e_adminq_ring *asq = &(hw->aq.asq);
763 struct i40e_asq_cmd_details *details;
764 u16 ntc = asq->next_to_clean;
765 struct i40e_aq_desc desc_cb;
766 struct i40e_aq_desc *desc;
768 desc = I40E_ADMINQ_DESC(*asq, ntc);
769 details = I40E_ADMINQ_DETAILS(*asq, ntc);
770 while (rd32(hw, hw->aq.asq.head) != ntc) {
771 i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
772 "ntc %d head %d.\n", ntc, rd32(hw, hw->aq.asq.head));
774 if (details->callback) {
775 I40E_ADMINQ_CALLBACK cb_func =
776 (I40E_ADMINQ_CALLBACK)details->callback;
777 i40e_memcpy(&desc_cb, desc, sizeof(struct i40e_aq_desc),
779 cb_func(hw, &desc_cb);
781 i40e_memset(desc, 0, sizeof(*desc), I40E_DMA_MEM);
782 i40e_memset(details, 0, sizeof(*details), I40E_NONDMA_MEM);
784 if (ntc == asq->count)
786 desc = I40E_ADMINQ_DESC(*asq, ntc);
787 details = I40E_ADMINQ_DETAILS(*asq, ntc);
790 asq->next_to_clean = ntc;
792 return I40E_DESC_UNUSED(asq);
796 * i40e_asq_done - check if FW has processed the Admin Send Queue
797 * @hw: pointer to the hw struct
799 * Returns true if the firmware has processed all descriptors on the
800 * admin send queue. Returns false if there are still requests pending.
803 bool i40e_asq_done(struct i40e_hw *hw)
805 STATIC bool i40e_asq_done(struct i40e_hw *hw)
808 /* AQ designers suggest use of head for better
809 * timing reliability than DD bit
811 return rd32(hw, hw->aq.asq.head) == hw->aq.asq.next_to_use;
816 * i40e_asq_send_command - send command to Admin Queue
817 * @hw: pointer to the hw struct
818 * @desc: prefilled descriptor describing the command (non DMA mem)
819 * @buff: buffer to use for indirect commands
820 * @buff_size: size of buffer for indirect commands
821 * @cmd_details: pointer to command details structure
823 * This is the main send command driver routine for the Admin Queue send
824 * queue. It runs the queue, cleans the queue, etc
826 enum i40e_status_code i40e_asq_send_command(struct i40e_hw *hw,
827 struct i40e_aq_desc *desc,
828 void *buff, /* can be NULL */
830 struct i40e_asq_cmd_details *cmd_details)
832 enum i40e_status_code status = I40E_SUCCESS;
833 struct i40e_dma_mem *dma_buff = NULL;
834 struct i40e_asq_cmd_details *details;
835 struct i40e_aq_desc *desc_on_ring;
836 bool cmd_completed = false;
840 i40e_acquire_spinlock(&hw->aq.asq_spinlock);
842 hw->aq.asq_last_status = I40E_AQ_RC_OK;
844 if (hw->aq.asq.count == 0) {
845 i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
846 "AQTX: Admin queue not initialized.\n");
847 status = I40E_ERR_QUEUE_EMPTY;
848 goto asq_send_command_error;
851 val = rd32(hw, hw->aq.asq.head);
852 if (val >= hw->aq.num_asq_entries) {
853 i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
854 "AQTX: head overrun at %d\n", val);
855 status = I40E_ERR_QUEUE_EMPTY;
856 goto asq_send_command_error;
859 details = I40E_ADMINQ_DETAILS(hw->aq.asq, hw->aq.asq.next_to_use);
863 sizeof(struct i40e_asq_cmd_details),
864 I40E_NONDMA_TO_NONDMA);
866 /* If the cmd_details are defined copy the cookie. The
867 * CPU_TO_LE32 is not needed here because the data is ignored
868 * by the FW, only used by the driver
870 if (details->cookie) {
872 CPU_TO_LE32(I40E_HI_DWORD(details->cookie));
874 CPU_TO_LE32(I40E_LO_DWORD(details->cookie));
877 i40e_memset(details, 0,
878 sizeof(struct i40e_asq_cmd_details),
882 /* clear requested flags and then set additional flags if defined */
883 desc->flags &= ~CPU_TO_LE16(details->flags_dis);
884 desc->flags |= CPU_TO_LE16(details->flags_ena);
886 if (buff_size > hw->aq.asq_buf_size) {
888 I40E_DEBUG_AQ_MESSAGE,
889 "AQTX: Invalid buffer size: %d.\n",
891 status = I40E_ERR_INVALID_SIZE;
892 goto asq_send_command_error;
895 if (details->postpone && !details->async) {
897 I40E_DEBUG_AQ_MESSAGE,
898 "AQTX: Async flag not set along with postpone flag");
899 status = I40E_ERR_PARAM;
900 goto asq_send_command_error;
903 /* call clean and check queue available function to reclaim the
904 * descriptors that were processed by FW, the function returns the
905 * number of desc available
907 /* the clean function called here could be called in a separate thread
908 * in case of asynchronous completions
910 if (i40e_clean_asq(hw) == 0) {
912 I40E_DEBUG_AQ_MESSAGE,
913 "AQTX: Error queue is full.\n");
914 status = I40E_ERR_ADMIN_QUEUE_FULL;
915 goto asq_send_command_error;
918 /* initialize the temp desc pointer with the right desc */
919 desc_on_ring = I40E_ADMINQ_DESC(hw->aq.asq, hw->aq.asq.next_to_use);
921 /* if the desc is available copy the temp desc to the right place */
922 i40e_memcpy(desc_on_ring, desc, sizeof(struct i40e_aq_desc),
925 /* if buff is not NULL assume indirect command */
927 dma_buff = &(hw->aq.asq.r.asq_bi[hw->aq.asq.next_to_use]);
928 /* copy the user buff into the respective DMA buff */
929 i40e_memcpy(dma_buff->va, buff, buff_size,
931 desc_on_ring->datalen = CPU_TO_LE16(buff_size);
933 /* Update the address values in the desc with the pa value
934 * for respective buffer
936 desc_on_ring->params.external.addr_high =
937 CPU_TO_LE32(I40E_HI_DWORD(dma_buff->pa));
938 desc_on_ring->params.external.addr_low =
939 CPU_TO_LE32(I40E_LO_DWORD(dma_buff->pa));
943 i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQTX: desc and buffer:\n");
944 i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc_on_ring,
946 (hw->aq.asq.next_to_use)++;
947 if (hw->aq.asq.next_to_use == hw->aq.asq.count)
948 hw->aq.asq.next_to_use = 0;
949 if (!details->postpone)
950 wr32(hw, hw->aq.asq.tail, hw->aq.asq.next_to_use);
952 /* if cmd_details are not defined or async flag is not set,
953 * we need to wait for desc write back
955 if (!details->async && !details->postpone) {
959 /* AQ designers suggest use of head for better
960 * timing reliability than DD bit
962 if (i40e_asq_done(hw))
966 } while (total_delay < hw->aq.asq_cmd_timeout);
969 /* if ready, copy the desc back to temp */
970 if (i40e_asq_done(hw)) {
971 i40e_memcpy(desc, desc_on_ring, sizeof(struct i40e_aq_desc),
974 i40e_memcpy(buff, dma_buff->va, buff_size,
976 retval = LE16_TO_CPU(desc->retval);
979 I40E_DEBUG_AQ_MESSAGE,
980 "AQTX: Command completed with error 0x%X.\n",
983 /* strip off FW internal code */
986 cmd_completed = true;
987 if ((enum i40e_admin_queue_err)retval == I40E_AQ_RC_OK)
988 status = I40E_SUCCESS;
990 status = I40E_ERR_ADMIN_QUEUE_ERROR;
991 hw->aq.asq_last_status = (enum i40e_admin_queue_err)retval;
994 i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
995 "AQTX: desc and buffer writeback:\n");
996 i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, buff, buff_size);
998 /* save writeback aq if requested */
999 if (details->wb_desc)
1000 i40e_memcpy(details->wb_desc, desc_on_ring,
1001 sizeof(struct i40e_aq_desc), I40E_DMA_TO_NONDMA);
1003 /* update the error if time out occurred */
1004 if ((!cmd_completed) &&
1005 (!details->async && !details->postpone)) {
1007 I40E_DEBUG_AQ_MESSAGE,
1008 "AQTX: Writeback timeout.\n");
1009 status = I40E_ERR_ADMIN_QUEUE_TIMEOUT;
1012 asq_send_command_error:
1013 i40e_release_spinlock(&hw->aq.asq_spinlock);
1018 * i40e_fill_default_direct_cmd_desc - AQ descriptor helper function
1019 * @desc: pointer to the temp descriptor (non DMA mem)
1020 * @opcode: the opcode can be used to decide which flags to turn off or on
1022 * Fill the desc with default values
1024 void i40e_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc,
1027 /* zero out the desc */
1028 i40e_memset((void *)desc, 0, sizeof(struct i40e_aq_desc),
1030 desc->opcode = CPU_TO_LE16(opcode);
1031 desc->flags = CPU_TO_LE16(I40E_AQ_FLAG_SI);
1035 * i40e_clean_arq_element
1036 * @hw: pointer to the hw struct
1037 * @e: event info from the receive descriptor, includes any buffers
1038 * @pending: number of events that could be left to process
1040 * This function cleans one Admin Receive Queue element and returns
1041 * the contents through e. It can also return how many events are
1042 * left to process through 'pending'
1044 enum i40e_status_code i40e_clean_arq_element(struct i40e_hw *hw,
1045 struct i40e_arq_event_info *e,
1048 enum i40e_status_code ret_code = I40E_SUCCESS;
1049 u16 ntc = hw->aq.arq.next_to_clean;
1050 struct i40e_aq_desc *desc;
1051 struct i40e_dma_mem *bi;
1057 /* pre-clean the event info */
1058 i40e_memset(&e->desc, 0, sizeof(e->desc), I40E_NONDMA_MEM);
1060 /* take the lock before we start messing with the ring */
1061 i40e_acquire_spinlock(&hw->aq.arq_spinlock);
1063 if (hw->aq.arq.count == 0) {
1064 i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
1065 "AQRX: Admin queue not initialized.\n");
1066 ret_code = I40E_ERR_QUEUE_EMPTY;
1067 goto clean_arq_element_err;
1070 /* set next_to_use to head */
1072 #ifdef INTEGRATED_VF
1073 if (!i40e_is_vf(hw))
1074 ntu = (rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK);
1076 ntu = (rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK);
1077 #endif /* INTEGRATED_VF */
1078 #endif /* PF_DRIVER */
1080 #ifdef INTEGRATED_VF
1082 ntu = (rd32(hw, hw->aq.arq.head) & I40E_VF_ARQH1_ARQH_MASK);
1084 ntu = (rd32(hw, hw->aq.arq.head) & I40E_VF_ARQH1_ARQH_MASK);
1085 #endif /* INTEGRATED_VF */
1086 #endif /* VF_DRIVER */
1088 /* nothing to do - shouldn't need to update ring's values */
1089 ret_code = I40E_ERR_ADMIN_QUEUE_NO_WORK;
1090 goto clean_arq_element_out;
1093 /* now clean the next descriptor */
1094 desc = I40E_ADMINQ_DESC(hw->aq.arq, ntc);
1097 hw->aq.arq_last_status =
1098 (enum i40e_admin_queue_err)LE16_TO_CPU(desc->retval);
1099 flags = LE16_TO_CPU(desc->flags);
1100 if (flags & I40E_AQ_FLAG_ERR) {
1101 ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;
1103 I40E_DEBUG_AQ_MESSAGE,
1104 "AQRX: Event received with error 0x%X.\n",
1105 hw->aq.arq_last_status);
1108 i40e_memcpy(&e->desc, desc, sizeof(struct i40e_aq_desc),
1109 I40E_DMA_TO_NONDMA);
1110 datalen = LE16_TO_CPU(desc->datalen);
1111 e->msg_len = min(datalen, e->buf_len);
1112 if (e->msg_buf != NULL && (e->msg_len != 0))
1113 i40e_memcpy(e->msg_buf,
1114 hw->aq.arq.r.arq_bi[desc_idx].va,
1115 e->msg_len, I40E_DMA_TO_NONDMA);
1117 i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQRX: desc and buffer:\n");
1118 i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, e->msg_buf,
1119 hw->aq.arq_buf_size);
1121 /* Restore the original datalen and buffer address in the desc,
1122 * FW updates datalen to indicate the event message
1125 bi = &hw->aq.arq.r.arq_bi[ntc];
1126 i40e_memset((void *)desc, 0, sizeof(struct i40e_aq_desc), I40E_DMA_MEM);
1128 desc->flags = CPU_TO_LE16(I40E_AQ_FLAG_BUF);
1129 if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF)
1130 desc->flags |= CPU_TO_LE16(I40E_AQ_FLAG_LB);
1131 desc->datalen = CPU_TO_LE16((u16)bi->size);
1132 desc->params.external.addr_high = CPU_TO_LE32(I40E_HI_DWORD(bi->pa));
1133 desc->params.external.addr_low = CPU_TO_LE32(I40E_LO_DWORD(bi->pa));
1135 /* set tail = the last cleaned desc index. */
1136 wr32(hw, hw->aq.arq.tail, ntc);
1137 /* ntc is updated to tail + 1 */
1139 if (ntc == hw->aq.num_arq_entries)
1141 hw->aq.arq.next_to_clean = ntc;
1142 hw->aq.arq.next_to_use = ntu;
1145 i40e_nvmupd_check_wait_event(hw, LE16_TO_CPU(e->desc.opcode));
1146 #endif /* PF_DRIVER */
1147 clean_arq_element_out:
1148 /* Set pending if needed, unlock and return */
1149 if (pending != NULL)
1150 *pending = (ntc > ntu ? hw->aq.arq.count : 0) + (ntu - ntc);
1151 clean_arq_element_err:
1152 i40e_release_spinlock(&hw->aq.arq_spinlock);