New upstream version 18.02
[deb_dpdk.git] / drivers / net / i40e / base / i40e_adminq.c
1 /*******************************************************************************
2
3 Copyright (c) 2013 - 2015, Intel Corporation
4 All rights reserved.
5
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
8
9  1. Redistributions of source code must retain the above copyright notice,
10     this list of conditions and the following disclaimer.
11
12  2. Redistributions in binary form must reproduce the above copyright
13     notice, this list of conditions and the following disclaimer in the
14     documentation and/or other materials provided with the distribution.
15
16  3. Neither the name of the Intel Corporation nor the names of its
17     contributors may be used to endorse or promote products derived from
18     this software without specific prior written permission.
19
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
31
32 ***************************************************************************/
33
34 #include "i40e_status.h"
35 #include "i40e_type.h"
36 #include "i40e_register.h"
37 #include "i40e_adminq.h"
38 #include "i40e_prototype.h"
39
/**
 *  i40e_adminq_init_regs - Initialize AdminQ registers
 *  @hw: pointer to the hardware structure
 *
 *  Records the head/tail/len/base-address register offsets of both admin
 *  queues in the local hw->aq struct, selecting the VF register set when
 *  running as a VF and (in PF builds) the PF set otherwise.
 *
 *  This assumes the alloc_asq and alloc_arq functions have already been called
 **/
STATIC void i40e_adminq_init_regs(struct i40e_hw *hw)
{
	/* set head and tail registers in our local struct */
	if (i40e_is_vf(hw)) {
		hw->aq.asq.tail = I40E_VF_ATQT1;
		hw->aq.asq.head = I40E_VF_ATQH1;
		hw->aq.asq.len  = I40E_VF_ATQLEN1;
		hw->aq.asq.bal  = I40E_VF_ATQBAL1;
		hw->aq.asq.bah  = I40E_VF_ATQBAH1;
		hw->aq.arq.tail = I40E_VF_ARQT1;
		hw->aq.arq.head = I40E_VF_ARQH1;
		hw->aq.arq.len  = I40E_VF_ARQLEN1;
		hw->aq.arq.bal  = I40E_VF_ARQBAL1;
		hw->aq.arq.bah  = I40E_VF_ARQBAH1;
#ifdef PF_DRIVER
	/* the PF register set is only compiled into PF builds */
	} else {
		hw->aq.asq.tail = I40E_PF_ATQT;
		hw->aq.asq.head = I40E_PF_ATQH;
		hw->aq.asq.len  = I40E_PF_ATQLEN;
		hw->aq.asq.bal  = I40E_PF_ATQBAL;
		hw->aq.asq.bah  = I40E_PF_ATQBAH;
		hw->aq.arq.tail = I40E_PF_ARQT;
		hw->aq.arq.head = I40E_PF_ARQH;
		hw->aq.arq.len  = I40E_PF_ARQLEN;
		hw->aq.arq.bal  = I40E_PF_ARQBAL;
		hw->aq.arq.bah  = I40E_PF_ARQBAH;
#endif
	}
}
75
76 /**
77  *  i40e_alloc_adminq_asq_ring - Allocate Admin Queue send rings
78  *  @hw: pointer to the hardware structure
79  **/
80 enum i40e_status_code i40e_alloc_adminq_asq_ring(struct i40e_hw *hw)
81 {
82         enum i40e_status_code ret_code;
83
84         ret_code = i40e_allocate_dma_mem(hw, &hw->aq.asq.desc_buf,
85                                          i40e_mem_atq_ring,
86                                          (hw->aq.num_asq_entries *
87                                          sizeof(struct i40e_aq_desc)),
88                                          I40E_ADMINQ_DESC_ALIGNMENT);
89         if (ret_code)
90                 return ret_code;
91
92         ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.cmd_buf,
93                                           (hw->aq.num_asq_entries *
94                                           sizeof(struct i40e_asq_cmd_details)));
95         if (ret_code) {
96                 i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
97                 return ret_code;
98         }
99
100         return ret_code;
101 }
102
103 /**
104  *  i40e_alloc_adminq_arq_ring - Allocate Admin Queue receive rings
105  *  @hw: pointer to the hardware structure
106  **/
107 enum i40e_status_code i40e_alloc_adminq_arq_ring(struct i40e_hw *hw)
108 {
109         enum i40e_status_code ret_code;
110
111         ret_code = i40e_allocate_dma_mem(hw, &hw->aq.arq.desc_buf,
112                                          i40e_mem_arq_ring,
113                                          (hw->aq.num_arq_entries *
114                                          sizeof(struct i40e_aq_desc)),
115                                          I40E_ADMINQ_DESC_ALIGNMENT);
116
117         return ret_code;
118 }
119
120 /**
121  *  i40e_free_adminq_asq - Free Admin Queue send rings
122  *  @hw: pointer to the hardware structure
123  *
124  *  This assumes the posted send buffers have already been cleaned
125  *  and de-allocated
126  **/
127 void i40e_free_adminq_asq(struct i40e_hw *hw)
128 {
129         i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
130 }
131
132 /**
133  *  i40e_free_adminq_arq - Free Admin Queue receive rings
134  *  @hw: pointer to the hardware structure
135  *
136  *  This assumes the posted receive buffers have already been cleaned
137  *  and de-allocated
138  **/
139 void i40e_free_adminq_arq(struct i40e_hw *hw)
140 {
141         i40e_free_dma_mem(hw, &hw->aq.arq.desc_buf);
142 }
143
/**
 *  i40e_alloc_arq_bufs - Allocate pre-posted buffers for the receive queue
 *  @hw: pointer to the hardware structure
 *
 *  Allocates one DMA buffer per receive descriptor and programs each
 *  descriptor with the buffer's address and size so firmware can post
 *  events as soon as the queue is enabled.  On failure, everything
 *  allocated so far is released before returning the error.
 **/
STATIC enum i40e_status_code i40e_alloc_arq_bufs(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code;
	struct i40e_aq_desc *desc;
	struct i40e_dma_mem *bi;
	int i;

	/* We'll be allocating the buffer info memory first, then we can
	 * allocate the mapped buffers for the event processing
	 */

	/* buffer_info structures do not need alignment */
	ret_code = i40e_allocate_virt_mem(hw, &hw->aq.arq.dma_head,
		(hw->aq.num_arq_entries * sizeof(struct i40e_dma_mem)));
	if (ret_code)
		goto alloc_arq_bufs;	/* nothing to unwind yet */
	hw->aq.arq.r.arq_bi = (struct i40e_dma_mem *)hw->aq.arq.dma_head.va;

	/* allocate the mapped buffers */
	for (i = 0; i < hw->aq.num_arq_entries; i++) {
		bi = &hw->aq.arq.r.arq_bi[i];
		ret_code = i40e_allocate_dma_mem(hw, bi,
						 i40e_mem_arq_buf,
						 hw->aq.arq_buf_size,
						 I40E_ADMINQ_DESC_ALIGNMENT);
		if (ret_code)
			goto unwind_alloc_arq_bufs;

		/* now configure the descriptors for use */
		desc = I40E_ADMINQ_DESC(hw->aq.arq, i);

		desc->flags = CPU_TO_LE16(I40E_AQ_FLAG_BUF);
		/* buffers larger than I40E_AQ_LARGE_BUF need the LB flag */
		if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF)
			desc->flags |= CPU_TO_LE16(I40E_AQ_FLAG_LB);
		desc->opcode = 0;
		/* This is in accordance with Admin queue design, there is no
		 * register for buffer size configuration
		 */
		desc->datalen = CPU_TO_LE16((u16)bi->size);
		desc->retval = 0;
		desc->cookie_high = 0;
		desc->cookie_low = 0;
		desc->params.external.addr_high =
			CPU_TO_LE32(I40E_HI_DWORD(bi->pa));
		desc->params.external.addr_low =
			CPU_TO_LE32(I40E_LO_DWORD(bi->pa));
		desc->params.external.param0 = 0;
		desc->params.external.param1 = 0;
	}

alloc_arq_bufs:
	/* success path and the "details alloc failed" early exit */
	return ret_code;

unwind_alloc_arq_bufs:
	/* don't try to free the one that failed... */
	i--;
	for (; i >= 0; i--)
		i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
	i40e_free_virt_mem(hw, &hw->aq.arq.dma_head);

	return ret_code;
}
210
211 /**
212  *  i40e_alloc_asq_bufs - Allocate empty buffer structs for the send queue
213  *  @hw: pointer to the hardware structure
214  **/
215 STATIC enum i40e_status_code i40e_alloc_asq_bufs(struct i40e_hw *hw)
216 {
217         enum i40e_status_code ret_code;
218         struct i40e_dma_mem *bi;
219         int i;
220
221         /* No mapped memory needed yet, just the buffer info structures */
222         ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.dma_head,
223                 (hw->aq.num_asq_entries * sizeof(struct i40e_dma_mem)));
224         if (ret_code)
225                 goto alloc_asq_bufs;
226         hw->aq.asq.r.asq_bi = (struct i40e_dma_mem *)hw->aq.asq.dma_head.va;
227
228         /* allocate the mapped buffers */
229         for (i = 0; i < hw->aq.num_asq_entries; i++) {
230                 bi = &hw->aq.asq.r.asq_bi[i];
231                 ret_code = i40e_allocate_dma_mem(hw, bi,
232                                                  i40e_mem_asq_buf,
233                                                  hw->aq.asq_buf_size,
234                                                  I40E_ADMINQ_DESC_ALIGNMENT);
235                 if (ret_code)
236                         goto unwind_alloc_asq_bufs;
237         }
238 alloc_asq_bufs:
239         return ret_code;
240
241 unwind_alloc_asq_bufs:
242         /* don't try to free the one that failed... */
243         i--;
244         for (; i >= 0; i--)
245                 i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
246         i40e_free_virt_mem(hw, &hw->aq.asq.dma_head);
247
248         return ret_code;
249 }
250
251 /**
252  *  i40e_free_arq_bufs - Free receive queue buffer info elements
253  *  @hw: pointer to the hardware structure
254  **/
255 STATIC void i40e_free_arq_bufs(struct i40e_hw *hw)
256 {
257         int i;
258
259         /* free descriptors */
260         for (i = 0; i < hw->aq.num_arq_entries; i++)
261                 i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
262
263         /* free the descriptor memory */
264         i40e_free_dma_mem(hw, &hw->aq.arq.desc_buf);
265
266         /* free the dma header */
267         i40e_free_virt_mem(hw, &hw->aq.arq.dma_head);
268 }
269
270 /**
271  *  i40e_free_asq_bufs - Free send queue buffer info elements
272  *  @hw: pointer to the hardware structure
273  **/
274 STATIC void i40e_free_asq_bufs(struct i40e_hw *hw)
275 {
276         int i;
277
278         /* only unmap if the address is non-NULL */
279         for (i = 0; i < hw->aq.num_asq_entries; i++)
280                 if (hw->aq.asq.r.asq_bi[i].pa)
281                         i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
282
283         /* free the buffer info list */
284         i40e_free_virt_mem(hw, &hw->aq.asq.cmd_buf);
285
286         /* free the descriptor memory */
287         i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
288
289         /* free the dma header */
290         i40e_free_virt_mem(hw, &hw->aq.asq.dma_head);
291 }
292
/**
 *  i40e_config_asq_regs - configure ASQ registers
 *  @hw: pointer to the hardware structure
 *
 *  Configure base address and length registers for the transmit queue.
 *  Returns I40E_ERR_ADMIN_QUEUE_ERROR when the base-address write does
 *  not read back, i.e. the device did not accept the configuration.
 **/
STATIC enum i40e_status_code i40e_config_asq_regs(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;
	u32 reg = 0;

	/* Clear Head and Tail */
	wr32(hw, hw->aq.asq.head, 0);
	wr32(hw, hw->aq.asq.tail, 0);

	/* set starting point */
	/* the length register also carries the queue-enable bit; the
	 * PF/VF variants are selected at build time, and INTEGRATED_VF
	 * builds pick the register set at runtime via i40e_is_vf()
	 */
#ifdef PF_DRIVER
#ifdef INTEGRATED_VF
	if (!i40e_is_vf(hw))
		wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries |
					  I40E_PF_ATQLEN_ATQENABLE_MASK));
#else
	wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries |
				  I40E_PF_ATQLEN_ATQENABLE_MASK));
#endif /* INTEGRATED_VF */
#endif /* PF_DRIVER */
#ifdef VF_DRIVER
#ifdef INTEGRATED_VF
	if (i40e_is_vf(hw))
		wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries |
					  I40E_VF_ATQLEN1_ATQENABLE_MASK));
#else
	wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries |
				  I40E_VF_ATQLEN1_ATQENABLE_MASK));
#endif /* INTEGRATED_VF */
#endif /* VF_DRIVER */
	wr32(hw, hw->aq.asq.bal, I40E_LO_DWORD(hw->aq.asq.desc_buf.pa));
	wr32(hw, hw->aq.asq.bah, I40E_HI_DWORD(hw->aq.asq.desc_buf.pa));

	/* Check one register to verify that config was applied */
	reg = rd32(hw, hw->aq.asq.bal);
	if (reg != I40E_LO_DWORD(hw->aq.asq.desc_buf.pa))
		ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;

	return ret_code;
}
339
/**
 *  i40e_config_arq_regs - ARQ register configuration
 *  @hw: pointer to the hardware structure
 *
 *  Configure base address and length registers for the receive (event
 *  queue), then bump the tail so the pre-allocated receive buffers are
 *  handed to firmware.  Returns I40E_ERR_ADMIN_QUEUE_ERROR when the
 *  base-address write does not read back.
 **/
STATIC enum i40e_status_code i40e_config_arq_regs(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;
	u32 reg = 0;

	/* Clear Head and Tail */
	wr32(hw, hw->aq.arq.head, 0);
	wr32(hw, hw->aq.arq.tail, 0);

	/* set starting point */
	/* the length register also carries the queue-enable bit; the
	 * PF/VF variants are selected at build time, and INTEGRATED_VF
	 * builds pick the register set at runtime via i40e_is_vf()
	 */
#ifdef PF_DRIVER
#ifdef INTEGRATED_VF
	if (!i40e_is_vf(hw))
		wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries |
					  I40E_PF_ARQLEN_ARQENABLE_MASK));
#else
	wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries |
				  I40E_PF_ARQLEN_ARQENABLE_MASK));
#endif /* INTEGRATED_VF */
#endif /* PF_DRIVER */
#ifdef VF_DRIVER
#ifdef INTEGRATED_VF
	if (i40e_is_vf(hw))
		wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries |
					  I40E_VF_ARQLEN1_ARQENABLE_MASK));
#else
	wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries |
				  I40E_VF_ARQLEN1_ARQENABLE_MASK));
#endif /* INTEGRATED_VF */
#endif /* VF_DRIVER */
	wr32(hw, hw->aq.arq.bal, I40E_LO_DWORD(hw->aq.arq.desc_buf.pa));
	wr32(hw, hw->aq.arq.bah, I40E_HI_DWORD(hw->aq.arq.desc_buf.pa));

	/* Update tail in the HW to post pre-allocated buffers */
	wr32(hw, hw->aq.arq.tail, hw->aq.num_arq_entries - 1);

	/* Check one register to verify that config was applied */
	reg = rd32(hw, hw->aq.arq.bal);
	if (reg != I40E_LO_DWORD(hw->aq.arq.desc_buf.pa))
		ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;

	return ret_code;
}
389
/**
 *  i40e_init_asq - main initialization routine for ASQ
 *  @hw: pointer to the hardware structure
 *
 *  This is the main initialization routine for the Admin Send Queue
 *  Prior to calling this function, drivers *MUST* set the following fields
 *  in the hw->aq structure:
 *     - hw->aq.num_asq_entries
 *     - hw->aq.asq_buf_size
 *
 *  Do *NOT* hold the lock when calling this as the memory allocation routines
 *  called are not going to be atomic context safe
 **/
enum i40e_status_code i40e_init_asq(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;

	/* a non-zero count marks the queue as live */
	if (hw->aq.asq.count > 0) {
		/* queue already initialized */
		ret_code = I40E_ERR_NOT_READY;
		goto init_adminq_exit;
	}

	/* verify input for valid configuration */
	if ((hw->aq.num_asq_entries == 0) ||
	    (hw->aq.asq_buf_size == 0)) {
		ret_code = I40E_ERR_CONFIG;
		goto init_adminq_exit;
	}

	hw->aq.asq.next_to_use = 0;
	hw->aq.asq.next_to_clean = 0;

	/* allocate the ring memory */
	ret_code = i40e_alloc_adminq_asq_ring(hw);
	if (ret_code != I40E_SUCCESS)
		goto init_adminq_exit;

	/* allocate buffers in the rings */
	ret_code = i40e_alloc_asq_bufs(hw);
	if (ret_code != I40E_SUCCESS)
		goto init_adminq_free_rings;

	/* initialize base registers */
	ret_code = i40e_config_asq_regs(hw);
	if (ret_code != I40E_SUCCESS)
		goto init_adminq_free_rings;

	/* success! */
	hw->aq.asq.count = hw->aq.num_asq_entries;
	goto init_adminq_exit;

init_adminq_free_rings:
	i40e_free_adminq_asq(hw);

init_adminq_exit:
	return ret_code;
}
448
/**
 *  i40e_init_arq - initialize ARQ
 *  @hw: pointer to the hardware structure
 *
 *  The main initialization routine for the Admin Receive (Event) Queue.
 *  Prior to calling this function, drivers *MUST* set the following fields
 *  in the hw->aq structure:
 *     - hw->aq.num_arq_entries
 *     - hw->aq.arq_buf_size
 *
 *  Do *NOT* hold the lock when calling this as the memory allocation routines
 *  called are not going to be atomic context safe
 **/
enum i40e_status_code i40e_init_arq(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;

	/* a non-zero count marks the queue as live */
	if (hw->aq.arq.count > 0) {
		/* queue already initialized */
		ret_code = I40E_ERR_NOT_READY;
		goto init_adminq_exit;
	}

	/* verify input for valid configuration */
	if ((hw->aq.num_arq_entries == 0) ||
	    (hw->aq.arq_buf_size == 0)) {
		ret_code = I40E_ERR_CONFIG;
		goto init_adminq_exit;
	}

	hw->aq.arq.next_to_use = 0;
	hw->aq.arq.next_to_clean = 0;

	/* allocate the ring memory */
	ret_code = i40e_alloc_adminq_arq_ring(hw);
	if (ret_code != I40E_SUCCESS)
		goto init_adminq_exit;

	/* allocate buffers in the rings */
	ret_code = i40e_alloc_arq_bufs(hw);
	if (ret_code != I40E_SUCCESS)
		goto init_adminq_free_rings;

	/* initialize base registers */
	ret_code = i40e_config_arq_regs(hw);
	if (ret_code != I40E_SUCCESS)
		goto init_adminq_free_rings;

	/* success! */
	hw->aq.arq.count = hw->aq.num_arq_entries;
	goto init_adminq_exit;

init_adminq_free_rings:
	i40e_free_adminq_arq(hw);

init_adminq_exit:
	return ret_code;
}
507
/**
 *  i40e_shutdown_asq - shutdown the ASQ
 *  @hw: pointer to the hardware structure
 *
 *  The main shutdown routine for the Admin Send Queue.  Disables the
 *  queue in hardware by zeroing its registers, marks it uninitialized
 *  and frees all its buffers.  Returns I40E_ERR_NOT_READY when the
 *  queue was not initialized.
 **/
enum i40e_status_code i40e_shutdown_asq(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;

	i40e_acquire_spinlock(&hw->aq.asq_spinlock);

	if (hw->aq.asq.count == 0) {
		/* never initialized, or already shut down */
		ret_code = I40E_ERR_NOT_READY;
		goto shutdown_asq_out;
	}

	/* Stop firmware AdminQ processing */
	wr32(hw, hw->aq.asq.head, 0);
	wr32(hw, hw->aq.asq.tail, 0);
	wr32(hw, hw->aq.asq.len, 0);
	wr32(hw, hw->aq.asq.bal, 0);
	wr32(hw, hw->aq.asq.bah, 0);

	hw->aq.asq.count = 0; /* to indicate uninitialized queue */

	/* free ring buffers */
	i40e_free_asq_bufs(hw);

shutdown_asq_out:
	i40e_release_spinlock(&hw->aq.asq_spinlock);
	return ret_code;
}
541
/**
 *  i40e_shutdown_arq - shutdown ARQ
 *  @hw: pointer to the hardware structure
 *
 *  The main shutdown routine for the Admin Receive Queue.  Disables the
 *  queue in hardware by zeroing its registers, marks it uninitialized
 *  and frees all its buffers.  Returns I40E_ERR_NOT_READY when the
 *  queue was not initialized.
 **/
enum i40e_status_code i40e_shutdown_arq(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;

	i40e_acquire_spinlock(&hw->aq.arq_spinlock);

	if (hw->aq.arq.count == 0) {
		/* never initialized, or already shut down */
		ret_code = I40E_ERR_NOT_READY;
		goto shutdown_arq_out;
	}

	/* Stop firmware AdminQ processing */
	wr32(hw, hw->aq.arq.head, 0);
	wr32(hw, hw->aq.arq.tail, 0);
	wr32(hw, hw->aq.arq.len, 0);
	wr32(hw, hw->aq.arq.bal, 0);
	wr32(hw, hw->aq.arq.bah, 0);

	hw->aq.arq.count = 0; /* to indicate uninitialized queue */

	/* free ring buffers */
	i40e_free_arq_bufs(hw);

shutdown_arq_out:
	i40e_release_spinlock(&hw->aq.arq_spinlock);
	return ret_code;
}
#ifdef PF_DRIVER

/**
 *  i40e_resume_aq - resume AQ processing from 0
 *  @hw: pointer to the hardware structure
 *
 *  Re-programs both admin queue register sets and resets the software
 *  ring pointers.  Used after a PF reset (and during init retries) when
 *  the hardware registers have been cleared but the rings and buffers
 *  are still allocated.  The config-regs return codes are ignored here;
 *  the caller's next AQ operation will surface any failure.
 **/
STATIC void i40e_resume_aq(struct i40e_hw *hw)
{
	/* Registers are reset after PF reset */
	hw->aq.asq.next_to_use = 0;
	hw->aq.asq.next_to_clean = 0;

	i40e_config_asq_regs(hw);

	hw->aq.arq.next_to_use = 0;
	hw->aq.arq.next_to_clean = 0;

	i40e_config_arq_regs(hw);
}
#endif /* PF_DRIVER */
595
/**
 *  i40e_init_adminq - main initialization routine for Admin Queue
 *  @hw: pointer to the hardware structure
 *
 *  Brings up both admin queues and (in PF builds) queries the firmware
 *  version, reads NVM version info and sets capability flags derived
 *  from the AQ API version.
 *
 *  Prior to calling this function, drivers *MUST* set the following fields
 *  in the hw->aq structure:
 *     - hw->aq.num_asq_entries
 *     - hw->aq.num_arq_entries
 *     - hw->aq.arq_buf_size
 *     - hw->aq.asq_buf_size
 **/
enum i40e_status_code i40e_init_adminq(struct i40e_hw *hw)
{
#ifdef PF_DRIVER
	u16 cfg_ptr, oem_hi, oem_lo;
	u16 eetrack_lo, eetrack_hi;
#endif
	enum i40e_status_code ret_code;
#ifdef PF_DRIVER
	int retry = 0;
#endif

	/* verify input for valid configuration */
	if ((hw->aq.num_arq_entries == 0) ||
	    (hw->aq.num_asq_entries == 0) ||
	    (hw->aq.arq_buf_size == 0) ||
	    (hw->aq.asq_buf_size == 0)) {
		ret_code = I40E_ERR_CONFIG;
		goto init_adminq_exit;
	}
	i40e_init_spinlock(&hw->aq.asq_spinlock);
	i40e_init_spinlock(&hw->aq.arq_spinlock);

	/* Set up register offsets */
	i40e_adminq_init_regs(hw);

	/* setup ASQ command write back timeout */
	hw->aq.asq_cmd_timeout = I40E_ASQ_CMD_TIMEOUT;

	/* allocate the ASQ */
	ret_code = i40e_init_asq(hw);
	if (ret_code != I40E_SUCCESS)
		goto init_adminq_destroy_spinlocks;

	/* allocate the ARQ */
	ret_code = i40e_init_arq(hw);
	if (ret_code != I40E_SUCCESS)
		goto init_adminq_free_asq;

#ifdef PF_DRIVER
#ifdef INTEGRATED_VF
	/* VF has no need of firmware */
	if (i40e_is_vf(hw))
		goto init_adminq_exit;
#endif
	/* There are some cases where the firmware may not be quite ready
	 * for AdminQ operations, so we retry the AdminQ setup a few times
	 * if we see timeouts in this first AQ call.
	 */
	do {
		ret_code = i40e_aq_get_firmware_version(hw,
							&hw->aq.fw_maj_ver,
							&hw->aq.fw_min_ver,
							&hw->aq.fw_build,
							&hw->aq.api_maj_ver,
							&hw->aq.api_min_ver,
							NULL);
		/* only a timeout is worth retrying; any other result ends
		 * the loop immediately
		 */
		if (ret_code != I40E_ERR_ADMIN_QUEUE_TIMEOUT)
			break;
		retry++;
		i40e_msec_delay(100);
		i40e_resume_aq(hw);
	} while (retry < 10);
	if (ret_code != I40E_SUCCESS)
		goto init_adminq_free_arq;

	/* get the NVM version info */
	i40e_read_nvm_word(hw, I40E_SR_NVM_DEV_STARTER_VERSION,
			   &hw->nvm.version);
	i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_LO, &eetrack_lo);
	i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_HI, &eetrack_hi);
	hw->nvm.eetrack = (eetrack_hi << 16) | eetrack_lo;
	/* the OEM version words live behind the boot config pointer */
	i40e_read_nvm_word(hw, I40E_SR_BOOT_CONFIG_PTR, &cfg_ptr);
	i40e_read_nvm_word(hw, (cfg_ptr + I40E_NVM_OEM_VER_OFF),
			   &oem_hi);
	i40e_read_nvm_word(hw, (cfg_ptr + (I40E_NVM_OEM_VER_OFF + 1)),
			   &oem_lo);
	hw->nvm.oem_ver = ((u32)oem_hi << 16) | oem_lo;

	/* The ability to RX (not drop) 802.1ad frames was added in API 1.7 */
	if ((hw->aq.api_maj_ver > 1) ||
	    ((hw->aq.api_maj_ver == 1) &&
	     (hw->aq.api_min_ver >= 7)))
		hw->flags |= I40E_HW_FLAG_802_1AD_CAPABLE;

	if (hw->mac.type == I40E_MAC_XL710 &&
	    hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
	    hw->aq.api_min_ver >= I40E_MINOR_VER_GET_LINK_INFO_XL710) {
		hw->flags |= I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE;
	}

	/* Newer versions of firmware require lock when reading the NVM */
	if ((hw->aq.api_maj_ver > 1) ||
	    ((hw->aq.api_maj_ver == 1) &&
	     (hw->aq.api_min_ver >= 5)))
		hw->flags |= I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK;

	/* a firmware speaking a newer major API version is incompatible */
	if (hw->aq.api_maj_ver > I40E_FW_API_VERSION_MAJOR) {
		ret_code = I40E_ERR_FIRMWARE_API_VERSION;
		goto init_adminq_free_arq;
	}

	/* pre-emptive resource lock release */
	i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL);
	hw->nvm_release_on_done = false;
	hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;

#endif /* PF_DRIVER */
	ret_code = I40E_SUCCESS;

	/* success! */
	goto init_adminq_exit;

#ifdef PF_DRIVER
init_adminq_free_arq:
	i40e_shutdown_arq(hw);
#endif
init_adminq_free_asq:
	i40e_shutdown_asq(hw);
init_adminq_destroy_spinlocks:
	i40e_destroy_spinlock(&hw->aq.asq_spinlock);
	i40e_destroy_spinlock(&hw->aq.arq_spinlock);

init_adminq_exit:
	return ret_code;
}
732
733 /**
734  *  i40e_shutdown_adminq - shutdown routine for the Admin Queue
735  *  @hw: pointer to the hardware structure
736  **/
737 enum i40e_status_code i40e_shutdown_adminq(struct i40e_hw *hw)
738 {
739         enum i40e_status_code ret_code = I40E_SUCCESS;
740
741         if (i40e_check_asq_alive(hw))
742                 i40e_aq_queue_shutdown(hw, true);
743
744         i40e_shutdown_asq(hw);
745         i40e_shutdown_arq(hw);
746         i40e_destroy_spinlock(&hw->aq.asq_spinlock);
747         i40e_destroy_spinlock(&hw->aq.arq_spinlock);
748
749         if (hw->nvm_buff.va)
750                 i40e_free_virt_mem(hw, &hw->nvm_buff);
751
752         return ret_code;
753 }
754
/**
 *  i40e_clean_asq - cleans Admin send queue
 *  @hw: pointer to the hardware structure
 *
 *  Walks the send queue from next_to_clean up to the hardware head
 *  pointer, invoking any per-command completion callback and zeroing
 *  the descriptor and details entries as it goes.
 *
 *  returns the number of free desc
 **/
u16 i40e_clean_asq(struct i40e_hw *hw)
{
	struct i40e_adminq_ring *asq = &(hw->aq.asq);
	struct i40e_asq_cmd_details *details;
	u16 ntc = asq->next_to_clean;
	struct i40e_aq_desc desc_cb;
	struct i40e_aq_desc *desc;

	desc = I40E_ADMINQ_DESC(*asq, ntc);
	details = I40E_ADMINQ_DETAILS(*asq, ntc);
	/* everything behind the HW head pointer has been consumed by FW */
	while (rd32(hw, hw->aq.asq.head) != ntc) {
		i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
			   "ntc %d head %d.\n", ntc, rd32(hw, hw->aq.asq.head));

		if (details->callback) {
			I40E_ADMINQ_CALLBACK cb_func =
					(I40E_ADMINQ_CALLBACK)details->callback;
			/* copy the descriptor out of the ring before the
			 * callback runs; the ring entry is zeroed below
			 */
			i40e_memcpy(&desc_cb, desc, sizeof(struct i40e_aq_desc),
				    I40E_DMA_TO_DMA);
			cb_func(hw, &desc_cb);
		}
		i40e_memset(desc, 0, sizeof(*desc), I40E_DMA_MEM);
		i40e_memset(details, 0, sizeof(*details), I40E_NONDMA_MEM);
		ntc++;
		/* wrap around the end of the ring */
		if (ntc == asq->count)
			ntc = 0;
		desc = I40E_ADMINQ_DESC(*asq, ntc);
		details = I40E_ADMINQ_DETAILS(*asq, ntc);
	}

	asq->next_to_clean = ntc;

	return I40E_DESC_UNUSED(asq);
}
795
/**
 *  i40e_asq_done - check if FW has processed the Admin Send Queue
 *  @hw: pointer to the hw struct
 *
 *  Returns true if the firmware has processed all descriptors on the
 *  admin send queue. Returns false if there are still requests pending.
 **/
/* exported (non-STATIC) only in VF builds */
#ifdef VF_DRIVER
bool i40e_asq_done(struct i40e_hw *hw)
#else
STATIC bool i40e_asq_done(struct i40e_hw *hw)
#endif
{
	/* AQ designers suggest use of head for better
	 * timing reliability than DD bit
	 */
	return rd32(hw, hw->aq.asq.head) == hw->aq.asq.next_to_use;

}
815
/**
 *  i40e_asq_send_command - send command to Admin Queue
 *  @hw: pointer to the hw struct
 *  @desc: prefilled descriptor describing the command (non DMA mem)
 *  @buff: buffer to use for indirect commands
 *  @buff_size: size of buffer for indirect commands
 *  @cmd_details: pointer to command details structure
 *
 *  This is the main send command driver routine for the Admin Queue send
 *  queue.  It runs the queue, cleans the queue, etc
 *
 *  On success the writeback descriptor is copied back into @desc (and the
 *  writeback data into @buff for indirect commands); the firmware return
 *  code is saved in hw->aq.asq_last_status.
 **/
enum i40e_status_code i40e_asq_send_command(struct i40e_hw *hw,
				struct i40e_aq_desc *desc,
				void *buff, /* can be NULL */
				u16  buff_size,
				struct i40e_asq_cmd_details *cmd_details)
{
	enum i40e_status_code status = I40E_SUCCESS;
	struct i40e_dma_mem *dma_buff = NULL;
	struct i40e_asq_cmd_details *details;
	struct i40e_aq_desc *desc_on_ring;
	bool cmd_completed = false;
	u16  retval = 0;
	u32  val = 0;

	/* Serialize senders; the lock is held until the exit label below */
	i40e_acquire_spinlock(&hw->aq.asq_spinlock);

	hw->aq.asq_last_status = I40E_AQ_RC_OK;

	/* count == 0 means the send queue is not (or no longer) initialized */
	if (hw->aq.asq.count == 0) {
		i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: Admin queue not initialized.\n");
		status = I40E_ERR_QUEUE_EMPTY;
		goto asq_send_command_error;
	}

	/* A head value past the ring size indicates a corrupt/hung queue */
	val = rd32(hw, hw->aq.asq.head);
	if (val >= hw->aq.num_asq_entries) {
		i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: head overrun at %d\n", val);
		status = I40E_ERR_QUEUE_EMPTY;
		goto asq_send_command_error;
	}

	/* per-slot details entry paired with the descriptor we will use */
	details = I40E_ADMINQ_DETAILS(hw->aq.asq, hw->aq.asq.next_to_use);
	if (cmd_details) {
		i40e_memcpy(details,
			    cmd_details,
			    sizeof(struct i40e_asq_cmd_details),
			    I40E_NONDMA_TO_NONDMA);

		/* If the cmd_details are defined copy the cookie.  The
		 * CPU_TO_LE32 is not needed here because the data is ignored
		 * by the FW, only used by the driver
		 */
		if (details->cookie) {
			desc->cookie_high =
				CPU_TO_LE32(I40E_HI_DWORD(details->cookie));
			desc->cookie_low =
				CPU_TO_LE32(I40E_LO_DWORD(details->cookie));
		}
	} else {
		/* no details supplied: make sure the slot holds no stale
		 * flags/cookie from a previous command
		 */
		i40e_memset(details, 0,
			    sizeof(struct i40e_asq_cmd_details),
			    I40E_NONDMA_MEM);
	}

	/* clear requested flags and then set additional flags if defined */
	desc->flags &= ~CPU_TO_LE16(details->flags_dis);
	desc->flags |= CPU_TO_LE16(details->flags_ena);

	/* the caller's buffer must fit in the preallocated DMA buffers */
	if (buff_size > hw->aq.asq_buf_size) {
		i40e_debug(hw,
			   I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: Invalid buffer size: %d.\n",
			   buff_size);
		status = I40E_ERR_INVALID_SIZE;
		goto asq_send_command_error;
	}

	/* postpone (delayed tail bump) only makes sense for async commands */
	if (details->postpone && !details->async) {
		i40e_debug(hw,
			   I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: Async flag not set along with postpone flag");
		status = I40E_ERR_PARAM;
		goto asq_send_command_error;
	}

	/* call clean and check queue available function to reclaim the
	 * descriptors that were processed by FW, the function returns the
	 * number of desc available
	 */
	/* the clean function called here could be called in a separate thread
	 * in case of asynchronous completions
	 */
	if (i40e_clean_asq(hw) == 0) {
		i40e_debug(hw,
			   I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: Error queue is full.\n");
		status = I40E_ERR_ADMIN_QUEUE_FULL;
		goto asq_send_command_error;
	}

	/* initialize the temp desc pointer with the right desc */
	desc_on_ring = I40E_ADMINQ_DESC(hw->aq.asq, hw->aq.asq.next_to_use);

	/* if the desc is available copy the temp desc to the right place */
	i40e_memcpy(desc_on_ring, desc, sizeof(struct i40e_aq_desc),
		    I40E_NONDMA_TO_DMA);

	/* if buff is not NULL assume indirect command */
	if (buff != NULL) {
		dma_buff = &(hw->aq.asq.r.asq_bi[hw->aq.asq.next_to_use]);
		/* copy the user buff into the respective DMA buff */
		i40e_memcpy(dma_buff->va, buff, buff_size,
			    I40E_NONDMA_TO_DMA);
		desc_on_ring->datalen = CPU_TO_LE16(buff_size);

		/* Update the address values in the desc with the pa value
		 * for respective buffer
		 */
		desc_on_ring->params.external.addr_high =
				CPU_TO_LE32(I40E_HI_DWORD(dma_buff->pa));
		desc_on_ring->params.external.addr_low =
				CPU_TO_LE32(I40E_LO_DWORD(dma_buff->pa));
	}

	/* bump the tail */
	i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQTX: desc and buffer:\n");
	i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc_on_ring,
		      buff, buff_size);
	(hw->aq.asq.next_to_use)++;
	if (hw->aq.asq.next_to_use == hw->aq.asq.count)
		hw->aq.asq.next_to_use = 0;
	/* writing the tail register hands the descriptor to the firmware;
	 * postponed commands leave the tail untouched for a later bump
	 */
	if (!details->postpone)
		wr32(hw, hw->aq.asq.tail, hw->aq.asq.next_to_use);

	/* if cmd_details are not defined or async flag is not set,
	 * we need to wait for desc write back
	 */
	if (!details->async && !details->postpone) {
		u32 total_delay = 0;

		/* poll every 50 usec until the head register catches up or
		 * hw->aq.asq_cmd_timeout (same units as total_delay) expires
		 */
		do {
			/* AQ designers suggest use of head for better
			 * timing reliability than DD bit
			 */
			if (i40e_asq_done(hw))
				break;
			i40e_usec_delay(50);
			total_delay += 50;
		} while (total_delay < hw->aq.asq_cmd_timeout);
	}

	/* if ready, copy the desc back to temp */
	if (i40e_asq_done(hw)) {
		i40e_memcpy(desc, desc_on_ring, sizeof(struct i40e_aq_desc),
			    I40E_DMA_TO_NONDMA);
		if (buff != NULL)
			i40e_memcpy(buff, dma_buff->va, buff_size,
				    I40E_DMA_TO_NONDMA);
		retval = LE16_TO_CPU(desc->retval);
		if (retval != 0) {
			i40e_debug(hw,
				   I40E_DEBUG_AQ_MESSAGE,
				   "AQTX: Command completed with error 0x%X.\n",
				   retval);

			/* strip off FW internal code */
			retval &= 0xff;
		}
		cmd_completed = true;
		if ((enum i40e_admin_queue_err)retval == I40E_AQ_RC_OK)
			status = I40E_SUCCESS;
		else
			status = I40E_ERR_ADMIN_QUEUE_ERROR;
		hw->aq.asq_last_status = (enum i40e_admin_queue_err)retval;
	}

	i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
		   "AQTX: desc and buffer writeback:\n");
	i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, buff, buff_size);

	/* save writeback aq if requested */
	if (details->wb_desc)
		i40e_memcpy(details->wb_desc, desc_on_ring,
			    sizeof(struct i40e_aq_desc), I40E_DMA_TO_NONDMA);

	/* update the error if time out occurred */
	if ((!cmd_completed) &&
	    (!details->async && !details->postpone)) {
		/* ATQCRIT set on timeout means the AQ hit a critical
		 * hardware error rather than the FW merely being slow
		 */
#ifdef PF_DRIVER
		if (rd32(hw, hw->aq.asq.len) & I40E_GL_ATQLEN_ATQCRIT_MASK) {
#else
		if (rd32(hw, hw->aq.asq.len) & I40E_VF_ATQLEN1_ATQCRIT_MASK) {
#endif
			i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
				   "AQTX: AQ Critical error.\n");
			status = I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR;
		} else {
			i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
				   "AQTX: Writeback timeout.\n");
			status = I40E_ERR_ADMIN_QUEUE_TIMEOUT;
		}
	}

asq_send_command_error:
	i40e_release_spinlock(&hw->aq.asq_spinlock);
	return status;
}
1026
1027 /**
1028  *  i40e_fill_default_direct_cmd_desc - AQ descriptor helper function
1029  *  @desc:     pointer to the temp descriptor (non DMA mem)
1030  *  @opcode:   the opcode can be used to decide which flags to turn off or on
1031  *
1032  *  Fill the desc with default values
1033  **/
1034 void i40e_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc,
1035                                        u16 opcode)
1036 {
1037         /* zero out the desc */
1038         i40e_memset((void *)desc, 0, sizeof(struct i40e_aq_desc),
1039                     I40E_NONDMA_MEM);
1040         desc->opcode = CPU_TO_LE16(opcode);
1041         desc->flags = CPU_TO_LE16(I40E_AQ_FLAG_SI);
1042 }
1043
/**
 *  i40e_clean_arq_element
 *  @hw: pointer to the hw struct
 *  @e: event info from the receive descriptor, includes any buffers
 *  @pending: number of events that could be left to process
 *
 *  This function cleans one Admin Receive Queue element and returns
 *  the contents through e.  It can also return how many events are
 *  left to process through 'pending'
 *
 *  The event message is truncated to e->buf_len if the caller's buffer
 *  is smaller than the received data; the descriptor is then re-armed
 *  with its DMA buffer and handed back to the firmware.
 **/
enum i40e_status_code i40e_clean_arq_element(struct i40e_hw *hw,
					     struct i40e_arq_event_info *e,
					     u16 *pending)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;
	u16 ntc = hw->aq.arq.next_to_clean;
	struct i40e_aq_desc *desc;
	struct i40e_dma_mem *bi;
	u16 desc_idx;
	u16 datalen;
	u16 flags;
	u16 ntu;

	/* pre-clean the event info */
	i40e_memset(&e->desc, 0, sizeof(e->desc), I40E_NONDMA_MEM);

	/* take the lock before we start messing with the ring */
	i40e_acquire_spinlock(&hw->aq.arq_spinlock);

	/* count == 0 means the receive queue is not initialized */
	if (hw->aq.arq.count == 0) {
		i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
			   "AQRX: Admin queue not initialized.\n");
		ret_code = I40E_ERR_QUEUE_EMPTY;
		goto clean_arq_element_err;
	}

	/* set next_to_use to head */
	/* the head register address/mask differ between PF and VF;
	 * INTEGRATED_VF builds pick the variant at runtime
	 */
#ifdef INTEGRATED_VF
	if (!i40e_is_vf(hw))
		ntu = rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK;
	else
		ntu = rd32(hw, hw->aq.arq.head) & I40E_VF_ARQH1_ARQH_MASK;
#else
#ifdef PF_DRIVER
	ntu = rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK;
#endif /* PF_DRIVER */
#ifdef VF_DRIVER
	ntu = rd32(hw, hw->aq.arq.head) & I40E_VF_ARQH1_ARQH_MASK;
#endif /* VF_DRIVER */
#endif /* INTEGRATED_VF */
	/* head == next_to_clean means the FW has posted nothing new */
	if (ntu == ntc) {
		/* nothing to do - shouldn't need to update ring's values */
		ret_code = I40E_ERR_ADMIN_QUEUE_NO_WORK;
		goto clean_arq_element_out;
	}

	/* now clean the next descriptor */
	desc = I40E_ADMINQ_DESC(hw->aq.arq, ntc);
	desc_idx = ntc;

	/* record the FW return code even on success so callers can query it */
	hw->aq.arq_last_status =
		(enum i40e_admin_queue_err)LE16_TO_CPU(desc->retval);
	flags = LE16_TO_CPU(desc->flags);
	if (flags & I40E_AQ_FLAG_ERR) {
		ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;
		i40e_debug(hw,
			   I40E_DEBUG_AQ_MESSAGE,
			   "AQRX: Event received with error 0x%X.\n",
			   hw->aq.arq_last_status);
	}

	/* hand the descriptor and (truncated) message back to the caller */
	i40e_memcpy(&e->desc, desc, sizeof(struct i40e_aq_desc),
		    I40E_DMA_TO_NONDMA);
	datalen = LE16_TO_CPU(desc->datalen);
	e->msg_len = min(datalen, e->buf_len);
	if (e->msg_buf != NULL && (e->msg_len != 0))
		i40e_memcpy(e->msg_buf,
			    hw->aq.arq.r.arq_bi[desc_idx].va,
			    e->msg_len, I40E_DMA_TO_NONDMA);

	i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQRX: desc and buffer:\n");
	i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, e->msg_buf,
		      hw->aq.arq_buf_size);

	/* Restore the original datalen and buffer address in the desc,
	 * FW updates datalen to indicate the event message
	 * size
	 */
	bi = &hw->aq.arq.r.arq_bi[ntc];
	i40e_memset((void *)desc, 0, sizeof(struct i40e_aq_desc), I40E_DMA_MEM);

	/* re-arm the slot: give the DMA buffer back to the firmware */
	desc->flags = CPU_TO_LE16(I40E_AQ_FLAG_BUF);
	if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF)
		desc->flags |= CPU_TO_LE16(I40E_AQ_FLAG_LB);
	desc->datalen = CPU_TO_LE16((u16)bi->size);
	desc->params.external.addr_high = CPU_TO_LE32(I40E_HI_DWORD(bi->pa));
	desc->params.external.addr_low = CPU_TO_LE32(I40E_LO_DWORD(bi->pa));

	/* set tail = the last cleaned desc index. */
	wr32(hw, hw->aq.arq.tail, ntc);
	/* ntc is updated to tail + 1 */
	ntc++;
	if (ntc == hw->aq.num_arq_entries)
		ntc = 0;
	hw->aq.arq.next_to_clean = ntc;
	hw->aq.arq.next_to_use = ntu;

#ifdef PF_DRIVER
	/* PF builds feed AQ events into the NVM-update state machine */
	i40e_nvmupd_check_wait_event(hw, LE16_TO_CPU(e->desc.opcode), &e->desc);
#endif /* PF_DRIVER */
clean_arq_element_out:
	/* Set pending if needed, unlock and return */
	/* number of events still on the ring; the extra count term
	 * compensates when ntc has wrapped past ntu
	 */
	if (pending != NULL)
		*pending = (ntc > ntu ? hw->aq.arq.count : 0) + (ntu - ntc);
clean_arq_element_err:
	i40e_release_spinlock(&hw->aq.arq_spinlock);

	return ret_code;
}
1163