New upstream version 17.11.5
[deb_dpdk.git] / drivers / net / i40e / base / i40e_adminq.c
1 /*******************************************************************************
2
3 Copyright (c) 2013 - 2015, Intel Corporation
4 All rights reserved.
5
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
8
9  1. Redistributions of source code must retain the above copyright notice,
10     this list of conditions and the following disclaimer.
11
12  2. Redistributions in binary form must reproduce the above copyright
13     notice, this list of conditions and the following disclaimer in the
14     documentation and/or other materials provided with the distribution.
15
16  3. Neither the name of the Intel Corporation nor the names of its
17     contributors may be used to endorse or promote products derived from
18     this software without specific prior written permission.
19
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
31
32 ***************************************************************************/
33
34 #include "i40e_status.h"
35 #include "i40e_type.h"
36 #include "i40e_register.h"
37 #include "i40e_adminq.h"
38 #include "i40e_prototype.h"
39
/**
 *  i40e_adminq_init_regs - Initialize AdminQ registers
 *  @hw: pointer to the hardware structure
 *
 *  Records the register offsets (not values) of the send/receive queue
 *  head, tail, length, and base-address-low/high registers in hw->aq,
 *  choosing the VF or PF register set based on i40e_is_vf().
 *
 *  This assumes the alloc_asq and alloc_arq functions have already been called
 **/
STATIC void i40e_adminq_init_regs(struct i40e_hw *hw)
{
	/* set head and tail registers in our local struct */
	if (i40e_is_vf(hw)) {
		hw->aq.asq.tail = I40E_VF_ATQT1;
		hw->aq.asq.head = I40E_VF_ATQH1;
		hw->aq.asq.len  = I40E_VF_ATQLEN1;
		hw->aq.asq.bal  = I40E_VF_ATQBAL1;
		hw->aq.asq.bah  = I40E_VF_ATQBAH1;
		hw->aq.arq.tail = I40E_VF_ARQT1;
		hw->aq.arq.head = I40E_VF_ARQH1;
		hw->aq.arq.len  = I40E_VF_ARQLEN1;
		hw->aq.arq.bal  = I40E_VF_ARQBAL1;
		hw->aq.arq.bah  = I40E_VF_ARQBAH1;
#ifdef PF_DRIVER
	/* When PF_DRIVER is not defined the else-branch compiles out and the
	 * offsets stay unset for non-VF hardware; presumably a VF-only build
	 * never reaches here with i40e_is_vf() false — NOTE(review): confirm.
	 */
	} else {
		hw->aq.asq.tail = I40E_PF_ATQT;
		hw->aq.asq.head = I40E_PF_ATQH;
		hw->aq.asq.len  = I40E_PF_ATQLEN;
		hw->aq.asq.bal  = I40E_PF_ATQBAL;
		hw->aq.asq.bah  = I40E_PF_ATQBAH;
		hw->aq.arq.tail = I40E_PF_ARQT;
		hw->aq.arq.head = I40E_PF_ARQH;
		hw->aq.arq.len  = I40E_PF_ARQLEN;
		hw->aq.arq.bal  = I40E_PF_ARQBAL;
		hw->aq.arq.bah  = I40E_PF_ARQBAH;
#endif
	}
}
75
76 /**
77  *  i40e_alloc_adminq_asq_ring - Allocate Admin Queue send rings
78  *  @hw: pointer to the hardware structure
79  **/
80 enum i40e_status_code i40e_alloc_adminq_asq_ring(struct i40e_hw *hw)
81 {
82         enum i40e_status_code ret_code;
83
84         ret_code = i40e_allocate_dma_mem(hw, &hw->aq.asq.desc_buf,
85                                          i40e_mem_atq_ring,
86                                          (hw->aq.num_asq_entries *
87                                          sizeof(struct i40e_aq_desc)),
88                                          I40E_ADMINQ_DESC_ALIGNMENT);
89         if (ret_code)
90                 return ret_code;
91
92         ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.cmd_buf,
93                                           (hw->aq.num_asq_entries *
94                                           sizeof(struct i40e_asq_cmd_details)));
95         if (ret_code) {
96                 i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
97                 return ret_code;
98         }
99
100         return ret_code;
101 }
102
103 /**
104  *  i40e_alloc_adminq_arq_ring - Allocate Admin Queue receive rings
105  *  @hw: pointer to the hardware structure
106  **/
107 enum i40e_status_code i40e_alloc_adminq_arq_ring(struct i40e_hw *hw)
108 {
109         enum i40e_status_code ret_code;
110
111         ret_code = i40e_allocate_dma_mem(hw, &hw->aq.arq.desc_buf,
112                                          i40e_mem_arq_ring,
113                                          (hw->aq.num_arq_entries *
114                                          sizeof(struct i40e_aq_desc)),
115                                          I40E_ADMINQ_DESC_ALIGNMENT);
116
117         return ret_code;
118 }
119
/**
 *  i40e_free_adminq_asq - Free Admin Queue send rings
 *  @hw: pointer to the hardware structure
 *
 *  Releases the two allocations made by i40e_alloc_adminq_asq_ring().
 *
 *  This assumes the posted send buffers have already been cleaned
 *  and de-allocated
 **/
void i40e_free_adminq_asq(struct i40e_hw *hw)
{
	/* host-side command details array */
	i40e_free_virt_mem(hw, &hw->aq.asq.cmd_buf);
	/* DMA descriptor ring */
	i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
}
132
/**
 *  i40e_free_adminq_arq - Free Admin Queue receive rings
 *  @hw: pointer to the hardware structure
 *
 *  Releases the descriptor ring allocated by i40e_alloc_adminq_arq_ring().
 *
 *  This assumes the posted receive buffers have already been cleaned
 *  and de-allocated
 **/
void i40e_free_adminq_arq(struct i40e_hw *hw)
{
	i40e_free_dma_mem(hw, &hw->aq.arq.desc_buf);
}
144
145 /**
146  *  i40e_alloc_arq_bufs - Allocate pre-posted buffers for the receive queue
147  *  @hw: pointer to the hardware structure
148  **/
149 STATIC enum i40e_status_code i40e_alloc_arq_bufs(struct i40e_hw *hw)
150 {
151         enum i40e_status_code ret_code;
152         struct i40e_aq_desc *desc;
153         struct i40e_dma_mem *bi;
154         int i;
155
156         /* We'll be allocating the buffer info memory first, then we can
157          * allocate the mapped buffers for the event processing
158          */
159
160         /* buffer_info structures do not need alignment */
161         ret_code = i40e_allocate_virt_mem(hw, &hw->aq.arq.dma_head,
162                 (hw->aq.num_arq_entries * sizeof(struct i40e_dma_mem)));
163         if (ret_code)
164                 goto alloc_arq_bufs;
165         hw->aq.arq.r.arq_bi = (struct i40e_dma_mem *)hw->aq.arq.dma_head.va;
166
167         /* allocate the mapped buffers */
168         for (i = 0; i < hw->aq.num_arq_entries; i++) {
169                 bi = &hw->aq.arq.r.arq_bi[i];
170                 ret_code = i40e_allocate_dma_mem(hw, bi,
171                                                  i40e_mem_arq_buf,
172                                                  hw->aq.arq_buf_size,
173                                                  I40E_ADMINQ_DESC_ALIGNMENT);
174                 if (ret_code)
175                         goto unwind_alloc_arq_bufs;
176
177                 /* now configure the descriptors for use */
178                 desc = I40E_ADMINQ_DESC(hw->aq.arq, i);
179
180                 desc->flags = CPU_TO_LE16(I40E_AQ_FLAG_BUF);
181                 if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF)
182                         desc->flags |= CPU_TO_LE16(I40E_AQ_FLAG_LB);
183                 desc->opcode = 0;
184                 /* This is in accordance with Admin queue design, there is no
185                  * register for buffer size configuration
186                  */
187                 desc->datalen = CPU_TO_LE16((u16)bi->size);
188                 desc->retval = 0;
189                 desc->cookie_high = 0;
190                 desc->cookie_low = 0;
191                 desc->params.external.addr_high =
192                         CPU_TO_LE32(I40E_HI_DWORD(bi->pa));
193                 desc->params.external.addr_low =
194                         CPU_TO_LE32(I40E_LO_DWORD(bi->pa));
195                 desc->params.external.param0 = 0;
196                 desc->params.external.param1 = 0;
197         }
198
199 alloc_arq_bufs:
200         return ret_code;
201
202 unwind_alloc_arq_bufs:
203         /* don't try to free the one that failed... */
204         i--;
205         for (; i >= 0; i--)
206                 i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
207         i40e_free_virt_mem(hw, &hw->aq.arq.dma_head);
208
209         return ret_code;
210 }
211
212 /**
213  *  i40e_alloc_asq_bufs - Allocate empty buffer structs for the send queue
214  *  @hw: pointer to the hardware structure
215  **/
216 STATIC enum i40e_status_code i40e_alloc_asq_bufs(struct i40e_hw *hw)
217 {
218         enum i40e_status_code ret_code;
219         struct i40e_dma_mem *bi;
220         int i;
221
222         /* No mapped memory needed yet, just the buffer info structures */
223         ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.dma_head,
224                 (hw->aq.num_asq_entries * sizeof(struct i40e_dma_mem)));
225         if (ret_code)
226                 goto alloc_asq_bufs;
227         hw->aq.asq.r.asq_bi = (struct i40e_dma_mem *)hw->aq.asq.dma_head.va;
228
229         /* allocate the mapped buffers */
230         for (i = 0; i < hw->aq.num_asq_entries; i++) {
231                 bi = &hw->aq.asq.r.asq_bi[i];
232                 ret_code = i40e_allocate_dma_mem(hw, bi,
233                                                  i40e_mem_asq_buf,
234                                                  hw->aq.asq_buf_size,
235                                                  I40E_ADMINQ_DESC_ALIGNMENT);
236                 if (ret_code)
237                         goto unwind_alloc_asq_bufs;
238         }
239 alloc_asq_bufs:
240         return ret_code;
241
242 unwind_alloc_asq_bufs:
243         /* don't try to free the one that failed... */
244         i--;
245         for (; i >= 0; i--)
246                 i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
247         i40e_free_virt_mem(hw, &hw->aq.asq.dma_head);
248
249         return ret_code;
250 }
251
252 /**
253  *  i40e_free_arq_bufs - Free receive queue buffer info elements
254  *  @hw: pointer to the hardware structure
255  **/
256 STATIC void i40e_free_arq_bufs(struct i40e_hw *hw)
257 {
258         int i;
259
260         /* free descriptors */
261         for (i = 0; i < hw->aq.num_arq_entries; i++)
262                 i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
263
264         /* free the descriptor memory */
265         i40e_free_dma_mem(hw, &hw->aq.arq.desc_buf);
266
267         /* free the dma header */
268         i40e_free_virt_mem(hw, &hw->aq.arq.dma_head);
269 }
270
271 /**
272  *  i40e_free_asq_bufs - Free send queue buffer info elements
273  *  @hw: pointer to the hardware structure
274  **/
275 STATIC void i40e_free_asq_bufs(struct i40e_hw *hw)
276 {
277         int i;
278
279         /* only unmap if the address is non-NULL */
280         for (i = 0; i < hw->aq.num_asq_entries; i++)
281                 if (hw->aq.asq.r.asq_bi[i].pa)
282                         i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
283
284         /* free the buffer info list */
285         i40e_free_virt_mem(hw, &hw->aq.asq.cmd_buf);
286
287         /* free the descriptor memory */
288         i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
289
290         /* free the dma header */
291         i40e_free_virt_mem(hw, &hw->aq.asq.dma_head);
292 }
293
/**
 *  i40e_config_asq_regs - configure ASQ registers
 *  @hw: pointer to the hardware structure
 *
 *  Configure base address and length registers for the transmit queue.
 *  Returns I40E_ERR_ADMIN_QUEUE_ERROR if the base-address-low register
 *  does not read back the value just written.
 **/
STATIC enum i40e_status_code i40e_config_asq_regs(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;
	u32 reg = 0;

	/* Clear Head and Tail */
	wr32(hw, hw->aq.asq.head, 0);
	wr32(hw, hw->aq.asq.tail, 0);

	/* set starting point: write queue length and the enable bit.
	 * Three build flavors: PF-only, VF-only, or INTEGRATED_VF where
	 * the PF/VF choice is made at runtime via i40e_is_vf().
	 */
#ifdef PF_DRIVER
#ifdef INTEGRATED_VF
	if (!i40e_is_vf(hw))
		wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries |
					  I40E_PF_ATQLEN_ATQENABLE_MASK));
#else
	wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries |
				  I40E_PF_ATQLEN_ATQENABLE_MASK));
#endif /* INTEGRATED_VF */
#endif /* PF_DRIVER */
#ifdef VF_DRIVER
#ifdef INTEGRATED_VF
	if (i40e_is_vf(hw))
		wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries |
					  I40E_VF_ATQLEN1_ATQENABLE_MASK));
#else
	wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries |
				  I40E_VF_ATQLEN1_ATQENABLE_MASK));
#endif /* INTEGRATED_VF */
#endif /* VF_DRIVER */
	/* program the 64-bit DMA base address of the descriptor ring */
	wr32(hw, hw->aq.asq.bal, I40E_LO_DWORD(hw->aq.asq.desc_buf.pa));
	wr32(hw, hw->aq.asq.bah, I40E_HI_DWORD(hw->aq.asq.desc_buf.pa));

	/* Check one register to verify that config was applied */
	reg = rd32(hw, hw->aq.asq.bal);
	if (reg != I40E_LO_DWORD(hw->aq.asq.desc_buf.pa))
		ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;

	return ret_code;
}
340
/**
 *  i40e_config_arq_regs - ARQ register configuration
 *  @hw: pointer to the hardware structure
 *
 *  Configure base address and length registers for the receive (event queue)
 *  and hand the pre-allocated receive buffers to the hardware by bumping
 *  the tail.  Returns I40E_ERR_ADMIN_QUEUE_ERROR if the base-address-low
 *  register does not read back the value just written.
 **/
STATIC enum i40e_status_code i40e_config_arq_regs(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;
	u32 reg = 0;

	/* Clear Head and Tail */
	wr32(hw, hw->aq.arq.head, 0);
	wr32(hw, hw->aq.arq.tail, 0);

	/* set starting point: write queue length and the enable bit.
	 * Three build flavors: PF-only, VF-only, or INTEGRATED_VF where
	 * the PF/VF choice is made at runtime via i40e_is_vf().
	 */
#ifdef PF_DRIVER
#ifdef INTEGRATED_VF
	if (!i40e_is_vf(hw))
		wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries |
					  I40E_PF_ARQLEN_ARQENABLE_MASK));
#else
	wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries |
				  I40E_PF_ARQLEN_ARQENABLE_MASK));
#endif /* INTEGRATED_VF */
#endif /* PF_DRIVER */
#ifdef VF_DRIVER
#ifdef INTEGRATED_VF
	if (i40e_is_vf(hw))
		wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries |
					  I40E_VF_ARQLEN1_ARQENABLE_MASK));
#else
	wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries |
				  I40E_VF_ARQLEN1_ARQENABLE_MASK));
#endif /* INTEGRATED_VF */
#endif /* VF_DRIVER */
	/* program the 64-bit DMA base address of the descriptor ring */
	wr32(hw, hw->aq.arq.bal, I40E_LO_DWORD(hw->aq.arq.desc_buf.pa));
	wr32(hw, hw->aq.arq.bah, I40E_HI_DWORD(hw->aq.arq.desc_buf.pa));

	/* Update tail in the HW to post pre-allocated buffers */
	wr32(hw, hw->aq.arq.tail, hw->aq.num_arq_entries - 1);

	/* Check one register to verify that config was applied */
	reg = rd32(hw, hw->aq.arq.bal);
	if (reg != I40E_LO_DWORD(hw->aq.arq.desc_buf.pa))
		ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;

	return ret_code;
}
390
391 /**
392  *  i40e_init_asq - main initialization routine for ASQ
393  *  @hw: pointer to the hardware structure
394  *
395  *  This is the main initialization routine for the Admin Send Queue
396  *  Prior to calling this function, drivers *MUST* set the following fields
397  *  in the hw->aq structure:
398  *     - hw->aq.num_asq_entries
399  *     - hw->aq.arq_buf_size
400  *
401  *  Do *NOT* hold the lock when calling this as the memory allocation routines
402  *  called are not going to be atomic context safe
403  **/
404 enum i40e_status_code i40e_init_asq(struct i40e_hw *hw)
405 {
406         enum i40e_status_code ret_code = I40E_SUCCESS;
407
408         if (hw->aq.asq.count > 0) {
409                 /* queue already initialized */
410                 ret_code = I40E_ERR_NOT_READY;
411                 goto init_adminq_exit;
412         }
413
414         /* verify input for valid configuration */
415         if ((hw->aq.num_asq_entries == 0) ||
416             (hw->aq.asq_buf_size == 0)) {
417                 ret_code = I40E_ERR_CONFIG;
418                 goto init_adminq_exit;
419         }
420
421         hw->aq.asq.next_to_use = 0;
422         hw->aq.asq.next_to_clean = 0;
423
424         /* allocate the ring memory */
425         ret_code = i40e_alloc_adminq_asq_ring(hw);
426         if (ret_code != I40E_SUCCESS)
427                 goto init_adminq_exit;
428
429         /* allocate buffers in the rings */
430         ret_code = i40e_alloc_asq_bufs(hw);
431         if (ret_code != I40E_SUCCESS)
432                 goto init_adminq_free_rings;
433
434         /* initialize base registers */
435         ret_code = i40e_config_asq_regs(hw);
436         if (ret_code != I40E_SUCCESS)
437                 goto init_config_regs;
438
439         /* success! */
440         hw->aq.asq.count = hw->aq.num_asq_entries;
441         goto init_adminq_exit;
442
443 init_adminq_free_rings:
444         i40e_free_adminq_asq(hw);
445         return ret_code;
446
447 init_config_regs:
448         i40e_free_asq_bufs(hw);
449
450 init_adminq_exit:
451         return ret_code;
452 }
453
454 /**
455  *  i40e_init_arq - initialize ARQ
456  *  @hw: pointer to the hardware structure
457  *
458  *  The main initialization routine for the Admin Receive (Event) Queue.
459  *  Prior to calling this function, drivers *MUST* set the following fields
460  *  in the hw->aq structure:
461  *     - hw->aq.num_asq_entries
462  *     - hw->aq.arq_buf_size
463  *
464  *  Do *NOT* hold the lock when calling this as the memory allocation routines
465  *  called are not going to be atomic context safe
466  **/
467 enum i40e_status_code i40e_init_arq(struct i40e_hw *hw)
468 {
469         enum i40e_status_code ret_code = I40E_SUCCESS;
470
471         if (hw->aq.arq.count > 0) {
472                 /* queue already initialized */
473                 ret_code = I40E_ERR_NOT_READY;
474                 goto init_adminq_exit;
475         }
476
477         /* verify input for valid configuration */
478         if ((hw->aq.num_arq_entries == 0) ||
479             (hw->aq.arq_buf_size == 0)) {
480                 ret_code = I40E_ERR_CONFIG;
481                 goto init_adminq_exit;
482         }
483
484         hw->aq.arq.next_to_use = 0;
485         hw->aq.arq.next_to_clean = 0;
486
487         /* allocate the ring memory */
488         ret_code = i40e_alloc_adminq_arq_ring(hw);
489         if (ret_code != I40E_SUCCESS)
490                 goto init_adminq_exit;
491
492         /* allocate buffers in the rings */
493         ret_code = i40e_alloc_arq_bufs(hw);
494         if (ret_code != I40E_SUCCESS)
495                 goto init_adminq_free_rings;
496
497         /* initialize base registers */
498         ret_code = i40e_config_arq_regs(hw);
499         if (ret_code != I40E_SUCCESS)
500                 goto init_adminq_free_rings;
501
502         /* success! */
503         hw->aq.arq.count = hw->aq.num_arq_entries;
504         goto init_adminq_exit;
505
506 init_adminq_free_rings:
507         i40e_free_adminq_arq(hw);
508
509 init_adminq_exit:
510         return ret_code;
511 }
512
513 /**
514  *  i40e_shutdown_asq - shutdown the ASQ
515  *  @hw: pointer to the hardware structure
516  *
517  *  The main shutdown routine for the Admin Send Queue
518  **/
519 enum i40e_status_code i40e_shutdown_asq(struct i40e_hw *hw)
520 {
521         enum i40e_status_code ret_code = I40E_SUCCESS;
522
523         i40e_acquire_spinlock(&hw->aq.asq_spinlock);
524
525         if (hw->aq.asq.count == 0) {
526                 ret_code = I40E_ERR_NOT_READY;
527                 goto shutdown_asq_out;
528         }
529
530         /* Stop firmware AdminQ processing */
531         wr32(hw, hw->aq.asq.head, 0);
532         wr32(hw, hw->aq.asq.tail, 0);
533         wr32(hw, hw->aq.asq.len, 0);
534         wr32(hw, hw->aq.asq.bal, 0);
535         wr32(hw, hw->aq.asq.bah, 0);
536
537         hw->aq.asq.count = 0; /* to indicate uninitialized queue */
538
539         /* free ring buffers */
540         i40e_free_asq_bufs(hw);
541
542 shutdown_asq_out:
543         i40e_release_spinlock(&hw->aq.asq_spinlock);
544         return ret_code;
545 }
546
547 /**
548  *  i40e_shutdown_arq - shutdown ARQ
549  *  @hw: pointer to the hardware structure
550  *
551  *  The main shutdown routine for the Admin Receive Queue
552  **/
553 enum i40e_status_code i40e_shutdown_arq(struct i40e_hw *hw)
554 {
555         enum i40e_status_code ret_code = I40E_SUCCESS;
556
557         i40e_acquire_spinlock(&hw->aq.arq_spinlock);
558
559         if (hw->aq.arq.count == 0) {
560                 ret_code = I40E_ERR_NOT_READY;
561                 goto shutdown_arq_out;
562         }
563
564         /* Stop firmware AdminQ processing */
565         wr32(hw, hw->aq.arq.head, 0);
566         wr32(hw, hw->aq.arq.tail, 0);
567         wr32(hw, hw->aq.arq.len, 0);
568         wr32(hw, hw->aq.arq.bal, 0);
569         wr32(hw, hw->aq.arq.bah, 0);
570
571         hw->aq.arq.count = 0; /* to indicate uninitialized queue */
572
573         /* free ring buffers */
574         i40e_free_arq_bufs(hw);
575
576 shutdown_arq_out:
577         i40e_release_spinlock(&hw->aq.arq_spinlock);
578         return ret_code;
579 }
580 #ifdef PF_DRIVER
581
/**
 *  i40e_resume_aq - resume AQ processing from 0
 *  @hw: pointer to the hardware structure
 *
 *  Resets the software ring indices and re-programs both queue register
 *  sets.  The return codes of the config calls are deliberately ignored
 *  here; NOTE(review): presumably a failure leaves the AQ unusable and is
 *  caught by the caller's next command timing out — confirm.
 **/
STATIC void i40e_resume_aq(struct i40e_hw *hw)
{
	/* Registers are reset after PF reset */
	hw->aq.asq.next_to_use = 0;
	hw->aq.asq.next_to_clean = 0;

	i40e_config_asq_regs(hw);

	hw->aq.arq.next_to_use = 0;
	hw->aq.arq.next_to_clean = 0;

	i40e_config_arq_regs(hw);
}
599 #endif /* PF_DRIVER */
600
/**
 *  i40e_init_adminq - main initialization routine for Admin Queue
 *  @hw: pointer to the hardware structure
 *
 *  Prior to calling this function, drivers *MUST* set the following fields
 *  in the hw->aq structure:
 *     - hw->aq.num_asq_entries
 *     - hw->aq.num_arq_entries
 *     - hw->aq.arq_buf_size
 *     - hw->aq.asq_buf_size
 *
 *  Sets up both queues; in PF builds it additionally queries the firmware
 *  version (with retries), reads NVM version info, and derives capability
 *  flags from the reported API version.  On any failure everything set up
 *  so far is torn down again before the error code is returned.
 **/
enum i40e_status_code i40e_init_adminq(struct i40e_hw *hw)
{
#ifdef PF_DRIVER
	u16 cfg_ptr, oem_hi, oem_lo;
	u16 eetrack_lo, eetrack_hi;
#endif
	enum i40e_status_code ret_code;
#ifdef PF_DRIVER
	int retry = 0;
#endif

	/* verify input for valid configuration */
	if ((hw->aq.num_arq_entries == 0) ||
	    (hw->aq.num_asq_entries == 0) ||
	    (hw->aq.arq_buf_size == 0) ||
	    (hw->aq.asq_buf_size == 0)) {
		ret_code = I40E_ERR_CONFIG;
		goto init_adminq_exit;
	}
	i40e_init_spinlock(&hw->aq.asq_spinlock);
	i40e_init_spinlock(&hw->aq.arq_spinlock);

	/* Set up register offsets */
	i40e_adminq_init_regs(hw);

	/* setup ASQ command write back timeout */
	hw->aq.asq_cmd_timeout = I40E_ASQ_CMD_TIMEOUT;

	/* allocate the ASQ */
	ret_code = i40e_init_asq(hw);
	if (ret_code != I40E_SUCCESS)
		goto init_adminq_destroy_spinlocks;

	/* allocate the ARQ */
	ret_code = i40e_init_arq(hw);
	if (ret_code != I40E_SUCCESS)
		goto init_adminq_free_asq;

#ifdef PF_DRIVER
#ifdef INTEGRATED_VF
	/* VF has no need of firmware */
	if (i40e_is_vf(hw))
		goto init_adminq_exit;
#endif
	/* There are some cases where the firmware may not be quite ready
	 * for AdminQ operations, so we retry the AdminQ setup a few times
	 * if we see timeouts in this first AQ call.
	 */
	do {
		ret_code = i40e_aq_get_firmware_version(hw,
							&hw->aq.fw_maj_ver,
							&hw->aq.fw_min_ver,
							&hw->aq.fw_build,
							&hw->aq.api_maj_ver,
							&hw->aq.api_min_ver,
							NULL);
		/* only a timeout triggers a retry; other errors bail out */
		if (ret_code != I40E_ERR_ADMIN_QUEUE_TIMEOUT)
			break;
		retry++;
		i40e_msec_delay(100);
		i40e_resume_aq(hw);
	} while (retry < 10);
	if (ret_code != I40E_SUCCESS)
		goto init_adminq_free_arq;

	/* get the NVM version info */
	i40e_read_nvm_word(hw, I40E_SR_NVM_DEV_STARTER_VERSION,
			   &hw->nvm.version);
	i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_LO, &eetrack_lo);
	i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_HI, &eetrack_hi);
	hw->nvm.eetrack = (eetrack_hi << 16) | eetrack_lo;
	i40e_read_nvm_word(hw, I40E_SR_BOOT_CONFIG_PTR, &cfg_ptr);
	i40e_read_nvm_word(hw, (cfg_ptr + I40E_NVM_OEM_VER_OFF),
			   &oem_hi);
	i40e_read_nvm_word(hw, (cfg_ptr + (I40E_NVM_OEM_VER_OFF + 1)),
			   &oem_lo);
	hw->nvm.oem_ver = ((u32)oem_hi << 16) | oem_lo;

	/* The ability to RX (not drop) 802.1ad frames was added in API 1.7 */
	if ((hw->aq.api_maj_ver > 1) ||
	    ((hw->aq.api_maj_ver == 1) &&
	     (hw->aq.api_min_ver >= 7)))
		hw->flags |= I40E_HW_FLAG_802_1AD_CAPABLE;

	if (hw->mac.type ==  I40E_MAC_XL710 &&
	    hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
	    hw->aq.api_min_ver >= I40E_MINOR_VER_GET_LINK_INFO_XL710) {
		hw->flags |= I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE;
	}

	/* a firmware speaking a newer major API than the driver is rejected */
	if (hw->aq.api_maj_ver > I40E_FW_API_VERSION_MAJOR) {
		ret_code = I40E_ERR_FIRMWARE_API_VERSION;
		goto init_adminq_free_arq;
	}

	/* pre-emptive resource lock release */
	i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL);
	hw->nvm_release_on_done = false;
	hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;

#endif /* PF_DRIVER */
	ret_code = I40E_SUCCESS;

	/* success! */
	goto init_adminq_exit;

#ifdef PF_DRIVER
init_adminq_free_arq:
	i40e_shutdown_arq(hw);
#endif
init_adminq_free_asq:
	i40e_shutdown_asq(hw);
init_adminq_destroy_spinlocks:
	i40e_destroy_spinlock(&hw->aq.asq_spinlock);
	i40e_destroy_spinlock(&hw->aq.arq_spinlock);

init_adminq_exit:
	return ret_code;
}
731
732 /**
733  *  i40e_shutdown_adminq - shutdown routine for the Admin Queue
734  *  @hw: pointer to the hardware structure
735  **/
736 enum i40e_status_code i40e_shutdown_adminq(struct i40e_hw *hw)
737 {
738         enum i40e_status_code ret_code = I40E_SUCCESS;
739
740         if (i40e_check_asq_alive(hw))
741                 i40e_aq_queue_shutdown(hw, true);
742
743         i40e_shutdown_asq(hw);
744         i40e_shutdown_arq(hw);
745         i40e_destroy_spinlock(&hw->aq.asq_spinlock);
746         i40e_destroy_spinlock(&hw->aq.arq_spinlock);
747
748         if (hw->nvm_buff.va)
749                 i40e_free_virt_mem(hw, &hw->nvm_buff);
750
751         return ret_code;
752 }
753
/**
 *  i40e_clean_asq - cleans Admin send queue
 *  @hw: pointer to the hardware structure
 *
 *  Walks the send ring from next_to_clean up to the hardware head pointer,
 *  invoking any per-command completion callback, then zeroing the
 *  descriptor and its details entry.
 *
 *  returns the number of free desc
 **/
u16 i40e_clean_asq(struct i40e_hw *hw)
{
	struct i40e_adminq_ring *asq = &(hw->aq.asq);
	struct i40e_asq_cmd_details *details;
	u16 ntc = asq->next_to_clean;
	struct i40e_aq_desc desc_cb;
	struct i40e_aq_desc *desc;

	desc = I40E_ADMINQ_DESC(*asq, ntc);
	details = I40E_ADMINQ_DETAILS(*asq, ntc);
	/* everything before the HW head pointer has been consumed by FW */
	while (rd32(hw, hw->aq.asq.head) != ntc) {
		i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
			   "ntc %d head %d.\n", ntc, rd32(hw, hw->aq.asq.head));

		if (details->callback) {
			I40E_ADMINQ_CALLBACK cb_func =
					(I40E_ADMINQ_CALLBACK)details->callback;
			/* copy the descriptor before it is zeroed below so
			 * the callback sees the completed contents
			 */
			i40e_memcpy(&desc_cb, desc, sizeof(struct i40e_aq_desc),
				    I40E_DMA_TO_DMA);
			cb_func(hw, &desc_cb);
		}
		i40e_memset(desc, 0, sizeof(*desc), I40E_DMA_MEM);
		i40e_memset(details, 0, sizeof(*details), I40E_NONDMA_MEM);
		/* advance with wraparound at the end of the ring */
		ntc++;
		if (ntc == asq->count)
			ntc = 0;
		desc = I40E_ADMINQ_DESC(*asq, ntc);
		details = I40E_ADMINQ_DETAILS(*asq, ntc);
	}

	asq->next_to_clean = ntc;

	return I40E_DESC_UNUSED(asq);
}
794
/**
 *  i40e_asq_done - check if FW has processed the Admin Send Queue
 *  @hw: pointer to the hw struct
 *
 *  Returns true if the firmware has processed all descriptors on the
 *  admin send queue. Returns false if there are still requests pending.
 **/
#ifdef VF_DRIVER
/* exported in VF builds; file-local (STATIC) otherwise */
bool i40e_asq_done(struct i40e_hw *hw)
#else
STATIC bool i40e_asq_done(struct i40e_hw *hw)
#endif
{
	/* AQ designers suggest use of head for better
	 * timing reliability than DD bit
	 */
	return rd32(hw, hw->aq.asq.head) == hw->aq.asq.next_to_use;

}
814
/**
 *  i40e_asq_send_command - send command to Admin Queue
 *  @hw: pointer to the hw struct
 *  @desc: prefilled descriptor describing the command (non DMA mem);
 *         on success the FW writeback descriptor is copied back into it
 *  @buff: buffer to use for indirect commands, may be NULL for direct ones
 *  @buff_size: size of buffer for indirect commands; must not exceed
 *              hw->aq.asq_buf_size
 *  @cmd_details: pointer to command details structure, may be NULL
 *
 *  This is the main send command driver routine for the Admin Queue send
 *  queue.  It runs the queue, cleans the queue, etc.  Serialized by
 *  hw->aq.asq_spinlock; unless the caller requested async/postpone
 *  behavior it busy-polls for FW completion up to hw->aq.asq_cmd_timeout
 *  microseconds.  Returns I40E_SUCCESS when FW reports I40E_AQ_RC_OK,
 *  otherwise an error code; the raw FW return value is stored in
 *  hw->aq.asq_last_status.
 **/
enum i40e_status_code i40e_asq_send_command(struct i40e_hw *hw,
				struct i40e_aq_desc *desc,
				void *buff, /* can be NULL */
				u16  buff_size,
				struct i40e_asq_cmd_details *cmd_details)
{
	enum i40e_status_code status = I40E_SUCCESS;
	struct i40e_dma_mem *dma_buff = NULL;
	struct i40e_asq_cmd_details *details;
	struct i40e_aq_desc *desc_on_ring;
	bool cmd_completed = false;
	u16  retval = 0;
	u32  val = 0;

	i40e_acquire_spinlock(&hw->aq.asq_spinlock);

	hw->aq.asq_last_status = I40E_AQ_RC_OK;

	/* count == 0 means the send queue was never initialized (or was
	 * shut down) — bail out before touching any ring state
	 */
	if (hw->aq.asq.count == 0) {
		i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: Admin queue not initialized.\n");
		status = I40E_ERR_QUEUE_EMPTY;
		goto asq_send_command_error;
	}

	/* a head value past the ring size indicates the device is in a
	 * bad state (e.g. reset in progress or hardware error)
	 */
	val = rd32(hw, hw->aq.asq.head);
	if (val >= hw->aq.num_asq_entries) {
		i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: head overrun at %d\n", val);
		status = I40E_ERR_QUEUE_EMPTY;
		goto asq_send_command_error;
	}

	/* details shadow array entry for this ring slot: either a copy of
	 * the caller's cmd_details or zeroed defaults
	 */
	details = I40E_ADMINQ_DETAILS(hw->aq.asq, hw->aq.asq.next_to_use);
	if (cmd_details) {
		i40e_memcpy(details,
			    cmd_details,
			    sizeof(struct i40e_asq_cmd_details),
			    I40E_NONDMA_TO_NONDMA);

		/* If the cmd_details are defined copy the cookie.  The
		 * CPU_TO_LE32 is not needed here because the data is ignored
		 * by the FW, only used by the driver
		 */
		if (details->cookie) {
			desc->cookie_high =
				CPU_TO_LE32(I40E_HI_DWORD(details->cookie));
			desc->cookie_low =
				CPU_TO_LE32(I40E_LO_DWORD(details->cookie));
		}
	} else {
		i40e_memset(details, 0,
			    sizeof(struct i40e_asq_cmd_details),
			    I40E_NONDMA_MEM);
	}

	/* clear requested flags and then set additional flags if defined */
	desc->flags &= ~CPU_TO_LE16(details->flags_dis);
	desc->flags |= CPU_TO_LE16(details->flags_ena);

	if (buff_size > hw->aq.asq_buf_size) {
		i40e_debug(hw,
			   I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: Invalid buffer size: %d.\n",
			   buff_size);
		status = I40E_ERR_INVALID_SIZE;
		goto asq_send_command_error;
	}

	/* postpone without async makes no sense: the caller would never
	 * get a completion
	 */
	if (details->postpone && !details->async) {
		i40e_debug(hw,
			   I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: Async flag not set along with postpone flag");
		status = I40E_ERR_PARAM;
		goto asq_send_command_error;
	}

	/* call clean and check queue available function to reclaim the
	 * descriptors that were processed by FW, the function returns the
	 * number of desc available
	 */
	/* the clean function called here could be called in a separate thread
	 * in case of asynchronous completions
	 */
	if (i40e_clean_asq(hw) == 0) {
		i40e_debug(hw,
			   I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: Error queue is full.\n");
		status = I40E_ERR_ADMIN_QUEUE_FULL;
		goto asq_send_command_error;
	}

	/* initialize the temp desc pointer with the right desc */
	desc_on_ring = I40E_ADMINQ_DESC(hw->aq.asq, hw->aq.asq.next_to_use);

	/* if the desc is available copy the temp desc to the right place */
	i40e_memcpy(desc_on_ring, desc, sizeof(struct i40e_aq_desc),
		    I40E_NONDMA_TO_DMA);

	/* if buff is not NULL assume indirect command */
	if (buff != NULL) {
		dma_buff = &(hw->aq.asq.r.asq_bi[hw->aq.asq.next_to_use]);
		/* copy the user buff into the respective DMA buff */
		i40e_memcpy(dma_buff->va, buff, buff_size,
			    I40E_NONDMA_TO_DMA);
		desc_on_ring->datalen = CPU_TO_LE16(buff_size);

		/* Update the address values in the desc with the pa value
		 * for respective buffer
		 */
		desc_on_ring->params.external.addr_high =
				CPU_TO_LE32(I40E_HI_DWORD(dma_buff->pa));
		desc_on_ring->params.external.addr_low =
				CPU_TO_LE32(I40E_LO_DWORD(dma_buff->pa));
	}

	/* bump the tail */
	i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQTX: desc and buffer:\n");
	i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc_on_ring,
		      buff, buff_size);
	(hw->aq.asq.next_to_use)++;
	if (hw->aq.asq.next_to_use == hw->aq.asq.count)
		hw->aq.asq.next_to_use = 0;
	/* writing the tail register hands the descriptor to FW; skipped
	 * when the caller asked to postpone submission
	 */
	if (!details->postpone)
		wr32(hw, hw->aq.asq.tail, hw->aq.asq.next_to_use);

	/* if cmd_details are not defined or async flag is not set,
	 * we need to wait for desc write back
	 */
	if (!details->async && !details->postpone) {
		u32 total_delay = 0;

		do {
			/* AQ designers suggest use of head for better
			 * timing reliability than DD bit
			 */
			if (i40e_asq_done(hw))
				break;
			/* 50 us polling granularity against the
			 * asq_cmd_timeout budget (in microseconds)
			 */
			i40e_usec_delay(50);
			total_delay += 50;
		} while (total_delay < hw->aq.asq_cmd_timeout);
	}

	/* if ready, copy the desc back to temp */
	if (i40e_asq_done(hw)) {
		i40e_memcpy(desc, desc_on_ring, sizeof(struct i40e_aq_desc),
			    I40E_DMA_TO_NONDMA);
		if (buff != NULL)
			i40e_memcpy(buff, dma_buff->va, buff_size,
				    I40E_DMA_TO_NONDMA);
		retval = LE16_TO_CPU(desc->retval);
		if (retval != 0) {
			i40e_debug(hw,
				   I40E_DEBUG_AQ_MESSAGE,
				   "AQTX: Command completed with error 0x%X.\n",
				   retval);

			/* strip off FW internal code */
			retval &= 0xff;
		}
		cmd_completed = true;
		if ((enum i40e_admin_queue_err)retval == I40E_AQ_RC_OK)
			status = I40E_SUCCESS;
		else
			status = I40E_ERR_ADMIN_QUEUE_ERROR;
		hw->aq.asq_last_status = (enum i40e_admin_queue_err)retval;
	}

	i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
		   "AQTX: desc and buffer writeback:\n");
	i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, buff, buff_size);

	/* save writeback aq if requested */
	if (details->wb_desc)
		i40e_memcpy(details->wb_desc, desc_on_ring,
			    sizeof(struct i40e_aq_desc), I40E_DMA_TO_NONDMA);

	/* update the error if time out occurred */
	if ((!cmd_completed) &&
	    (!details->async && !details->postpone)) {
		i40e_debug(hw,
			   I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: Writeback timeout.\n");
		status = I40E_ERR_ADMIN_QUEUE_TIMEOUT;
	}

asq_send_command_error:
	i40e_release_spinlock(&hw->aq.asq_spinlock);
	return status;
}
1016
1017 /**
1018  *  i40e_fill_default_direct_cmd_desc - AQ descriptor helper function
1019  *  @desc:     pointer to the temp descriptor (non DMA mem)
1020  *  @opcode:   the opcode can be used to decide which flags to turn off or on
1021  *
1022  *  Fill the desc with default values
1023  **/
1024 void i40e_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc,
1025                                        u16 opcode)
1026 {
1027         /* zero out the desc */
1028         i40e_memset((void *)desc, 0, sizeof(struct i40e_aq_desc),
1029                     I40E_NONDMA_MEM);
1030         desc->opcode = CPU_TO_LE16(opcode);
1031         desc->flags = CPU_TO_LE16(I40E_AQ_FLAG_SI);
1032 }
1033
/**
 *  i40e_clean_arq_element
 *  @hw: pointer to the hw struct
 *  @e: event info from the receive descriptor, includes any buffers
 *  @pending: number of events that could be left to process, may be NULL
 *
 *  This function cleans one Admin Receive Queue element and returns
 *  the contents through e.  It can also return how many events are
 *  left to process through 'pending'.  Serialized by hw->aq.arq_spinlock.
 *  Returns I40E_ERR_ADMIN_QUEUE_NO_WORK when the ring is empty.
 **/
enum i40e_status_code i40e_clean_arq_element(struct i40e_hw *hw,
					     struct i40e_arq_event_info *e,
					     u16 *pending)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;
	u16 ntc = hw->aq.arq.next_to_clean;
	struct i40e_aq_desc *desc;
	struct i40e_dma_mem *bi;
	u16 desc_idx;
	u16 datalen;
	u16 flags;
	u16 ntu;

	/* pre-clean the event info */
	i40e_memset(&e->desc, 0, sizeof(e->desc), I40E_NONDMA_MEM);

	/* take the lock before we start messing with the ring */
	i40e_acquire_spinlock(&hw->aq.arq_spinlock);

	if (hw->aq.arq.count == 0) {
		i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
			   "AQRX: Admin queue not initialized.\n");
		ret_code = I40E_ERR_QUEUE_EMPTY;
		goto clean_arq_element_err;
	}

	/* set next_to_use to head; the head register differs between the
	 * PF and VF register files, hence the preprocessor matrix below.
	 * NOTE(review): with INTEGRATED_VF and only one of PF_DRIVER /
	 * VF_DRIVER defined, the non-matching i40e_is_vf() case leaves
	 * ntu uninitialized — TODO confirm the build always defines both
	 * in that configuration.
	 */
#ifdef PF_DRIVER
#ifdef INTEGRATED_VF
	if (!i40e_is_vf(hw))
		ntu = (rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK);
#else
	ntu = (rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK);
#endif /* INTEGRATED_VF */
#endif /* PF_DRIVER */
#ifdef VF_DRIVER
#ifdef INTEGRATED_VF
	if (i40e_is_vf(hw))
		ntu = (rd32(hw, hw->aq.arq.head) & I40E_VF_ARQH1_ARQH_MASK);
#else
	ntu = (rd32(hw, hw->aq.arq.head) & I40E_VF_ARQH1_ARQH_MASK);
#endif /* INTEGRATED_VF */
#endif /* VF_DRIVER */
	if (ntu == ntc) {
		/* nothing to do - shouldn't need to update ring's values */
		ret_code = I40E_ERR_ADMIN_QUEUE_NO_WORK;
		goto clean_arq_element_out;
	}

	/* now clean the next descriptor */
	desc = I40E_ADMINQ_DESC(hw->aq.arq, ntc);
	desc_idx = ntc;

	/* record the FW return value before inspecting error flags */
	hw->aq.arq_last_status =
		(enum i40e_admin_queue_err)LE16_TO_CPU(desc->retval);
	flags = LE16_TO_CPU(desc->flags);
	if (flags & I40E_AQ_FLAG_ERR) {
		ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;
		i40e_debug(hw,
			   I40E_DEBUG_AQ_MESSAGE,
			   "AQRX: Event received with error 0x%X.\n",
			   hw->aq.arq_last_status);
	}

	/* copy the descriptor, then as much of the message as fits in the
	 * caller's buffer (truncated to e->buf_len)
	 */
	i40e_memcpy(&e->desc, desc, sizeof(struct i40e_aq_desc),
		    I40E_DMA_TO_NONDMA);
	datalen = LE16_TO_CPU(desc->datalen);
	e->msg_len = min(datalen, e->buf_len);
	if (e->msg_buf != NULL && (e->msg_len != 0))
		i40e_memcpy(e->msg_buf,
			    hw->aq.arq.r.arq_bi[desc_idx].va,
			    e->msg_len, I40E_DMA_TO_NONDMA);

	i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQRX: desc and buffer:\n");
	i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, e->msg_buf,
		      hw->aq.arq_buf_size);

	/* Restore the original datalen and buffer address in the desc,
	 * FW updates datalen to indicate the event message
	 * size
	 */
	bi = &hw->aq.arq.r.arq_bi[ntc];
	i40e_memset((void *)desc, 0, sizeof(struct i40e_aq_desc), I40E_DMA_MEM);

	desc->flags = CPU_TO_LE16(I40E_AQ_FLAG_BUF);
	if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF)
		desc->flags |= CPU_TO_LE16(I40E_AQ_FLAG_LB);
	desc->datalen = CPU_TO_LE16((u16)bi->size);
	desc->params.external.addr_high = CPU_TO_LE32(I40E_HI_DWORD(bi->pa));
	desc->params.external.addr_low = CPU_TO_LE32(I40E_LO_DWORD(bi->pa));

	/* set tail = the last cleaned desc index. */
	wr32(hw, hw->aq.arq.tail, ntc);
	/* ntc is updated to tail + 1 */
	ntc++;
	if (ntc == hw->aq.num_arq_entries)
		ntc = 0;
	hw->aq.arq.next_to_clean = ntc;
	hw->aq.arq.next_to_use = ntu;

#ifdef PF_DRIVER
	/* let the NVM update state machine see the completion opcode */
	i40e_nvmupd_check_wait_event(hw, LE16_TO_CPU(e->desc.opcode));
#endif /* PF_DRIVER */
clean_arq_element_out:
	/* Set pending if needed, unlock and return; the expression counts
	 * the ring distance from next_to_clean to the hardware head
	 */
	if (pending != NULL)
		*pending = (ntc > ntu ? hw->aq.arq.count : 0) + (ntu - ntc);
clean_arq_element_err:
	i40e_release_spinlock(&hw->aq.arq_spinlock);

	return ret_code;
}
1156