deb_dpdk.git: drivers/net/i40e/base/i40e_adminq.c (imported upstream version 16.04)
/*******************************************************************************

Copyright (c) 2013 - 2015, Intel Corporation
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

 1. Redistributions of source code must retain the above copyright notice,
    this list of conditions and the following disclaimer.

 2. Redistributions in binary form must reproduce the above copyright
    notice, this list of conditions and the following disclaimer in the
    documentation and/or other materials provided with the distribution.

 3. Neither the name of the Intel Corporation nor the names of its
    contributors may be used to endorse or promote products derived from
    this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.

***************************************************************************/

#include "i40e_status.h"
#include "i40e_type.h"
#include "i40e_register.h"
#include "i40e_adminq.h"
#include "i40e_prototype.h"

#ifdef PF_DRIVER
/**
 * i40e_is_nvm_update_op - return true if this is an NVM update operation
 * @desc: API request descriptor
 **/
STATIC INLINE bool i40e_is_nvm_update_op(struct i40e_aq_desc *desc)
{
        return (desc->opcode == CPU_TO_LE16(i40e_aqc_opc_nvm_erase) ||
                desc->opcode == CPU_TO_LE16(i40e_aqc_opc_nvm_update));
}

#endif /* PF_DRIVER */
/**
 *  i40e_adminq_init_regs - Initialize AdminQ registers
 *  @hw: pointer to the hardware structure
 *
 *  This assumes the alloc_asq and alloc_arq functions have already been called
 **/
STATIC void i40e_adminq_init_regs(struct i40e_hw *hw)
{
        /* set head and tail registers in our local struct */
        if (i40e_is_vf(hw)) {
                hw->aq.asq.tail = I40E_VF_ATQT1;
                hw->aq.asq.head = I40E_VF_ATQH1;
                hw->aq.asq.len  = I40E_VF_ATQLEN1;
                hw->aq.asq.bal  = I40E_VF_ATQBAL1;
                hw->aq.asq.bah  = I40E_VF_ATQBAH1;
                hw->aq.arq.tail = I40E_VF_ARQT1;
                hw->aq.arq.head = I40E_VF_ARQH1;
                hw->aq.arq.len  = I40E_VF_ARQLEN1;
                hw->aq.arq.bal  = I40E_VF_ARQBAL1;
                hw->aq.arq.bah  = I40E_VF_ARQBAH1;
#ifdef PF_DRIVER
        } else {
                hw->aq.asq.tail = I40E_PF_ATQT;
                hw->aq.asq.head = I40E_PF_ATQH;
                hw->aq.asq.len  = I40E_PF_ATQLEN;
                hw->aq.asq.bal  = I40E_PF_ATQBAL;
                hw->aq.asq.bah  = I40E_PF_ATQBAH;
                hw->aq.arq.tail = I40E_PF_ARQT;
                hw->aq.arq.head = I40E_PF_ARQH;
                hw->aq.arq.len  = I40E_PF_ARQLEN;
                hw->aq.arq.bal  = I40E_PF_ARQBAL;
                hw->aq.arq.bah  = I40E_PF_ARQBAH;
#endif
        }
}

/**
 *  i40e_alloc_adminq_asq_ring - Allocate Admin Queue send rings
 *  @hw: pointer to the hardware structure
 **/
enum i40e_status_code i40e_alloc_adminq_asq_ring(struct i40e_hw *hw)
{
        enum i40e_status_code ret_code;

        ret_code = i40e_allocate_dma_mem(hw, &hw->aq.asq.desc_buf,
                                         i40e_mem_atq_ring,
                                         (hw->aq.num_asq_entries *
                                         sizeof(struct i40e_aq_desc)),
                                         I40E_ADMINQ_DESC_ALIGNMENT);
        if (ret_code)
                return ret_code;

        ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.cmd_buf,
                                          (hw->aq.num_asq_entries *
                                          sizeof(struct i40e_asq_cmd_details)));
        if (ret_code) {
                i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
                return ret_code;
        }

        return ret_code;
}

/**
 *  i40e_alloc_adminq_arq_ring - Allocate Admin Queue receive rings
 *  @hw: pointer to the hardware structure
 **/
enum i40e_status_code i40e_alloc_adminq_arq_ring(struct i40e_hw *hw)
{
        enum i40e_status_code ret_code;

        ret_code = i40e_allocate_dma_mem(hw, &hw->aq.arq.desc_buf,
                                         i40e_mem_arq_ring,
                                         (hw->aq.num_arq_entries *
                                         sizeof(struct i40e_aq_desc)),
                                         I40E_ADMINQ_DESC_ALIGNMENT);

        return ret_code;
}

/**
 *  i40e_free_adminq_asq - Free Admin Queue send rings
 *  @hw: pointer to the hardware structure
 *
 *  This assumes the posted send buffers have already been cleaned
 *  and de-allocated
 **/
void i40e_free_adminq_asq(struct i40e_hw *hw)
{
        i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
}

/**
 *  i40e_free_adminq_arq - Free Admin Queue receive rings
 *  @hw: pointer to the hardware structure
 *
 *  This assumes the posted receive buffers have already been cleaned
 *  and de-allocated
 **/
void i40e_free_adminq_arq(struct i40e_hw *hw)
{
        i40e_free_dma_mem(hw, &hw->aq.arq.desc_buf);
}

/**
 *  i40e_alloc_arq_bufs - Allocate pre-posted buffers for the receive queue
 *  @hw: pointer to the hardware structure
 **/
STATIC enum i40e_status_code i40e_alloc_arq_bufs(struct i40e_hw *hw)
{
        enum i40e_status_code ret_code;
        struct i40e_aq_desc *desc;
        struct i40e_dma_mem *bi;
        int i;

        /* We'll be allocating the buffer info memory first, then we can
         * allocate the mapped buffers for the event processing
         */

        /* buffer_info structures do not need alignment */
        ret_code = i40e_allocate_virt_mem(hw, &hw->aq.arq.dma_head,
                (hw->aq.num_arq_entries * sizeof(struct i40e_dma_mem)));
        if (ret_code)
                goto alloc_arq_bufs;
        hw->aq.arq.r.arq_bi = (struct i40e_dma_mem *)hw->aq.arq.dma_head.va;

        /* allocate the mapped buffers */
        for (i = 0; i < hw->aq.num_arq_entries; i++) {
                bi = &hw->aq.arq.r.arq_bi[i];
                ret_code = i40e_allocate_dma_mem(hw, bi,
                                                 i40e_mem_arq_buf,
                                                 hw->aq.arq_buf_size,
                                                 I40E_ADMINQ_DESC_ALIGNMENT);
                if (ret_code)
                        goto unwind_alloc_arq_bufs;

                /* now configure the descriptors for use */
                desc = I40E_ADMINQ_DESC(hw->aq.arq, i);

                desc->flags = CPU_TO_LE16(I40E_AQ_FLAG_BUF);
                if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF)
                        desc->flags |= CPU_TO_LE16(I40E_AQ_FLAG_LB);
                desc->opcode = 0;
                /* This is in accordance with the Admin queue design; there
                 * is no register for buffer size configuration
                 */
                desc->datalen = CPU_TO_LE16((u16)bi->size);
                desc->retval = 0;
                desc->cookie_high = 0;
                desc->cookie_low = 0;
                desc->params.external.addr_high =
                        CPU_TO_LE32(I40E_HI_DWORD(bi->pa));
                desc->params.external.addr_low =
                        CPU_TO_LE32(I40E_LO_DWORD(bi->pa));
                desc->params.external.param0 = 0;
                desc->params.external.param1 = 0;
        }

alloc_arq_bufs:
        return ret_code;

unwind_alloc_arq_bufs:
        /* don't try to free the one that failed... */
        i--;
        for (; i >= 0; i--)
                i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
        i40e_free_virt_mem(hw, &hw->aq.arq.dma_head);

        return ret_code;
}

/**
 *  i40e_alloc_asq_bufs - Allocate empty buffer structs for the send queue
 *  @hw: pointer to the hardware structure
 **/
STATIC enum i40e_status_code i40e_alloc_asq_bufs(struct i40e_hw *hw)
{
        enum i40e_status_code ret_code;
        struct i40e_dma_mem *bi;
        int i;

        /* No mapped memory needed yet, just the buffer info structures */
        ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.dma_head,
                (hw->aq.num_asq_entries * sizeof(struct i40e_dma_mem)));
        if (ret_code)
                goto alloc_asq_bufs;
        hw->aq.asq.r.asq_bi = (struct i40e_dma_mem *)hw->aq.asq.dma_head.va;

        /* allocate the mapped buffers */
        for (i = 0; i < hw->aq.num_asq_entries; i++) {
                bi = &hw->aq.asq.r.asq_bi[i];
                ret_code = i40e_allocate_dma_mem(hw, bi,
                                                 i40e_mem_asq_buf,
                                                 hw->aq.asq_buf_size,
                                                 I40E_ADMINQ_DESC_ALIGNMENT);
                if (ret_code)
                        goto unwind_alloc_asq_bufs;
        }
alloc_asq_bufs:
        return ret_code;

unwind_alloc_asq_bufs:
        /* don't try to free the one that failed... */
        i--;
        for (; i >= 0; i--)
                i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
        i40e_free_virt_mem(hw, &hw->aq.asq.dma_head);

        return ret_code;
}

/**
 *  i40e_free_arq_bufs - Free receive queue buffer info elements
 *  @hw: pointer to the hardware structure
 **/
STATIC void i40e_free_arq_bufs(struct i40e_hw *hw)
{
        int i;

        /* free descriptors */
        for (i = 0; i < hw->aq.num_arq_entries; i++)
                i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);

        /* free the descriptor memory */
        i40e_free_dma_mem(hw, &hw->aq.arq.desc_buf);

        /* free the dma header */
        i40e_free_virt_mem(hw, &hw->aq.arq.dma_head);
}

/**
 *  i40e_free_asq_bufs - Free send queue buffer info elements
 *  @hw: pointer to the hardware structure
 **/
STATIC void i40e_free_asq_bufs(struct i40e_hw *hw)
{
        int i;

        /* only unmap if the address is non-NULL */
        for (i = 0; i < hw->aq.num_asq_entries; i++)
                if (hw->aq.asq.r.asq_bi[i].pa)
                        i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);

        /* free the buffer info list */
        i40e_free_virt_mem(hw, &hw->aq.asq.cmd_buf);

        /* free the descriptor memory */
        i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);

        /* free the dma header */
        i40e_free_virt_mem(hw, &hw->aq.asq.dma_head);
}

/**
 *  i40e_config_asq_regs - configure ASQ registers
 *  @hw: pointer to the hardware structure
 *
 *  Configure base address and length registers for the transmit queue
 **/
STATIC enum i40e_status_code i40e_config_asq_regs(struct i40e_hw *hw)
{
        enum i40e_status_code ret_code = I40E_SUCCESS;
        u32 reg = 0;

        /* Clear Head and Tail */
        wr32(hw, hw->aq.asq.head, 0);
        wr32(hw, hw->aq.asq.tail, 0);

        /* set starting point */
#ifdef PF_DRIVER
#ifdef INTEGRATED_VF
        if (!i40e_is_vf(hw))
                wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries |
                                          I40E_PF_ATQLEN_ATQENABLE_MASK));
#else
        wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries |
                                  I40E_PF_ATQLEN_ATQENABLE_MASK));
#endif /* INTEGRATED_VF */
#endif /* PF_DRIVER */
#ifdef VF_DRIVER
#ifdef INTEGRATED_VF
        if (i40e_is_vf(hw))
                wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries |
                                          I40E_VF_ATQLEN1_ATQENABLE_MASK));
#else
        wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries |
                                  I40E_VF_ATQLEN1_ATQENABLE_MASK));
#endif /* INTEGRATED_VF */
#endif /* VF_DRIVER */
        wr32(hw, hw->aq.asq.bal, I40E_LO_DWORD(hw->aq.asq.desc_buf.pa));
        wr32(hw, hw->aq.asq.bah, I40E_HI_DWORD(hw->aq.asq.desc_buf.pa));

        /* Check one register to verify that config was applied */
        reg = rd32(hw, hw->aq.asq.bal);
        if (reg != I40E_LO_DWORD(hw->aq.asq.desc_buf.pa))
                ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;

        return ret_code;
}

/**
 *  i40e_config_arq_regs - ARQ register configuration
 *  @hw: pointer to the hardware structure
 *
 *  Configure base address and length registers for the receive (event) queue
 **/
STATIC enum i40e_status_code i40e_config_arq_regs(struct i40e_hw *hw)
{
        enum i40e_status_code ret_code = I40E_SUCCESS;
        u32 reg = 0;

        /* Clear Head and Tail */
        wr32(hw, hw->aq.arq.head, 0);
        wr32(hw, hw->aq.arq.tail, 0);

        /* set starting point */
#ifdef PF_DRIVER
#ifdef INTEGRATED_VF
        if (!i40e_is_vf(hw))
                wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries |
                                          I40E_PF_ARQLEN_ARQENABLE_MASK));
#else
        wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries |
                                  I40E_PF_ARQLEN_ARQENABLE_MASK));
#endif /* INTEGRATED_VF */
#endif /* PF_DRIVER */
#ifdef VF_DRIVER
#ifdef INTEGRATED_VF
        if (i40e_is_vf(hw))
                wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries |
                                          I40E_VF_ARQLEN1_ARQENABLE_MASK));
#else
        wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries |
                                  I40E_VF_ARQLEN1_ARQENABLE_MASK));
#endif /* INTEGRATED_VF */
#endif /* VF_DRIVER */
        wr32(hw, hw->aq.arq.bal, I40E_LO_DWORD(hw->aq.arq.desc_buf.pa));
        wr32(hw, hw->aq.arq.bah, I40E_HI_DWORD(hw->aq.arq.desc_buf.pa));

        /* Update tail in the HW to post pre-allocated buffers */
        wr32(hw, hw->aq.arq.tail, hw->aq.num_arq_entries - 1);

        /* Check one register to verify that config was applied */
        reg = rd32(hw, hw->aq.arq.bal);
        if (reg != I40E_LO_DWORD(hw->aq.arq.desc_buf.pa))
                ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;

        return ret_code;
}

/**
 *  i40e_init_asq - main initialization routine for ASQ
 *  @hw: pointer to the hardware structure
 *
 *  This is the main initialization routine for the Admin Send Queue.
 *  Prior to calling this function, drivers *MUST* set the following fields
 *  in the hw->aq structure:
 *     - hw->aq.num_asq_entries
 *     - hw->aq.asq_buf_size
 *
 *  Do *NOT* hold the lock when calling this, as the memory allocation
 *  routines called are not atomic-context safe
 **/
enum i40e_status_code i40e_init_asq(struct i40e_hw *hw)
{
        enum i40e_status_code ret_code = I40E_SUCCESS;

        if (hw->aq.asq.count > 0) {
                /* queue already initialized */
                ret_code = I40E_ERR_NOT_READY;
                goto init_adminq_exit;
        }

        /* verify input for valid configuration */
        if ((hw->aq.num_asq_entries == 0) ||
            (hw->aq.asq_buf_size == 0)) {
                ret_code = I40E_ERR_CONFIG;
                goto init_adminq_exit;
        }

        hw->aq.asq.next_to_use = 0;
        hw->aq.asq.next_to_clean = 0;

        /* allocate the ring memory */
        ret_code = i40e_alloc_adminq_asq_ring(hw);
        if (ret_code != I40E_SUCCESS)
                goto init_adminq_exit;

        /* allocate buffers in the rings */
        ret_code = i40e_alloc_asq_bufs(hw);
        if (ret_code != I40E_SUCCESS)
                goto init_adminq_free_rings;

        /* initialize base registers */
        ret_code = i40e_config_asq_regs(hw);
        if (ret_code != I40E_SUCCESS)
                goto init_adminq_free_rings;

        /* success! */
        hw->aq.asq.count = hw->aq.num_asq_entries;
        goto init_adminq_exit;

init_adminq_free_rings:
        i40e_free_adminq_asq(hw);

init_adminq_exit:
        return ret_code;
}

/**
 *  i40e_init_arq - initialize ARQ
 *  @hw: pointer to the hardware structure
 *
 *  The main initialization routine for the Admin Receive (Event) Queue.
 *  Prior to calling this function, drivers *MUST* set the following fields
 *  in the hw->aq structure:
 *     - hw->aq.num_arq_entries
 *     - hw->aq.arq_buf_size
 *
 *  Do *NOT* hold the lock when calling this, as the memory allocation
 *  routines called are not atomic-context safe
 **/
enum i40e_status_code i40e_init_arq(struct i40e_hw *hw)
{
        enum i40e_status_code ret_code = I40E_SUCCESS;

        if (hw->aq.arq.count > 0) {
                /* queue already initialized */
                ret_code = I40E_ERR_NOT_READY;
                goto init_adminq_exit;
        }

        /* verify input for valid configuration */
        if ((hw->aq.num_arq_entries == 0) ||
            (hw->aq.arq_buf_size == 0)) {
                ret_code = I40E_ERR_CONFIG;
                goto init_adminq_exit;
        }

        hw->aq.arq.next_to_use = 0;
        hw->aq.arq.next_to_clean = 0;

        /* allocate the ring memory */
        ret_code = i40e_alloc_adminq_arq_ring(hw);
        if (ret_code != I40E_SUCCESS)
                goto init_adminq_exit;

        /* allocate buffers in the rings */
        ret_code = i40e_alloc_arq_bufs(hw);
        if (ret_code != I40E_SUCCESS)
                goto init_adminq_free_rings;

        /* initialize base registers */
        ret_code = i40e_config_arq_regs(hw);
        if (ret_code != I40E_SUCCESS)
                goto init_adminq_free_rings;

        /* success! */
        hw->aq.arq.count = hw->aq.num_arq_entries;
        goto init_adminq_exit;

init_adminq_free_rings:
        i40e_free_adminq_arq(hw);

init_adminq_exit:
        return ret_code;
}

/**
 *  i40e_shutdown_asq - shutdown the ASQ
 *  @hw: pointer to the hardware structure
 *
 *  The main shutdown routine for the Admin Send Queue
 **/
enum i40e_status_code i40e_shutdown_asq(struct i40e_hw *hw)
{
        enum i40e_status_code ret_code = I40E_SUCCESS;

        i40e_acquire_spinlock(&hw->aq.asq_spinlock);

        if (hw->aq.asq.count == 0) {
                ret_code = I40E_ERR_NOT_READY;
                goto shutdown_asq_out;
        }

        /* Stop firmware AdminQ processing */
        wr32(hw, hw->aq.asq.head, 0);
        wr32(hw, hw->aq.asq.tail, 0);
        wr32(hw, hw->aq.asq.len, 0);
        wr32(hw, hw->aq.asq.bal, 0);
        wr32(hw, hw->aq.asq.bah, 0);

        hw->aq.asq.count = 0; /* to indicate uninitialized queue */

        /* free ring buffers */
        i40e_free_asq_bufs(hw);

shutdown_asq_out:
        i40e_release_spinlock(&hw->aq.asq_spinlock);
        return ret_code;
}

/**
 *  i40e_shutdown_arq - shutdown ARQ
 *  @hw: pointer to the hardware structure
 *
 *  The main shutdown routine for the Admin Receive Queue
 **/
enum i40e_status_code i40e_shutdown_arq(struct i40e_hw *hw)
{
        enum i40e_status_code ret_code = I40E_SUCCESS;

        i40e_acquire_spinlock(&hw->aq.arq_spinlock);

        if (hw->aq.arq.count == 0) {
                ret_code = I40E_ERR_NOT_READY;
                goto shutdown_arq_out;
        }

        /* Stop firmware AdminQ processing */
        wr32(hw, hw->aq.arq.head, 0);
        wr32(hw, hw->aq.arq.tail, 0);
        wr32(hw, hw->aq.arq.len, 0);
        wr32(hw, hw->aq.arq.bal, 0);
        wr32(hw, hw->aq.arq.bah, 0);

        hw->aq.arq.count = 0; /* to indicate uninitialized queue */

        /* free ring buffers */
        i40e_free_arq_bufs(hw);

shutdown_arq_out:
        i40e_release_spinlock(&hw->aq.arq_spinlock);
        return ret_code;
}

/**
 *  i40e_init_adminq - main initialization routine for Admin Queue
 *  @hw: pointer to the hardware structure
 *
 *  Prior to calling this function, drivers *MUST* set the following fields
 *  in the hw->aq structure:
 *     - hw->aq.num_asq_entries
 *     - hw->aq.num_arq_entries
 *     - hw->aq.arq_buf_size
 *     - hw->aq.asq_buf_size
 **/
enum i40e_status_code i40e_init_adminq(struct i40e_hw *hw)
{
        enum i40e_status_code ret_code;
#ifdef PF_DRIVER
        u16 eetrack_lo, eetrack_hi;
        u16 cfg_ptr, oem_hi, oem_lo;
        int retry = 0;
#endif
        /* verify input for valid configuration */
        if ((hw->aq.num_arq_entries == 0) ||
            (hw->aq.num_asq_entries == 0) ||
            (hw->aq.arq_buf_size == 0) ||
            (hw->aq.asq_buf_size == 0)) {
                ret_code = I40E_ERR_CONFIG;
                goto init_adminq_exit;
        }

        /* initialize spin locks */
        i40e_init_spinlock(&hw->aq.asq_spinlock);
        i40e_init_spinlock(&hw->aq.arq_spinlock);

        /* Set up register offsets */
        i40e_adminq_init_regs(hw);

        /* setup ASQ command write back timeout */
        hw->aq.asq_cmd_timeout = I40E_ASQ_CMD_TIMEOUT;

        /* allocate the ASQ */
        ret_code = i40e_init_asq(hw);
        if (ret_code != I40E_SUCCESS)
                goto init_adminq_destroy_spinlocks;

        /* allocate the ARQ */
        ret_code = i40e_init_arq(hw);
        if (ret_code != I40E_SUCCESS)
                goto init_adminq_free_asq;

#ifdef PF_DRIVER
#ifdef INTEGRATED_VF
        /* VF has no need of firmware */
        if (i40e_is_vf(hw))
                goto init_adminq_exit;
#endif
        /* There are some cases where the firmware may not be quite ready
         * for AdminQ operations, so we retry the AdminQ setup a few times
         * if we see timeouts in this first AQ call.
         */
        do {
                ret_code = i40e_aq_get_firmware_version(hw,
                                                        &hw->aq.fw_maj_ver,
                                                        &hw->aq.fw_min_ver,
                                                        &hw->aq.fw_build,
                                                        &hw->aq.api_maj_ver,
                                                        &hw->aq.api_min_ver,
                                                        NULL);
                if (ret_code != I40E_ERR_ADMIN_QUEUE_TIMEOUT)
                        break;
                retry++;
                i40e_msec_delay(100);
                i40e_resume_aq(hw);
        } while (retry < 10);
        if (ret_code != I40E_SUCCESS)
                goto init_adminq_free_arq;

        /* get the NVM version info */
        i40e_read_nvm_word(hw, I40E_SR_NVM_DEV_STARTER_VERSION,
                           &hw->nvm.version);
        i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_LO, &eetrack_lo);
        i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_HI, &eetrack_hi);
        hw->nvm.eetrack = (eetrack_hi << 16) | eetrack_lo;
        i40e_read_nvm_word(hw, I40E_SR_BOOT_CONFIG_PTR, &cfg_ptr);
        i40e_read_nvm_word(hw, (cfg_ptr + I40E_NVM_OEM_VER_OFF),
                           &oem_hi);
        i40e_read_nvm_word(hw, (cfg_ptr + (I40E_NVM_OEM_VER_OFF + 1)),
                           &oem_lo);
        hw->nvm.oem_ver = ((u32)oem_hi << 16) | oem_lo;

        if (hw->aq.api_maj_ver > I40E_FW_API_VERSION_MAJOR) {
                ret_code = I40E_ERR_FIRMWARE_API_VERSION;
                goto init_adminq_free_arq;
        }

        /* pre-emptive resource lock release */
        i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL);
        hw->aq.nvm_release_on_done = false;
        hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;

        ret_code = i40e_aq_set_hmc_resource_profile(hw,
                                                    I40E_HMC_PROFILE_DEFAULT,
                                                    0,
                                                    NULL);
#endif /* PF_DRIVER */
        ret_code = I40E_SUCCESS;

        /* success! */
        goto init_adminq_exit;

#ifdef PF_DRIVER
init_adminq_free_arq:
        i40e_shutdown_arq(hw);
#endif
init_adminq_free_asq:
        i40e_shutdown_asq(hw);
init_adminq_destroy_spinlocks:
        i40e_destroy_spinlock(&hw->aq.asq_spinlock);
        i40e_destroy_spinlock(&hw->aq.arq_spinlock);

init_adminq_exit:
        return ret_code;
}
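
#ifdef I40E_ADMINQ_USAGE_EXAMPLE /* hypothetical guard, never defined; example only */
/* Minimal bring-up sketch for the routine above: a driver must size both
 * queues before i40e_init_adminq() can validate its configuration.  The
 * entry counts and 4KB buffer sizes below are illustrative choices, not
 * values mandated by this file.
 */
static enum i40e_status_code i40e_example_adminq_bringup(struct i40e_hw *hw)
{
        enum i40e_status_code ret_code;

        hw->aq.num_asq_entries = 128;
        hw->aq.num_arq_entries = 128;
        hw->aq.asq_buf_size = 4096;
        hw->aq.arq_buf_size = 4096;

        ret_code = i40e_init_adminq(hw);
        if (ret_code != I40E_SUCCESS)
                return ret_code;

        /* ... issue AdminQ commands here ... */

        /* tears down both queues and destroys the spinlocks */
        return i40e_shutdown_adminq(hw);
}
#endif /* I40E_ADMINQ_USAGE_EXAMPLE */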

/**
 *  i40e_shutdown_adminq - shutdown routine for the Admin Queue
 *  @hw: pointer to the hardware structure
 **/
enum i40e_status_code i40e_shutdown_adminq(struct i40e_hw *hw)
{
        enum i40e_status_code ret_code = I40E_SUCCESS;

        if (i40e_check_asq_alive(hw))
                i40e_aq_queue_shutdown(hw, true);

        i40e_shutdown_asq(hw);
        i40e_shutdown_arq(hw);

        /* destroy the spinlocks */
        i40e_destroy_spinlock(&hw->aq.asq_spinlock);
        i40e_destroy_spinlock(&hw->aq.arq_spinlock);

        if (hw->nvm_buff.va)
                i40e_free_virt_mem(hw, &hw->nvm_buff);

        return ret_code;
}

/**
 *  i40e_clean_asq - cleans Admin send queue
 *  @hw: pointer to the hardware structure
 *
 *  returns the number of free desc
 **/
u16 i40e_clean_asq(struct i40e_hw *hw)
{
        struct i40e_adminq_ring *asq = &(hw->aq.asq);
        struct i40e_asq_cmd_details *details;
        u16 ntc = asq->next_to_clean;
        struct i40e_aq_desc desc_cb;
        struct i40e_aq_desc *desc;

        desc = I40E_ADMINQ_DESC(*asq, ntc);
        details = I40E_ADMINQ_DETAILS(*asq, ntc);

        while (rd32(hw, hw->aq.asq.head) != ntc) {
                i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
                           "ntc %d head %d.\n", ntc, rd32(hw, hw->aq.asq.head));

                if (details->callback) {
                        I40E_ADMINQ_CALLBACK cb_func =
                                        (I40E_ADMINQ_CALLBACK)details->callback;
                        i40e_memcpy(&desc_cb, desc, sizeof(struct i40e_aq_desc),
                                    I40E_DMA_TO_DMA);
                        cb_func(hw, &desc_cb);
                }
                i40e_memset(desc, 0, sizeof(*desc), I40E_DMA_MEM);
                i40e_memset(details, 0, sizeof(*details), I40E_NONDMA_MEM);
                ntc++;
                if (ntc == asq->count)
                        ntc = 0;
                desc = I40E_ADMINQ_DESC(*asq, ntc);
                details = I40E_ADMINQ_DETAILS(*asq, ntc);
        }

        asq->next_to_clean = ntc;

        return I40E_DESC_UNUSED(asq);
}

/**
 *  i40e_asq_done - check if FW has processed the Admin Send Queue
 *  @hw: pointer to the hw struct
 *
 *  Returns true if the firmware has processed all descriptors on the
 *  admin send queue. Returns false if there are still requests pending.
 **/
bool i40e_asq_done(struct i40e_hw *hw)
{
        /* AQ designers suggest use of head for better
         * timing reliability than DD bit
         */
        return rd32(hw, hw->aq.asq.head) == hw->aq.asq.next_to_use;
}

/**
 *  i40e_asq_send_command - send command to Admin Queue
 *  @hw: pointer to the hw struct
 *  @desc: prefilled descriptor describing the command (non DMA mem)
 *  @buff: buffer to use for indirect commands
 *  @buff_size: size of buffer for indirect commands
 *  @cmd_details: pointer to command details structure
 *
 *  This is the main send command driver routine for the Admin Queue send
 *  queue.  It runs the queue, cleans the queue, etc.
 **/
enum i40e_status_code i40e_asq_send_command(struct i40e_hw *hw,
                                struct i40e_aq_desc *desc,
                                void *buff, /* can be NULL */
                                u16  buff_size,
                                struct i40e_asq_cmd_details *cmd_details)
{
        enum i40e_status_code status = I40E_SUCCESS;
        struct i40e_dma_mem *dma_buff = NULL;
        struct i40e_asq_cmd_details *details;
        struct i40e_aq_desc *desc_on_ring;
        bool cmd_completed = false;
        u16  retval = 0;
        u32  val = 0;

        i40e_acquire_spinlock(&hw->aq.asq_spinlock);

        hw->aq.asq_last_status = I40E_AQ_RC_OK;

        if (hw->aq.asq.count == 0) {
                i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
                           "AQTX: Admin queue not initialized.\n");
                status = I40E_ERR_QUEUE_EMPTY;
                goto asq_send_command_error;
        }

        val = rd32(hw, hw->aq.asq.head);
        if (val >= hw->aq.num_asq_entries) {
                i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
                           "AQTX: head overrun at %d\n", val);
                status = I40E_ERR_QUEUE_EMPTY;
                goto asq_send_command_error;
        }

        details = I40E_ADMINQ_DETAILS(hw->aq.asq, hw->aq.asq.next_to_use);
        if (cmd_details) {
                i40e_memcpy(details,
                            cmd_details,
                            sizeof(struct i40e_asq_cmd_details),
                            I40E_NONDMA_TO_NONDMA);

                /* If the cmd_details are defined copy the cookie.  The
                 * CPU_TO_LE32 is not needed here because the data is ignored
                 * by the FW, only used by the driver
                 */
                if (details->cookie) {
                        desc->cookie_high =
                                CPU_TO_LE32(I40E_HI_DWORD(details->cookie));
                        desc->cookie_low =
                                CPU_TO_LE32(I40E_LO_DWORD(details->cookie));
                }
        } else {
                i40e_memset(details, 0,
                            sizeof(struct i40e_asq_cmd_details),
                            I40E_NONDMA_MEM);
        }

        /* clear requested flags and then set additional flags if defined */
        desc->flags &= ~CPU_TO_LE16(details->flags_dis);
        desc->flags |= CPU_TO_LE16(details->flags_ena);

        if (buff_size > hw->aq.asq_buf_size) {
                i40e_debug(hw,
                           I40E_DEBUG_AQ_MESSAGE,
                           "AQTX: Invalid buffer size: %d.\n",
                           buff_size);
                status = I40E_ERR_INVALID_SIZE;
                goto asq_send_command_error;
        }

        if (details->postpone && !details->async) {
                i40e_debug(hw,
                           I40E_DEBUG_AQ_MESSAGE,
                           "AQTX: Async flag not set along with postpone flag\n");
                status = I40E_ERR_PARAM;
                goto asq_send_command_error;
        }

        /* call clean and check queue available function to reclaim the
         * descriptors that were processed by FW, the function returns the
         * number of desc available
         */
        /* the clean function called here could be called in a separate thread
         * in case of asynchronous completions
         */
        if (i40e_clean_asq(hw) == 0) {
                i40e_debug(hw,
                           I40E_DEBUG_AQ_MESSAGE,
                           "AQTX: Error queue is full.\n");
                status = I40E_ERR_ADMIN_QUEUE_FULL;
                goto asq_send_command_error;
        }

        /* initialize the temp desc pointer with the right desc */
        desc_on_ring = I40E_ADMINQ_DESC(hw->aq.asq, hw->aq.asq.next_to_use);

        /* if the desc is available copy the temp desc to the right place */
        i40e_memcpy(desc_on_ring, desc, sizeof(struct i40e_aq_desc),
                    I40E_NONDMA_TO_DMA);

        /* if buff is not NULL assume indirect command */
        if (buff != NULL) {
                dma_buff = &(hw->aq.asq.r.asq_bi[hw->aq.asq.next_to_use]);
                /* copy the user buff into the respective DMA buff */
                i40e_memcpy(dma_buff->va, buff, buff_size,
                            I40E_NONDMA_TO_DMA);
                desc_on_ring->datalen = CPU_TO_LE16(buff_size);

                /* Update the address values in the desc with the pa value
                 * for respective buffer
                 */
                desc_on_ring->params.external.addr_high =
                                CPU_TO_LE32(I40E_HI_DWORD(dma_buff->pa));
                desc_on_ring->params.external.addr_low =
                                CPU_TO_LE32(I40E_LO_DWORD(dma_buff->pa));
        }

        /* bump the tail */
        i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQTX: desc and buffer:\n");
        i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc_on_ring,
                      buff, buff_size);
        (hw->aq.asq.next_to_use)++;
        if (hw->aq.asq.next_to_use == hw->aq.asq.count)
                hw->aq.asq.next_to_use = 0;
        if (!details->postpone)
                wr32(hw, hw->aq.asq.tail, hw->aq.asq.next_to_use);

        /* if cmd_details are not defined or async flag is not set,
         * we need to wait for desc write back
         */
        if (!details->async && !details->postpone) {
                u32 total_delay = 0;

                do {
                        /* AQ designers suggest use of head for better
                         * timing reliability than DD bit
                         */
                        if (i40e_asq_done(hw))
                                break;
                        /* ugh! delay while spin_lock */
                        i40e_msec_delay(1);
                        total_delay++;
                } while (total_delay < hw->aq.asq_cmd_timeout);
        }

        /* if ready, copy the desc back to temp */
        if (i40e_asq_done(hw)) {
                i40e_memcpy(desc, desc_on_ring, sizeof(struct i40e_aq_desc),
                            I40E_DMA_TO_NONDMA);
                if (buff != NULL)
                        i40e_memcpy(buff, dma_buff->va, buff_size,
                                    I40E_DMA_TO_NONDMA);
                retval = LE16_TO_CPU(desc->retval);
                if (retval != 0) {
                        i40e_debug(hw,
                                   I40E_DEBUG_AQ_MESSAGE,
                                   "AQTX: Command completed with error 0x%X.\n",
                                   retval);

                        /* strip off FW internal code */
                        retval &= 0xff;
                }
                cmd_completed = true;
                if ((enum i40e_admin_queue_err)retval == I40E_AQ_RC_OK)
                        status = I40E_SUCCESS;
                else
                        status = I40E_ERR_ADMIN_QUEUE_ERROR;
                hw->aq.asq_last_status = (enum i40e_admin_queue_err)retval;
        }

        i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
                   "AQTX: desc and buffer writeback:\n");
        i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, buff, buff_size);

        /* save writeback aq if requested */
        if (details->wb_desc)
                i40e_memcpy(details->wb_desc, desc_on_ring,
                            sizeof(struct i40e_aq_desc), I40E_DMA_TO_NONDMA);

        /* update the error if time out occurred */
        if ((!cmd_completed) &&
            (!details->async && !details->postpone)) {
                i40e_debug(hw,
                           I40E_DEBUG_AQ_MESSAGE,
                           "AQTX: Writeback timeout.\n");
                status = I40E_ERR_ADMIN_QUEUE_TIMEOUT;
        }

asq_send_command_error:
        i40e_release_spinlock(&hw->aq.asq_spinlock);
        return status;
}

/**
 *  i40e_fill_default_direct_cmd_desc - AQ descriptor helper function
 *  @desc:     pointer to the temp descriptor (non DMA mem)
 *  @opcode:   the opcode can be used to decide which flags to turn off or on
 *
 *  Fill the desc with default values
 **/
void i40e_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc,
                                       u16 opcode)
{
        /* zero out the desc */
        i40e_memset((void *)desc, 0, sizeof(struct i40e_aq_desc),
                    I40E_NONDMA_MEM);
        desc->opcode = CPU_TO_LE16(opcode);
        desc->flags = CPU_TO_LE16(I40E_AQ_FLAG_SI);
}
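
#ifdef I40E_ADMINQ_USAGE_EXAMPLE /* hypothetical guard, never defined; example only */
/* Sketch of a direct (buffer-less) command using the two routines above:
 * fill a descriptor with defaults for an opcode, then hand it to
 * i40e_asq_send_command() with a NULL buffer (direct command) and NULL
 * cmd_details (default synchronous wait).  On success the firmware
 * writeback is copied back into 'desc' for the caller to inspect.
 * i40e_aqc_opc_get_version is a real opcode from i40e_adminq_cmd.h.
 */
static enum i40e_status_code i40e_example_send_direct_cmd(struct i40e_hw *hw)
{
        struct i40e_aq_desc desc;

        i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_version);

        return i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
}
#endif /* I40E_ADMINQ_USAGE_EXAMPLE */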

/**
 *  i40e_clean_arq_element
 *  @hw: pointer to the hw struct
 *  @e: event info from the receive descriptor, includes any buffers
 *  @pending: number of events that could be left to process
 *
 *  This function cleans one Admin Receive Queue element and returns
 *  the contents through e.  It can also return how many events are
 *  left to process through 'pending'
 **/
enum i40e_status_code i40e_clean_arq_element(struct i40e_hw *hw,
                                             struct i40e_arq_event_info *e,
                                             u16 *pending)
{
        enum i40e_status_code ret_code = I40E_SUCCESS;
        u16 ntc = hw->aq.arq.next_to_clean;
        struct i40e_aq_desc *desc;
        struct i40e_dma_mem *bi;
        u16 desc_idx;
        u16 datalen;
        u16 flags;
        u16 ntu;

        /* pre-clean the event info */
        i40e_memset(&e->desc, 0, sizeof(e->desc), I40E_NONDMA_MEM);

        /* take the lock before we start messing with the ring */
        i40e_acquire_spinlock(&hw->aq.arq_spinlock);

        if (hw->aq.arq.count == 0) {
                i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
                           "AQRX: Admin queue not initialized.\n");
                ret_code = I40E_ERR_QUEUE_EMPTY;
                goto clean_arq_element_err;
        }

        /* set next_to_use to head */
#ifdef PF_DRIVER
#ifdef INTEGRATED_VF
        if (!i40e_is_vf(hw))
                ntu = (rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK);
#else
        ntu = (rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK);
#endif /* INTEGRATED_VF */
#endif /* PF_DRIVER */
#ifdef VF_DRIVER
#ifdef INTEGRATED_VF
        if (i40e_is_vf(hw))
                ntu = (rd32(hw, hw->aq.arq.head) & I40E_VF_ARQH1_ARQH_MASK);
#else
        ntu = (rd32(hw, hw->aq.arq.head) & I40E_VF_ARQH1_ARQH_MASK);
#endif /* INTEGRATED_VF */
#endif /* VF_DRIVER */
        if (ntu == ntc) {
                /* nothing to do - shouldn't need to update ring's values */
                ret_code = I40E_ERR_ADMIN_QUEUE_NO_WORK;
                goto clean_arq_element_out;
        }

        /* now clean the next descriptor */
        desc = I40E_ADMINQ_DESC(hw->aq.arq, ntc);
        desc_idx = ntc;

        flags = LE16_TO_CPU(desc->flags);
        if (flags & I40E_AQ_FLAG_ERR) {
                ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;
                hw->aq.arq_last_status =
                        (enum i40e_admin_queue_err)LE16_TO_CPU(desc->retval);
                i40e_debug(hw,
                           I40E_DEBUG_AQ_MESSAGE,
                           "AQRX: Event received with error 0x%X.\n",
                           hw->aq.arq_last_status);
        }

        i40e_memcpy(&e->desc, desc, sizeof(struct i40e_aq_desc),
                    I40E_DMA_TO_NONDMA);
        datalen = LE16_TO_CPU(desc->datalen);
        e->msg_len = min(datalen, e->buf_len);
        if (e->msg_buf != NULL && (e->msg_len != 0))
                i40e_memcpy(e->msg_buf,
                            hw->aq.arq.r.arq_bi[desc_idx].va,
                            e->msg_len, I40E_DMA_TO_NONDMA);

        i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQRX: desc and buffer:\n");
        i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, e->msg_buf,
                      hw->aq.arq_buf_size);

        /* Restore the original datalen and buffer address in the desc;
         * FW updates datalen to indicate the event message size
         */
        bi = &hw->aq.arq.r.arq_bi[ntc];
        i40e_memset((void *)desc, 0, sizeof(struct i40e_aq_desc), I40E_DMA_MEM);

        desc->flags = CPU_TO_LE16(I40E_AQ_FLAG_BUF);
        if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF)
                desc->flags |= CPU_TO_LE16(I40E_AQ_FLAG_LB);
        desc->datalen = CPU_TO_LE16((u16)bi->size);
        desc->params.external.addr_high = CPU_TO_LE32(I40E_HI_DWORD(bi->pa));
        desc->params.external.addr_low = CPU_TO_LE32(I40E_LO_DWORD(bi->pa));

        /* set tail = the last cleaned desc index. */
        wr32(hw, hw->aq.arq.tail, ntc);
        /* ntc is updated to tail + 1 */
        ntc++;
        if (ntc == hw->aq.num_arq_entries)
                ntc = 0;
        hw->aq.arq.next_to_clean = ntc;
        hw->aq.arq.next_to_use = ntu;

#ifdef PF_DRIVER
        if (i40e_is_nvm_update_op(&e->desc)) {
                if (hw->aq.nvm_release_on_done) {
                        i40e_release_nvm(hw);
                        hw->aq.nvm_release_on_done = false;
                }

                switch (hw->nvmupd_state) {
                case I40E_NVMUPD_STATE_INIT_WAIT:
                        hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
                        break;

                case I40E_NVMUPD_STATE_WRITE_WAIT:
                        hw->nvmupd_state = I40E_NVMUPD_STATE_WRITING;
                        break;

                default:
                        break;
                }
        }

#endif
clean_arq_element_out:
        /* Set pending if needed, unlock and return */
        if (pending != NULL)
                *pending = (ntc > ntu ? hw->aq.arq.count : 0) + (ntu - ntc);
clean_arq_element_err:
        i40e_release_spinlock(&hw->aq.arq_spinlock);

        return ret_code;
}
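
#ifdef I40E_ADMINQ_USAGE_EXAMPLE /* hypothetical guard, never defined; example only */
/* Sketch of draining the receive (event) queue with the routine above.
 * The caller owns the event buffer; 'pending' reports how many descriptors
 * remain so the loop runs until the queue is empty.  The 4KB buffer size
 * is an illustrative choice matching a common arq_buf_size.
 */
static void i40e_example_drain_arq(struct i40e_hw *hw)
{
        struct i40e_arq_event_info event;
        u8 msg[4096];
        u16 pending;

        event.buf_len = sizeof(msg);
        event.msg_buf = msg;

        do {
                /* returns I40E_ERR_ADMIN_QUEUE_NO_WORK when nothing is left */
                if (i40e_clean_arq_element(hw, &event, &pending) != I40E_SUCCESS)
                        break;
                /* ... dispatch on LE16_TO_CPU(event.desc.opcode) ... */
        } while (pending);
}
#endif /* I40E_ADMINQ_USAGE_EXAMPLE */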

/**
 *  i40e_resume_aq - resume AQ processing after a PF reset
 *  @hw: pointer to the hardware structure
 *
 *  The AQ registers are cleared by a PF reset, so rewind the ring indices
 *  and reprogram the base address and length registers for both queues.
 **/
void i40e_resume_aq(struct i40e_hw *hw)
{
        /* Registers are reset after PF reset */
        hw->aq.asq.next_to_use = 0;
        hw->aq.asq.next_to_clean = 0;

        i40e_config_asq_regs(hw);

        hw->aq.arq.next_to_use = 0;
        hw->aq.arq.next_to_clean = 0;

        i40e_config_arq_regs(hw);
}