/*******************************************************************************

Copyright (c) 2013 - 2015, Intel Corporation
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

 1. Redistributions of source code must retain the above copyright notice,
    this list of conditions and the following disclaimer.

 2. Redistributions in binary form must reproduce the above copyright
    notice, this list of conditions and the following disclaimer in the
    documentation and/or other materials provided with the distribution.

 3. Neither the name of the Intel Corporation nor the names of its
    contributors may be used to endorse or promote products derived from
    this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.

***************************************************************************/

#include "i40e_status.h"
#include "i40e_type.h"
#include "i40e_register.h"
#include "i40e_adminq.h"
#include "i40e_prototype.h"

/**
 *  i40e_adminq_init_regs - Initialize AdminQ registers
 *  @hw: pointer to the hardware structure
 *
 *  This assumes the alloc_asq and alloc_arq functions have already been called
 **/
STATIC void i40e_adminq_init_regs(struct i40e_hw *hw)
{
        /* set head and tail registers in our local struct */
        if (i40e_is_vf(hw)) {
                hw->aq.asq.tail = I40E_VF_ATQT1;
                hw->aq.asq.head = I40E_VF_ATQH1;
                hw->aq.asq.len  = I40E_VF_ATQLEN1;
                hw->aq.asq.bal  = I40E_VF_ATQBAL1;
                hw->aq.asq.bah  = I40E_VF_ATQBAH1;
                hw->aq.arq.tail = I40E_VF_ARQT1;
                hw->aq.arq.head = I40E_VF_ARQH1;
                hw->aq.arq.len  = I40E_VF_ARQLEN1;
                hw->aq.arq.bal  = I40E_VF_ARQBAL1;
                hw->aq.arq.bah  = I40E_VF_ARQBAH1;
#ifdef PF_DRIVER
        } else {
                hw->aq.asq.tail = I40E_PF_ATQT;
                hw->aq.asq.head = I40E_PF_ATQH;
                hw->aq.asq.len  = I40E_PF_ATQLEN;
                hw->aq.asq.bal  = I40E_PF_ATQBAL;
                hw->aq.asq.bah  = I40E_PF_ATQBAH;
                hw->aq.arq.tail = I40E_PF_ARQT;
                hw->aq.arq.head = I40E_PF_ARQH;
                hw->aq.arq.len  = I40E_PF_ARQLEN;
                hw->aq.arq.bal  = I40E_PF_ARQBAL;
                hw->aq.arq.bah  = I40E_PF_ARQBAH;
#endif
        }
}
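
/*
 * Usage sketch (illustrative, not part of the original source): with the
 * offsets cached above, the rest of this file can program either function
 * type through the same fields; i40e_config_asq_regs() below relies on
 * exactly this, e.g.:
 *
 *      wr32(hw, hw->aq.asq.head, 0);   PF: I40E_PF_ATQH, VF: I40E_VF_ATQH1
 *      wr32(hw, hw->aq.asq.tail, 0);   PF: I40E_PF_ATQT, VF: I40E_VF_ATQT1
 */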

/**
 *  i40e_alloc_adminq_asq_ring - Allocate Admin Queue send rings
 *  @hw: pointer to the hardware structure
 **/
enum i40e_status_code i40e_alloc_adminq_asq_ring(struct i40e_hw *hw)
{
        enum i40e_status_code ret_code;

        ret_code = i40e_allocate_dma_mem(hw, &hw->aq.asq.desc_buf,
                                         i40e_mem_atq_ring,
                                         (hw->aq.num_asq_entries *
                                         sizeof(struct i40e_aq_desc)),
                                         I40E_ADMINQ_DESC_ALIGNMENT);
        if (ret_code)
                return ret_code;

        ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.cmd_buf,
                                          (hw->aq.num_asq_entries *
                                          sizeof(struct i40e_asq_cmd_details)));
        if (ret_code) {
                i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
                return ret_code;
        }

        return ret_code;
}

/**
 *  i40e_alloc_adminq_arq_ring - Allocate Admin Queue receive rings
 *  @hw: pointer to the hardware structure
 **/
enum i40e_status_code i40e_alloc_adminq_arq_ring(struct i40e_hw *hw)
{
        enum i40e_status_code ret_code;

        ret_code = i40e_allocate_dma_mem(hw, &hw->aq.arq.desc_buf,
                                         i40e_mem_arq_ring,
                                         (hw->aq.num_arq_entries *
                                         sizeof(struct i40e_aq_desc)),
                                         I40E_ADMINQ_DESC_ALIGNMENT);

        return ret_code;
}

/**
 *  i40e_free_adminq_asq - Free Admin Queue send rings
 *  @hw: pointer to the hardware structure
 *
 *  This assumes the posted send buffers have already been cleaned
 *  and de-allocated
 **/
void i40e_free_adminq_asq(struct i40e_hw *hw)
{
        i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
}

/**
 *  i40e_free_adminq_arq - Free Admin Queue receive rings
 *  @hw: pointer to the hardware structure
 *
 *  This assumes the posted receive buffers have already been cleaned
 *  and de-allocated
 **/
void i40e_free_adminq_arq(struct i40e_hw *hw)
{
        i40e_free_dma_mem(hw, &hw->aq.arq.desc_buf);
}

/**
 *  i40e_alloc_arq_bufs - Allocate pre-posted buffers for the receive queue
 *  @hw: pointer to the hardware structure
 **/
STATIC enum i40e_status_code i40e_alloc_arq_bufs(struct i40e_hw *hw)
{
        enum i40e_status_code ret_code;
        struct i40e_aq_desc *desc;
        struct i40e_dma_mem *bi;
        int i;

        /* We'll be allocating the buffer info memory first, then we can
         * allocate the mapped buffers for the event processing
         */

        /* buffer_info structures do not need alignment */
        ret_code = i40e_allocate_virt_mem(hw, &hw->aq.arq.dma_head,
                (hw->aq.num_arq_entries * sizeof(struct i40e_dma_mem)));
        if (ret_code)
                goto alloc_arq_bufs;
        hw->aq.arq.r.arq_bi = (struct i40e_dma_mem *)hw->aq.arq.dma_head.va;

        /* allocate the mapped buffers */
        for (i = 0; i < hw->aq.num_arq_entries; i++) {
                bi = &hw->aq.arq.r.arq_bi[i];
                ret_code = i40e_allocate_dma_mem(hw, bi,
                                                 i40e_mem_arq_buf,
                                                 hw->aq.arq_buf_size,
                                                 I40E_ADMINQ_DESC_ALIGNMENT);
                if (ret_code)
                        goto unwind_alloc_arq_bufs;

                /* now configure the descriptors for use */
                desc = I40E_ADMINQ_DESC(hw->aq.arq, i);

                desc->flags = CPU_TO_LE16(I40E_AQ_FLAG_BUF);
                if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF)
                        desc->flags |= CPU_TO_LE16(I40E_AQ_FLAG_LB);
                desc->opcode = 0;
                /* In accordance with the Admin Queue design, there is no
                 * register for buffer size configuration; the size is
                 * carried in each descriptor's datalen instead
                 */
                desc->datalen = CPU_TO_LE16((u16)bi->size);
                desc->retval = 0;
                desc->cookie_high = 0;
                desc->cookie_low = 0;
                desc->params.external.addr_high =
                        CPU_TO_LE32(I40E_HI_DWORD(bi->pa));
                desc->params.external.addr_low =
                        CPU_TO_LE32(I40E_LO_DWORD(bi->pa));
                desc->params.external.param0 = 0;
                desc->params.external.param1 = 0;
        }

alloc_arq_bufs:
        return ret_code;

unwind_alloc_arq_bufs:
        /* don't try to free the one that failed... */
        i--;
        for (; i >= 0; i--)
                i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
        i40e_free_virt_mem(hw, &hw->aq.arq.dma_head);

        return ret_code;
}

/**
 *  i40e_alloc_asq_bufs - Allocate empty buffer structs for the send queue
 *  @hw: pointer to the hardware structure
 **/
STATIC enum i40e_status_code i40e_alloc_asq_bufs(struct i40e_hw *hw)
{
        enum i40e_status_code ret_code;
        struct i40e_dma_mem *bi;
        int i;

        /* No mapped memory needed yet, just the buffer info structures */
        ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.dma_head,
                (hw->aq.num_asq_entries * sizeof(struct i40e_dma_mem)));
        if (ret_code)
                goto alloc_asq_bufs;
        hw->aq.asq.r.asq_bi = (struct i40e_dma_mem *)hw->aq.asq.dma_head.va;

        /* allocate the mapped buffers */
        for (i = 0; i < hw->aq.num_asq_entries; i++) {
                bi = &hw->aq.asq.r.asq_bi[i];
                ret_code = i40e_allocate_dma_mem(hw, bi,
                                                 i40e_mem_asq_buf,
                                                 hw->aq.asq_buf_size,
                                                 I40E_ADMINQ_DESC_ALIGNMENT);
                if (ret_code)
                        goto unwind_alloc_asq_bufs;
        }
alloc_asq_bufs:
        return ret_code;

unwind_alloc_asq_bufs:
        /* don't try to free the one that failed... */
        i--;
        for (; i >= 0; i--)
                i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
        i40e_free_virt_mem(hw, &hw->aq.asq.dma_head);

        return ret_code;
}

/**
 *  i40e_free_arq_bufs - Free receive queue buffer info elements
 *  @hw: pointer to the hardware structure
 **/
STATIC void i40e_free_arq_bufs(struct i40e_hw *hw)
{
        int i;

        /* free descriptors */
        for (i = 0; i < hw->aq.num_arq_entries; i++)
                i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);

        /* free the descriptor memory */
        i40e_free_dma_mem(hw, &hw->aq.arq.desc_buf);

        /* free the dma header */
        i40e_free_virt_mem(hw, &hw->aq.arq.dma_head);
}

/**
 *  i40e_free_asq_bufs - Free send queue buffer info elements
 *  @hw: pointer to the hardware structure
 **/
STATIC void i40e_free_asq_bufs(struct i40e_hw *hw)
{
        int i;

        /* only unmap if the address is non-NULL */
        for (i = 0; i < hw->aq.num_asq_entries; i++)
                if (hw->aq.asq.r.asq_bi[i].pa)
                        i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);

        /* free the buffer info list */
        i40e_free_virt_mem(hw, &hw->aq.asq.cmd_buf);

        /* free the descriptor memory */
        i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);

        /* free the dma header */
        i40e_free_virt_mem(hw, &hw->aq.asq.dma_head);
}

/**
 *  i40e_config_asq_regs - configure ASQ registers
 *  @hw: pointer to the hardware structure
 *
 *  Configure base address and length registers for the transmit queue
 **/
STATIC enum i40e_status_code i40e_config_asq_regs(struct i40e_hw *hw)
{
        enum i40e_status_code ret_code = I40E_SUCCESS;
        u32 reg = 0;

        /* Clear Head and Tail */
        wr32(hw, hw->aq.asq.head, 0);
        wr32(hw, hw->aq.asq.tail, 0);

        /* set starting point */
#ifdef PF_DRIVER
#ifdef INTEGRATED_VF
        if (!i40e_is_vf(hw))
                wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries |
                                          I40E_PF_ATQLEN_ATQENABLE_MASK));
#else
        wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries |
                                  I40E_PF_ATQLEN_ATQENABLE_MASK));
#endif /* INTEGRATED_VF */
#endif /* PF_DRIVER */
#ifdef VF_DRIVER
#ifdef INTEGRATED_VF
        if (i40e_is_vf(hw))
                wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries |
                                          I40E_VF_ATQLEN1_ATQENABLE_MASK));
#else
        wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries |
                                  I40E_VF_ATQLEN1_ATQENABLE_MASK));
#endif /* INTEGRATED_VF */
#endif /* VF_DRIVER */
        wr32(hw, hw->aq.asq.bal, I40E_LO_DWORD(hw->aq.asq.desc_buf.pa));
        wr32(hw, hw->aq.asq.bah, I40E_HI_DWORD(hw->aq.asq.desc_buf.pa));

        /* Check one register to verify that config was applied */
        reg = rd32(hw, hw->aq.asq.bal);
        if (reg != I40E_LO_DWORD(hw->aq.asq.desc_buf.pa))
                ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;

        return ret_code;
}

/**
 *  i40e_config_arq_regs - ARQ register configuration
 *  @hw: pointer to the hardware structure
 *
 *  Configure base address and length registers for the receive (event) queue
 **/
STATIC enum i40e_status_code i40e_config_arq_regs(struct i40e_hw *hw)
{
        enum i40e_status_code ret_code = I40E_SUCCESS;
        u32 reg = 0;

        /* Clear Head and Tail */
        wr32(hw, hw->aq.arq.head, 0);
        wr32(hw, hw->aq.arq.tail, 0);

        /* set starting point */
#ifdef PF_DRIVER
#ifdef INTEGRATED_VF
        if (!i40e_is_vf(hw))
                wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries |
                                          I40E_PF_ARQLEN_ARQENABLE_MASK));
#else
        wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries |
                                  I40E_PF_ARQLEN_ARQENABLE_MASK));
#endif /* INTEGRATED_VF */
#endif /* PF_DRIVER */
#ifdef VF_DRIVER
#ifdef INTEGRATED_VF
        if (i40e_is_vf(hw))
                wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries |
                                          I40E_VF_ARQLEN1_ARQENABLE_MASK));
#else
        wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries |
                                  I40E_VF_ARQLEN1_ARQENABLE_MASK));
#endif /* INTEGRATED_VF */
#endif /* VF_DRIVER */
        wr32(hw, hw->aq.arq.bal, I40E_LO_DWORD(hw->aq.arq.desc_buf.pa));
        wr32(hw, hw->aq.arq.bah, I40E_HI_DWORD(hw->aq.arq.desc_buf.pa));

        /* Update tail in the HW to post pre-allocated buffers */
        wr32(hw, hw->aq.arq.tail, hw->aq.num_arq_entries - 1);

        /* Check one register to verify that config was applied */
        reg = rd32(hw, hw->aq.arq.bal);
        if (reg != I40E_LO_DWORD(hw->aq.arq.desc_buf.pa))
                ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;

        return ret_code;
}

/**
 *  i40e_init_asq - main initialization routine for ASQ
 *  @hw: pointer to the hardware structure
 *
 *  This is the main initialization routine for the Admin Send Queue.
 *  Prior to calling this function, drivers *MUST* set the following fields
 *  in the hw->aq structure:
 *     - hw->aq.num_asq_entries
 *     - hw->aq.asq_buf_size
 *
 *  Do *NOT* hold the lock when calling this, as the memory allocation
 *  routines called are not atomic-context safe.
 **/
enum i40e_status_code i40e_init_asq(struct i40e_hw *hw)
{
        enum i40e_status_code ret_code = I40E_SUCCESS;

        if (hw->aq.asq.count > 0) {
                /* queue already initialized */
                ret_code = I40E_ERR_NOT_READY;
                goto init_adminq_exit;
        }

        /* verify input for valid configuration */
        if ((hw->aq.num_asq_entries == 0) ||
            (hw->aq.asq_buf_size == 0)) {
                ret_code = I40E_ERR_CONFIG;
                goto init_adminq_exit;
        }

        hw->aq.asq.next_to_use = 0;
        hw->aq.asq.next_to_clean = 0;

        /* allocate the ring memory */
        ret_code = i40e_alloc_adminq_asq_ring(hw);
        if (ret_code != I40E_SUCCESS)
                goto init_adminq_exit;

        /* allocate buffers in the rings */
        ret_code = i40e_alloc_asq_bufs(hw);
        if (ret_code != I40E_SUCCESS)
                goto init_adminq_free_rings;

        /* initialize base registers */
        ret_code = i40e_config_asq_regs(hw);
        if (ret_code != I40E_SUCCESS)
                goto init_adminq_free_rings;

        /* success! */
        hw->aq.asq.count = hw->aq.num_asq_entries;
        goto init_adminq_exit;

init_adminq_free_rings:
        i40e_free_adminq_asq(hw);

init_adminq_exit:
        return ret_code;
}
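
/*
 * Caller sketch (illustrative; the sizes below are assumptions, not values
 * this file mandates): per the comment above, both fields must be set
 * before the call, and no AdminQ lock may be held:
 *
 *      hw->aq.num_asq_entries = 128;
 *      hw->aq.asq_buf_size = 512;
 *      if (i40e_init_asq(hw) != I40E_SUCCESS)
 *              goto err;
 */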

/**
 *  i40e_init_arq - initialize ARQ
 *  @hw: pointer to the hardware structure
 *
 *  The main initialization routine for the Admin Receive (Event) Queue.
 *  Prior to calling this function, drivers *MUST* set the following fields
 *  in the hw->aq structure:
 *     - hw->aq.num_arq_entries
 *     - hw->aq.arq_buf_size
 *
 *  Do *NOT* hold the lock when calling this, as the memory allocation
 *  routines called are not atomic-context safe.
 **/
enum i40e_status_code i40e_init_arq(struct i40e_hw *hw)
{
        enum i40e_status_code ret_code = I40E_SUCCESS;

        if (hw->aq.arq.count > 0) {
                /* queue already initialized */
                ret_code = I40E_ERR_NOT_READY;
                goto init_adminq_exit;
        }

        /* verify input for valid configuration */
        if ((hw->aq.num_arq_entries == 0) ||
            (hw->aq.arq_buf_size == 0)) {
                ret_code = I40E_ERR_CONFIG;
                goto init_adminq_exit;
        }

        hw->aq.arq.next_to_use = 0;
        hw->aq.arq.next_to_clean = 0;

        /* allocate the ring memory */
        ret_code = i40e_alloc_adminq_arq_ring(hw);
        if (ret_code != I40E_SUCCESS)
                goto init_adminq_exit;

        /* allocate buffers in the rings */
        ret_code = i40e_alloc_arq_bufs(hw);
        if (ret_code != I40E_SUCCESS)
                goto init_adminq_free_rings;

        /* initialize base registers */
        ret_code = i40e_config_arq_regs(hw);
        if (ret_code != I40E_SUCCESS)
                goto init_adminq_free_rings;

        /* success! */
        hw->aq.arq.count = hw->aq.num_arq_entries;
        goto init_adminq_exit;

init_adminq_free_rings:
        i40e_free_adminq_arq(hw);

init_adminq_exit:
        return ret_code;
}

/**
 *  i40e_shutdown_asq - shutdown the ASQ
 *  @hw: pointer to the hardware structure
 *
 *  The main shutdown routine for the Admin Send Queue
 **/
enum i40e_status_code i40e_shutdown_asq(struct i40e_hw *hw)
{
        enum i40e_status_code ret_code = I40E_SUCCESS;

        i40e_acquire_spinlock(&hw->aq.asq_spinlock);

        if (hw->aq.asq.count == 0) {
                ret_code = I40E_ERR_NOT_READY;
                goto shutdown_asq_out;
        }

        /* Stop firmware AdminQ processing */
        wr32(hw, hw->aq.asq.head, 0);
        wr32(hw, hw->aq.asq.tail, 0);
        wr32(hw, hw->aq.asq.len, 0);
        wr32(hw, hw->aq.asq.bal, 0);
        wr32(hw, hw->aq.asq.bah, 0);

        hw->aq.asq.count = 0; /* to indicate uninitialized queue */

        /* free ring buffers */
        i40e_free_asq_bufs(hw);

shutdown_asq_out:
        i40e_release_spinlock(&hw->aq.asq_spinlock);
        return ret_code;
}

/**
 *  i40e_shutdown_arq - shutdown ARQ
 *  @hw: pointer to the hardware structure
 *
 *  The main shutdown routine for the Admin Receive Queue
 **/
enum i40e_status_code i40e_shutdown_arq(struct i40e_hw *hw)
{
        enum i40e_status_code ret_code = I40E_SUCCESS;

        i40e_acquire_spinlock(&hw->aq.arq_spinlock);

        if (hw->aq.arq.count == 0) {
                ret_code = I40E_ERR_NOT_READY;
                goto shutdown_arq_out;
        }

        /* Stop firmware AdminQ processing */
        wr32(hw, hw->aq.arq.head, 0);
        wr32(hw, hw->aq.arq.tail, 0);
        wr32(hw, hw->aq.arq.len, 0);
        wr32(hw, hw->aq.arq.bal, 0);
        wr32(hw, hw->aq.arq.bah, 0);

        hw->aq.arq.count = 0; /* to indicate uninitialized queue */

        /* free ring buffers */
        i40e_free_arq_bufs(hw);

shutdown_arq_out:
        i40e_release_spinlock(&hw->aq.arq_spinlock);
        return ret_code;
}
#ifdef PF_DRIVER

/**
 *  i40e_resume_aq - resume AQ processing from 0
 *  @hw: pointer to the hardware structure
 **/
STATIC void i40e_resume_aq(struct i40e_hw *hw)
{
        /* Registers are reset after PF reset */
        hw->aq.asq.next_to_use = 0;
        hw->aq.asq.next_to_clean = 0;

        i40e_config_asq_regs(hw);

        hw->aq.arq.next_to_use = 0;
        hw->aq.arq.next_to_clean = 0;

        i40e_config_arq_regs(hw);
}
#endif /* PF_DRIVER */

/**
 *  i40e_init_adminq - main initialization routine for Admin Queue
 *  @hw: pointer to the hardware structure
 *
 *  Prior to calling this function, drivers *MUST* set the following fields
 *  in the hw->aq structure:
 *     - hw->aq.num_asq_entries
 *     - hw->aq.num_arq_entries
 *     - hw->aq.arq_buf_size
 *     - hw->aq.asq_buf_size
 **/
enum i40e_status_code i40e_init_adminq(struct i40e_hw *hw)
{
#ifdef PF_DRIVER
        u16 cfg_ptr, oem_hi, oem_lo;
        u16 eetrack_lo, eetrack_hi;
#endif
        enum i40e_status_code ret_code;
#ifdef PF_DRIVER
        int retry = 0;
#endif

        /* verify input for valid configuration */
        if ((hw->aq.num_arq_entries == 0) ||
            (hw->aq.num_asq_entries == 0) ||
            (hw->aq.arq_buf_size == 0) ||
            (hw->aq.asq_buf_size == 0)) {
                ret_code = I40E_ERR_CONFIG;
                goto init_adminq_exit;
        }
        i40e_init_spinlock(&hw->aq.asq_spinlock);
        i40e_init_spinlock(&hw->aq.arq_spinlock);

        /* Set up register offsets */
        i40e_adminq_init_regs(hw);

        /* setup ASQ command write back timeout */
        hw->aq.asq_cmd_timeout = I40E_ASQ_CMD_TIMEOUT;

        /* allocate the ASQ */
        ret_code = i40e_init_asq(hw);
        if (ret_code != I40E_SUCCESS)
                goto init_adminq_destroy_spinlocks;

        /* allocate the ARQ */
        ret_code = i40e_init_arq(hw);
        if (ret_code != I40E_SUCCESS)
                goto init_adminq_free_asq;

#ifdef PF_DRIVER
#ifdef INTEGRATED_VF
        /* VF has no need of firmware */
        if (i40e_is_vf(hw))
                goto init_adminq_exit;
#endif
        /* There are some cases where the firmware may not be quite ready
         * for AdminQ operations, so we retry the AdminQ setup a few times
         * if we see timeouts in this first AQ call.
         */
        do {
                ret_code = i40e_aq_get_firmware_version(hw,
                                                        &hw->aq.fw_maj_ver,
                                                        &hw->aq.fw_min_ver,
                                                        &hw->aq.fw_build,
                                                        &hw->aq.api_maj_ver,
                                                        &hw->aq.api_min_ver,
                                                        NULL);
                if (ret_code != I40E_ERR_ADMIN_QUEUE_TIMEOUT)
                        break;
                retry++;
                i40e_msec_delay(100);
                i40e_resume_aq(hw);
        } while (retry < 10);
        if (ret_code != I40E_SUCCESS)
                goto init_adminq_free_arq;

        /* get the NVM version info */
        i40e_read_nvm_word(hw, I40E_SR_NVM_DEV_STARTER_VERSION,
                           &hw->nvm.version);
        i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_LO, &eetrack_lo);
        i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_HI, &eetrack_hi);
        hw->nvm.eetrack = (eetrack_hi << 16) | eetrack_lo;
        i40e_read_nvm_word(hw, I40E_SR_BOOT_CONFIG_PTR, &cfg_ptr);
        i40e_read_nvm_word(hw, (cfg_ptr + I40E_NVM_OEM_VER_OFF),
                           &oem_hi);
        i40e_read_nvm_word(hw, (cfg_ptr + (I40E_NVM_OEM_VER_OFF + 1)),
                           &oem_lo);
        hw->nvm.oem_ver = ((u32)oem_hi << 16) | oem_lo;

        /* Newer versions of firmware require lock when reading the NVM */
        if ((hw->aq.api_maj_ver > 1) ||
            ((hw->aq.api_maj_ver == 1) &&
             (hw->aq.api_min_ver >= 5)))
                hw->flags |= I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK;

        if (hw->aq.api_maj_ver > I40E_FW_API_VERSION_MAJOR) {
                ret_code = I40E_ERR_FIRMWARE_API_VERSION;
                goto init_adminq_free_arq;
        }

        /* pre-emptive resource lock release */
        i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL);
        hw->nvm_release_on_done = false;
        hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;

#endif /* PF_DRIVER */
        ret_code = I40E_SUCCESS;

        /* success! */
        goto init_adminq_exit;

#ifdef PF_DRIVER
init_adminq_free_arq:
        i40e_shutdown_arq(hw);
#endif
init_adminq_free_asq:
        i40e_shutdown_asq(hw);
init_adminq_destroy_spinlocks:
        i40e_destroy_spinlock(&hw->aq.asq_spinlock);
        i40e_destroy_spinlock(&hw->aq.arq_spinlock);

init_adminq_exit:
        return ret_code;
}
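
/*
 * Bring-up sketch (illustrative; the entry counts and buffer sizes are
 * assumptions, not values this file mandates): a host driver sizes both
 * queues before the call; on failure i40e_init_adminq() has already
 * unwound its own allocations via the goto chain above:
 *
 *      hw->aq.num_asq_entries = 128;
 *      hw->aq.num_arq_entries = 128;
 *      hw->aq.asq_buf_size = 4096;
 *      hw->aq.arq_buf_size = 4096;
 *      ret = i40e_init_adminq(hw);
 *      if (ret != I40E_SUCCESS)
 *              return ret;
 */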

/**
 *  i40e_shutdown_adminq - shutdown routine for the Admin Queue
 *  @hw: pointer to the hardware structure
 **/
enum i40e_status_code i40e_shutdown_adminq(struct i40e_hw *hw)
{
        enum i40e_status_code ret_code = I40E_SUCCESS;

        if (i40e_check_asq_alive(hw))
                i40e_aq_queue_shutdown(hw, true);

        i40e_shutdown_asq(hw);
        i40e_shutdown_arq(hw);
        i40e_destroy_spinlock(&hw->aq.asq_spinlock);
        i40e_destroy_spinlock(&hw->aq.arq_spinlock);

        if (hw->nvm_buff.va)
                i40e_free_virt_mem(hw, &hw->nvm_buff);

        return ret_code;
}

/**
 *  i40e_clean_asq - cleans Admin send queue
 *  @hw: pointer to the hardware structure
 *
 *  returns the number of free desc
 **/
u16 i40e_clean_asq(struct i40e_hw *hw)
{
        struct i40e_adminq_ring *asq = &(hw->aq.asq);
        struct i40e_asq_cmd_details *details;
        u16 ntc = asq->next_to_clean;
        struct i40e_aq_desc desc_cb;
        struct i40e_aq_desc *desc;

        desc = I40E_ADMINQ_DESC(*asq, ntc);
        details = I40E_ADMINQ_DETAILS(*asq, ntc);
        while (rd32(hw, hw->aq.asq.head) != ntc) {
                i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
                           "ntc %d head %d.\n", ntc, rd32(hw, hw->aq.asq.head));

                if (details->callback) {
                        I40E_ADMINQ_CALLBACK cb_func =
                                        (I40E_ADMINQ_CALLBACK)details->callback;
                        i40e_memcpy(&desc_cb, desc, sizeof(struct i40e_aq_desc),
                                    I40E_DMA_TO_DMA);
                        cb_func(hw, &desc_cb);
                }
                i40e_memset(desc, 0, sizeof(*desc), I40E_DMA_MEM);
                i40e_memset(details, 0, sizeof(*details), I40E_NONDMA_MEM);
                ntc++;
                if (ntc == asq->count)
                        ntc = 0;
                desc = I40E_ADMINQ_DESC(*asq, ntc);
                details = I40E_ADMINQ_DETAILS(*asq, ntc);
        }

        asq->next_to_clean = ntc;

        return I40E_DESC_UNUSED(asq);
}
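
/*
 * Callback sketch (illustrative; my_handler is a hypothetical name): a
 * completion callback rides in the per-command details and is invoked
 * above with a stack copy of the completed descriptor:
 *
 *      struct i40e_asq_cmd_details details = { 0 };
 *
 *      details.callback = (I40E_ADMINQ_CALLBACK)my_handler;
 *      details.async = true;
 *      status = i40e_asq_send_command(hw, &desc, NULL, 0, &details);
 */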

/**
 *  i40e_asq_done - check if FW has processed the Admin Send Queue
 *  @hw: pointer to the hw struct
 *
 *  Returns true if the firmware has processed all descriptors on the
 *  admin send queue. Returns false if there are still requests pending.
 **/
#ifdef VF_DRIVER
bool i40e_asq_done(struct i40e_hw *hw)
#else
STATIC bool i40e_asq_done(struct i40e_hw *hw)
#endif
{
        /* AQ designers suggest use of head for better
         * timing reliability than DD bit
         */
        return rd32(hw, hw->aq.asq.head) == hw->aq.asq.next_to_use;
}

/**
 *  i40e_asq_send_command - send command to Admin Queue
 *  @hw: pointer to the hw struct
 *  @desc: prefilled descriptor describing the command (non DMA mem)
 *  @buff: buffer to use for indirect commands
 *  @buff_size: size of buffer for indirect commands
 *  @cmd_details: pointer to command details structure
 *
 *  This is the main send command driver routine for the Admin Queue send
 *  queue.  It runs the queue, cleans the queue, etc.
 **/
enum i40e_status_code i40e_asq_send_command(struct i40e_hw *hw,
                                struct i40e_aq_desc *desc,
                                void *buff, /* can be NULL */
                                u16  buff_size,
                                struct i40e_asq_cmd_details *cmd_details)
{
        enum i40e_status_code status = I40E_SUCCESS;
        struct i40e_dma_mem *dma_buff = NULL;
        struct i40e_asq_cmd_details *details;
        struct i40e_aq_desc *desc_on_ring;
        bool cmd_completed = false;
        u16  retval = 0;
        u32  val = 0;

        i40e_acquire_spinlock(&hw->aq.asq_spinlock);

        hw->aq.asq_last_status = I40E_AQ_RC_OK;

        if (hw->aq.asq.count == 0) {
                i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
                           "AQTX: Admin queue not initialized.\n");
                status = I40E_ERR_QUEUE_EMPTY;
                goto asq_send_command_error;
        }

        val = rd32(hw, hw->aq.asq.head);
        if (val >= hw->aq.num_asq_entries) {
                i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
                           "AQTX: head overrun at %d\n", val);
                status = I40E_ERR_QUEUE_EMPTY;
                goto asq_send_command_error;
        }

        details = I40E_ADMINQ_DETAILS(hw->aq.asq, hw->aq.asq.next_to_use);
        if (cmd_details) {
                i40e_memcpy(details,
                            cmd_details,
                            sizeof(struct i40e_asq_cmd_details),
                            I40E_NONDMA_TO_NONDMA);

                /* If the cmd_details are defined copy the cookie.  The
                 * CPU_TO_LE32 is not needed here because the data is ignored
                 * by the FW, only used by the driver
                 */
                if (details->cookie) {
                        desc->cookie_high =
                                CPU_TO_LE32(I40E_HI_DWORD(details->cookie));
                        desc->cookie_low =
                                CPU_TO_LE32(I40E_LO_DWORD(details->cookie));
                }
        } else {
                i40e_memset(details, 0,
                            sizeof(struct i40e_asq_cmd_details),
                            I40E_NONDMA_MEM);
        }

        /* clear requested flags and then set additional flags if defined */
        desc->flags &= ~CPU_TO_LE16(details->flags_dis);
        desc->flags |= CPU_TO_LE16(details->flags_ena);

        if (buff_size > hw->aq.asq_buf_size) {
                i40e_debug(hw,
                           I40E_DEBUG_AQ_MESSAGE,
                           "AQTX: Invalid buffer size: %d.\n",
                           buff_size);
                status = I40E_ERR_INVALID_SIZE;
                goto asq_send_command_error;
        }

        if (details->postpone && !details->async) {
                i40e_debug(hw,
                           I40E_DEBUG_AQ_MESSAGE,
                           "AQTX: Async flag not set along with postpone flag\n");
                status = I40E_ERR_PARAM;
                goto asq_send_command_error;
        }

        /* call clean and check queue available function to reclaim the
         * descriptors that were processed by FW, the function returns the
         * number of desc available
         */
        /* the clean function called here could be called in a separate thread
         * in case of asynchronous completions
         */
        if (i40e_clean_asq(hw) == 0) {
                i40e_debug(hw,
                           I40E_DEBUG_AQ_MESSAGE,
                           "AQTX: Error queue is full.\n");
                status = I40E_ERR_ADMIN_QUEUE_FULL;
                goto asq_send_command_error;
        }

        /* initialize the temp desc pointer with the right desc */
        desc_on_ring = I40E_ADMINQ_DESC(hw->aq.asq, hw->aq.asq.next_to_use);

        /* if the desc is available copy the temp desc to the right place */
        i40e_memcpy(desc_on_ring, desc, sizeof(struct i40e_aq_desc),
                    I40E_NONDMA_TO_DMA);

        /* if buff is not NULL assume indirect command */
        if (buff != NULL) {
                dma_buff = &(hw->aq.asq.r.asq_bi[hw->aq.asq.next_to_use]);
                /* copy the user buff into the respective DMA buff */
                i40e_memcpy(dma_buff->va, buff, buff_size,
                            I40E_NONDMA_TO_DMA);
                desc_on_ring->datalen = CPU_TO_LE16(buff_size);

                /* Update the address values in the desc with the pa value
                 * for respective buffer
                 */
                desc_on_ring->params.external.addr_high =
                                CPU_TO_LE32(I40E_HI_DWORD(dma_buff->pa));
                desc_on_ring->params.external.addr_low =
                                CPU_TO_LE32(I40E_LO_DWORD(dma_buff->pa));
        }

        /* bump the tail */
        i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQTX: desc and buffer:\n");
        i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc_on_ring,
                      buff, buff_size);
        (hw->aq.asq.next_to_use)++;
        if (hw->aq.asq.next_to_use == hw->aq.asq.count)
                hw->aq.asq.next_to_use = 0;
        if (!details->postpone)
                wr32(hw, hw->aq.asq.tail, hw->aq.asq.next_to_use);

        /* if cmd_details are not defined or async flag is not set,
         * we need to wait for desc write back
         */
        if (!details->async && !details->postpone) {
                u32 total_delay = 0;

                do {
                        /* AQ designers suggest use of head for better
                         * timing reliability than DD bit
                         */
                        if (i40e_asq_done(hw))
                                break;
                        i40e_msec_delay(1);
                        total_delay++;
                } while (total_delay < hw->aq.asq_cmd_timeout);
        }

        /* if ready, copy the desc back to temp */
        if (i40e_asq_done(hw)) {
                i40e_memcpy(desc, desc_on_ring, sizeof(struct i40e_aq_desc),
                            I40E_DMA_TO_NONDMA);
                if (buff != NULL)
                        i40e_memcpy(buff, dma_buff->va, buff_size,
                                    I40E_DMA_TO_NONDMA);
                retval = LE16_TO_CPU(desc->retval);
                if (retval != 0) {
                        i40e_debug(hw,
                                   I40E_DEBUG_AQ_MESSAGE,
                                   "AQTX: Command completed with error 0x%X.\n",
                                   retval);

                        /* strip off FW internal code */
                        retval &= 0xff;
                }
                cmd_completed = true;
                if ((enum i40e_admin_queue_err)retval == I40E_AQ_RC_OK)
                        status = I40E_SUCCESS;
                else
                        status = I40E_ERR_ADMIN_QUEUE_ERROR;
                hw->aq.asq_last_status = (enum i40e_admin_queue_err)retval;
        }

        i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
                   "AQTX: desc and buffer writeback:\n");
        i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, buff, buff_size);

        /* save writeback aq if requested */
        if (details->wb_desc)
                i40e_memcpy(details->wb_desc, desc_on_ring,
                            sizeof(struct i40e_aq_desc), I40E_DMA_TO_NONDMA);

        /* update the error if time out occurred */
        if ((!cmd_completed) &&
            (!details->async && !details->postpone)) {
                i40e_debug(hw,
                           I40E_DEBUG_AQ_MESSAGE,
                           "AQTX: Writeback timeout.\n");
                status = I40E_ERR_ADMIN_QUEUE_TIMEOUT;
        }

asq_send_command_error:
        i40e_release_spinlock(&hw->aq.asq_spinlock);
        return status;
}

/**
 *  i40e_fill_default_direct_cmd_desc - AQ descriptor helper function
 *  @desc:     pointer to the temp descriptor (non DMA mem)
 *  @opcode:   the opcode can be used to decide which flags to turn off or on
 *
 *  Fill the desc with default values
 **/
void i40e_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc,
                                       u16 opcode)
{
        /* zero out the desc */
        i40e_memset((void *)desc, 0, sizeof(struct i40e_aq_desc),
                    I40E_NONDMA_MEM);
        desc->opcode = CPU_TO_LE16(opcode);
        desc->flags = CPU_TO_LE16(I40E_AQ_FLAG_SI);
}
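
/*
 * Send sketch (illustrative; 'opcode' stands in for a real admin queue
 * opcode): a direct command is built with the helper above and pushed
 * through i40e_asq_send_command(); an indirect command passes a buffer
 * and its size instead of NULL/0:
 *
 *      struct i40e_aq_desc desc;
 *
 *      i40e_fill_default_direct_cmd_desc(&desc, opcode);
 *      status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
 *      if (status != I40E_SUCCESS)
 *              check hw->aq.asq_last_status for the AQ return code
 */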

/**
 *  i40e_clean_arq_element
 *  @hw: pointer to the hw struct
 *  @e: event info from the receive descriptor, includes any buffers
 *  @pending: number of events that could be left to process
 *
 *  This function cleans one Admin Receive Queue element and returns
 *  the contents through e.  It can also return how many events are
 *  left to process through 'pending'
 **/
enum i40e_status_code i40e_clean_arq_element(struct i40e_hw *hw,
                                             struct i40e_arq_event_info *e,
                                             u16 *pending)
{
        enum i40e_status_code ret_code = I40E_SUCCESS;
        u16 ntc = hw->aq.arq.next_to_clean;
        struct i40e_aq_desc *desc;
        struct i40e_dma_mem *bi;
        u16 desc_idx;
        u16 datalen;
        u16 flags;
        u16 ntu;

        /* pre-clean the event info */
        i40e_memset(&e->desc, 0, sizeof(e->desc), I40E_NONDMA_MEM);

        /* take the lock before we start messing with the ring */
        i40e_acquire_spinlock(&hw->aq.arq_spinlock);

        if (hw->aq.arq.count == 0) {
                i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
                           "AQRX: Admin queue not initialized.\n");
                ret_code = I40E_ERR_QUEUE_EMPTY;
                goto clean_arq_element_err;
        }

        /* set next_to_use to head */
#ifdef INTEGRATED_VF
        if (!i40e_is_vf(hw))
                ntu = rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK;
        else
                ntu = rd32(hw, hw->aq.arq.head) & I40E_VF_ARQH1_ARQH_MASK;
#else
#ifdef PF_DRIVER
        ntu = rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK;
#endif /* PF_DRIVER */
#ifdef VF_DRIVER
        ntu = rd32(hw, hw->aq.arq.head) & I40E_VF_ARQH1_ARQH_MASK;
#endif /* VF_DRIVER */
#endif /* INTEGRATED_VF */
        if (ntu == ntc) {
                /* nothing to do - shouldn't need to update ring's values */
                ret_code = I40E_ERR_ADMIN_QUEUE_NO_WORK;
                goto clean_arq_element_out;
        }

        /* now clean the next descriptor */
        desc = I40E_ADMINQ_DESC(hw->aq.arq, ntc);
        desc_idx = ntc;

        flags = LE16_TO_CPU(desc->flags);
        if (flags & I40E_AQ_FLAG_ERR) {
                ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;
                hw->aq.arq_last_status =
                        (enum i40e_admin_queue_err)LE16_TO_CPU(desc->retval);
                i40e_debug(hw,
                           I40E_DEBUG_AQ_MESSAGE,
                           "AQRX: Event received with error 0x%X.\n",
                           hw->aq.arq_last_status);
        }

        i40e_memcpy(&e->desc, desc, sizeof(struct i40e_aq_desc),
                    I40E_DMA_TO_NONDMA);
        datalen = LE16_TO_CPU(desc->datalen);
        e->msg_len = min(datalen, e->buf_len);
        if (e->msg_buf != NULL && (e->msg_len != 0))
                i40e_memcpy(e->msg_buf,
                            hw->aq.arq.r.arq_bi[desc_idx].va,
                            e->msg_len, I40E_DMA_TO_NONDMA);

        i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQRX: desc and buffer:\n");
        i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, e->msg_buf,
                      hw->aq.arq_buf_size);

        /* Restore the original datalen and buffer address in the desc;
         * FW overwrites datalen with the size of the event message
         */
        bi = &hw->aq.arq.r.arq_bi[ntc];
        i40e_memset((void *)desc, 0, sizeof(struct i40e_aq_desc), I40E_DMA_MEM);

        desc->flags = CPU_TO_LE16(I40E_AQ_FLAG_BUF);
        if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF)
                desc->flags |= CPU_TO_LE16(I40E_AQ_FLAG_LB);
        desc->datalen = CPU_TO_LE16((u16)bi->size);
        desc->params.external.addr_high = CPU_TO_LE32(I40E_HI_DWORD(bi->pa));
        desc->params.external.addr_low = CPU_TO_LE32(I40E_LO_DWORD(bi->pa));

        /* set tail = the last cleaned desc index. */
        wr32(hw, hw->aq.arq.tail, ntc);
        /* ntc is updated to tail + 1 */
        ntc++;
        if (ntc == hw->aq.num_arq_entries)
                ntc = 0;
        hw->aq.arq.next_to_clean = ntc;
        hw->aq.arq.next_to_use = ntu;

#ifdef PF_DRIVER
        i40e_nvmupd_check_wait_event(hw, LE16_TO_CPU(e->desc.opcode));
#endif /* PF_DRIVER */
clean_arq_element_out:
        /* Set pending if needed, unlock and return */
        if (pending != NULL)
                *pending = (ntc > ntu ? hw->aq.arq.count : 0) + (ntu - ntc);
clean_arq_element_err:
        i40e_release_spinlock(&hw->aq.arq_spinlock);

        return ret_code;
}
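
/*
 * Drain sketch (illustrative; the event buffer is caller-provided storage,
 * an assumption here): events are typically pulled until the queue reports
 * no work, with 'pending' deciding whether to loop again:
 *
 *      struct i40e_arq_event_info e;
 *      u16 pending;
 *
 *      e.buf_len = hw->aq.arq_buf_size;
 *      e.msg_buf = event_buffer;
 *      do {
 *              ret = i40e_clean_arq_element(hw, &e, &pending);
 *              if (ret == I40E_ERR_ADMIN_QUEUE_NO_WORK)
 *                      break;
 *              handle e.desc and e.msg_buf here
 *      } while (pending);
 */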