New upstream version 16.11.9
[deb_dpdk.git] / drivers / net / i40e / base / i40e_adminq.c
1 /*******************************************************************************
2
3 Copyright (c) 2013 - 2015, Intel Corporation
4 All rights reserved.
5
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
8
9  1. Redistributions of source code must retain the above copyright notice,
10     this list of conditions and the following disclaimer.
11
12  2. Redistributions in binary form must reproduce the above copyright
13     notice, this list of conditions and the following disclaimer in the
14     documentation and/or other materials provided with the distribution.
15
16  3. Neither the name of the Intel Corporation nor the names of its
17     contributors may be used to endorse or promote products derived from
18     this software without specific prior written permission.
19
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
31
32 ***************************************************************************/
33
34 #include "i40e_status.h"
35 #include "i40e_type.h"
36 #include "i40e_register.h"
37 #include "i40e_adminq.h"
38 #include "i40e_prototype.h"
39
/**
 *  i40e_adminq_init_regs - Initialize AdminQ registers
 *  @hw: pointer to the hardware structure
 *
 *  Records the device-specific register offsets (tail/head/len/base-lo/
 *  base-hi) for both admin queues in the local hw->aq bookkeeping structs;
 *  no hardware access happens here.
 *
 *  This assumes the alloc_asq and alloc_arq functions have already been called
 **/
STATIC void i40e_adminq_init_regs(struct i40e_hw *hw)
{
	/* set head and tail registers in our local struct */
	if (i40e_is_vf(hw)) {
		hw->aq.asq.tail = I40E_VF_ATQT1;
		hw->aq.asq.head = I40E_VF_ATQH1;
		hw->aq.asq.len  = I40E_VF_ATQLEN1;
		hw->aq.asq.bal  = I40E_VF_ATQBAL1;
		hw->aq.asq.bah  = I40E_VF_ATQBAH1;
		hw->aq.arq.tail = I40E_VF_ARQT1;
		hw->aq.arq.head = I40E_VF_ARQH1;
		hw->aq.arq.len  = I40E_VF_ARQLEN1;
		hw->aq.arq.bal  = I40E_VF_ARQBAL1;
		hw->aq.arq.bah  = I40E_VF_ARQBAH1;
#ifdef PF_DRIVER
	/* PF register offsets are only compiled into PF-capable builds;
	 * in a VF-only build the else-branch does not exist and the
	 * closing brace below still matches the if above.
	 */
	} else {
		hw->aq.asq.tail = I40E_PF_ATQT;
		hw->aq.asq.head = I40E_PF_ATQH;
		hw->aq.asq.len  = I40E_PF_ATQLEN;
		hw->aq.asq.bal  = I40E_PF_ATQBAL;
		hw->aq.asq.bah  = I40E_PF_ATQBAH;
		hw->aq.arq.tail = I40E_PF_ARQT;
		hw->aq.arq.head = I40E_PF_ARQH;
		hw->aq.arq.len  = I40E_PF_ARQLEN;
		hw->aq.arq.bal  = I40E_PF_ARQBAL;
		hw->aq.arq.bah  = I40E_PF_ARQBAH;
#endif
	}
}
75
76 /**
77  *  i40e_alloc_adminq_asq_ring - Allocate Admin Queue send rings
78  *  @hw: pointer to the hardware structure
79  **/
80 enum i40e_status_code i40e_alloc_adminq_asq_ring(struct i40e_hw *hw)
81 {
82         enum i40e_status_code ret_code;
83
84         ret_code = i40e_allocate_dma_mem(hw, &hw->aq.asq.desc_buf,
85                                          i40e_mem_atq_ring,
86                                          (hw->aq.num_asq_entries *
87                                          sizeof(struct i40e_aq_desc)),
88                                          I40E_ADMINQ_DESC_ALIGNMENT);
89         if (ret_code)
90                 return ret_code;
91
92         ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.cmd_buf,
93                                           (hw->aq.num_asq_entries *
94                                           sizeof(struct i40e_asq_cmd_details)));
95         if (ret_code) {
96                 i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
97                 return ret_code;
98         }
99
100         return ret_code;
101 }
102
103 /**
104  *  i40e_alloc_adminq_arq_ring - Allocate Admin Queue receive rings
105  *  @hw: pointer to the hardware structure
106  **/
107 enum i40e_status_code i40e_alloc_adminq_arq_ring(struct i40e_hw *hw)
108 {
109         enum i40e_status_code ret_code;
110
111         ret_code = i40e_allocate_dma_mem(hw, &hw->aq.arq.desc_buf,
112                                          i40e_mem_arq_ring,
113                                          (hw->aq.num_arq_entries *
114                                          sizeof(struct i40e_aq_desc)),
115                                          I40E_ADMINQ_DESC_ALIGNMENT);
116
117         return ret_code;
118 }
119
/**
 *  i40e_free_adminq_asq - Free Admin Queue send rings
 *  @hw: pointer to the hardware structure
 *
 *  Releases the command-details array and the ASQ descriptor ring
 *  allocated by i40e_alloc_adminq_asq_ring().
 *
 *  This assumes the posted send buffers have already been cleaned
 *  and de-allocated
 **/
void i40e_free_adminq_asq(struct i40e_hw *hw)
{
	i40e_free_virt_mem(hw, &hw->aq.asq.cmd_buf);
	i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
}
132
/**
 *  i40e_free_adminq_arq - Free Admin Queue receive rings
 *  @hw: pointer to the hardware structure
 *
 *  Releases only the ARQ descriptor ring; the pre-posted receive buffers
 *  are torn down separately by i40e_free_arq_bufs().
 *
 *  This assumes the posted receive buffers have already been cleaned
 *  and de-allocated
 **/
void i40e_free_adminq_arq(struct i40e_hw *hw)
{
	i40e_free_dma_mem(hw, &hw->aq.arq.desc_buf);
}
144
145 /**
146  *  i40e_alloc_arq_bufs - Allocate pre-posted buffers for the receive queue
147  *  @hw: pointer to the hardware structure
148  **/
149 STATIC enum i40e_status_code i40e_alloc_arq_bufs(struct i40e_hw *hw)
150 {
151         enum i40e_status_code ret_code;
152         struct i40e_aq_desc *desc;
153         struct i40e_dma_mem *bi;
154         int i;
155
156         /* We'll be allocating the buffer info memory first, then we can
157          * allocate the mapped buffers for the event processing
158          */
159
160         /* buffer_info structures do not need alignment */
161         ret_code = i40e_allocate_virt_mem(hw, &hw->aq.arq.dma_head,
162                 (hw->aq.num_arq_entries * sizeof(struct i40e_dma_mem)));
163         if (ret_code)
164                 goto alloc_arq_bufs;
165         hw->aq.arq.r.arq_bi = (struct i40e_dma_mem *)hw->aq.arq.dma_head.va;
166
167         /* allocate the mapped buffers */
168         for (i = 0; i < hw->aq.num_arq_entries; i++) {
169                 bi = &hw->aq.arq.r.arq_bi[i];
170                 ret_code = i40e_allocate_dma_mem(hw, bi,
171                                                  i40e_mem_arq_buf,
172                                                  hw->aq.arq_buf_size,
173                                                  I40E_ADMINQ_DESC_ALIGNMENT);
174                 if (ret_code)
175                         goto unwind_alloc_arq_bufs;
176
177                 /* now configure the descriptors for use */
178                 desc = I40E_ADMINQ_DESC(hw->aq.arq, i);
179
180                 desc->flags = CPU_TO_LE16(I40E_AQ_FLAG_BUF);
181                 if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF)
182                         desc->flags |= CPU_TO_LE16(I40E_AQ_FLAG_LB);
183                 desc->opcode = 0;
184                 /* This is in accordance with Admin queue design, there is no
185                  * register for buffer size configuration
186                  */
187                 desc->datalen = CPU_TO_LE16((u16)bi->size);
188                 desc->retval = 0;
189                 desc->cookie_high = 0;
190                 desc->cookie_low = 0;
191                 desc->params.external.addr_high =
192                         CPU_TO_LE32(I40E_HI_DWORD(bi->pa));
193                 desc->params.external.addr_low =
194                         CPU_TO_LE32(I40E_LO_DWORD(bi->pa));
195                 desc->params.external.param0 = 0;
196                 desc->params.external.param1 = 0;
197         }
198
199 alloc_arq_bufs:
200         return ret_code;
201
202 unwind_alloc_arq_bufs:
203         /* don't try to free the one that failed... */
204         i--;
205         for (; i >= 0; i--)
206                 i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
207         i40e_free_virt_mem(hw, &hw->aq.arq.dma_head);
208
209         return ret_code;
210 }
211
212 /**
213  *  i40e_alloc_asq_bufs - Allocate empty buffer structs for the send queue
214  *  @hw: pointer to the hardware structure
215  **/
216 STATIC enum i40e_status_code i40e_alloc_asq_bufs(struct i40e_hw *hw)
217 {
218         enum i40e_status_code ret_code;
219         struct i40e_dma_mem *bi;
220         int i;
221
222         /* No mapped memory needed yet, just the buffer info structures */
223         ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.dma_head,
224                 (hw->aq.num_asq_entries * sizeof(struct i40e_dma_mem)));
225         if (ret_code)
226                 goto alloc_asq_bufs;
227         hw->aq.asq.r.asq_bi = (struct i40e_dma_mem *)hw->aq.asq.dma_head.va;
228
229         /* allocate the mapped buffers */
230         for (i = 0; i < hw->aq.num_asq_entries; i++) {
231                 bi = &hw->aq.asq.r.asq_bi[i];
232                 ret_code = i40e_allocate_dma_mem(hw, bi,
233                                                  i40e_mem_asq_buf,
234                                                  hw->aq.asq_buf_size,
235                                                  I40E_ADMINQ_DESC_ALIGNMENT);
236                 if (ret_code)
237                         goto unwind_alloc_asq_bufs;
238         }
239 alloc_asq_bufs:
240         return ret_code;
241
242 unwind_alloc_asq_bufs:
243         /* don't try to free the one that failed... */
244         i--;
245         for (; i >= 0; i--)
246                 i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
247         i40e_free_virt_mem(hw, &hw->aq.asq.dma_head);
248
249         return ret_code;
250 }
251
/**
 *  i40e_free_arq_bufs - Free receive queue buffer info elements
 *  @hw: pointer to the hardware structure
 *
 *  Complete ARQ teardown: releases every pre-posted receive buffer,
 *  then the descriptor ring, then the buffer-tracking array.
 **/
STATIC void i40e_free_arq_bufs(struct i40e_hw *hw)
{
	int i;

	/* free descriptors */
	for (i = 0; i < hw->aq.num_arq_entries; i++)
		i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);

	/* free the descriptor memory */
	i40e_free_dma_mem(hw, &hw->aq.arq.desc_buf);

	/* free the dma header */
	i40e_free_virt_mem(hw, &hw->aq.arq.dma_head);
}
270
/**
 *  i40e_free_asq_bufs - Free send queue buffer info elements
 *  @hw: pointer to the hardware structure
 *
 *  Complete ASQ teardown: releases every still-mapped command buffer,
 *  then the command-details array, the descriptor ring, and finally the
 *  buffer-tracking array.
 **/
STATIC void i40e_free_asq_bufs(struct i40e_hw *hw)
{
	int i;

	/* only unmap if the address is non-NULL */
	for (i = 0; i < hw->aq.num_asq_entries; i++)
		if (hw->aq.asq.r.asq_bi[i].pa)
			i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);

	/* free the buffer info list */
	i40e_free_virt_mem(hw, &hw->aq.asq.cmd_buf);

	/* free the descriptor memory */
	i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);

	/* free the dma header */
	i40e_free_virt_mem(hw, &hw->aq.asq.dma_head);
}
293
/**
 *  i40e_config_asq_regs - configure ASQ registers
 *  @hw: pointer to the hardware structure
 *
 *  Configure base address and length registers for the transmit queue.
 *  Writes the ring length with the enable bit set, programs the ring's
 *  physical base address, and reads back one register to confirm the
 *  writes took effect.
 **/
STATIC enum i40e_status_code i40e_config_asq_regs(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;
	u32 reg = 0;

	/* Clear Head and Tail */
	wr32(hw, hw->aq.asq.head, 0);
	wr32(hw, hw->aq.asq.tail, 0);

	/* set starting point.
	 * With INTEGRATED_VF both PF and VF paths are compiled in and the
	 * runtime i40e_is_vf() check selects the register layout; otherwise
	 * exactly one branch exists at compile time.
	 */
#ifdef PF_DRIVER
#ifdef INTEGRATED_VF
	if (!i40e_is_vf(hw))
		wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries |
					  I40E_PF_ATQLEN_ATQENABLE_MASK));
#else
	wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries |
				  I40E_PF_ATQLEN_ATQENABLE_MASK));
#endif /* INTEGRATED_VF */
#endif /* PF_DRIVER */
#ifdef VF_DRIVER
#ifdef INTEGRATED_VF
	if (i40e_is_vf(hw))
		wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries |
					  I40E_VF_ATQLEN1_ATQENABLE_MASK));
#else
	wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries |
				  I40E_VF_ATQLEN1_ATQENABLE_MASK));
#endif /* INTEGRATED_VF */
#endif /* VF_DRIVER */
	/* program the 64-bit ring base address as two 32-bit halves */
	wr32(hw, hw->aq.asq.bal, I40E_LO_DWORD(hw->aq.asq.desc_buf.pa));
	wr32(hw, hw->aq.asq.bah, I40E_HI_DWORD(hw->aq.asq.desc_buf.pa));

	/* Check one register to verify that config was applied */
	reg = rd32(hw, hw->aq.asq.bal);
	if (reg != I40E_LO_DWORD(hw->aq.asq.desc_buf.pa))
		ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;

	return ret_code;
}
340
/**
 *  i40e_config_arq_regs - ARQ register configuration
 *  @hw: pointer to the hardware structure
 *
 *  Configure base address and length registers for the receive (event queue).
 *  Also advances the tail pointer to hand all pre-allocated receive buffers
 *  to the firmware, then reads back one register to confirm the writes.
 **/
STATIC enum i40e_status_code i40e_config_arq_regs(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;
	u32 reg = 0;

	/* Clear Head and Tail */
	wr32(hw, hw->aq.arq.head, 0);
	wr32(hw, hw->aq.arq.tail, 0);

	/* set starting point.
	 * With INTEGRATED_VF both PF and VF paths are compiled in and the
	 * runtime i40e_is_vf() check selects the register layout; otherwise
	 * exactly one branch exists at compile time.
	 */
#ifdef PF_DRIVER
#ifdef INTEGRATED_VF
	if (!i40e_is_vf(hw))
		wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries |
					  I40E_PF_ARQLEN_ARQENABLE_MASK));
#else
	wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries |
				  I40E_PF_ARQLEN_ARQENABLE_MASK));
#endif /* INTEGRATED_VF */
#endif /* PF_DRIVER */
#ifdef VF_DRIVER
#ifdef INTEGRATED_VF
	if (i40e_is_vf(hw))
		wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries |
					  I40E_VF_ARQLEN1_ARQENABLE_MASK));
#else
	wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries |
				  I40E_VF_ARQLEN1_ARQENABLE_MASK));
#endif /* INTEGRATED_VF */
#endif /* VF_DRIVER */
	/* program the 64-bit ring base address as two 32-bit halves */
	wr32(hw, hw->aq.arq.bal, I40E_LO_DWORD(hw->aq.arq.desc_buf.pa));
	wr32(hw, hw->aq.arq.bah, I40E_HI_DWORD(hw->aq.arq.desc_buf.pa));

	/* Update tail in the HW to post pre-allocated buffers */
	wr32(hw, hw->aq.arq.tail, hw->aq.num_arq_entries - 1);

	/* Check one register to verify that config was applied */
	reg = rd32(hw, hw->aq.arq.bal);
	if (reg != I40E_LO_DWORD(hw->aq.arq.desc_buf.pa))
		ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;

	return ret_code;
}
390
391 /**
392  *  i40e_init_asq - main initialization routine for ASQ
393  *  @hw: pointer to the hardware structure
394  *
395  *  This is the main initialization routine for the Admin Send Queue
396  *  Prior to calling this function, drivers *MUST* set the following fields
397  *  in the hw->aq structure:
398  *     - hw->aq.num_asq_entries
399  *     - hw->aq.arq_buf_size
400  *
401  *  Do *NOT* hold the lock when calling this as the memory allocation routines
402  *  called are not going to be atomic context safe
403  **/
404 enum i40e_status_code i40e_init_asq(struct i40e_hw *hw)
405 {
406         enum i40e_status_code ret_code = I40E_SUCCESS;
407
408         if (hw->aq.asq.count > 0) {
409                 /* queue already initialized */
410                 ret_code = I40E_ERR_NOT_READY;
411                 goto init_adminq_exit;
412         }
413
414         /* verify input for valid configuration */
415         if ((hw->aq.num_asq_entries == 0) ||
416             (hw->aq.asq_buf_size == 0)) {
417                 ret_code = I40E_ERR_CONFIG;
418                 goto init_adminq_exit;
419         }
420
421         hw->aq.asq.next_to_use = 0;
422         hw->aq.asq.next_to_clean = 0;
423
424         /* allocate the ring memory */
425         ret_code = i40e_alloc_adminq_asq_ring(hw);
426         if (ret_code != I40E_SUCCESS)
427                 goto init_adminq_exit;
428
429         /* allocate buffers in the rings */
430         ret_code = i40e_alloc_asq_bufs(hw);
431         if (ret_code != I40E_SUCCESS)
432                 goto init_adminq_free_rings;
433
434         /* initialize base registers */
435         ret_code = i40e_config_asq_regs(hw);
436         if (ret_code != I40E_SUCCESS)
437                 goto init_config_regs;
438
439         /* success! */
440         hw->aq.asq.count = hw->aq.num_asq_entries;
441         goto init_adminq_exit;
442
443 init_adminq_free_rings:
444         i40e_free_adminq_asq(hw);
445         return ret_code;
446
447 init_config_regs:
448         i40e_free_asq_bufs(hw);
449
450 init_adminq_exit:
451         return ret_code;
452 }
453
454 /**
455  *  i40e_init_arq - initialize ARQ
456  *  @hw: pointer to the hardware structure
457  *
458  *  The main initialization routine for the Admin Receive (Event) Queue.
459  *  Prior to calling this function, drivers *MUST* set the following fields
460  *  in the hw->aq structure:
461  *     - hw->aq.num_asq_entries
462  *     - hw->aq.arq_buf_size
463  *
464  *  Do *NOT* hold the lock when calling this as the memory allocation routines
465  *  called are not going to be atomic context safe
466  **/
467 enum i40e_status_code i40e_init_arq(struct i40e_hw *hw)
468 {
469         enum i40e_status_code ret_code = I40E_SUCCESS;
470
471         if (hw->aq.arq.count > 0) {
472                 /* queue already initialized */
473                 ret_code = I40E_ERR_NOT_READY;
474                 goto init_adminq_exit;
475         }
476
477         /* verify input for valid configuration */
478         if ((hw->aq.num_arq_entries == 0) ||
479             (hw->aq.arq_buf_size == 0)) {
480                 ret_code = I40E_ERR_CONFIG;
481                 goto init_adminq_exit;
482         }
483
484         hw->aq.arq.next_to_use = 0;
485         hw->aq.arq.next_to_clean = 0;
486
487         /* allocate the ring memory */
488         ret_code = i40e_alloc_adminq_arq_ring(hw);
489         if (ret_code != I40E_SUCCESS)
490                 goto init_adminq_exit;
491
492         /* allocate buffers in the rings */
493         ret_code = i40e_alloc_arq_bufs(hw);
494         if (ret_code != I40E_SUCCESS)
495                 goto init_adminq_free_rings;
496
497         /* initialize base registers */
498         ret_code = i40e_config_arq_regs(hw);
499         if (ret_code != I40E_SUCCESS)
500                 goto init_adminq_free_rings;
501
502         /* success! */
503         hw->aq.arq.count = hw->aq.num_arq_entries;
504         goto init_adminq_exit;
505
506 init_adminq_free_rings:
507         i40e_free_adminq_arq(hw);
508
509 init_adminq_exit:
510         return ret_code;
511 }
512
513 /**
514  *  i40e_shutdown_asq - shutdown the ASQ
515  *  @hw: pointer to the hardware structure
516  *
517  *  The main shutdown routine for the Admin Send Queue
518  **/
519 enum i40e_status_code i40e_shutdown_asq(struct i40e_hw *hw)
520 {
521         enum i40e_status_code ret_code = I40E_SUCCESS;
522
523         i40e_acquire_spinlock(&hw->aq.asq_spinlock);
524
525         if (hw->aq.asq.count == 0) {
526                 ret_code = I40E_ERR_NOT_READY;
527                 goto shutdown_asq_out;
528         }
529
530         /* Stop firmware AdminQ processing */
531         wr32(hw, hw->aq.asq.head, 0);
532         wr32(hw, hw->aq.asq.tail, 0);
533         wr32(hw, hw->aq.asq.len, 0);
534         wr32(hw, hw->aq.asq.bal, 0);
535         wr32(hw, hw->aq.asq.bah, 0);
536
537         hw->aq.asq.count = 0; /* to indicate uninitialized queue */
538
539         /* free ring buffers */
540         i40e_free_asq_bufs(hw);
541
542 shutdown_asq_out:
543         i40e_release_spinlock(&hw->aq.asq_spinlock);
544         return ret_code;
545 }
546
547 /**
548  *  i40e_shutdown_arq - shutdown ARQ
549  *  @hw: pointer to the hardware structure
550  *
551  *  The main shutdown routine for the Admin Receive Queue
552  **/
553 enum i40e_status_code i40e_shutdown_arq(struct i40e_hw *hw)
554 {
555         enum i40e_status_code ret_code = I40E_SUCCESS;
556
557         i40e_acquire_spinlock(&hw->aq.arq_spinlock);
558
559         if (hw->aq.arq.count == 0) {
560                 ret_code = I40E_ERR_NOT_READY;
561                 goto shutdown_arq_out;
562         }
563
564         /* Stop firmware AdminQ processing */
565         wr32(hw, hw->aq.arq.head, 0);
566         wr32(hw, hw->aq.arq.tail, 0);
567         wr32(hw, hw->aq.arq.len, 0);
568         wr32(hw, hw->aq.arq.bal, 0);
569         wr32(hw, hw->aq.arq.bah, 0);
570
571         hw->aq.arq.count = 0; /* to indicate uninitialized queue */
572
573         /* free ring buffers */
574         i40e_free_arq_bufs(hw);
575
576 shutdown_arq_out:
577         i40e_release_spinlock(&hw->aq.arq_spinlock);
578         return ret_code;
579 }
580 #ifdef PF_DRIVER
581
/**
 *  i40e_resume_aq - resume AQ processing from 0
 *  @hw: pointer to the hardware structure
 *
 *  Re-arms both admin queues after a PF reset by rewinding the software
 *  ring indices and re-programming the base/length registers.
 *
 *  NOTE(review): the i40e_config_*_regs() return values are ignored here;
 *  presumably the firmware-version retry loop in i40e_init_adminq() covers
 *  a transient failure - confirm before relying on this path elsewhere.
 **/
STATIC void i40e_resume_aq(struct i40e_hw *hw)
{
	/* Registers are reset after PF reset */
	hw->aq.asq.next_to_use = 0;
	hw->aq.asq.next_to_clean = 0;

	i40e_config_asq_regs(hw);

	hw->aq.arq.next_to_use = 0;
	hw->aq.arq.next_to_clean = 0;

	i40e_config_arq_regs(hw);
}
599 #endif /* PF_DRIVER */
600
/**
 *  i40e_init_adminq - main initialization routine for Admin Queue
 *  @hw: pointer to the hardware structure
 *
 *  Sets up spinlocks and register offsets, brings up the send and receive
 *  queues, and (PF builds only) queries the firmware version, reads the NVM
 *  version info, and releases any stale NVM resource lock.
 *
 *  Prior to calling this function, drivers *MUST* set the following fields
 *  in the hw->aq structure:
 *     - hw->aq.num_asq_entries
 *     - hw->aq.num_arq_entries
 *     - hw->aq.arq_buf_size
 *     - hw->aq.asq_buf_size
 **/
enum i40e_status_code i40e_init_adminq(struct i40e_hw *hw)
{
#ifdef PF_DRIVER
	u16 cfg_ptr, oem_hi, oem_lo;
	u16 eetrack_lo, eetrack_hi;
#endif
	enum i40e_status_code ret_code;
#ifdef PF_DRIVER
	int retry = 0;
#endif

	/* verify input for valid configuration */
	if ((hw->aq.num_arq_entries == 0) ||
	    (hw->aq.num_asq_entries == 0) ||
	    (hw->aq.arq_buf_size == 0) ||
	    (hw->aq.asq_buf_size == 0)) {
		ret_code = I40E_ERR_CONFIG;
		goto init_adminq_exit;
	}
	i40e_init_spinlock(&hw->aq.asq_spinlock);
	i40e_init_spinlock(&hw->aq.arq_spinlock);

	/* Set up register offsets */
	i40e_adminq_init_regs(hw);

	/* setup ASQ command write back timeout */
	hw->aq.asq_cmd_timeout = I40E_ASQ_CMD_TIMEOUT;

	/* allocate the ASQ */
	ret_code = i40e_init_asq(hw);
	if (ret_code != I40E_SUCCESS)
		goto init_adminq_destroy_spinlocks;

	/* allocate the ARQ */
	ret_code = i40e_init_arq(hw);
	if (ret_code != I40E_SUCCESS)
		goto init_adminq_free_asq;

#ifdef PF_DRIVER
#ifdef INTEGRATED_VF
	/* VF has no need of firmware */
	if (i40e_is_vf(hw))
		goto init_adminq_exit;
#endif
	/* There are some cases where the firmware may not be quite ready
	 * for AdminQ operations, so we retry the AdminQ setup a few times
	 * if we see timeouts in this first AQ call.
	 */
	do {
		ret_code = i40e_aq_get_firmware_version(hw,
							&hw->aq.fw_maj_ver,
							&hw->aq.fw_min_ver,
							&hw->aq.fw_build,
							&hw->aq.api_maj_ver,
							&hw->aq.api_min_ver,
							NULL);
		if (ret_code != I40E_ERR_ADMIN_QUEUE_TIMEOUT)
			break;
		retry++;
		i40e_msec_delay(100);
		i40e_resume_aq(hw);
	} while (retry < 10);
	if (ret_code != I40E_SUCCESS)
		goto init_adminq_free_arq;

	/* get the NVM version info */
	i40e_read_nvm_word(hw, I40E_SR_NVM_DEV_STARTER_VERSION,
			   &hw->nvm.version);
	i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_LO, &eetrack_lo);
	i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_HI, &eetrack_hi);
	/* eetrack is the two 16-bit NVM words combined into one 32-bit id */
	hw->nvm.eetrack = (eetrack_hi << 16) | eetrack_lo;
	i40e_read_nvm_word(hw, I40E_SR_BOOT_CONFIG_PTR, &cfg_ptr);
	i40e_read_nvm_word(hw, (cfg_ptr + I40E_NVM_OEM_VER_OFF),
			   &oem_hi);
	i40e_read_nvm_word(hw, (cfg_ptr + (I40E_NVM_OEM_VER_OFF + 1)),
			   &oem_lo);
	hw->nvm.oem_ver = ((u32)oem_hi << 16) | oem_lo;

	/* Newer versions of firmware require lock when reading the NVM */
	if ((hw->aq.api_maj_ver > 1) ||
	    ((hw->aq.api_maj_ver == 1) &&
	     (hw->aq.api_min_ver >= 5)))
		hw->flags |= I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK;

	/* refuse to run against a newer major API than this driver knows */
	if (hw->aq.api_maj_ver > I40E_FW_API_VERSION_MAJOR) {
		ret_code = I40E_ERR_FIRMWARE_API_VERSION;
		goto init_adminq_free_arq;
	}

	/* pre-emptive resource lock release */
	i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL);
	hw->nvm_release_on_done = false;
	hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;

#endif /* PF_DRIVER */
	ret_code = I40E_SUCCESS;

	/* success! */
	goto init_adminq_exit;

#ifdef PF_DRIVER
init_adminq_free_arq:
	i40e_shutdown_arq(hw);
#endif
init_adminq_free_asq:
	i40e_shutdown_asq(hw);
init_adminq_destroy_spinlocks:
	i40e_destroy_spinlock(&hw->aq.asq_spinlock);
	i40e_destroy_spinlock(&hw->aq.arq_spinlock);

init_adminq_exit:
	return ret_code;
}
725
726 /**
727  *  i40e_shutdown_adminq - shutdown routine for the Admin Queue
728  *  @hw: pointer to the hardware structure
729  **/
730 enum i40e_status_code i40e_shutdown_adminq(struct i40e_hw *hw)
731 {
732         enum i40e_status_code ret_code = I40E_SUCCESS;
733
734         if (i40e_check_asq_alive(hw))
735                 i40e_aq_queue_shutdown(hw, true);
736
737         i40e_shutdown_asq(hw);
738         i40e_shutdown_arq(hw);
739         i40e_destroy_spinlock(&hw->aq.asq_spinlock);
740         i40e_destroy_spinlock(&hw->aq.arq_spinlock);
741
742         if (hw->nvm_buff.va)
743                 i40e_free_virt_mem(hw, &hw->nvm_buff);
744
745         return ret_code;
746 }
747
/**
 *  i40e_clean_asq - cleans Admin send queue
 *  @hw: pointer to the hardware structure
 *
 *  Walks the send ring from next_to_clean up to (but not including) the
 *  hardware head pointer, invoking any completion callback recorded in the
 *  per-descriptor details and zeroing each entry for reuse.
 *
 *  returns the number of free desc
 **/
u16 i40e_clean_asq(struct i40e_hw *hw)
{
	struct i40e_adminq_ring *asq = &(hw->aq.asq);
	struct i40e_asq_cmd_details *details;
	u16 ntc = asq->next_to_clean;
	struct i40e_aq_desc desc_cb;
	struct i40e_aq_desc *desc;

	desc = I40E_ADMINQ_DESC(*asq, ntc);
	details = I40E_ADMINQ_DETAILS(*asq, ntc);
	/* every entry the firmware head has moved past is complete */
	while (rd32(hw, hw->aq.asq.head) != ntc) {
		i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
			   "ntc %d head %d.\n", ntc, rd32(hw, hw->aq.asq.head));

		if (details->callback) {
			I40E_ADMINQ_CALLBACK cb_func =
					(I40E_ADMINQ_CALLBACK)details->callback;
			/* hand the callback a snapshot copy, not the live
			 * ring entry, which is zeroed right after
			 */
			i40e_memcpy(&desc_cb, desc, sizeof(struct i40e_aq_desc),
				    I40E_DMA_TO_DMA);
			cb_func(hw, &desc_cb);
		}
		i40e_memset(desc, 0, sizeof(*desc), I40E_DMA_MEM);
		i40e_memset(details, 0, sizeof(*details), I40E_NONDMA_MEM);
		ntc++;
		/* wrap the index at the end of the ring */
		if (ntc == asq->count)
			ntc = 0;
		desc = I40E_ADMINQ_DESC(*asq, ntc);
		details = I40E_ADMINQ_DETAILS(*asq, ntc);
	}

	asq->next_to_clean = ntc;

	return I40E_DESC_UNUSED(asq);
}
788
/**
 *  i40e_asq_done - check if FW has processed the Admin Send Queue
 *  @hw: pointer to the hw struct
 *
 *  Returns true if the firmware has processed all descriptors on the
 *  admin send queue. Returns false if there are still requests pending.
 *
 *  Exported (non-STATIC) in VF-only builds, where callers outside this
 *  file need to poll completion directly.
 **/
#ifdef VF_DRIVER
bool i40e_asq_done(struct i40e_hw *hw)
#else
STATIC bool i40e_asq_done(struct i40e_hw *hw)
#endif
{
	/* AQ designers suggest use of head for better
	 * timing reliability than DD bit
	 */
	return rd32(hw, hw->aq.asq.head) == hw->aq.asq.next_to_use;

}
808
/**
 *  i40e_asq_send_command - send command to Admin Queue
 *  @hw: pointer to the hw struct
 *  @desc: prefilled descriptor describing the command (non DMA mem)
 *  @buff: buffer to use for indirect commands
 *  @buff_size: size of buffer for indirect commands
 *  @cmd_details: pointer to command details structure
 *
 *  This is the main send command driver routine for the Admin Queue send
 *  queue.  It runs the queue, cleans the queue, etc
 *
 *  Returns I40E_SUCCESS when the firmware reports I40E_AQ_RC_OK, otherwise
 *  an i40e_status_code describing the failure; the raw firmware return code
 *  is preserved in hw->aq.asq_last_status.
 **/
enum i40e_status_code i40e_asq_send_command(struct i40e_hw *hw,
				struct i40e_aq_desc *desc,
				void *buff, /* can be NULL */
				u16  buff_size,
				struct i40e_asq_cmd_details *cmd_details)
{
	enum i40e_status_code status = I40E_SUCCESS;
	struct i40e_dma_mem *dma_buff = NULL;
	struct i40e_asq_cmd_details *details;
	struct i40e_aq_desc *desc_on_ring;
	bool cmd_completed = false;
	u16  retval = 0;
	u32  val = 0;

	/* Serialize all send-queue users; released at asq_send_command_error */
	i40e_acquire_spinlock(&hw->aq.asq_spinlock);

	hw->aq.asq_last_status = I40E_AQ_RC_OK;

	/* count == 0 means the queue was never initialized (or was shut down) */
	if (hw->aq.asq.count == 0) {
		i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: Admin queue not initialized.\n");
		status = I40E_ERR_QUEUE_EMPTY;
		goto asq_send_command_error;
	}

	/* Sanity-check the hardware head index before trusting the ring */
	val = rd32(hw, hw->aq.asq.head);
	if (val >= hw->aq.num_asq_entries) {
		i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: head overrun at %d\n", val);
		status = I40E_ERR_QUEUE_EMPTY;
		goto asq_send_command_error;
	}

	/* Per-descriptor details slot that travels with this ring entry */
	details = I40E_ADMINQ_DETAILS(hw->aq.asq, hw->aq.asq.next_to_use);
	if (cmd_details) {
		i40e_memcpy(details,
			    cmd_details,
			    sizeof(struct i40e_asq_cmd_details),
			    I40E_NONDMA_TO_NONDMA);

		/* If the cmd_details are defined copy the cookie.  The
		 * CPU_TO_LE32 is not needed here because the data is ignored
		 * by the FW, only used by the driver
		 */
		if (details->cookie) {
			desc->cookie_high =
				CPU_TO_LE32(I40E_HI_DWORD(details->cookie));
			desc->cookie_low =
				CPU_TO_LE32(I40E_LO_DWORD(details->cookie));
		}
	} else {
		/* No caller details: make sure the stale slot contents are
		 * cleared so flags/async/postpone below read as zero.
		 */
		i40e_memset(details, 0,
			    sizeof(struct i40e_asq_cmd_details),
			    I40E_NONDMA_MEM);
	}

	/* clear requested flags and then set additional flags if defined */
	desc->flags &= ~CPU_TO_LE16(details->flags_dis);
	desc->flags |= CPU_TO_LE16(details->flags_ena);

	/* An indirect buffer must fit in the DMA buffers pre-allocated
	 * for this queue.
	 */
	if (buff_size > hw->aq.asq_buf_size) {
		i40e_debug(hw,
			   I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: Invalid buffer size: %d.\n",
			   buff_size);
		status = I40E_ERR_INVALID_SIZE;
		goto asq_send_command_error;
	}

	/* postpone (don't ring the doorbell) only makes sense for async
	 * commands; a synchronous caller would wait forever.
	 */
	if (details->postpone && !details->async) {
		i40e_debug(hw,
			   I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: Async flag not set along with postpone flag");
		status = I40E_ERR_PARAM;
		goto asq_send_command_error;
	}

	/* call clean and check queue available function to reclaim the
	 * descriptors that were processed by FW, the function returns the
	 * number of desc available
	 */
	/* the clean function called here could be called in a separate thread
	 * in case of asynchronous completions
	 */
	if (i40e_clean_asq(hw) == 0) {
		i40e_debug(hw,
			   I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: Error queue is full.\n");
		status = I40E_ERR_ADMIN_QUEUE_FULL;
		goto asq_send_command_error;
	}

	/* initialize the temp desc pointer with the right desc */
	desc_on_ring = I40E_ADMINQ_DESC(hw->aq.asq, hw->aq.asq.next_to_use);

	/* if the desc is available copy the temp desc to the right place */
	i40e_memcpy(desc_on_ring, desc, sizeof(struct i40e_aq_desc),
		    I40E_NONDMA_TO_DMA);

	/* if buff is not NULL assume indirect command */
	if (buff != NULL) {
		dma_buff = &(hw->aq.asq.r.asq_bi[hw->aq.asq.next_to_use]);
		/* copy the user buff into the respective DMA buff */
		i40e_memcpy(dma_buff->va, buff, buff_size,
			    I40E_NONDMA_TO_DMA);
		desc_on_ring->datalen = CPU_TO_LE16(buff_size);

		/* Update the address values in the desc with the pa value
		 * for respective buffer
		 */
		desc_on_ring->params.external.addr_high =
				CPU_TO_LE32(I40E_HI_DWORD(dma_buff->pa));
		desc_on_ring->params.external.addr_low =
				CPU_TO_LE32(I40E_LO_DWORD(dma_buff->pa));
	}

	/* bump the tail */
	i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQTX: desc and buffer:\n");
	i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc_on_ring,
		      buff, buff_size);
	(hw->aq.asq.next_to_use)++;
	if (hw->aq.asq.next_to_use == hw->aq.asq.count)
		hw->aq.asq.next_to_use = 0;
	/* Writing the tail register is the doorbell that hands the
	 * descriptor to firmware; skipped when the caller postpones.
	 */
	if (!details->postpone)
		wr32(hw, hw->aq.asq.tail, hw->aq.asq.next_to_use);

	/* if cmd_details are not defined or async flag is not set,
	 * we need to wait for desc write back
	 */
	if (!details->async && !details->postpone) {
		u32 total_delay = 0;

		/* Poll for completion, 1 ms per iteration, up to
		 * hw->aq.asq_cmd_timeout milliseconds.
		 */
		do {
			/* AQ designers suggest use of head for better
			 * timing reliability than DD bit
			 */
			if (i40e_asq_done(hw))
				break;
			i40e_msec_delay(1);
			total_delay++;
		} while (total_delay < hw->aq.asq_cmd_timeout);
	}

	/* if ready, copy the desc back to temp */
	if (i40e_asq_done(hw)) {
		i40e_memcpy(desc, desc_on_ring, sizeof(struct i40e_aq_desc),
			    I40E_DMA_TO_NONDMA);
		if (buff != NULL)
			i40e_memcpy(buff, dma_buff->va, buff_size,
				    I40E_DMA_TO_NONDMA);
		retval = LE16_TO_CPU(desc->retval);
		if (retval != 0) {
			i40e_debug(hw,
				   I40E_DEBUG_AQ_MESSAGE,
				   "AQTX: Command completed with error 0x%X.\n",
				   retval);

			/* strip off FW internal code */
			retval &= 0xff;
		}
		cmd_completed = true;
		if ((enum i40e_admin_queue_err)retval == I40E_AQ_RC_OK)
			status = I40E_SUCCESS;
		else
			status = I40E_ERR_ADMIN_QUEUE_ERROR;
		hw->aq.asq_last_status = (enum i40e_admin_queue_err)retval;
	}

	i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
		   "AQTX: desc and buffer writeback:\n");
	i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, buff, buff_size);

	/* save writeback aq if requested */
	/* NOTE(review): this copy runs even when the command never completed
	 * (timeout path), so wb_desc may hold the un-written-back ring
	 * descriptor — confirm callers tolerate that.
	 */
	if (details->wb_desc)
		i40e_memcpy(details->wb_desc, desc_on_ring,
			    sizeof(struct i40e_aq_desc), I40E_DMA_TO_NONDMA);

	/* update the error if time out occurred */
	if ((!cmd_completed) &&
	    (!details->async && !details->postpone)) {
		i40e_debug(hw,
			   I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: Writeback timeout.\n");
		status = I40E_ERR_ADMIN_QUEUE_TIMEOUT;
	}

asq_send_command_error:
	i40e_release_spinlock(&hw->aq.asq_spinlock);
	return status;
}
1010
1011 /**
1012  *  i40e_fill_default_direct_cmd_desc - AQ descriptor helper function
1013  *  @desc:     pointer to the temp descriptor (non DMA mem)
1014  *  @opcode:   the opcode can be used to decide which flags to turn off or on
1015  *
1016  *  Fill the desc with default values
1017  **/
1018 void i40e_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc,
1019                                        u16 opcode)
1020 {
1021         /* zero out the desc */
1022         i40e_memset((void *)desc, 0, sizeof(struct i40e_aq_desc),
1023                     I40E_NONDMA_MEM);
1024         desc->opcode = CPU_TO_LE16(opcode);
1025         desc->flags = CPU_TO_LE16(I40E_AQ_FLAG_SI);
1026 }
1027
/**
 *  i40e_clean_arq_element
 *  @hw: pointer to the hw struct
 *  @e: event info from the receive descriptor, includes any buffers
 *  @pending: number of events that could be left to process
 *
 *  This function cleans one Admin Receive Queue element and returns
 *  the contents through e.  It can also return how many events are
 *  left to process through 'pending'
 *
 *  Returns I40E_ERR_ADMIN_QUEUE_NO_WORK when the ring is empty,
 *  I40E_ERR_ADMIN_QUEUE_ERROR when the descriptor carries the ERR flag
 *  (firmware code saved in hw->aq.arq_last_status), I40E_SUCCESS otherwise.
 **/
enum i40e_status_code i40e_clean_arq_element(struct i40e_hw *hw,
					     struct i40e_arq_event_info *e,
					     u16 *pending)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;
	u16 ntc = hw->aq.arq.next_to_clean;
	struct i40e_aq_desc *desc;
	struct i40e_dma_mem *bi;
	u16 desc_idx;
	u16 datalen;
	u16 flags;
	u16 ntu;

	/* pre-clean the event info */
	i40e_memset(&e->desc, 0, sizeof(e->desc), I40E_NONDMA_MEM);

	/* take the lock before we start messing with the ring */
	i40e_acquire_spinlock(&hw->aq.arq_spinlock);

	if (hw->aq.arq.count == 0) {
		i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
			   "AQRX: Admin queue not initialized.\n");
		ret_code = I40E_ERR_QUEUE_EMPTY;
		goto clean_arq_element_err;
	}

	/* set next_to_use to head */
	/* The head register (and its mask) differs between PF and VF; the
	 * right one is chosen at compile time, or at run time when both
	 * flavors are built in (INTEGRATED_VF).
	 * NOTE(review): if neither PF_DRIVER nor VF_DRIVER is defined in a
	 * non-INTEGRATED_VF build, ntu is left uninitialized — presumably
	 * the build system always defines one; verify.
	 */
#ifdef INTEGRATED_VF
	if (!i40e_is_vf(hw))
		ntu = rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK;
	else
		ntu = rd32(hw, hw->aq.arq.head) & I40E_VF_ARQH1_ARQH_MASK;
#else
#ifdef PF_DRIVER
	ntu = rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK;
#endif /* PF_DRIVER */
#ifdef VF_DRIVER
	ntu = rd32(hw, hw->aq.arq.head) & I40E_VF_ARQH1_ARQH_MASK;
#endif /* VF_DRIVER */
#endif /* INTEGRATED_VF */
	if (ntu == ntc) {
		/* nothing to do - shouldn't need to update ring's values */
		ret_code = I40E_ERR_ADMIN_QUEUE_NO_WORK;
		goto clean_arq_element_out;
	}

	/* now clean the next descriptor */
	desc = I40E_ADMINQ_DESC(hw->aq.arq, ntc);
	desc_idx = ntc;

	/* A set ERR flag means firmware reported a problem with this event;
	 * record its return code but still deliver the event to the caller.
	 */
	flags = LE16_TO_CPU(desc->flags);
	if (flags & I40E_AQ_FLAG_ERR) {
		ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;
		hw->aq.arq_last_status =
			(enum i40e_admin_queue_err)LE16_TO_CPU(desc->retval);
		i40e_debug(hw,
			   I40E_DEBUG_AQ_MESSAGE,
			   "AQRX: Event received with error 0x%X.\n",
			   hw->aq.arq_last_status);
	}

	/* copy the descriptor, and as much of the payload as the caller's
	 * buffer can hold (msg_len is capped at e->buf_len)
	 */
	i40e_memcpy(&e->desc, desc, sizeof(struct i40e_aq_desc),
		    I40E_DMA_TO_NONDMA);
	datalen = LE16_TO_CPU(desc->datalen);
	e->msg_len = min(datalen, e->buf_len);
	if (e->msg_buf != NULL && (e->msg_len != 0))
		i40e_memcpy(e->msg_buf,
			    hw->aq.arq.r.arq_bi[desc_idx].va,
			    e->msg_len, I40E_DMA_TO_NONDMA);

	i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQRX: desc and buffer:\n");
	i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, e->msg_buf,
		      hw->aq.arq_buf_size);

	/* Restore the original datalen and buffer address in the desc,
	 * FW updates datalen to indicate the event message
	 * size
	 */
	bi = &hw->aq.arq.r.arq_bi[ntc];
	i40e_memset((void *)desc, 0, sizeof(struct i40e_aq_desc), I40E_DMA_MEM);

	/* re-arm the descriptor with its permanently-assigned DMA buffer so
	 * firmware can reuse this ring slot
	 */
	desc->flags = CPU_TO_LE16(I40E_AQ_FLAG_BUF);
	if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF)
		desc->flags |= CPU_TO_LE16(I40E_AQ_FLAG_LB);
	desc->datalen = CPU_TO_LE16((u16)bi->size);
	desc->params.external.addr_high = CPU_TO_LE32(I40E_HI_DWORD(bi->pa));
	desc->params.external.addr_low = CPU_TO_LE32(I40E_LO_DWORD(bi->pa));

	/* set tail = the last cleaned desc index. */
	wr32(hw, hw->aq.arq.tail, ntc);
	/* ntc is updated to tail + 1 */
	ntc++;
	if (ntc == hw->aq.num_arq_entries)
		ntc = 0;
	hw->aq.arq.next_to_clean = ntc;
	hw->aq.arq.next_to_use = ntu;

#ifdef PF_DRIVER
	/* let the NVM-update state machine see every ARQ opcode it may be
	 * waiting on
	 */
	i40e_nvmupd_check_wait_event(hw, LE16_TO_CPU(e->desc.opcode));
#endif /* PF_DRIVER */
clean_arq_element_out:
	/* Set pending if needed, unlock and return */
	/* pending = number of descriptors still queued between the cleaned
	 * index and the hardware head, accounting for ring wrap-around
	 */
	if (pending != NULL)
		*pending = (ntc > ntu ? hw->aq.arq.count : 0) + (ntu - ntc);
clean_arq_element_err:
	i40e_release_spinlock(&hw->aq.arq_spinlock);

	return ret_code;
}
1147