/*******************************************************************************

Copyright (c) 2013 - 2015, Intel Corporation
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

 1. Redistributions of source code must retain the above copyright notice,
    this list of conditions and the following disclaimer.

 2. Redistributions in binary form must reproduce the above copyright
    notice, this list of conditions and the following disclaimer in the
    documentation and/or other materials provided with the distribution.

 3. Neither the name of the Intel Corporation nor the names of its
    contributors may be used to endorse or promote products derived from
    this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.

***************************************************************************/

#include "avf_status.h"
#include "avf_type.h"
#include "avf_register.h"
#include "avf_adminq.h"
#include "avf_prototype.h"
/**
 *  avf_adminq_init_regs - Initialize AdminQ registers
 *  @hw: pointer to the hardware structure
 *
 *  This assumes the alloc_asq and alloc_arq functions have already been called
 **/
STATIC void avf_adminq_init_regs(struct avf_hw *hw)
{
        /* set head and tail registers in our local struct */
        if (avf_is_vf(hw)) {
                hw->aq.asq.tail = AVF_ATQT1;
                hw->aq.asq.head = AVF_ATQH1;
                hw->aq.asq.len  = AVF_ATQLEN1;
                hw->aq.asq.bal  = AVF_ATQBAL1;
                hw->aq.asq.bah  = AVF_ATQBAH1;
                hw->aq.arq.tail = AVF_ARQT1;
                hw->aq.arq.head = AVF_ARQH1;
                hw->aq.arq.len  = AVF_ARQLEN1;
                hw->aq.arq.bal  = AVF_ARQBAL1;
                hw->aq.arq.bah  = AVF_ARQBAH1;
        }
}

/**
 *  avf_alloc_adminq_asq_ring - Allocate Admin Queue send rings
 *  @hw: pointer to the hardware structure
 **/
enum avf_status_code avf_alloc_adminq_asq_ring(struct avf_hw *hw)
{
        enum avf_status_code ret_code;

        ret_code = avf_allocate_dma_mem(hw, &hw->aq.asq.desc_buf,
                                         avf_mem_atq_ring,
                                         (hw->aq.num_asq_entries *
                                         sizeof(struct avf_aq_desc)),
                                         AVF_ADMINQ_DESC_ALIGNMENT);
        if (ret_code)
                return ret_code;

        ret_code = avf_allocate_virt_mem(hw, &hw->aq.asq.cmd_buf,
                                          (hw->aq.num_asq_entries *
                                          sizeof(struct avf_asq_cmd_details)));
        if (ret_code) {
                avf_free_dma_mem(hw, &hw->aq.asq.desc_buf);
                return ret_code;
        }

        return ret_code;
}

/**
 *  avf_alloc_adminq_arq_ring - Allocate Admin Queue receive rings
 *  @hw: pointer to the hardware structure
 **/
enum avf_status_code avf_alloc_adminq_arq_ring(struct avf_hw *hw)
{
        enum avf_status_code ret_code;

        ret_code = avf_allocate_dma_mem(hw, &hw->aq.arq.desc_buf,
                                         avf_mem_arq_ring,
                                         (hw->aq.num_arq_entries *
                                         sizeof(struct avf_aq_desc)),
                                         AVF_ADMINQ_DESC_ALIGNMENT);

        return ret_code;
}

/**
 *  avf_free_adminq_asq - Free Admin Queue send rings
 *  @hw: pointer to the hardware structure
 *
 *  This assumes the posted send buffers have already been cleaned
 *  and de-allocated
 **/
void avf_free_adminq_asq(struct avf_hw *hw)
{
        avf_free_dma_mem(hw, &hw->aq.asq.desc_buf);
}

/**
 *  avf_free_adminq_arq - Free Admin Queue receive rings
 *  @hw: pointer to the hardware structure
 *
 *  This assumes the posted receive buffers have already been cleaned
 *  and de-allocated
 **/
void avf_free_adminq_arq(struct avf_hw *hw)
{
        avf_free_dma_mem(hw, &hw->aq.arq.desc_buf);
}

/**
 *  avf_alloc_arq_bufs - Allocate pre-posted buffers for the receive queue
 *  @hw: pointer to the hardware structure
 **/
STATIC enum avf_status_code avf_alloc_arq_bufs(struct avf_hw *hw)
{
        enum avf_status_code ret_code;
        struct avf_aq_desc *desc;
        struct avf_dma_mem *bi;
        int i;

        /* We'll be allocating the buffer info memory first, then we can
         * allocate the mapped buffers for the event processing
         */

        /* buffer_info structures do not need alignment */
        ret_code = avf_allocate_virt_mem(hw, &hw->aq.arq.dma_head,
                (hw->aq.num_arq_entries * sizeof(struct avf_dma_mem)));
        if (ret_code)
                goto alloc_arq_bufs;
        hw->aq.arq.r.arq_bi = (struct avf_dma_mem *)hw->aq.arq.dma_head.va;

        /* allocate the mapped buffers */
        for (i = 0; i < hw->aq.num_arq_entries; i++) {
                bi = &hw->aq.arq.r.arq_bi[i];
                ret_code = avf_allocate_dma_mem(hw, bi,
                                                 avf_mem_arq_buf,
                                                 hw->aq.arq_buf_size,
                                                 AVF_ADMINQ_DESC_ALIGNMENT);
                if (ret_code)
                        goto unwind_alloc_arq_bufs;

                /* now configure the descriptors for use */
                desc = AVF_ADMINQ_DESC(hw->aq.arq, i);

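                /* BUF marks the descriptor as owning a data buffer that
                 * firmware may write events into; LB is assumed to flag a
                 * "large" buffer, i.e. one bigger than AVF_AQ_LARGE_BUF.
                 */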
                desc->flags = CPU_TO_LE16(AVF_AQ_FLAG_BUF);
                if (hw->aq.arq_buf_size > AVF_AQ_LARGE_BUF)
                        desc->flags |= CPU_TO_LE16(AVF_AQ_FLAG_LB);
                desc->opcode = 0;
                /* This is in accordance with Admin queue design, there is no
                 * register for buffer size configuration
                 */
                desc->datalen = CPU_TO_LE16((u16)bi->size);
                desc->retval = 0;
                desc->cookie_high = 0;
                desc->cookie_low = 0;
                desc->params.external.addr_high =
                        CPU_TO_LE32(AVF_HI_DWORD(bi->pa));
                desc->params.external.addr_low =
                        CPU_TO_LE32(AVF_LO_DWORD(bi->pa));
                desc->params.external.param0 = 0;
                desc->params.external.param1 = 0;
        }

alloc_arq_bufs:
        return ret_code;

unwind_alloc_arq_bufs:
        /* don't try to free the one that failed... */
        i--;
        for (; i >= 0; i--)
                avf_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
        avf_free_virt_mem(hw, &hw->aq.arq.dma_head);

        return ret_code;
}

/**
 *  avf_alloc_asq_bufs - Allocate empty buffer structs for the send queue
 *  @hw: pointer to the hardware structure
 **/
STATIC enum avf_status_code avf_alloc_asq_bufs(struct avf_hw *hw)
{
        enum avf_status_code ret_code;
        struct avf_dma_mem *bi;
        int i;

        /* No mapped memory needed yet, just the buffer info structures */
        ret_code = avf_allocate_virt_mem(hw, &hw->aq.asq.dma_head,
                (hw->aq.num_asq_entries * sizeof(struct avf_dma_mem)));
        if (ret_code)
                goto alloc_asq_bufs;
        hw->aq.asq.r.asq_bi = (struct avf_dma_mem *)hw->aq.asq.dma_head.va;

        /* allocate the mapped buffers */
        for (i = 0; i < hw->aq.num_asq_entries; i++) {
                bi = &hw->aq.asq.r.asq_bi[i];
                ret_code = avf_allocate_dma_mem(hw, bi,
                                                 avf_mem_asq_buf,
                                                 hw->aq.asq_buf_size,
                                                 AVF_ADMINQ_DESC_ALIGNMENT);
                if (ret_code)
                        goto unwind_alloc_asq_bufs;
        }
alloc_asq_bufs:
        return ret_code;

unwind_alloc_asq_bufs:
        /* don't try to free the one that failed... */
        i--;
        for (; i >= 0; i--)
                avf_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
        avf_free_virt_mem(hw, &hw->aq.asq.dma_head);

        return ret_code;
}

/**
 *  avf_free_arq_bufs - Free receive queue buffer info elements
 *  @hw: pointer to the hardware structure
 **/
STATIC void avf_free_arq_bufs(struct avf_hw *hw)
{
        int i;

        /* free descriptors */
        for (i = 0; i < hw->aq.num_arq_entries; i++)
                avf_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);

        /* free the descriptor memory */
        avf_free_dma_mem(hw, &hw->aq.arq.desc_buf);

        /* free the dma header */
        avf_free_virt_mem(hw, &hw->aq.arq.dma_head);
}

/**
 *  avf_free_asq_bufs - Free send queue buffer info elements
 *  @hw: pointer to the hardware structure
 **/
STATIC void avf_free_asq_bufs(struct avf_hw *hw)
{
        int i;

        /* only unmap if the address is non-NULL */
        for (i = 0; i < hw->aq.num_asq_entries; i++)
                if (hw->aq.asq.r.asq_bi[i].pa)
                        avf_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);

        /* free the buffer info list */
        avf_free_virt_mem(hw, &hw->aq.asq.cmd_buf);

        /* free the descriptor memory */
        avf_free_dma_mem(hw, &hw->aq.asq.desc_buf);

        /* free the dma header */
        avf_free_virt_mem(hw, &hw->aq.asq.dma_head);
}

/**
 *  avf_config_asq_regs - configure ASQ registers
 *  @hw: pointer to the hardware structure
 *
 *  Configure base address and length registers for the transmit queue
 **/
STATIC enum avf_status_code avf_config_asq_regs(struct avf_hw *hw)
{
        enum avf_status_code ret_code = AVF_SUCCESS;
        u32 reg = 0;

        /* Clear Head and Tail */
        wr32(hw, hw->aq.asq.head, 0);
        wr32(hw, hw->aq.asq.tail, 0);

        /* set starting point */
#ifdef INTEGRATED_VF
        if (avf_is_vf(hw))
                wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries |
                                          AVF_ATQLEN1_ATQENABLE_MASK));
#else
        wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries |
                                  AVF_ATQLEN1_ATQENABLE_MASK));
#endif /* INTEGRATED_VF */
        wr32(hw, hw->aq.asq.bal, AVF_LO_DWORD(hw->aq.asq.desc_buf.pa));
        wr32(hw, hw->aq.asq.bah, AVF_HI_DWORD(hw->aq.asq.desc_buf.pa));

        /* Check one register to verify that config was applied */
        reg = rd32(hw, hw->aq.asq.bal);
        if (reg != AVF_LO_DWORD(hw->aq.asq.desc_buf.pa))
                ret_code = AVF_ERR_ADMIN_QUEUE_ERROR;

        return ret_code;
}

/**
 *  avf_config_arq_regs - ARQ register configuration
 *  @hw: pointer to the hardware structure
 *
 *  Configure base address and length registers for the receive (event) queue
 **/
STATIC enum avf_status_code avf_config_arq_regs(struct avf_hw *hw)
{
        enum avf_status_code ret_code = AVF_SUCCESS;
        u32 reg = 0;

        /* Clear Head and Tail */
        wr32(hw, hw->aq.arq.head, 0);
        wr32(hw, hw->aq.arq.tail, 0);

        /* set starting point */
#ifdef INTEGRATED_VF
        if (avf_is_vf(hw))
                wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries |
                                          AVF_ARQLEN1_ARQENABLE_MASK));
#else
        wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries |
                                  AVF_ARQLEN1_ARQENABLE_MASK));
#endif /* INTEGRATED_VF */
        wr32(hw, hw->aq.arq.bal, AVF_LO_DWORD(hw->aq.arq.desc_buf.pa));
        wr32(hw, hw->aq.arq.bah, AVF_HI_DWORD(hw->aq.arq.desc_buf.pa));

        /* Update tail in the HW to post pre-allocated buffers */
        wr32(hw, hw->aq.arq.tail, hw->aq.num_arq_entries - 1);

        /* Check one register to verify that config was applied */
        reg = rd32(hw, hw->aq.arq.bal);
        if (reg != AVF_LO_DWORD(hw->aq.arq.desc_buf.pa))
                ret_code = AVF_ERR_ADMIN_QUEUE_ERROR;

        return ret_code;
}

/**
 *  avf_init_asq - main initialization routine for ASQ
 *  @hw: pointer to the hardware structure
 *
 *  This is the main initialization routine for the Admin Send Queue.
 *  Prior to calling this function, drivers *MUST* set the following fields
 *  in the hw->aq structure:
 *     - hw->aq.num_asq_entries
 *     - hw->aq.asq_buf_size
 *
 *  Do *NOT* hold the lock when calling this as the memory allocation routines
 *  called are not going to be atomic context safe
 **/
enum avf_status_code avf_init_asq(struct avf_hw *hw)
{
        enum avf_status_code ret_code = AVF_SUCCESS;

        if (hw->aq.asq.count > 0) {
                /* queue already initialized */
                ret_code = AVF_ERR_NOT_READY;
                goto init_adminq_exit;
        }

        /* verify input for valid configuration */
        if ((hw->aq.num_asq_entries == 0) ||
            (hw->aq.asq_buf_size == 0)) {
                ret_code = AVF_ERR_CONFIG;
                goto init_adminq_exit;
        }

        hw->aq.asq.next_to_use = 0;
        hw->aq.asq.next_to_clean = 0;

        /* allocate the ring memory */
        ret_code = avf_alloc_adminq_asq_ring(hw);
        if (ret_code != AVF_SUCCESS)
                goto init_adminq_exit;

        /* allocate buffers in the rings */
        ret_code = avf_alloc_asq_bufs(hw);
        if (ret_code != AVF_SUCCESS)
                goto init_adminq_free_rings;

        /* initialize base registers */
        ret_code = avf_config_asq_regs(hw);
        if (ret_code != AVF_SUCCESS)
                goto init_adminq_free_rings;

        /* success! */
        hw->aq.asq.count = hw->aq.num_asq_entries;
        goto init_adminq_exit;

init_adminq_free_rings:
        avf_free_adminq_asq(hw);

init_adminq_exit:
        return ret_code;
}

/**
 *  avf_init_arq - initialize ARQ
 *  @hw: pointer to the hardware structure
 *
 *  The main initialization routine for the Admin Receive (Event) Queue.
 *  Prior to calling this function, drivers *MUST* set the following fields
 *  in the hw->aq structure:
 *     - hw->aq.num_arq_entries
 *     - hw->aq.arq_buf_size
 *
 *  Do *NOT* hold the lock when calling this as the memory allocation routines
 *  called are not going to be atomic context safe
 **/
enum avf_status_code avf_init_arq(struct avf_hw *hw)
{
        enum avf_status_code ret_code = AVF_SUCCESS;

        if (hw->aq.arq.count > 0) {
                /* queue already initialized */
                ret_code = AVF_ERR_NOT_READY;
                goto init_adminq_exit;
        }

        /* verify input for valid configuration */
        if ((hw->aq.num_arq_entries == 0) ||
            (hw->aq.arq_buf_size == 0)) {
                ret_code = AVF_ERR_CONFIG;
                goto init_adminq_exit;
        }

        hw->aq.arq.next_to_use = 0;
        hw->aq.arq.next_to_clean = 0;

        /* allocate the ring memory */
        ret_code = avf_alloc_adminq_arq_ring(hw);
        if (ret_code != AVF_SUCCESS)
                goto init_adminq_exit;

        /* allocate buffers in the rings */
        ret_code = avf_alloc_arq_bufs(hw);
        if (ret_code != AVF_SUCCESS)
                goto init_adminq_free_rings;

        /* initialize base registers */
        ret_code = avf_config_arq_regs(hw);
        if (ret_code != AVF_SUCCESS)
                goto init_adminq_free_rings;

        /* success! */
        hw->aq.arq.count = hw->aq.num_arq_entries;
        goto init_adminq_exit;

init_adminq_free_rings:
        avf_free_adminq_arq(hw);

init_adminq_exit:
        return ret_code;
}

/**
 *  avf_shutdown_asq - shutdown the ASQ
 *  @hw: pointer to the hardware structure
 *
 *  The main shutdown routine for the Admin Send Queue
 **/
enum avf_status_code avf_shutdown_asq(struct avf_hw *hw)
{
        enum avf_status_code ret_code = AVF_SUCCESS;

        avf_acquire_spinlock(&hw->aq.asq_spinlock);

        if (hw->aq.asq.count == 0) {
                ret_code = AVF_ERR_NOT_READY;
                goto shutdown_asq_out;
        }

        /* Stop firmware AdminQ processing */
        wr32(hw, hw->aq.asq.head, 0);
        wr32(hw, hw->aq.asq.tail, 0);
        wr32(hw, hw->aq.asq.len, 0);
        wr32(hw, hw->aq.asq.bal, 0);
        wr32(hw, hw->aq.asq.bah, 0);

        hw->aq.asq.count = 0; /* to indicate uninitialized queue */

        /* free ring buffers */
        avf_free_asq_bufs(hw);

shutdown_asq_out:
        avf_release_spinlock(&hw->aq.asq_spinlock);
        return ret_code;
}

/**
 *  avf_shutdown_arq - shutdown ARQ
 *  @hw: pointer to the hardware structure
 *
 *  The main shutdown routine for the Admin Receive Queue
 **/
enum avf_status_code avf_shutdown_arq(struct avf_hw *hw)
{
        enum avf_status_code ret_code = AVF_SUCCESS;

        avf_acquire_spinlock(&hw->aq.arq_spinlock);

        if (hw->aq.arq.count == 0) {
                ret_code = AVF_ERR_NOT_READY;
                goto shutdown_arq_out;
        }

        /* Stop firmware AdminQ processing */
        wr32(hw, hw->aq.arq.head, 0);
        wr32(hw, hw->aq.arq.tail, 0);
        wr32(hw, hw->aq.arq.len, 0);
        wr32(hw, hw->aq.arq.bal, 0);
        wr32(hw, hw->aq.arq.bah, 0);

        hw->aq.arq.count = 0; /* to indicate uninitialized queue */

        /* free ring buffers */
        avf_free_arq_bufs(hw);

shutdown_arq_out:
        avf_release_spinlock(&hw->aq.arq_spinlock);
        return ret_code;
}

/**
 *  avf_init_adminq - main initialization routine for Admin Queue
 *  @hw: pointer to the hardware structure
 *
 *  Prior to calling this function, drivers *MUST* set the following fields
 *  in the hw->aq structure:
 *     - hw->aq.num_asq_entries
 *     - hw->aq.num_arq_entries
 *     - hw->aq.arq_buf_size
 *     - hw->aq.asq_buf_size
 **/
enum avf_status_code avf_init_adminq(struct avf_hw *hw)
{
        enum avf_status_code ret_code;

        /* verify input for valid configuration */
        if ((hw->aq.num_arq_entries == 0) ||
            (hw->aq.num_asq_entries == 0) ||
            (hw->aq.arq_buf_size == 0) ||
            (hw->aq.asq_buf_size == 0)) {
                ret_code = AVF_ERR_CONFIG;
                goto init_adminq_exit;
        }
        avf_init_spinlock(&hw->aq.asq_spinlock);
        avf_init_spinlock(&hw->aq.arq_spinlock);

        /* Set up register offsets */
        avf_adminq_init_regs(hw);

        /* setup ASQ command write back timeout */
        hw->aq.asq_cmd_timeout = AVF_ASQ_CMD_TIMEOUT;

        /* allocate the ASQ */
        ret_code = avf_init_asq(hw);
        if (ret_code != AVF_SUCCESS)
                goto init_adminq_destroy_spinlocks;

        /* allocate the ARQ */
        ret_code = avf_init_arq(hw);
        if (ret_code != AVF_SUCCESS)
                goto init_adminq_free_asq;

        ret_code = AVF_SUCCESS;

        /* success! */
        goto init_adminq_exit;

init_adminq_free_asq:
        avf_shutdown_asq(hw);
init_adminq_destroy_spinlocks:
        avf_destroy_spinlock(&hw->aq.asq_spinlock);
        avf_destroy_spinlock(&hw->aq.arq_spinlock);

init_adminq_exit:
        return ret_code;
}
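
/* Usage sketch (illustrative, not part of the upstream driver): callers are
 * expected to fill the sizing fields named in the kernel-doc above before
 * invoking avf_init_adminq(). The entry counts and buffer sizes below are
 * hypothetical example values, not values mandated by this file.
 *
 *      hw->aq.num_asq_entries = 32;
 *      hw->aq.num_arq_entries = 32;
 *      hw->aq.asq_buf_size = 4096;
 *      hw->aq.arq_buf_size = 4096;
 *      if (avf_init_adminq(hw) != AVF_SUCCESS)
 *              return AVF_ERR_ADMIN_QUEUE_ERROR;
 */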

/**
 *  avf_shutdown_adminq - shutdown routine for the Admin Queue
 *  @hw: pointer to the hardware structure
 **/
enum avf_status_code avf_shutdown_adminq(struct avf_hw *hw)
{
        enum avf_status_code ret_code = AVF_SUCCESS;

        if (avf_check_asq_alive(hw))
                avf_aq_queue_shutdown(hw, true);

        avf_shutdown_asq(hw);
        avf_shutdown_arq(hw);
        avf_destroy_spinlock(&hw->aq.asq_spinlock);
        avf_destroy_spinlock(&hw->aq.arq_spinlock);

        if (hw->nvm_buff.va)
                avf_free_virt_mem(hw, &hw->nvm_buff);

        return ret_code;
}

/**
 *  avf_clean_asq - cleans Admin send queue
 *  @hw: pointer to the hardware structure
 *
 *  returns the number of free desc
 **/
u16 avf_clean_asq(struct avf_hw *hw)
{
        struct avf_adminq_ring *asq = &(hw->aq.asq);
        struct avf_asq_cmd_details *details;
        u16 ntc = asq->next_to_clean;
        struct avf_aq_desc desc_cb;
        struct avf_aq_desc *desc;

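        /* Firmware advances the ASQ head register as it consumes commands;
         * every descriptor between next_to_clean and the current head can be
         * reclaimed, running any completion callback recorded in its details
         * before the slot is zeroed for reuse.
         */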
        desc = AVF_ADMINQ_DESC(*asq, ntc);
        details = AVF_ADMINQ_DETAILS(*asq, ntc);
        while (rd32(hw, hw->aq.asq.head) != ntc) {
                avf_debug(hw, AVF_DEBUG_AQ_MESSAGE,
                           "ntc %d head %d.\n", ntc, rd32(hw, hw->aq.asq.head));

                if (details->callback) {
                        AVF_ADMINQ_CALLBACK cb_func =
                                        (AVF_ADMINQ_CALLBACK)details->callback;
                        avf_memcpy(&desc_cb, desc, sizeof(struct avf_aq_desc),
                                    AVF_DMA_TO_DMA);
                        cb_func(hw, &desc_cb);
                }
                avf_memset(desc, 0, sizeof(*desc), AVF_DMA_MEM);
                avf_memset(details, 0, sizeof(*details), AVF_NONDMA_MEM);
                ntc++;
                if (ntc == asq->count)
                        ntc = 0;
                desc = AVF_ADMINQ_DESC(*asq, ntc);
                details = AVF_ADMINQ_DETAILS(*asq, ntc);
        }

        asq->next_to_clean = ntc;

        return AVF_DESC_UNUSED(asq);
}

/**
 *  avf_asq_done - check if FW has processed the Admin Send Queue
 *  @hw: pointer to the hw struct
 *
 *  Returns true if the firmware has processed all descriptors on the
 *  admin send queue. Returns false if there are still requests pending.
 **/
bool avf_asq_done(struct avf_hw *hw)
{
        /* AQ designers suggest use of head for better
         * timing reliability than DD bit
         */
        return rd32(hw, hw->aq.asq.head) == hw->aq.asq.next_to_use;
}

/**
 *  avf_asq_send_command - send command to Admin Queue
 *  @hw: pointer to the hw struct
 *  @desc: prefilled descriptor describing the command (non DMA mem)
 *  @buff: buffer to use for indirect commands
 *  @buff_size: size of buffer for indirect commands
 *  @cmd_details: pointer to command details structure
 *
684  *  queue.  It runs the queue, cleans the queue, etc
685  **/
enum avf_status_code avf_asq_send_command(struct avf_hw *hw,
                                struct avf_aq_desc *desc,
                                void *buff, /* can be NULL */
                                u16  buff_size,
                                struct avf_asq_cmd_details *cmd_details)
{
        enum avf_status_code status = AVF_SUCCESS;
        struct avf_dma_mem *dma_buff = NULL;
        struct avf_asq_cmd_details *details;
        struct avf_aq_desc *desc_on_ring;
        bool cmd_completed = false;
        u16  retval = 0;
        u32  val = 0;

        avf_acquire_spinlock(&hw->aq.asq_spinlock);

        hw->aq.asq_last_status = AVF_AQ_RC_OK;

        if (hw->aq.asq.count == 0) {
                avf_debug(hw, AVF_DEBUG_AQ_MESSAGE,
                           "AQTX: Admin queue not initialized.\n");
                status = AVF_ERR_QUEUE_EMPTY;
                goto asq_send_command_error;
        }

        val = rd32(hw, hw->aq.asq.head);
        if (val >= hw->aq.num_asq_entries) {
                avf_debug(hw, AVF_DEBUG_AQ_MESSAGE,
                           "AQTX: head overrun at %d\n", val);
                status = AVF_ERR_QUEUE_EMPTY;
                goto asq_send_command_error;
        }

        details = AVF_ADMINQ_DETAILS(hw->aq.asq, hw->aq.asq.next_to_use);
        if (cmd_details) {
                avf_memcpy(details,
                            cmd_details,
                            sizeof(struct avf_asq_cmd_details),
                            AVF_NONDMA_TO_NONDMA);

                /* If the cmd_details are defined copy the cookie.  The
                 * CPU_TO_LE32 is not needed here because the data is ignored
                 * by the FW, only used by the driver
                 */
                if (details->cookie) {
                        desc->cookie_high =
                                CPU_TO_LE32(AVF_HI_DWORD(details->cookie));
                        desc->cookie_low =
                                CPU_TO_LE32(AVF_LO_DWORD(details->cookie));
                }
        } else {
                avf_memset(details, 0,
                            sizeof(struct avf_asq_cmd_details),
                            AVF_NONDMA_MEM);
        }

        /* clear requested flags and then set additional flags if defined */
        desc->flags &= ~CPU_TO_LE16(details->flags_dis);
        desc->flags |= CPU_TO_LE16(details->flags_ena);

        if (buff_size > hw->aq.asq_buf_size) {
                avf_debug(hw,
                           AVF_DEBUG_AQ_MESSAGE,
                           "AQTX: Invalid buffer size: %d.\n",
                           buff_size);
                status = AVF_ERR_INVALID_SIZE;
                goto asq_send_command_error;
        }

        if (details->postpone && !details->async) {
                avf_debug(hw,
                           AVF_DEBUG_AQ_MESSAGE,
                           "AQTX: Async flag not set along with postpone flag.\n");
                status = AVF_ERR_PARAM;
                goto asq_send_command_error;
        }

        /* call clean and check queue available function to reclaim the
         * descriptors that were processed by FW, the function returns the
         * number of desc available
         */
        /* the clean function called here could be called in a separate thread
         * in case of asynchronous completions
         */
        if (avf_clean_asq(hw) == 0) {
                avf_debug(hw,
                           AVF_DEBUG_AQ_MESSAGE,
                           "AQTX: Error queue is full.\n");
                status = AVF_ERR_ADMIN_QUEUE_FULL;
                goto asq_send_command_error;
        }

        /* initialize the temp desc pointer with the right desc */
        desc_on_ring = AVF_ADMINQ_DESC(hw->aq.asq, hw->aq.asq.next_to_use);

        /* if the desc is available copy the temp desc to the right place */
        avf_memcpy(desc_on_ring, desc, sizeof(struct avf_aq_desc),
                    AVF_NONDMA_TO_DMA);

        /* if buff is not NULL assume indirect command */
        if (buff != NULL) {
                dma_buff = &(hw->aq.asq.r.asq_bi[hw->aq.asq.next_to_use]);
                /* copy the user buff into the respective DMA buff */
                avf_memcpy(dma_buff->va, buff, buff_size,
                            AVF_NONDMA_TO_DMA);
                desc_on_ring->datalen = CPU_TO_LE16(buff_size);

                /* Update the address values in the desc with the pa value
                 * for respective buffer
                 */
                desc_on_ring->params.external.addr_high =
                                CPU_TO_LE32(AVF_HI_DWORD(dma_buff->pa));
                desc_on_ring->params.external.addr_low =
                                CPU_TO_LE32(AVF_LO_DWORD(dma_buff->pa));
        }

        /* bump the tail */
        avf_debug(hw, AVF_DEBUG_AQ_MESSAGE, "AQTX: desc and buffer:\n");
        avf_debug_aq(hw, AVF_DEBUG_AQ_COMMAND, (void *)desc_on_ring,
                      buff, buff_size);
        (hw->aq.asq.next_to_use)++;
        if (hw->aq.asq.next_to_use == hw->aq.asq.count)
                hw->aq.asq.next_to_use = 0;
        if (!details->postpone)
                wr32(hw, hw->aq.asq.tail, hw->aq.asq.next_to_use);

        /* if cmd_details are not defined or async flag is not set,
         * we need to wait for desc write back
         */
        if (!details->async && !details->postpone) {
                u32 total_delay = 0;

                do {
                        /* AQ designers suggest use of head for better
                         * timing reliability than DD bit
                         */
                        if (avf_asq_done(hw))
                                break;
                        avf_usec_delay(50);
                        total_delay += 50;
                } while (total_delay < hw->aq.asq_cmd_timeout);
        }

        /* if ready, copy the desc back to temp */
        if (avf_asq_done(hw)) {
                avf_memcpy(desc, desc_on_ring, sizeof(struct avf_aq_desc),
                            AVF_DMA_TO_NONDMA);
                if (buff != NULL)
                        avf_memcpy(buff, dma_buff->va, buff_size,
                                    AVF_DMA_TO_NONDMA);
                retval = LE16_TO_CPU(desc->retval);
                if (retval != 0) {
                        avf_debug(hw,
                                   AVF_DEBUG_AQ_MESSAGE,
                                   "AQTX: Command completed with error 0x%X.\n",
                                   retval);

                        /* strip off FW internal code */
                        retval &= 0xff;
                }
                cmd_completed = true;
                if ((enum avf_admin_queue_err)retval == AVF_AQ_RC_OK)
                        status = AVF_SUCCESS;
                else
                        status = AVF_ERR_ADMIN_QUEUE_ERROR;
                hw->aq.asq_last_status = (enum avf_admin_queue_err)retval;
        }

        avf_debug(hw, AVF_DEBUG_AQ_MESSAGE,
                   "AQTX: desc and buffer writeback:\n");
        avf_debug_aq(hw, AVF_DEBUG_AQ_COMMAND, (void *)desc, buff, buff_size);

        /* save writeback aq if requested */
        if (details->wb_desc)
                avf_memcpy(details->wb_desc, desc_on_ring,
                            sizeof(struct avf_aq_desc), AVF_DMA_TO_NONDMA);

        /* update the error if time out occurred */
        if ((!cmd_completed) &&
            (!details->async && !details->postpone)) {
                if (rd32(hw, hw->aq.asq.len) & AVF_ATQLEN1_ATQCRIT_MASK) {
                        avf_debug(hw, AVF_DEBUG_AQ_MESSAGE,
                                   "AQTX: AQ Critical error.\n");
                        status = AVF_ERR_ADMIN_QUEUE_CRITICAL_ERROR;
                } else {
                        avf_debug(hw, AVF_DEBUG_AQ_MESSAGE,
                                   "AQTX: Writeback timeout.\n");
                        status = AVF_ERR_ADMIN_QUEUE_TIMEOUT;
                }
        }

asq_send_command_error:
        avf_release_spinlock(&hw->aq.asq_spinlock);
        return status;
}

/**
 *  avf_fill_default_direct_cmd_desc - AQ descriptor helper function
 *  @desc:     pointer to the temp descriptor (non DMA mem)
 *  @opcode:   the opcode can be used to decide which flags to turn off or on
 *
 *  Fill the desc with default values
 **/
void avf_fill_default_direct_cmd_desc(struct avf_aq_desc *desc,
                                       u16 opcode)
{
        /* zero out the desc */
        avf_memset((void *)desc, 0, sizeof(struct avf_aq_desc),
                    AVF_NONDMA_MEM);
        desc->opcode = CPU_TO_LE16(opcode);
        desc->flags = CPU_TO_LE16(AVF_AQ_FLAG_SI);
}
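
/* Usage sketch (illustrative, assumption-laden): a direct (buffer-less)
 * command would typically be built with the helper above and pushed through
 * avf_asq_send_command(). The opcode value 0x0003 is a hypothetical example,
 * not one defined in this file.
 *
 *      struct avf_aq_desc desc;
 *
 *      avf_fill_default_direct_cmd_desc(&desc, 0x0003);
 *      if (avf_asq_send_command(hw, &desc, NULL, 0, NULL) != AVF_SUCCESS)
 *              avf_debug(hw, AVF_DEBUG_AQ_MESSAGE, "command failed\n");
 */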

/**
 *  avf_clean_arq_element
 *  @hw: pointer to the hw struct
 *  @e: event info from the receive descriptor, includes any buffers
 *  @pending: number of events that could be left to process
 *
 *  This function cleans one Admin Receive Queue element and returns
 *  the contents through e.  It can also return how many events are
 *  left to process through 'pending'
 **/
enum avf_status_code avf_clean_arq_element(struct avf_hw *hw,
                                             struct avf_arq_event_info *e,
                                             u16 *pending)
{
        enum avf_status_code ret_code = AVF_SUCCESS;
        u16 ntc = hw->aq.arq.next_to_clean;
        struct avf_aq_desc *desc;
        struct avf_dma_mem *bi;
        u16 desc_idx;
        u16 datalen;
        u16 flags;
        u16 ntu;

        /* pre-clean the event info */
        avf_memset(&e->desc, 0, sizeof(e->desc), AVF_NONDMA_MEM);

        /* take the lock before we start messing with the ring */
        avf_acquire_spinlock(&hw->aq.arq_spinlock);

        if (hw->aq.arq.count == 0) {
                avf_debug(hw, AVF_DEBUG_AQ_MESSAGE,
                           "AQRX: Admin queue not initialized.\n");
                ret_code = AVF_ERR_QUEUE_EMPTY;
                goto clean_arq_element_err;
        }

        /* set next_to_use to head */
#ifdef INTEGRATED_VF
        if (!avf_is_vf(hw))
                ntu = rd32(hw, hw->aq.arq.head) & AVF_PF_ARQH_ARQH_MASK;
        else
                ntu = rd32(hw, hw->aq.arq.head) & AVF_ARQH1_ARQH_MASK;
#else
        ntu = rd32(hw, hw->aq.arq.head) & AVF_ARQH1_ARQH_MASK;
#endif /* INTEGRATED_VF */
        if (ntu == ntc) {
                /* nothing to do - shouldn't need to update ring's values */
                ret_code = AVF_ERR_ADMIN_QUEUE_NO_WORK;
                goto clean_arq_element_out;
        }

        /* now clean the next descriptor */
        desc = AVF_ADMINQ_DESC(hw->aq.arq, ntc);
        desc_idx = ntc;

        hw->aq.arq_last_status =
                (enum avf_admin_queue_err)LE16_TO_CPU(desc->retval);
        flags = LE16_TO_CPU(desc->flags);
        if (flags & AVF_AQ_FLAG_ERR) {
                ret_code = AVF_ERR_ADMIN_QUEUE_ERROR;
                avf_debug(hw,
                           AVF_DEBUG_AQ_MESSAGE,
                           "AQRX: Event received with error 0x%X.\n",
                           hw->aq.arq_last_status);
        }

        avf_memcpy(&e->desc, desc, sizeof(struct avf_aq_desc),
                    AVF_DMA_TO_NONDMA);
        datalen = LE16_TO_CPU(desc->datalen);
        e->msg_len = min(datalen, e->buf_len);
        if (e->msg_buf != NULL && (e->msg_len != 0))
                avf_memcpy(e->msg_buf,
                            hw->aq.arq.r.arq_bi[desc_idx].va,
                            e->msg_len, AVF_DMA_TO_NONDMA);

        avf_debug(hw, AVF_DEBUG_AQ_MESSAGE, "AQRX: desc and buffer:\n");
        avf_debug_aq(hw, AVF_DEBUG_AQ_COMMAND, (void *)desc, e->msg_buf,
                      hw->aq.arq_buf_size);

        /* Restore the original datalen and buffer address in the desc,
         * FW updates datalen to indicate the event message
         * size
         */
        bi = &hw->aq.arq.r.arq_bi[ntc];
        avf_memset((void *)desc, 0, sizeof(struct avf_aq_desc), AVF_DMA_MEM);

        desc->flags = CPU_TO_LE16(AVF_AQ_FLAG_BUF);
        if (hw->aq.arq_buf_size > AVF_AQ_LARGE_BUF)
                desc->flags |= CPU_TO_LE16(AVF_AQ_FLAG_LB);
        desc->datalen = CPU_TO_LE16((u16)bi->size);
        desc->params.external.addr_high = CPU_TO_LE32(AVF_HI_DWORD(bi->pa));
        desc->params.external.addr_low = CPU_TO_LE32(AVF_LO_DWORD(bi->pa));

        /* set tail = the last cleaned desc index. */
        wr32(hw, hw->aq.arq.tail, ntc);
        /* ntc is updated to tail + 1 */
        ntc++;
        if (ntc == hw->aq.num_arq_entries)
                ntc = 0;
        hw->aq.arq.next_to_clean = ntc;
        hw->aq.arq.next_to_use = ntu;

clean_arq_element_out:
        /* Set pending if needed, unlock and return */
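        /* pending = descriptors firmware has produced that we have not yet
         * cleaned; when the head (ntu) has wrapped around behind ntc, the
         * ring size is added so the count stays non-negative
         */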
        if (pending != NULL)
                *pending = (ntc > ntu ? hw->aq.arq.count : 0) + (ntu - ntc);
clean_arq_element_err:
        avf_release_spinlock(&hw->aq.arq_spinlock);

        return ret_code;
}