/*
 *   BSD LICENSE
 *
 *   Copyright (C) Cavium, Inc. 2016.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Cavium, Inc nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <unistd.h>
#include <math.h>
#include <errno.h>
#include <stdarg.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>

#include "nicvf_plat.h"

struct nicvf_reg_info {
        uint32_t offset;
        const char *name;
};
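
/* Register polling budget: nicvf_qset_poll_reg() retries up to
 * NICVF_REG_POLL_ITER_NR reads spaced NICVF_REG_POLL_DELAY_US apart,
 * i.e. roughly 20 ms before a poll times out. NICVF_REG_INFO() pairs a
 * register offset with its stringified name for the dump tables below.
 */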
#define NICVF_REG_POLL_ITER_NR   (10)
#define NICVF_REG_POLL_DELAY_US  (2000)
#define NICVF_REG_INFO(reg) {reg, #reg}

static const struct nicvf_reg_info nicvf_reg_tbl[] = {
        NICVF_REG_INFO(NIC_VF_CFG),
        NICVF_REG_INFO(NIC_VF_PF_MAILBOX_0_1),
        NICVF_REG_INFO(NIC_VF_INT),
        NICVF_REG_INFO(NIC_VF_INT_W1S),
        NICVF_REG_INFO(NIC_VF_ENA_W1C),
        NICVF_REG_INFO(NIC_VF_ENA_W1S),
        NICVF_REG_INFO(NIC_VNIC_RSS_CFG),
        NICVF_REG_INFO(NIC_VNIC_RQ_GEN_CFG),
};

static const struct nicvf_reg_info nicvf_multi_reg_tbl[] = {
        {NIC_VNIC_RSS_KEY_0_4 + 0,  "NIC_VNIC_RSS_KEY_0"},
        {NIC_VNIC_RSS_KEY_0_4 + 8,  "NIC_VNIC_RSS_KEY_1"},
        {NIC_VNIC_RSS_KEY_0_4 + 16, "NIC_VNIC_RSS_KEY_2"},
        {NIC_VNIC_RSS_KEY_0_4 + 24, "NIC_VNIC_RSS_KEY_3"},
        {NIC_VNIC_RSS_KEY_0_4 + 32, "NIC_VNIC_RSS_KEY_4"},
        {NIC_VNIC_TX_STAT_0_4 + 0,  "NIC_VNIC_STAT_TX_OCTS"},
        {NIC_VNIC_TX_STAT_0_4 + 8,  "NIC_VNIC_STAT_TX_UCAST"},
        {NIC_VNIC_TX_STAT_0_4 + 16, "NIC_VNIC_STAT_TX_BCAST"},
        {NIC_VNIC_TX_STAT_0_4 + 24, "NIC_VNIC_STAT_TX_MCAST"},
        {NIC_VNIC_TX_STAT_0_4 + 32, "NIC_VNIC_STAT_TX_DROP"},
        {NIC_VNIC_RX_STAT_0_13 + 0,   "NIC_VNIC_STAT_RX_OCTS"},
        {NIC_VNIC_RX_STAT_0_13 + 8,   "NIC_VNIC_STAT_RX_UCAST"},
        {NIC_VNIC_RX_STAT_0_13 + 16,  "NIC_VNIC_STAT_RX_BCAST"},
        {NIC_VNIC_RX_STAT_0_13 + 24,  "NIC_VNIC_STAT_RX_MCAST"},
        {NIC_VNIC_RX_STAT_0_13 + 32,  "NIC_VNIC_STAT_RX_RED"},
        {NIC_VNIC_RX_STAT_0_13 + 40,  "NIC_VNIC_STAT_RX_RED_OCTS"},
        {NIC_VNIC_RX_STAT_0_13 + 48,  "NIC_VNIC_STAT_RX_ORUN"},
        {NIC_VNIC_RX_STAT_0_13 + 56,  "NIC_VNIC_STAT_RX_ORUN_OCTS"},
        {NIC_VNIC_RX_STAT_0_13 + 64,  "NIC_VNIC_STAT_RX_FCS"},
        {NIC_VNIC_RX_STAT_0_13 + 72,  "NIC_VNIC_STAT_RX_L2ERR"},
        {NIC_VNIC_RX_STAT_0_13 + 80,  "NIC_VNIC_STAT_RX_DRP_BCAST"},
        {NIC_VNIC_RX_STAT_0_13 + 88,  "NIC_VNIC_STAT_RX_DRP_MCAST"},
        {NIC_VNIC_RX_STAT_0_13 + 96,  "NIC_VNIC_STAT_RX_DRP_L3BCAST"},
        {NIC_VNIC_RX_STAT_0_13 + 104, "NIC_VNIC_STAT_RX_DRP_L3MCAST"},
};

static const struct nicvf_reg_info nicvf_qset_cq_reg_tbl[] = {
        NICVF_REG_INFO(NIC_QSET_CQ_0_7_CFG),
        NICVF_REG_INFO(NIC_QSET_CQ_0_7_CFG2),
        NICVF_REG_INFO(NIC_QSET_CQ_0_7_THRESH),
        NICVF_REG_INFO(NIC_QSET_CQ_0_7_BASE),
        NICVF_REG_INFO(NIC_QSET_CQ_0_7_HEAD),
        NICVF_REG_INFO(NIC_QSET_CQ_0_7_TAIL),
        NICVF_REG_INFO(NIC_QSET_CQ_0_7_DOOR),
        NICVF_REG_INFO(NIC_QSET_CQ_0_7_STATUS),
        NICVF_REG_INFO(NIC_QSET_CQ_0_7_STATUS2),
        NICVF_REG_INFO(NIC_QSET_CQ_0_7_DEBUG),
};

static const struct nicvf_reg_info nicvf_qset_rq_reg_tbl[] = {
        NICVF_REG_INFO(NIC_QSET_RQ_0_7_CFG),
        NICVF_REG_INFO(NIC_QSET_RQ_0_7_STATUS0),
        NICVF_REG_INFO(NIC_QSET_RQ_0_7_STATUS1),
};

static const struct nicvf_reg_info nicvf_qset_sq_reg_tbl[] = {
        NICVF_REG_INFO(NIC_QSET_SQ_0_7_CFG),
        NICVF_REG_INFO(NIC_QSET_SQ_0_7_THRESH),
        NICVF_REG_INFO(NIC_QSET_SQ_0_7_BASE),
        NICVF_REG_INFO(NIC_QSET_SQ_0_7_HEAD),
        NICVF_REG_INFO(NIC_QSET_SQ_0_7_TAIL),
        NICVF_REG_INFO(NIC_QSET_SQ_0_7_DOOR),
        NICVF_REG_INFO(NIC_QSET_SQ_0_7_STATUS),
        NICVF_REG_INFO(NIC_QSET_SQ_0_7_DEBUG),
        NICVF_REG_INFO(NIC_QSET_SQ_0_7_STATUS0),
        NICVF_REG_INFO(NIC_QSET_SQ_0_7_STATUS1),
};

static const struct nicvf_reg_info nicvf_qset_rbdr_reg_tbl[] = {
        NICVF_REG_INFO(NIC_QSET_RBDR_0_1_CFG),
        NICVF_REG_INFO(NIC_QSET_RBDR_0_1_THRESH),
        NICVF_REG_INFO(NIC_QSET_RBDR_0_1_BASE),
        NICVF_REG_INFO(NIC_QSET_RBDR_0_1_HEAD),
        NICVF_REG_INFO(NIC_QSET_RBDR_0_1_TAIL),
        NICVF_REG_INFO(NIC_QSET_RBDR_0_1_DOOR),
        NICVF_REG_INFO(NIC_QSET_RBDR_0_1_STATUS0),
        NICVF_REG_INFO(NIC_QSET_RBDR_0_1_STATUS1),
        NICVF_REG_INFO(NIC_QSET_RBDR_0_1_PRFCH_STATUS),
};
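
/*
 * Detect the silicon revision from the PCI subsystem device ID and
 * record the capabilities the driver may use. CN88XX pass2, CN81XX and
 * CN83XX all support tunnel parsing and the CQE_RX2 layout; CN83XX can
 * additionally disable alignment padding (APAD).
 */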
int
nicvf_base_init(struct nicvf *nic)
{
        nic->hwcap = 0;
        if (nic->subsystem_device_id == 0)
                return NICVF_ERR_BASE_INIT;

        if (nicvf_hw_version(nic) == PCI_SUB_DEVICE_ID_CN88XX_PASS2_NICVF)
                nic->hwcap |= NICVF_CAP_TUNNEL_PARSING | NICVF_CAP_CQE_RX2;

        if (nicvf_hw_version(nic) == PCI_SUB_DEVICE_ID_CN81XX_NICVF)
                nic->hwcap |= NICVF_CAP_TUNNEL_PARSING | NICVF_CAP_CQE_RX2;

        if (nicvf_hw_version(nic) == PCI_SUB_DEVICE_ID_CN83XX_NICVF)
                nic->hwcap |= NICVF_CAP_TUNNEL_PARSING | NICVF_CAP_CQE_RX2 |
                                NICVF_CAP_DISABLE_APAD;

        return NICVF_OK;
}

/* Dump registers to stdout when data is NULL, otherwise copy them into data */
int
nicvf_reg_dump(struct nicvf *nic, uint64_t *data)
{
        uint32_t i, q;
        bool dump_stdout;

        dump_stdout = (data == NULL);

        for (i = 0; i < NICVF_ARRAY_SIZE(nicvf_reg_tbl); i++)
                if (dump_stdout)
                        nicvf_log("%24s  = 0x%" PRIx64 "\n",
                                nicvf_reg_tbl[i].name,
                                nicvf_reg_read(nic, nicvf_reg_tbl[i].offset));
                else
                        *data++ = nicvf_reg_read(nic, nicvf_reg_tbl[i].offset);

        for (i = 0; i < NICVF_ARRAY_SIZE(nicvf_multi_reg_tbl); i++)
                if (dump_stdout)
                        nicvf_log("%24s  = 0x%" PRIx64 "\n",
                                nicvf_multi_reg_tbl[i].name,
                                nicvf_reg_read(nic,
                                        nicvf_multi_reg_tbl[i].offset));
                else
                        *data++ = nicvf_reg_read(nic,
                                        nicvf_multi_reg_tbl[i].offset);

        for (q = 0; q < MAX_CMP_QUEUES_PER_QS; q++)
                for (i = 0; i < NICVF_ARRAY_SIZE(nicvf_qset_cq_reg_tbl); i++)
                        if (dump_stdout)
                                nicvf_log("%30s(%d)  = 0x%" PRIx64 "\n",
                                        nicvf_qset_cq_reg_tbl[i].name, q,
                                        nicvf_queue_reg_read(nic,
                                        nicvf_qset_cq_reg_tbl[i].offset, q));
                        else
                                *data++ = nicvf_queue_reg_read(nic,
                                        nicvf_qset_cq_reg_tbl[i].offset, q);

        for (q = 0; q < MAX_RCV_QUEUES_PER_QS; q++)
                for (i = 0; i < NICVF_ARRAY_SIZE(nicvf_qset_rq_reg_tbl); i++)
                        if (dump_stdout)
                                nicvf_log("%30s(%d)  = 0x%" PRIx64 "\n",
                                        nicvf_qset_rq_reg_tbl[i].name, q,
                                        nicvf_queue_reg_read(nic,
                                        nicvf_qset_rq_reg_tbl[i].offset, q));
                        else
                                *data++ = nicvf_queue_reg_read(nic,
                                        nicvf_qset_rq_reg_tbl[i].offset, q);

        for (q = 0; q < MAX_SND_QUEUES_PER_QS; q++)
                for (i = 0; i < NICVF_ARRAY_SIZE(nicvf_qset_sq_reg_tbl); i++)
                        if (dump_stdout)
                                nicvf_log("%30s(%d)  = 0x%" PRIx64 "\n",
                                        nicvf_qset_sq_reg_tbl[i].name, q,
                                        nicvf_queue_reg_read(nic,
                                        nicvf_qset_sq_reg_tbl[i].offset, q));
                        else
                                *data++ = nicvf_queue_reg_read(nic,
                                        nicvf_qset_sq_reg_tbl[i].offset, q);

        for (q = 0; q < MAX_RCV_BUF_DESC_RINGS_PER_QS; q++)
                for (i = 0; i < NICVF_ARRAY_SIZE(nicvf_qset_rbdr_reg_tbl); i++)
                        if (dump_stdout)
                                nicvf_log("%30s(%d)  = 0x%" PRIx64 "\n",
                                        nicvf_qset_rbdr_reg_tbl[i].name, q,
                                        nicvf_queue_reg_read(nic,
                                        nicvf_qset_rbdr_reg_tbl[i].offset, q));
                        else
                                *data++ = nicvf_queue_reg_read(nic,
                                        nicvf_qset_rbdr_reg_tbl[i].offset, q);
        return 0;
}
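
/* Number of 64-bit words nicvf_reg_dump() stores when given a buffer;
 * callers must size the data buffer from this count.
 */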
int
nicvf_reg_get_count(void)
{
        int nr_regs;

        nr_regs = NICVF_ARRAY_SIZE(nicvf_reg_tbl);
        nr_regs += NICVF_ARRAY_SIZE(nicvf_multi_reg_tbl);
        nr_regs += NICVF_ARRAY_SIZE(nicvf_qset_cq_reg_tbl) *
                        MAX_CMP_QUEUES_PER_QS;
        nr_regs += NICVF_ARRAY_SIZE(nicvf_qset_rq_reg_tbl) *
                        MAX_RCV_QUEUES_PER_QS;
        nr_regs += NICVF_ARRAY_SIZE(nicvf_qset_sq_reg_tbl) *
                        MAX_SND_QUEUES_PER_QS;
        nr_regs += NICVF_ARRAY_SIZE(nicvf_qset_rbdr_reg_tbl) *
                        MAX_RCV_BUF_DESC_RINGS_PER_QS;

        return nr_regs;
}

static int
nicvf_qset_config_internal(struct nicvf *nic, bool enable)
{
        int ret;
        struct pf_qs_cfg pf_qs_cfg = {.value = 0};

        pf_qs_cfg.ena = enable ? 1 : 0;
        pf_qs_cfg.vnic = nic->vf_id;
        ret = nicvf_mbox_qset_config(nic, &pf_qs_cfg);
        return ret ? NICVF_ERR_SET_QS : 0;
}

/* Requests PF to assign and enable Qset */
int
nicvf_qset_config(struct nicvf *nic)
{
        /* Enable Qset */
        return nicvf_qset_config_internal(nic, true);
}

int
nicvf_qset_reclaim(struct nicvf *nic)
{
        /* Disable Qset */
        return nicvf_qset_config_internal(nic, false);
}

static int
cmpfunc(const void *a, const void *b)
{
        uint32_t left = *(const uint32_t *)a;
        uint32_t right = *(const uint32_t *)b;

        /* Compare without the overflow risk of unsigned subtraction */
        return (left > right) - (left < right);
}
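
/* Return the smallest entry in @list that is >= @val, or 0 when @val
 * exceeds every entry. Sorts @list in place as a side effect.
 */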
static uint32_t
nicvf_roundup_list(uint32_t val, uint32_t list[], uint32_t entries)
{
        uint32_t i;

        qsort(list, entries, sizeof(uint32_t), cmpfunc);
        for (i = 0; i < entries; i++)
                if (val <= list[i])
                        break;
        /* Not in the list */
        if (i >= entries)
                return 0;
        else
                return list[i];
}
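
/*
 * Fatal error path: report which CQ, SQ or RBDR raised the queue-set
 * error, dump every register for post-mortem analysis, then mask all
 * interrupts and abort the process.
 */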
static void
nicvf_handle_qset_err_intr(struct nicvf *nic)
{
        uint16_t qidx;
        uint64_t status;

        nicvf_log("%s (VF%d)\n", __func__, nic->vf_id);
        nicvf_reg_dump(nic, NULL);

        for (qidx = 0; qidx < MAX_CMP_QUEUES_PER_QS; qidx++) {
                status = nicvf_queue_reg_read(
                                nic, NIC_QSET_CQ_0_7_STATUS, qidx);
                if (!(status & NICVF_CQ_ERR_MASK))
                        continue;

                if (status & NICVF_CQ_WR_FULL)
                        nicvf_log("[%d]NICVF_CQ_WR_FULL\n", qidx);
                if (status & NICVF_CQ_WR_DISABLE)
                        nicvf_log("[%d]NICVF_CQ_WR_DISABLE\n", qidx);
                if (status & NICVF_CQ_WR_FAULT)
                        nicvf_log("[%d]NICVF_CQ_WR_FAULT\n", qidx);
                nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_STATUS, qidx, 0);
        }

        for (qidx = 0; qidx < MAX_SND_QUEUES_PER_QS; qidx++) {
                status = nicvf_queue_reg_read(
                                nic, NIC_QSET_SQ_0_7_STATUS, qidx);
                if (!(status & NICVF_SQ_ERR_MASK))
                        continue;

                if (status & NICVF_SQ_ERR_STOPPED)
                        nicvf_log("[%d]NICVF_SQ_ERR_STOPPED\n", qidx);
                if (status & NICVF_SQ_ERR_SEND)
                        nicvf_log("[%d]NICVF_SQ_ERR_SEND\n", qidx);
                if (status & NICVF_SQ_ERR_DPE)
                        nicvf_log("[%d]NICVF_SQ_ERR_DPE\n", qidx);
                nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_STATUS, qidx, 0);
        }

        for (qidx = 0; qidx < MAX_RCV_BUF_DESC_RINGS_PER_QS; qidx++) {
                status = nicvf_queue_reg_read(nic,
                                NIC_QSET_RBDR_0_1_STATUS0, qidx);
                status &= NICVF_RBDR_FIFO_STATE_MASK;
                status >>= NICVF_RBDR_FIFO_STATE_SHIFT;

                if (status == RBDR_FIFO_STATE_FAIL)
                        nicvf_log("[%d]RBDR_FIFO_STATE_FAIL\n", qidx);
                nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_STATUS0, qidx, 0);
        }

        nicvf_disable_all_interrupts(nic);
        abort();
}

/*
 * Handle the "mbox" and "queue-set error" interrupts that the poll mode
 * driver is interested in. This function is not re-entrant;
 * the caller must provide proper serialization.
 */
int
nicvf_reg_poll_interrupts(struct nicvf *nic)
{
        int msg = 0;
        uint64_t intr;

        intr = nicvf_reg_read(nic, NIC_VF_INT);
        if (intr & NICVF_INTR_MBOX_MASK) {
                nicvf_reg_write(nic, NIC_VF_INT, NICVF_INTR_MBOX_MASK);
                msg = nicvf_handle_mbx_intr(nic);
        }
        if (intr & NICVF_INTR_QS_ERR_MASK) {
                nicvf_reg_write(nic, NIC_VF_INT, NICVF_INTR_QS_ERR_MASK);
                nicvf_handle_qset_err_intr(nic);
        }
        return msg;
}
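
/* Poll a @bits wide field at @bit_pos of a queue register until it
 * reads @val; gives up after NICVF_REG_POLL_ITER_NR attempts spaced
 * NICVF_REG_POLL_DELAY_US apart (roughly 20 ms in total).
 */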
static int
nicvf_qset_poll_reg(struct nicvf *nic, uint16_t qidx, uint32_t offset,
                    uint32_t bit_pos, uint32_t bits, uint64_t val)
{
        uint64_t bit_mask;
        uint64_t reg_val;
        int timeout = NICVF_REG_POLL_ITER_NR;

        bit_mask = (1ULL << bits) - 1;
        bit_mask = (bit_mask << bit_pos);

        while (timeout) {
                reg_val = nicvf_queue_reg_read(nic, offset, qidx);
                if (((reg_val & bit_mask) >> bit_pos) == val)
                        return NICVF_OK;
                nicvf_delay_us(NICVF_REG_POLL_DELAY_US);
                timeout--;
        }
        return NICVF_ERR_REG_POLL;
}
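
/*
 * Quiesce an RBDR: remember head/tail so queued buffers can be freed,
 * reset and disable the ring, wait for the prefetch FIFO to drain
 * (both 32-bit halves of PRFCH_STATUS equal), then reset again and
 * poll the FIFO state field (bits 63:62 of STATUS0) back to idle.
 */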
int
nicvf_qset_rbdr_reclaim(struct nicvf *nic, uint16_t qidx)
{
        uint64_t status;
        int timeout = NICVF_REG_POLL_ITER_NR;
        struct nicvf_rbdr *rbdr = nic->rbdr;

        /* Save head and tail pointers for freeing up buffers */
        if (rbdr) {
                rbdr->head = nicvf_queue_reg_read(nic,
                                NIC_QSET_RBDR_0_1_HEAD, qidx) >> 3;
                rbdr->tail = nicvf_queue_reg_read(nic,
                                NIC_QSET_RBDR_0_1_TAIL, qidx) >> 3;
                rbdr->next_tail = rbdr->tail;
        }

        /* Reset RBDR */
        nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx,
                                NICVF_RBDR_RESET);

        /* Disable RBDR */
        nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx, 0);
        if (nicvf_qset_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0,
                                62, 2, 0x00))
                return NICVF_ERR_RBDR_DISABLE;

        while (1) {
                status = nicvf_queue_reg_read(nic,
                                NIC_QSET_RBDR_0_1_PRFCH_STATUS, qidx);
                if ((status & 0xFFFFFFFF) == ((status >> 32) & 0xFFFFFFFF))
                        break;
                nicvf_delay_us(NICVF_REG_POLL_DELAY_US);
                timeout--;
                if (!timeout)
                        return NICVF_ERR_RBDR_PREFETCH;
        }

        nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx,
                        NICVF_RBDR_RESET);
        if (nicvf_qset_poll_reg(nic, qidx,
                        NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x02))
                return NICVF_ERR_RBDR_RESET1;

        nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx, 0x00);
        if (nicvf_qset_poll_reg(nic, qidx,
                        NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x00))
                return NICVF_ERR_RBDR_RESET2;

        return NICVF_OK;
}
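
/* Convert a queue length to the hardware's log2 size encoding:
 * regbit = log2(len) - len_shift. The length is expected to be a power
 * of two within the supported range (see the *_roundup helpers).
 */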
static int
nicvf_qsize_regbit(uint32_t len, uint32_t len_shift)
{
        int val;

        val = nicvf_log2_u32(len) - len_shift;

        assert(val >= NICVF_QSIZE_MIN_VAL);
        assert(val <= NICVF_QSIZE_MAX_VAL);
        return val;
}

int
nicvf_qset_rbdr_config(struct nicvf *nic, uint16_t qidx)
{
        int ret;
        uint64_t head, tail;
        struct nicvf_rbdr *rbdr = nic->rbdr;
        struct rbdr_cfg rbdr_cfg = {.value = 0};

        ret = nicvf_qset_rbdr_reclaim(nic, qidx);
        if (ret)
                return ret;

        /* Set descriptor base address */
        nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_BASE, qidx, rbdr->phys);

        /* Enable RBDR & set queue size */
        rbdr_cfg.ena = 1;
        rbdr_cfg.reset = 0;
        rbdr_cfg.ldwb = 0;
        rbdr_cfg.qsize = nicvf_qsize_regbit(rbdr->qlen_mask + 1,
                                                RBDR_SIZE_SHIFT);
        rbdr_cfg.avg_con = 0;
        rbdr_cfg.lines = rbdr->buffsz / 128;

        nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx, rbdr_cfg.value);

        /* Verify proper RBDR reset */
        head = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_HEAD, qidx);
        tail = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_TAIL, qidx);

        if (head | tail)
                return NICVF_ERR_RBDR_RESET;

        return NICVF_OK;
}

uint32_t
nicvf_qsize_rbdr_roundup(uint32_t val)
{
        uint32_t list[] = {RBDR_QUEUE_SZ_8K, RBDR_QUEUE_SZ_16K,
                        RBDR_QUEUE_SZ_32K, RBDR_QUEUE_SZ_64K,
                        RBDR_QUEUE_SZ_128K, RBDR_QUEUE_SZ_256K,
                        RBDR_QUEUE_SZ_512K};
        return nicvf_roundup_list(val, list, NICVF_ARRAY_SIZE(list));
}
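
/*
 * Fill an RBDR with up to @max_buffs buffers obtained from @handler,
 * then ring the doorbell once so the hardware sees them all. Stops
 * early if the pool runs dry or the ring would overflow.
 */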
int
nicvf_qset_rbdr_precharge(void *dev, struct nicvf *nic,
                          uint16_t ridx, rbdr_pool_get_handler handler,
                          uint32_t max_buffs)
{
        struct rbdr_entry_t *desc, *desc0;
        struct nicvf_rbdr *rbdr = nic->rbdr;
        uint32_t count;
        nicvf_iova_addr_t phy;

        assert(rbdr != NULL);
        desc = rbdr->desc;
        count = 0;
        /* Don't fill beyond the maximum number of descriptors */
        while (count < rbdr->qlen_mask) {
                if (count >= max_buffs)
                        break;
                desc0 = desc + count;
                phy = handler(dev, nic);
                if (phy) {
                        desc0->full_addr = phy;
                        count++;
                } else {
                        break;
                }
        }
        nicvf_smp_wmb();
        nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_DOOR, ridx, count);
        rbdr->tail = nicvf_queue_reg_read(nic,
                                NIC_QSET_RBDR_0_1_TAIL, ridx) >> 3;
        rbdr->next_tail = rbdr->tail;
        nicvf_smp_rmb();
        return 0;
}

int
nicvf_qset_rbdr_active(struct nicvf *nic, uint16_t qidx)
{
        return nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_STATUS0, qidx);
}

int
nicvf_qset_sq_reclaim(struct nicvf *nic, uint16_t qidx)
{
        uint64_t head, tail;
        struct sq_cfg sq_cfg;

        sq_cfg.value = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_CFG, qidx);

        /* Disable send queue */
        nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, 0);

        /* Check if SQ is stopped */
        if (sq_cfg.ena && nicvf_qset_poll_reg(nic, qidx, NIC_QSET_SQ_0_7_STATUS,
                                NICVF_SQ_STATUS_STOPPED_BIT, 1, 0x01))
                return NICVF_ERR_SQ_DISABLE;

        /* Reset send queue */
        nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, NICVF_SQ_RESET);
        head = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_HEAD, qidx) >> 4;
        tail = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_TAIL, qidx) >> 4;
        if (head | tail)
                return NICVF_ERR_SQ_RESET;

        return 0;
}
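
/*
 * Bring up a send queue: reclaim it, let the PF configure its side via
 * mailbox, program the descriptor base and size, enable it, and ring
 * the doorbell with zero entries so the hardware restarts SQE
 * processing from a clean state.
 */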
int
nicvf_qset_sq_config(struct nicvf *nic, uint16_t qidx, struct nicvf_txq *txq)
{
        int ret;
        struct sq_cfg sq_cfg = {.value = 0};

        ret = nicvf_qset_sq_reclaim(nic, qidx);
        if (ret)
                return ret;

        /* Send a mailbox msg to PF to config SQ */
        if (nicvf_mbox_sq_config(nic, qidx))
                return NICVF_ERR_SQ_PF_CFG;

        /* Set queue base address */
        nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_BASE, qidx, txq->phys);

        /* Enable send queue & set queue size */
        sq_cfg.cq_limit = 0;
        sq_cfg.ena = 1;
        sq_cfg.reset = 0;
        sq_cfg.ldwb = 0;
        sq_cfg.qsize = nicvf_qsize_regbit(txq->qlen_mask + 1, SND_QSIZE_SHIFT);
        sq_cfg.tstmp_bgx_intf = 0;
        nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, sq_cfg.value);

        /* Ring doorbell so that H/W restarts processing SQEs */
        nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR, qidx, 0);

        return 0;
}

uint32_t
nicvf_qsize_sq_roundup(uint32_t val)
{
        uint32_t list[] = {SND_QUEUE_SZ_1K, SND_QUEUE_SZ_2K,
                        SND_QUEUE_SZ_4K, SND_QUEUE_SZ_8K,
                        SND_QUEUE_SZ_16K, SND_QUEUE_SZ_32K,
                        SND_QUEUE_SZ_64K};
        return nicvf_roundup_list(val, list, NICVF_ARRAY_SIZE(list));
}

int
nicvf_qset_rq_reclaim(struct nicvf *nic, uint16_t qidx)
{
        /* Disable receive queue */
        nicvf_queue_reg_write(nic, NIC_QSET_RQ_0_7_CFG, qidx, 0);
        return nicvf_mbox_rq_sync(nic);
}
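
/*
 * Configure a receive queue. RQ configuration is owned by the PF, so
 * the CQ/RBDR bindings, backpressure and drop levels all go through
 * the mailbox; only the final enable is written to a VF register.
 */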
int
nicvf_qset_rq_config(struct nicvf *nic, uint16_t qidx, struct nicvf_rxq *rxq)
{
        struct pf_rq_cfg pf_rq_cfg = {.value = 0};
        struct rq_cfg rq_cfg = {.value = 0};

        if (nicvf_qset_rq_reclaim(nic, qidx))
                return NICVF_ERR_RQ_CLAIM;

        pf_rq_cfg.strip_pre_l2 = 0;
        /* First cache line of RBDR data will be allocated into L2C */
        pf_rq_cfg.caching = RQ_CACHE_ALLOC_FIRST;
        pf_rq_cfg.cq_qs = nic->vf_id;
        pf_rq_cfg.cq_idx = qidx;
        pf_rq_cfg.rbdr_cont_qs = nic->vf_id;
        pf_rq_cfg.rbdr_cont_idx = 0;
        pf_rq_cfg.rbdr_strt_qs = nic->vf_id;
        pf_rq_cfg.rbdr_strt_idx = 0;

        /* Send a mailbox msg to PF to config RQ */
        if (nicvf_mbox_rq_config(nic, qidx, &pf_rq_cfg))
                return NICVF_ERR_RQ_PF_CFG;

        /* Select Rx backpressure */
        if (nicvf_mbox_rq_bp_config(nic, qidx, rxq->rx_drop_en))
                return NICVF_ERR_RQ_BP_CFG;

        /* Send a mailbox msg to PF to config RQ drop */
        if (nicvf_mbox_rq_drop_config(nic, qidx, rxq->rx_drop_en))
                return NICVF_ERR_RQ_DROP_CFG;

        /* Enable Receive queue */
        rq_cfg.ena = 1;
        nicvf_queue_reg_write(nic, NIC_QSET_RQ_0_7_CFG, qidx, rq_cfg.value);

        return 0;
}
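
/* Disable a completion queue and poll until the hardware confirms it;
 * bit 42 of NIC_QSET_CQ_0_7_CFG is assumed to be the CQ enable bit.
 */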
int
nicvf_qset_cq_reclaim(struct nicvf *nic, uint16_t qidx)
{
        uint64_t tail, head;

        /* Disable completion queue */
        nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, 0);
        if (nicvf_qset_poll_reg(nic, qidx, NIC_QSET_CQ_0_7_CFG, 42, 1, 0))
                return NICVF_ERR_CQ_DISABLE;

        /* Reset completion queue */
        nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, NICVF_CQ_RESET);
        tail = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_TAIL, qidx) >> 9;
        head = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_HEAD, qidx) >> 9;
        if (head | tail)
                return NICVF_ERR_CQ_RESET;

        /* Disable timer threshold (doesn't get reset upon CQ reset) */
        nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG2, qidx, 0);
        return 0;
}

int
nicvf_qset_cq_config(struct nicvf *nic, uint16_t qidx, struct nicvf_rxq *rxq)
{
        int ret;
        struct cq_cfg cq_cfg = {.value = 0};

        ret = nicvf_qset_cq_reclaim(nic, qidx);
        if (ret)
                return ret;

        /* Set completion queue base address */
        nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_BASE, qidx, rxq->phys);

        cq_cfg.ena = 1;
        cq_cfg.reset = 0;
        /* Writes of CQE will be allocated into L2C */
        cq_cfg.caching = 1;
        cq_cfg.qsize = nicvf_qsize_regbit(rxq->qlen_mask + 1, CMP_QSIZE_SHIFT);
        cq_cfg.avg_con = 0;
        nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, cq_cfg.value);

        /* Set threshold value for interrupt generation */
        nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_THRESH, qidx, 0);
        nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG2, qidx, 0);
        return 0;
}

uint32_t
nicvf_qsize_cq_roundup(uint32_t val)
{
        uint32_t list[] = {CMP_QUEUE_SZ_1K, CMP_QUEUE_SZ_2K,
                        CMP_QUEUE_SZ_4K, CMP_QUEUE_SZ_8K,
                        CMP_QUEUE_SZ_16K, CMP_QUEUE_SZ_32K,
                        CMP_QUEUE_SZ_64K};
        return nicvf_roundup_list(val, list, NICVF_ARRAY_SIZE(list));
}
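
/* VLAN strip control is assumed to live at bits 26:25 of
 * NIC_VNIC_RQ_GEN_CFG; enabling strips only the first (outer) tag,
 * disabling clears stripping of both tags.
 */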
void
nicvf_vlan_hw_strip(struct nicvf *nic, bool enable)
{
        uint64_t val;

        val = nicvf_reg_read(nic, NIC_VNIC_RQ_GEN_CFG);
        if (enable)
                val |= (STRIP_FIRST_VLAN << 25);
        else
                val &= ~((STRIP_SECOND_VLAN | STRIP_FIRST_VLAN) << 25);

        nicvf_reg_write(nic, NIC_VNIC_RQ_GEN_CFG, val);
}

void
nicvf_apad_config(struct nicvf *nic, bool enable)
{
        uint64_t val;

        /* APAD is always enabled on devices without the disable capability */
        if (!(nic->hwcap & NICVF_CAP_DISABLE_APAD))
                return;

        val = nicvf_reg_read(nic, NIC_VNIC_RQ_GEN_CFG);
        if (enable)
                val &= ~(1ULL << NICVF_QS_RQ_DIS_APAD_SHIFT);
        else
                val |= (1ULL << NICVF_QS_RQ_DIS_APAD_SHIFT);

        nicvf_reg_write(nic, NIC_VNIC_RQ_GEN_CFG, val);
}
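
/* The RSS hash key is programmed as RSS_HASH_KEY_SIZE consecutive
 * 64-bit big-endian words starting at NIC_VNIC_RSS_KEY_0_4.
 */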
void
nicvf_rss_set_key(struct nicvf *nic, uint8_t *key)
{
        int idx;
        uint64_t addr, val;
        uint64_t *keyptr = (uint64_t *)key;

        addr = NIC_VNIC_RSS_KEY_0_4;
        for (idx = 0; idx < RSS_HASH_KEY_SIZE; idx++) {
                val = nicvf_cpu_to_be_64(*keyptr);
                nicvf_reg_write(nic, addr, val);
                addr += sizeof(uint64_t);
                keyptr++;
        }
}

void
nicvf_rss_get_key(struct nicvf *nic, uint8_t *key)
{
        int idx;
        uint64_t addr, val;
        uint64_t *keyptr = (uint64_t *)key;

        addr = NIC_VNIC_RSS_KEY_0_4;
        for (idx = 0; idx < RSS_HASH_KEY_SIZE; idx++) {
                val = nicvf_reg_read(nic, addr);
                *keyptr = nicvf_be_to_cpu_64(val);
                addr += sizeof(uint64_t);
                keyptr++;
        }
}

void
nicvf_rss_set_cfg(struct nicvf *nic, uint64_t val)
{
        nicvf_reg_write(nic, NIC_VNIC_RSS_CFG, val);
}

uint64_t
nicvf_rss_get_cfg(struct nicvf *nic)
{
        return nicvf_reg_read(nic, NIC_VNIC_RSS_CFG);
}
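
/*
 * Program the RSS indirection table. The PF first reports the table
 * size via mailbox (stored in nic->rss_info.rss_size); entries beyond
 * @max_count keep their previously cached values.
 */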
int
nicvf_rss_reta_update(struct nicvf *nic, uint8_t *tbl, uint32_t max_count)
{
        uint32_t idx;
        struct nicvf_rss_reta_info *rss = &nic->rss_info;

        /* Result will be stored in nic->rss_info.rss_size */
        if (nicvf_mbox_get_rss_size(nic))
                return NICVF_ERR_RSS_GET_SZ;

        assert(rss->rss_size > 0);
        rss->hash_bits = (uint8_t)nicvf_log2_u32(rss->rss_size);
        for (idx = 0; idx < rss->rss_size && idx < max_count; idx++)
                rss->ind_tbl[idx] = tbl[idx];

        if (nicvf_mbox_config_rss(nic))
                return NICVF_ERR_RSS_TBL_UPDATE;

        return NICVF_OK;
}

int
nicvf_rss_reta_query(struct nicvf *nic, uint8_t *tbl, uint32_t max_count)
{
        uint32_t idx;
        struct nicvf_rss_reta_info *rss = &nic->rss_info;

        /* Result will be stored in nic->rss_info.rss_size */
        if (nicvf_mbox_get_rss_size(nic))
                return NICVF_ERR_RSS_GET_SZ;

        assert(rss->rss_size > 0);
        rss->hash_bits = (uint8_t)nicvf_log2_u32(rss->rss_size);

        for (idx = 0; idx < rss->rss_size && idx < max_count; idx++)
                tbl[idx] = rss->ind_tbl[idx];

        return NICVF_OK;
}

int
nicvf_rss_config(struct nicvf *nic, uint32_t qcnt, uint64_t cfg)
{
        uint32_t idx;
        uint8_t default_reta[NIC_MAX_RSS_IDR_TBL_SIZE];
        uint8_t default_key[RSS_HASH_KEY_BYTE_SIZE] = {
                0xFE, 0xED, 0x0B, 0xAD, 0xFE, 0xED, 0x0B, 0xAD,
                0xFE, 0xED, 0x0B, 0xAD, 0xFE, 0xED, 0x0B, 0xAD,
                0xFE, 0xED, 0x0B, 0xAD, 0xFE, 0xED, 0x0B, 0xAD,
                0xFE, 0xED, 0x0B, 0xAD, 0xFE, 0xED, 0x0B, 0xAD,
                0xFE, 0xED, 0x0B, 0xAD, 0xFE, 0xED, 0x0B, 0xAD
        };

        if (nic->cpi_alg != CPI_ALG_NONE)
                return -EINVAL;

        if (cfg == 0)
                return -EINVAL;

        /* Update default RSS key and cfg */
        nicvf_rss_set_key(nic, default_key);
        nicvf_rss_set_cfg(nic, cfg);

        /* Update default RSS RETA */
        for (idx = 0; idx < NIC_MAX_RSS_IDR_TBL_SIZE; idx++)
                default_reta[idx] = idx % qcnt;

        return nicvf_rss_reta_update(nic, default_reta,
                        NIC_MAX_RSS_IDR_TBL_SIZE);
}

int
nicvf_rss_term(struct nicvf *nic)
{
        uint32_t idx;
        uint8_t disable_rss[NIC_MAX_RSS_IDR_TBL_SIZE];

        nicvf_rss_set_cfg(nic, 0);
        /* Redirect all traffic to queue 0 */
        for (idx = 0; idx < NIC_MAX_RSS_IDR_TBL_SIZE; idx++)
                disable_rss[idx] = 0;

        return nicvf_rss_reta_update(nic, disable_rss,
                        NIC_MAX_RSS_IDR_TBL_SIZE);
}

int
nicvf_loopback_config(struct nicvf *nic, bool enable)
{
        if (enable && nic->loopback_supported == 0)
                return NICVF_ERR_LOOPBACK_CFG;

        return nicvf_mbox_loopback_config(nic, enable);
}

void
nicvf_hw_get_stats(struct nicvf *nic, struct nicvf_hw_stats *stats)
{
        stats->rx_bytes = NICVF_GET_RX_STATS(RX_OCTS);
        stats->rx_ucast_frames = NICVF_GET_RX_STATS(RX_UCAST);
        stats->rx_bcast_frames = NICVF_GET_RX_STATS(RX_BCAST);
        stats->rx_mcast_frames = NICVF_GET_RX_STATS(RX_MCAST);
        stats->rx_fcs_errors = NICVF_GET_RX_STATS(RX_FCS);
        stats->rx_l2_errors = NICVF_GET_RX_STATS(RX_L2ERR);
        stats->rx_drop_red = NICVF_GET_RX_STATS(RX_RED);
        stats->rx_drop_red_bytes = NICVF_GET_RX_STATS(RX_RED_OCTS);
        stats->rx_drop_overrun = NICVF_GET_RX_STATS(RX_ORUN);
        stats->rx_drop_overrun_bytes = NICVF_GET_RX_STATS(RX_ORUN_OCTS);
        stats->rx_drop_bcast = NICVF_GET_RX_STATS(RX_DRP_BCAST);
        stats->rx_drop_mcast = NICVF_GET_RX_STATS(RX_DRP_MCAST);
        stats->rx_drop_l3_bcast = NICVF_GET_RX_STATS(RX_DRP_L3BCAST);
        stats->rx_drop_l3_mcast = NICVF_GET_RX_STATS(RX_DRP_L3MCAST);

        stats->tx_bytes_ok = NICVF_GET_TX_STATS(TX_OCTS);
        stats->tx_ucast_frames_ok = NICVF_GET_TX_STATS(TX_UCAST);
        stats->tx_bcast_frames_ok = NICVF_GET_TX_STATS(TX_BCAST);
        stats->tx_mcast_frames_ok = NICVF_GET_TX_STATS(TX_MCAST);
        stats->tx_drops = NICVF_GET_TX_STATS(TX_DROP);
}

void
nicvf_hw_get_rx_qstats(struct nicvf *nic, struct nicvf_hw_rx_qstats *qstats,
                       uint16_t qidx)
{
        qstats->q_rx_bytes =
                nicvf_queue_reg_read(nic, NIC_QSET_RQ_0_7_STATUS0, qidx);
        qstats->q_rx_packets =
                nicvf_queue_reg_read(nic, NIC_QSET_RQ_0_7_STATUS1, qidx);
}

void
nicvf_hw_get_tx_qstats(struct nicvf *nic, struct nicvf_hw_tx_qstats *qstats,
                       uint16_t qidx)
{
        qstats->q_tx_bytes =
                nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_STATUS0, qidx);
        qstats->q_tx_packets =
                nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_STATUS1, qidx);
}