/*
 *   BSD LICENSE
 *
 *   Copyright (C) Cavium networks Ltd. 2016.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Cavium networks nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <unistd.h>
#include <math.h>
#include <errno.h>
#include <stdarg.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>

#include "nicvf_plat.h"

struct nicvf_reg_info {
        uint32_t offset;
        const char *name;
};

#define NICVF_REG_POLL_ITER_NR   (10)
#define NICVF_REG_POLL_DELAY_US  (2000)
#define NICVF_REG_INFO(reg) {reg, #reg}

static const struct nicvf_reg_info nicvf_reg_tbl[] = {
        NICVF_REG_INFO(NIC_VF_CFG),
        NICVF_REG_INFO(NIC_VF_PF_MAILBOX_0_1),
        NICVF_REG_INFO(NIC_VF_INT),
        NICVF_REG_INFO(NIC_VF_INT_W1S),
        NICVF_REG_INFO(NIC_VF_ENA_W1C),
        NICVF_REG_INFO(NIC_VF_ENA_W1S),
        NICVF_REG_INFO(NIC_VNIC_RSS_CFG),
        NICVF_REG_INFO(NIC_VNIC_RQ_GEN_CFG),
};

static const struct nicvf_reg_info nicvf_multi_reg_tbl[] = {
        {NIC_VNIC_RSS_KEY_0_4 + 0,  "NIC_VNIC_RSS_KEY_0"},
        {NIC_VNIC_RSS_KEY_0_4 + 8,  "NIC_VNIC_RSS_KEY_1"},
        {NIC_VNIC_RSS_KEY_0_4 + 16, "NIC_VNIC_RSS_KEY_2"},
        {NIC_VNIC_RSS_KEY_0_4 + 24, "NIC_VNIC_RSS_KEY_3"},
        {NIC_VNIC_RSS_KEY_0_4 + 32, "NIC_VNIC_RSS_KEY_4"},
        {NIC_VNIC_TX_STAT_0_4 + 0,  "NIC_VNIC_STAT_TX_OCTS"},
        {NIC_VNIC_TX_STAT_0_4 + 8,  "NIC_VNIC_STAT_TX_UCAST"},
        {NIC_VNIC_TX_STAT_0_4 + 16, "NIC_VNIC_STAT_TX_BCAST"},
        {NIC_VNIC_TX_STAT_0_4 + 24, "NIC_VNIC_STAT_TX_MCAST"},
        {NIC_VNIC_TX_STAT_0_4 + 32, "NIC_VNIC_STAT_TX_DROP"},
        {NIC_VNIC_RX_STAT_0_13 + 0,  "NIC_VNIC_STAT_RX_OCTS"},
        {NIC_VNIC_RX_STAT_0_13 + 8,  "NIC_VNIC_STAT_RX_UCAST"},
        {NIC_VNIC_RX_STAT_0_13 + 16, "NIC_VNIC_STAT_RX_BCAST"},
        {NIC_VNIC_RX_STAT_0_13 + 24, "NIC_VNIC_STAT_RX_MCAST"},
        {NIC_VNIC_RX_STAT_0_13 + 32, "NIC_VNIC_STAT_RX_RED"},
        {NIC_VNIC_RX_STAT_0_13 + 40, "NIC_VNIC_STAT_RX_RED_OCTS"},
        {NIC_VNIC_RX_STAT_0_13 + 48, "NIC_VNIC_STAT_RX_ORUN"},
        {NIC_VNIC_RX_STAT_0_13 + 56, "NIC_VNIC_STAT_RX_ORUN_OCTS"},
        {NIC_VNIC_RX_STAT_0_13 + 64, "NIC_VNIC_STAT_RX_FCS"},
        {NIC_VNIC_RX_STAT_0_13 + 72, "NIC_VNIC_STAT_RX_L2ERR"},
        {NIC_VNIC_RX_STAT_0_13 + 80, "NIC_VNIC_STAT_RX_DRP_BCAST"},
        {NIC_VNIC_RX_STAT_0_13 + 88, "NIC_VNIC_STAT_RX_DRP_MCAST"},
        {NIC_VNIC_RX_STAT_0_13 + 96, "NIC_VNIC_STAT_RX_DRP_L3BCAST"},
        {NIC_VNIC_RX_STAT_0_13 + 104, "NIC_VNIC_STAT_RX_DRP_L3MCAST"},
};

static const struct nicvf_reg_info nicvf_qset_cq_reg_tbl[] = {
        NICVF_REG_INFO(NIC_QSET_CQ_0_7_CFG),
        NICVF_REG_INFO(NIC_QSET_CQ_0_7_CFG2),
        NICVF_REG_INFO(NIC_QSET_CQ_0_7_THRESH),
        NICVF_REG_INFO(NIC_QSET_CQ_0_7_BASE),
        NICVF_REG_INFO(NIC_QSET_CQ_0_7_HEAD),
        NICVF_REG_INFO(NIC_QSET_CQ_0_7_TAIL),
        NICVF_REG_INFO(NIC_QSET_CQ_0_7_DOOR),
        NICVF_REG_INFO(NIC_QSET_CQ_0_7_STATUS),
        NICVF_REG_INFO(NIC_QSET_CQ_0_7_STATUS2),
        NICVF_REG_INFO(NIC_QSET_CQ_0_7_DEBUG),
};

static const struct nicvf_reg_info nicvf_qset_rq_reg_tbl[] = {
        NICVF_REG_INFO(NIC_QSET_RQ_0_7_CFG),
        NICVF_REG_INFO(NIC_QSET_RQ_0_7_STATUS0),
        NICVF_REG_INFO(NIC_QSET_RQ_0_7_STATUS1),
};

static const struct nicvf_reg_info nicvf_qset_sq_reg_tbl[] = {
        NICVF_REG_INFO(NIC_QSET_SQ_0_7_CFG),
        NICVF_REG_INFO(NIC_QSET_SQ_0_7_THRESH),
        NICVF_REG_INFO(NIC_QSET_SQ_0_7_BASE),
        NICVF_REG_INFO(NIC_QSET_SQ_0_7_HEAD),
        NICVF_REG_INFO(NIC_QSET_SQ_0_7_TAIL),
        NICVF_REG_INFO(NIC_QSET_SQ_0_7_DOOR),
        NICVF_REG_INFO(NIC_QSET_SQ_0_7_STATUS),
        NICVF_REG_INFO(NIC_QSET_SQ_0_7_DEBUG),
        NICVF_REG_INFO(NIC_QSET_SQ_0_7_STATUS0),
        NICVF_REG_INFO(NIC_QSET_SQ_0_7_STATUS1),
};

static const struct nicvf_reg_info nicvf_qset_rbdr_reg_tbl[] = {
        NICVF_REG_INFO(NIC_QSET_RBDR_0_1_CFG),
        NICVF_REG_INFO(NIC_QSET_RBDR_0_1_THRESH),
        NICVF_REG_INFO(NIC_QSET_RBDR_0_1_BASE),
        NICVF_REG_INFO(NIC_QSET_RBDR_0_1_HEAD),
        NICVF_REG_INFO(NIC_QSET_RBDR_0_1_TAIL),
        NICVF_REG_INFO(NIC_QSET_RBDR_0_1_DOOR),
        NICVF_REG_INFO(NIC_QSET_RBDR_0_1_STATUS0),
        NICVF_REG_INFO(NIC_QSET_RBDR_0_1_STATUS1),
        NICVF_REG_INFO(NIC_QSET_RBDR_0_1_PRFCH_STATUS),
};

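/*
 * Initialize hardware capability flags: a zero subsystem device id is
 * treated as an uninitialized device, and tunnel parsing is only
 * advertised on pass2 silicon.
 */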
int
nicvf_base_init(struct nicvf *nic)
{
        nic->hwcap = 0;
        if (nic->subsystem_device_id == 0)
                return NICVF_ERR_BASE_INIT;

        if (nicvf_hw_version(nic) == NICVF_PASS2)
                nic->hwcap |= NICVF_CAP_TUNNEL_PARSING;

        return NICVF_OK;
}

/* Dump registers to stdout when data is NULL, else copy them into data */
int
nicvf_reg_dump(struct nicvf *nic, uint64_t *data)
{
        uint32_t i, q;
        bool dump_stdout;

        dump_stdout = data ? 0 : 1;

        for (i = 0; i < NICVF_ARRAY_SIZE(nicvf_reg_tbl); i++)
                if (dump_stdout)
                        nicvf_log("%24s  = 0x%" PRIx64 "\n",
                                nicvf_reg_tbl[i].name,
                                nicvf_reg_read(nic, nicvf_reg_tbl[i].offset));
                else
                        *data++ = nicvf_reg_read(nic, nicvf_reg_tbl[i].offset);

        for (i = 0; i < NICVF_ARRAY_SIZE(nicvf_multi_reg_tbl); i++)
                if (dump_stdout)
                        nicvf_log("%24s  = 0x%" PRIx64 "\n",
                                nicvf_multi_reg_tbl[i].name,
                                nicvf_reg_read(nic,
                                        nicvf_multi_reg_tbl[i].offset));
                else
                        *data++ = nicvf_reg_read(nic,
                                        nicvf_multi_reg_tbl[i].offset);

        for (q = 0; q < MAX_CMP_QUEUES_PER_QS; q++)
                for (i = 0; i < NICVF_ARRAY_SIZE(nicvf_qset_cq_reg_tbl); i++)
                        if (dump_stdout)
                                nicvf_log("%30s(%d)  = 0x%" PRIx64 "\n",
                                        nicvf_qset_cq_reg_tbl[i].name, q,
                                        nicvf_queue_reg_read(nic,
                                        nicvf_qset_cq_reg_tbl[i].offset, q));
                        else
                                *data++ = nicvf_queue_reg_read(nic,
                                        nicvf_qset_cq_reg_tbl[i].offset, q);

        for (q = 0; q < MAX_RCV_QUEUES_PER_QS; q++)
                for (i = 0; i < NICVF_ARRAY_SIZE(nicvf_qset_rq_reg_tbl); i++)
                        if (dump_stdout)
                                nicvf_log("%30s(%d)  = 0x%" PRIx64 "\n",
                                        nicvf_qset_rq_reg_tbl[i].name, q,
                                        nicvf_queue_reg_read(nic,
                                        nicvf_qset_rq_reg_tbl[i].offset, q));
                        else
                                *data++ = nicvf_queue_reg_read(nic,
                                        nicvf_qset_rq_reg_tbl[i].offset, q);

        for (q = 0; q < MAX_SND_QUEUES_PER_QS; q++)
                for (i = 0; i < NICVF_ARRAY_SIZE(nicvf_qset_sq_reg_tbl); i++)
                        if (dump_stdout)
                                nicvf_log("%30s(%d)  = 0x%" PRIx64 "\n",
                                        nicvf_qset_sq_reg_tbl[i].name, q,
                                        nicvf_queue_reg_read(nic,
                                        nicvf_qset_sq_reg_tbl[i].offset, q));
                        else
                                *data++ = nicvf_queue_reg_read(nic,
                                        nicvf_qset_sq_reg_tbl[i].offset, q);

        for (q = 0; q < MAX_RCV_BUF_DESC_RINGS_PER_QS; q++)
                for (i = 0; i < NICVF_ARRAY_SIZE(nicvf_qset_rbdr_reg_tbl); i++)
                        if (dump_stdout)
                                nicvf_log("%30s(%d)  = 0x%" PRIx64 "\n",
                                        nicvf_qset_rbdr_reg_tbl[i].name, q,
                                        nicvf_queue_reg_read(nic,
                                        nicvf_qset_rbdr_reg_tbl[i].offset, q));
                        else
                                *data++ = nicvf_queue_reg_read(nic,
                                        nicvf_qset_rbdr_reg_tbl[i].offset, q);
        return 0;
}

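/* Total number of registers captured by nicvf_reg_dump() */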
int
nicvf_reg_get_count(void)
{
        int nr_regs;

        nr_regs = NICVF_ARRAY_SIZE(nicvf_reg_tbl);
        nr_regs += NICVF_ARRAY_SIZE(nicvf_multi_reg_tbl);
        nr_regs += NICVF_ARRAY_SIZE(nicvf_qset_cq_reg_tbl) *
                        MAX_CMP_QUEUES_PER_QS;
        nr_regs += NICVF_ARRAY_SIZE(nicvf_qset_rq_reg_tbl) *
                        MAX_RCV_QUEUES_PER_QS;
        nr_regs += NICVF_ARRAY_SIZE(nicvf_qset_sq_reg_tbl) *
                        MAX_SND_QUEUES_PER_QS;
        nr_regs += NICVF_ARRAY_SIZE(nicvf_qset_rbdr_reg_tbl) *
                        MAX_RCV_BUF_DESC_RINGS_PER_QS;

        return nr_regs;
}

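/* Ask the PF over the mailbox to enable or disable this VF's Qset */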
static int
nicvf_qset_config_internal(struct nicvf *nic, bool enable)
{
        int ret;
        struct pf_qs_cfg pf_qs_cfg = {.value = 0};

        pf_qs_cfg.ena = enable ? 1 : 0;
        pf_qs_cfg.vnic = nic->vf_id;
        ret = nicvf_mbox_qset_config(nic, &pf_qs_cfg);
        return ret ? NICVF_ERR_SET_QS : 0;
}

/* Requests PF to assign and enable Qset */
int
nicvf_qset_config(struct nicvf *nic)
{
        /* Enable Qset */
        return nicvf_qset_config_internal(nic, true);
}

int
nicvf_qset_reclaim(struct nicvf *nic)
{
        /* Disable Qset */
        return nicvf_qset_config_internal(nic, false);
}

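/* Ascending-order comparator for qsort() over the uint32_t queue-size lists */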
static int
cmpfunc(const void *a, const void *b)
{
        return (*(const uint32_t *)a - *(const uint32_t *)b);
}

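/* Return the smallest entry in list that is >= val, or 0 if val exceeds them all */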
static uint32_t
nicvf_roundup_list(uint32_t val, uint32_t list[], uint32_t entries)
{
        uint32_t i;

        qsort(list, entries, sizeof(uint32_t), cmpfunc);
        for (i = 0; i < entries; i++)
                if (val <= list[i])
                        break;
        /* Not in the list */
        if (i >= entries)
                return 0;
        else
                return list[i];
}

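/*
 * Fatal Qset error handler: dump all registers, log and clear every
 * per-queue error status, then disable interrupts and abort.
 */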
static void
nicvf_handle_qset_err_intr(struct nicvf *nic)
{
        uint16_t qidx;
        uint64_t status;

        nicvf_log("%s (VF%d)\n", __func__, nic->vf_id);
        nicvf_reg_dump(nic, NULL);

        for (qidx = 0; qidx < MAX_CMP_QUEUES_PER_QS; qidx++) {
                status = nicvf_queue_reg_read(
                                nic, NIC_QSET_CQ_0_7_STATUS, qidx);
                if (!(status & NICVF_CQ_ERR_MASK))
                        continue;

                if (status & NICVF_CQ_WR_FULL)
                        nicvf_log("[%d]NICVF_CQ_WR_FULL\n", qidx);
                if (status & NICVF_CQ_WR_DISABLE)
                        nicvf_log("[%d]NICVF_CQ_WR_DISABLE\n", qidx);
                if (status & NICVF_CQ_WR_FAULT)
                        nicvf_log("[%d]NICVF_CQ_WR_FAULT\n", qidx);
                nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_STATUS, qidx, 0);
        }

        for (qidx = 0; qidx < MAX_SND_QUEUES_PER_QS; qidx++) {
                status = nicvf_queue_reg_read(
                                nic, NIC_QSET_SQ_0_7_STATUS, qidx);
                if (!(status & NICVF_SQ_ERR_MASK))
                        continue;

                if (status & NICVF_SQ_ERR_STOPPED)
                        nicvf_log("[%d]NICVF_SQ_ERR_STOPPED\n", qidx);
                if (status & NICVF_SQ_ERR_SEND)
                        nicvf_log("[%d]NICVF_SQ_ERR_SEND\n", qidx);
                if (status & NICVF_SQ_ERR_DPE)
                        nicvf_log("[%d]NICVF_SQ_ERR_DPE\n", qidx);
                nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_STATUS, qidx, 0);
        }

        for (qidx = 0; qidx < MAX_RCV_BUF_DESC_RINGS_PER_QS; qidx++) {
                status = nicvf_queue_reg_read(nic,
                                NIC_QSET_RBDR_0_1_STATUS0, qidx);
                status &= NICVF_RBDR_FIFO_STATE_MASK;
                status >>= NICVF_RBDR_FIFO_STATE_SHIFT;

                if (status == RBDR_FIFO_STATE_FAIL)
                        nicvf_log("[%d]RBDR_FIFO_STATE_FAIL\n", qidx);
                nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_STATUS0, qidx, 0);
        }

        nicvf_disable_all_interrupts(nic);
        abort();
}

/*
 * Handle the "mbox" and "queue-set error" interrupts that the poll
 * mode driver is interested in. This function is not re-entrant;
 * the caller must provide proper serialization.
 */
int
nicvf_reg_poll_interrupts(struct nicvf *nic)
{
        int msg = 0;
        uint64_t intr;

        intr = nicvf_reg_read(nic, NIC_VF_INT);
        if (intr & NICVF_INTR_MBOX_MASK) {
                nicvf_reg_write(nic, NIC_VF_INT, NICVF_INTR_MBOX_MASK);
                msg = nicvf_handle_mbx_intr(nic);
        }
        if (intr & NICVF_INTR_QS_ERR_MASK) {
                nicvf_reg_write(nic, NIC_VF_INT, NICVF_INTR_QS_ERR_MASK);
                nicvf_handle_qset_err_intr(nic);
        }
        return msg;
}

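/* Poll a queue register bit field until it reads back val, or time out */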
static int
nicvf_qset_poll_reg(struct nicvf *nic, uint16_t qidx, uint32_t offset,
                    uint32_t bit_pos, uint32_t bits, uint64_t val)
{
        uint64_t bit_mask;
        uint64_t reg_val;
        int timeout = NICVF_REG_POLL_ITER_NR;

        bit_mask = (1ULL << bits) - 1;
        bit_mask = (bit_mask << bit_pos);

        while (timeout) {
                reg_val = nicvf_queue_reg_read(nic, offset, qidx);
                if (((reg_val & bit_mask) >> bit_pos) == val)
                        return NICVF_OK;
                nicvf_delay_us(NICVF_REG_POLL_DELAY_US);
                timeout--;
        }
        return NICVF_ERR_REG_POLL;
}

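/*
 * Quiesce an RBDR: save head/tail so the attached buffers can be freed,
 * disable the ring, wait for the prefetch status halves to match, then
 * cycle the ring through reset back to the disabled state.
 */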
int
nicvf_qset_rbdr_reclaim(struct nicvf *nic, uint16_t qidx)
{
        uint64_t status;
        int timeout = NICVF_REG_POLL_ITER_NR;
        struct nicvf_rbdr *rbdr = nic->rbdr;

        /* Save head and tail pointers for freeing up buffers */
        if (rbdr) {
                rbdr->head = nicvf_queue_reg_read(nic,
                                NIC_QSET_RBDR_0_1_HEAD, qidx) >> 3;
                rbdr->tail = nicvf_queue_reg_read(nic,
                                NIC_QSET_RBDR_0_1_TAIL, qidx) >> 3;
                rbdr->next_tail = rbdr->tail;
        }

        /* Reset RBDR */
        nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx,
                                NICVF_RBDR_RESET);

        /* Disable RBDR */
        nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx, 0);
        if (nicvf_qset_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0,
                                62, 2, 0x00))
                return NICVF_ERR_RBDR_DISABLE;

        while (1) {
                status = nicvf_queue_reg_read(nic,
                                NIC_QSET_RBDR_0_1_PRFCH_STATUS, qidx);
                if ((status & 0xFFFFFFFF) == ((status >> 32) & 0xFFFFFFFF))
                        break;
                nicvf_delay_us(NICVF_REG_POLL_DELAY_US);
                timeout--;
                if (!timeout)
                        return NICVF_ERR_RBDR_PREFETCH;
        }

        nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx,
                        NICVF_RBDR_RESET);
        if (nicvf_qset_poll_reg(nic, qidx,
                        NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x02))
                return NICVF_ERR_RBDR_RESET1;

        nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx, 0x00);
        if (nicvf_qset_poll_reg(nic, qidx,
                        NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x00))
                return NICVF_ERR_RBDR_RESET2;

        return NICVF_OK;
}

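/* Encode a queue length as the log2-based value the CFG registers expect */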
static int
nicvf_qsize_regbit(uint32_t len, uint32_t len_shift)
{
        int val;

        val = ((uint32_t)log2(len) - len_shift);
        assert(val >= NICVF_QSIZE_MIN_VAL);
        assert(val <= NICVF_QSIZE_MAX_VAL);
        return val;
}

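/* Reclaim the RBDR, then program its base address, size and buffer line count */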
int
nicvf_qset_rbdr_config(struct nicvf *nic, uint16_t qidx)
{
        int ret;
        uint64_t head, tail;
        struct nicvf_rbdr *rbdr = nic->rbdr;
        struct rbdr_cfg rbdr_cfg = {.value = 0};

        ret = nicvf_qset_rbdr_reclaim(nic, qidx);
        if (ret)
                return ret;

        /* Set descriptor base address */
        nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_BASE, qidx, rbdr->phys);

        /* Enable RBDR & set queue size */
        rbdr_cfg.ena = 1;
        rbdr_cfg.reset = 0;
        rbdr_cfg.ldwb = 0;
        rbdr_cfg.qsize = nicvf_qsize_regbit(rbdr->qlen_mask + 1,
                                                RBDR_SIZE_SHIFT);
        rbdr_cfg.avg_con = 0;
        rbdr_cfg.lines = rbdr->buffsz / 128;

        nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx, rbdr_cfg.value);

        /* Verify proper RBDR reset */
        head = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_HEAD, qidx);
        tail = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_TAIL, qidx);

        if (head | tail)
                return NICVF_ERR_RBDR_RESET;

        return NICVF_OK;
}

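/* Round a requested RBDR length up to the nearest supported queue size */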
uint32_t
nicvf_qsize_rbdr_roundup(uint32_t val)
{
        uint32_t list[] = {RBDR_QUEUE_SZ_8K, RBDR_QUEUE_SZ_16K,
                        RBDR_QUEUE_SZ_32K, RBDR_QUEUE_SZ_64K,
                        RBDR_QUEUE_SZ_128K, RBDR_QUEUE_SZ_256K,
                        RBDR_QUEUE_SZ_512K};
        return nicvf_roundup_list(val, list, NICVF_ARRAY_SIZE(list));
}

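/*
 * Fill the RBDR with buffers obtained from 'handler' and ring the
 * doorbell to hand them to hardware. Stops early if the handler runs
 * out of buffers or max_buffs is reached.
 */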
int
nicvf_qset_rbdr_precharge(struct nicvf *nic, uint16_t ridx,
                          rbdr_pool_get_handler handler,
                          void *opaque, uint32_t max_buffs)
{
        struct rbdr_entry_t *desc, *desc0;
        struct nicvf_rbdr *rbdr = nic->rbdr;
        uint32_t count;
        nicvf_phys_addr_t phy;

        assert(rbdr != NULL);
        desc = rbdr->desc;
        count = 0;
        /* Don't fill beyond the maximum number of descriptors */
        while (count < rbdr->qlen_mask) {
                if (count >= max_buffs)
                        break;
                desc0 = desc + count;
                phy = handler(opaque);
                if (phy) {
                        desc0->full_addr = phy;
                        count++;
                } else {
                        break;
                }
        }
        nicvf_smp_wmb();
        nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_DOOR, ridx, count);
        rbdr->tail = nicvf_queue_reg_read(nic,
                                NIC_QSET_RBDR_0_1_TAIL, ridx) >> 3;
        rbdr->next_tail = rbdr->tail;
        nicvf_smp_rmb();
        return 0;
}

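/* Report RBDR activity by returning the raw NIC_QSET_RBDR_0_1_STATUS0 value */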
int
nicvf_qset_rbdr_active(struct nicvf *nic, uint16_t qidx)
{
        return nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_STATUS0, qidx);
}

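/* Disable an SQ, wait for it to stop if it was enabled, then reset it */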
int
nicvf_qset_sq_reclaim(struct nicvf *nic, uint16_t qidx)
{
        uint64_t head, tail;
        struct sq_cfg sq_cfg;

        sq_cfg.value = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_CFG, qidx);

        /* Disable send queue */
        nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, 0);

        /* Check if SQ is stopped */
        if (sq_cfg.ena && nicvf_qset_poll_reg(nic, qidx, NIC_QSET_SQ_0_7_STATUS,
                                NICVF_SQ_STATUS_STOPPED_BIT, 1, 0x01))
                return NICVF_ERR_SQ_DISABLE;

        /* Reset send queue */
        nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, NICVF_SQ_RESET);
        head = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_HEAD, qidx) >> 4;
        tail = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_TAIL, qidx) >> 4;
        if (head | tail)
                return NICVF_ERR_SQ_RESET;

        return 0;
}

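/* Reclaim the SQ, have the PF configure it, then program base address and size */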
int
nicvf_qset_sq_config(struct nicvf *nic, uint16_t qidx, struct nicvf_txq *txq)
{
        int ret;
        struct sq_cfg sq_cfg = {.value = 0};

        ret = nicvf_qset_sq_reclaim(nic, qidx);
        if (ret)
                return ret;

        /* Send a mailbox msg to PF to config SQ */
        if (nicvf_mbox_sq_config(nic, qidx))
                return NICVF_ERR_SQ_PF_CFG;

        /* Set queue base address */
        nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_BASE, qidx, txq->phys);

        /* Enable send queue & set queue size */
        sq_cfg.ena = 1;
        sq_cfg.reset = 0;
        sq_cfg.ldwb = 0;
        sq_cfg.qsize = nicvf_qsize_regbit(txq->qlen_mask + 1, SND_QSIZE_SHIFT);
        sq_cfg.tstmp_bgx_intf = 0;
        nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, sq_cfg.value);

        /* Ring doorbell so that H/W restarts processing SQEs */
        nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR, qidx, 0);

        return 0;
}

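/* Round a requested SQ length up to the nearest supported queue size */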
uint32_t
nicvf_qsize_sq_roundup(uint32_t val)
{
        uint32_t list[] = {SND_QUEUE_SZ_1K, SND_QUEUE_SZ_2K,
                        SND_QUEUE_SZ_4K, SND_QUEUE_SZ_8K,
                        SND_QUEUE_SZ_16K, SND_QUEUE_SZ_32K,
                        SND_QUEUE_SZ_64K};
        return nicvf_roundup_list(val, list, NICVF_ARRAY_SIZE(list));
}

int
nicvf_qset_rq_reclaim(struct nicvf *nic, uint16_t qidx)
{
        /* Disable receive queue */
        nicvf_queue_reg_write(nic, NIC_QSET_RQ_0_7_CFG, qidx, 0);
        return nicvf_mbox_rq_sync(nic);
}

int
nicvf_qset_rq_config(struct nicvf *nic, uint16_t qidx, struct nicvf_rxq *rxq)
{
        struct pf_rq_cfg pf_rq_cfg = {.value = 0};
        struct rq_cfg rq_cfg = {.value = 0};

        if (nicvf_qset_rq_reclaim(nic, qidx))
                return NICVF_ERR_RQ_CLAIM;

        pf_rq_cfg.strip_pre_l2 = 0;
        /* First cache line of RBDR data will be allocated into L2C */
        pf_rq_cfg.caching = RQ_CACHE_ALLOC_FIRST;
        pf_rq_cfg.cq_qs = nic->vf_id;
        pf_rq_cfg.cq_idx = qidx;
        pf_rq_cfg.rbdr_cont_qs = nic->vf_id;
        pf_rq_cfg.rbdr_cont_idx = 0;
        pf_rq_cfg.rbdr_strt_qs = nic->vf_id;
        pf_rq_cfg.rbdr_strt_idx = 0;

        /* Send a mailbox msg to PF to config RQ */
        if (nicvf_mbox_rq_config(nic, qidx, &pf_rq_cfg))
                return NICVF_ERR_RQ_PF_CFG;

        /* Select Rx backpressure */
        if (nicvf_mbox_rq_bp_config(nic, qidx, rxq->rx_drop_en))
                return NICVF_ERR_RQ_BP_CFG;

        /* Send a mailbox msg to PF to config RQ drop */
        if (nicvf_mbox_rq_drop_config(nic, qidx, rxq->rx_drop_en))
                return NICVF_ERR_RQ_DROP_CFG;

        /* Enable Receive queue */
        rq_cfg.ena = 1;
        nicvf_queue_reg_write(nic, NIC_QSET_RQ_0_7_CFG, qidx, rq_cfg.value);

        return 0;
}

int
nicvf_qset_cq_reclaim(struct nicvf *nic, uint16_t qidx)
{
        uint64_t tail, head;

        /* Disable completion queue */
        nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, 0);
        if (nicvf_qset_poll_reg(nic, qidx, NIC_QSET_CQ_0_7_CFG, 42, 1, 0))
                return NICVF_ERR_CQ_DISABLE;

        /* Reset completion queue */
        nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, NICVF_CQ_RESET);
        tail = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_TAIL, qidx) >> 9;
        head = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_HEAD, qidx) >> 9;
        if (head | tail)
                return NICVF_ERR_CQ_RESET;

        /* Disable timer threshold (doesn't get reset upon CQ reset) */
        nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG2, qidx, 0);
        return 0;
}

int
nicvf_qset_cq_config(struct nicvf *nic, uint16_t qidx, struct nicvf_rxq *rxq)
{
        int ret;
        struct cq_cfg cq_cfg = {.value = 0};

        ret = nicvf_qset_cq_reclaim(nic, qidx);
        if (ret)
                return ret;

        /* Set completion queue base address */
        nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_BASE, qidx, rxq->phys);

        cq_cfg.ena = 1;
        cq_cfg.reset = 0;
        /* Writes of CQE will be allocated into L2C */
        cq_cfg.caching = 1;
        cq_cfg.qsize = nicvf_qsize_regbit(rxq->qlen_mask + 1, CMP_QSIZE_SHIFT);
        cq_cfg.avg_con = 0;
        nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, cq_cfg.value);

        /* Set threshold value for interrupt generation */
        nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_THRESH, qidx, 0);
        nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG2, qidx, 0);
        return 0;
}

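/* Round a requested CQ length up to the nearest supported queue size */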
uint32_t
nicvf_qsize_cq_roundup(uint32_t val)
{
        uint32_t list[] = {CMP_QUEUE_SZ_1K, CMP_QUEUE_SZ_2K,
                        CMP_QUEUE_SZ_4K, CMP_QUEUE_SZ_8K,
                        CMP_QUEUE_SZ_16K, CMP_QUEUE_SZ_32K,
                        CMP_QUEUE_SZ_64K};
        return nicvf_roundup_list(val, list, NICVF_ARRAY_SIZE(list));
}

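/*
 * Toggle hardware VLAN stripping via NIC_VNIC_RQ_GEN_CFG; disabling
 * clears both the first- and second-VLAN strip bits.
 */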
void
nicvf_vlan_hw_strip(struct nicvf *nic, bool enable)
{
        uint64_t val;

        val = nicvf_reg_read(nic, NIC_VNIC_RQ_GEN_CFG);
        if (enable)
                val |= (STRIP_FIRST_VLAN << 25);
        else
                val &= ~((STRIP_SECOND_VLAN | STRIP_FIRST_VLAN) << 25);

        nicvf_reg_write(nic, NIC_VNIC_RQ_GEN_CFG, val);
}

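/* Program the RSS hash key as big-endian 64-bit words into the key registers */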
void
nicvf_rss_set_key(struct nicvf *nic, uint8_t *key)
{
        int idx;
        uint64_t addr, val;
        uint64_t *keyptr = (uint64_t *)key;

        addr = NIC_VNIC_RSS_KEY_0_4;
        for (idx = 0; idx < RSS_HASH_KEY_SIZE; idx++) {
                val = nicvf_cpu_to_be_64(*keyptr);
                nicvf_reg_write(nic, addr, val);
                addr += sizeof(uint64_t);
                keyptr++;
        }
}

void
nicvf_rss_get_key(struct nicvf *nic, uint8_t *key)
{
        int idx;
        uint64_t addr, val;
        uint64_t *keyptr = (uint64_t *)key;

        addr = NIC_VNIC_RSS_KEY_0_4;
        for (idx = 0; idx < RSS_HASH_KEY_SIZE; idx++) {
                val = nicvf_reg_read(nic, addr);
                *keyptr = nicvf_be_to_cpu_64(val);
                addr += sizeof(uint64_t);
                keyptr++;
        }
}

void
nicvf_rss_set_cfg(struct nicvf *nic, uint64_t val)
{
        nicvf_reg_write(nic, NIC_VNIC_RSS_CFG, val);
}

uint64_t
nicvf_rss_get_cfg(struct nicvf *nic)
{
        return nicvf_reg_read(nic, NIC_VNIC_RSS_CFG);
}

int
nicvf_rss_reta_update(struct nicvf *nic, uint8_t *tbl, uint32_t max_count)
{
        uint32_t idx;
        struct nicvf_rss_reta_info *rss = &nic->rss_info;

        /* Result will be stored in nic->rss_info.rss_size */
        if (nicvf_mbox_get_rss_size(nic))
                return NICVF_ERR_RSS_GET_SZ;

        assert(rss->rss_size > 0);
        rss->hash_bits = (uint8_t)log2(rss->rss_size);
        for (idx = 0; idx < rss->rss_size && idx < max_count; idx++)
                rss->ind_tbl[idx] = tbl[idx];

        if (nicvf_mbox_config_rss(nic))
                return NICVF_ERR_RSS_TBL_UPDATE;

        return NICVF_OK;
}

int
nicvf_rss_reta_query(struct nicvf *nic, uint8_t *tbl, uint32_t max_count)
{
        uint32_t idx;
        struct nicvf_rss_reta_info *rss = &nic->rss_info;

        /* Result will be stored in nic->rss_info.rss_size */
        if (nicvf_mbox_get_rss_size(nic))
                return NICVF_ERR_RSS_GET_SZ;

        assert(rss->rss_size > 0);
        rss->hash_bits = (uint8_t)log2(rss->rss_size);
        for (idx = 0; idx < rss->rss_size && idx < max_count; idx++)
                tbl[idx] = rss->ind_tbl[idx];

        return NICVF_OK;
}

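/*
 * Enable RSS with the given hash configuration: program a fixed default
 * key and an indirection table that spreads entries across qcnt queues
 * round-robin.
 */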
int
nicvf_rss_config(struct nicvf *nic, uint32_t qcnt, uint64_t cfg)
{
        uint32_t idx;
        uint8_t default_reta[NIC_MAX_RSS_IDR_TBL_SIZE];
        uint8_t default_key[RSS_HASH_KEY_BYTE_SIZE] = {
                0xFE, 0xED, 0x0B, 0xAD, 0xFE, 0xED, 0x0B, 0xAD,
                0xFE, 0xED, 0x0B, 0xAD, 0xFE, 0xED, 0x0B, 0xAD,
                0xFE, 0xED, 0x0B, 0xAD, 0xFE, 0xED, 0x0B, 0xAD,
                0xFE, 0xED, 0x0B, 0xAD, 0xFE, 0xED, 0x0B, 0xAD,
                0xFE, 0xED, 0x0B, 0xAD, 0xFE, 0xED, 0x0B, 0xAD
        };

        if (nic->cpi_alg != CPI_ALG_NONE)
                return -EINVAL;

        if (cfg == 0)
                return -EINVAL;

        /* Update default RSS key and cfg */
        nicvf_rss_set_key(nic, default_key);
        nicvf_rss_set_cfg(nic, cfg);

        /* Update default RSS RETA */
        for (idx = 0; idx < NIC_MAX_RSS_IDR_TBL_SIZE; idx++)
                default_reta[idx] = idx % qcnt;

        return nicvf_rss_reta_update(nic, default_reta,
                        NIC_MAX_RSS_IDR_TBL_SIZE);
}

int
nicvf_rss_term(struct nicvf *nic)
{
        uint32_t idx;
        uint8_t disable_rss[NIC_MAX_RSS_IDR_TBL_SIZE];

        nicvf_rss_set_cfg(nic, 0);
        /* Redirect the output to 0th queue */
        for (idx = 0; idx < NIC_MAX_RSS_IDR_TBL_SIZE; idx++)
                disable_rss[idx] = 0;

        return nicvf_rss_reta_update(nic, disable_rss,
                        NIC_MAX_RSS_IDR_TBL_SIZE);
}

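/* Request internal loopback from the PF; fails unless loopback support was advertised */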
int
nicvf_loopback_config(struct nicvf *nic, bool enable)
{
        if (enable && nic->loopback_supported == 0)
                return NICVF_ERR_LOOPBACK_CFG;

        return nicvf_mbox_loopback_config(nic, enable);
}

void
nicvf_hw_get_stats(struct nicvf *nic, struct nicvf_hw_stats *stats)
{
        stats->rx_bytes = NICVF_GET_RX_STATS(RX_OCTS);
        stats->rx_ucast_frames = NICVF_GET_RX_STATS(RX_UCAST);
        stats->rx_bcast_frames = NICVF_GET_RX_STATS(RX_BCAST);
        stats->rx_mcast_frames = NICVF_GET_RX_STATS(RX_MCAST);
        stats->rx_fcs_errors = NICVF_GET_RX_STATS(RX_FCS);
        stats->rx_l2_errors = NICVF_GET_RX_STATS(RX_L2ERR);
        stats->rx_drop_red = NICVF_GET_RX_STATS(RX_RED);
        stats->rx_drop_red_bytes = NICVF_GET_RX_STATS(RX_RED_OCTS);
        stats->rx_drop_overrun = NICVF_GET_RX_STATS(RX_ORUN);
        stats->rx_drop_overrun_bytes = NICVF_GET_RX_STATS(RX_ORUN_OCTS);
        stats->rx_drop_bcast = NICVF_GET_RX_STATS(RX_DRP_BCAST);
        stats->rx_drop_mcast = NICVF_GET_RX_STATS(RX_DRP_MCAST);
        stats->rx_drop_l3_bcast = NICVF_GET_RX_STATS(RX_DRP_L3BCAST);
        stats->rx_drop_l3_mcast = NICVF_GET_RX_STATS(RX_DRP_L3MCAST);

        stats->tx_bytes_ok = NICVF_GET_TX_STATS(TX_OCTS);
        stats->tx_ucast_frames_ok = NICVF_GET_TX_STATS(TX_UCAST);
        stats->tx_bcast_frames_ok = NICVF_GET_TX_STATS(TX_BCAST);
        stats->tx_mcast_frames_ok = NICVF_GET_TX_STATS(TX_MCAST);
        stats->tx_drops = NICVF_GET_TX_STATS(TX_DROP);
}

void
nicvf_hw_get_rx_qstats(struct nicvf *nic, struct nicvf_hw_rx_qstats *qstats,
                       uint16_t qidx)
{
        qstats->q_rx_bytes =
                nicvf_queue_reg_read(nic, NIC_QSET_RQ_0_7_STATUS0, qidx);
        qstats->q_rx_packets =
                nicvf_queue_reg_read(nic, NIC_QSET_RQ_0_7_STATUS1, qidx);
}

void
nicvf_hw_get_tx_qstats(struct nicvf *nic, struct nicvf_hw_tx_qstats *qstats,
                       uint16_t qidx)
{
        qstats->q_tx_bytes =
                nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_STATUS0, qidx);
        qstats->q_tx_packets =
                nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_STATUS1, qidx);
}