drivers/net/bnxt/bnxt_hwrm.c (deb_dpdk.git @ e710e6367f7893354211a69037216bed29ed978e)
/*-
 *   BSD LICENSE
 *
 *   Copyright(c) Broadcom Limited.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Broadcom Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <unistd.h>

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_io.h>
#include <rte_malloc.h>
#include <rte_memzone.h>
#include <rte_version.h>

#include "bnxt.h"
#include "bnxt_cpr.h"
#include "bnxt_filter.h"
#include "bnxt_hwrm.h"
#include "bnxt_rxq.h"
#include "bnxt_rxr.h"
#include "bnxt_ring.h"
#include "bnxt_txq.h"
#include "bnxt_txr.h"
#include "bnxt_vnic.h"
#include "hsi_struct_def_dpdk.h"

#define HWRM_CMD_TIMEOUT                2000
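/*
 * This is a poll-iteration count, not a time: each poll of the response
 * "valid" byte below waits 600us, so 2000 iterations allows roughly 1.2
 * seconds for the firmware to respond.
 */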

struct bnxt_plcmodes_cfg {
	uint32_t	flags;
	uint16_t	jumbo_thresh;
	uint16_t	hds_offset;
	uint16_t	hds_threshold;
};

static int page_getenum(size_t size)
{
	if (size <= 1 << 4)
		return 4;
	if (size <= 1 << 12)
		return 12;
	if (size <= 1 << 13)
		return 13;
	if (size <= 1 << 16)
		return 16;
	if (size <= 1 << 21)
		return 21;
	if (size <= 1 << 22)
		return 22;
	if (size <= 1 << 30)
		return 30;
	RTE_LOG(ERR, PMD, "Page size %zu out of range\n", size);
	return sizeof(void *) * 8 - 1;
}

static int page_roundup(size_t size)
{
	return 1 << page_getenum(size);
}
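
/*
 * For example, page_roundup(3000) returns 4096 (1 << 12) and
 * page_roundup(70000) returns 2MB (1 << 21).
 */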

/*
 * HWRM Functions (sent to HWRM)
 * These are named bnxt_hwrm_*() and return -1 if bnxt_hwrm_send_message()
 * fails (i.e., a timeout), or a positive, non-zero HWRM error code if the
 * command was rejected by the ChiMP firmware.
 */

static int bnxt_hwrm_send_message_locked(struct bnxt *bp, void *msg,
					uint32_t msg_len)
{
	unsigned int i;
	struct input *req = msg;
	struct output *resp = bp->hwrm_cmd_resp_addr;
	uint32_t *data = msg;
	uint8_t *bar;
	uint8_t *valid;
	uint16_t max_req_len = bp->max_req_len;
	struct hwrm_short_input short_input = { 0 };

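	/*
	 * In short-command mode the full request stays in host memory
	 * (hwrm_short_cmd_req_addr) and only the small hwrm_short_input
	 * descriptor, which points at that buffer, is written to BAR0.
	 */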
	if (bp->flags & BNXT_FLAG_SHORT_CMD) {
		void *short_cmd_req = bp->hwrm_short_cmd_req_addr;

		memset(short_cmd_req, 0, bp->max_req_len);
		memcpy(short_cmd_req, req, msg_len);

		short_input.req_type = rte_cpu_to_le_16(req->req_type);
		short_input.signature = rte_cpu_to_le_16(
					HWRM_SHORT_REQ_SIGNATURE_SHORT_CMD);
		short_input.size = rte_cpu_to_le_16(msg_len);
		short_input.req_addr =
			rte_cpu_to_le_64(bp->hwrm_short_cmd_req_dma_addr);

		data = (uint32_t *)&short_input;
		msg_len = sizeof(short_input);

		/* Sync memory write before updating doorbell */
		rte_wmb();

		max_req_len = BNXT_HWRM_SHORT_REQ_LEN;
	}

	/* Write request msg to hwrm channel */
	for (i = 0; i < msg_len; i += 4) {
		bar = (uint8_t *)bp->bar0 + i;
		rte_write32(*data, bar);
		data++;
	}

	/* Zero the rest of the request space */
	for (; i < max_req_len; i += 4) {
		bar = (uint8_t *)bp->bar0 + i;
		rte_write32(0, bar);
	}

	/* Ring channel doorbell */
	bar = (uint8_t *)bp->bar0 + 0x100;
	rte_write32(1, bar);

	/* Poll for the valid bit */
	for (i = 0; i < HWRM_CMD_TIMEOUT; i++) {
		/* Sanity check on the resp->resp_len */
		rte_rmb();
		if (resp->resp_len && resp->resp_len <= bp->max_resp_len) {
			/* Last byte of resp contains the valid key */
			valid = (uint8_t *)resp + resp->resp_len - 1;
			if (*valid == HWRM_RESP_VALID_KEY)
				break;
		}
		rte_delay_us(600);
	}

	if (i >= HWRM_CMD_TIMEOUT) {
		RTE_LOG(ERR, PMD, "Error sending msg 0x%04x\n",
			req->req_type);
		goto err_ret;
	}
	return 0;

err_ret:
	return -1;
}

static int bnxt_hwrm_send_message(struct bnxt *bp, void *msg, uint32_t msg_len)
{
	int rc;

	rte_spinlock_lock(&bp->hwrm_lock);
	rc = bnxt_hwrm_send_message_locked(bp, msg, msg_len);
	rte_spinlock_unlock(&bp->hwrm_lock);
	return rc;
}

#define HWRM_PREP(req, type, cr, resp) \
	memset(bp->hwrm_cmd_resp_addr, 0, bp->max_resp_len); \
	req.req_type = rte_cpu_to_le_16(HWRM_##type); \
	req.cmpl_ring = rte_cpu_to_le_16(cr); \
	req.seq_id = rte_cpu_to_le_16(bp->hwrm_cmd_seq++); \
	req.target_id = rte_cpu_to_le_16(0xffff); \
	req.resp_addr = rte_cpu_to_le_64(bp->hwrm_cmd_resp_dma_addr)

#define HWRM_CHECK_RESULT \
	{ \
		if (rc) { \
			RTE_LOG(ERR, PMD, "%s failed rc:%d\n", \
				__func__, rc); \
			return rc; \
		} \
		if (resp->error_code) { \
			rc = rte_le_to_cpu_16(resp->error_code); \
			if (resp->resp_len >= 16) { \
				struct hwrm_err_output *tmp_hwrm_err_op = \
							(void *)resp; \
				RTE_LOG(ERR, PMD, \
					"%s error %d:%d:%08x:%04x\n", \
					__func__, \
					rc, tmp_hwrm_err_op->cmd_err, \
					rte_le_to_cpu_32(\
						tmp_hwrm_err_op->opaque_0), \
					rte_le_to_cpu_16(\
						tmp_hwrm_err_op->opaque_1)); \
			} else { \
				RTE_LOG(ERR, PMD, \
					"%s error %d\n", __func__, rc); \
			} \
			return rc; \
		} \
	}
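
/*
 * Every command wrapper below follows the same pattern; as a sketch:
 *
 *	struct hwrm_func_reset_input req = {.req_type = 0 };
 *	struct hwrm_func_reset_output *resp = bp->hwrm_cmd_resp_addr;
 *
 *	HWRM_PREP(req, FUNC_RESET, -1, resp);
 *	... fill in request fields ...
 *	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
 *	HWRM_CHECK_RESULT;
 *	... read results out of *resp ...
 */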

int bnxt_hwrm_cfa_l2_clear_rx_mask(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
	int rc = 0;
	struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
	struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, CFA_L2_SET_RX_MASK, -1, resp);
	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
	req.mask = 0;

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	return rc;
}

int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp,
				 struct bnxt_vnic_info *vnic,
				 uint16_t vlan_count,
				 struct bnxt_vlan_table_entry *vlan_table)
{
	int rc = 0;
	struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
	struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;
	uint32_t mask = 0;

	HWRM_PREP(req, CFA_L2_SET_RX_MASK, -1, resp);
	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);

	/* FIXME add multicast flag, when multicast adding options is supported
	 * by ethtool.
	 */
	if (vnic->flags & BNXT_VNIC_INFO_BCAST)
		mask = HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_BCAST;
	if (vnic->flags & BNXT_VNIC_INFO_UNTAGGED)
		mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN;
	if (vnic->flags & BNXT_VNIC_INFO_PROMISC)
		mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_PROMISCUOUS;
	if (vnic->flags & BNXT_VNIC_INFO_ALLMULTI)
		mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ALL_MCAST;
	if (vnic->flags & BNXT_VNIC_INFO_MCAST)
		mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_MCAST;
	if (vnic->mc_addr_cnt) {
		mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_MCAST;
		req.num_mc_entries = rte_cpu_to_le_32(vnic->mc_addr_cnt);
		req.mc_tbl_addr = rte_cpu_to_le_64(vnic->mc_list_dma_addr);
	}
	if (vlan_table) {
		if (!(mask & HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN))
			mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLANONLY;
		/* The VLAN table address is a 64-bit DMA address. */
		req.vlan_tag_tbl_addr = rte_cpu_to_le_64(
			rte_mem_virt2phy(vlan_table));
		req.num_vlan_tags = rte_cpu_to_le_32((uint32_t)vlan_count);
	}
	req.mask = rte_cpu_to_le_32(HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_BCAST |
				    mask);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	return rc;
}

int bnxt_hwrm_cfa_vlan_antispoof_cfg(struct bnxt *bp, uint16_t fid,
			uint16_t vlan_count,
			struct bnxt_vlan_antispoof_table_entry *vlan_table)
{
	int rc = 0;
	struct hwrm_cfa_vlan_antispoof_cfg_input req = {.req_type = 0 };
	struct hwrm_cfa_vlan_antispoof_cfg_output *resp =
						bp->hwrm_cmd_resp_addr;

	/*
	 * Older HWRM versions did not support this command, and the
	 * set_rx_mask list was used for anti-spoofing. In 1.8.0, the TX
	 * path configuration was removed from the set_rx_mask call, and
	 * this command was added.
	 *
	 * This command is also present in 1.7.8.11 and higher, as well as
	 * in 1.7.8.0.
	 */
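	/*
	 * bp->fw_ver packs the version queried in bnxt_hwrm_ver_get() as
	 * (maj << 24) | (min << 16) | (bld << 8) | rsvd, so for example
	 * 1.8.0.0 is 0x01080000 and 1.7.8.11 is 0x0107080b, which is what
	 * the comparisons below encode.
	 */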
	if (bp->fw_ver < ((1 << 24) | (8 << 16))) {
		if (bp->fw_ver != ((1 << 24) | (7 << 16) | (8 << 8))) {
			if (bp->fw_ver < ((1 << 24) | (7 << 16) | (8 << 8) |
					(11)))
				return 0;
		}
	}
	HWRM_PREP(req, CFA_VLAN_ANTISPOOF_CFG, -1, resp);
	req.fid = rte_cpu_to_le_16(fid);

	req.vlan_tag_mask_tbl_addr =
		rte_cpu_to_le_64(rte_mem_virt2phy(vlan_table));
	req.num_vlan_entries = rte_cpu_to_le_32((uint32_t)vlan_count);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	return rc;
}

int bnxt_hwrm_clear_filter(struct bnxt *bp,
			   struct bnxt_filter_info *filter)
{
	int rc = 0;
	struct hwrm_cfa_l2_filter_free_input req = {.req_type = 0 };
	struct hwrm_cfa_l2_filter_free_output *resp = bp->hwrm_cmd_resp_addr;

	if (filter->fw_l2_filter_id == UINT64_MAX)
		return 0;

	HWRM_PREP(req, CFA_L2_FILTER_FREE, -1, resp);

	req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	filter->fw_l2_filter_id = UINT64_MAX;

	return 0;
}

int bnxt_hwrm_set_filter(struct bnxt *bp,
			 uint16_t dst_id,
			 struct bnxt_filter_info *filter)
{
	int rc = 0;
	struct hwrm_cfa_l2_filter_alloc_input req = {.req_type = 0 };
	struct hwrm_cfa_l2_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
	uint32_t enables = 0;

	if (filter->fw_l2_filter_id != UINT64_MAX)
		bnxt_hwrm_clear_filter(bp, filter);

	HWRM_PREP(req, CFA_L2_FILTER_ALLOC, -1, resp);

	req.flags = rte_cpu_to_le_32(filter->flags);

	enables = filter->enables |
	      HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_DST_ID;
	req.dst_id = rte_cpu_to_le_16(dst_id);

	if (enables &
	    HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR)
		memcpy(req.l2_addr, filter->l2_addr,
		       ETHER_ADDR_LEN);
	if (enables &
	    HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK)
		memcpy(req.l2_addr_mask, filter->l2_addr_mask,
		       ETHER_ADDR_LEN);
	if (enables &
	    HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN)
		req.l2_ovlan = filter->l2_ovlan;
	if (enables &
	    HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN_MASK)
		req.l2_ovlan_mask = filter->l2_ovlan_mask;
	if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_ID)
		req.src_id = rte_cpu_to_le_32(filter->src_id);
	if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_TYPE)
		req.src_type = filter->src_type;

	req.enables = rte_cpu_to_le_32(enables);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	filter->fw_l2_filter_id = rte_le_to_cpu_64(resp->l2_filter_id);

	return rc;
}

int bnxt_hwrm_func_qcaps(struct bnxt *bp)
{
	int rc = 0;
	struct hwrm_func_qcaps_input req = {.req_type = 0 };
	struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
	uint16_t new_max_vfs;
	int i;

	HWRM_PREP(req, FUNC_QCAPS, -1, resp);

	req.fid = rte_cpu_to_le_16(0xffff);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	bp->max_ring_grps = rte_le_to_cpu_32(resp->max_hw_ring_grps);
	if (BNXT_PF(bp)) {
		bp->pf.port_id = resp->port_id;
		bp->pf.first_vf_id = rte_le_to_cpu_16(resp->first_vf_id);
		new_max_vfs = bp->pdev->max_vfs;
		if (new_max_vfs != bp->pf.max_vfs) {
			if (bp->pf.vf_info)
				rte_free(bp->pf.vf_info);
			bp->pf.vf_info = rte_malloc("bnxt_vf_info",
			    sizeof(bp->pf.vf_info[0]) * new_max_vfs, 0);
			bp->pf.max_vfs = new_max_vfs;
			for (i = 0; i < new_max_vfs; i++) {
				bp->pf.vf_info[i].fid = bp->pf.first_vf_id + i;
				bp->pf.vf_info[i].vlan_table =
					rte_zmalloc("VF VLAN table",
						    getpagesize(),
						    getpagesize());
				if (bp->pf.vf_info[i].vlan_table == NULL)
					RTE_LOG(ERR, PMD,
					"Failed to alloc VLAN table for VF %d\n",
					i);
				else
					rte_mem_lock_page(
						bp->pf.vf_info[i].vlan_table);
				bp->pf.vf_info[i].vlan_as_table =
					rte_zmalloc("VF VLAN AS table",
						    getpagesize(),
						    getpagesize());
				if (bp->pf.vf_info[i].vlan_as_table == NULL)
					RTE_LOG(ERR, PMD,
					"Failed to alloc VLAN AS table for VF %d\n",
					i);
				else
					rte_mem_lock_page(
					       bp->pf.vf_info[i].vlan_as_table);
				STAILQ_INIT(&bp->pf.vf_info[i].filter);
			}
		}
	}

	bp->fw_fid = rte_le_to_cpu_32(resp->fid);
	memcpy(bp->dflt_mac_addr, &resp->mac_address, ETHER_ADDR_LEN);
	bp->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx);
	bp->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings);
	bp->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings);
	bp->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings);
	bp->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
	/* TODO: For now, do not support VMDq/RFS on VFs. */
	if (BNXT_PF(bp)) {
		if (bp->pf.max_vfs)
			bp->max_vnics = 1;
		else
			bp->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
	} else {
		bp->max_vnics = 1;
	}
	bp->max_stat_ctx = rte_le_to_cpu_16(resp->max_stat_ctx);
	if (BNXT_PF(bp))
		bp->pf.total_vnics = rte_le_to_cpu_16(resp->max_vnics);

	return rc;
}

int bnxt_hwrm_func_reset(struct bnxt *bp)
{
	int rc = 0;
	struct hwrm_func_reset_input req = {.req_type = 0 };
	struct hwrm_func_reset_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, FUNC_RESET, -1, resp);

	req.enables = rte_cpu_to_le_32(0);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	return rc;
}

int bnxt_hwrm_func_driver_register(struct bnxt *bp)
{
	int rc;
	struct hwrm_func_drv_rgtr_input req = {.req_type = 0 };
	struct hwrm_func_drv_rgtr_output *resp = bp->hwrm_cmd_resp_addr;

	if (bp->flags & BNXT_FLAG_REGISTERED)
		return 0;

	HWRM_PREP(req, FUNC_DRV_RGTR, -1, resp);
	req.enables = rte_cpu_to_le_32(HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VER |
			HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_ASYNC_EVENT_FWD);
	req.ver_maj = RTE_VER_YEAR;
	req.ver_min = RTE_VER_MONTH;
	req.ver_upd = RTE_VER_MINOR;

	if (BNXT_PF(bp)) {
		req.enables |= rte_cpu_to_le_32(
			HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VF_REQ_FWD);
		memcpy(req.vf_req_fwd, bp->pf.vf_req_fwd,
		       RTE_MIN(sizeof(req.vf_req_fwd),
			       sizeof(bp->pf.vf_req_fwd)));
	}

	/* Forward all async events to the driver. TODO: Use event macros. */
	memset(req.async_event_fwd, 0xff, sizeof(req.async_event_fwd));

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	bp->flags |= BNXT_FLAG_REGISTERED;

	return rc;
}

int bnxt_hwrm_ver_get(struct bnxt *bp)
{
	int rc = 0;
	struct hwrm_ver_get_input req = {.req_type = 0 };
	struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr;
	uint32_t my_version;
	uint32_t fw_version;
	uint16_t max_resp_len;
	char type[RTE_MEMZONE_NAMESIZE];
	uint32_t dev_caps_cfg;

	bp->max_req_len = HWRM_MAX_REQ_LEN;
	HWRM_PREP(req, VER_GET, -1, resp);

	req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
	req.hwrm_intf_min = HWRM_VERSION_MINOR;
	req.hwrm_intf_upd = HWRM_VERSION_UPDATE;

	/*
	 * Hold the lock since we may be adjusting the response pointers.
	 */
	rte_spinlock_lock(&bp->hwrm_lock);
	rc = bnxt_hwrm_send_message_locked(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	RTE_LOG(INFO, PMD, "%d.%d.%d:%d.%d.%d\n",
		resp->hwrm_intf_maj, resp->hwrm_intf_min,
		resp->hwrm_intf_upd,
		resp->hwrm_fw_maj, resp->hwrm_fw_min, resp->hwrm_fw_bld);
	bp->fw_ver = (resp->hwrm_fw_maj << 24) | (resp->hwrm_fw_min << 16) |
			(resp->hwrm_fw_bld << 8) | resp->hwrm_fw_rsvd;
	RTE_LOG(INFO, PMD, "Driver HWRM version: %d.%d.%d\n",
		HWRM_VERSION_MAJOR, HWRM_VERSION_MINOR, HWRM_VERSION_UPDATE);

	my_version = HWRM_VERSION_MAJOR << 16;
	my_version |= HWRM_VERSION_MINOR << 8;
	my_version |= HWRM_VERSION_UPDATE;

	fw_version = resp->hwrm_intf_maj << 16;
	fw_version |= resp->hwrm_intf_min << 8;
	fw_version |= resp->hwrm_intf_upd;

	if (resp->hwrm_intf_maj != HWRM_VERSION_MAJOR) {
		RTE_LOG(ERR, PMD, "Unsupported firmware API version\n");
		rc = -EINVAL;
		goto error;
	}

	if (my_version != fw_version) {
		RTE_LOG(INFO, PMD, "BNXT Driver/HWRM API mismatch.\n");
		if (my_version < fw_version) {
			RTE_LOG(INFO, PMD,
				"Firmware API version is newer than driver.\n");
			RTE_LOG(INFO, PMD,
				"The driver may be missing features.\n");
		} else {
			RTE_LOG(INFO, PMD,
				"Firmware API version is older than driver.\n");
			RTE_LOG(INFO, PMD,
				"Not all driver features may be functional.\n");
		}
	}

	if (bp->max_req_len > resp->max_req_win_len) {
		RTE_LOG(ERR, PMD, "Unsupported request length\n");
		rc = -EINVAL;
	}
	bp->max_req_len = rte_le_to_cpu_16(resp->max_req_win_len);
	max_resp_len = resp->max_resp_len;
	dev_caps_cfg = rte_le_to_cpu_32(resp->dev_caps_cfg);

	/* Built unconditionally; also used for the short command buffer. */
	sprintf(type, "bnxt_hwrm_%04x:%02x:%02x:%02x",
		bp->pdev->addr.domain, bp->pdev->addr.bus,
		bp->pdev->addr.devid, bp->pdev->addr.function);

	if (bp->max_resp_len != max_resp_len) {
		rte_free(bp->hwrm_cmd_resp_addr);

		bp->hwrm_cmd_resp_addr = rte_malloc(type, max_resp_len, 0);
		if (bp->hwrm_cmd_resp_addr == NULL) {
			rc = -ENOMEM;
			goto error;
		}
		rte_mem_lock_page(bp->hwrm_cmd_resp_addr);
		bp->hwrm_cmd_resp_dma_addr =
			rte_mem_virt2phy(bp->hwrm_cmd_resp_addr);
		if (bp->hwrm_cmd_resp_dma_addr == 0) {
			RTE_LOG(ERR, PMD,
			"Unable to map response buffer to physical memory.\n");
			rc = -ENOMEM;
			goto error;
		}
		bp->max_resp_len = max_resp_len;
	}

	if ((dev_caps_cfg &
		HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
	    (dev_caps_cfg &
	     HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_REQUIRED)) {
		RTE_LOG(DEBUG, PMD, "Short command supported\n");

		rte_free(bp->hwrm_short_cmd_req_addr);

		bp->hwrm_short_cmd_req_addr = rte_malloc(type,
							bp->max_req_len, 0);
		if (bp->hwrm_short_cmd_req_addr == NULL) {
			rc = -ENOMEM;
			goto error;
		}
		rte_mem_lock_page(bp->hwrm_short_cmd_req_addr);
		bp->hwrm_short_cmd_req_dma_addr =
			rte_mem_virt2phy(bp->hwrm_short_cmd_req_addr);
		if (bp->hwrm_short_cmd_req_dma_addr == 0) {
			rte_free(bp->hwrm_short_cmd_req_addr);
			RTE_LOG(ERR, PMD,
				"Unable to map buffer to physical memory.\n");
			rc = -ENOMEM;
			goto error;
		}

		bp->flags |= BNXT_FLAG_SHORT_CMD;
	}

error:
	rte_spinlock_unlock(&bp->hwrm_lock);
	return rc;
}

int bnxt_hwrm_func_driver_unregister(struct bnxt *bp, uint32_t flags)
{
	int rc;
	struct hwrm_func_drv_unrgtr_input req = {.req_type = 0 };
	struct hwrm_func_drv_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;

	if (!(bp->flags & BNXT_FLAG_REGISTERED))
		return 0;

	HWRM_PREP(req, FUNC_DRV_UNRGTR, -1, resp);
	req.flags = flags;

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	bp->flags &= ~BNXT_FLAG_REGISTERED;

	return rc;
}

static int bnxt_hwrm_port_phy_cfg(struct bnxt *bp, struct bnxt_link_info *conf)
{
	int rc = 0;
	struct hwrm_port_phy_cfg_input req = {0};
	struct hwrm_port_phy_cfg_output *resp = bp->hwrm_cmd_resp_addr;
	uint32_t enables = 0;
	uint32_t link_speed_mask =
		HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED_MASK;

	HWRM_PREP(req, PORT_PHY_CFG, -1, resp);

	if (conf->link_up) {
		req.flags = rte_cpu_to_le_32(conf->phy_flags);
		req.force_link_speed = rte_cpu_to_le_16(conf->link_speed);
		/*
		 * Note, ChiMP FW 20.2.1 and 20.2.2 return an error when we set
		 * any auto mode, even "none".
		 */
		if (!conf->link_speed) {
			req.auto_mode = conf->auto_mode;
			enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_MODE;
			if (conf->auto_mode ==
			    HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_SPEED_MASK) {
				req.auto_link_speed_mask =
					conf->auto_link_speed_mask;
				enables |= link_speed_mask;
			}
			if (bp->link_info.auto_link_speed) {
				req.auto_link_speed =
					bp->link_info.auto_link_speed;
				enables |=
				HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED;
			}
		}
		req.auto_duplex = conf->duplex;
		enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_DUPLEX;
		req.auto_pause = conf->auto_pause;
		req.force_pause = conf->force_pause;
		/* Set force_pause if there is no auto or if there is a force */
		if (req.auto_pause && !req.force_pause)
			enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_PAUSE;
		else
			enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_FORCE_PAUSE;

		req.enables = rte_cpu_to_le_32(enables);
	} else {
		req.flags =
		rte_cpu_to_le_32(HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE_LINK_DWN);
		RTE_LOG(INFO, PMD, "Force Link Down\n");
	}

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	return rc;
}

static int bnxt_hwrm_port_phy_qcfg(struct bnxt *bp,
				   struct bnxt_link_info *link_info)
{
	int rc = 0;
	struct hwrm_port_phy_qcfg_input req = {0};
	struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, PORT_PHY_QCFG, -1, resp);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	link_info->phy_link_status = resp->link;
	link_info->link_up =
		(link_info->phy_link_status ==
		 HWRM_PORT_PHY_QCFG_OUTPUT_LINK_LINK) ? 1 : 0;
	link_info->link_speed = rte_le_to_cpu_16(resp->link_speed);
	link_info->duplex = resp->duplex;
	link_info->pause = resp->pause;
	link_info->auto_pause = resp->auto_pause;
	link_info->force_pause = resp->force_pause;
	link_info->auto_mode = resp->auto_mode;

	link_info->support_speeds = rte_le_to_cpu_16(resp->support_speeds);
	link_info->auto_link_speed = rte_le_to_cpu_16(resp->auto_link_speed);
	link_info->preemphasis = rte_le_to_cpu_32(resp->preemphasis);
	link_info->phy_ver[0] = resp->phy_maj;
	link_info->phy_ver[1] = resp->phy_min;
	link_info->phy_ver[2] = resp->phy_bld;

	return rc;
}

int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
{
	int rc = 0;
	struct hwrm_queue_qportcfg_input req = {.req_type = 0 };
	struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, QUEUE_QPORTCFG, -1, resp);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

#define GET_QUEUE_INFO(x) \
	bp->cos_queue[x].id = resp->queue_id##x; \
	bp->cos_queue[x].profile = resp->queue_id##x##_service_profile
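	/*
	 * Token pasting expands each invocation against the numbered
	 * response fields; e.g. GET_QUEUE_INFO(0) becomes:
	 *
	 *	bp->cos_queue[0].id = resp->queue_id0;
	 *	bp->cos_queue[0].profile = resp->queue_id0_service_profile;
	 */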

	GET_QUEUE_INFO(0);
	GET_QUEUE_INFO(1);
	GET_QUEUE_INFO(2);
	GET_QUEUE_INFO(3);
	GET_QUEUE_INFO(4);
	GET_QUEUE_INFO(5);
	GET_QUEUE_INFO(6);
	GET_QUEUE_INFO(7);

	return rc;
}

int bnxt_hwrm_ring_alloc(struct bnxt *bp,
			 struct bnxt_ring *ring,
			 uint32_t ring_type, uint32_t map_index,
			 uint32_t stats_ctx_id, uint32_t cmpl_ring_id)
{
	int rc = 0;
	uint32_t enables = 0;
	struct hwrm_ring_alloc_input req = {.req_type = 0 };
	struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, RING_ALLOC, -1, resp);

	req.page_tbl_addr = rte_cpu_to_le_64(ring->bd_dma);
	req.fbo = rte_cpu_to_le_32(0);
	/* Association of ring index with doorbell index */
	req.logical_id = rte_cpu_to_le_16(map_index);
	req.length = rte_cpu_to_le_32(ring->ring_size);

	switch (ring_type) {
	case HWRM_RING_ALLOC_INPUT_RING_TYPE_TX:
		req.queue_id = bp->cos_queue[0].id;
		/* FALLTHROUGH */
	case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX:
		req.ring_type = ring_type;
		req.cmpl_ring_id = rte_cpu_to_le_16(cmpl_ring_id);
		req.stat_ctx_id = rte_cpu_to_le_16(stats_ctx_id);
		if (stats_ctx_id != INVALID_STATS_CTX_ID)
			enables |=
			HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID;
		break;
	case HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL:
		req.ring_type = ring_type;
		/*
		 * TODO: Some HWRM versions crash with
		 * HWRM_RING_ALLOC_INPUT_INT_MODE_POLL
		 */
		req.int_mode = HWRM_RING_ALLOC_INPUT_INT_MODE_MSIX;
		break;
	default:
		RTE_LOG(ERR, PMD, "hwrm alloc invalid ring type %d\n",
			ring_type);
		return -1;
	}
	req.enables = rte_cpu_to_le_32(enables);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	if (rc || resp->error_code) {
		if (rc == 0 && resp->error_code)
			rc = rte_le_to_cpu_16(resp->error_code);
		switch (ring_type) {
		case HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL:
			RTE_LOG(ERR, PMD,
				"hwrm_ring_alloc cp failed. rc:%d\n", rc);
			return rc;
		case HWRM_RING_FREE_INPUT_RING_TYPE_RX:
			RTE_LOG(ERR, PMD,
				"hwrm_ring_alloc rx failed. rc:%d\n", rc);
			return rc;
		case HWRM_RING_FREE_INPUT_RING_TYPE_TX:
			RTE_LOG(ERR, PMD,
				"hwrm_ring_alloc tx failed. rc:%d\n", rc);
			return rc;
		default:
			RTE_LOG(ERR, PMD, "Invalid ring. rc:%d\n", rc);
			return rc;
		}
	}

	ring->fw_ring_id = rte_le_to_cpu_16(resp->ring_id);
	return rc;
}

int bnxt_hwrm_ring_free(struct bnxt *bp,
			struct bnxt_ring *ring, uint32_t ring_type)
{
	int rc;
	struct hwrm_ring_free_input req = {.req_type = 0 };
	struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, RING_FREE, -1, resp);

	req.ring_type = ring_type;
	req.ring_id = rte_cpu_to_le_16(ring->fw_ring_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	if (rc || resp->error_code) {
		if (rc == 0 && resp->error_code)
			rc = rte_le_to_cpu_16(resp->error_code);

		switch (ring_type) {
		case HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL:
			RTE_LOG(ERR, PMD, "hwrm_ring_free cp failed. rc:%d\n",
				rc);
			return rc;
		case HWRM_RING_FREE_INPUT_RING_TYPE_RX:
			RTE_LOG(ERR, PMD, "hwrm_ring_free rx failed. rc:%d\n",
				rc);
			return rc;
		case HWRM_RING_FREE_INPUT_RING_TYPE_TX:
			RTE_LOG(ERR, PMD, "hwrm_ring_free tx failed. rc:%d\n",
				rc);
			return rc;
		default:
			RTE_LOG(ERR, PMD, "Invalid ring, rc:%d\n", rc);
			return rc;
		}
	}
	return 0;
}

int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp, unsigned int idx)
{
	int rc = 0;
	struct hwrm_ring_grp_alloc_input req = {.req_type = 0 };
	struct hwrm_ring_grp_alloc_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, RING_GRP_ALLOC, -1, resp);

	req.cr = rte_cpu_to_le_16(bp->grp_info[idx].cp_fw_ring_id);
	req.rr = rte_cpu_to_le_16(bp->grp_info[idx].rx_fw_ring_id);
	req.ar = rte_cpu_to_le_16(bp->grp_info[idx].ag_fw_ring_id);
	req.sc = rte_cpu_to_le_16(bp->grp_info[idx].fw_stats_ctx);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	bp->grp_info[idx].fw_grp_id =
	    rte_le_to_cpu_16(resp->ring_group_id);

	return rc;
}

int bnxt_hwrm_ring_grp_free(struct bnxt *bp, unsigned int idx)
{
	int rc;
	struct hwrm_ring_grp_free_input req = {.req_type = 0 };
	struct hwrm_ring_grp_free_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, RING_GRP_FREE, -1, resp);

	req.ring_group_id = rte_cpu_to_le_16(bp->grp_info[idx].fw_grp_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	bp->grp_info[idx].fw_grp_id = INVALID_HW_RING_ID;
	return rc;
}

int bnxt_hwrm_stat_clear(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
{
	int rc = 0;
	struct hwrm_stat_ctx_clr_stats_input req = {.req_type = 0 };
	struct hwrm_stat_ctx_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;

	if (cpr->hw_stats_ctx_id == (uint32_t)HWRM_NA_SIGNATURE)
		return rc;

	HWRM_PREP(req, STAT_CTX_CLR_STATS, -1, resp);

	req.stat_ctx_id = rte_cpu_to_le_16(cpr->hw_stats_ctx_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	return rc;
}

int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
				unsigned int idx __rte_unused)
{
	int rc;
	struct hwrm_stat_ctx_alloc_input req = {.req_type = 0 };
	struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, STAT_CTX_ALLOC, -1, resp);

	req.update_period_ms = rte_cpu_to_le_32(0);

	req.stats_dma_addr =
	    rte_cpu_to_le_64(cpr->hw_stats_map);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	cpr->hw_stats_ctx_id = rte_le_to_cpu_16(resp->stat_ctx_id);

	return rc;
}

int bnxt_hwrm_stat_ctx_free(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
				unsigned int idx __rte_unused)
{
	int rc;
	struct hwrm_stat_ctx_free_input req = {.req_type = 0 };
	struct hwrm_stat_ctx_free_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, STAT_CTX_FREE, -1, resp);

	req.stat_ctx_id = rte_cpu_to_le_16(cpr->hw_stats_ctx_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	return rc;
}

int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
	int rc = 0, i, j;
	struct hwrm_vnic_alloc_input req = { 0 };
	struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr;

	/* map ring groups to this vnic */
	RTE_LOG(DEBUG, PMD, "Alloc VNIC. Start %x, End %x\n",
		vnic->start_grp_id, vnic->end_grp_id);
	for (i = vnic->start_grp_id, j = 0; i <= vnic->end_grp_id; i++, j++)
		vnic->fw_grp_ids[j] = bp->grp_info[i].fw_grp_id;
	vnic->dflt_ring_grp = bp->grp_info[vnic->start_grp_id].fw_grp_id;
	vnic->rss_rule = (uint16_t)HWRM_NA_SIGNATURE;
	vnic->cos_rule = (uint16_t)HWRM_NA_SIGNATURE;
	vnic->lb_rule = (uint16_t)HWRM_NA_SIGNATURE;
	vnic->mru = bp->eth_dev->data->mtu + ETHER_HDR_LEN +
				ETHER_CRC_LEN + VLAN_TAG_SIZE;
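	/*
	 * The MRU is the MTU plus L2 overhead; e.g. with the default
	 * 1500-byte MTU this is 1500 + 14 (Ethernet header) + 4 (CRC) +
	 * 4 (one VLAN tag) = 1522 bytes.
	 */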
	HWRM_PREP(req, VNIC_ALLOC, -1, resp);

	if (vnic->func_default)
		req.flags = HWRM_VNIC_ALLOC_INPUT_FLAGS_DEFAULT;
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	vnic->fw_vnic_id = rte_le_to_cpu_16(resp->vnic_id);
	RTE_LOG(DEBUG, PMD, "VNIC ID %x\n", vnic->fw_vnic_id);
	return rc;
}

static int bnxt_hwrm_vnic_plcmodes_qcfg(struct bnxt *bp,
					struct bnxt_vnic_info *vnic,
					struct bnxt_plcmodes_cfg *pmode)
{
	int rc = 0;
	struct hwrm_vnic_plcmodes_qcfg_input req = {.req_type = 0 };
	struct hwrm_vnic_plcmodes_qcfg_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, VNIC_PLCMODES_QCFG, -1, resp);

	req.vnic_id = rte_cpu_to_le_32(vnic->fw_vnic_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	pmode->flags = rte_le_to_cpu_32(resp->flags);
	/* dflt_vnic bit doesn't exist in the _cfg command */
	pmode->flags &= ~(HWRM_VNIC_PLCMODES_QCFG_OUTPUT_FLAGS_DFLT_VNIC);
	pmode->jumbo_thresh = rte_le_to_cpu_16(resp->jumbo_thresh);
	pmode->hds_offset = rte_le_to_cpu_16(resp->hds_offset);
	pmode->hds_threshold = rte_le_to_cpu_16(resp->hds_threshold);

	return rc;
}

static int bnxt_hwrm_vnic_plcmodes_cfg(struct bnxt *bp,
				       struct bnxt_vnic_info *vnic,
				       struct bnxt_plcmodes_cfg *pmode)
{
	int rc = 0;
	struct hwrm_vnic_plcmodes_cfg_input req = {.req_type = 0 };
	struct hwrm_vnic_plcmodes_cfg_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, VNIC_PLCMODES_CFG, -1, resp);

	req.vnic_id = rte_cpu_to_le_32(vnic->fw_vnic_id);
	req.flags = rte_cpu_to_le_32(pmode->flags);
	req.jumbo_thresh = rte_cpu_to_le_16(pmode->jumbo_thresh);
	req.hds_offset = rte_cpu_to_le_16(pmode->hds_offset);
	req.hds_threshold = rte_cpu_to_le_16(pmode->hds_threshold);
	req.enables = rte_cpu_to_le_32(
	    HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_HDS_THRESHOLD_VALID |
	    HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_HDS_OFFSET_VALID |
	    HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_JUMBO_THRESH_VALID
	);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	return rc;
}

int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
	int rc = 0;
	struct hwrm_vnic_cfg_input req = {.req_type = 0 };
	struct hwrm_vnic_cfg_output *resp = bp->hwrm_cmd_resp_addr;
	uint32_t ctx_enable_flag = HWRM_VNIC_CFG_INPUT_ENABLES_RSS_RULE;
	struct bnxt_plcmodes_cfg pmodes;

	if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
		RTE_LOG(DEBUG, PMD, "VNIC ID %x\n", vnic->fw_vnic_id);
		return rc;
	}

	rc = bnxt_hwrm_vnic_plcmodes_qcfg(bp, vnic, &pmodes);
	if (rc)
		return rc;

	HWRM_PREP(req, VNIC_CFG, -1, resp);

	/* Only RSS is supported for now; TBD: COS & LB */
	req.enables =
	    rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_ENABLES_DFLT_RING_GRP |
			     HWRM_VNIC_CFG_INPUT_ENABLES_MRU);
	if (vnic->lb_rule != 0xffff)
		ctx_enable_flag = HWRM_VNIC_CFG_INPUT_ENABLES_LB_RULE;
	if (vnic->cos_rule != 0xffff)
		ctx_enable_flag = HWRM_VNIC_CFG_INPUT_ENABLES_COS_RULE;
	if (vnic->rss_rule != 0xffff)
		ctx_enable_flag = HWRM_VNIC_CFG_INPUT_ENABLES_RSS_RULE;
	req.enables |= rte_cpu_to_le_32(ctx_enable_flag);
	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
	req.dflt_ring_grp = rte_cpu_to_le_16(vnic->dflt_ring_grp);
	req.rss_rule = rte_cpu_to_le_16(vnic->rss_rule);
	req.cos_rule = rte_cpu_to_le_16(vnic->cos_rule);
	req.lb_rule = rte_cpu_to_le_16(vnic->lb_rule);
	req.mru = rte_cpu_to_le_16(vnic->mru);
	if (vnic->func_default)
		req.flags |=
		    rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_DEFAULT);
	if (vnic->vlan_strip)
		req.flags |=
		    rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_VLAN_STRIP_MODE);
	if (vnic->bd_stall)
		req.flags |=
		    rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_BD_STALL_MODE);
	if (vnic->roce_dual)
		req.flags |= rte_cpu_to_le_32(
			HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_DUAL_VNIC_MODE);
	if (vnic->roce_only)
		req.flags |= rte_cpu_to_le_32(
			HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_ONLY_VNIC_MODE);
	if (vnic->rss_dflt_cr)
		req.flags |= rte_cpu_to_le_32(
			HWRM_VNIC_QCFG_OUTPUT_FLAGS_RSS_DFLT_CR_MODE);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	rc = bnxt_hwrm_vnic_plcmodes_cfg(bp, vnic, &pmodes);

	return rc;
}

int bnxt_hwrm_vnic_qcfg(struct bnxt *bp, struct bnxt_vnic_info *vnic,
		int16_t fw_vf_id)
{
	int rc = 0;
	struct hwrm_vnic_qcfg_input req = {.req_type = 0 };
	struct hwrm_vnic_qcfg_output *resp = bp->hwrm_cmd_resp_addr;

	if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
		RTE_LOG(DEBUG, PMD, "VNIC QCFG ID %d\n", vnic->fw_vnic_id);
		return rc;
	}
	HWRM_PREP(req, VNIC_QCFG, -1, resp);

	req.enables =
		rte_cpu_to_le_32(HWRM_VNIC_QCFG_INPUT_ENABLES_VF_ID_VALID);
	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
	req.vf_id = rte_cpu_to_le_16(fw_vf_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	vnic->dflt_ring_grp = rte_le_to_cpu_16(resp->dflt_ring_grp);
	vnic->rss_rule = rte_le_to_cpu_16(resp->rss_rule);
	vnic->cos_rule = rte_le_to_cpu_16(resp->cos_rule);
	vnic->lb_rule = rte_le_to_cpu_16(resp->lb_rule);
	vnic->mru = rte_le_to_cpu_16(resp->mru);
	vnic->func_default = rte_le_to_cpu_32(
			resp->flags) & HWRM_VNIC_QCFG_OUTPUT_FLAGS_DEFAULT;
	vnic->vlan_strip = rte_le_to_cpu_32(resp->flags) &
			HWRM_VNIC_QCFG_OUTPUT_FLAGS_VLAN_STRIP_MODE;
	vnic->bd_stall = rte_le_to_cpu_32(resp->flags) &
			HWRM_VNIC_QCFG_OUTPUT_FLAGS_BD_STALL_MODE;
	vnic->roce_dual = rte_le_to_cpu_32(resp->flags) &
			HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_DUAL_VNIC_MODE;
	vnic->roce_only = rte_le_to_cpu_32(resp->flags) &
			HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_ONLY_VNIC_MODE;
	vnic->rss_dflt_cr = rte_le_to_cpu_32(resp->flags) &
			HWRM_VNIC_QCFG_OUTPUT_FLAGS_RSS_DFLT_CR_MODE;

	return rc;
}

int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
	int rc = 0;
	struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {.req_type = 0 };
	struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp =
						bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_ALLOC, -1, resp);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	vnic->rss_rule = rte_le_to_cpu_16(resp->rss_cos_lb_ctx_id);
	RTE_LOG(DEBUG, PMD, "VNIC RSS Rule %x\n", vnic->rss_rule);

	return rc;
}

int bnxt_hwrm_vnic_ctx_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
	int rc = 0;
	struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {.req_type = 0 };
	struct hwrm_vnic_rss_cos_lb_ctx_free_output *resp =
						bp->hwrm_cmd_resp_addr;

	if (vnic->rss_rule == 0xffff) {
		RTE_LOG(DEBUG, PMD, "VNIC RSS Rule %x\n", vnic->rss_rule);
		return rc;
	}
	HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_FREE, -1, resp);

	req.rss_cos_lb_ctx_id = rte_cpu_to_le_16(vnic->rss_rule);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	vnic->rss_rule = INVALID_HW_RING_ID;

	return rc;
}

int bnxt_hwrm_vnic_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
	int rc = 0;
	struct hwrm_vnic_free_input req = {.req_type = 0 };
	struct hwrm_vnic_free_output *resp = bp->hwrm_cmd_resp_addr;

	if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
		RTE_LOG(DEBUG, PMD, "VNIC FREE ID %x\n", vnic->fw_vnic_id);
		return rc;
	}

	HWRM_PREP(req, VNIC_FREE, -1, resp);

	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	vnic->fw_vnic_id = INVALID_HW_RING_ID;
	return rc;
}

int bnxt_hwrm_vnic_rss_cfg(struct bnxt *bp,
			   struct bnxt_vnic_info *vnic)
{
	int rc = 0;
	struct hwrm_vnic_rss_cfg_input req = {.req_type = 0 };
	struct hwrm_vnic_rss_cfg_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, VNIC_RSS_CFG, -1, resp);

	req.hash_type = rte_cpu_to_le_32(vnic->hash_type);

	req.ring_grp_tbl_addr =
	    rte_cpu_to_le_64(vnic->rss_table_dma_addr);
	req.hash_key_tbl_addr =
	    rte_cpu_to_le_64(vnic->rss_hash_key_dma_addr);
	req.rss_ctx_idx = rte_cpu_to_le_16(vnic->rss_rule);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	return rc;
}

int bnxt_hwrm_vnic_plcmode_cfg(struct bnxt *bp,
			struct bnxt_vnic_info *vnic)
{
	int rc = 0;
	struct hwrm_vnic_plcmodes_cfg_input req = {.req_type = 0 };
	struct hwrm_vnic_plcmodes_cfg_output *resp = bp->hwrm_cmd_resp_addr;
	uint16_t size;

	HWRM_PREP(req, VNIC_PLCMODES_CFG, -1, resp);

	req.flags = rte_cpu_to_le_32(
			HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_JUMBO_PLACEMENT);

	req.enables = rte_cpu_to_le_32(
		HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_JUMBO_THRESH_VALID);

	size = rte_pktmbuf_data_room_size(bp->rx_queues[0]->mb_pool);
	size -= RTE_PKTMBUF_HEADROOM;
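	/*
	 * e.g. with a common 2048-byte mbuf data room and the default
	 * 128-byte RTE_PKTMBUF_HEADROOM, the jumbo threshold is 1920.
	 */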

	req.jumbo_thresh = rte_cpu_to_le_16(size);
	req.vnic_id = rte_cpu_to_le_32(vnic->fw_vnic_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	return rc;
}

int bnxt_hwrm_vnic_tpa_cfg(struct bnxt *bp,
			struct bnxt_vnic_info *vnic, bool enable)
{
	int rc = 0;
	struct hwrm_vnic_tpa_cfg_input req = {.req_type = 0 };
	struct hwrm_vnic_tpa_cfg_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, VNIC_TPA_CFG, -1, resp);

	if (enable) {
		req.enables = rte_cpu_to_le_32(
				HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGG_SEGS |
				HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGGS |
				HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MIN_AGG_LEN);
		req.flags = rte_cpu_to_le_32(
				HWRM_VNIC_TPA_CFG_INPUT_FLAGS_TPA |
				HWRM_VNIC_TPA_CFG_INPUT_FLAGS_ENCAP_TPA |
				HWRM_VNIC_TPA_CFG_INPUT_FLAGS_RSC_WND_UPDATE |
				HWRM_VNIC_TPA_CFG_INPUT_FLAGS_GRO |
				HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_ECN |
			HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_SAME_GRE_SEQ);
		req.vnic_id = rte_cpu_to_le_32(vnic->fw_vnic_id);
		req.max_agg_segs = rte_cpu_to_le_16(5);
		req.max_aggs =
			rte_cpu_to_le_16(HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_MAX);
		req.min_agg_len = rte_cpu_to_le_32(512);
	}

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	return rc;
}

int bnxt_hwrm_func_vf_mac(struct bnxt *bp, uint16_t vf, const uint8_t *mac_addr)
{
	struct hwrm_func_cfg_input req = {0};
	struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
	int rc;

	req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
	req.enables = rte_cpu_to_le_32(
			HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
	memcpy(req.dflt_mac_addr, mac_addr, sizeof(req.dflt_mac_addr));
	req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);

	HWRM_PREP(req, FUNC_CFG, -1, resp);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
	HWRM_CHECK_RESULT;

	bp->pf.vf_info[vf].random_mac = false;

	return rc;
}
1379
1380 int bnxt_hwrm_func_qstats_tx_drop(struct bnxt *bp, uint16_t fid,
1381                                   uint64_t *dropped)
1382 {
1383         int rc = 0;
1384         struct hwrm_func_qstats_input req = {.req_type = 0};
1385         struct hwrm_func_qstats_output *resp = bp->hwrm_cmd_resp_addr;
1386
1387         HWRM_PREP(req, FUNC_QSTATS, -1, resp);
1388
1389         req.fid = rte_cpu_to_le_16(fid);
1390
1391         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1392
1393         HWRM_CHECK_RESULT;
1394
1395         if (dropped)
1396                 *dropped = rte_le_to_cpu_64(resp->tx_drop_pkts);
1397
1398         return rc;
1399 }
1400
1401 int bnxt_hwrm_func_qstats(struct bnxt *bp, uint16_t fid,
1402                           struct rte_eth_stats *stats)
1403 {
1404         int rc = 0;
1405         struct hwrm_func_qstats_input req = {.req_type = 0};
1406         struct hwrm_func_qstats_output *resp = bp->hwrm_cmd_resp_addr;
1407
1408         HWRM_PREP(req, FUNC_QSTATS, -1, resp);
1409
1410         req.fid = rte_cpu_to_le_16(fid);
1411
1412         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1413
1414         HWRM_CHECK_RESULT;
1415
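        /*
         * rte_eth_stats has no per-cast-type counters, so fold the
         * unicast, multicast and broadcast HWRM counters together.
         */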
1416         stats->ipackets = rte_le_to_cpu_64(resp->rx_ucast_pkts);
1417         stats->ipackets += rte_le_to_cpu_64(resp->rx_mcast_pkts);
1418         stats->ipackets += rte_le_to_cpu_64(resp->rx_bcast_pkts);
1419         stats->ibytes = rte_le_to_cpu_64(resp->rx_ucast_bytes);
1420         stats->ibytes += rte_le_to_cpu_64(resp->rx_mcast_bytes);
1421         stats->ibytes += rte_le_to_cpu_64(resp->rx_bcast_bytes);
1422
1423         stats->opackets = rte_le_to_cpu_64(resp->tx_ucast_pkts);
1424         stats->opackets += rte_le_to_cpu_64(resp->tx_mcast_pkts);
1425         stats->opackets += rte_le_to_cpu_64(resp->tx_bcast_pkts);
1426         stats->obytes = rte_le_to_cpu_64(resp->tx_ucast_bytes);
1427         stats->obytes += rte_le_to_cpu_64(resp->tx_mcast_bytes);
1428         stats->obytes += rte_le_to_cpu_64(resp->tx_bcast_bytes);
1429
1430         stats->ierrors = rte_le_to_cpu_64(resp->rx_err_pkts);
1431         stats->oerrors = rte_le_to_cpu_64(resp->tx_err_pkts);
1432
1433         stats->imissed = rte_le_to_cpu_64(resp->rx_drop_pkts);
1434
1435         return rc;
1436 }
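/*
 * Usage sketch (illustration only): a stats_get callback would typically
 * query the function's own counters by passing fid 0xffff, which the
 * firmware interprets as "the calling function":
 *
 *      struct rte_eth_stats stats = { 0 };
 *      int rc = bnxt_hwrm_func_qstats(bp, 0xffff, &stats);
 */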
1437
1438 int bnxt_hwrm_func_clr_stats(struct bnxt *bp, uint16_t fid)
1439 {
1440         int rc = 0;
1441         struct hwrm_func_clr_stats_input req = {.req_type = 0};
1442         struct hwrm_func_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;
1443
1444         HWRM_PREP(req, FUNC_CLR_STATS, -1, resp);
1445
1446         req.fid = rte_cpu_to_le_16(fid);
1447
1448         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1449
1450         HWRM_CHECK_RESULT;
1451
1452         return rc;
1453 }
1454
1455 /*
1456  * HWRM utility functions
1457  */
1458
1459 int bnxt_clear_all_hwrm_stat_ctxs(struct bnxt *bp)
1460 {
1461         unsigned int i;
1462         int rc = 0;
1463
1464         for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
1465                 struct bnxt_tx_queue *txq;
1466                 struct bnxt_rx_queue *rxq;
1467                 struct bnxt_cp_ring_info *cpr;
1468
1469                 if (i >= bp->rx_cp_nr_rings) {
1470                         txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
1471                         cpr = txq->cp_ring;
1472                 } else {
1473                         rxq = bp->rx_queues[i];
1474                         cpr = rxq->cp_ring;
1475                 }
1476
1477                 rc = bnxt_hwrm_stat_clear(bp, cpr);
1478                 if (rc)
1479                         return rc;
1480         }
1481         return 0;
1482 }
1483
1484 int bnxt_free_all_hwrm_stat_ctxs(struct bnxt *bp)
1485 {
1486         int rc;
1487         unsigned int i;
1488         struct bnxt_cp_ring_info *cpr;
1489
1490         for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
1491
1492                 if (i >= bp->rx_cp_nr_rings)
1493                         cpr = bp->tx_queues[i - bp->rx_cp_nr_rings]->cp_ring;
1494                 else
1495                         cpr = bp->rx_queues[i]->cp_ring;
1496                 if (cpr->hw_stats_ctx_id != HWRM_NA_SIGNATURE) {
1497                         rc = bnxt_hwrm_stat_ctx_free(bp, cpr, i);
1498                         cpr->hw_stats_ctx_id = HWRM_NA_SIGNATURE;
1499                         /*
1500                          * TODO. Need a better way to reset grp_info.stats_ctx
1501                          * for Rx rings only. stats_ctx is not saved for Tx
1502                          * in grp_info.
1503                          */
1504                         bp->grp_info[i].fw_stats_ctx = cpr->hw_stats_ctx_id;
1505                         if (rc)
1506                                 return rc;
1507                 }
1508         }
1509         return 0;
1510 }
1511
1512 int bnxt_alloc_all_hwrm_stat_ctxs(struct bnxt *bp)
1513 {
1514         unsigned int i;
1515         int rc = 0;
1516
1517         for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
1518                 struct bnxt_tx_queue *txq;
1519                 struct bnxt_rx_queue *rxq;
1520                 struct bnxt_cp_ring_info *cpr;
1521
1522                 if (i >= bp->rx_cp_nr_rings) {
1523                         txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
1524                         cpr = txq->cp_ring;
1525                 } else {
1526                         rxq = bp->rx_queues[i];
1527                         cpr = rxq->cp_ring;
1528                 }
1529
1530                 rc = bnxt_hwrm_stat_ctx_alloc(bp, cpr, i);
1531
1532                 if (rc)
1533                         return rc;
1534         }
1535         return rc;
1536 }
1537
1538 int bnxt_free_all_hwrm_ring_grps(struct bnxt *bp)
1539 {
1540         uint16_t idx;
1541         int rc = 0;
1542
1543         for (idx = 0; idx < bp->rx_cp_nr_rings; idx++) {
1544
1545                 if (bp->grp_info[idx].fw_grp_id == INVALID_HW_RING_ID) {
1546                         RTE_LOG(ERR, PMD,
1547                                 "Attempt to free invalid ring group %d\n",
1548                                 idx);
1549                         continue;
1550                 }
1551
1552                 rc = bnxt_hwrm_ring_grp_free(bp, idx);
1553
1554                 if (rc)
1555                         return rc;
1556         }
1557         return rc;
1558 }
1559
1560 static void bnxt_free_cp_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1561                                 unsigned int idx)
1562 {
1563         struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
1564
1565         bnxt_hwrm_ring_free(bp, cp_ring,
1566                         HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL);
1567         cp_ring->fw_ring_id = INVALID_HW_RING_ID;
1568         bp->grp_info[idx].cp_fw_ring_id = INVALID_HW_RING_ID;
1569         memset(cpr->cp_desc_ring, 0, cpr->cp_ring_struct->ring_size *
1570                         sizeof(*cpr->cp_desc_ring));
1571         cpr->cp_raw_cons = 0;
1572 }
1573
1574 int bnxt_free_all_hwrm_rings(struct bnxt *bp)
1575 {
1576         unsigned int i;
1577         int rc = 0;
1578
1579         for (i = 0; i < bp->tx_cp_nr_rings; i++) {
1580                 struct bnxt_tx_queue *txq = bp->tx_queues[i];
1581                 struct bnxt_tx_ring_info *txr = txq->tx_ring;
1582                 struct bnxt_ring *ring = txr->tx_ring_struct;
1583                 struct bnxt_cp_ring_info *cpr = txq->cp_ring;
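                /*
                 * Completion-ring bookkeeping in this function: grp_info[0]
                 * is the default ring, indices 1..rx_cp_nr_rings are the Rx
                 * rings, and the Tx rings follow after the Rx block.
                 */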
1584                 unsigned int idx = bp->rx_cp_nr_rings + i + 1;
1585
1586                 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
1587                         bnxt_hwrm_ring_free(bp, ring,
1588                                         HWRM_RING_FREE_INPUT_RING_TYPE_TX);
1589                         ring->fw_ring_id = INVALID_HW_RING_ID;
1590                         memset(txr->tx_desc_ring, 0,
1591                                         txr->tx_ring_struct->ring_size *
1592                                         sizeof(*txr->tx_desc_ring));
1593                         memset(txr->tx_buf_ring, 0,
1594                                         txr->tx_ring_struct->ring_size *
1595                                         sizeof(*txr->tx_buf_ring));
1596                         txr->tx_prod = 0;
1597                         txr->tx_cons = 0;
1598                 }
1599                 if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) {
1600                         bnxt_free_cp_ring(bp, cpr, idx);
1601                         cpr->cp_ring_struct->fw_ring_id = INVALID_HW_RING_ID;
1602                 }
1603         }
1604
1605         for (i = 0; i < bp->rx_cp_nr_rings; i++) {
1606                 struct bnxt_rx_queue *rxq = bp->rx_queues[i];
1607                 struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
1608                 struct bnxt_ring *ring = rxr->rx_ring_struct;
1609                 struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
1610                 unsigned int idx = i + 1;
1611
1612                 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
1613                         bnxt_hwrm_ring_free(bp, ring,
1614                                         HWRM_RING_FREE_INPUT_RING_TYPE_RX);
1615                         ring->fw_ring_id = INVALID_HW_RING_ID;
1616                         bp->grp_info[idx].rx_fw_ring_id = INVALID_HW_RING_ID;
1617                         memset(rxr->rx_desc_ring, 0,
1618                                         rxr->rx_ring_struct->ring_size *
1619                                         sizeof(*rxr->rx_desc_ring));
1620                         memset(rxr->rx_buf_ring, 0,
1621                                         rxr->rx_ring_struct->ring_size *
1622                                         sizeof(*rxr->rx_buf_ring));
1623                         rxr->rx_prod = 0;
1624                         memset(rxr->ag_buf_ring, 0,
1625                                         rxr->ag_ring_struct->ring_size *
1626                                         sizeof(*rxr->ag_buf_ring));
1627                         rxr->ag_prod = 0;
1628                 }
1629                 if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) {
1630                         bnxt_free_cp_ring(bp, cpr, idx);
1631                         bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
1632                         cpr->cp_ring_struct->fw_ring_id = INVALID_HW_RING_ID;
1633                 }
1634         }
1635
1636         /* Default completion ring */
1637         {
1638                 struct bnxt_cp_ring_info *cpr = bp->def_cp_ring;
1639
1640                 if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) {
1641                         bnxt_free_cp_ring(bp, cpr, 0);
1642                         cpr->cp_ring_struct->fw_ring_id = INVALID_HW_RING_ID;
1643                 }
1644         }
1645
1646         return rc;
1647 }
1648
1649 int bnxt_alloc_all_hwrm_ring_grps(struct bnxt *bp)
1650 {
1651         uint16_t i;
1652         int rc = 0;
1653
1654         for (i = 0; i < bp->rx_cp_nr_rings; i++) {
1655                 rc = bnxt_hwrm_ring_grp_alloc(bp, i);
1656                 if (rc)
1657                         return rc;
1658         }
1659         return rc;
1660 }
1661
1662 void bnxt_free_hwrm_resources(struct bnxt *bp)
1663 {
1664         /* Release rte_malloc'd HWRM buffers */
1665         rte_free(bp->hwrm_cmd_resp_addr);
1666         rte_free(bp->hwrm_short_cmd_req_addr);
1667         bp->hwrm_cmd_resp_addr = NULL;
1668         bp->hwrm_short_cmd_req_addr = NULL;
1669         bp->hwrm_cmd_resp_dma_addr = 0;
1670         bp->hwrm_short_cmd_req_dma_addr = 0;
1671 }
1672
1673 int bnxt_alloc_hwrm_resources(struct bnxt *bp)
1674 {
1675         struct rte_pci_device *pdev = bp->pdev;
1676         char type[RTE_MEMZONE_NAMESIZE];
1677
1678         snprintf(type, sizeof(type), "bnxt_hwrm_%04x:%02x:%02x:%02x",
1679                 pdev->addr.domain, pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
1680         bp->max_resp_len = HWRM_MAX_RESP_LEN;
1681         bp->hwrm_cmd_resp_addr = rte_malloc(type, bp->max_resp_len, 0);
1682         if (bp->hwrm_cmd_resp_addr == NULL)
1683                 return -ENOMEM;
1684         rte_mem_lock_page(bp->hwrm_cmd_resp_addr);
1685         bp->hwrm_cmd_resp_dma_addr =
1686                 rte_mem_virt2phy(bp->hwrm_cmd_resp_addr);
1687         if (bp->hwrm_cmd_resp_dma_addr == 0) {
1688                 RTE_LOG(ERR, PMD,
1689                         "unable to map response address to physical memory\n");
1690                 return -ENOMEM;
1691         }
1692         rte_spinlock_init(&bp->hwrm_lock);
1693
1694         return 0;
1695 }
1696
1697 int bnxt_clear_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1698 {
1699         struct bnxt_filter_info *filter;
1700         int rc = 0;
1701
1702         STAILQ_FOREACH(filter, &vnic->filter, next) {
1703                 rc = bnxt_hwrm_clear_filter(bp, filter);
1704                 if (rc)
1705                         break;
1706         }
1707         return rc;
1708 }
1709
1710 int bnxt_set_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1711 {
1712         struct bnxt_filter_info *filter;
1713         int rc = 0;
1714
1715         STAILQ_FOREACH(filter, &vnic->filter, next) {
1716                 rc = bnxt_hwrm_set_filter(bp, vnic->fw_vnic_id, filter);
1717                 if (rc)
1718                         break;
1719         }
1720         return rc;
1721 }
1722
1723 void bnxt_free_tunnel_ports(struct bnxt *bp)
1724 {
1725         if (bp->vxlan_port_cnt)
1726                 bnxt_hwrm_tunnel_dst_port_free(bp, bp->vxlan_fw_dst_port_id,
1727                         HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN);
1728         bp->vxlan_port = 0;
1729         if (bp->geneve_port_cnt)
1730                 bnxt_hwrm_tunnel_dst_port_free(bp, bp->geneve_fw_dst_port_id,
1731                         HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_GENEVE);
1732         bp->geneve_port = 0;
1733 }
1734
1735 void bnxt_free_all_hwrm_resources(struct bnxt *bp)
1736 {
1737         struct bnxt_vnic_info *vnic;
1738         unsigned int i;
1739
1740         if (bp->vnic_info == NULL)
1741                 return;
1742
1743         vnic = &bp->vnic_info[0];
1744         if (BNXT_PF(bp))
1745                 bnxt_hwrm_cfa_l2_clear_rx_mask(bp, vnic);
1746
1747         /* VNIC resources */
1748         for (i = 0; i < bp->nr_vnics; i++) {
1749                 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
1750
1751                 bnxt_clear_hwrm_vnic_filters(bp, vnic);
1752
1753                 bnxt_hwrm_vnic_ctx_free(bp, vnic);
1754
1755                 bnxt_hwrm_vnic_tpa_cfg(bp, vnic, false);
1756
1757                 bnxt_hwrm_vnic_free(bp, vnic);
1758         }
1759         /* Ring resources */
1760         bnxt_free_all_hwrm_rings(bp);
1761         bnxt_free_all_hwrm_ring_grps(bp);
1762         bnxt_free_all_hwrm_stat_ctxs(bp);
1763         bnxt_free_tunnel_ports(bp);
1764 }
1765
1766 static uint16_t bnxt_parse_eth_link_duplex(uint32_t conf_link_speed)
1767 {
1768         uint8_t hw_link_duplex = HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;
1769
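        /*
         * ETH_LINK_SPEED_AUTONEG is 0, so this tests that the FIXED bit
         * is clear, i.e. autonegotiation of duplex is acceptable.
         */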
1770         if ((conf_link_speed & ETH_LINK_SPEED_FIXED) == ETH_LINK_SPEED_AUTONEG)
1771                 return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;
1772
1773         switch (conf_link_speed) {
1774         case ETH_LINK_SPEED_10M_HD:
1775         case ETH_LINK_SPEED_100M_HD:
1776                 return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF;
1777         }
1778         return hw_link_duplex;
1779 }
1780
1781 static uint16_t bnxt_parse_eth_link_speed(uint32_t conf_link_speed)
1782 {
1783         uint16_t eth_link_speed = 0;
1784
1785         if (conf_link_speed == ETH_LINK_SPEED_AUTONEG)
1786                 return ETH_LINK_SPEED_AUTONEG;
1787
1788         switch (conf_link_speed & ~ETH_LINK_SPEED_FIXED) {
1789         case ETH_LINK_SPEED_100M:
1790         case ETH_LINK_SPEED_100M_HD:
1791                 eth_link_speed =
1792                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_100MB;
1793                 break;
1794         case ETH_LINK_SPEED_1G:
1795                 eth_link_speed =
1796                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_1GB;
1797                 break;
1798         case ETH_LINK_SPEED_2_5G:
1799                 eth_link_speed =
1800                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_2_5GB;
1801                 break;
1802         case ETH_LINK_SPEED_10G:
1803                 eth_link_speed =
1804                         HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_10GB;
1805                 break;
1806         case ETH_LINK_SPEED_20G:
1807                 eth_link_speed =
1808                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_20GB;
1809                 break;
1810         case ETH_LINK_SPEED_25G:
1811                 eth_link_speed =
1812                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_25GB;
1813                 break;
1814         case ETH_LINK_SPEED_40G:
1815                 eth_link_speed =
1816                         HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_40GB;
1817                 break;
1818         case ETH_LINK_SPEED_50G:
1819                 eth_link_speed =
1820                         HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_50GB;
1821                 break;
1822         default:
1823                 RTE_LOG(ERR, PMD,
1824                         "Unsupported link speed %u; default to AUTO\n",
1825                         conf_link_speed);
1826                 break;
1827         }
1828         return eth_link_speed;
1829 }
1830
1831 #define BNXT_SUPPORTED_SPEEDS (ETH_LINK_SPEED_100M | ETH_LINK_SPEED_100M_HD | \
1832                 ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G | \
1833                 ETH_LINK_SPEED_10G | ETH_LINK_SPEED_20G | ETH_LINK_SPEED_25G | \
1834                 ETH_LINK_SPEED_40G | ETH_LINK_SPEED_50G)
1835
1836 static int bnxt_valid_link_speed(uint32_t link_speed, uint8_t port_id)
1837 {
1838         uint32_t one_speed;
1839
1840         if (link_speed == ETH_LINK_SPEED_AUTONEG)
1841                 return 0;
1842
1843         if (link_speed & ETH_LINK_SPEED_FIXED) {
1844                 one_speed = link_speed & ~ETH_LINK_SPEED_FIXED;
1845
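                /*
                 * A fixed-speed request must name exactly one speed, so
                 * reject masks with more than one bit set.
                 */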
1846                 if (one_speed & (one_speed - 1)) {
1847                         RTE_LOG(ERR, PMD,
1848                                 "Invalid advertised speeds (%u) for port %u\n",
1849                                 link_speed, port_id);
1850                         return -EINVAL;
1851                 }
1852                 if ((one_speed & BNXT_SUPPORTED_SPEEDS) != one_speed) {
1853                         RTE_LOG(ERR, PMD,
1854                                 "Unsupported advertised speed (%u) for port %u\n",
1855                                 link_speed, port_id);
1856                         return -EINVAL;
1857                 }
1858         } else {
1859                 if (!(link_speed & BNXT_SUPPORTED_SPEEDS)) {
1860                         RTE_LOG(ERR, PMD,
1861                                 "Unsupported advertised speeds (%u) for port %u\n",
1862                                 link_speed, port_id);
1863                         return -EINVAL;
1864                 }
1865         }
1866         return 0;
1867 }
1868
1869 static uint16_t
1870 bnxt_parse_eth_link_speed_mask(struct bnxt *bp, uint32_t link_speed)
1871 {
1872         uint16_t ret = 0;
1873
1874         if (link_speed == ETH_LINK_SPEED_AUTONEG) {
1875                 if (bp->link_info.support_speeds)
1876                         return bp->link_info.support_speeds;
1877                 link_speed = BNXT_SUPPORTED_SPEEDS;
1878         }
1879
1880         if (link_speed & ETH_LINK_SPEED_100M)
1881                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
1882         if (link_speed & ETH_LINK_SPEED_100M_HD)
1883                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
1884         if (link_speed & ETH_LINK_SPEED_1G)
1885                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_1GB;
1886         if (link_speed & ETH_LINK_SPEED_2_5G)
1887                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_2_5GB;
1888         if (link_speed & ETH_LINK_SPEED_10G)
1889                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_10GB;
1890         if (link_speed & ETH_LINK_SPEED_20G)
1891                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_20GB;
1892         if (link_speed & ETH_LINK_SPEED_25G)
1893                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_25GB;
1894         if (link_speed & ETH_LINK_SPEED_40G)
1895                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_40GB;
1896         if (link_speed & ETH_LINK_SPEED_50G)
1897                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_50GB;
1898         return ret;
1899 }
1900
1901 static uint32_t bnxt_parse_hw_link_speed(uint16_t hw_link_speed)
1902 {
1903         uint32_t eth_link_speed = ETH_SPEED_NUM_NONE;
1904
1905         switch (hw_link_speed) {
1906         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100MB:
1907                 eth_link_speed = ETH_SPEED_NUM_100M;
1908                 break;
1909         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_1GB:
1910                 eth_link_speed = ETH_SPEED_NUM_1G;
1911                 break;
1912         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2_5GB:
1913                 eth_link_speed = ETH_SPEED_NUM_2_5G;
1914                 break;
1915         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10GB:
1916                 eth_link_speed = ETH_SPEED_NUM_10G;
1917                 break;
1918         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_20GB:
1919                 eth_link_speed = ETH_SPEED_NUM_20G;
1920                 break;
1921         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_25GB:
1922                 eth_link_speed = ETH_SPEED_NUM_25G;
1923                 break;
1924         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_40GB:
1925                 eth_link_speed = ETH_SPEED_NUM_40G;
1926                 break;
1927         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_50GB:
1928                 eth_link_speed = ETH_SPEED_NUM_50G;
1929                 break;
1930         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2GB:
1931         default:
1932                 RTE_LOG(ERR, PMD, "HWRM link speed %d not defined\n",
1933                         hw_link_speed);
1934                 break;
1935         }
1936         return eth_link_speed;
1937 }
1938
1939 static uint16_t bnxt_parse_hw_link_duplex(uint16_t hw_link_duplex)
1940 {
1941         uint16_t eth_link_duplex = ETH_LINK_FULL_DUPLEX;
1942
1943         switch (hw_link_duplex) {
1944         case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH:
1945         case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_FULL:
1946                 eth_link_duplex = ETH_LINK_FULL_DUPLEX;
1947                 break;
1948         case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF:
1949                 eth_link_duplex = ETH_LINK_HALF_DUPLEX;
1950                 break;
1951         default:
1952                 RTE_LOG(ERR, PMD, "HWRM link duplex %d not defined\n",
1953                         hw_link_duplex);
1954                 break;
1955         }
1956         return eth_link_duplex;
1957 }
1958
1959 int bnxt_get_hwrm_link_config(struct bnxt *bp, struct rte_eth_link *link)
1960 {
1961         int rc = 0;
1962         struct bnxt_link_info *link_info = &bp->link_info;
1963
1964         rc = bnxt_hwrm_port_phy_qcfg(bp, link_info);
1965         if (rc) {
1966                 RTE_LOG(ERR, PMD,
1967                         "Get link config failed with rc %d\n", rc);
1968                 goto exit;
1969         }
1970         if (link_info->link_speed)
1971                 link->link_speed =
1972                         bnxt_parse_hw_link_speed(link_info->link_speed);
1973         else
1974                 link->link_speed = ETH_SPEED_NUM_NONE;
1975         link->link_duplex = bnxt_parse_hw_link_duplex(link_info->duplex);
1976         link->link_status = link_info->link_up;
1977         link->link_autoneg = link_info->auto_mode ==
1978                 HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_NONE ?
1979                 ETH_LINK_FIXED : ETH_LINK_AUTONEG;
1980 exit:
1981         return rc;
1982 }
1983
1984 int bnxt_set_hwrm_link_config(struct bnxt *bp, bool link_up)
1985 {
1986         int rc = 0;
1987         struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
1988         struct bnxt_link_info link_req;
1989         uint16_t speed;
1990
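        /*
         * VFs and NPAR functions do not own the PHY, so link
         * configuration is silently skipped for them.
         */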
1991         if (BNXT_NPAR_PF(bp) || BNXT_VF(bp))
1992                 return 0;
1993
1994         rc = bnxt_valid_link_speed(dev_conf->link_speeds,
1995                         bp->eth_dev->data->port_id);
1996         if (rc)
1997                 goto error;
1998
1999         memset(&link_req, 0, sizeof(link_req));
2000         link_req.link_up = link_up;
2001         if (!link_up)
2002                 goto port_phy_cfg;
2003
2004         speed = bnxt_parse_eth_link_speed(dev_conf->link_speeds);
2005         link_req.phy_flags = HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESET_PHY;
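        /*
         * speed == 0 means autoneg: advertise a speed mask rather than
         * forcing a single speed.
         */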
2006         if (speed == 0) {
2007                 link_req.phy_flags |=
2008                                 HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESTART_AUTONEG;
2009                 link_req.auto_mode =
2010                                 HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_SPEED_MASK;
2011                 link_req.auto_link_speed_mask =
2012                         bnxt_parse_eth_link_speed_mask(bp,
2013                                                        dev_conf->link_speeds);
2014         } else {
2015                 link_req.phy_flags |= HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE;
2016                 link_req.link_speed = speed;
2017                 RTE_LOG(INFO, PMD, "Set Link Speed %x\n", speed);
2018         }
2019         link_req.duplex = bnxt_parse_eth_link_duplex(dev_conf->link_speeds);
2020         link_req.auto_pause = bp->link_info.auto_pause;
2021         link_req.force_pause = bp->link_info.force_pause;
2022
2023 port_phy_cfg:
2024         rc = bnxt_hwrm_port_phy_cfg(bp, &link_req);
2025         if (rc) {
2026                 RTE_LOG(ERR, PMD,
2027                         "Set link config failed with rc %d\n", rc);
2028         }
2029
2030 error:
2031         return rc;
2032 }
2033
2034 /* JIRA 22088 */
2035 int bnxt_hwrm_func_qcfg(struct bnxt *bp)
2036 {
2037         struct hwrm_func_qcfg_input req = {0};
2038         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
2039         int rc = 0;
2040
2041         HWRM_PREP(req, FUNC_QCFG, -1, resp);
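        /* fid 0xffff addresses the calling function itself */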
2042         req.fid = rte_cpu_to_le_16(0xffff);
2043
2044         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2045
2046         HWRM_CHECK_RESULT;
2047
2048         /* Hardcoded 12-bit (0xfff) VLAN ID mask */
2049         bp->vlan = rte_le_to_cpu_16(resp->vlan) & 0xfff;
2050
2051         switch (resp->port_partition_type) {
2052         case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_0:
2053         case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_5:
2054         case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR2_0:
2055                 bp->port_partition_type = resp->port_partition_type;
2056                 break;
2057         default:
2058                 bp->port_partition_type = 0;
2059                 break;
2060         }
2061
2062         return rc;
2063 }
2064
2065 static void copy_func_cfg_to_qcaps(struct hwrm_func_cfg_input *fcfg,
2066                                    struct hwrm_func_qcaps_output *qcaps)
2067 {
2068         qcaps->max_rsscos_ctx = fcfg->num_rsscos_ctxs;
2069         memcpy(qcaps->mac_address, fcfg->dflt_mac_addr,
2070                sizeof(qcaps->mac_address));
2071         qcaps->max_l2_ctxs = fcfg->num_l2_ctxs;
2072         qcaps->max_rx_rings = fcfg->num_rx_rings;
2073         qcaps->max_tx_rings = fcfg->num_tx_rings;
2074         qcaps->max_cmpl_rings = fcfg->num_cmpl_rings;
2075         qcaps->max_stat_ctx = fcfg->num_stat_ctxs;
2076         qcaps->max_vfs = 0;
2077         qcaps->first_vf_id = 0;
2078         qcaps->max_vnics = fcfg->num_vnics;
2079         qcaps->max_decap_records = 0;
2080         qcaps->max_encap_records = 0;
2081         qcaps->max_tx_wm_flows = 0;
2082         qcaps->max_tx_em_flows = 0;
2083         qcaps->max_rx_wm_flows = 0;
2084         qcaps->max_rx_em_flows = 0;
2085         qcaps->max_flow_id = 0;
2086         qcaps->max_mcast_filters = fcfg->num_mcast_filters;
2087         qcaps->max_sp_tx_rings = 0;
2088         qcaps->max_hw_ring_grps = fcfg->num_hw_ring_grps;
2089 }
2090
2091 static int bnxt_hwrm_pf_func_cfg(struct bnxt *bp, int tx_rings)
2092 {
2093         struct hwrm_func_cfg_input req = {0};
2094         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2095         int rc;
2096
2097         req.enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_MTU |
2098                         HWRM_FUNC_CFG_INPUT_ENABLES_MRU |
2099                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS |
2100                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_STAT_CTXS |
2101                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
2102                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_TX_RINGS |
2103                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RX_RINGS |
2104                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_L2_CTXS |
2105                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS |
2106                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS);
2107         req.flags = rte_cpu_to_le_32(bp->pf.func_cfg_flags);
2108         req.mtu = rte_cpu_to_le_16(BNXT_MAX_MTU);
2109         req.mru = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
2110                                    ETHER_CRC_LEN + VLAN_TAG_SIZE);
2111         req.num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx);
2112         req.num_stat_ctxs = rte_cpu_to_le_16(bp->max_stat_ctx);
2113         req.num_cmpl_rings = rte_cpu_to_le_16(bp->max_cp_rings);
2114         req.num_tx_rings = rte_cpu_to_le_16(tx_rings);
2115         req.num_rx_rings = rte_cpu_to_le_16(bp->max_rx_rings);
2116         req.num_l2_ctxs = rte_cpu_to_le_16(bp->max_l2_ctx);
2117         req.num_vnics = rte_cpu_to_le_16(bp->max_vnics);
2118         req.num_hw_ring_grps = rte_cpu_to_le_16(bp->max_ring_grps);
2119         req.fid = rte_cpu_to_le_16(0xffff);
2120
2121         HWRM_PREP(req, FUNC_CFG, -1, resp);
2122
2123         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2124         HWRM_CHECK_RESULT;
2125
2126         return rc;
2127 }
2128
2129 static void populate_vf_func_cfg_req(struct bnxt *bp,
2130                                      struct hwrm_func_cfg_input *req,
2131                                      int num_vfs)
2132 {
2133         req->enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_MTU |
2134                         HWRM_FUNC_CFG_INPUT_ENABLES_MRU |
2135                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS |
2136                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_STAT_CTXS |
2137                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
2138                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_TX_RINGS |
2139                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RX_RINGS |
2140                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_L2_CTXS |
2141                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS |
2142                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS);
2143
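        /*
         * Split each resource pool evenly across the PF and all VFs,
         * i.e. num_vfs + 1 functions in total.
         */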
2144         req->mtu = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
2145                                     ETHER_CRC_LEN + VLAN_TAG_SIZE);
2146         req->mru = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
2147                                     ETHER_CRC_LEN + VLAN_TAG_SIZE);
2148         req->num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx /
2149                                                 (num_vfs + 1));
2150         req->num_stat_ctxs = rte_cpu_to_le_16(bp->max_stat_ctx / (num_vfs + 1));
2151         req->num_cmpl_rings = rte_cpu_to_le_16(bp->max_cp_rings /
2152                                                (num_vfs + 1));
2153         req->num_tx_rings = rte_cpu_to_le_16(bp->max_tx_rings / (num_vfs + 1));
2154         req->num_rx_rings = rte_cpu_to_le_16(bp->max_rx_rings / (num_vfs + 1));
2155         req->num_l2_ctxs = rte_cpu_to_le_16(bp->max_l2_ctx / (num_vfs + 1));
2156         /* TODO: For now, do not support VMDq/RFS on VFs. */
2157         req->num_vnics = rte_cpu_to_le_16(1);
2158         req->num_hw_ring_grps = rte_cpu_to_le_16(bp->max_ring_grps /
2159                                                  (num_vfs + 1));
2160 }
2161
2162 static void add_random_mac_if_needed(struct bnxt *bp,
2163                                      struct hwrm_func_cfg_input *cfg_req,
2164                                      int vf)
2165 {
2166         struct ether_addr mac;
2167
2168         if (bnxt_hwrm_func_qcfg_vf_default_mac(bp, vf, &mac))
2169                 return;
2170
2171         if (memcmp(mac.addr_bytes, "\x00\x00\x00\x00\x00\x00", ETHER_ADDR_LEN) == 0) {
2172                 cfg_req->enables |=
2173                 rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
2174                 eth_random_addr(cfg_req->dflt_mac_addr);
2175                 bp->pf.vf_info[vf].random_mac = true;
2176         } else {
2177                 memcpy(cfg_req->dflt_mac_addr, mac.addr_bytes, ETHER_ADDR_LEN);
2178         }
2179 }
2180
2181 static void reserve_resources_from_vf(struct bnxt *bp,
2182                                       struct hwrm_func_cfg_input *cfg_req,
2183                                       int vf)
2184 {
2185         struct hwrm_func_qcaps_input req = {0};
2186         struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
2187         int rc;
2188
2189         /* Get the actual allocated values now */
2190         HWRM_PREP(req, FUNC_QCAPS, -1, resp);
2191         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2192         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2193
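        /*
         * If the query fails, fall back to the values just requested via
         * func_cfg so the PF resource accounting below stays consistent.
         */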
2194         if (rc) {
2195                 RTE_LOG(ERR, PMD, "hwrm_func_qcaps failed rc:%d\n", rc);
2196                 copy_func_cfg_to_qcaps(cfg_req, resp);
2197         } else if (resp->error_code) {
2198                 rc = rte_le_to_cpu_16(resp->error_code);
2199                 RTE_LOG(ERR, PMD, "hwrm_func_qcaps error %d\n", rc);
2200                 copy_func_cfg_to_qcaps(cfg_req, resp);
2201         }
2202
2203         bp->max_rsscos_ctx -= rte_le_to_cpu_16(resp->max_rsscos_ctx);
2204         bp->max_stat_ctx -= rte_le_to_cpu_16(resp->max_stat_ctx);
2205         bp->max_cp_rings -= rte_le_to_cpu_16(resp->max_cmpl_rings);
2206         bp->max_tx_rings -= rte_le_to_cpu_16(resp->max_tx_rings);
2207         bp->max_rx_rings -= rte_le_to_cpu_16(resp->max_rx_rings);
2208         bp->max_l2_ctx -= rte_le_to_cpu_16(resp->max_l2_ctxs);
2209         /*
2210          * TODO: While not supporting VMDq with VFs, max_vnics is always
2211          * forced to 1 in this case
2212          */
2213         /* bp->max_vnics -= rte_le_to_cpu_16(resp->max_vnics); */
2214         bp->max_ring_grps -= rte_le_to_cpu_16(resp->max_hw_ring_grps);
2215 }
2216
2217 int bnxt_hwrm_func_qcfg_current_vf_vlan(struct bnxt *bp, int vf)
2218 {
2219         struct hwrm_func_qcfg_input req = {0};
2220         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
2221         int rc;
2222
2223         /* Query the default VLAN currently configured for this VF */
2224         HWRM_PREP(req, FUNC_QCFG, -1, resp);
2225         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2226         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2227         if (rc) {
2228                 RTE_LOG(ERR, PMD, "hwrm_func_qcfg failed rc:%d\n", rc);
2229                 return -1;
2230         } else if (resp->error_code) {
2231                 rc = rte_le_to_cpu_16(resp->error_code);
2232                 RTE_LOG(ERR, PMD, "hwrm_func_qcfg error %d\n", rc);
2233                 return -1;
2234         }
2235         return rte_le_to_cpu_16(resp->vlan);
2236 }
2237
2238 static int update_pf_resource_max(struct bnxt *bp)
2239 {
2240         struct hwrm_func_qcfg_input req = {0};
2241         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
2242         int rc;
2243
2244         /* And copy the allocated numbers into the pf struct */
2245         HWRM_PREP(req, FUNC_QCFG, -1, resp);
2246         req.fid = rte_cpu_to_le_16(0xffff);
2247         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2248         HWRM_CHECK_RESULT;
2249
2250         /* Only TX ring value reflects actual allocation? TODO */
2251         bp->max_tx_rings = rte_le_to_cpu_16(resp->alloc_tx_rings);
2252         bp->pf.evb_mode = resp->evb_mode;
2253
2254         return rc;
2255 }
2256
2257 int bnxt_hwrm_allocate_pf_only(struct bnxt *bp)
2258 {
2259         int rc;
2260
2261         if (!BNXT_PF(bp)) {
2262                 RTE_LOG(ERR, PMD, "Attempt to allocate VFs on a VF!\n");
2263                 return -1;
2264         }
2265
2266         rc = bnxt_hwrm_func_qcaps(bp);
2267         if (rc)
2268                 return rc;
2269
2270         bp->pf.func_cfg_flags &=
2271                 ~(HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE |
2272                   HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE);
2273         bp->pf.func_cfg_flags |=
2274                 HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE;
2275         rc = bnxt_hwrm_pf_func_cfg(bp, bp->max_tx_rings);
2276         return rc;
2277 }
2278
2279 int bnxt_hwrm_allocate_vfs(struct bnxt *bp, int num_vfs)
2280 {
2281         struct hwrm_func_cfg_input req = {0};
2282         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2283         int i;
2284         size_t sz;
2285         int rc = 0;
2286         size_t req_buf_sz;
2287
2288         if (!BNXT_PF(bp)) {
2289                 RTE_LOG(ERR, PMD, "Attempt to allocate VFs on a VF!\n");
2290                 return -1;
2291         }
2292
2293         rc = bnxt_hwrm_func_qcaps(bp);
2294
2295         if (rc)
2296                 return rc;
2297
2298         bp->pf.active_vfs = num_vfs;
2299
2300         /*
2301          * First, configure the PF to only use one TX ring.  This ensures that
2302          * there are enough rings for all VFs.
2303          *
2304          * If we don't do this, when we call func_alloc() later, we will lock
2305          * extra rings to the PF that won't be available during func_cfg() of
2306          * the VFs.
2307          *
2308          * This has been fixed with firmware versions above 20.6.54
2309          */
2310         bp->pf.func_cfg_flags &=
2311                 ~(HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE |
2312                   HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE);
2313         bp->pf.func_cfg_flags |=
2314                 HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE;
2315         rc = bnxt_hwrm_pf_func_cfg(bp, 1);
2316         if (rc)
2317                 return rc;
2318
2319         /*
2320          * Now, create and register a buffer to hold forwarded VF requests
2321          */
2322         req_buf_sz = num_vfs * HWRM_MAX_REQ_LEN;
2323         bp->pf.vf_req_buf = rte_malloc("bnxt_vf_fwd", req_buf_sz,
2324                 page_roundup(req_buf_sz));
2325         if (bp->pf.vf_req_buf == NULL) {
2326                 rc = -ENOMEM;
2327                 goto error_free;
2328         }
2329         for (sz = 0; sz < req_buf_sz; sz += getpagesize())
2330                 rte_mem_lock_page(((char *)bp->pf.vf_req_buf) + sz);
2331         for (i = 0; i < num_vfs; i++)
2332                 bp->pf.vf_info[i].req_buf = ((char *)bp->pf.vf_req_buf) +
2333                                         (i * HWRM_MAX_REQ_LEN);
2334
2335         rc = bnxt_hwrm_func_buf_rgtr(bp);
2336         if (rc)
2337                 goto error_free;
2338
2339         populate_vf_func_cfg_req(bp, &req, num_vfs);
2340
2341         bp->pf.active_vfs = 0;
2342         for (i = 0; i < num_vfs; i++) {
2343                 add_random_mac_if_needed(bp, &req, i);
2344
2345                 HWRM_PREP(req, FUNC_CFG, -1, resp);
2346                 req.flags = rte_cpu_to_le_32(bp->pf.vf_info[i].func_cfg_flags);
2347                 req.fid = rte_cpu_to_le_16(bp->pf.vf_info[i].fid);
2348                 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2349
2350                 /* Clear enable flag for next pass */
2351                 req.enables &= ~rte_cpu_to_le_32(
2352                                 HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
2353
2354                 if (rc || resp->error_code) {
2355                         RTE_LOG(ERR, PMD,
2356                                 "Failed to initialize VF %d\n", i);
2357                         RTE_LOG(ERR, PMD,
2358                                 "Not all VFs available. (%d, %d)\n",
2359                                 rc, resp->error_code);
2360                         break;
2361                 }
2362
2363                 reserve_resources_from_vf(bp, &req, i);
2364                 bp->pf.active_vfs++;
2365                 bnxt_hwrm_func_clr_stats(bp, bp->pf.vf_info[i].fid);
2366         }
2367
2368         /*
2369          * Now configure the PF to use "the rest" of the resources.
2370          * We use STD_TX_RING_MODE here, which limits the number of TX
2371          * rings; this allows QoS to function properly. Without it, the
2372          * PF rings would break bandwidth settings.
2373          */
2374         rc = bnxt_hwrm_pf_func_cfg(bp, bp->max_tx_rings);
2375         if (rc)
2376                 goto error_free;
2377
2378         rc = update_pf_resource_max(bp);
2379         if (rc)
2380                 goto error_free;
2381
2382         return rc;
2383
2384 error_free:
2385         bnxt_hwrm_func_buf_unrgtr(bp);
2386         return rc;
2387 }
2388
2389 int bnxt_hwrm_pf_evb_mode(struct bnxt *bp)
2390 {
2391         struct hwrm_func_cfg_input req = {0};
2392         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2393         int rc;
2394
2395         HWRM_PREP(req, FUNC_CFG, -1, resp);
2396
2397         req.fid = rte_cpu_to_le_16(0xffff);
2398         req.enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_EVB_MODE);
2399         req.evb_mode = bp->pf.evb_mode;
2400
2401         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2402         HWRM_CHECK_RESULT;
2403
2404         return rc;
2405 }
2406
2407 int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, uint16_t port,
2408                                 uint8_t tunnel_type)
2409 {
2410         struct hwrm_tunnel_dst_port_alloc_input req = {0};
2411         struct hwrm_tunnel_dst_port_alloc_output *resp = bp->hwrm_cmd_resp_addr;
2412         int rc = 0;
2413
2414         HWRM_PREP(req, TUNNEL_DST_PORT_ALLOC, -1, resp);
2415         req.tunnel_type = tunnel_type;
2416         req.tunnel_dst_port_val = port;
2417         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2418         HWRM_CHECK_RESULT;
2419
2420         switch (tunnel_type) {
2421         case HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN:
2422                 bp->vxlan_fw_dst_port_id = resp->tunnel_dst_port_id;
2423                 bp->vxlan_port = port;
2424                 break;
2425         case HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_GENEVE:
2426                 bp->geneve_fw_dst_port_id = resp->tunnel_dst_port_id;
2427                 bp->geneve_port = port;
2428                 break;
2429         default:
2430                 break;
2431         }
2432         return rc;
2433 }
2434
2435 int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, uint16_t port,
2436                                 uint8_t tunnel_type)
2437 {
2438         struct hwrm_tunnel_dst_port_free_input req = {0};
2439         struct hwrm_tunnel_dst_port_free_output *resp = bp->hwrm_cmd_resp_addr;
2440         int rc = 0;
2441
2442         HWRM_PREP(req, TUNNEL_DST_PORT_FREE, -1, resp);
2443         req.tunnel_type = tunnel_type;
2444         req.tunnel_dst_port_id = rte_cpu_to_be_16(port);
2445         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2446         HWRM_CHECK_RESULT;
2447
2448         return rc;
2449 }
2450
2451 int bnxt_hwrm_func_cfg_vf_set_flags(struct bnxt *bp, uint16_t vf,
2452                                         uint32_t flags)
2453 {
2454         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2455         struct hwrm_func_cfg_input req = {0};
2456         int rc;
2457
2458         HWRM_PREP(req, FUNC_CFG, -1, resp);
2459         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2460         req.flags = rte_cpu_to_le_32(flags);
2461         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2462         HWRM_CHECK_RESULT;
2463
2464         return rc;
2465 }
2466
2467 void vf_vnic_set_rxmask_cb(struct bnxt_vnic_info *vnic, void *flagp)
2468 {
2469         uint32_t *flag = flagp;
2470
2471         vnic->flags = *flag;
2472 }
2473
2474 int bnxt_set_rx_mask_no_vlan(struct bnxt *bp, struct bnxt_vnic_info *vnic)
2475 {
2476         return bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
2477 }
2478
2479 int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp)
2480 {
2481         int rc = 0;
2482         struct hwrm_func_buf_rgtr_input req = {.req_type = 0 };
2483         struct hwrm_func_buf_rgtr_output *resp = bp->hwrm_cmd_resp_addr;
2484
2485         HWRM_PREP(req, FUNC_BUF_RGTR, -1, resp);
2486
2487         req.req_buf_num_pages = rte_cpu_to_le_16(1);
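        /*
         * The request-buffer page size is encoded as a log2 exponent;
         * page_getenum() returns that exponent for the total buffer size.
         */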
2488         req.req_buf_page_size = rte_cpu_to_le_16(
2489                          page_getenum(bp->pf.active_vfs * HWRM_MAX_REQ_LEN));
2490         req.req_buf_len = rte_cpu_to_le_16(HWRM_MAX_REQ_LEN);
2491         req.req_buf_page_addr[0] =
2492                 rte_cpu_to_le_64(rte_mem_virt2phy(bp->pf.vf_req_buf));
2493         if (req.req_buf_page_addr[0] == 0) {
2494                 RTE_LOG(ERR, PMD,
2495                         "unable to map buffer address to physical memory\n");
2496                 return -ENOMEM;
2497         }
2498
2499         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2500
2501         HWRM_CHECK_RESULT;
2502
2503         return rc;
2504 }
2505
2506 int bnxt_hwrm_func_buf_unrgtr(struct bnxt *bp)
2507 {
2508         int rc = 0;
2509         struct hwrm_func_buf_unrgtr_input req = {.req_type = 0 };
2510         struct hwrm_func_buf_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;
2511
2512         HWRM_PREP(req, FUNC_BUF_UNRGTR, -1, resp);
2513
2514         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2515
2516         HWRM_CHECK_RESULT;
2517
2518         return rc;
2519 }
2520
2521 int bnxt_hwrm_func_cfg_def_cp(struct bnxt *bp)
2522 {
2523         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2524         struct hwrm_func_cfg_input req = {0};
2525         int rc;
2526
2527         HWRM_PREP(req, FUNC_CFG, -1, resp);
2528         req.fid = rte_cpu_to_le_16(0xffff);
2529         req.flags = rte_cpu_to_le_32(bp->pf.func_cfg_flags);
2530         req.enables = rte_cpu_to_le_32(
2531                         HWRM_FUNC_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
2532         req.async_event_cr = rte_cpu_to_le_16(
2533                         bp->def_cp_ring->cp_ring_struct->fw_ring_id);
2534         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2535         HWRM_CHECK_RESULT;
2536
2537         return rc;
2538 }
2539
2540 int bnxt_hwrm_vf_func_cfg_def_cp(struct bnxt *bp)
2541 {
2542         struct hwrm_func_vf_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2543         struct hwrm_func_vf_cfg_input req = {0};
2544         int rc;
2545
2546         HWRM_PREP(req, FUNC_VF_CFG, -1, resp);
2547         req.enables = rte_cpu_to_le_32(
2548                         HWRM_FUNC_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
2549         req.async_event_cr = rte_cpu_to_le_16(
2550                         bp->def_cp_ring->cp_ring_struct->fw_ring_id);
2551         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2552         HWRM_CHECK_RESULT;
2553
2554         return rc;
2555 }
2556
2557 int bnxt_hwrm_set_default_vlan(struct bnxt *bp, int vf, uint8_t is_vf)
2558 {
2559         struct hwrm_func_cfg_input req = {0};
2560         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2561         uint16_t dflt_vlan, fid;
2562         uint32_t func_cfg_flags;
2563         int rc = 0;
2564
2565         HWRM_PREP(req, FUNC_CFG, -1, resp);
2566
2567         if (is_vf) {
2568                 dflt_vlan = bp->pf.vf_info[vf].dflt_vlan;
2569                 fid = bp->pf.vf_info[vf].fid;
2570                 func_cfg_flags = bp->pf.vf_info[vf].func_cfg_flags;
2571         } else {
2572                 fid = 0xffff;
2573                 func_cfg_flags = bp->pf.func_cfg_flags;
2574                 dflt_vlan = bp->vlan;
2575         }
2576
2577         req.flags = rte_cpu_to_le_32(func_cfg_flags);
2578         req.fid = rte_cpu_to_le_16(fid);
2579         req.enables |= rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_VLAN);
2580         req.dflt_vlan = rte_cpu_to_le_16(dflt_vlan);
2581
2582         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2583         HWRM_CHECK_RESULT;
2584
2585         return rc;
2586 }
2587
2588 int bnxt_hwrm_func_bw_cfg(struct bnxt *bp, uint16_t vf,
2589                         uint16_t max_bw, uint16_t enables)
2590 {
2591         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2592         struct hwrm_func_cfg_input req = {0};
2593         int rc;
2594
2595         HWRM_PREP(req, FUNC_CFG, -1, resp);
2596         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2597         req.enables |= rte_cpu_to_le_32(enables);
2598         req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
2599         req.max_bw = rte_cpu_to_le_32(max_bw);
2600         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2601         HWRM_CHECK_RESULT;
2602
2603         return rc;
2604 }
2605
2606 int bnxt_hwrm_set_vf_vlan(struct bnxt *bp, int vf)
2607 {
2608         struct hwrm_func_cfg_input req = {0};
2609         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2610         int rc = 0;
2611
2612         HWRM_PREP(req, FUNC_CFG, -1, resp);
2613         req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
2614         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2615         req.enables |= rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_VLAN);
2616         req.dflt_vlan = rte_cpu_to_le_16(bp->pf.vf_info[vf].dflt_vlan);
2617
2618         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2619         HWRM_CHECK_RESULT;
2620
2621         return rc;
2622 }
2623
2624 int bnxt_hwrm_reject_fwd_resp(struct bnxt *bp, uint16_t target_id,
2625                               void *encaped, size_t ec_size)
2626 {
2627         int rc = 0;
2628         struct hwrm_reject_fwd_resp_input req = {.req_type = 0};
2629         struct hwrm_reject_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
2630
2631         if (ec_size > sizeof(req.encap_request))
2632                 return -1;
2633
2634         HWRM_PREP(req, REJECT_FWD_RESP, -1, resp);
2635
2636         req.encap_resp_target_id = rte_cpu_to_le_16(target_id);
2637         memcpy(req.encap_request, encaped, ec_size);
2638
2639         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2640
2641         HWRM_CHECK_RESULT;
2642
2643         return rc;
2644 }
2645
2646 int bnxt_hwrm_func_qcfg_vf_default_mac(struct bnxt *bp, uint16_t vf,
2647                                        struct ether_addr *mac)
2648 {
2649         struct hwrm_func_qcfg_input req = {0};
2650         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
2651         int rc;
2652
2653         HWRM_PREP(req, FUNC_QCFG, -1, resp);
2654         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2655         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2656
2657         HWRM_CHECK_RESULT;
2658
2659         memcpy(mac->addr_bytes, resp->mac_address, ETHER_ADDR_LEN);
2660         return rc;
2661 }
2662
2663 int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, uint16_t target_id,
2664                             void *encaped, size_t ec_size)
2665 {
2666         int rc = 0;
2667         struct hwrm_exec_fwd_resp_input req = {.req_type = 0};
2668         struct hwrm_exec_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
2669
2670         if (ec_size > sizeof(req.encap_request))
2671                 return -1;
2672
2673         HWRM_PREP(req, EXEC_FWD_RESP, -1, resp);
2674
2675         req.encap_resp_target_id = rte_cpu_to_le_16(target_id);
2676         memcpy(req.encap_request, encaped, ec_size);
2677
2678         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2679
2680         HWRM_CHECK_RESULT;
2681
2682         return rc;
2683 }
2684
2685 int bnxt_hwrm_ctx_qstats(struct bnxt *bp, uint32_t cid, int idx,
2686                          struct rte_eth_stats *stats)
2687 {
2688         int rc = 0;
2689         struct hwrm_stat_ctx_query_input req = {.req_type = 0};
2690         struct hwrm_stat_ctx_query_output *resp = bp->hwrm_cmd_resp_addr;
2691
2692         HWRM_PREP(req, STAT_CTX_QUERY, -1, resp);
2693
2694         req.stat_ctx_id = rte_cpu_to_le_32(cid);
2695
2696         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2697
2698         HWRM_CHECK_RESULT;
2699
2700         stats->q_ipackets[idx] = rte_le_to_cpu_64(resp->rx_ucast_pkts);
2701         stats->q_ipackets[idx] += rte_le_to_cpu_64(resp->rx_mcast_pkts);
2702         stats->q_ipackets[idx] += rte_le_to_cpu_64(resp->rx_bcast_pkts);
2703         stats->q_ibytes[idx] = rte_le_to_cpu_64(resp->rx_ucast_bytes);
2704         stats->q_ibytes[idx] += rte_le_to_cpu_64(resp->rx_mcast_bytes);
2705         stats->q_ibytes[idx] += rte_le_to_cpu_64(resp->rx_bcast_bytes);
2706
2707         stats->q_opackets[idx] = rte_le_to_cpu_64(resp->tx_ucast_pkts);
2708         stats->q_opackets[idx] += rte_le_to_cpu_64(resp->tx_mcast_pkts);
2709         stats->q_opackets[idx] += rte_le_to_cpu_64(resp->tx_bcast_pkts);
2710         stats->q_obytes[idx] = rte_le_to_cpu_64(resp->tx_ucast_bytes);
2711         stats->q_obytes[idx] += rte_le_to_cpu_64(resp->tx_mcast_bytes);
2712         stats->q_obytes[idx] += rte_le_to_cpu_64(resp->tx_bcast_bytes);
2713
2714         stats->q_errors[idx] = rte_le_to_cpu_64(resp->rx_err_pkts);
2715         stats->q_errors[idx] += rte_le_to_cpu_64(resp->tx_err_pkts);
2716         stats->q_errors[idx] += rte_le_to_cpu_64(resp->rx_drop_pkts);
2717
2718         return rc;
2719 }
2720
2721 int bnxt_hwrm_port_qstats(struct bnxt *bp)
2722 {
2723         struct hwrm_port_qstats_input req = {0};
2724         struct hwrm_port_qstats_output *resp = bp->hwrm_cmd_resp_addr;
2725         struct bnxt_pf_info *pf = &bp->pf;
2726         int rc;
2727
2728         if (!(bp->flags & BNXT_FLAG_PORT_STATS))
2729                 return 0;
2730
2731         HWRM_PREP(req, PORT_QSTATS, -1, resp);
2732         req.port_id = rte_cpu_to_le_16(pf->port_id);
2733         req.tx_stat_host_addr = rte_cpu_to_le_64(bp->hw_tx_port_stats_map);
2734         req.rx_stat_host_addr = rte_cpu_to_le_64(bp->hw_rx_port_stats_map);
2735         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2736         HWRM_CHECK_RESULT;
2737         return rc;
2738 }
2739
2740 int bnxt_hwrm_port_clr_stats(struct bnxt *bp)
2741 {
2742         struct hwrm_port_clr_stats_input req = {0};
2743         struct hwrm_port_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;
2744         struct bnxt_pf_info *pf = &bp->pf;
2745         int rc;
2746
2747         if (!(bp->flags & BNXT_FLAG_PORT_STATS))
2748                 return 0;
2749
2750         HWRM_PREP(req, PORT_CLR_STATS, -1, resp);
2751         req.port_id = rte_cpu_to_le_16(pf->port_id);
2752         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2753         HWRM_CHECK_RESULT;
2754         return rc;
2755 }
2756
2757 int bnxt_hwrm_port_led_qcaps(struct bnxt *bp)
2758 {
2759         struct hwrm_port_led_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
2760         struct hwrm_port_led_qcaps_input req = {0};
2761         int rc;
2762
2763         if (BNXT_VF(bp))
2764                 return 0;
2765
2766         HWRM_PREP(req, PORT_LED_QCAPS, -1, resp);
2767         req.port_id = bp->pf.port_id;
2768         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2769         HWRM_CHECK_RESULT;
2770
2771         if (resp->num_leds > 0 && resp->num_leds < BNXT_MAX_LED) {
2772                 unsigned int i;
2773
2774                 bp->num_leds = resp->num_leds;
2775                 memcpy(bp->leds, &resp->led0_id,
2776                         sizeof(bp->leds[0]) * bp->num_leds);
2777                 for (i = 0; i < bp->num_leds; i++) {
2778                         struct bnxt_led_info *led = &bp->leds[i];
2779
2780                         uint16_t caps = led->led_state_caps;
2781
2782                         if (!led->led_group_id ||
2783                                 !BNXT_LED_ALT_BLINK_CAP(caps)) {
2784                                 bp->num_leds = 0;
2785                                 break;
2786                         }
2787                 }
2788         }
2789         return rc;
2790 }
2791
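/*
 * Drive every cached port LED: blink in the alternate pattern with a
 * 500 ms on/off cadence when led_on is true, otherwise restore the
 * firmware default state.  An ethdev LED callback would be a thin
 * wrapper over this; a sketch (the op name is an assumption, not part
 * of this file):
 *
 *	static int bnxt_dev_led_on_op(struct rte_eth_dev *dev)
 *	{
 *		struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
 *
 *		return bnxt_hwrm_port_led_cfg(bp, true);
 *	}
 */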
2792 int bnxt_hwrm_port_led_cfg(struct bnxt *bp, bool led_on)
2793 {
2794         struct hwrm_port_led_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2795         struct hwrm_port_led_cfg_input req = {0};
2796         struct bnxt_led_cfg *led_cfg;
	uint8_t led_state = HWRM_PORT_LED_CFG_INPUT_LED0_STATE_DEFAULT;
2798         uint16_t duration = 0;
2799         int rc, i;
2800
2801         if (!bp->num_leds || BNXT_VF(bp))
2802                 return -EOPNOTSUPP;
2803
2804         HWRM_PREP(req, PORT_LED_CFG, -1, resp);
2805         if (led_on) {
2806                 led_state = HWRM_PORT_LED_CFG_INPUT_LED0_STATE_BLINKALT;
2807                 duration = rte_cpu_to_le_16(500);
2808         }
2809         req.port_id = bp->pf.port_id;
2810         req.num_leds = bp->num_leds;
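	/*
	 * The request carries an array of per-LED config blocks starting
	 * at led0_id; overlay struct bnxt_led_cfg on that region and fill
	 * one entry per cached LED.
	 */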
2811         led_cfg = (struct bnxt_led_cfg *)&req.led0_id;
2812         for (i = 0; i < bp->num_leds; i++, led_cfg++) {
2813                 req.enables |= BNXT_LED_DFLT_ENABLES(i);
2814                 led_cfg->led_id = bp->leds[i].led_id;
2815                 led_cfg->led_state = led_state;
2816                 led_cfg->led_blink_on = duration;
2817                 led_cfg->led_blink_off = duration;
2818                 led_cfg->led_group_id = bp->leds[i].led_group_id;
2819         }
2820
2821         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2822         HWRM_CHECK_RESULT;
2823
2824         return rc;
2825 }
2826
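/*
 * vnic_cb for bnxt_hwrm_func_vf_vnic_query_and_config(): bump the
 * caller-supplied counter once per active VNIC.  Nothing on the VNIC
 * is changed, so it is paired with the no-op HWRM stub below.
 */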
2827 static void
2828 bnxt_vnic_count(struct bnxt_vnic_info *vnic __rte_unused, void *cbdata)
2829 {
2830         uint32_t *count = cbdata;
2831
	(*count)++;
2833 }
2834
2835 static int bnxt_vnic_count_hwrm_stub(struct bnxt *bp __rte_unused,
2836                                      struct bnxt_vnic_info *vnic __rte_unused)
2837 {
2838         return 0;
2839 }
2840
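/*
 * Count the active VNICs of a VF by walking them with the callbacks
 * above.  A hypothetical caller, purely for illustration:
 *
 *	if (bnxt_vf_vnic_count(bp, vf) == 0)
 *		RTE_LOG(INFO, PMD, "VF %u has no active VNICs\n", vf);
 */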
2841 int bnxt_vf_vnic_count(struct bnxt *bp, uint16_t vf)
2842 {
2843         uint32_t count = 0;
2844
2845         bnxt_hwrm_func_vf_vnic_query_and_config(bp, vf, bnxt_vnic_count,
2846             &count, bnxt_vnic_count_hwrm_stub);
2847
2848         return count;
2849 }
2850
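/*
 * Fill vnic_ids with the firmware ids of the given VF's VNICs.  The
 * caller must pass a table with room for bp->pf.total_vnics entries,
 * locked into physical memory, since the firmware writes it by DMA.
 * Returns the number of ids written, or a negative value on failure.
 */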
2851 static int bnxt_hwrm_func_vf_vnic_query(struct bnxt *bp, uint16_t vf,
2852                                         uint16_t *vnic_ids)
2853 {
2854         struct hwrm_func_vf_vnic_ids_query_input req = {0};
2855         struct hwrm_func_vf_vnic_ids_query_output *resp =
2856                                                 bp->hwrm_cmd_resp_addr;
2857         int rc;
2858
2859         /* First query all VNIC ids */
	HWRM_PREP(req, FUNC_VF_VNIC_IDS_QUERY, -1, resp);
2861
2862         req.vf_id = rte_cpu_to_le_16(bp->pf.first_vf_id + vf);
2863         req.max_vnic_id_cnt = rte_cpu_to_le_32(bp->pf.total_vnics);
2864         req.vnic_id_tbl_addr = rte_cpu_to_le_64(rte_mem_virt2phy(vnic_ids));
2865
2866         if (req.vnic_id_tbl_addr == 0) {
2867                 RTE_LOG(ERR, PMD,
2868                 "unable to map VNIC ID table address to physical memory\n");
2869                 return -ENOMEM;
2870         }
2871         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2872         if (rc) {
2873                 RTE_LOG(ERR, PMD, "hwrm_func_vf_vnic_query failed rc:%d\n", rc);
2874                 return -1;
2875         } else if (resp->error_code) {
2876                 rc = rte_le_to_cpu_16(resp->error_code);
2877                 RTE_LOG(ERR, PMD, "hwrm_func_vf_vnic_query error %d\n", rc);
2878                 return -1;
2879         }
2880
2881         return rte_le_to_cpu_32(resp->vnic_id_cnt);
2882 }
2883
/*
 * This function queries the VNIC IDs for a specified VF. For each VNIC it
 * then invokes vnic_cb, together with cbdata, to update the relevant
 * fields of vnic_info, and finally calls hwrm_cb to program the new
 * configuration back into the firmware.
 */
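/*
 * A sketch of a typical caller (illustrative only; the callback name and
 * body below are assumptions, not part of this driver): a per-VF knob
 * first edits one field on every VNIC via the vnic_cb, then pushes each
 * VNIC back to the firmware with an existing hwrm_cb such as
 * bnxt_hwrm_vnic_cfg():
 *
 *	static void bnxt_set_strip_cb(struct bnxt_vnic_info *vnic, void *on)
 *	{
 *		vnic->vlan_strip = *(bool *)on;
 *	}
 *
 *	bnxt_hwrm_func_vf_vnic_query_and_config(bp, vf, bnxt_set_strip_cb,
 *						&on, bnxt_hwrm_vnic_cfg);
 */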
2889 int bnxt_hwrm_func_vf_vnic_query_and_config(struct bnxt *bp, uint16_t vf,
2890         void (*vnic_cb)(struct bnxt_vnic_info *, void *), void *cbdata,
2891         int (*hwrm_cb)(struct bnxt *bp, struct bnxt_vnic_info *vnic))
2892 {
2893         struct bnxt_vnic_info vnic;
2894         int rc = 0;
2895         int i, num_vnic_ids;
2896         uint16_t *vnic_ids;
2897         size_t vnic_id_sz;
2898         size_t sz;
2899
2900         /* First query all VNIC ids */
2901         vnic_id_sz = bp->pf.total_vnics * sizeof(*vnic_ids);
2902         vnic_ids = rte_malloc("bnxt_hwrm_vf_vnic_ids_query", vnic_id_sz,
2903                         RTE_CACHE_LINE_SIZE);
	if (vnic_ids == NULL)
		return -ENOMEM;
2908         for (sz = 0; sz < vnic_id_sz; sz += getpagesize())
2909                 rte_mem_lock_page(((char *)vnic_ids) + sz);
2910
2911         num_vnic_ids = bnxt_hwrm_func_vf_vnic_query(bp, vf, vnic_ids);
2912
	if (num_vnic_ids < 0) {
		rte_free(vnic_ids);	/* don't leak the ID table on error */
		return num_vnic_ids;
	}
2915
	/* Retrieve each VNIC, apply vnic_cb, then reprogram it via hwrm_cb */
2918         for (i = 0; i < num_vnic_ids; i++) {
2919                 memset(&vnic, 0, sizeof(struct bnxt_vnic_info));
2920                 vnic.fw_vnic_id = rte_le_to_cpu_16(vnic_ids[i]);
2921                 rc = bnxt_hwrm_vnic_qcfg(bp, &vnic, bp->pf.first_vf_id + vf);
2922                 if (rc)
2923                         break;
2924                 if (vnic.mru <= 4)      /* Indicates unallocated */
2925                         continue;
2926
2927                 vnic_cb(&vnic, cbdata);
2928
2929                 rc = hwrm_cb(bp, &vnic);
2930                 if (rc)
2931                         break;
2932         }
2933
2934         rte_free(vnic_ids);
2935
2936         return rc;
2937 }
2938
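/*
 * Toggle VLAN anti-spoof checking for one VF: when on, the firmware
 * validates the VLAN tags the VF transmits instead of passing them
 * through unchecked.
 */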
2939 int bnxt_hwrm_func_cfg_vf_set_vlan_anti_spoof(struct bnxt *bp, uint16_t vf,
2940                                               bool on)
2941 {
2942         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2943         struct hwrm_func_cfg_input req = {0};
2944         int rc;
2945
2946         HWRM_PREP(req, FUNC_CFG, -1, resp);
2947         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2948         req.enables |= rte_cpu_to_le_32(
2949                         HWRM_FUNC_CFG_INPUT_ENABLES_VLAN_ANTISPOOF_MODE);
2950         req.vlan_antispoof_mode = on ?
2951                 HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_VALIDATE_VLAN :
2952                 HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_NOCHECK;
2953         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2954         HWRM_CHECK_RESULT;
2955
2956         return rc;
2957 }
2958
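/*
 * Return the firmware id of the VF's default VNIC (the one with
 * func_default set), or -1 if none is found or a query fails.
 */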
2959 int bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(struct bnxt *bp, int vf)
2960 {
2961         struct bnxt_vnic_info vnic;
2962         uint16_t *vnic_ids;
2963         size_t vnic_id_sz;
2964         int num_vnic_ids, i;
2965         size_t sz;
2966         int rc;
2967
2968         vnic_id_sz = bp->pf.total_vnics * sizeof(*vnic_ids);
2969         vnic_ids = rte_malloc("bnxt_hwrm_vf_vnic_ids_query", vnic_id_sz,
2970                         RTE_CACHE_LINE_SIZE);
	if (vnic_ids == NULL)
		return -ENOMEM;
2975
2976         for (sz = 0; sz < vnic_id_sz; sz += getpagesize())
2977                 rte_mem_lock_page(((char *)vnic_ids) + sz);
2978
2979         rc = bnxt_hwrm_func_vf_vnic_query(bp, vf, vnic_ids);
2980         if (rc <= 0)
2981                 goto exit;
2982         num_vnic_ids = rc;
2983
2984         /*
2985          * Loop through to find the default VNIC ID.
2986          * TODO: The easier way would be to obtain the resp->dflt_vnic_id
2987          * by sending the hwrm_func_qcfg command to the firmware.
2988          */
2989         for (i = 0; i < num_vnic_ids; i++) {
2990                 memset(&vnic, 0, sizeof(struct bnxt_vnic_info));
2991                 vnic.fw_vnic_id = rte_le_to_cpu_16(vnic_ids[i]);
2992                 rc = bnxt_hwrm_vnic_qcfg(bp, &vnic,
2993                                         bp->pf.first_vf_id + vf);
2994                 if (rc)
2995                         goto exit;
2996                 if (vnic.func_default) {
2997                         rte_free(vnic_ids);
2998                         return vnic.fw_vnic_id;
2999                 }
3000         }
3001         /* Could not find a default VNIC. */
3002         RTE_LOG(ERR, PMD, "No default VNIC\n");
3003 exit:
3004         rte_free(vnic_ids);
3005         return -1;
3006 }