1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) Broadcom Limited.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Broadcom Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33
34 #include <unistd.h>
35
36 #include <rte_byteorder.h>
37 #include <rte_common.h>
38 #include <rte_cycles.h>
39 #include <rte_malloc.h>
40 #include <rte_memzone.h>
41 #include <rte_version.h>
42
43 #include "bnxt.h"
44 #include "bnxt_cpr.h"
45 #include "bnxt_filter.h"
46 #include "bnxt_hwrm.h"
47 #include "bnxt_rxq.h"
48 #include "bnxt_rxr.h"
49 #include "bnxt_ring.h"
50 #include "bnxt_txq.h"
51 #include "bnxt_txr.h"
52 #include "bnxt_vnic.h"
53 #include "hsi_struct_def_dpdk.h"
54
55 #include <rte_io.h>
56
57 #define HWRM_CMD_TIMEOUT                10000
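/*
 * Note: bnxt_hwrm_send_message() sleeps 600us per poll iteration, so this
 * iteration count amounts to an effective command timeout of roughly 6s.
 */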
58
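/*
 * Snapshot of a VNIC's placement-mode settings, used to save and restore
 * them around bnxt_hwrm_vnic_cfg() (see the plcmodes qcfg/cfg helpers
 * further below).
 */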
59 struct bnxt_plcmodes_cfg {
60         uint32_t        flags;
61         uint16_t        jumbo_thresh;
62         uint16_t        hds_offset;
63         uint16_t        hds_threshold;
64 };
65
66 static int page_getenum(size_t size)
67 {
68         if (size <= 1 << 4)
69                 return 4;
70         if (size <= 1 << 12)
71                 return 12;
72         if (size <= 1 << 13)
73                 return 13;
74         if (size <= 1 << 16)
75                 return 16;
76         if (size <= 1 << 21)
77                 return 21;
78         if (size <= 1 << 22)
79                 return 22;
80         if (size <= 1 << 30)
81                 return 30;
82         PMD_DRV_LOG(ERR, "Page size %zu out of range\n", size);
83         return sizeof(void *) * 8 - 1;
84 }
85
86 static int page_roundup(size_t size)
87 {
88         return 1 << page_getenum(size);
89 }
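
/*
 * For example, page_roundup(3000) returns 4096 (1 << 12); sizes above
 * 1GB hit the error path in page_getenum() above.
 */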
90
/*
 * HWRM Functions (sent to HWRM)
 * These are named bnxt_hwrm_*() and return -1 if bnxt_hwrm_send_message()
 * fails (i.e. a timeout), and a positive non-zero HWRM error code if the
 * command is failed by the ChiMP firmware.
 */
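
/*
 * For example, a caller can therefore distinguish a channel timeout from a
 * firmware rejection (bnxt_hwrm_func_reset() below is used purely as an
 * illustration):
 *
 *	rc = bnxt_hwrm_func_reset(bp);
 *	if (rc < 0)
 *		... transport failure (HWRM channel timeout) ...
 *	else if (rc > 0)
 *		... command failed by firmware (HWRM error code) ...
 */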
97
98 static int bnxt_hwrm_send_message(struct bnxt *bp, void *msg,
99                                         uint32_t msg_len)
100 {
101         unsigned int i;
102         struct input *req = msg;
103         struct output *resp = bp->hwrm_cmd_resp_addr;
104         uint32_t *data = msg;
105         uint8_t *bar;
106         uint8_t *valid;
107         uint16_t max_req_len = bp->max_req_len;
108         struct hwrm_short_input short_input = { 0 };
109
110         if (bp->flags & BNXT_FLAG_SHORT_CMD) {
111                 void *short_cmd_req = bp->hwrm_short_cmd_req_addr;
112
113                 memset(short_cmd_req, 0, bp->max_req_len);
114                 memcpy(short_cmd_req, req, msg_len);
115
116                 short_input.req_type = rte_cpu_to_le_16(req->req_type);
117                 short_input.signature = rte_cpu_to_le_16(
118                                         HWRM_SHORT_REQ_SIGNATURE_SHORT_CMD);
119                 short_input.size = rte_cpu_to_le_16(msg_len);
120                 short_input.req_addr =
121                         rte_cpu_to_le_64(bp->hwrm_short_cmd_req_dma_addr);
122
123                 data = (uint32_t *)&short_input;
124                 msg_len = sizeof(short_input);
125
126                 /* Sync memory write before updating doorbell */
127                 rte_wmb();
128
129                 max_req_len = BNXT_HWRM_SHORT_REQ_LEN;
130         }
131
132         /* Write request msg to hwrm channel */
133         for (i = 0; i < msg_len; i += 4) {
134                 bar = (uint8_t *)bp->bar0 + i;
135                 rte_write32(*data, bar);
136                 data++;
137         }
138
139         /* Zero the rest of the request space */
140         for (; i < max_req_len; i += 4) {
141                 bar = (uint8_t *)bp->bar0 + i;
142                 rte_write32(0, bar);
143         }
144
145         /* Ring channel doorbell */
146         bar = (uint8_t *)bp->bar0 + 0x100;
147         rte_write32(1, bar);
148
        /*
         * Poll for the valid bit. Firmware writes the valid byte last,
         * so once it matches the key the rest of the response is
         * visible (paired with the rte_rmb() below).
         */
150         for (i = 0; i < HWRM_CMD_TIMEOUT; i++) {
151                 /* Sanity check on the resp->resp_len */
152                 rte_rmb();
153                 if (resp->resp_len && resp->resp_len <=
154                                 bp->max_resp_len) {
155                         /* Last byte of resp contains the valid key */
156                         valid = (uint8_t *)resp + resp->resp_len - 1;
157                         if (*valid == HWRM_RESP_VALID_KEY)
158                                 break;
159                 }
160                 rte_delay_us(600);
161         }
162
163         if (i >= HWRM_CMD_TIMEOUT) {
164                 PMD_DRV_LOG(ERR, "Error sending msg 0x%04x\n",
165                         req->req_type);
166                 goto err_ret;
167         }
168         return 0;
169
170 err_ret:
171         return -1;
172 }
173
/*
 * HWRM_PREP() should be used to prepare *ALL* HWRM commands. It grabs the
 * spinlock and does the initial processing.
 *
 * HWRM_CHECK_RESULT() returns from the enclosing function on failure and
 * releases the spinlock only on that error path. If a function does not use
 * the regular int return convention, HWRM_CHECK_RESULT() should not be used
 * directly; instead, copy and modify it to suit that function.
 *
 * HWRM_UNLOCK() must be called after all response processing is completed.
 */
185 #define HWRM_PREP(req, type) do { \
186         rte_spinlock_lock(&bp->hwrm_lock); \
187         memset(bp->hwrm_cmd_resp_addr, 0, bp->max_resp_len); \
188         req.req_type = rte_cpu_to_le_16(HWRM_##type); \
189         req.cmpl_ring = rte_cpu_to_le_16(-1); \
190         req.seq_id = rte_cpu_to_le_16(bp->hwrm_cmd_seq++); \
191         req.target_id = rte_cpu_to_le_16(0xffff); \
192         req.resp_addr = rte_cpu_to_le_64(bp->hwrm_cmd_resp_dma_addr); \
193 } while (0)
194
195 #define HWRM_CHECK_RESULT() do {\
196         if (rc) { \
197                 PMD_DRV_LOG(ERR, "failed rc:%d\n", rc); \
198                 rte_spinlock_unlock(&bp->hwrm_lock); \
199                 return rc; \
200         } \
201         if (resp->error_code) { \
202                 rc = rte_le_to_cpu_16(resp->error_code); \
203                 if (resp->resp_len >= 16) { \
204                         struct hwrm_err_output *tmp_hwrm_err_op = \
205                                                 (void *)resp; \
206                         PMD_DRV_LOG(ERR, \
207                                 "error %d:%d:%08x:%04x\n", \
208                                 rc, tmp_hwrm_err_op->cmd_err, \
209                                 rte_le_to_cpu_32(\
210                                         tmp_hwrm_err_op->opaque_0), \
211                                 rte_le_to_cpu_16(\
212                                         tmp_hwrm_err_op->opaque_1)); \
213                 } else { \
214                         PMD_DRV_LOG(ERR, "error %d\n", rc); \
215                 } \
216                 rte_spinlock_unlock(&bp->hwrm_lock); \
217                 return rc; \
218         } \
219 } while (0)
220
221 #define HWRM_UNLOCK()           rte_spinlock_unlock(&bp->hwrm_lock)
222
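/*
 * A minimal sketch of the canonical call sequence, mirroring the functions
 * below ("SOME_CMD" and the field names are placeholders, not real HWRM
 * definitions):
 *
 *	struct hwrm_some_cmd_input req = {.req_type = 0 };
 *	struct hwrm_some_cmd_output *resp = bp->hwrm_cmd_resp_addr;
 *
 *	HWRM_PREP(req, SOME_CMD);
 *	req.some_field = rte_cpu_to_le_32(value);
 *	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
 *	HWRM_CHECK_RESULT();
 *	value = rte_le_to_cpu_16(resp->some_result);
 *	HWRM_UNLOCK();
 */
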
223 int bnxt_hwrm_cfa_l2_clear_rx_mask(struct bnxt *bp, struct bnxt_vnic_info *vnic)
224 {
225         int rc = 0;
226         struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
227         struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;
228
229         HWRM_PREP(req, CFA_L2_SET_RX_MASK);
230         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
231         req.mask = 0;
232
233         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
234
235         HWRM_CHECK_RESULT();
236         HWRM_UNLOCK();
237
238         return rc;
239 }
240
241 int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp,
242                                  struct bnxt_vnic_info *vnic,
243                                  uint16_t vlan_count,
244                                  struct bnxt_vlan_table_entry *vlan_table)
245 {
246         int rc = 0;
247         struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
248         struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;
249         uint32_t mask = 0;
250
251         HWRM_PREP(req, CFA_L2_SET_RX_MASK);
252         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
253
        /* FIXME: add the multicast flag once multicast add options are
         * supported by ethtool.
         */
257         if (vnic->flags & BNXT_VNIC_INFO_BCAST)
258                 mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_BCAST;
259         if (vnic->flags & BNXT_VNIC_INFO_UNTAGGED)
260                 mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN;
261         if (vnic->flags & BNXT_VNIC_INFO_PROMISC)
262                 mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_PROMISCUOUS;
263         if (vnic->flags & BNXT_VNIC_INFO_ALLMULTI)
264                 mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ALL_MCAST;
265         if (vnic->flags & BNXT_VNIC_INFO_MCAST)
266                 mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_MCAST;
267         if (vnic->mc_addr_cnt) {
268                 mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_MCAST;
269                 req.num_mc_entries = rte_cpu_to_le_32(vnic->mc_addr_cnt);
270                 req.mc_tbl_addr = rte_cpu_to_le_64(vnic->mc_list_dma_addr);
271         }
272         if (vlan_table) {
273                 if (!(mask & HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN))
274                         mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLANONLY;
275                 req.vlan_tag_tbl_addr = rte_cpu_to_le_64(
276                          rte_mem_virt2iova(vlan_table));
277                 req.num_vlan_tags = rte_cpu_to_le_32((uint32_t)vlan_count);
278         }
279         req.mask = rte_cpu_to_le_32(mask);
280
281         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
282
283         HWRM_CHECK_RESULT();
284         HWRM_UNLOCK();
285
286         return rc;
287 }
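
/*
 * A sketch of typical use, assuming the caller owns the VNIC flags: to
 * enable promiscuous mode, set BNXT_VNIC_INFO_PROMISC and re-issue the
 * mask with no VLAN table:
 *
 *	vnic->flags |= BNXT_VNIC_INFO_PROMISC;
 *	rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
 */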
288
289 int bnxt_hwrm_cfa_vlan_antispoof_cfg(struct bnxt *bp, uint16_t fid,
290                         uint16_t vlan_count,
291                         struct bnxt_vlan_antispoof_table_entry *vlan_table)
292 {
293         int rc = 0;
294         struct hwrm_cfa_vlan_antispoof_cfg_input req = {.req_type = 0 };
295         struct hwrm_cfa_vlan_antispoof_cfg_output *resp =
296                                                 bp->hwrm_cmd_resp_addr;
297
        /*
         * Older HWRM versions did not support this command; the set_rx_mask
         * list was used for anti-spoof instead. In 1.8.0 the TX path
         * configuration was removed from the set_rx_mask call and this
         * command was added.
         *
         * This command is also available in firmware 1.7.8.11 and higher,
         * as well as in 1.7.8.0.
         */
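        /*
         * bp->fw_ver is packed in bnxt_hwrm_ver_get() as (major << 24) |
         * (minor << 16) | (build << 8) | update, so e.g. firmware 1.7.8.11
         * compares as 0x0107080b below.
         */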
306         if (bp->fw_ver < ((1 << 24) | (8 << 16))) {
307                 if (bp->fw_ver != ((1 << 24) | (7 << 16) | (8 << 8))) {
308                         if (bp->fw_ver < ((1 << 24) | (7 << 16) | (8 << 8) |
309                                         (11)))
310                                 return 0;
311                 }
312         }
313         HWRM_PREP(req, CFA_VLAN_ANTISPOOF_CFG);
314         req.fid = rte_cpu_to_le_16(fid);
315
316         req.vlan_tag_mask_tbl_addr =
317                 rte_cpu_to_le_64(rte_mem_virt2iova(vlan_table));
318         req.num_vlan_entries = rte_cpu_to_le_32((uint32_t)vlan_count);
319
320         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
321
322         HWRM_CHECK_RESULT();
323         HWRM_UNLOCK();
324
325         return rc;
326 }
327
328 int bnxt_hwrm_clear_l2_filter(struct bnxt *bp,
329                            struct bnxt_filter_info *filter)
330 {
331         int rc = 0;
332         struct hwrm_cfa_l2_filter_free_input req = {.req_type = 0 };
333         struct hwrm_cfa_l2_filter_free_output *resp = bp->hwrm_cmd_resp_addr;
334
335         if (filter->fw_l2_filter_id == UINT64_MAX)
336                 return 0;
337
338         HWRM_PREP(req, CFA_L2_FILTER_FREE);
339
340         req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);
341
342         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
343
344         HWRM_CHECK_RESULT();
345         HWRM_UNLOCK();
346
347         filter->fw_l2_filter_id = -1;
348
349         return 0;
350 }
351
352 int bnxt_hwrm_set_l2_filter(struct bnxt *bp,
353                          uint16_t dst_id,
354                          struct bnxt_filter_info *filter)
355 {
356         int rc = 0;
357         struct hwrm_cfa_l2_filter_alloc_input req = {.req_type = 0 };
358         struct hwrm_cfa_l2_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
359         struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
360         const struct rte_eth_vmdq_rx_conf *conf =
361                     &dev_conf->rx_adv_conf.vmdq_rx_conf;
362         uint32_t enables = 0;
363         uint16_t j = dst_id - 1;
364
        /* TODO: Is there a better way to add VLANs to each VNIC
         * in case of VMDQ?
         */
366         if ((dev_conf->rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG) &&
367             conf->pool_map[j].pools & (1UL << j)) {
368                 PMD_DRV_LOG(DEBUG,
369                         "Add vlan %u to vmdq pool %u\n",
370                         conf->pool_map[j].vlan_id, j);
371
372                 filter->l2_ivlan = conf->pool_map[j].vlan_id;
373                 filter->enables |=
374                         HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN |
375                         HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN_MASK;
376         }
377
378         if (filter->fw_l2_filter_id != UINT64_MAX)
379                 bnxt_hwrm_clear_l2_filter(bp, filter);
380
381         HWRM_PREP(req, CFA_L2_FILTER_ALLOC);
382
383         req.flags = rte_cpu_to_le_32(filter->flags);
384
385         enables = filter->enables |
386               HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_DST_ID;
387         req.dst_id = rte_cpu_to_le_16(dst_id);
388
389         if (enables &
390             HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR)
391                 memcpy(req.l2_addr, filter->l2_addr,
392                        ETHER_ADDR_LEN);
393         if (enables &
394             HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK)
395                 memcpy(req.l2_addr_mask, filter->l2_addr_mask,
396                        ETHER_ADDR_LEN);
397         if (enables &
398             HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN)
399                 req.l2_ovlan = filter->l2_ovlan;
        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN)
                req.l2_ivlan = filter->l2_ivlan;
        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN_MASK)
                req.l2_ovlan_mask = filter->l2_ovlan_mask;
        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN_MASK)
                req.l2_ivlan_mask = filter->l2_ivlan_mask;
409         if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_ID)
410                 req.src_id = rte_cpu_to_le_32(filter->src_id);
411         if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_TYPE)
412                 req.src_type = filter->src_type;
413
414         req.enables = rte_cpu_to_le_32(enables);
415
416         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
417
418         HWRM_CHECK_RESULT();
419
420         filter->fw_l2_filter_id = rte_le_to_cpu_64(resp->l2_filter_id);
421         HWRM_UNLOCK();
422
423         return rc;
424 }
425
426 int bnxt_hwrm_ptp_cfg(struct bnxt *bp)
427 {
428         struct hwrm_port_mac_cfg_input req = {.req_type = 0};
429         struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
430         uint32_t flags = 0;
431         int rc;
432
433         if (!ptp)
434                 return 0;
435
436         HWRM_PREP(req, PORT_MAC_CFG);
437
438         if (ptp->rx_filter)
439                 flags |= PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_ENABLE;
440         else
441                 flags |= PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_DISABLE;
442         if (ptp->tx_tstamp_en)
443                 flags |= PORT_MAC_CFG_REQ_FLAGS_PTP_TX_TS_CAPTURE_ENABLE;
444         else
445                 flags |= PORT_MAC_CFG_REQ_FLAGS_PTP_TX_TS_CAPTURE_DISABLE;
446         req.flags = rte_cpu_to_le_32(flags);
447         req.enables =
448         rte_cpu_to_le_32(PORT_MAC_CFG_REQ_ENABLES_RX_TS_CAPTURE_PTP_MSG_TYPE);
449         req.rx_ts_capture_ptp_msg_type = rte_cpu_to_le_16(ptp->rxctl);
450
451         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
452         HWRM_UNLOCK();
453
454         return rc;
455 }
456
457 static int bnxt_hwrm_ptp_qcfg(struct bnxt *bp)
458 {
459         int rc = 0;
460         struct hwrm_port_mac_ptp_qcfg_input req = {.req_type = 0};
461         struct hwrm_port_mac_ptp_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
462         struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
463
464 /*      if (bp->hwrm_spec_code < 0x10801 || ptp)  TBD  */
465         if (ptp)
466                 return 0;
467
468         HWRM_PREP(req, PORT_MAC_PTP_QCFG);
469
470         req.port_id = rte_cpu_to_le_16(bp->pf.port_id);
471
472         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
473
        HWRM_CHECK_RESULT();

        if (!(resp->flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_DIRECT_ACCESS)) {
                HWRM_UNLOCK();
                return 0;
        }

        ptp = rte_zmalloc("ptp_cfg", sizeof(*ptp), 0);
        if (!ptp) {
                HWRM_UNLOCK();
                return -ENOMEM;
        }
482
483         ptp->rx_regs[BNXT_PTP_RX_TS_L] =
484                 rte_le_to_cpu_32(resp->rx_ts_reg_off_lower);
485         ptp->rx_regs[BNXT_PTP_RX_TS_H] =
486                 rte_le_to_cpu_32(resp->rx_ts_reg_off_upper);
487         ptp->rx_regs[BNXT_PTP_RX_SEQ] =
488                 rte_le_to_cpu_32(resp->rx_ts_reg_off_seq_id);
489         ptp->rx_regs[BNXT_PTP_RX_FIFO] =
490                 rte_le_to_cpu_32(resp->rx_ts_reg_off_fifo);
491         ptp->rx_regs[BNXT_PTP_RX_FIFO_ADV] =
492                 rte_le_to_cpu_32(resp->rx_ts_reg_off_fifo_adv);
493         ptp->tx_regs[BNXT_PTP_TX_TS_L] =
494                 rte_le_to_cpu_32(resp->tx_ts_reg_off_lower);
495         ptp->tx_regs[BNXT_PTP_TX_TS_H] =
496                 rte_le_to_cpu_32(resp->tx_ts_reg_off_upper);
497         ptp->tx_regs[BNXT_PTP_TX_SEQ] =
498                 rte_le_to_cpu_32(resp->tx_ts_reg_off_seq_id);
499         ptp->tx_regs[BNXT_PTP_TX_FIFO] =
500                 rte_le_to_cpu_32(resp->tx_ts_reg_off_fifo);
501
        ptp->bp = bp;
        bp->ptp_cfg = ptp;

        HWRM_UNLOCK();

        return 0;
506 }
507
508 int bnxt_hwrm_func_qcaps(struct bnxt *bp)
509 {
510         int rc = 0;
511         struct hwrm_func_qcaps_input req = {.req_type = 0 };
512         struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
513         uint16_t new_max_vfs;
514         uint32_t flags;
515         int i;
516
517         HWRM_PREP(req, FUNC_QCAPS);
518
519         req.fid = rte_cpu_to_le_16(0xffff);
520
521         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
522
523         HWRM_CHECK_RESULT();
524
525         bp->max_ring_grps = rte_le_to_cpu_32(resp->max_hw_ring_grps);
526         flags = rte_le_to_cpu_32(resp->flags);
527         if (BNXT_PF(bp)) {
528                 bp->pf.port_id = resp->port_id;
529                 bp->pf.first_vf_id = rte_le_to_cpu_16(resp->first_vf_id);
530                 new_max_vfs = bp->pdev->max_vfs;
531                 if (new_max_vfs != bp->pf.max_vfs) {
532                         if (bp->pf.vf_info)
533                                 rte_free(bp->pf.vf_info);
534                         bp->pf.vf_info = rte_malloc("bnxt_vf_info",
535                             sizeof(bp->pf.vf_info[0]) * new_max_vfs, 0);
536                         bp->pf.max_vfs = new_max_vfs;
537                         for (i = 0; i < new_max_vfs; i++) {
538                                 bp->pf.vf_info[i].fid = bp->pf.first_vf_id + i;
539                                 bp->pf.vf_info[i].vlan_table =
540                                         rte_zmalloc("VF VLAN table",
541                                                     getpagesize(),
542                                                     getpagesize());
543                                 if (bp->pf.vf_info[i].vlan_table == NULL)
544                                         PMD_DRV_LOG(ERR,
                                        "Failed to alloc VLAN table for VF %d\n",
546                                         i);
547                                 else
548                                         rte_mem_lock_page(
549                                                 bp->pf.vf_info[i].vlan_table);
550                                 bp->pf.vf_info[i].vlan_as_table =
551                                         rte_zmalloc("VF VLAN AS table",
552                                                     getpagesize(),
553                                                     getpagesize());
554                                 if (bp->pf.vf_info[i].vlan_as_table == NULL)
555                                         PMD_DRV_LOG(ERR,
                                        "Failed to alloc VLAN AS table for VF %d\n",
557                                         i);
558                                 else
559                                         rte_mem_lock_page(
560                                                bp->pf.vf_info[i].vlan_as_table);
561                                 STAILQ_INIT(&bp->pf.vf_info[i].filter);
562                         }
563                 }
564         }
565
566         bp->fw_fid = rte_le_to_cpu_32(resp->fid);
567         memcpy(bp->dflt_mac_addr, &resp->mac_address, ETHER_ADDR_LEN);
568         bp->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx);
569         bp->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings);
570         bp->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings);
571         bp->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings);
572         bp->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
573         /* TODO: For now, do not support VMDq/RFS on VFs. */
574         if (BNXT_PF(bp)) {
575                 if (bp->pf.max_vfs)
576                         bp->max_vnics = 1;
577                 else
578                         bp->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
579         } else {
580                 bp->max_vnics = 1;
581         }
582         bp->max_stat_ctx = rte_le_to_cpu_16(resp->max_stat_ctx);
        if (BNXT_PF(bp)) {
                bp->pf.total_vnics = rte_le_to_cpu_16(resp->max_vnics);
                if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_PTP_SUPPORTED) {
                        bp->flags |= BNXT_FLAG_PTP_SUPPORTED;
                        PMD_DRV_LOG(INFO, "PTP SUPPORTED\n");
                }
        }

        HWRM_UNLOCK();

        /* Issued after HWRM_UNLOCK(): bnxt_hwrm_ptp_qcfg() sends its own
         * HWRM command and takes the lock itself.
         */
        if (bp->flags & BNXT_FLAG_PTP_SUPPORTED)
                bnxt_hwrm_ptp_qcfg(bp);
594
595         return rc;
596 }
597
598 int bnxt_hwrm_func_reset(struct bnxt *bp)
599 {
600         int rc = 0;
601         struct hwrm_func_reset_input req = {.req_type = 0 };
602         struct hwrm_func_reset_output *resp = bp->hwrm_cmd_resp_addr;
603
604         HWRM_PREP(req, FUNC_RESET);
605
606         req.enables = rte_cpu_to_le_32(0);
607
608         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
609
610         HWRM_CHECK_RESULT();
611         HWRM_UNLOCK();
612
613         return rc;
614 }
615
616 int bnxt_hwrm_func_driver_register(struct bnxt *bp)
617 {
618         int rc;
619         struct hwrm_func_drv_rgtr_input req = {.req_type = 0 };
620         struct hwrm_func_drv_rgtr_output *resp = bp->hwrm_cmd_resp_addr;
621
622         if (bp->flags & BNXT_FLAG_REGISTERED)
623                 return 0;
624
625         HWRM_PREP(req, FUNC_DRV_RGTR);
626         req.enables = rte_cpu_to_le_32(HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VER |
627                         HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_ASYNC_EVENT_FWD);
628         req.ver_maj = RTE_VER_YEAR;
629         req.ver_min = RTE_VER_MONTH;
630         req.ver_upd = RTE_VER_MINOR;
631
632         if (BNXT_PF(bp)) {
633                 req.enables |= rte_cpu_to_le_32(
634                         HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VF_INPUT_FWD);
635                 memcpy(req.vf_req_fwd, bp->pf.vf_req_fwd,
636                        RTE_MIN(sizeof(req.vf_req_fwd),
637                                sizeof(bp->pf.vf_req_fwd)));
638         }
639
640         req.async_event_fwd[0] |=
641                 rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_LINK_STATUS_CHANGE |
642                                  ASYNC_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED |
643                                  ASYNC_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE);
644         req.async_event_fwd[1] |=
645                 rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_PF_DRVR_UNLOAD |
646                                  ASYNC_CMPL_EVENT_ID_VF_CFG_CHANGE);
647
648         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
649
650         HWRM_CHECK_RESULT();
651         HWRM_UNLOCK();
652
653         bp->flags |= BNXT_FLAG_REGISTERED;
654
655         return rc;
656 }
657
658 int bnxt_hwrm_ver_get(struct bnxt *bp)
659 {
660         int rc = 0;
661         struct hwrm_ver_get_input req = {.req_type = 0 };
662         struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr;
663         uint32_t my_version;
664         uint32_t fw_version;
665         uint16_t max_resp_len;
666         char type[RTE_MEMZONE_NAMESIZE];
667         uint32_t dev_caps_cfg;
668
669         bp->max_req_len = HWRM_MAX_REQ_LEN;
670         HWRM_PREP(req, VER_GET);
671
672         req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
673         req.hwrm_intf_min = HWRM_VERSION_MINOR;
674         req.hwrm_intf_upd = HWRM_VERSION_UPDATE;
675
676         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
677
678         HWRM_CHECK_RESULT();
679
680         PMD_DRV_LOG(INFO, "%d.%d.%d:%d.%d.%d\n",
681                 resp->hwrm_intf_maj, resp->hwrm_intf_min,
682                 resp->hwrm_intf_upd,
683                 resp->hwrm_fw_maj, resp->hwrm_fw_min, resp->hwrm_fw_bld);
684         bp->fw_ver = (resp->hwrm_fw_maj << 24) | (resp->hwrm_fw_min << 16) |
685                         (resp->hwrm_fw_bld << 8) | resp->hwrm_fw_rsvd;
686         PMD_DRV_LOG(INFO, "Driver HWRM version: %d.%d.%d\n",
687                 HWRM_VERSION_MAJOR, HWRM_VERSION_MINOR, HWRM_VERSION_UPDATE);
688
689         my_version = HWRM_VERSION_MAJOR << 16;
690         my_version |= HWRM_VERSION_MINOR << 8;
691         my_version |= HWRM_VERSION_UPDATE;
692
693         fw_version = resp->hwrm_intf_maj << 16;
694         fw_version |= resp->hwrm_intf_min << 8;
695         fw_version |= resp->hwrm_intf_upd;
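        /* Both versions pack as (maj << 16) | (min << 8) | upd, e.g.
         * interface version 1.8.1 becomes 0x010801.
         */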
696
697         if (resp->hwrm_intf_maj != HWRM_VERSION_MAJOR) {
698                 PMD_DRV_LOG(ERR, "Unsupported firmware API version\n");
699                 rc = -EINVAL;
700                 goto error;
701         }
702
703         if (my_version != fw_version) {
704                 PMD_DRV_LOG(INFO, "BNXT Driver/HWRM API mismatch.\n");
705                 if (my_version < fw_version) {
706                         PMD_DRV_LOG(INFO,
707                                 "Firmware API version is newer than driver.\n");
708                         PMD_DRV_LOG(INFO,
709                                 "The driver may be missing features.\n");
710                 } else {
711                         PMD_DRV_LOG(INFO,
712                                 "Firmware API version is older than driver.\n");
713                         PMD_DRV_LOG(INFO,
714                                 "Not all driver features may be functional.\n");
715                 }
716         }
717
718         if (bp->max_req_len > resp->max_req_win_len) {
719                 PMD_DRV_LOG(ERR, "Unsupported request length\n");
720                 rc = -EINVAL;
721         }
722         bp->max_req_len = rte_le_to_cpu_16(resp->max_req_win_len);
723         max_resp_len = resp->max_resp_len;
724         dev_caps_cfg = rte_le_to_cpu_32(resp->dev_caps_cfg);
725
        /* Build the buffer name up front: it is also used for the short
         * command buffer allocation below, even when the response buffer
         * is not reallocated.
         */
        snprintf(type, sizeof(type), "bnxt_hwrm_%04x:%02x:%02x:%02x",
                 bp->pdev->addr.domain, bp->pdev->addr.bus,
                 bp->pdev->addr.devid, bp->pdev->addr.function);

        if (bp->max_resp_len != max_resp_len) {
730
731                 rte_free(bp->hwrm_cmd_resp_addr);
732
733                 bp->hwrm_cmd_resp_addr = rte_malloc(type, max_resp_len, 0);
734                 if (bp->hwrm_cmd_resp_addr == NULL) {
735                         rc = -ENOMEM;
736                         goto error;
737                 }
738                 rte_mem_lock_page(bp->hwrm_cmd_resp_addr);
739                 bp->hwrm_cmd_resp_dma_addr =
740                         rte_mem_virt2iova(bp->hwrm_cmd_resp_addr);
741                 if (bp->hwrm_cmd_resp_dma_addr == 0) {
742                         PMD_DRV_LOG(ERR,
743                         "Unable to map response buffer to physical memory.\n");
744                         rc = -ENOMEM;
745                         goto error;
746                 }
747                 bp->max_resp_len = max_resp_len;
748         }
749
750         if ((dev_caps_cfg &
751                 HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
752             (dev_caps_cfg &
             HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_REQUIRED)) {
754                 PMD_DRV_LOG(DEBUG, "Short command supported\n");
755
756                 rte_free(bp->hwrm_short_cmd_req_addr);
757
758                 bp->hwrm_short_cmd_req_addr = rte_malloc(type,
759                                                         bp->max_req_len, 0);
760                 if (bp->hwrm_short_cmd_req_addr == NULL) {
761                         rc = -ENOMEM;
762                         goto error;
763                 }
764                 rte_mem_lock_page(bp->hwrm_short_cmd_req_addr);
765                 bp->hwrm_short_cmd_req_dma_addr =
766                         rte_mem_virt2iova(bp->hwrm_short_cmd_req_addr);
767                 if (bp->hwrm_short_cmd_req_dma_addr == 0) {
768                         rte_free(bp->hwrm_short_cmd_req_addr);
769                         PMD_DRV_LOG(ERR,
770                                 "Unable to map buffer to physical memory.\n");
771                         rc = -ENOMEM;
772                         goto error;
773                 }
774
775                 bp->flags |= BNXT_FLAG_SHORT_CMD;
776         }
777
778 error:
779         HWRM_UNLOCK();
780         return rc;
781 }
782
783 int bnxt_hwrm_func_driver_unregister(struct bnxt *bp, uint32_t flags)
784 {
785         int rc;
786         struct hwrm_func_drv_unrgtr_input req = {.req_type = 0 };
787         struct hwrm_func_drv_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;
788
789         if (!(bp->flags & BNXT_FLAG_REGISTERED))
790                 return 0;
791
792         HWRM_PREP(req, FUNC_DRV_UNRGTR);
793         req.flags = flags;
794
795         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
796
797         HWRM_CHECK_RESULT();
798         HWRM_UNLOCK();
799
800         bp->flags &= ~BNXT_FLAG_REGISTERED;
801
802         return rc;
803 }
804
805 static int bnxt_hwrm_port_phy_cfg(struct bnxt *bp, struct bnxt_link_info *conf)
806 {
807         int rc = 0;
808         struct hwrm_port_phy_cfg_input req = {0};
809         struct hwrm_port_phy_cfg_output *resp = bp->hwrm_cmd_resp_addr;
810         uint32_t enables = 0;
811
812         HWRM_PREP(req, PORT_PHY_CFG);
813
814         if (conf->link_up) {
                /* Setting a fixed speed, but autoneg is on, so disable it */
816                 if (bp->link_info.auto_mode && conf->link_speed) {
817                         req.auto_mode = HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_NONE;
818                         PMD_DRV_LOG(DEBUG, "Disabling AutoNeg\n");
819                 }
820
821                 req.flags = rte_cpu_to_le_32(conf->phy_flags);
822                 req.force_link_speed = rte_cpu_to_le_16(conf->link_speed);
823                 enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_MODE;
824                 /*
825                  * Note, ChiMP FW 20.2.1 and 20.2.2 return an error when we set
826                  * any auto mode, even "none".
827                  */
828                 if (!conf->link_speed) {
829                         /* No speeds specified. Enable AutoNeg - all speeds */
830                         req.auto_mode =
831                                 HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_ALL_SPEEDS;
832                 }
833                 /* AutoNeg - Advertise speeds specified. */
834                 if (conf->auto_link_speed_mask &&
835                     !(conf->phy_flags & HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE)) {
836                         req.auto_mode =
837                                 HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_SPEED_MASK;
838                         req.auto_link_speed_mask =
839                                 conf->auto_link_speed_mask;
840                         enables |=
841                         HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED_MASK;
842                 }
843
844                 req.auto_duplex = conf->duplex;
845                 enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_DUPLEX;
846                 req.auto_pause = conf->auto_pause;
847                 req.force_pause = conf->force_pause;
848                 /* Set force_pause if there is no auto or if there is a force */
849                 if (req.auto_pause && !req.force_pause)
850                         enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_PAUSE;
851                 else
852                         enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_FORCE_PAUSE;
853
854                 req.enables = rte_cpu_to_le_32(enables);
855         } else {
856                 req.flags =
857                 rte_cpu_to_le_32(HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE_LINK_DWN);
858                 PMD_DRV_LOG(INFO, "Force Link Down\n");
859         }
860
861         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
862
863         HWRM_CHECK_RESULT();
864         HWRM_UNLOCK();
865
866         return rc;
867 }
868
869 static int bnxt_hwrm_port_phy_qcfg(struct bnxt *bp,
870                                    struct bnxt_link_info *link_info)
871 {
872         int rc = 0;
873         struct hwrm_port_phy_qcfg_input req = {0};
874         struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
875
876         HWRM_PREP(req, PORT_PHY_QCFG);
877
878         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
879
880         HWRM_CHECK_RESULT();
881
882         link_info->phy_link_status = resp->link;
883         link_info->link_up =
884                 (link_info->phy_link_status ==
885                  HWRM_PORT_PHY_QCFG_OUTPUT_LINK_LINK) ? 1 : 0;
886         link_info->link_speed = rte_le_to_cpu_16(resp->link_speed);
887         link_info->duplex = resp->duplex_cfg;
888         link_info->pause = resp->pause;
889         link_info->auto_pause = resp->auto_pause;
890         link_info->force_pause = resp->force_pause;
891         link_info->auto_mode = resp->auto_mode;
892         link_info->phy_type = resp->phy_type;
893         link_info->media_type = resp->media_type;
894
895         link_info->support_speeds = rte_le_to_cpu_16(resp->support_speeds);
896         link_info->auto_link_speed = rte_le_to_cpu_16(resp->auto_link_speed);
897         link_info->preemphasis = rte_le_to_cpu_32(resp->preemphasis);
898         link_info->force_link_speed = rte_le_to_cpu_16(resp->force_link_speed);
899         link_info->phy_ver[0] = resp->phy_maj;
900         link_info->phy_ver[1] = resp->phy_min;
901         link_info->phy_ver[2] = resp->phy_bld;
902
903         HWRM_UNLOCK();
904
905         PMD_DRV_LOG(DEBUG, "Link Speed %d\n", link_info->link_speed);
906         PMD_DRV_LOG(DEBUG, "Auto Mode %d\n", link_info->auto_mode);
907         PMD_DRV_LOG(DEBUG, "Support Speeds %x\n", link_info->support_speeds);
908         PMD_DRV_LOG(DEBUG, "Auto Link Speed %x\n", link_info->auto_link_speed);
909         PMD_DRV_LOG(DEBUG, "Auto Link Speed Mask %x\n",
910                     link_info->auto_link_speed_mask);
911         PMD_DRV_LOG(DEBUG, "Forced Link Speed %x\n",
912                     link_info->force_link_speed);
913
914         return rc;
915 }
916
917 int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
918 {
919         int rc = 0;
920         struct hwrm_queue_qportcfg_input req = {.req_type = 0 };
921         struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr;
922
923         HWRM_PREP(req, QUEUE_QPORTCFG);
924
925         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
926
927         HWRM_CHECK_RESULT();
928
929 #define GET_QUEUE_INFO(x) \
930         bp->cos_queue[x].id = resp->queue_id##x; \
931         bp->cos_queue[x].profile = resp->queue_id##x##_service_profile
932
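        /* Token pasting: GET_QUEUE_INFO(0) expands to
         *	bp->cos_queue[0].id = resp->queue_id0;
         *	bp->cos_queue[0].profile = resp->queue_id0_service_profile;
         */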
933         GET_QUEUE_INFO(0);
934         GET_QUEUE_INFO(1);
935         GET_QUEUE_INFO(2);
936         GET_QUEUE_INFO(3);
937         GET_QUEUE_INFO(4);
938         GET_QUEUE_INFO(5);
939         GET_QUEUE_INFO(6);
940         GET_QUEUE_INFO(7);
941
942         HWRM_UNLOCK();
943
944         return rc;
945 }
946
947 int bnxt_hwrm_ring_alloc(struct bnxt *bp,
948                          struct bnxt_ring *ring,
949                          uint32_t ring_type, uint32_t map_index,
950                          uint32_t stats_ctx_id, uint32_t cmpl_ring_id)
951 {
952         int rc = 0;
953         uint32_t enables = 0;
954         struct hwrm_ring_alloc_input req = {.req_type = 0 };
955         struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr;
956
957         HWRM_PREP(req, RING_ALLOC);
958
959         req.page_tbl_addr = rte_cpu_to_le_64(ring->bd_dma);
960         req.fbo = rte_cpu_to_le_32(0);
961         /* Association of ring index with doorbell index */
962         req.logical_id = rte_cpu_to_le_16(map_index);
963         req.length = rte_cpu_to_le_32(ring->ring_size);
964
965         switch (ring_type) {
966         case HWRM_RING_ALLOC_INPUT_RING_TYPE_TX:
967                 req.queue_id = bp->cos_queue[0].id;
968                 /* FALLTHROUGH */
969         case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX:
970                 req.ring_type = ring_type;
971                 req.cmpl_ring_id = rte_cpu_to_le_16(cmpl_ring_id);
972                 req.stat_ctx_id = rte_cpu_to_le_16(stats_ctx_id);
973                 if (stats_ctx_id != INVALID_STATS_CTX_ID)
974                         enables |=
975                         HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID;
976                 break;
977         case HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL:
978                 req.ring_type = ring_type;
979                 /*
980                  * TODO: Some HWRM versions crash with
981                  * HWRM_RING_ALLOC_INPUT_INT_MODE_POLL
982                  */
983                 req.int_mode = HWRM_RING_ALLOC_INPUT_INT_MODE_MSIX;
984                 break;
985         default:
986                 PMD_DRV_LOG(ERR, "hwrm alloc invalid ring type %d\n",
987                         ring_type);
988                 HWRM_UNLOCK();
989                 return -1;
990         }
991         req.enables = rte_cpu_to_le_32(enables);
992
993         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
994
995         if (rc || resp->error_code) {
996                 if (rc == 0 && resp->error_code)
997                         rc = rte_le_to_cpu_16(resp->error_code);
998                 switch (ring_type) {
999                 case HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL:
1000                         PMD_DRV_LOG(ERR,
1001                                 "hwrm_ring_alloc cp failed. rc:%d\n", rc);
1002                         HWRM_UNLOCK();
1003                         return rc;
1004                 case HWRM_RING_FREE_INPUT_RING_TYPE_RX:
1005                         PMD_DRV_LOG(ERR,
1006                                 "hwrm_ring_alloc rx failed. rc:%d\n", rc);
1007                         HWRM_UNLOCK();
1008                         return rc;
1009                 case HWRM_RING_FREE_INPUT_RING_TYPE_TX:
1010                         PMD_DRV_LOG(ERR,
1011                                 "hwrm_ring_alloc tx failed. rc:%d\n", rc);
1012                         HWRM_UNLOCK();
1013                         return rc;
1014                 default:
1015                         PMD_DRV_LOG(ERR, "Invalid ring. rc:%d\n", rc);
1016                         HWRM_UNLOCK();
1017                         return rc;
1018                 }
1019         }
1020
1021         ring->fw_ring_id = rte_le_to_cpu_16(resp->ring_id);
1022         HWRM_UNLOCK();
1023         return rc;
1024 }
1025
1026 int bnxt_hwrm_ring_free(struct bnxt *bp,
1027                         struct bnxt_ring *ring, uint32_t ring_type)
1028 {
1029         int rc;
1030         struct hwrm_ring_free_input req = {.req_type = 0 };
1031         struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr;
1032
1033         HWRM_PREP(req, RING_FREE);
1034
1035         req.ring_type = ring_type;
1036         req.ring_id = rte_cpu_to_le_16(ring->fw_ring_id);
1037
1038         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1039
1040         if (rc || resp->error_code) {
1041                 if (rc == 0 && resp->error_code)
1042                         rc = rte_le_to_cpu_16(resp->error_code);
1043                 HWRM_UNLOCK();
1044
1045                 switch (ring_type) {
1046                 case HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL:
1047                         PMD_DRV_LOG(ERR, "hwrm_ring_free cp failed. rc:%d\n",
1048                                 rc);
1049                         return rc;
1050                 case HWRM_RING_FREE_INPUT_RING_TYPE_RX:
1051                         PMD_DRV_LOG(ERR, "hwrm_ring_free rx failed. rc:%d\n",
1052                                 rc);
1053                         return rc;
1054                 case HWRM_RING_FREE_INPUT_RING_TYPE_TX:
1055                         PMD_DRV_LOG(ERR, "hwrm_ring_free tx failed. rc:%d\n",
1056                                 rc);
1057                         return rc;
1058                 default:
1059                         PMD_DRV_LOG(ERR, "Invalid ring, rc:%d\n", rc);
1060                         return rc;
1061                 }
1062         }
1063         HWRM_UNLOCK();
1064         return 0;
1065 }
1066
1067 int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp, unsigned int idx)
1068 {
1069         int rc = 0;
1070         struct hwrm_ring_grp_alloc_input req = {.req_type = 0 };
1071         struct hwrm_ring_grp_alloc_output *resp = bp->hwrm_cmd_resp_addr;
1072
1073         HWRM_PREP(req, RING_GRP_ALLOC);
1074
1075         req.cr = rte_cpu_to_le_16(bp->grp_info[idx].cp_fw_ring_id);
1076         req.rr = rte_cpu_to_le_16(bp->grp_info[idx].rx_fw_ring_id);
1077         req.ar = rte_cpu_to_le_16(bp->grp_info[idx].ag_fw_ring_id);
1078         req.sc = rte_cpu_to_le_16(bp->grp_info[idx].fw_stats_ctx);
1079
1080         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1081
1082         HWRM_CHECK_RESULT();
1083
1084         bp->grp_info[idx].fw_grp_id =
1085             rte_le_to_cpu_16(resp->ring_group_id);
1086
1087         HWRM_UNLOCK();
1088
1089         return rc;
1090 }
1091
1092 int bnxt_hwrm_ring_grp_free(struct bnxt *bp, unsigned int idx)
1093 {
1094         int rc;
1095         struct hwrm_ring_grp_free_input req = {.req_type = 0 };
1096         struct hwrm_ring_grp_free_output *resp = bp->hwrm_cmd_resp_addr;
1097
1098         HWRM_PREP(req, RING_GRP_FREE);
1099
1100         req.ring_group_id = rte_cpu_to_le_16(bp->grp_info[idx].fw_grp_id);
1101
1102         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1103
1104         HWRM_CHECK_RESULT();
1105         HWRM_UNLOCK();
1106
1107         bp->grp_info[idx].fw_grp_id = INVALID_HW_RING_ID;
1108         return rc;
1109 }
1110
1111 int bnxt_hwrm_stat_clear(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
1112 {
1113         int rc = 0;
1114         struct hwrm_stat_ctx_clr_stats_input req = {.req_type = 0 };
1115         struct hwrm_stat_ctx_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;
1116
1117         if (cpr->hw_stats_ctx_id == (uint32_t)HWRM_NA_SIGNATURE)
1118                 return rc;
1119
1120         HWRM_PREP(req, STAT_CTX_CLR_STATS);
1121
1122         req.stat_ctx_id = rte_cpu_to_le_16(cpr->hw_stats_ctx_id);
1123
1124         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1125
1126         HWRM_CHECK_RESULT();
1127         HWRM_UNLOCK();
1128
1129         return rc;
1130 }
1131
1132 int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1133                                 unsigned int idx __rte_unused)
1134 {
1135         int rc;
1136         struct hwrm_stat_ctx_alloc_input req = {.req_type = 0 };
1137         struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr;
1138
1139         HWRM_PREP(req, STAT_CTX_ALLOC);
1140
1141         req.update_period_ms = rte_cpu_to_le_32(0);
1142
1143         req.stats_dma_addr =
1144             rte_cpu_to_le_64(cpr->hw_stats_map);
1145
1146         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1147
1148         HWRM_CHECK_RESULT();
1149
1150         cpr->hw_stats_ctx_id = rte_le_to_cpu_16(resp->stat_ctx_id);
1151
1152         HWRM_UNLOCK();
1153
1154         return rc;
1155 }
1156
1157 int bnxt_hwrm_stat_ctx_free(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1158                                 unsigned int idx __rte_unused)
1159 {
1160         int rc;
1161         struct hwrm_stat_ctx_free_input req = {.req_type = 0 };
1162         struct hwrm_stat_ctx_free_output *resp = bp->hwrm_cmd_resp_addr;
1163
1164         HWRM_PREP(req, STAT_CTX_FREE);
1165
1166         req.stat_ctx_id = rte_cpu_to_le_16(cpr->hw_stats_ctx_id);
1167
1168         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1169
1170         HWRM_CHECK_RESULT();
1171         HWRM_UNLOCK();
1172
1173         return rc;
1174 }
1175
1176 int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1177 {
1178         int rc = 0, i, j;
1179         struct hwrm_vnic_alloc_input req = { 0 };
1180         struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr;
1181
1182         /* map ring groups to this vnic */
1183         PMD_DRV_LOG(DEBUG, "Alloc VNIC. Start %x, End %x\n",
1184                 vnic->start_grp_id, vnic->end_grp_id);
1185         for (i = vnic->start_grp_id, j = 0; i <= vnic->end_grp_id; i++, j++)
1186                 vnic->fw_grp_ids[j] = bp->grp_info[i].fw_grp_id;
1187         vnic->dflt_ring_grp = bp->grp_info[vnic->start_grp_id].fw_grp_id;
1188         vnic->rss_rule = (uint16_t)HWRM_NA_SIGNATURE;
1189         vnic->cos_rule = (uint16_t)HWRM_NA_SIGNATURE;
1190         vnic->lb_rule = (uint16_t)HWRM_NA_SIGNATURE;
1191         vnic->mru = bp->eth_dev->data->mtu + ETHER_HDR_LEN +
1192                                 ETHER_CRC_LEN + VLAN_TAG_SIZE;
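        /* e.g. a 1500-byte MTU yields an MRU of 1500 + 14 + 4 + 4 = 1522 */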
1193         HWRM_PREP(req, VNIC_ALLOC);
1194
1195         if (vnic->func_default)
1196                 req.flags = HWRM_VNIC_ALLOC_INPUT_FLAGS_DEFAULT;
1197         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1198
1199         HWRM_CHECK_RESULT();
1200
1201         vnic->fw_vnic_id = rte_le_to_cpu_16(resp->vnic_id);
1202         HWRM_UNLOCK();
1203         PMD_DRV_LOG(DEBUG, "VNIC ID %x\n", vnic->fw_vnic_id);
1204         return rc;
1205 }
1206
1207 static int bnxt_hwrm_vnic_plcmodes_qcfg(struct bnxt *bp,
1208                                         struct bnxt_vnic_info *vnic,
1209                                         struct bnxt_plcmodes_cfg *pmode)
1210 {
1211         int rc = 0;
1212         struct hwrm_vnic_plcmodes_qcfg_input req = {.req_type = 0 };
1213         struct hwrm_vnic_plcmodes_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
1214
1215         HWRM_PREP(req, VNIC_PLCMODES_QCFG);
1216
1217         req.vnic_id = rte_cpu_to_le_32(vnic->fw_vnic_id);
1218
1219         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1220
1221         HWRM_CHECK_RESULT();
1222
1223         pmode->flags = rte_le_to_cpu_32(resp->flags);
1224         /* dflt_vnic bit doesn't exist in the _cfg command */
1225         pmode->flags &= ~(HWRM_VNIC_PLCMODES_QCFG_OUTPUT_FLAGS_DFLT_VNIC);
1226         pmode->jumbo_thresh = rte_le_to_cpu_16(resp->jumbo_thresh);
1227         pmode->hds_offset = rte_le_to_cpu_16(resp->hds_offset);
1228         pmode->hds_threshold = rte_le_to_cpu_16(resp->hds_threshold);
1229
1230         HWRM_UNLOCK();
1231
1232         return rc;
1233 }
1234
1235 static int bnxt_hwrm_vnic_plcmodes_cfg(struct bnxt *bp,
1236                                        struct bnxt_vnic_info *vnic,
1237                                        struct bnxt_plcmodes_cfg *pmode)
1238 {
1239         int rc = 0;
1240         struct hwrm_vnic_plcmodes_cfg_input req = {.req_type = 0 };
1241         struct hwrm_vnic_plcmodes_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1242
1243         HWRM_PREP(req, VNIC_PLCMODES_CFG);
1244
1245         req.vnic_id = rte_cpu_to_le_32(vnic->fw_vnic_id);
1246         req.flags = rte_cpu_to_le_32(pmode->flags);
1247         req.jumbo_thresh = rte_cpu_to_le_16(pmode->jumbo_thresh);
1248         req.hds_offset = rte_cpu_to_le_16(pmode->hds_offset);
1249         req.hds_threshold = rte_cpu_to_le_16(pmode->hds_threshold);
1250         req.enables = rte_cpu_to_le_32(
1251             HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_HDS_THRESHOLD_VALID |
1252             HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_HDS_OFFSET_VALID |
1253             HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_JUMBO_THRESH_VALID
1254         );
1255
1256         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1257
1258         HWRM_CHECK_RESULT();
1259         HWRM_UNLOCK();
1260
1261         return rc;
1262 }
1263
1264 int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1265 {
1266         int rc = 0;
1267         struct hwrm_vnic_cfg_input req = {.req_type = 0 };
1268         struct hwrm_vnic_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1269         uint32_t ctx_enable_flag = 0;
1270         struct bnxt_plcmodes_cfg pmodes;
1271
1272         if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
1273                 PMD_DRV_LOG(DEBUG, "VNIC ID %x\n", vnic->fw_vnic_id);
1274                 return rc;
1275         }
1276
1277         rc = bnxt_hwrm_vnic_plcmodes_qcfg(bp, vnic, &pmodes);
1278         if (rc)
1279                 return rc;
1280
1281         HWRM_PREP(req, VNIC_CFG);
1282
        /* Only RSS is supported for now; TBD: COS & LB */
1284         req.enables =
1285             rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_ENABLES_DFLT_RING_GRP);
1286         if (vnic->lb_rule != 0xffff)
1287                 ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_LB_RULE;
1288         if (vnic->cos_rule != 0xffff)
1289                 ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_COS_RULE;
1290         if (vnic->rss_rule != 0xffff) {
1291                 ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_MRU;
1292                 ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_RSS_RULE;
1293         }
1294         req.enables |= rte_cpu_to_le_32(ctx_enable_flag);
1295         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1296         req.dflt_ring_grp = rte_cpu_to_le_16(vnic->dflt_ring_grp);
1297         req.rss_rule = rte_cpu_to_le_16(vnic->rss_rule);
1298         req.cos_rule = rte_cpu_to_le_16(vnic->cos_rule);
1299         req.lb_rule = rte_cpu_to_le_16(vnic->lb_rule);
1300         req.mru = rte_cpu_to_le_16(vnic->mru);
1301         if (vnic->func_default)
1302                 req.flags |=
1303                     rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_DEFAULT);
1304         if (vnic->vlan_strip)
1305                 req.flags |=
1306                     rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_VLAN_STRIP_MODE);
1307         if (vnic->bd_stall)
1308                 req.flags |=
1309                     rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_BD_STALL_MODE);
1310         if (vnic->roce_dual)
1311                 req.flags |= rte_cpu_to_le_32(
1312                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_DUAL_VNIC_MODE);
1313         if (vnic->roce_only)
1314                 req.flags |= rte_cpu_to_le_32(
1315                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_ONLY_VNIC_MODE);
1316         if (vnic->rss_dflt_cr)
1317                 req.flags |= rte_cpu_to_le_32(
1318                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_RSS_DFLT_CR_MODE);
1319
1320         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1321
1322         HWRM_CHECK_RESULT();
1323         HWRM_UNLOCK();
1324
1325         rc = bnxt_hwrm_vnic_plcmodes_cfg(bp, vnic, &pmodes);
1326
1327         return rc;
1328 }
1329
1330 int bnxt_hwrm_vnic_qcfg(struct bnxt *bp, struct bnxt_vnic_info *vnic,
1331                 int16_t fw_vf_id)
1332 {
1333         int rc = 0;
1334         struct hwrm_vnic_qcfg_input req = {.req_type = 0 };
1335         struct hwrm_vnic_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
1336
1337         if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
1338                 PMD_DRV_LOG(DEBUG, "VNIC QCFG ID %d\n", vnic->fw_vnic_id);
1339                 return rc;
1340         }
1341         HWRM_PREP(req, VNIC_QCFG);
1342
1343         req.enables =
1344                 rte_cpu_to_le_32(HWRM_VNIC_QCFG_INPUT_ENABLES_VF_ID_VALID);
1345         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1346         req.vf_id = rte_cpu_to_le_16(fw_vf_id);
1347
1348         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1349
1350         HWRM_CHECK_RESULT();
1351
1352         vnic->dflt_ring_grp = rte_le_to_cpu_16(resp->dflt_ring_grp);
1353         vnic->rss_rule = rte_le_to_cpu_16(resp->rss_rule);
1354         vnic->cos_rule = rte_le_to_cpu_16(resp->cos_rule);
1355         vnic->lb_rule = rte_le_to_cpu_16(resp->lb_rule);
1356         vnic->mru = rte_le_to_cpu_16(resp->mru);
1357         vnic->func_default = rte_le_to_cpu_32(
1358                         resp->flags) & HWRM_VNIC_QCFG_OUTPUT_FLAGS_DEFAULT;
1359         vnic->vlan_strip = rte_le_to_cpu_32(resp->flags) &
1360                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_VLAN_STRIP_MODE;
1361         vnic->bd_stall = rte_le_to_cpu_32(resp->flags) &
1362                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_BD_STALL_MODE;
1363         vnic->roce_dual = rte_le_to_cpu_32(resp->flags) &
1364                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_DUAL_VNIC_MODE;
1365         vnic->roce_only = rte_le_to_cpu_32(resp->flags) &
1366                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_ONLY_VNIC_MODE;
1367         vnic->rss_dflt_cr = rte_le_to_cpu_32(resp->flags) &
1368                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_RSS_DFLT_CR_MODE;
1369
1370         HWRM_UNLOCK();
1371
1372         return rc;
1373 }
1374
1375 int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1376 {
1377         int rc = 0;
1378         struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {.req_type = 0 };
1379         struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp =
1380                                                 bp->hwrm_cmd_resp_addr;
1381
1382         HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_ALLOC);
1383
1384         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1385
1386         HWRM_CHECK_RESULT();
1387
1388         vnic->rss_rule = rte_le_to_cpu_16(resp->rss_cos_lb_ctx_id);
1389         HWRM_UNLOCK();
1390         PMD_DRV_LOG(DEBUG, "VNIC RSS Rule %x\n", vnic->rss_rule);
1391
1392         return rc;
1393 }
1394
1395 int bnxt_hwrm_vnic_ctx_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1396 {
1397         int rc = 0;
1398         struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {.req_type = 0 };
1399         struct hwrm_vnic_rss_cos_lb_ctx_free_output *resp =
1400                                                 bp->hwrm_cmd_resp_addr;
1401
1402         if (vnic->rss_rule == INVALID_HW_RING_ID) {
1403                 PMD_DRV_LOG(DEBUG, "VNIC RSS Rule %x\n", vnic->rss_rule);
1404                 return rc;
1405         }
1406         HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_FREE);
1407
1408         req.rss_cos_lb_ctx_id = rte_cpu_to_le_16(vnic->rss_rule);
1409
1410         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1411
1412         HWRM_CHECK_RESULT();
1413         HWRM_UNLOCK();
1414
1415         vnic->rss_rule = INVALID_HW_RING_ID;
1416
1417         return rc;
1418 }
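
/*
 * Sketch of the RSS context lifecycle, for illustration only: allocate a
 * context, program the RSS table into it, then release it. A real caller
 * keeps the context for the lifetime of the VNIC rather than freeing it
 * immediately as done here.
 */
static __rte_unused int
bnxt_example_rss_ctx_cycle(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
	int rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic);

	if (rc)
		return rc;
	rc = bnxt_hwrm_vnic_rss_cfg(bp, vnic);
	bnxt_hwrm_vnic_ctx_free(bp, vnic);
	return rc;
}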
1419
1420 int bnxt_hwrm_vnic_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1421 {
1422         int rc = 0;
1423         struct hwrm_vnic_free_input req = {.req_type = 0 };
1424         struct hwrm_vnic_free_output *resp = bp->hwrm_cmd_resp_addr;
1425
1426         if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
1427                 PMD_DRV_LOG(DEBUG, "VNIC FREE ID %x\n", vnic->fw_vnic_id);
1428                 return rc;
1429         }
1430
1431         HWRM_PREP(req, VNIC_FREE);
1432
1433         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1434
1435         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1436
1437         HWRM_CHECK_RESULT();
1438         HWRM_UNLOCK();
1439
1440         vnic->fw_vnic_id = INVALID_HW_RING_ID;
1441         return rc;
1442 }
1443
1444 int bnxt_hwrm_vnic_rss_cfg(struct bnxt *bp,
1445                            struct bnxt_vnic_info *vnic)
1446 {
1447         int rc = 0;
1448         struct hwrm_vnic_rss_cfg_input req = {.req_type = 0 };
1449         struct hwrm_vnic_rss_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1450
1451         HWRM_PREP(req, VNIC_RSS_CFG);
1452
1453         req.hash_type = rte_cpu_to_le_32(vnic->hash_type);
1454
1455         req.ring_grp_tbl_addr =
1456             rte_cpu_to_le_64(vnic->rss_table_dma_addr);
1457         req.hash_key_tbl_addr =
1458             rte_cpu_to_le_64(vnic->rss_hash_key_dma_addr);
1459         req.rss_ctx_idx = rte_cpu_to_le_16(vnic->rss_rule);
1460
1461         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1462
1463         HWRM_CHECK_RESULT();
1464         HWRM_UNLOCK();
1465
1466         return rc;
1467 }
1468
1469 int bnxt_hwrm_vnic_plcmode_cfg(struct bnxt *bp,
1470                         struct bnxt_vnic_info *vnic)
1471 {
1472         int rc = 0;
1473         struct hwrm_vnic_plcmodes_cfg_input req = {.req_type = 0 };
1474         struct hwrm_vnic_plcmodes_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1475         uint16_t size;
1476
1477         HWRM_PREP(req, VNIC_PLCMODES_CFG);
1478
1479         req.flags = rte_cpu_to_le_32(
1480                         HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_JUMBO_PLACEMENT);
1481
1482         req.enables = rte_cpu_to_le_32(
1483                 HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_JUMBO_THRESH_VALID);
1484
1485         size = rte_pktmbuf_data_room_size(bp->rx_queues[0]->mb_pool);
1486         size -= RTE_PKTMBUF_HEADROOM;
1487
1488         req.jumbo_thresh = rte_cpu_to_le_16(size);
1489         req.vnic_id = rte_cpu_to_le_32(vnic->fw_vnic_id);
1490
1491         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1492
1493         HWRM_CHECK_RESULT();
1494         HWRM_UNLOCK();
1495
1496         return rc;
1497 }
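
/*
 * The jumbo threshold programmed above is simply the per-mbuf payload
 * capacity: the pool's data room minus the reserved headroom. A minimal
 * restatement of that arithmetic (illustrative helper, not driver code):
 */
static __rte_unused uint16_t
bnxt_example_jumbo_thresh(struct rte_mempool *mb_pool)
{
	/* e.g. a 2176-byte data room minus 128 bytes of headroom -> 2048 */
	return rte_pktmbuf_data_room_size(mb_pool) - RTE_PKTMBUF_HEADROOM;
}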
1498
1499 int bnxt_hwrm_vnic_tpa_cfg(struct bnxt *bp,
1500                         struct bnxt_vnic_info *vnic, bool enable)
1501 {
1502         int rc = 0;
1503         struct hwrm_vnic_tpa_cfg_input req = {.req_type = 0 };
1504         struct hwrm_vnic_tpa_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1505
1506         HWRM_PREP(req, VNIC_TPA_CFG);
1507
1508         if (enable) {
1509                 req.enables = rte_cpu_to_le_32(
1510                                 HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGG_SEGS |
1511                                 HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGGS |
1512                                 HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MIN_AGG_LEN);
1513                 req.flags = rte_cpu_to_le_32(
1514                                 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_TPA |
1515                                 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_ENCAP_TPA |
1516                                 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_RSC_WND_UPDATE |
1517                                 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_GRO |
1518                                 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_ECN |
1519                         HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_SAME_GRE_SEQ);
1520                 req.vnic_id = rte_cpu_to_le_32(vnic->fw_vnic_id);
1521                 req.max_agg_segs = rte_cpu_to_le_16(5);
1522                 req.max_aggs =
1523                         rte_cpu_to_le_16(HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_MAX);
1524                 req.min_agg_len = rte_cpu_to_le_32(512);
1525         }
1526
1527         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1528
1529         HWRM_CHECK_RESULT();
1530         HWRM_UNLOCK();
1531
1532         return rc;
1533 }
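
/*
 * Usage sketch (assumed flow, not taken from the driver): disable TPA
 * before a VNIC's rings are torn down or reconfigured, and re-enable it
 * afterwards with the fixed aggregation limits shown above.
 */
static __rte_unused int
bnxt_example_tpa_toggle(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
	int rc = bnxt_hwrm_vnic_tpa_cfg(bp, vnic, false);

	if (rc)
		return rc;
	/* ... ring reconfiguration would happen here ... */
	return bnxt_hwrm_vnic_tpa_cfg(bp, vnic, true);
}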
1534
1535 int bnxt_hwrm_func_vf_mac(struct bnxt *bp, uint16_t vf, const uint8_t *mac_addr)
1536 {
1537         struct hwrm_func_cfg_input req = {0};
1538         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1539         int rc;
1540
1541         req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
1542         req.enables = rte_cpu_to_le_32(
1543                         HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
1544         memcpy(req.dflt_mac_addr, mac_addr, sizeof(req.dflt_mac_addr));
1545         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
1546
1547         HWRM_PREP(req, FUNC_CFG);
1548
1549         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1550         HWRM_CHECK_RESULT();
1551         HWRM_UNLOCK();
1552
1553         bp->pf.vf_info[vf].random_mac = false;
1554
1555         return rc;
1556 }
1557
1558 int bnxt_hwrm_func_qstats_tx_drop(struct bnxt *bp, uint16_t fid,
1559                                   uint64_t *dropped)
1560 {
1561         int rc = 0;
1562         struct hwrm_func_qstats_input req = {.req_type = 0};
1563         struct hwrm_func_qstats_output *resp = bp->hwrm_cmd_resp_addr;
1564
1565         HWRM_PREP(req, FUNC_QSTATS);
1566
1567         req.fid = rte_cpu_to_le_16(fid);
1568
1569         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1570
1571         HWRM_CHECK_RESULT();
1572
1573         if (dropped)
1574                 *dropped = rte_le_to_cpu_64(resp->tx_drop_pkts);
1575
1576         HWRM_UNLOCK();
1577
1578         return rc;
1579 }
1580
1581 int bnxt_hwrm_func_qstats(struct bnxt *bp, uint16_t fid,
1582                           struct rte_eth_stats *stats)
1583 {
1584         int rc = 0;
1585         struct hwrm_func_qstats_input req = {.req_type = 0};
1586         struct hwrm_func_qstats_output *resp = bp->hwrm_cmd_resp_addr;
1587
1588         HWRM_PREP(req, FUNC_QSTATS);
1589
1590         req.fid = rte_cpu_to_le_16(fid);
1591
1592         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1593
1594         HWRM_CHECK_RESULT();
1595
1596         stats->ipackets = rte_le_to_cpu_64(resp->rx_ucast_pkts);
1597         stats->ipackets += rte_le_to_cpu_64(resp->rx_mcast_pkts);
1598         stats->ipackets += rte_le_to_cpu_64(resp->rx_bcast_pkts);
1599         stats->ibytes = rte_le_to_cpu_64(resp->rx_ucast_bytes);
1600         stats->ibytes += rte_le_to_cpu_64(resp->rx_mcast_bytes);
1601         stats->ibytes += rte_le_to_cpu_64(resp->rx_bcast_bytes);
1602
1603         stats->opackets = rte_le_to_cpu_64(resp->tx_ucast_pkts);
1604         stats->opackets += rte_le_to_cpu_64(resp->tx_mcast_pkts);
1605         stats->opackets += rte_le_to_cpu_64(resp->tx_bcast_pkts);
1606         stats->obytes = rte_le_to_cpu_64(resp->tx_ucast_bytes);
1607         stats->obytes += rte_le_to_cpu_64(resp->tx_mcast_bytes);
1608         stats->obytes += rte_le_to_cpu_64(resp->tx_bcast_bytes);
1609
1610         stats->ierrors = rte_le_to_cpu_64(resp->rx_err_pkts);
1611         stats->oerrors = rte_le_to_cpu_64(resp->tx_err_pkts);
1612
1613         stats->imissed = rte_le_to_cpu_64(resp->rx_drop_pkts);
1614
1615         HWRM_UNLOCK();
1616
1617         return rc;
1618 }
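
/*
 * Sketch of how an ethdev stats_get callback could wrap
 * bnxt_hwrm_func_qstats(); illustrative only. The fid of 0xffff addresses
 * the caller's own function, matching the convention used elsewhere in
 * this file.
 */
static __rte_unused int
bnxt_example_stats_get(struct rte_eth_dev *eth_dev,
		       struct rte_eth_stats *stats)
{
	struct bnxt *bp = eth_dev->data->dev_private;

	return bnxt_hwrm_func_qstats(bp, 0xffff, stats);
}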
1619
1620 int bnxt_hwrm_func_clr_stats(struct bnxt *bp, uint16_t fid)
1621 {
1622         int rc = 0;
1623         struct hwrm_func_clr_stats_input req = {.req_type = 0};
1624         struct hwrm_func_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;
1625
1626         HWRM_PREP(req, FUNC_CLR_STATS);
1627
1628         req.fid = rte_cpu_to_le_16(fid);
1629
1630         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1631
1632         HWRM_CHECK_RESULT();
1633         HWRM_UNLOCK();
1634
1635         return rc;
1636 }
1637
1638 /*
1639  * HWRM utility functions
1640  */
1641
1642 int bnxt_clear_all_hwrm_stat_ctxs(struct bnxt *bp)
1643 {
1644         unsigned int i;
1645         int rc = 0;
1646
1647         for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
1648                 struct bnxt_tx_queue *txq;
1649                 struct bnxt_rx_queue *rxq;
1650                 struct bnxt_cp_ring_info *cpr;
1651
1652                 if (i >= bp->rx_cp_nr_rings) {
1653                         txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
1654                         cpr = txq->cp_ring;
1655                 } else {
1656                         rxq = bp->rx_queues[i];
1657                         cpr = rxq->cp_ring;
1658                 }
1659
1660                 rc = bnxt_hwrm_stat_clear(bp, cpr);
1661                 if (rc)
1662                         return rc;
1663         }
1664         return 0;
1665 }
1666
1667 int bnxt_free_all_hwrm_stat_ctxs(struct bnxt *bp)
1668 {
1669         int rc;
1670         unsigned int i;
1671         struct bnxt_cp_ring_info *cpr;
1672
1673         for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
1674
1675                 if (i >= bp->rx_cp_nr_rings) {
1676                         cpr = bp->tx_queues[i - bp->rx_cp_nr_rings]->cp_ring;
1677                 } else {
1678                         cpr = bp->rx_queues[i]->cp_ring;
1679                         bp->grp_info[i].fw_stats_ctx = -1;
1680                 }
1681                 if (cpr->hw_stats_ctx_id != HWRM_NA_SIGNATURE) {
1682                         rc = bnxt_hwrm_stat_ctx_free(bp, cpr, i);
1683                         cpr->hw_stats_ctx_id = HWRM_NA_SIGNATURE;
1684                         if (rc)
1685                                 return rc;
1686                 }
1687         }
1688         return 0;
1689 }
1690
1691 int bnxt_alloc_all_hwrm_stat_ctxs(struct bnxt *bp)
1692 {
1693         unsigned int i;
1694         int rc = 0;
1695
1696         for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
1697                 struct bnxt_tx_queue *txq;
1698                 struct bnxt_rx_queue *rxq;
1699                 struct bnxt_cp_ring_info *cpr;
1700
1701                 if (i >= bp->rx_cp_nr_rings) {
1702                         txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
1703                         cpr = txq->cp_ring;
1704                 } else {
1705                         rxq = bp->rx_queues[i];
1706                         cpr = rxq->cp_ring;
1707                 }
1708
1709                 rc = bnxt_hwrm_stat_ctx_alloc(bp, cpr, i);
1710
1711                 if (rc)
1712                         return rc;
1713         }
1714         return rc;
1715 }
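
/*
 * The three walkers above share one indexing scheme: completion rings for
 * RX queues come first, followed by those for TX queues. A sketch of that
 * mapping as a standalone helper (illustrative only):
 */
static __rte_unused struct bnxt_cp_ring_info *
bnxt_example_cpr_by_index(struct bnxt *bp, unsigned int i)
{
	if (i >= bp->rx_cp_nr_rings)
		return bp->tx_queues[i - bp->rx_cp_nr_rings]->cp_ring;
	return bp->rx_queues[i]->cp_ring;
}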
1716
1717 int bnxt_free_all_hwrm_ring_grps(struct bnxt *bp)
1718 {
1719         uint16_t idx;
1720         int rc = 0;
1721
1722         for (idx = 0; idx < bp->rx_cp_nr_rings; idx++) {
1723
1724                 if (bp->grp_info[idx].fw_grp_id == INVALID_HW_RING_ID)
1725                         continue;
1726
1727                 rc = bnxt_hwrm_ring_grp_free(bp, idx);
1728
1729                 if (rc)
1730                         return rc;
1731         }
1732         return rc;
1733 }
1734
1735 static void bnxt_free_cp_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1736                                 unsigned int idx __rte_unused)
1737 {
1738         struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
1739
1740         bnxt_hwrm_ring_free(bp, cp_ring,
1741                         HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL);
1742         cp_ring->fw_ring_id = INVALID_HW_RING_ID;
1743         memset(cpr->cp_desc_ring, 0, cpr->cp_ring_struct->ring_size *
1744                         sizeof(*cpr->cp_desc_ring));
1745         cpr->cp_raw_cons = 0;
1746 }
1747
1748 int bnxt_free_all_hwrm_rings(struct bnxt *bp)
1749 {
1750         unsigned int i;
1751         int rc = 0;
1752
1753         for (i = 0; i < bp->tx_cp_nr_rings; i++) {
1754                 struct bnxt_tx_queue *txq = bp->tx_queues[i];
1755                 struct bnxt_tx_ring_info *txr = txq->tx_ring;
1756                 struct bnxt_ring *ring = txr->tx_ring_struct;
1757                 struct bnxt_cp_ring_info *cpr = txq->cp_ring;
1758                 unsigned int idx = bp->rx_cp_nr_rings + i + 1;
1759
1760                 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
1761                         bnxt_hwrm_ring_free(bp, ring,
1762                                         HWRM_RING_FREE_INPUT_RING_TYPE_TX);
1763                         ring->fw_ring_id = INVALID_HW_RING_ID;
1764                         memset(txr->tx_desc_ring, 0,
1765                                         txr->tx_ring_struct->ring_size *
1766                                         sizeof(*txr->tx_desc_ring));
1767                         memset(txr->tx_buf_ring, 0,
1768                                         txr->tx_ring_struct->ring_size *
1769                                         sizeof(*txr->tx_buf_ring));
1770                         txr->tx_prod = 0;
1771                         txr->tx_cons = 0;
1772                 }
1773                 if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) {
1774                         bnxt_free_cp_ring(bp, cpr, idx);
1775                         cpr->cp_ring_struct->fw_ring_id = INVALID_HW_RING_ID;
1776                 }
1777         }
1778
1779         for (i = 0; i < bp->rx_cp_nr_rings; i++) {
1780                 struct bnxt_rx_queue *rxq = bp->rx_queues[i];
1781                 struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
1782                 struct bnxt_ring *ring = rxr->rx_ring_struct;
1783                 struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
1784                 unsigned int idx = i + 1;
1785
1786                 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
1787                         bnxt_hwrm_ring_free(bp, ring,
1788                                         HWRM_RING_FREE_INPUT_RING_TYPE_RX);
1789                         ring->fw_ring_id = INVALID_HW_RING_ID;
1790                         bp->grp_info[idx].rx_fw_ring_id = INVALID_HW_RING_ID;
1791                         memset(rxr->rx_desc_ring, 0,
1792                                         rxr->rx_ring_struct->ring_size *
1793                                         sizeof(*rxr->rx_desc_ring));
1794                         memset(rxr->rx_buf_ring, 0,
1795                                         rxr->rx_ring_struct->ring_size *
1796                                         sizeof(*rxr->rx_buf_ring));
1797                         rxr->rx_prod = 0;
1798                 }
1799                 ring = rxr->ag_ring_struct;
1800                 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
1801                         bnxt_hwrm_ring_free(bp, ring,
1802                                             HWRM_RING_FREE_INPUT_RING_TYPE_RX);
1803                         ring->fw_ring_id = INVALID_HW_RING_ID;
1804                         memset(rxr->ag_buf_ring, 0,
1805                                rxr->ag_ring_struct->ring_size *
1806                                sizeof(*rxr->ag_buf_ring));
1807                         rxr->ag_prod = 0;
1808                         bp->grp_info[i].ag_fw_ring_id = INVALID_HW_RING_ID;
1809                 }
1810                 if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) {
1811                         bnxt_free_cp_ring(bp, cpr, idx);
1812                         bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
1813                         cpr->cp_ring_struct->fw_ring_id = INVALID_HW_RING_ID;
1814                 }
1815         }
1816
1817         /* Default completion ring */
1818         {
1819                 struct bnxt_cp_ring_info *cpr = bp->def_cp_ring;
1820
1821                 if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) {
1822                         bnxt_free_cp_ring(bp, cpr, 0);
1823                         cpr->cp_ring_struct->fw_ring_id = INVALID_HW_RING_ID;
1824                 }
1825         }
1826
1827         return rc;
1828 }
1829
1830 int bnxt_alloc_all_hwrm_ring_grps(struct bnxt *bp)
1831 {
1832         uint16_t i;
1833         int rc = 0;
1834
1835         for (i = 0; i < bp->rx_cp_nr_rings; i++) {
1836                 rc = bnxt_hwrm_ring_grp_alloc(bp, i);
1837                 if (rc)
1838                         return rc;
1839         }
1840         return rc;
1841 }
1842
1843 void bnxt_free_hwrm_resources(struct bnxt *bp)
1844 {
1845         /* Release memzone */
1846         rte_free(bp->hwrm_cmd_resp_addr);
1847         rte_free(bp->hwrm_short_cmd_req_addr);
1848         bp->hwrm_cmd_resp_addr = NULL;
1849         bp->hwrm_short_cmd_req_addr = NULL;
1850         bp->hwrm_cmd_resp_dma_addr = 0;
1851         bp->hwrm_short_cmd_req_dma_addr = 0;
1852 }
1853
1854 int bnxt_alloc_hwrm_resources(struct bnxt *bp)
1855 {
1856         struct rte_pci_device *pdev = bp->pdev;
1857         char type[RTE_MEMZONE_NAMESIZE];
1858
1859         sprintf(type, "bnxt_hwrm_%04x:%02x:%02x:%02x", pdev->addr.domain,
1860                 pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
1861         bp->max_resp_len = HWRM_MAX_RESP_LEN;
1862         bp->hwrm_cmd_resp_addr = rte_malloc(type, bp->max_resp_len, 0);
1863         if (bp->hwrm_cmd_resp_addr == NULL)
1864                 return -ENOMEM;
1865         rte_mem_lock_page(bp->hwrm_cmd_resp_addr);
1866         bp->hwrm_cmd_resp_dma_addr =
1867                 rte_mem_virt2iova(bp->hwrm_cmd_resp_addr);
1868         if (bp->hwrm_cmd_resp_dma_addr == 0) {
1869                 PMD_DRV_LOG(ERR,
1870                         "unable to map response address to physical memory\n");
1871                 return -ENOMEM;
1872         }
1873         rte_spinlock_init(&bp->hwrm_lock);
1874
1875         return 0;
1876 }
1877
1878 int bnxt_clear_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1879 {
1880         struct bnxt_filter_info *filter;
1881         int rc = 0;
1882
1883         STAILQ_FOREACH(filter, &vnic->filter, next) {
1884                 if (filter->filter_type == HWRM_CFA_EM_FILTER)
1885                         rc = bnxt_hwrm_clear_em_filter(bp, filter);
1886                 else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
1887                         rc = bnxt_hwrm_clear_ntuple_filter(bp, filter);
1888                 else
1889                         rc = bnxt_hwrm_clear_l2_filter(bp, filter);
1890                 /* Keep clearing remaining filters even if one HWRM
1891                  * command fails; the last error code is returned. */
1892         }
1893         return rc;
1894 }
1895
1896 static int
1897 bnxt_clear_hwrm_vnic_flows(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1898 {
1899         struct bnxt_filter_info *filter;
1900         struct rte_flow *flow;
1901         int rc = 0;
1902
1903         STAILQ_FOREACH(flow, &vnic->flow_list, next) {
1904                 filter = flow->filter;
1905                 PMD_DRV_LOG(ERR, "filter type %d\n", filter->filter_type);
1906                 if (filter->filter_type == HWRM_CFA_EM_FILTER)
1907                         rc = bnxt_hwrm_clear_em_filter(bp, filter);
1908                 else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
1909                         rc = bnxt_hwrm_clear_ntuple_filter(bp, filter);
1910                 else
1911                         rc = bnxt_hwrm_clear_l2_filter(bp, filter);
1912
1913                 STAILQ_REMOVE(&vnic->flow_list, flow, rte_flow, next);
1914                 rte_free(flow);
1915                 /* Keep clearing remaining flows even if one HWRM
1916                  * command fails; the last error code is returned. */
1917         }
1918         return rc;
1919 }
1920
1921 int bnxt_set_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1922 {
1923         struct bnxt_filter_info *filter;
1924         int rc = 0;
1925
1926         STAILQ_FOREACH(filter, &vnic->filter, next) {
1927                 if (filter->filter_type == HWRM_CFA_EM_FILTER)
1928                         rc = bnxt_hwrm_set_em_filter(bp, filter->dst_id,
1929                                                      filter);
1930                 else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
1931                         rc = bnxt_hwrm_set_ntuple_filter(bp, filter->dst_id,
1932                                                          filter);
1933                 else
1934                         rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id,
1935                                                      filter);
1936                 if (rc)
1937                         break;
1938         }
1939         return rc;
1940 }
1941
1942 void bnxt_free_tunnel_ports(struct bnxt *bp)
1943 {
1944         if (bp->vxlan_port_cnt)
1945                 bnxt_hwrm_tunnel_dst_port_free(bp, bp->vxlan_fw_dst_port_id,
1946                         HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN);
1947         bp->vxlan_port = 0;
1948         if (bp->geneve_port_cnt)
1949                 bnxt_hwrm_tunnel_dst_port_free(bp, bp->geneve_fw_dst_port_id,
1950                         HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_GENEVE);
1951         bp->geneve_port = 0;
1952 }
1953
1954 void bnxt_free_all_hwrm_resources(struct bnxt *bp)
1955 {
1956         int i;
1957
1958         if (bp->vnic_info == NULL)
1959                 return;
1960
1961         /*
1962          * Cleanup VNICs in reverse order, to make sure the L2 filter
1963          * from vnic0 is last to be cleaned up.
1964          */
1965         for (i = bp->nr_vnics - 1; i >= 0; i--) {
1966                 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
1967
1968                 bnxt_clear_hwrm_vnic_flows(bp, vnic);
1969
1970                 bnxt_clear_hwrm_vnic_filters(bp, vnic);
1971
1972                 bnxt_hwrm_vnic_ctx_free(bp, vnic);
1973
1974                 bnxt_hwrm_vnic_tpa_cfg(bp, vnic, false);
1975
1976                 bnxt_hwrm_vnic_free(bp, vnic);
1977         }
1978         /* Ring resources */
1979         bnxt_free_all_hwrm_rings(bp);
1980         bnxt_free_all_hwrm_ring_grps(bp);
1981         bnxt_free_all_hwrm_stat_ctxs(bp);
1982         bnxt_free_tunnel_ports(bp);
1983 }
1984
1985 static uint16_t bnxt_parse_eth_link_duplex(uint32_t conf_link_speed)
1986 {
1987         uint8_t hw_link_duplex = HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;
1988
1989         if ((conf_link_speed & ETH_LINK_SPEED_FIXED) == ETH_LINK_SPEED_AUTONEG)
1990                 return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;
1991
1992         switch (conf_link_speed) {
1993         case ETH_LINK_SPEED_10M_HD:
1994         case ETH_LINK_SPEED_100M_HD:
1995                 return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF;
1996         }
1997         return hw_link_duplex;
1998 }
1999
2000 static uint16_t bnxt_check_eth_link_autoneg(uint32_t conf_link)
2001 {
2002         return (conf_link & ETH_LINK_SPEED_FIXED) ? 0 : 1;
2003 }
2004
2005 static uint16_t bnxt_parse_eth_link_speed(uint32_t conf_link_speed)
2006 {
2007         uint16_t eth_link_speed = 0;
2008
2009         if (conf_link_speed == ETH_LINK_SPEED_AUTONEG)
2010                 return ETH_LINK_SPEED_AUTONEG;
2011
2012         switch (conf_link_speed & ~ETH_LINK_SPEED_FIXED) {
2013         case ETH_LINK_SPEED_100M:
2014         case ETH_LINK_SPEED_100M_HD:
2015                 eth_link_speed =
2016                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_100MB;
2017                 break;
2018         case ETH_LINK_SPEED_1G:
2019                 eth_link_speed =
2020                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_1GB;
2021                 break;
2022         case ETH_LINK_SPEED_2_5G:
2023                 eth_link_speed =
2024                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_2_5GB;
2025                 break;
2026         case ETH_LINK_SPEED_10G:
2027                 eth_link_speed =
2028                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_10GB;
2029                 break;
2030         case ETH_LINK_SPEED_20G:
2031                 eth_link_speed =
2032                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_20GB;
2033                 break;
2034         case ETH_LINK_SPEED_25G:
2035                 eth_link_speed =
2036                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_25GB;
2037                 break;
2038         case ETH_LINK_SPEED_40G:
2039                 eth_link_speed =
2040                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_40GB;
2041                 break;
2042         case ETH_LINK_SPEED_50G:
2043                 eth_link_speed =
2044                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_50GB;
2045                 break;
2046         case ETH_LINK_SPEED_100G:
2047                 eth_link_speed =
2048                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_100GB;
2049                 break;
2050         default:
2051                 PMD_DRV_LOG(ERR,
2052                         "Unsupported link speed %d; default to AUTO\n",
2053                         conf_link_speed);
2054                 break;
2055         }
2056         return eth_link_speed;
2057 }
2058
2059 #define BNXT_SUPPORTED_SPEEDS (ETH_LINK_SPEED_100M | ETH_LINK_SPEED_100M_HD | \
2060                 ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G | \
2061                 ETH_LINK_SPEED_10G | ETH_LINK_SPEED_20G | ETH_LINK_SPEED_25G | \
2062                 ETH_LINK_SPEED_40G | ETH_LINK_SPEED_50G | ETH_LINK_SPEED_100G)
2063
2064 static int bnxt_valid_link_speed(uint32_t link_speed, uint16_t port_id)
2065 {
2066         uint32_t one_speed;
2067
2068         if (link_speed == ETH_LINK_SPEED_AUTONEG)
2069                 return 0;
2070
2071         if (link_speed & ETH_LINK_SPEED_FIXED) {
2072                 one_speed = link_speed & ~ETH_LINK_SPEED_FIXED;
2073
2074                 if (one_speed & (one_speed - 1)) {
2075                         PMD_DRV_LOG(ERR,
2076                                 "Invalid advertised speeds (%u) for port %u\n",
2077                                 link_speed, port_id);
2078                         return -EINVAL;
2079                 }
2080                 if ((one_speed & BNXT_SUPPORTED_SPEEDS) != one_speed) {
2081                         PMD_DRV_LOG(ERR,
2082                                 "Unsupported advertised speed (%u) for port %u\n",
2083                                 link_speed, port_id);
2084                         return -EINVAL;
2085                 }
2086         } else {
2087                 if (!(link_speed & BNXT_SUPPORTED_SPEEDS)) {
2088                         PMD_DRV_LOG(ERR,
2089                                 "Unsupported advertised speeds (%u) for port %u\n",
2090                                 link_speed, port_id);
2091                         return -EINVAL;
2092                 }
2093         }
2094         return 0;
2095 }
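
/*
 * The (one_speed & (one_speed - 1)) test above clears the lowest set bit,
 * so the result is zero only when at most one speed bit was set. Worked
 * check (speed values from rte_ethdev.h, shown for illustration only):
 */
static __rte_unused int
bnxt_example_single_speed_check(uint32_t link_speed)
{
	uint32_t one_speed = link_speed & ~ETH_LINK_SPEED_FIXED;

	/* ETH_LINK_SPEED_10G (0x100): 0x100 & 0x0ff == 0 -> exactly one bit */
	return (one_speed & (one_speed - 1)) == 0;
}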
2096
2097 static uint16_t
2098 bnxt_parse_eth_link_speed_mask(struct bnxt *bp, uint32_t link_speed)
2099 {
2100         uint16_t ret = 0;
2101
2102         if (link_speed == ETH_LINK_SPEED_AUTONEG) {
2103                 if (bp->link_info.support_speeds)
2104                         return bp->link_info.support_speeds;
2105                 link_speed = BNXT_SUPPORTED_SPEEDS;
2106         }
2107
2108         if (link_speed & ETH_LINK_SPEED_100M)
2109                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
2110         if (link_speed & ETH_LINK_SPEED_100M_HD)
2111                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MBHD;
2112         if (link_speed & ETH_LINK_SPEED_1G)
2113                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_1GB;
2114         if (link_speed & ETH_LINK_SPEED_2_5G)
2115                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_2_5GB;
2116         if (link_speed & ETH_LINK_SPEED_10G)
2117                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_10GB;
2118         if (link_speed & ETH_LINK_SPEED_20G)
2119                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_20GB;
2120         if (link_speed & ETH_LINK_SPEED_25G)
2121                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_25GB;
2122         if (link_speed & ETH_LINK_SPEED_40G)
2123                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_40GB;
2124         if (link_speed & ETH_LINK_SPEED_50G)
2125                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_50GB;
2126         if (link_speed & ETH_LINK_SPEED_100G)
2127                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100GB;
2128         return ret;
2129 }
2130
2131 static uint32_t bnxt_parse_hw_link_speed(uint16_t hw_link_speed)
2132 {
2133         uint32_t eth_link_speed = ETH_SPEED_NUM_NONE;
2134
2135         switch (hw_link_speed) {
2136         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100MB:
2137                 eth_link_speed = ETH_SPEED_NUM_100M;
2138                 break;
2139         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_1GB:
2140                 eth_link_speed = ETH_SPEED_NUM_1G;
2141                 break;
2142         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2_5GB:
2143                 eth_link_speed = ETH_SPEED_NUM_2_5G;
2144                 break;
2145         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10GB:
2146                 eth_link_speed = ETH_SPEED_NUM_10G;
2147                 break;
2148         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_20GB:
2149                 eth_link_speed = ETH_SPEED_NUM_20G;
2150                 break;
2151         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_25GB:
2152                 eth_link_speed = ETH_SPEED_NUM_25G;
2153                 break;
2154         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_40GB:
2155                 eth_link_speed = ETH_SPEED_NUM_40G;
2156                 break;
2157         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_50GB:
2158                 eth_link_speed = ETH_SPEED_NUM_50G;
2159                 break;
2160         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100GB:
2161                 eth_link_speed = ETH_SPEED_NUM_100G;
2162                 break;
2163         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2GB:
2164         default:
2165                 PMD_DRV_LOG(ERR, "HWRM link speed %d not defined\n",
2166                         hw_link_speed);
2167                 break;
2168         }
2169         return eth_link_speed;
2170 }
2171
2172 static uint16_t bnxt_parse_hw_link_duplex(uint16_t hw_link_duplex)
2173 {
2174         uint16_t eth_link_duplex = ETH_LINK_FULL_DUPLEX;
2175
2176         switch (hw_link_duplex) {
2177         case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH:
2178         case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_FULL:
2179                 eth_link_duplex = ETH_LINK_FULL_DUPLEX;
2180                 break;
2181         case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF:
2182                 eth_link_duplex = ETH_LINK_HALF_DUPLEX;
2183                 break;
2184         default:
2185                 PMD_DRV_LOG(ERR, "HWRM link duplex %d not defined\n",
2186                         hw_link_duplex);
2187                 break;
2188         }
2189         return eth_link_duplex;
2190 }
2191
2192 int bnxt_get_hwrm_link_config(struct bnxt *bp, struct rte_eth_link *link)
2193 {
2194         int rc = 0;
2195         struct bnxt_link_info *link_info = &bp->link_info;
2196
2197         rc = bnxt_hwrm_port_phy_qcfg(bp, link_info);
2198         if (rc) {
2199                 PMD_DRV_LOG(ERR,
2200                         "Get link config failed with rc %d\n", rc);
2201                 goto exit;
2202         }
2203         if (link_info->link_speed)
2204                 link->link_speed =
2205                         bnxt_parse_hw_link_speed(link_info->link_speed);
2206         else
2207                 link->link_speed = ETH_SPEED_NUM_NONE;
2208         link->link_duplex = bnxt_parse_hw_link_duplex(link_info->duplex);
2209         link->link_status = link_info->link_up;
2210         link->link_autoneg = link_info->auto_mode ==
2211                 HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_NONE ?
2212                 ETH_LINK_FIXED : ETH_LINK_AUTONEG;
2213 exit:
2214         return rc;
2215 }
2216
2217 int bnxt_set_hwrm_link_config(struct bnxt *bp, bool link_up)
2218 {
2219         int rc = 0;
2220         struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
2221         struct bnxt_link_info link_req;
2222         uint16_t speed, autoneg;
2223
2224         if (!BNXT_SINGLE_PF(bp) || BNXT_VF(bp))
2225                 return 0;
2226
2227         rc = bnxt_valid_link_speed(dev_conf->link_speeds,
2228                         bp->eth_dev->data->port_id);
2229         if (rc)
2230                 goto error;
2231
2232         memset(&link_req, 0, sizeof(link_req));
2233         link_req.link_up = link_up;
2234         if (!link_up)
2235                 goto port_phy_cfg;
2236
2237         autoneg = bnxt_check_eth_link_autoneg(dev_conf->link_speeds);
2238         speed = bnxt_parse_eth_link_speed(dev_conf->link_speeds);
2239         link_req.phy_flags = HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESET_PHY;
2240         /* Autonegotiation can be used only when the firmware allows it */
2241         if (autoneg == 1 && !(bp->link_info.auto_link_speed ||
2242                                 bp->link_info.force_link_speed)) {
2243                 link_req.phy_flags |=
2244                                 HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESTART_AUTONEG;
2245                 link_req.auto_link_speed_mask =
2246                         bnxt_parse_eth_link_speed_mask(bp,
2247                                                        dev_conf->link_speeds);
2248         } else {
2249                 if (bp->link_info.phy_type ==
2250                     HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASET ||
2251                     bp->link_info.phy_type ==
2252                     HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASETE ||
2253                     bp->link_info.media_type ==
2254                     HWRM_PORT_PHY_QCFG_OUTPUT_MEDIA_TYPE_TP) {
2255                         PMD_DRV_LOG(ERR, "10GBase-T devices must autoneg\n");
2256                         return -EINVAL;
2257                 }
2258
2259                 link_req.phy_flags |= HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE;
2260                 /* If user wants a particular speed try that first. */
2261                 if (speed)
2262                         link_req.link_speed = speed;
2263                 else if (bp->link_info.force_link_speed)
2264                         link_req.link_speed = bp->link_info.force_link_speed;
2265                 else
2266                         link_req.link_speed = bp->link_info.auto_link_speed;
2267         }
2268         link_req.duplex = bnxt_parse_eth_link_duplex(dev_conf->link_speeds);
2269         link_req.auto_pause = bp->link_info.auto_pause;
2270         link_req.force_pause = bp->link_info.force_pause;
2271
2272 port_phy_cfg:
2273         rc = bnxt_hwrm_port_phy_cfg(bp, &link_req);
2274         if (rc) {
2275                 PMD_DRV_LOG(ERR,
2276                         "Set link config failed with rc %d\n", rc);
2277         }
2278
2279 error:
2280         return rc;
2281 }
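
/*
 * Assumed call sites (sketch only, names hypothetical): dev_start brings
 * the link up with the configured speeds, dev_stop forces it down.
 */
static __rte_unused void
bnxt_example_link_toggle(struct bnxt *bp, bool up)
{
	if (bnxt_set_hwrm_link_config(bp, up))
		PMD_DRV_LOG(ERR, "link %s failed\n", up ? "up" : "down");
}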
2282
2283 /* JIRA 22088 */
2284 int bnxt_hwrm_func_qcfg(struct bnxt *bp)
2285 {
2286         struct hwrm_func_qcfg_input req = {0};
2287         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
2288         uint16_t flags;
2289         int rc = 0;
2290
2291         HWRM_PREP(req, FUNC_QCFG);
2292         req.fid = rte_cpu_to_le_16(0xffff);
2293
2294         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2295
2296         HWRM_CHECK_RESULT();
2297
2298         /* Apply the hard-coded 12-bit VLAN ID mask (0xfff) */
2299         bp->vlan = rte_le_to_cpu_16(resp->vlan) & 0xfff;
2300         flags = rte_le_to_cpu_16(resp->flags);
2301         if (BNXT_PF(bp) && (flags & HWRM_FUNC_QCFG_OUTPUT_FLAGS_MULTI_HOST))
2302                 bp->flags |= BNXT_FLAG_MULTI_HOST;
2303
2304         switch (resp->port_partition_type) {
2305         case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_0:
2306         case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_5:
2307         case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR2_0:
2308                 bp->port_partition_type = resp->port_partition_type;
2309                 break;
2310         default:
2311                 bp->port_partition_type = 0;
2312                 break;
2313         }
2314
2315         HWRM_UNLOCK();
2316
2317         return rc;
2318 }
2319
2320 static void copy_func_cfg_to_qcaps(struct hwrm_func_cfg_input *fcfg,
2321                                    struct hwrm_func_qcaps_output *qcaps)
2322 {
2323         qcaps->max_rsscos_ctx = fcfg->num_rsscos_ctxs;
2324         memcpy(qcaps->mac_address, fcfg->dflt_mac_addr,
2325                sizeof(qcaps->mac_address));
2326         qcaps->max_l2_ctxs = fcfg->num_l2_ctxs;
2327         qcaps->max_rx_rings = fcfg->num_rx_rings;
2328         qcaps->max_tx_rings = fcfg->num_tx_rings;
2329         qcaps->max_cmpl_rings = fcfg->num_cmpl_rings;
2330         qcaps->max_stat_ctx = fcfg->num_stat_ctxs;
2331         qcaps->max_vfs = 0;
2332         qcaps->first_vf_id = 0;
2333         qcaps->max_vnics = fcfg->num_vnics;
2334         qcaps->max_decap_records = 0;
2335         qcaps->max_encap_records = 0;
2336         qcaps->max_tx_wm_flows = 0;
2337         qcaps->max_tx_em_flows = 0;
2338         qcaps->max_rx_wm_flows = 0;
2339         qcaps->max_rx_em_flows = 0;
2340         qcaps->max_flow_id = 0;
2341         qcaps->max_mcast_filters = fcfg->num_mcast_filters;
2342         qcaps->max_sp_tx_rings = 0;
2343         qcaps->max_hw_ring_grps = fcfg->num_hw_ring_grps;
2344 }
2345
2346 static int bnxt_hwrm_pf_func_cfg(struct bnxt *bp, int tx_rings)
2347 {
2348         struct hwrm_func_cfg_input req = {0};
2349         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2350         int rc;
2351
2352         req.enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_MTU |
2353                         HWRM_FUNC_CFG_INPUT_ENABLES_MRU |
2354                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS |
2355                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_STAT_CTXS |
2356                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
2357                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_TX_RINGS |
2358                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RX_RINGS |
2359                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_L2_CTXS |
2360                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS |
2361                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS);
2362         req.flags = rte_cpu_to_le_32(bp->pf.func_cfg_flags);
2363         req.mtu = rte_cpu_to_le_16(BNXT_MAX_MTU);
2364         req.mru = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
2365                                    ETHER_CRC_LEN + VLAN_TAG_SIZE);
2366         req.num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx);
2367         req.num_stat_ctxs = rte_cpu_to_le_16(bp->max_stat_ctx);
2368         req.num_cmpl_rings = rte_cpu_to_le_16(bp->max_cp_rings);
2369         req.num_tx_rings = rte_cpu_to_le_16(tx_rings);
2370         req.num_rx_rings = rte_cpu_to_le_16(bp->max_rx_rings);
2371         req.num_l2_ctxs = rte_cpu_to_le_16(bp->max_l2_ctx);
2372         req.num_vnics = rte_cpu_to_le_16(bp->max_vnics);
2373         req.num_hw_ring_grps = rte_cpu_to_le_16(bp->max_ring_grps);
2374         req.fid = rte_cpu_to_le_16(0xffff);
2375
2376         HWRM_PREP(req, FUNC_CFG);
2377
2378         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2379
2380         HWRM_CHECK_RESULT();
2381         HWRM_UNLOCK();
2382
2383         return rc;
2384 }
2385
2386 static void populate_vf_func_cfg_req(struct bnxt *bp,
2387                                      struct hwrm_func_cfg_input *req,
2388                                      int num_vfs)
2389 {
2390         req->enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_MTU |
2391                         HWRM_FUNC_CFG_INPUT_ENABLES_MRU |
2392                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS |
2393                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_STAT_CTXS |
2394                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
2395                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_TX_RINGS |
2396                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RX_RINGS |
2397                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_L2_CTXS |
2398                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS |
2399                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS);
2400
2401         req->mtu = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
2402                                     ETHER_CRC_LEN + VLAN_TAG_SIZE);
2403         req->mru = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
2404                                     ETHER_CRC_LEN + VLAN_TAG_SIZE);
2405         req->num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx /
2406                                                 (num_vfs + 1));
2407         req->num_stat_ctxs = rte_cpu_to_le_16(bp->max_stat_ctx / (num_vfs + 1));
2408         req->num_cmpl_rings = rte_cpu_to_le_16(bp->max_cp_rings /
2409                                                (num_vfs + 1));
2410         req->num_tx_rings = rte_cpu_to_le_16(bp->max_tx_rings / (num_vfs + 1));
2411         req->num_rx_rings = rte_cpu_to_le_16(bp->max_rx_rings / (num_vfs + 1));
2412         req->num_l2_ctxs = rte_cpu_to_le_16(bp->max_l2_ctx / (num_vfs + 1));
2413         /* TODO: For now, do not support VMDq/RFS on VFs. */
2414         req->num_vnics = rte_cpu_to_le_16(1);
2415         req->num_hw_ring_grps = rte_cpu_to_le_16(bp->max_ring_grps /
2416                                                  (num_vfs + 1));
2417 }
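
/*
 * The divisions above split each PF resource pool evenly across the PF
 * and its VFs: pool / (num_vfs + 1) per function. Worked example with
 * assumed numbers: 120 completion rings and 7 VFs give 15 rings each.
 * Illustrative restatement, not driver code:
 */
static __rte_unused uint16_t
bnxt_example_per_func_share(uint16_t pool_size, int num_vfs)
{
	return pool_size / (num_vfs + 1);
}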
2418
2419 static void add_random_mac_if_needed(struct bnxt *bp,
2420                                      struct hwrm_func_cfg_input *cfg_req,
2421                                      int vf)
2422 {
2423         struct ether_addr mac;
2424
2425         if (bnxt_hwrm_func_qcfg_vf_default_mac(bp, vf, &mac))
2426                 return;
2427
2428         if (is_zero_ether_addr(&mac)) {
2429                 cfg_req->enables |=
2430                 rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
2431                 eth_random_addr(cfg_req->dflt_mac_addr);
2432                 bp->pf.vf_info[vf].random_mac = true;
2433         } else {
2434                 memcpy(cfg_req->dflt_mac_addr, mac.addr_bytes, ETHER_ADDR_LEN);
2435         }
2436 }
2437
2438 static void reserve_resources_from_vf(struct bnxt *bp,
2439                                       struct hwrm_func_cfg_input *cfg_req,
2440                                       int vf)
2441 {
2442         struct hwrm_func_qcaps_input req = {0};
2443         struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
2444         int rc;
2445
2446         /* Get the actual allocated values now */
2447         HWRM_PREP(req, FUNC_QCAPS);
2448         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2449         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2450
2451         if (rc) {
2452                 PMD_DRV_LOG(ERR, "hwrm_func_qcaps failed rc:%d\n", rc);
2453                 copy_func_cfg_to_qcaps(cfg_req, resp);
2454         } else if (resp->error_code) {
2455                 rc = rte_le_to_cpu_16(resp->error_code);
2456                 PMD_DRV_LOG(ERR, "hwrm_func_qcaps error %d\n", rc);
2457                 copy_func_cfg_to_qcaps(cfg_req, resp);
2458         }
2459
2460         bp->max_rsscos_ctx -= rte_le_to_cpu_16(resp->max_rsscos_ctx);
2461         bp->max_stat_ctx -= rte_le_to_cpu_16(resp->max_stat_ctx);
2462         bp->max_cp_rings -= rte_le_to_cpu_16(resp->max_cmpl_rings);
2463         bp->max_tx_rings -= rte_le_to_cpu_16(resp->max_tx_rings);
2464         bp->max_rx_rings -= rte_le_to_cpu_16(resp->max_rx_rings);
2465         bp->max_l2_ctx -= rte_le_to_cpu_16(resp->max_l2_ctxs);
2466         /*
2467          * TODO: While not supporting VMDq with VFs, max_vnics is always
2468          * forced to 1 in this case
2469          */
2470         //bp->max_vnics -= rte_le_to_cpu_16(esp->max_vnics);
2471         bp->max_ring_grps -= rte_le_to_cpu_16(resp->max_hw_ring_grps);
2472
2473         HWRM_UNLOCK();
2474 }
2475
2476 int bnxt_hwrm_func_qcfg_current_vf_vlan(struct bnxt *bp, int vf)
2477 {
2478         struct hwrm_func_qcfg_input req = {0};
2479         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
2480         int rc;
2481
2482         /* Query the VF function's current default VLAN */
2483         HWRM_PREP(req, FUNC_QCFG);
2484         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2485         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2486         if (rc) {
2487                 PMD_DRV_LOG(ERR, "hwrm_func_qcfg failed rc:%d\n", rc);
2488                 return -1;
2489         } else if (resp->error_code) {
2490                 rc = rte_le_to_cpu_16(resp->error_code);
2491                 PMD_DRV_LOG(ERR, "hwrm_func_qcfg error %d\n", rc);
2492                 return -1;
2493         }
2494         rc = rte_le_to_cpu_16(resp->vlan);
2495
2496         HWRM_UNLOCK();
2497
2498         return rc;
2499 }
2500
2501 static int update_pf_resource_max(struct bnxt *bp)
2502 {
2503         struct hwrm_func_qcfg_input req = {0};
2504         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
2505         int rc;
2506
2507         /* And copy the allocated numbers into the pf struct */
2508         HWRM_PREP(req, FUNC_QCFG);
2509         req.fid = rte_cpu_to_le_16(0xffff);
2510         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2511         HWRM_CHECK_RESULT();
2512
2513         /* Only TX ring value reflects actual allocation? TODO */
2514         bp->max_tx_rings = rte_le_to_cpu_16(resp->alloc_tx_rings);
2515         bp->pf.evb_mode = resp->evb_mode;
2516
2517         HWRM_UNLOCK();
2518
2519         return rc;
2520 }
2521
2522 int bnxt_hwrm_allocate_pf_only(struct bnxt *bp)
2523 {
2524         int rc;
2525
2526         if (!BNXT_PF(bp)) {
2527                 PMD_DRV_LOG(ERR, "Attempt to allcoate VFs on a VF!\n");
2528                 return -1;
2529         }
2530
2531         rc = bnxt_hwrm_func_qcaps(bp);
2532         if (rc)
2533                 return rc;
2534
2535         bp->pf.func_cfg_flags &=
2536                 ~(HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE |
2537                   HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE);
2538         bp->pf.func_cfg_flags |=
2539                 HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE;
2540         rc = bnxt_hwrm_pf_func_cfg(bp, bp->max_tx_rings);
2541         return rc;
2542 }
2543
2544 int bnxt_hwrm_allocate_vfs(struct bnxt *bp, int num_vfs)
2545 {
2546         struct hwrm_func_cfg_input req = {0};
2547         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2548         int i;
2549         size_t sz;
2550         int rc = 0;
2551         size_t req_buf_sz;
2552
2553         if (!BNXT_PF(bp)) {
2554                 PMD_DRV_LOG(ERR, "Attempt to allcoate VFs on a VF!\n");
2555                 return -1;
2556         }
2557
2558         rc = bnxt_hwrm_func_qcaps(bp);
2559
2560         if (rc)
2561                 return rc;
2562
2563         bp->pf.active_vfs = num_vfs;
2564
2565         /*
2566          * First, configure the PF to only use one TX ring.  This ensures that
2567          * there are enough rings for all VFs.
2568          *
2569          * If we don't do this, when we call func_alloc() later, we will lock
2570          * extra rings to the PF that won't be available during func_cfg() of
2571          * the VFs.
2572          *
2573          * This has been fixed with firmware versions above 20.6.54
2574          */
2575         bp->pf.func_cfg_flags &=
2576                 ~(HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE |
2577                   HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE);
2578         bp->pf.func_cfg_flags |=
2579                 HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE;
2580         rc = bnxt_hwrm_pf_func_cfg(bp, 1);
2581         if (rc)
2582                 return rc;
2583
2584         /*
2585          * Now, create and register a buffer to hold forwarded VF requests
2586          */
2587         req_buf_sz = num_vfs * HWRM_MAX_REQ_LEN;
2588         bp->pf.vf_req_buf = rte_malloc("bnxt_vf_fwd", req_buf_sz,
2589                 page_roundup(req_buf_sz));
2590         if (bp->pf.vf_req_buf == NULL) {
2591                 rc = -ENOMEM;
2592                 goto error_free;
2593         }
2594         for (sz = 0; sz < req_buf_sz; sz += getpagesize())
2595                 rte_mem_lock_page(((char *)bp->pf.vf_req_buf) + sz);
2596         for (i = 0; i < num_vfs; i++)
2597                 bp->pf.vf_info[i].req_buf = ((char *)bp->pf.vf_req_buf) +
2598                                         (i * HWRM_MAX_REQ_LEN);
2599
2600         rc = bnxt_hwrm_func_buf_rgtr(bp);
2601         if (rc)
2602                 goto error_free;
2603
2604         populate_vf_func_cfg_req(bp, &req, num_vfs);
2605
2606         bp->pf.active_vfs = 0;
2607         for (i = 0; i < num_vfs; i++) {
2608                 add_random_mac_if_needed(bp, &req, i);
2609
2610                 HWRM_PREP(req, FUNC_CFG);
2611                 req.flags = rte_cpu_to_le_32(bp->pf.vf_info[i].func_cfg_flags);
2612                 req.fid = rte_cpu_to_le_16(bp->pf.vf_info[i].fid);
2613                 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2614
2615                 /* Clear enable flag for next pass */
2616                 req.enables &= ~rte_cpu_to_le_32(
2617                                 HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
2618
2619                 if (rc || resp->error_code) {
2620                         PMD_DRV_LOG(ERR,
2621                                 "Failed to initizlie VF %d\n", i);
2622                         PMD_DRV_LOG(ERR,
2623                                 "Not all VFs available. (%d, %d)\n",
2624                                 rc, resp->error_code);
2625                         HWRM_UNLOCK();
2626                         break;
2627                 }
2628
2629                 HWRM_UNLOCK();
2630
2631                 reserve_resources_from_vf(bp, &req, i);
2632                 bp->pf.active_vfs++;
2633                 bnxt_hwrm_func_clr_stats(bp, bp->pf.vf_info[i].fid);
2634         }
2635
2636         /*
2637          * Now configure the PF to use "the rest" of the resources.
2638          * STD_TX_RING_MODE is kept enabled here even though it limits the
2639          * number of TX rings, because it allows QoS to function properly;
2640          * without it, the PF rings would break the bandwidth settings.
2641          */
2642         rc = bnxt_hwrm_pf_func_cfg(bp, bp->max_tx_rings);
2643         if (rc)
2644                 goto error_free;
2645
2646         rc = update_pf_resource_max(bp);
2647         if (rc)
2648                 goto error_free;
2649
2650         return rc;
2651
2652 error_free:
2653         bnxt_hwrm_func_buf_unrgtr(bp);
2654         return rc;
2655 }
2656
2657 int bnxt_hwrm_pf_evb_mode(struct bnxt *bp)
2658 {
2659         struct hwrm_func_cfg_input req = {0};
2660         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2661         int rc;
2662
2663         HWRM_PREP(req, FUNC_CFG);
2664
2665         req.fid = rte_cpu_to_le_16(0xffff);
2666         req.enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_EVB_MODE);
2667         req.evb_mode = bp->pf.evb_mode;
2668
2669         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2670         HWRM_CHECK_RESULT();
2671         HWRM_UNLOCK();
2672
2673         return rc;
2674 }
2675
2676 int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, uint16_t port,
2677                                 uint8_t tunnel_type)
2678 {
2679         struct hwrm_tunnel_dst_port_alloc_input req = {0};
2680         struct hwrm_tunnel_dst_port_alloc_output *resp = bp->hwrm_cmd_resp_addr;
2681         int rc = 0;
2682
2683         HWRM_PREP(req, TUNNEL_DST_PORT_ALLOC);
2684         req.tunnel_type = tunnel_type;
2685         req.tunnel_dst_port_val = port;
2686         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2687         HWRM_CHECK_RESULT();
2688
2689         switch (tunnel_type) {
2690         case HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN:
2691                 bp->vxlan_fw_dst_port_id = resp->tunnel_dst_port_id;
2692                 bp->vxlan_port = port;
2693                 break;
2694         case HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_GENEVE:
2695                 bp->geneve_fw_dst_port_id = resp->tunnel_dst_port_id;
2696                 bp->geneve_port = port;
2697                 break;
2698         default:
2699                 break;
2700         }
2701
2702         HWRM_UNLOCK();
2703
2704         return rc;
2705 }
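
/*
 * Usage sketch (assumed caller, e.g. a UDP tunnel add handler; not driver
 * code): register a VXLAN destination port. 4789 is the IANA-assigned
 * VXLAN port; the byte order of the port argument here follows whatever
 * the existing callers of bnxt_hwrm_tunnel_dst_port_alloc() pass.
 */
static __rte_unused int
bnxt_example_add_vxlan_port(struct bnxt *bp, uint16_t port)
{
	return bnxt_hwrm_tunnel_dst_port_alloc(bp, port,
		HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN);
}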
2706
2707 int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, uint16_t port,
2708                                 uint8_t tunnel_type)
2709 {
2710         struct hwrm_tunnel_dst_port_free_input req = {0};
2711         struct hwrm_tunnel_dst_port_free_output *resp = bp->hwrm_cmd_resp_addr;
2712         int rc = 0;
2713
2714         HWRM_PREP(req, TUNNEL_DST_PORT_FREE);
2715
2716         req.tunnel_type = tunnel_type;
2717         req.tunnel_dst_port_id = rte_cpu_to_be_16(port);
2718         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2719
2720         HWRM_CHECK_RESULT();
2721         HWRM_UNLOCK();
2722
2723         return rc;
2724 }
2725
2726 int bnxt_hwrm_func_cfg_vf_set_flags(struct bnxt *bp, uint16_t vf,
2727                                         uint32_t flags)
2728 {
2729         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2730         struct hwrm_func_cfg_input req = {0};
2731         int rc;
2732
2733         HWRM_PREP(req, FUNC_CFG);
2734
2735         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2736         req.flags = rte_cpu_to_le_32(flags);
2737         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2738
2739         HWRM_CHECK_RESULT();
2740         HWRM_UNLOCK();
2741
2742         return rc;
2743 }
2744
2745 void vf_vnic_set_rxmask_cb(struct bnxt_vnic_info *vnic, void *flagp)
2746 {
2747         uint32_t *flag = flagp;
2748
2749         vnic->flags = *flag;
2750 }
2751
2752 int bnxt_set_rx_mask_no_vlan(struct bnxt *bp, struct bnxt_vnic_info *vnic)
2753 {
2754         return bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
2755 }
2756
int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp)
{
        int rc = 0;
        struct hwrm_func_buf_rgtr_input req = {.req_type = 0 };
        struct hwrm_func_buf_rgtr_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, FUNC_BUF_RGTR);

        req.req_buf_num_pages = rte_cpu_to_le_16(1);
        req.req_buf_page_size = rte_cpu_to_le_16(
                         page_getenum(bp->pf.active_vfs * HWRM_MAX_REQ_LEN));
        req.req_buf_len = rte_cpu_to_le_16(HWRM_MAX_REQ_LEN);
        req.req_buf_page_addr[0] =
                rte_cpu_to_le_64(rte_mem_virt2iova(bp->pf.vf_req_buf));
        if (req.req_buf_page_addr[0] == 0) {
                /* Release the HWRM lock taken by HWRM_PREP() before bailing */
                HWRM_UNLOCK();
                PMD_DRV_LOG(ERR,
                        "unable to map buffer address to physical memory\n");
                return -ENOMEM;
        }

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_func_buf_unrgtr(struct bnxt *bp)
{
        int rc = 0;
        struct hwrm_func_buf_unrgtr_input req = {.req_type = 0 };
        struct hwrm_func_buf_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, FUNC_BUF_UNRGTR);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

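/*
 * Point firmware asynchronous event notifications for the PF at the
 * default completion ring.
 */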
int bnxt_hwrm_func_cfg_def_cp(struct bnxt *bp)
{
        struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
        struct hwrm_func_cfg_input req = {0};
        int rc;

        HWRM_PREP(req, FUNC_CFG);

        req.fid = rte_cpu_to_le_16(0xffff);
        req.flags = rte_cpu_to_le_32(bp->pf.func_cfg_flags);
        req.enables = rte_cpu_to_le_32(
                        HWRM_FUNC_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
        req.async_event_cr = rte_cpu_to_le_16(
                        bp->def_cp_ring->cp_ring_struct->fw_ring_id);
        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_vf_func_cfg_def_cp(struct bnxt *bp)
{
        struct hwrm_func_vf_cfg_output *resp = bp->hwrm_cmd_resp_addr;
        struct hwrm_func_vf_cfg_input req = {0};
        int rc;

        HWRM_PREP(req, FUNC_VF_CFG);

        req.enables = rte_cpu_to_le_32(
                        HWRM_FUNC_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
        req.async_event_cr = rte_cpu_to_le_16(
                        bp->def_cp_ring->cp_ring_struct->fw_ring_id);
        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

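/*
 * Program the default VLAN either for a VF (is_vf != 0), using its
 * vf_info entry, or for the PF itself from bp->vlan.
 */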
int bnxt_hwrm_set_default_vlan(struct bnxt *bp, int vf, uint8_t is_vf)
{
        struct hwrm_func_cfg_input req = {0};
        struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
        uint16_t dflt_vlan, fid;
        uint32_t func_cfg_flags;
        int rc = 0;

        HWRM_PREP(req, FUNC_CFG);

        if (is_vf) {
                dflt_vlan = bp->pf.vf_info[vf].dflt_vlan;
                fid = bp->pf.vf_info[vf].fid;
                func_cfg_flags = bp->pf.vf_info[vf].func_cfg_flags;
        } else {
                /* 0xffff targets the PF itself; the endian conversion is
                 * done once, below, when the fid is written into req.fid.
                 */
                fid = 0xffff;
                func_cfg_flags = bp->pf.func_cfg_flags;
                dflt_vlan = bp->vlan;
        }

        req.flags = rte_cpu_to_le_32(func_cfg_flags);
        req.fid = rte_cpu_to_le_16(fid);
        req.enables |= rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_VLAN);
        req.dflt_vlan = rte_cpu_to_le_16(dflt_vlan);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_func_bw_cfg(struct bnxt *bp, uint16_t vf,
                        uint16_t max_bw, uint16_t enables)
{
        struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
        struct hwrm_func_cfg_input req = {0};
        int rc;

        HWRM_PREP(req, FUNC_CFG);

        req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
        req.enables |= rte_cpu_to_le_32(enables);
        req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
        req.max_bw = rte_cpu_to_le_32(max_bw);
        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_set_vf_vlan(struct bnxt *bp, int vf)
{
        struct hwrm_func_cfg_input req = {0};
        struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
        int rc = 0;

        HWRM_PREP(req, FUNC_CFG);

        req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
        req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
        req.enables |= rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_VLAN);
        req.dflt_vlan = rte_cpu_to_le_16(bp->pf.vf_info[vf].dflt_vlan);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

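/*
 * Tell the firmware to reject a command that a VF tried to forward
 * through the PF; the encapsulated request must fit in the fixed-size
 * encap_request area.
 */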
int bnxt_hwrm_reject_fwd_resp(struct bnxt *bp, uint16_t target_id,
                              void *encaped, size_t ec_size)
{
        int rc = 0;
        struct hwrm_reject_fwd_resp_input req = {.req_type = 0};
        struct hwrm_reject_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;

        if (ec_size > sizeof(req.encap_request))
                return -1;

        HWRM_PREP(req, REJECT_FWD_RESP);

        req.encap_resp_target_id = rte_cpu_to_le_16(target_id);
        memcpy(req.encap_request, encaped, ec_size);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_func_qcfg_vf_default_mac(struct bnxt *bp, uint16_t vf,
                                       struct ether_addr *mac)
{
        struct hwrm_func_qcfg_input req = {0};
        struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
        int rc;

        HWRM_PREP(req, FUNC_QCFG);

        req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();

        memcpy(mac->addr_bytes, resp->mac_address, ETHER_ADDR_LEN);

        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, uint16_t target_id,
                            void *encaped, size_t ec_size)
{
        int rc = 0;
        struct hwrm_exec_fwd_resp_input req = {.req_type = 0};
        struct hwrm_exec_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;

        if (ec_size > sizeof(req.encap_request))
                return -1;

        HWRM_PREP(req, EXEC_FWD_RESP);

        req.encap_resp_target_id = rte_cpu_to_le_16(target_id);
        memcpy(req.encap_request, encaped, ec_size);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

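/*
 * Query one statistics context and fold the counters into the
 * per-queue fields of 'stats' at index 'idx'; 'rx' selects whether the
 * context belongs to an Rx or a Tx ring.
 */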
int bnxt_hwrm_ctx_qstats(struct bnxt *bp, uint32_t cid, int idx,
                         struct rte_eth_stats *stats, uint8_t rx)
{
        int rc = 0;
        struct hwrm_stat_ctx_query_input req = {.req_type = 0};
        struct hwrm_stat_ctx_query_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, STAT_CTX_QUERY);

        req.stat_ctx_id = rte_cpu_to_le_32(cid);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();

        if (rx) {
                stats->q_ipackets[idx] = rte_le_to_cpu_64(resp->rx_ucast_pkts);
                stats->q_ipackets[idx] += rte_le_to_cpu_64(resp->rx_mcast_pkts);
                stats->q_ipackets[idx] += rte_le_to_cpu_64(resp->rx_bcast_pkts);
                stats->q_ibytes[idx] = rte_le_to_cpu_64(resp->rx_ucast_bytes);
                stats->q_ibytes[idx] += rte_le_to_cpu_64(resp->rx_mcast_bytes);
                stats->q_ibytes[idx] += rte_le_to_cpu_64(resp->rx_bcast_bytes);
                stats->q_errors[idx] = rte_le_to_cpu_64(resp->rx_err_pkts);
                stats->q_errors[idx] += rte_le_to_cpu_64(resp->rx_drop_pkts);
        } else {
                stats->q_opackets[idx] = rte_le_to_cpu_64(resp->tx_ucast_pkts);
                stats->q_opackets[idx] += rte_le_to_cpu_64(resp->tx_mcast_pkts);
                stats->q_opackets[idx] += rte_le_to_cpu_64(resp->tx_bcast_pkts);
                stats->q_obytes[idx] = rte_le_to_cpu_64(resp->tx_ucast_bytes);
                stats->q_obytes[idx] += rte_le_to_cpu_64(resp->tx_mcast_bytes);
                stats->q_obytes[idx] += rte_le_to_cpu_64(resp->tx_bcast_bytes);
                stats->q_errors[idx] += rte_le_to_cpu_64(resp->tx_err_pkts);
        }

        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_port_qstats(struct bnxt *bp)
{
        struct hwrm_port_qstats_input req = {0};
        struct hwrm_port_qstats_output *resp = bp->hwrm_cmd_resp_addr;
        struct bnxt_pf_info *pf = &bp->pf;
        int rc;

        if (!(bp->flags & BNXT_FLAG_PORT_STATS))
                return 0;

        HWRM_PREP(req, PORT_QSTATS);

        req.port_id = rte_cpu_to_le_16(pf->port_id);
        req.tx_stat_host_addr = rte_cpu_to_le_64(bp->hw_tx_port_stats_map);
        req.rx_stat_host_addr = rte_cpu_to_le_64(bp->hw_rx_port_stats_map);
        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_port_clr_stats(struct bnxt *bp)
{
        struct hwrm_port_clr_stats_input req = {0};
        struct hwrm_port_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;
        struct bnxt_pf_info *pf = &bp->pf;
        int rc;

        if (!(bp->flags & BNXT_FLAG_PORT_STATS))
                return 0;

        HWRM_PREP(req, PORT_CLR_STATS);

        req.port_id = rte_cpu_to_le_16(pf->port_id);
        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

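/*
 * Cache the port LED capabilities in bp->leds.  The LEDs are only kept
 * if every reported LED has a group id and supports alternating blink;
 * otherwise num_leds is reset to zero.
 */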
int bnxt_hwrm_port_led_qcaps(struct bnxt *bp)
{
        struct hwrm_port_led_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
        struct hwrm_port_led_qcaps_input req = {0};
        int rc;

        if (BNXT_VF(bp))
                return 0;

        HWRM_PREP(req, PORT_LED_QCAPS);
        req.port_id = bp->pf.port_id;
        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();

        if (resp->num_leds > 0 && resp->num_leds < BNXT_MAX_LED) {
                unsigned int i;

                bp->num_leds = resp->num_leds;
                memcpy(bp->leds, &resp->led0_id,
                        sizeof(bp->leds[0]) * bp->num_leds);
                for (i = 0; i < bp->num_leds; i++) {
                        struct bnxt_led_info *led = &bp->leds[i];
                        uint16_t caps = led->led_state_caps;

                        if (!led->led_group_id ||
                                !BNXT_LED_ALT_BLINK_CAP(caps)) {
                                bp->num_leds = 0;
                                break;
                        }
                }
        }

        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_port_led_cfg(struct bnxt *bp, bool led_on)
{
        struct hwrm_port_led_cfg_output *resp = bp->hwrm_cmd_resp_addr;
        struct hwrm_port_led_cfg_input req = {0};
        struct bnxt_led_cfg *led_cfg;
        uint8_t led_state = HWRM_PORT_LED_QCFG_OUTPUT_LED0_STATE_DEFAULT;
        uint16_t duration = 0;
        int rc, i;

        if (!bp->num_leds || BNXT_VF(bp))
                return -EOPNOTSUPP;

        HWRM_PREP(req, PORT_LED_CFG);

        if (led_on) {
                led_state = HWRM_PORT_LED_CFG_INPUT_LED0_STATE_BLINKALT;
                duration = rte_cpu_to_le_16(500);
        }
        req.port_id = bp->pf.port_id;
        req.num_leds = bp->num_leds;
        led_cfg = (struct bnxt_led_cfg *)&req.led0_id;
        for (i = 0; i < bp->num_leds; i++, led_cfg++) {
                req.enables |= BNXT_LED_DFLT_ENABLES(i);
                led_cfg->led_id = bp->leds[i].led_id;
                led_cfg->led_state = led_state;
                led_cfg->led_blink_on = duration;
                led_cfg->led_blink_off = duration;
                led_cfg->led_group_id = bp->leds[i].led_group_id;
        }

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_nvm_get_dir_info(struct bnxt *bp, uint32_t *entries,
                               uint32_t *length)
{
        int rc;
        struct hwrm_nvm_get_dir_info_input req = {0};
        struct hwrm_nvm_get_dir_info_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, NVM_GET_DIR_INFO);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        if (!rc) {
                *entries = rte_le_to_cpu_32(resp->entries);
                *length = rte_le_to_cpu_32(resp->entry_length);
        }
        return rc;
}

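/*
 * Copy the NVRAM directory into 'data' through a DMA-able bounce
 * buffer.  The first two bytes receive the (byte-truncated) entry
 * count and entry length, the remainder the directory entries.
 */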
int bnxt_get_nvram_directory(struct bnxt *bp, uint32_t len, uint8_t *data)
{
        int rc;
        uint32_t dir_entries;
        uint32_t entry_length;
        uint8_t *buf;
        size_t buflen;
        rte_iova_t dma_handle;
        struct hwrm_nvm_get_dir_entries_input req = {0};
        struct hwrm_nvm_get_dir_entries_output *resp = bp->hwrm_cmd_resp_addr;

        rc = bnxt_hwrm_nvm_get_dir_info(bp, &dir_entries, &entry_length);
        if (rc != 0)
                return rc;

        *data++ = dir_entries;
        *data++ = entry_length;
        len -= 2;
        memset(data, 0xff, len);

        buflen = dir_entries * entry_length;
        buf = rte_malloc("nvm_dir", buflen, 0);
        if (buf == NULL)
                return -ENOMEM;
        rte_mem_lock_page(buf);
        dma_handle = rte_mem_virt2iova(buf);
        if (dma_handle == 0) {
                PMD_DRV_LOG(ERR,
                        "unable to map response address to physical memory\n");
                rte_free(buf);
                return -ENOMEM;
        }
        HWRM_PREP(req, NVM_GET_DIR_ENTRIES);
        req.host_dest_addr = rte_cpu_to_le_64(dma_handle);
        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        if (rc == 0)
                memcpy(data, buf, len > buflen ? buflen : len);

        rte_free(buf);

        return rc;
}

int bnxt_hwrm_get_nvram_item(struct bnxt *bp, uint32_t index,
                             uint32_t offset, uint32_t length,
                             uint8_t *data)
{
        int rc;
        uint8_t *buf;
        rte_iova_t dma_handle;
        struct hwrm_nvm_read_input req = {0};
        struct hwrm_nvm_read_output *resp = bp->hwrm_cmd_resp_addr;

        buf = rte_malloc("nvm_item", length, 0);
        if (!buf)
                return -ENOMEM;
        rte_mem_lock_page(buf);

        dma_handle = rte_mem_virt2iova(buf);
        if (dma_handle == 0) {
                PMD_DRV_LOG(ERR,
                        "unable to map response address to physical memory\n");
                rte_free(buf);
                return -ENOMEM;
        }
        HWRM_PREP(req, NVM_READ);
        req.host_dest_addr = rte_cpu_to_le_64(dma_handle);
        req.dir_idx = rte_cpu_to_le_16(index);
        req.offset = rte_cpu_to_le_32(offset);
        req.len = rte_cpu_to_le_32(length);
        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();
        if (rc == 0)
                memcpy(data, buf, length);

        rte_free(buf);
        return rc;
}

int bnxt_hwrm_erase_nvram_directory(struct bnxt *bp, uint8_t index)
{
        int rc;
        struct hwrm_nvm_erase_dir_entry_input req = {0};
        struct hwrm_nvm_erase_dir_entry_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, NVM_ERASE_DIR_ENTRY);
        req.dir_idx = rte_cpu_to_le_16(index);
        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_flash_nvram(struct bnxt *bp, uint16_t dir_type,
                          uint16_t dir_ordinal, uint16_t dir_ext,
                          uint16_t dir_attr, const uint8_t *data,
                          size_t data_len)
{
        int rc;
        struct hwrm_nvm_write_input req = {0};
        struct hwrm_nvm_write_output *resp = bp->hwrm_cmd_resp_addr;
        rte_iova_t dma_handle;
        uint8_t *buf;

        /* Set up the DMA-able copy of the data before taking the HWRM
         * lock so the error paths never leave the lock held.
         */
        buf = rte_malloc("nvm_write", data_len, 0);
        if (!buf)
                return -ENOMEM;
        rte_mem_lock_page(buf);

        dma_handle = rte_mem_virt2iova(buf);
        if (dma_handle == 0) {
                PMD_DRV_LOG(ERR,
                        "unable to map response address to physical memory\n");
                rte_free(buf);
                return -ENOMEM;
        }
        memcpy(buf, data, data_len);

        HWRM_PREP(req, NVM_WRITE);

        req.dir_type = rte_cpu_to_le_16(dir_type);
        req.dir_ordinal = rte_cpu_to_le_16(dir_ordinal);
        req.dir_ext = rte_cpu_to_le_16(dir_ext);
        req.dir_attr = rte_cpu_to_le_16(dir_attr);
        req.dir_data_length = rte_cpu_to_le_32(data_len);
        req.host_src_addr = rte_cpu_to_le_64(dma_handle);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        rte_free(buf);
        return rc;
}

static void
bnxt_vnic_count(struct bnxt_vnic_info *vnic __rte_unused, void *cbdata)
{
        uint32_t *count = cbdata;

        *count = *count + 1;
}

static int bnxt_vnic_count_hwrm_stub(struct bnxt *bp __rte_unused,
                                     struct bnxt_vnic_info *vnic __rte_unused)
{
        return 0;
}

int bnxt_vf_vnic_count(struct bnxt *bp, uint16_t vf)
{
        uint32_t count = 0;

        bnxt_hwrm_func_vf_vnic_query_and_config(bp, vf, bnxt_vnic_count,
            &count, bnxt_vnic_count_hwrm_stub);

        return count;
}

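/*
 * Fill the caller-provided 'vnic_ids' table with the VNIC IDs in use
 * by the given VF.  Returns the number of IDs on success, -1 or
 * -ENOMEM on failure.
 */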
static int bnxt_hwrm_func_vf_vnic_query(struct bnxt *bp, uint16_t vf,
                                        uint16_t *vnic_ids)
{
        struct hwrm_func_vf_vnic_ids_query_input req = {0};
        struct hwrm_func_vf_vnic_ids_query_output *resp =
                                                bp->hwrm_cmd_resp_addr;
        int rc;

        HWRM_PREP(req, FUNC_VF_VNIC_IDS_QUERY);

        req.vf_id = rte_cpu_to_le_16(bp->pf.first_vf_id + vf);
        req.max_vnic_id_cnt = rte_cpu_to_le_32(bp->pf.total_vnics);
        req.vnic_id_tbl_addr = rte_cpu_to_le_64(rte_mem_virt2iova(vnic_ids));

        if (req.vnic_id_tbl_addr == 0) {
                HWRM_UNLOCK();
                PMD_DRV_LOG(ERR,
                "unable to map VNIC ID table address to physical memory\n");
                return -ENOMEM;
        }
        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
        if (rc) {
                HWRM_UNLOCK();
                PMD_DRV_LOG(ERR, "hwrm_func_vf_vnic_query failed rc:%d\n", rc);
                return -1;
        } else if (resp->error_code) {
                rc = rte_le_to_cpu_16(resp->error_code);
                HWRM_UNLOCK();
                PMD_DRV_LOG(ERR, "hwrm_func_vf_vnic_query error %d\n", rc);
                return -1;
        }
        rc = rte_le_to_cpu_32(resp->vnic_id_cnt);

        HWRM_UNLOCK();

        return rc;
}

/*
 * This function queries the VNIC IDs for a specified VF. It then calls
 * the vnic_cb to update the necessary field in vnic_info with cbdata.
 * Then it calls the hwrm_cb function to program this new vnic configuration.
 */
int bnxt_hwrm_func_vf_vnic_query_and_config(struct bnxt *bp, uint16_t vf,
        void (*vnic_cb)(struct bnxt_vnic_info *, void *), void *cbdata,
        int (*hwrm_cb)(struct bnxt *bp, struct bnxt_vnic_info *vnic))
{
        struct bnxt_vnic_info vnic;
        int rc = 0;
        int i, num_vnic_ids;
        uint16_t *vnic_ids;
        size_t vnic_id_sz;
        size_t sz;

        /* First query all VNIC ids */
        vnic_id_sz = bp->pf.total_vnics * sizeof(*vnic_ids);
        vnic_ids = rte_malloc("bnxt_hwrm_vf_vnic_ids_query", vnic_id_sz,
                        RTE_CACHE_LINE_SIZE);
        if (vnic_ids == NULL) {
                rc = -ENOMEM;
                return rc;
        }
        for (sz = 0; sz < vnic_id_sz; sz += getpagesize())
                rte_mem_lock_page(((char *)vnic_ids) + sz);

        num_vnic_ids = bnxt_hwrm_func_vf_vnic_query(bp, vf, vnic_ids);
        if (num_vnic_ids < 0) {
                /* Do not leak the ID table on query failure */
                rte_free(vnic_ids);
                return num_vnic_ids;
        }

        /* Retrieve each VNIC, let the callback update it, then program it */
        for (i = 0; i < num_vnic_ids; i++) {
                memset(&vnic, 0, sizeof(struct bnxt_vnic_info));
                vnic.fw_vnic_id = rte_le_to_cpu_16(vnic_ids[i]);
                rc = bnxt_hwrm_vnic_qcfg(bp, &vnic, bp->pf.first_vf_id + vf);
                if (rc)
                        break;
                if (vnic.mru <= 4)      /* Indicates unallocated */
                        continue;

                vnic_cb(&vnic, cbdata);

                rc = hwrm_cb(bp, &vnic);
                if (rc)
                        break;
        }

        rte_free(vnic_ids);

        return rc;
}

int bnxt_hwrm_func_cfg_vf_set_vlan_anti_spoof(struct bnxt *bp, uint16_t vf,
                                              bool on)
{
        struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
        struct hwrm_func_cfg_input req = {0};
        int rc;

        HWRM_PREP(req, FUNC_CFG);

        req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
        req.enables |= rte_cpu_to_le_32(
                        HWRM_FUNC_CFG_INPUT_ENABLES_VLAN_ANTISPOOF_MODE);
        req.vlan_antispoof_mode = on ?
                HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_VALIDATE_VLAN :
                HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_NOCHECK;
        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(struct bnxt *bp, int vf)
{
        struct bnxt_vnic_info vnic;
        uint16_t *vnic_ids;
        size_t vnic_id_sz;
        int num_vnic_ids, i;
        size_t sz;
        int rc;

        vnic_id_sz = bp->pf.total_vnics * sizeof(*vnic_ids);
        vnic_ids = rte_malloc("bnxt_hwrm_vf_vnic_ids_query", vnic_id_sz,
                        RTE_CACHE_LINE_SIZE);
        if (vnic_ids == NULL) {
                rc = -ENOMEM;
                return rc;
        }

        for (sz = 0; sz < vnic_id_sz; sz += getpagesize())
                rte_mem_lock_page(((char *)vnic_ids) + sz);

        rc = bnxt_hwrm_func_vf_vnic_query(bp, vf, vnic_ids);
        if (rc <= 0)
                goto exit;
        num_vnic_ids = rc;

        /*
         * Loop through to find the default VNIC ID.
         * TODO: The easier way would be to obtain the resp->dflt_vnic_id
         * by sending the hwrm_func_qcfg command to the firmware.
         */
        for (i = 0; i < num_vnic_ids; i++) {
                memset(&vnic, 0, sizeof(struct bnxt_vnic_info));
                vnic.fw_vnic_id = rte_le_to_cpu_16(vnic_ids[i]);
                rc = bnxt_hwrm_vnic_qcfg(bp, &vnic,
                                        bp->pf.first_vf_id + vf);
                if (rc)
                        goto exit;
                if (vnic.func_default) {
                        rte_free(vnic_ids);
                        return vnic.fw_vnic_id;
                }
        }
        /* Could not find a default VNIC. */
        PMD_DRV_LOG(ERR, "No default VNIC\n");
exit:
        rte_free(vnic_ids);
        return -1;
}

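/*
 * Allocate an exact-match (EM) flow in the firmware from the fields
 * enabled in 'filter', freeing any EM filter previously allocated for
 * this entry.
 */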
int bnxt_hwrm_set_em_filter(struct bnxt *bp,
                         uint16_t dst_id,
                         struct bnxt_filter_info *filter)
{
        int rc = 0;
        struct hwrm_cfa_em_flow_alloc_input req = {.req_type = 0 };
        struct hwrm_cfa_em_flow_alloc_output *resp = bp->hwrm_cmd_resp_addr;
        uint32_t enables = 0;

        if (filter->fw_em_filter_id != UINT64_MAX)
                bnxt_hwrm_clear_em_filter(bp, filter);

        HWRM_PREP(req, CFA_EM_FLOW_ALLOC);

        req.flags = rte_cpu_to_le_32(filter->flags);

        enables = filter->enables |
              HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_ID;
        req.dst_id = rte_cpu_to_le_16(dst_id);

        if (filter->ip_addr_type) {
                req.ip_addr_type = filter->ip_addr_type;
                enables |= HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IPADDR_TYPE;
        }
        if (enables &
            HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_L2_FILTER_ID)
                req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);
        if (enables &
            HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_MACADDR)
                memcpy(req.src_macaddr, filter->src_macaddr,
                       ETHER_ADDR_LEN);
        if (enables &
            HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_MACADDR)
                memcpy(req.dst_macaddr, filter->dst_macaddr,
                       ETHER_ADDR_LEN);
        if (enables &
            HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_OVLAN_VID)
                req.ovlan_vid = filter->l2_ovlan;
        if (enables &
            HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IVLAN_VID)
                req.ivlan_vid = filter->l2_ivlan;
        if (enables &
            HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_ETHERTYPE)
                req.ethertype = rte_cpu_to_be_16(filter->ethertype);
        if (enables &
            HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IP_PROTOCOL)
                req.ip_protocol = filter->ip_protocol;
        if (enables &
            HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_IPADDR)
                req.src_ipaddr[0] = rte_cpu_to_be_32(filter->src_ipaddr[0]);
        if (enables &
            HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_IPADDR)
                req.dst_ipaddr[0] = rte_cpu_to_be_32(filter->dst_ipaddr[0]);
        if (enables &
            HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_PORT)
                req.src_port = rte_cpu_to_be_16(filter->src_port);
        if (enables &
            HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_PORT)
                req.dst_port = rte_cpu_to_be_16(filter->dst_port);
        if (enables &
            HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_MIRROR_VNIC_ID)
                req.mirror_vnic_id = filter->mirror_vnic_id;

        req.enables = rte_cpu_to_le_32(enables);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();

        filter->fw_em_filter_id = rte_le_to_cpu_64(resp->em_filter_id);
        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_clear_em_filter(struct bnxt *bp, struct bnxt_filter_info *filter)
{
        int rc = 0;
        struct hwrm_cfa_em_flow_free_input req = {.req_type = 0 };
        struct hwrm_cfa_em_flow_free_output *resp = bp->hwrm_cmd_resp_addr;

        if (filter->fw_em_filter_id == UINT64_MAX)
                return 0;

        /* This is an informational trace, not an error condition */
        PMD_DRV_LOG(DEBUG, "Clear EM filter\n");
        HWRM_PREP(req, CFA_EM_FLOW_FREE);

        req.em_filter_id = rte_cpu_to_le_64(filter->fw_em_filter_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        filter->fw_em_filter_id = -1;
        filter->fw_l2_filter_id = -1;

        return 0;
}

int bnxt_hwrm_set_ntuple_filter(struct bnxt *bp,
                         uint16_t dst_id,
                         struct bnxt_filter_info *filter)
{
        int rc = 0;
        struct hwrm_cfa_ntuple_filter_alloc_input req = {.req_type = 0 };
        struct hwrm_cfa_ntuple_filter_alloc_output *resp =
                                                bp->hwrm_cmd_resp_addr;
        uint32_t enables = 0;

        if (filter->fw_ntuple_filter_id != UINT64_MAX)
                bnxt_hwrm_clear_ntuple_filter(bp, filter);

        HWRM_PREP(req, CFA_NTUPLE_FILTER_ALLOC);

        req.flags = rte_cpu_to_le_32(filter->flags);

        enables = filter->enables |
              HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_ID;
        req.dst_id = rte_cpu_to_le_16(dst_id);

        if (filter->ip_addr_type) {
                req.ip_addr_type = filter->ip_addr_type;
                enables |=
                        HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_IPADDR_TYPE;
        }
        if (enables &
            HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID)
                req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);
        if (enables &
            HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_MACADDR)
                memcpy(req.src_macaddr, filter->src_macaddr,
                       ETHER_ADDR_LEN);
        /*
         * DST_MACADDR is deliberately not programmed here:
         * if (enables &
         *     HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_MACADDR)
         *         memcpy(req.dst_macaddr, filter->dst_macaddr,
         *                ETHER_ADDR_LEN);
         */
        if (enables &
            HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_ETHERTYPE)
                req.ethertype = rte_cpu_to_be_16(filter->ethertype);
        if (enables &
            HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_IP_PROTOCOL)
                req.ip_protocol = filter->ip_protocol;
        if (enables &
            HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_IPADDR)
                req.src_ipaddr[0] = rte_cpu_to_le_32(filter->src_ipaddr[0]);
        if (enables &
            HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_IPADDR_MASK)
                req.src_ipaddr_mask[0] =
                        rte_cpu_to_le_32(filter->src_ipaddr_mask[0]);
        if (enables &
            HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_IPADDR)
                req.dst_ipaddr[0] = rte_cpu_to_le_32(filter->dst_ipaddr[0]);
        if (enables &
            HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_IPADDR_MASK)
                /* Use LE like the other IP address fields in this request */
                req.dst_ipaddr_mask[0] =
                        rte_cpu_to_le_32(filter->dst_ipaddr_mask[0]);
        if (enables &
            HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_PORT)
                req.src_port = rte_cpu_to_le_16(filter->src_port);
        if (enables &
            HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_PORT_MASK)
                req.src_port_mask = rte_cpu_to_le_16(filter->src_port_mask);
        if (enables &
            HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_PORT)
                req.dst_port = rte_cpu_to_le_16(filter->dst_port);
        if (enables &
            HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_PORT_MASK)
                req.dst_port_mask = rte_cpu_to_le_16(filter->dst_port_mask);
        if (enables &
            HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_MIRROR_VNIC_ID)
                req.mirror_vnic_id = filter->mirror_vnic_id;

        req.enables = rte_cpu_to_le_32(enables);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();

        filter->fw_ntuple_filter_id = rte_le_to_cpu_64(resp->ntuple_filter_id);
        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_clear_ntuple_filter(struct bnxt *bp,
                                struct bnxt_filter_info *filter)
{
        int rc = 0;
        struct hwrm_cfa_ntuple_filter_free_input req = {.req_type = 0 };
        struct hwrm_cfa_ntuple_filter_free_output *resp =
                                                bp->hwrm_cmd_resp_addr;

        if (filter->fw_ntuple_filter_id == UINT64_MAX)
                return 0;

        HWRM_PREP(req, CFA_NTUPLE_FILTER_FREE);

        req.ntuple_filter_id = rte_cpu_to_le_64(filter->fw_ntuple_filter_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        filter->fw_ntuple_filter_id = -1;

        return 0;
}

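/*
 * Populate the VNIC RSS redirection table with valid ring group ids,
 * skipping groups whose rings are not allocated, then push the table
 * to the firmware.
 */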
int bnxt_vnic_rss_configure(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
        unsigned int rss_idx, fw_idx, i;

        if (vnic->rss_table && vnic->hash_type) {
                /*
                 * Fill the RSS hash & redirection table with
                 * ring group ids for all VNICs
                 */
                for (rss_idx = 0, fw_idx = 0; rss_idx < HW_HASH_INDEX_SIZE;
                        rss_idx++, fw_idx++) {
                        for (i = 0; i < bp->rx_cp_nr_rings; i++) {
                                fw_idx %= bp->rx_cp_nr_rings;
                                if (vnic->fw_grp_ids[fw_idx] !=
                                    INVALID_HW_RING_ID)
                                        break;
                                fw_idx++;
                        }
                        if (i == bp->rx_cp_nr_rings)
                                return 0;
                        vnic->rss_table[rss_idx] =
                                vnic->fw_grp_ids[fw_idx];
                }
                return bnxt_hwrm_vnic_rss_cfg(bp, vnic);
        }
        return 0;
}