/*-
 *   BSD LICENSE
 *
 *   Copyright(c) Broadcom Limited.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Broadcom Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <unistd.h>

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_malloc.h>
#include <rte_memzone.h>
#include <rte_version.h>

#include "bnxt.h"
#include "bnxt_cpr.h"
#include "bnxt_filter.h"
#include "bnxt_hwrm.h"
#include "bnxt_rxq.h"
#include "bnxt_rxr.h"
#include "bnxt_ring.h"
#include "bnxt_txq.h"
#include "bnxt_txr.h"
#include "bnxt_vnic.h"
#include "hsi_struct_def_dpdk.h"

#include <rte_io.h>

#define HWRM_CMD_TIMEOUT                10000

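/*
 * Saved placement-mode settings for a VNIC.  bnxt_hwrm_vnic_cfg() uses
 * bnxt_hwrm_vnic_plcmodes_qcfg()/_cfg() below to save and restore these
 * around the VNIC reconfiguration.
 */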
struct bnxt_plcmodes_cfg {
        uint32_t        flags;
        uint16_t        jumbo_thresh;
        uint16_t        hds_offset;
        uint16_t        hds_threshold;
};

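/*
 * Return the log2 of the smallest HWRM-supported page size that can hold
 * "size" bytes, e.g. page_getenum(3000) == 12 (4KB pages).
 */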
static int page_getenum(size_t size)
{
        if (size <= 1 << 4)
                return 4;
        if (size <= 1 << 12)
                return 12;
        if (size <= 1 << 13)
                return 13;
        if (size <= 1 << 16)
                return 16;
        if (size <= 1 << 21)
                return 21;
        if (size <= 1 << 22)
                return 22;
        if (size <= 1 << 30)
                return 30;
        RTE_LOG(ERR, PMD, "Page size %zu out of range\n", size);
        return sizeof(void *) * 8 - 1;
}

static int page_roundup(size_t size)
{
        return 1 << page_getenum(size);
}

/*
 * HWRM Functions (sent to HWRM)
 * These are named bnxt_hwrm_*() and return -1 if bnxt_hwrm_send_message()
 * fails (i.e. a timeout), and a positive non-zero HWRM error code if the
 * HWRM command is failed by the ChiMP.
 */

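/*
 * A minimal caller sketch of that convention (illustrative only, not part
 * of the driver):
 *
 *	rc = bnxt_hwrm_func_reset(bp);
 *	if (rc < 0)		// transport failure (e.g. timeout)
 *		return rc;
 *	else if (rc > 0)	// HWRM error code returned by firmware
 *		return -EIO;
 */
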
static int bnxt_hwrm_send_message(struct bnxt *bp, void *msg,
                                        uint32_t msg_len)
{
        unsigned int i;
        struct input *req = msg;
        struct output *resp = bp->hwrm_cmd_resp_addr;
        uint32_t *data = msg;
        uint8_t *bar;
        uint8_t *valid;
        uint16_t max_req_len = bp->max_req_len;
        struct hwrm_short_input short_input = { 0 };

        if (bp->flags & BNXT_FLAG_SHORT_CMD) {
                void *short_cmd_req = bp->hwrm_short_cmd_req_addr;

                memset(short_cmd_req, 0, bp->max_req_len);
                memcpy(short_cmd_req, req, msg_len);

                short_input.req_type = rte_cpu_to_le_16(req->req_type);
                short_input.signature = rte_cpu_to_le_16(
                                        HWRM_SHORT_REQ_SIGNATURE_SHORT_CMD);
                short_input.size = rte_cpu_to_le_16(msg_len);
                short_input.req_addr =
                        rte_cpu_to_le_64(bp->hwrm_short_cmd_req_dma_addr);

                data = (uint32_t *)&short_input;
                msg_len = sizeof(short_input);

                /* Sync memory write before updating doorbell */
                rte_wmb();

                max_req_len = BNXT_HWRM_SHORT_REQ_LEN;
        }

        /* Write request msg to hwrm channel */
        for (i = 0; i < msg_len; i += 4) {
                bar = (uint8_t *)bp->bar0 + i;
                rte_write32(*data, bar);
                data++;
        }

        /* Zero the rest of the request space */
        for (; i < max_req_len; i += 4) {
                bar = (uint8_t *)bp->bar0 + i;
                rte_write32(0, bar);
        }

        /* Ring channel doorbell */
        bar = (uint8_t *)bp->bar0 + 0x100;
        rte_write32(1, bar);

        /* Poll for the valid bit */
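        /* Worst case this polls HWRM_CMD_TIMEOUT * 600us, i.e. about 6s. */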
        for (i = 0; i < HWRM_CMD_TIMEOUT; i++) {
                /* Sanity check on the resp->resp_len */
                rte_rmb();
                if (resp->resp_len && resp->resp_len <=
                                bp->max_resp_len) {
                        /* Last byte of resp contains the valid key */
                        valid = (uint8_t *)resp + resp->resp_len - 1;
                        if (*valid == HWRM_RESP_VALID_KEY)
                                break;
                }
                rte_delay_us(600);
        }

        if (i >= HWRM_CMD_TIMEOUT) {
                RTE_LOG(ERR, PMD, "Error sending msg 0x%04x\n",
                        req->req_type);
                goto err_ret;
        }
        return 0;

err_ret:
        return -1;
}

/*
 * HWRM_PREP() should be used to prepare *ALL* HWRM commands.  It grabs the
 * spinlock, and does initial processing.
 *
 * HWRM_CHECK_RESULT() checks the result and, on failure, logs the error,
 * releases the spinlock and returns it from the enclosing function; the
 * spinlock is released only on those early-return paths.  If the enclosing
 * function does not use the regular int return codes, HWRM_CHECK_RESULT()
 * should not be used directly; instead it should be copied and modified to
 * suit the function.
 *
 * HWRM_UNLOCK() must be called after all response processing is completed.
 */
#define HWRM_PREP(req, type) do { \
        rte_spinlock_lock(&bp->hwrm_lock); \
        memset(bp->hwrm_cmd_resp_addr, 0, bp->max_resp_len); \
        req.req_type = rte_cpu_to_le_16(HWRM_##type); \
        req.cmpl_ring = rte_cpu_to_le_16(-1); \
        req.seq_id = rte_cpu_to_le_16(bp->hwrm_cmd_seq++); \
        req.target_id = rte_cpu_to_le_16(0xffff); \
        req.resp_addr = rte_cpu_to_le_64(bp->hwrm_cmd_resp_dma_addr); \
} while (0)

#define HWRM_CHECK_RESULT() do {\
        if (rc) { \
                RTE_LOG(ERR, PMD, "%s failed rc:%d\n", \
                        __func__, rc); \
                rte_spinlock_unlock(&bp->hwrm_lock); \
                return rc; \
        } \
        if (resp->error_code) { \
                rc = rte_le_to_cpu_16(resp->error_code); \
                if (resp->resp_len >= 16) { \
                        struct hwrm_err_output *tmp_hwrm_err_op = \
                                                (void *)resp; \
                        RTE_LOG(ERR, PMD, \
                                "%s error %d:%d:%08x:%04x\n", \
                                __func__, \
                                rc, tmp_hwrm_err_op->cmd_err, \
                                rte_le_to_cpu_32(\
                                        tmp_hwrm_err_op->opaque_0), \
                                rte_le_to_cpu_16(\
                                        tmp_hwrm_err_op->opaque_1)); \
                } else { \
                        RTE_LOG(ERR, PMD, \
                                "%s error %d\n", __func__, rc); \
                } \
                rte_spinlock_unlock(&bp->hwrm_lock); \
                return rc; \
        } \
} while (0)

#define HWRM_UNLOCK()           rte_spinlock_unlock(&bp->hwrm_lock)

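/*
 * Typical shape of the wrappers below (sketch only, not compilable as-is;
 * SOME_CMD stands in for any HWRM_* request type):
 *
 *	HWRM_PREP(req, SOME_CMD);
 *	...fill in the remaining request fields...
 *	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
 *	HWRM_CHECK_RESULT();
 *	...read fields out of resp while the lock is still held...
 *	HWRM_UNLOCK();
 */
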
int bnxt_hwrm_cfa_l2_clear_rx_mask(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
        int rc = 0;
        struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
        struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, CFA_L2_SET_RX_MASK);
        req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
        req.mask = 0;

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp,
                                 struct bnxt_vnic_info *vnic,
                                 uint16_t vlan_count,
                                 struct bnxt_vlan_table_entry *vlan_table)
{
        int rc = 0;
        struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
        struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;
        uint32_t mask = 0;

        if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
                return rc;

        HWRM_PREP(req, CFA_L2_SET_RX_MASK);
        req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);

        /* FIXME: add the multicast flag once multicast add options are
         * supported by ethtool.
         */
        if (vnic->flags & BNXT_VNIC_INFO_BCAST)
                mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_BCAST;
        if (vnic->flags & BNXT_VNIC_INFO_UNTAGGED)
                mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN;
        if (vnic->flags & BNXT_VNIC_INFO_PROMISC)
                mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_PROMISCUOUS;
        if (vnic->flags & BNXT_VNIC_INFO_ALLMULTI)
                mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ALL_MCAST;
        if (vnic->flags & BNXT_VNIC_INFO_MCAST)
                mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_MCAST;
        if (vnic->mc_addr_cnt) {
                mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_MCAST;
                req.num_mc_entries = rte_cpu_to_le_32(vnic->mc_addr_cnt);
                req.mc_tbl_addr = rte_cpu_to_le_64(vnic->mc_list_dma_addr);
        }
        if (vlan_table) {
                if (!(mask & HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN))
                        mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLANONLY;
                req.vlan_tag_tbl_addr = rte_cpu_to_le_64(
                         rte_mem_virt2iova(vlan_table));
                req.num_vlan_tags = rte_cpu_to_le_32((uint32_t)vlan_count);
        }
        req.mask = rte_cpu_to_le_32(mask);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_cfa_vlan_antispoof_cfg(struct bnxt *bp, uint16_t fid,
                        uint16_t vlan_count,
                        struct bnxt_vlan_antispoof_table_entry *vlan_table)
{
        int rc = 0;
        struct hwrm_cfa_vlan_antispoof_cfg_input req = {.req_type = 0 };
        struct hwrm_cfa_vlan_antispoof_cfg_output *resp =
                                                bp->hwrm_cmd_resp_addr;

        /*
         * Older HWRM versions did not support this command, and the
         * set_rx_mask list was used for anti-spoofing instead.  In 1.8.0,
         * the TX path configuration was removed from the set_rx_mask call,
         * and this command was added.
         *
         * This command is also present in 1.7.8.0, and in 1.7.8.11 and
         * higher.
         */
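        /*
         * bp->fw_ver packs maj.min.bld.rsvd as
         * (maj << 24) | (min << 16) | (bld << 8) | rsvd -- see
         * bnxt_hwrm_ver_get() -- so e.g. 1.8.0.0 is 0x01080000 and
         * 1.7.8.11 is 0x0107080b.
         */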
        if (bp->fw_ver < ((1 << 24) | (8 << 16))) {
                if (bp->fw_ver != ((1 << 24) | (7 << 16) | (8 << 8))) {
                        if (bp->fw_ver < ((1 << 24) | (7 << 16) | (8 << 8) |
                                        (11)))
                                return 0;
                }
        }
        HWRM_PREP(req, CFA_VLAN_ANTISPOOF_CFG);
        req.fid = rte_cpu_to_le_16(fid);

        req.vlan_tag_mask_tbl_addr =
                rte_cpu_to_le_64(rte_mem_virt2iova(vlan_table));
        req.num_vlan_entries = rte_cpu_to_le_32((uint32_t)vlan_count);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_clear_l2_filter(struct bnxt *bp,
                           struct bnxt_filter_info *filter)
{
        int rc = 0;
        struct hwrm_cfa_l2_filter_free_input req = {.req_type = 0 };
        struct hwrm_cfa_l2_filter_free_output *resp = bp->hwrm_cmd_resp_addr;

        if (filter->fw_l2_filter_id == UINT64_MAX)
                return 0;

        HWRM_PREP(req, CFA_L2_FILTER_FREE);

        req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        filter->fw_l2_filter_id = -1;

        return 0;
}

int bnxt_hwrm_set_l2_filter(struct bnxt *bp,
                         uint16_t dst_id,
                         struct bnxt_filter_info *filter)
{
        int rc = 0;
        struct hwrm_cfa_l2_filter_alloc_input req = {.req_type = 0 };
        struct hwrm_cfa_l2_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
        struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
        const struct rte_eth_vmdq_rx_conf *conf =
                    &dev_conf->rx_adv_conf.vmdq_rx_conf;
        uint32_t enables = 0;
        uint16_t j = dst_id - 1;

        //TODO: Is there a better way to add VLANs to each VNIC in case of VMDQ
        if ((dev_conf->rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG) &&
            conf->pool_map[j].pools & (1UL << j)) {
                RTE_LOG(DEBUG, PMD,
                        "Add vlan %u to vmdq pool %u\n",
                        conf->pool_map[j].vlan_id, j);

                filter->l2_ivlan = conf->pool_map[j].vlan_id;
                filter->enables |=
                        HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN |
                        HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN_MASK;
        }

        if (filter->fw_l2_filter_id != UINT64_MAX)
                bnxt_hwrm_clear_l2_filter(bp, filter);

        HWRM_PREP(req, CFA_L2_FILTER_ALLOC);

        req.flags = rte_cpu_to_le_32(filter->flags);

        enables = filter->enables |
              HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_DST_ID;
        req.dst_id = rte_cpu_to_le_16(dst_id);

        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR)
                memcpy(req.l2_addr, filter->l2_addr,
                       ETHER_ADDR_LEN);
        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK)
                memcpy(req.l2_addr_mask, filter->l2_addr_mask,
                       ETHER_ADDR_LEN);
        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN)
                req.l2_ovlan = filter->l2_ovlan;
        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN)
                req.l2_ivlan = filter->l2_ivlan;
        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN_MASK)
                req.l2_ovlan_mask = filter->l2_ovlan_mask;
        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN_MASK)
                req.l2_ivlan_mask = filter->l2_ivlan_mask;
        if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_ID)
                req.src_id = rte_cpu_to_le_32(filter->src_id);
        if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_TYPE)
                req.src_type = filter->src_type;

        req.enables = rte_cpu_to_le_32(enables);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();

        filter->fw_l2_filter_id = rte_le_to_cpu_64(resp->l2_filter_id);
        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_func_qcaps(struct bnxt *bp)
{
        int rc = 0;
        struct hwrm_func_qcaps_input req = {.req_type = 0 };
        struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
        uint16_t new_max_vfs;
        int i;

        HWRM_PREP(req, FUNC_QCAPS);

        req.fid = rte_cpu_to_le_16(0xffff);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();

        bp->max_ring_grps = rte_le_to_cpu_32(resp->max_hw_ring_grps);
        if (BNXT_PF(bp)) {
                bp->pf.port_id = resp->port_id;
                bp->pf.first_vf_id = rte_le_to_cpu_16(resp->first_vf_id);
                new_max_vfs = bp->pdev->max_vfs;
                if (new_max_vfs != bp->pf.max_vfs) {
                        if (bp->pf.vf_info)
                                rte_free(bp->pf.vf_info);
                        bp->pf.vf_info = rte_malloc("bnxt_vf_info",
                            sizeof(bp->pf.vf_info[0]) * new_max_vfs, 0);
                        bp->pf.max_vfs = new_max_vfs;
                        for (i = 0; i < new_max_vfs; i++) {
                                bp->pf.vf_info[i].fid = bp->pf.first_vf_id + i;
                                bp->pf.vf_info[i].vlan_table =
                                        rte_zmalloc("VF VLAN table",
                                                    getpagesize(),
                                                    getpagesize());
                                if (bp->pf.vf_info[i].vlan_table == NULL)
                                        RTE_LOG(ERR, PMD,
                                        "Failed to alloc VLAN table for VF %d\n",
                                        i);
                                else
                                        rte_mem_lock_page(
                                                bp->pf.vf_info[i].vlan_table);
                                bp->pf.vf_info[i].vlan_as_table =
                                        rte_zmalloc("VF VLAN AS table",
                                                    getpagesize(),
                                                    getpagesize());
                                if (bp->pf.vf_info[i].vlan_as_table == NULL)
                                        RTE_LOG(ERR, PMD,
                                        "Failed to alloc VLAN AS table for VF %d\n",
                                        i);
                                else
                                        rte_mem_lock_page(
                                               bp->pf.vf_info[i].vlan_as_table);
                                STAILQ_INIT(&bp->pf.vf_info[i].filter);
                        }
                }
        }

        bp->fw_fid = rte_le_to_cpu_32(resp->fid);
        memcpy(bp->dflt_mac_addr, &resp->mac_address, ETHER_ADDR_LEN);
        bp->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx);
        bp->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings);
        bp->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings);
        bp->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings);
        bp->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
        /* TODO: For now, do not support VMDq/RFS on VFs. */
        if (BNXT_PF(bp)) {
                if (bp->pf.max_vfs)
                        bp->max_vnics = 1;
                else
                        bp->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
        } else {
                bp->max_vnics = 1;
        }
        bp->max_stat_ctx = rte_le_to_cpu_16(resp->max_stat_ctx);
        if (BNXT_PF(bp))
                bp->pf.total_vnics = rte_le_to_cpu_16(resp->max_vnics);
        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_func_reset(struct bnxt *bp)
{
        int rc = 0;
        struct hwrm_func_reset_input req = {.req_type = 0 };
        struct hwrm_func_reset_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, FUNC_RESET);

        req.enables = rte_cpu_to_le_32(0);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_func_driver_register(struct bnxt *bp)
{
        int rc;
        struct hwrm_func_drv_rgtr_input req = {.req_type = 0 };
        struct hwrm_func_drv_rgtr_output *resp = bp->hwrm_cmd_resp_addr;

        if (bp->flags & BNXT_FLAG_REGISTERED)
                return 0;

        HWRM_PREP(req, FUNC_DRV_RGTR);
        req.enables = rte_cpu_to_le_32(HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VER |
                        HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_ASYNC_EVENT_FWD);
        req.ver_maj = RTE_VER_YEAR;
        req.ver_min = RTE_VER_MONTH;
        req.ver_upd = RTE_VER_MINOR;

        if (BNXT_PF(bp)) {
                req.enables |= rte_cpu_to_le_32(
                        HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VF_REQ_FWD);
                memcpy(req.vf_req_fwd, bp->pf.vf_req_fwd,
                       RTE_MIN(sizeof(req.vf_req_fwd),
                               sizeof(bp->pf.vf_req_fwd)));
        }

        req.async_event_fwd[0] |= rte_cpu_to_le_32(0x1);   /* TODO: Use MACRO */
        //memset(req.async_event_fwd, 0xff, sizeof(req.async_event_fwd));

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        bp->flags |= BNXT_FLAG_REGISTERED;

        return rc;
}

int bnxt_hwrm_ver_get(struct bnxt *bp)
{
        int rc = 0;
        struct hwrm_ver_get_input req = {.req_type = 0 };
        struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr;
        uint32_t my_version;
        uint32_t fw_version;
        uint16_t max_resp_len;
        char type[RTE_MEMZONE_NAMESIZE];
        uint32_t dev_caps_cfg;

        bp->max_req_len = HWRM_MAX_REQ_LEN;
        HWRM_PREP(req, VER_GET);

        req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
        req.hwrm_intf_min = HWRM_VERSION_MINOR;
        req.hwrm_intf_upd = HWRM_VERSION_UPDATE;

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();

        RTE_LOG(INFO, PMD, "%d.%d.%d:%d.%d.%d\n",
                resp->hwrm_intf_maj, resp->hwrm_intf_min,
                resp->hwrm_intf_upd,
                resp->hwrm_fw_maj, resp->hwrm_fw_min, resp->hwrm_fw_bld);
        bp->fw_ver = (resp->hwrm_fw_maj << 24) | (resp->hwrm_fw_min << 16) |
                        (resp->hwrm_fw_bld << 8) | resp->hwrm_fw_rsvd;
        RTE_LOG(INFO, PMD, "Driver HWRM version: %d.%d.%d\n",
                HWRM_VERSION_MAJOR, HWRM_VERSION_MINOR, HWRM_VERSION_UPDATE);

        my_version = HWRM_VERSION_MAJOR << 16;
        my_version |= HWRM_VERSION_MINOR << 8;
        my_version |= HWRM_VERSION_UPDATE;

        fw_version = resp->hwrm_intf_maj << 16;
        fw_version |= resp->hwrm_intf_min << 8;
        fw_version |= resp->hwrm_intf_upd;

        if (resp->hwrm_intf_maj != HWRM_VERSION_MAJOR) {
                RTE_LOG(ERR, PMD, "Unsupported firmware API version\n");
                rc = -EINVAL;
                goto error;
        }

        if (my_version != fw_version) {
                RTE_LOG(INFO, PMD, "BNXT Driver/HWRM API mismatch.\n");
                if (my_version < fw_version) {
                        RTE_LOG(INFO, PMD,
                                "Firmware API version is newer than driver.\n");
                        RTE_LOG(INFO, PMD,
                                "The driver may be missing features.\n");
                } else {
                        RTE_LOG(INFO, PMD,
                                "Firmware API version is older than driver.\n");
                        RTE_LOG(INFO, PMD,
                                "Not all driver features may be functional.\n");
                }
        }

        if (bp->max_req_len > rte_le_to_cpu_16(resp->max_req_win_len)) {
                RTE_LOG(ERR, PMD, "Unsupported request length\n");
                rc = -EINVAL;
        }
        bp->max_req_len = rte_le_to_cpu_16(resp->max_req_win_len);
        max_resp_len = rte_le_to_cpu_16(resp->max_resp_len);
        dev_caps_cfg = rte_le_to_cpu_32(resp->dev_caps_cfg);

        /* Format the name up front: it is also needed by the short command
         * allocation below, which may run even when max_resp_len is
         * unchanged.
         */
        sprintf(type, "bnxt_hwrm_%04x:%02x:%02x:%02x",
                bp->pdev->addr.domain, bp->pdev->addr.bus,
                bp->pdev->addr.devid, bp->pdev->addr.function);

        if (bp->max_resp_len != max_resp_len) {
                rte_free(bp->hwrm_cmd_resp_addr);

                bp->hwrm_cmd_resp_addr = rte_malloc(type, max_resp_len, 0);
                if (bp->hwrm_cmd_resp_addr == NULL) {
                        rc = -ENOMEM;
                        goto error;
                }
                rte_mem_lock_page(bp->hwrm_cmd_resp_addr);
                bp->hwrm_cmd_resp_dma_addr =
                        rte_mem_virt2iova(bp->hwrm_cmd_resp_addr);
                if (bp->hwrm_cmd_resp_dma_addr == 0) {
                        RTE_LOG(ERR, PMD,
                        "Unable to map response buffer to physical memory.\n");
                        rc = -ENOMEM;
                        goto error;
                }
                bp->max_resp_len = max_resp_len;
        }

        if ((dev_caps_cfg &
                HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
            (dev_caps_cfg &
             HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_REQUIRED)) {
                RTE_LOG(DEBUG, PMD, "Short command supported\n");

                rte_free(bp->hwrm_short_cmd_req_addr);

                bp->hwrm_short_cmd_req_addr = rte_malloc(type,
                                                        bp->max_req_len, 0);
                if (bp->hwrm_short_cmd_req_addr == NULL) {
                        rc = -ENOMEM;
                        goto error;
                }
                rte_mem_lock_page(bp->hwrm_short_cmd_req_addr);
                bp->hwrm_short_cmd_req_dma_addr =
                        rte_mem_virt2iova(bp->hwrm_short_cmd_req_addr);
                if (bp->hwrm_short_cmd_req_dma_addr == 0) {
                        rte_free(bp->hwrm_short_cmd_req_addr);
                        RTE_LOG(ERR, PMD,
                                "Unable to map buffer to physical memory.\n");
                        rc = -ENOMEM;
                        goto error;
                }

                bp->flags |= BNXT_FLAG_SHORT_CMD;
        }

error:
        HWRM_UNLOCK();
        return rc;
}

int bnxt_hwrm_func_driver_unregister(struct bnxt *bp, uint32_t flags)
{
        int rc;
        struct hwrm_func_drv_unrgtr_input req = {.req_type = 0 };
        struct hwrm_func_drv_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;

        if (!(bp->flags & BNXT_FLAG_REGISTERED))
                return 0;

        HWRM_PREP(req, FUNC_DRV_UNRGTR);
        req.flags = flags;

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        bp->flags &= ~BNXT_FLAG_REGISTERED;

        return rc;
}

static int bnxt_hwrm_port_phy_cfg(struct bnxt *bp, struct bnxt_link_info *conf)
{
        int rc = 0;
        struct hwrm_port_phy_cfg_input req = {0};
        struct hwrm_port_phy_cfg_output *resp = bp->hwrm_cmd_resp_addr;
        uint32_t enables = 0;

        HWRM_PREP(req, PORT_PHY_CFG);

        if (conf->link_up) {
                /* Setting a fixed speed while autoneg is on, so disable it */
                if (bp->link_info.auto_mode && conf->link_speed) {
                        req.auto_mode = HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_NONE;
                        RTE_LOG(DEBUG, PMD, "Disabling AutoNeg\n");
                }

                req.flags = rte_cpu_to_le_32(conf->phy_flags);
                req.force_link_speed = rte_cpu_to_le_16(conf->link_speed);
                enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_MODE;
                /*
                 * Note, ChiMP FW 20.2.1 and 20.2.2 return an error when we set
                 * any auto mode, even "none".
                 */
                if (!conf->link_speed) {
                        /* No speeds specified. Enable AutoNeg - all speeds */
                        req.auto_mode =
                                HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_ALL_SPEEDS;
                }
                /* AutoNeg - Advertise the speeds specified. */
                if (conf->auto_link_speed_mask &&
                    !(conf->phy_flags & HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE)) {
                        req.auto_mode =
                                HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_SPEED_MASK;
                        req.auto_link_speed_mask =
                                conf->auto_link_speed_mask;
                        enables |=
                        HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED_MASK;
                }

                req.auto_duplex = conf->duplex;
                enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_DUPLEX;
                req.auto_pause = conf->auto_pause;
                req.force_pause = conf->force_pause;
                /* Set force_pause if there is no auto or if there is a force */
                if (req.auto_pause && !req.force_pause)
                        enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_PAUSE;
                else
                        enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_FORCE_PAUSE;

                req.enables = rte_cpu_to_le_32(enables);
        } else {
                req.flags =
                rte_cpu_to_le_32(HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE_LINK_DWN);
                RTE_LOG(INFO, PMD, "Force Link Down\n");
        }

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

static int bnxt_hwrm_port_phy_qcfg(struct bnxt *bp,
                                   struct bnxt_link_info *link_info)
{
        int rc = 0;
        struct hwrm_port_phy_qcfg_input req = {0};
        struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, PORT_PHY_QCFG);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();

        link_info->phy_link_status = resp->link;
        link_info->link_up =
                (link_info->phy_link_status ==
                 HWRM_PORT_PHY_QCFG_OUTPUT_LINK_LINK) ? 1 : 0;
        link_info->link_speed = rte_le_to_cpu_16(resp->link_speed);
        link_info->duplex = resp->duplex_cfg;
        link_info->pause = resp->pause;
        link_info->auto_pause = resp->auto_pause;
        link_info->force_pause = resp->force_pause;
        link_info->auto_mode = resp->auto_mode;
        link_info->phy_type = resp->phy_type;
        link_info->media_type = resp->media_type;

        link_info->support_speeds = rte_le_to_cpu_16(resp->support_speeds);
        link_info->auto_link_speed = rte_le_to_cpu_16(resp->auto_link_speed);
        link_info->preemphasis = rte_le_to_cpu_32(resp->preemphasis);
        link_info->force_link_speed = rte_le_to_cpu_16(resp->force_link_speed);
        link_info->phy_ver[0] = resp->phy_maj;
        link_info->phy_ver[1] = resp->phy_min;
        link_info->phy_ver[2] = resp->phy_bld;

        HWRM_UNLOCK();

        RTE_LOG(DEBUG, PMD, "Link Speed %d\n", link_info->link_speed);
        RTE_LOG(DEBUG, PMD, "Auto Mode %d\n", link_info->auto_mode);
        RTE_LOG(DEBUG, PMD, "Support Speeds %x\n", link_info->support_speeds);
        RTE_LOG(DEBUG, PMD, "Auto Link Speed %x\n", link_info->auto_link_speed);
        RTE_LOG(DEBUG, PMD, "Auto Link Speed Mask %x\n",
                    link_info->auto_link_speed_mask);
        RTE_LOG(DEBUG, PMD, "Forced Link Speed %x\n",
                    link_info->force_link_speed);

        return rc;
}

int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
{
        int rc = 0;
        struct hwrm_queue_qportcfg_input req = {.req_type = 0 };
        struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, QUEUE_QPORTCFG);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();

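/*
 * Token-paste the queue index into the response field names:
 * GET_QUEUE_INFO(0) reads resp->queue_id0 and
 * resp->queue_id0_service_profile.
 */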
#define GET_QUEUE_INFO(x) \
        bp->cos_queue[x].id = resp->queue_id##x; \
        bp->cos_queue[x].profile = resp->queue_id##x##_service_profile

        GET_QUEUE_INFO(0);
        GET_QUEUE_INFO(1);
        GET_QUEUE_INFO(2);
        GET_QUEUE_INFO(3);
        GET_QUEUE_INFO(4);
        GET_QUEUE_INFO(5);
        GET_QUEUE_INFO(6);
        GET_QUEUE_INFO(7);

        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_ring_alloc(struct bnxt *bp,
                         struct bnxt_ring *ring,
                         uint32_t ring_type, uint32_t map_index,
                         uint32_t stats_ctx_id, uint32_t cmpl_ring_id)
{
        int rc = 0;
        uint32_t enables = 0;
        struct hwrm_ring_alloc_input req = {.req_type = 0 };
        struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, RING_ALLOC);

        req.page_tbl_addr = rte_cpu_to_le_64(ring->bd_dma);
        req.fbo = rte_cpu_to_le_32(0);
        /* Association of ring index with doorbell index */
        req.logical_id = rte_cpu_to_le_16(map_index);
        req.length = rte_cpu_to_le_32(ring->ring_size);

        switch (ring_type) {
        case HWRM_RING_ALLOC_INPUT_RING_TYPE_TX:
                req.queue_id = bp->cos_queue[0].id;
                /* FALLTHROUGH */
        case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX:
                req.ring_type = ring_type;
                req.cmpl_ring_id = rte_cpu_to_le_16(cmpl_ring_id);
                req.stat_ctx_id = rte_cpu_to_le_16(stats_ctx_id);
                if (stats_ctx_id != INVALID_STATS_CTX_ID)
                        enables |=
                        HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID;
                break;
        case HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL:
                req.ring_type = ring_type;
                /*
                 * TODO: Some HWRM versions crash with
                 * HWRM_RING_ALLOC_INPUT_INT_MODE_POLL
                 */
                req.int_mode = HWRM_RING_ALLOC_INPUT_INT_MODE_MSIX;
                break;
        default:
                RTE_LOG(ERR, PMD, "hwrm alloc invalid ring type %d\n",
                        ring_type);
                HWRM_UNLOCK();
                return -1;
        }
        req.enables = rte_cpu_to_le_32(enables);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        if (rc || resp->error_code) {
                if (rc == 0 && resp->error_code)
                        rc = rte_le_to_cpu_16(resp->error_code);
                switch (ring_type) {
                case HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL:
                        RTE_LOG(ERR, PMD,
                                "hwrm_ring_alloc cp failed. rc:%d\n", rc);
                        HWRM_UNLOCK();
                        return rc;
                case HWRM_RING_FREE_INPUT_RING_TYPE_RX:
                        RTE_LOG(ERR, PMD,
                                "hwrm_ring_alloc rx failed. rc:%d\n", rc);
                        HWRM_UNLOCK();
                        return rc;
                case HWRM_RING_FREE_INPUT_RING_TYPE_TX:
                        RTE_LOG(ERR, PMD,
                                "hwrm_ring_alloc tx failed. rc:%d\n", rc);
                        HWRM_UNLOCK();
                        return rc;
                default:
                        RTE_LOG(ERR, PMD, "Invalid ring. rc:%d\n", rc);
                        HWRM_UNLOCK();
                        return rc;
                }
        }

        ring->fw_ring_id = rte_le_to_cpu_16(resp->ring_id);
        HWRM_UNLOCK();
        return rc;
}

int bnxt_hwrm_ring_free(struct bnxt *bp,
                        struct bnxt_ring *ring, uint32_t ring_type)
{
        int rc;
        struct hwrm_ring_free_input req = {.req_type = 0 };
        struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, RING_FREE);

        req.ring_type = ring_type;
        req.ring_id = rte_cpu_to_le_16(ring->fw_ring_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        if (rc || resp->error_code) {
                if (rc == 0 && resp->error_code)
                        rc = rte_le_to_cpu_16(resp->error_code);
                HWRM_UNLOCK();

                switch (ring_type) {
                case HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL:
                        RTE_LOG(ERR, PMD, "hwrm_ring_free cp failed. rc:%d\n",
                                rc);
                        return rc;
                case HWRM_RING_FREE_INPUT_RING_TYPE_RX:
                        RTE_LOG(ERR, PMD, "hwrm_ring_free rx failed. rc:%d\n",
                                rc);
                        return rc;
                case HWRM_RING_FREE_INPUT_RING_TYPE_TX:
                        RTE_LOG(ERR, PMD, "hwrm_ring_free tx failed. rc:%d\n",
                                rc);
                        return rc;
                default:
                        RTE_LOG(ERR, PMD, "Invalid ring, rc:%d\n", rc);
                        return rc;
                }
        }
        HWRM_UNLOCK();
        return 0;
}

int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp, unsigned int idx)
{
        int rc = 0;
        struct hwrm_ring_grp_alloc_input req = {.req_type = 0 };
        struct hwrm_ring_grp_alloc_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, RING_GRP_ALLOC);

        req.cr = rte_cpu_to_le_16(bp->grp_info[idx].cp_fw_ring_id);
        req.rr = rte_cpu_to_le_16(bp->grp_info[idx].rx_fw_ring_id);
        req.ar = rte_cpu_to_le_16(bp->grp_info[idx].ag_fw_ring_id);
        req.sc = rte_cpu_to_le_16(bp->grp_info[idx].fw_stats_ctx);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();

        bp->grp_info[idx].fw_grp_id =
            rte_le_to_cpu_16(resp->ring_group_id);

        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_ring_grp_free(struct bnxt *bp, unsigned int idx)
{
        int rc;
        struct hwrm_ring_grp_free_input req = {.req_type = 0 };
        struct hwrm_ring_grp_free_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, RING_GRP_FREE);

        req.ring_group_id = rte_cpu_to_le_16(bp->grp_info[idx].fw_grp_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        bp->grp_info[idx].fw_grp_id = INVALID_HW_RING_ID;
        return rc;
}

int bnxt_hwrm_stat_clear(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
{
        int rc = 0;
        struct hwrm_stat_ctx_clr_stats_input req = {.req_type = 0 };
        struct hwrm_stat_ctx_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;

        if (cpr->hw_stats_ctx_id == (uint32_t)HWRM_NA_SIGNATURE)
                return rc;

        HWRM_PREP(req, STAT_CTX_CLR_STATS);

        req.stat_ctx_id = rte_cpu_to_le_16(cpr->hw_stats_ctx_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
                                unsigned int idx __rte_unused)
{
        int rc;
        struct hwrm_stat_ctx_alloc_input req = {.req_type = 0 };
        struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, STAT_CTX_ALLOC);

        req.update_period_ms = rte_cpu_to_le_32(0);

        req.stats_dma_addr =
            rte_cpu_to_le_64(cpr->hw_stats_map);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();

        cpr->hw_stats_ctx_id = rte_le_to_cpu_16(resp->stat_ctx_id);

        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_stat_ctx_free(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
                                unsigned int idx __rte_unused)
{
        int rc;
        struct hwrm_stat_ctx_free_input req = {.req_type = 0 };
        struct hwrm_stat_ctx_free_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, STAT_CTX_FREE);

        req.stat_ctx_id = rte_cpu_to_le_16(cpr->hw_stats_ctx_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
        int rc = 0, i, j;
        struct hwrm_vnic_alloc_input req = { 0 };
        struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr;

        /* map ring groups to this vnic */
        RTE_LOG(DEBUG, PMD, "Alloc VNIC. Start %x, End %x\n",
                vnic->start_grp_id, vnic->end_grp_id);
        for (i = vnic->start_grp_id, j = 0; i <= vnic->end_grp_id; i++, j++)
                vnic->fw_grp_ids[j] = bp->grp_info[i].fw_grp_id;
        vnic->dflt_ring_grp = bp->grp_info[vnic->start_grp_id].fw_grp_id;
        vnic->rss_rule = (uint16_t)HWRM_NA_SIGNATURE;
        vnic->cos_rule = (uint16_t)HWRM_NA_SIGNATURE;
        vnic->lb_rule = (uint16_t)HWRM_NA_SIGNATURE;
        vnic->mru = bp->eth_dev->data->mtu + ETHER_HDR_LEN +
                                ETHER_CRC_LEN + VLAN_TAG_SIZE;
        HWRM_PREP(req, VNIC_ALLOC);

        if (vnic->func_default)
                req.flags =
                        rte_cpu_to_le_32(HWRM_VNIC_ALLOC_INPUT_FLAGS_DEFAULT);
        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();

        vnic->fw_vnic_id = rte_le_to_cpu_16(resp->vnic_id);
        HWRM_UNLOCK();
        RTE_LOG(DEBUG, PMD, "VNIC ID %x\n", vnic->fw_vnic_id);
        return rc;
}

static int bnxt_hwrm_vnic_plcmodes_qcfg(struct bnxt *bp,
                                        struct bnxt_vnic_info *vnic,
                                        struct bnxt_plcmodes_cfg *pmode)
{
        int rc = 0;
        struct hwrm_vnic_plcmodes_qcfg_input req = {.req_type = 0 };
        struct hwrm_vnic_plcmodes_qcfg_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, VNIC_PLCMODES_QCFG);

        req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();

        pmode->flags = rte_le_to_cpu_32(resp->flags);
        /* dflt_vnic bit doesn't exist in the _cfg command */
        pmode->flags &= ~(HWRM_VNIC_PLCMODES_QCFG_OUTPUT_FLAGS_DFLT_VNIC);
        pmode->jumbo_thresh = rte_le_to_cpu_16(resp->jumbo_thresh);
        pmode->hds_offset = rte_le_to_cpu_16(resp->hds_offset);
        pmode->hds_threshold = rte_le_to_cpu_16(resp->hds_threshold);

        HWRM_UNLOCK();

        return rc;
}

static int bnxt_hwrm_vnic_plcmodes_cfg(struct bnxt *bp,
                                       struct bnxt_vnic_info *vnic,
                                       struct bnxt_plcmodes_cfg *pmode)
{
        int rc = 0;
        struct hwrm_vnic_plcmodes_cfg_input req = {.req_type = 0 };
        struct hwrm_vnic_plcmodes_cfg_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, VNIC_PLCMODES_CFG);

        req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
        req.flags = rte_cpu_to_le_32(pmode->flags);
        req.jumbo_thresh = rte_cpu_to_le_16(pmode->jumbo_thresh);
        req.hds_offset = rte_cpu_to_le_16(pmode->hds_offset);
        req.hds_threshold = rte_cpu_to_le_16(pmode->hds_threshold);
        req.enables = rte_cpu_to_le_32(
            HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_HDS_THRESHOLD_VALID |
            HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_HDS_OFFSET_VALID |
            HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_JUMBO_THRESH_VALID
        );

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
        int rc = 0;
        struct hwrm_vnic_cfg_input req = {.req_type = 0 };
        struct hwrm_vnic_cfg_output *resp = bp->hwrm_cmd_resp_addr;
        uint32_t ctx_enable_flag = 0;
        struct bnxt_plcmodes_cfg pmodes;

        if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
                RTE_LOG(DEBUG, PMD, "VNIC ID %x\n", vnic->fw_vnic_id);
                return rc;
        }

        rc = bnxt_hwrm_vnic_plcmodes_qcfg(bp, vnic, &pmodes);
        if (rc)
                return rc;

        HWRM_PREP(req, VNIC_CFG);

        /* Only RSS support for now TBD: COS & LB */
        req.enables =
            rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_ENABLES_DFLT_RING_GRP);
        if (vnic->lb_rule != 0xffff)
                ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_LB_RULE;
        if (vnic->cos_rule != 0xffff)
                ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_COS_RULE;
        if (vnic->rss_rule != 0xffff) {
                ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_MRU;
                ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_RSS_RULE;
        }
        req.enables |= rte_cpu_to_le_32(ctx_enable_flag);
        req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
        req.dflt_ring_grp = rte_cpu_to_le_16(vnic->dflt_ring_grp);
        req.rss_rule = rte_cpu_to_le_16(vnic->rss_rule);
        req.cos_rule = rte_cpu_to_le_16(vnic->cos_rule);
        req.lb_rule = rte_cpu_to_le_16(vnic->lb_rule);
        req.mru = rte_cpu_to_le_16(vnic->mru);
        if (vnic->func_default)
                req.flags |=
                    rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_DEFAULT);
        if (vnic->vlan_strip)
                req.flags |=
                    rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_VLAN_STRIP_MODE);
        if (vnic->bd_stall)
                req.flags |=
                    rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_BD_STALL_MODE);
        if (vnic->roce_dual)
                req.flags |= rte_cpu_to_le_32(
                        HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_DUAL_VNIC_MODE);
        if (vnic->roce_only)
                req.flags |= rte_cpu_to_le_32(
                        HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_ONLY_VNIC_MODE);
        if (vnic->rss_dflt_cr)
                req.flags |= rte_cpu_to_le_32(
                        HWRM_VNIC_QCFG_OUTPUT_FLAGS_RSS_DFLT_CR_MODE);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        rc = bnxt_hwrm_vnic_plcmodes_cfg(bp, vnic, &pmodes);

        return rc;
}

int bnxt_hwrm_vnic_qcfg(struct bnxt *bp, struct bnxt_vnic_info *vnic,
                int16_t fw_vf_id)
{
        int rc = 0;
        struct hwrm_vnic_qcfg_input req = {.req_type = 0 };
        struct hwrm_vnic_qcfg_output *resp = bp->hwrm_cmd_resp_addr;

        if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
                RTE_LOG(DEBUG, PMD, "VNIC QCFG ID %d\n", vnic->fw_vnic_id);
                return rc;
        }
        HWRM_PREP(req, VNIC_QCFG);

        req.enables =
                rte_cpu_to_le_32(HWRM_VNIC_QCFG_INPUT_ENABLES_VF_ID_VALID);
        req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
        req.vf_id = rte_cpu_to_le_16(fw_vf_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();

        vnic->dflt_ring_grp = rte_le_to_cpu_16(resp->dflt_ring_grp);
        vnic->rss_rule = rte_le_to_cpu_16(resp->rss_rule);
        vnic->cos_rule = rte_le_to_cpu_16(resp->cos_rule);
        vnic->lb_rule = rte_le_to_cpu_16(resp->lb_rule);
        vnic->mru = rte_le_to_cpu_16(resp->mru);
        vnic->func_default = rte_le_to_cpu_32(
                        resp->flags) & HWRM_VNIC_QCFG_OUTPUT_FLAGS_DEFAULT;
        vnic->vlan_strip = rte_le_to_cpu_32(resp->flags) &
                        HWRM_VNIC_QCFG_OUTPUT_FLAGS_VLAN_STRIP_MODE;
        vnic->bd_stall = rte_le_to_cpu_32(resp->flags) &
                        HWRM_VNIC_QCFG_OUTPUT_FLAGS_BD_STALL_MODE;
        vnic->roce_dual = rte_le_to_cpu_32(resp->flags) &
                        HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_DUAL_VNIC_MODE;
        vnic->roce_only = rte_le_to_cpu_32(resp->flags) &
                        HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_ONLY_VNIC_MODE;
        vnic->rss_dflt_cr = rte_le_to_cpu_32(resp->flags) &
                        HWRM_VNIC_QCFG_OUTPUT_FLAGS_RSS_DFLT_CR_MODE;

        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
        int rc = 0;
        struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {.req_type = 0 };
        struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp =
                                                bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_ALLOC);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();

        vnic->rss_rule = rte_le_to_cpu_16(resp->rss_cos_lb_ctx_id);
        HWRM_UNLOCK();
        RTE_LOG(DEBUG, PMD, "VNIC RSS Rule %x\n", vnic->rss_rule);

        return rc;
}

int bnxt_hwrm_vnic_ctx_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
        int rc = 0;
        struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {.req_type = 0 };
        struct hwrm_vnic_rss_cos_lb_ctx_free_output *resp =
                                                bp->hwrm_cmd_resp_addr;

        if (vnic->rss_rule == 0xffff) {
                RTE_LOG(DEBUG, PMD, "VNIC RSS Rule %x\n", vnic->rss_rule);
                return rc;
        }
        HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_FREE);

        req.rss_cos_lb_ctx_id = rte_cpu_to_le_16(vnic->rss_rule);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        vnic->rss_rule = INVALID_HW_RING_ID;

        return rc;
}

int bnxt_hwrm_vnic_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
        int rc = 0;
        struct hwrm_vnic_free_input req = {.req_type = 0 };
        struct hwrm_vnic_free_output *resp = bp->hwrm_cmd_resp_addr;

        if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
                RTE_LOG(DEBUG, PMD, "VNIC FREE ID %x\n", vnic->fw_vnic_id);
                return rc;
        }

        HWRM_PREP(req, VNIC_FREE);

        req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        vnic->fw_vnic_id = INVALID_HW_RING_ID;
        return rc;
}

1355 int bnxt_hwrm_vnic_rss_cfg(struct bnxt *bp,
1356                            struct bnxt_vnic_info *vnic)
1357 {
1358         int rc = 0;
1359         struct hwrm_vnic_rss_cfg_input req = {.req_type = 0 };
1360         struct hwrm_vnic_rss_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1361
1362         HWRM_PREP(req, VNIC_RSS_CFG);
1363
1364         req.hash_type = rte_cpu_to_le_32(vnic->hash_type);
1365
1366         req.ring_grp_tbl_addr =
1367             rte_cpu_to_le_64(vnic->rss_table_dma_addr);
1368         req.hash_key_tbl_addr =
1369             rte_cpu_to_le_64(vnic->rss_hash_key_dma_addr);
1370         req.rss_ctx_idx = rte_cpu_to_le_16(vnic->rss_rule);
1371
1372         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1373
1374         HWRM_CHECK_RESULT();
1375         HWRM_UNLOCK();
1376
1377         return rc;
1378 }
1379
1380 int bnxt_hwrm_vnic_plcmode_cfg(struct bnxt *bp,
1381                         struct bnxt_vnic_info *vnic)
1382 {
1383         int rc = 0;
1384         struct hwrm_vnic_plcmodes_cfg_input req = {.req_type = 0 };
1385         struct hwrm_vnic_plcmodes_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1386         uint16_t size;
1387
1388         HWRM_PREP(req, VNIC_PLCMODES_CFG);
1389
1390         req.flags = rte_cpu_to_le_32(
1391                         HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_JUMBO_PLACEMENT);
1392
1393         req.enables = rte_cpu_to_le_32(
1394                 HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_JUMBO_THRESH_VALID);
1395
1396         size = rte_pktmbuf_data_room_size(bp->rx_queues[0]->mb_pool);
1397         size -= RTE_PKTMBUF_HEADROOM;
1398
1399         req.jumbo_thresh = rte_cpu_to_le_16(size);
1400         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1401
1402         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1403
1404         HWRM_CHECK_RESULT();
1405         HWRM_UNLOCK();
1406
1407         return rc;
1408 }
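/*
 * Example of the jumbo threshold computed above (a sketch with the
 * default mbuf size, not part of the driver): with
 * rte_pktmbuf_data_room_size() returning RTE_MBUF_DEFAULT_BUF_SIZE
 * (2176 bytes) and RTE_PKTMBUF_HEADROOM of 128, jumbo_thresh becomes
 * 2176 - 128 = 2048, i.e. the usable data area of one RX mbuf.
 */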
1409
1410 int bnxt_hwrm_vnic_tpa_cfg(struct bnxt *bp,
1411                         struct bnxt_vnic_info *vnic, bool enable)
1412 {
1413         int rc = 0;
1414         struct hwrm_vnic_tpa_cfg_input req = {.req_type = 0 };
1415         struct hwrm_vnic_tpa_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1416
1417         HWRM_PREP(req, VNIC_TPA_CFG);
1418
1419         if (enable) {
1420                 req.enables = rte_cpu_to_le_32(
1421                                 HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGG_SEGS |
1422                                 HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGGS |
1423                                 HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MIN_AGG_LEN);
1424                 req.flags = rte_cpu_to_le_32(
1425                                 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_TPA |
1426                                 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_ENCAP_TPA |
1427                                 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_RSC_WND_UPDATE |
1428                                 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_GRO |
1429                                 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_ECN |
1430                         HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_SAME_GRE_SEQ);
1431                 req.max_agg_segs = rte_cpu_to_le_16(5);
1432                 req.max_aggs =
1433                         rte_cpu_to_le_16(HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_MAX);
1434                 req.min_agg_len = rte_cpu_to_le_32(512);
1435         }
1436         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1437
1438         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1439
1440         HWRM_CHECK_RESULT();
1441         HWRM_UNLOCK();
1442
1443         return rc;
1444 }
1445
1446 int bnxt_hwrm_func_vf_mac(struct bnxt *bp, uint16_t vf, const uint8_t *mac_addr)
1447 {
1448         struct hwrm_func_cfg_input req = {0};
1449         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1450         int rc;
1451
1452         req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
1453         req.enables = rte_cpu_to_le_32(
1454                         HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
1455         memcpy(req.dflt_mac_addr, mac_addr, sizeof(req.dflt_mac_addr));
1456         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
1457
1458         HWRM_PREP(req, FUNC_CFG);
1459
1460         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1461         HWRM_CHECK_RESULT();
1462         HWRM_UNLOCK();
1463
1464         bp->pf.vf_info[vf].random_mac = false;
1465
1466         return rc;
1467 }
1468
1469 int bnxt_hwrm_func_qstats_tx_drop(struct bnxt *bp, uint16_t fid,
1470                                   uint64_t *dropped)
1471 {
1472         int rc = 0;
1473         struct hwrm_func_qstats_input req = {.req_type = 0};
1474         struct hwrm_func_qstats_output *resp = bp->hwrm_cmd_resp_addr;
1475
1476         HWRM_PREP(req, FUNC_QSTATS);
1477
1478         req.fid = rte_cpu_to_le_16(fid);
1479
1480         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1481
1482         HWRM_CHECK_RESULT();
1483
1484         if (dropped)
1485                 *dropped = rte_le_to_cpu_64(resp->tx_drop_pkts);
1486
1487         HWRM_UNLOCK();
1488
1489         return rc;
1490 }
1491
1492 int bnxt_hwrm_func_qstats(struct bnxt *bp, uint16_t fid,
1493                           struct rte_eth_stats *stats)
1494 {
1495         int rc = 0;
1496         struct hwrm_func_qstats_input req = {.req_type = 0};
1497         struct hwrm_func_qstats_output *resp = bp->hwrm_cmd_resp_addr;
1498
1499         HWRM_PREP(req, FUNC_QSTATS);
1500
1501         req.fid = rte_cpu_to_le_16(fid);
1502
1503         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1504
1505         HWRM_CHECK_RESULT();
1506
1507         stats->ipackets = rte_le_to_cpu_64(resp->rx_ucast_pkts);
1508         stats->ipackets += rte_le_to_cpu_64(resp->rx_mcast_pkts);
1509         stats->ipackets += rte_le_to_cpu_64(resp->rx_bcast_pkts);
1510         stats->ibytes = rte_le_to_cpu_64(resp->rx_ucast_bytes);
1511         stats->ibytes += rte_le_to_cpu_64(resp->rx_mcast_bytes);
1512         stats->ibytes += rte_le_to_cpu_64(resp->rx_bcast_bytes);
1513
1514         stats->opackets = rte_le_to_cpu_64(resp->tx_ucast_pkts);
1515         stats->opackets += rte_le_to_cpu_64(resp->tx_mcast_pkts);
1516         stats->opackets += rte_le_to_cpu_64(resp->tx_bcast_pkts);
1517         stats->obytes = rte_le_to_cpu_64(resp->tx_ucast_bytes);
1518         stats->obytes += rte_le_to_cpu_64(resp->tx_mcast_bytes);
1519         stats->obytes += rte_le_to_cpu_64(resp->tx_bcast_bytes);
1520
1521         stats->ierrors = rte_le_to_cpu_64(resp->rx_err_pkts);
1522         stats->oerrors = rte_le_to_cpu_64(resp->tx_err_pkts);
1523
1524         stats->imissed = rte_le_to_cpu_64(resp->rx_drop_pkts);
1525
1526         HWRM_UNLOCK();
1527
1528         return rc;
1529 }
1530
1531 int bnxt_hwrm_func_clr_stats(struct bnxt *bp, uint16_t fid)
1532 {
1533         int rc = 0;
1534         struct hwrm_func_clr_stats_input req = {.req_type = 0};
1535         struct hwrm_func_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;
1536
1537         HWRM_PREP(req, FUNC_CLR_STATS);
1538
1539         req.fid = rte_cpu_to_le_16(fid);
1540
1541         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1542
1543         HWRM_CHECK_RESULT();
1544         HWRM_UNLOCK();
1545
1546         return rc;
1547 }
1548
1549 /*
1550  * HWRM utility functions
1551  */
1552
1553 int bnxt_clear_all_hwrm_stat_ctxs(struct bnxt *bp)
1554 {
1555         unsigned int i;
1556         int rc = 0;
1557
1558         for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
1559                 struct bnxt_tx_queue *txq;
1560                 struct bnxt_rx_queue *rxq;
1561                 struct bnxt_cp_ring_info *cpr;
1562
1563                 if (i >= bp->rx_cp_nr_rings) {
1564                         txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
1565                         cpr = txq->cp_ring;
1566                 } else {
1567                         rxq = bp->rx_queues[i];
1568                         cpr = rxq->cp_ring;
1569                 }
1570
1571                 rc = bnxt_hwrm_stat_clear(bp, cpr);
1572                 if (rc)
1573                         return rc;
1574         }
1575         return 0;
1576 }
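/*
 * Worked example of the combined ring indexing above (illustration
 * only): with rx_cp_nr_rings = 4 and tx_cp_nr_rings = 2, loop index
 * i = 0..3 selects rx_queues[0..3] and i = 4..5 selects
 * tx_queues[i - rx_cp_nr_rings], i.e. tx_queues[0..1].
 */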
1577
1578 int bnxt_free_all_hwrm_stat_ctxs(struct bnxt *bp)
1579 {
1580         int rc;
1581         unsigned int i;
1582         struct bnxt_cp_ring_info *cpr;
1583
1584         for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
1585
1586                 if (i >= bp->rx_cp_nr_rings) {
1587                         cpr = bp->tx_queues[i - bp->rx_cp_nr_rings]->cp_ring;
1588                 } else {
1589                         cpr = bp->rx_queues[i]->cp_ring;
1590                         bp->grp_info[i].fw_stats_ctx = -1;
1591                 }
1592                 if (cpr->hw_stats_ctx_id != HWRM_NA_SIGNATURE) {
1593                         rc = bnxt_hwrm_stat_ctx_free(bp, cpr, i);
1594                         cpr->hw_stats_ctx_id = HWRM_NA_SIGNATURE;
1595                         if (rc)
1596                                 return rc;
1597                 }
1598         }
1599         return 0;
1600 }
1601
1602 int bnxt_alloc_all_hwrm_stat_ctxs(struct bnxt *bp)
1603 {
1604         unsigned int i;
1605         int rc = 0;
1606
1607         for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
1608                 struct bnxt_tx_queue *txq;
1609                 struct bnxt_rx_queue *rxq;
1610                 struct bnxt_cp_ring_info *cpr;
1611
1612                 if (i >= bp->rx_cp_nr_rings) {
1613                         txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
1614                         cpr = txq->cp_ring;
1615                 } else {
1616                         rxq = bp->rx_queues[i];
1617                         cpr = rxq->cp_ring;
1618                 }
1619
1620                 rc = bnxt_hwrm_stat_ctx_alloc(bp, cpr, i);
1621
1622                 if (rc)
1623                         return rc;
1624         }
1625         return rc;
1626 }
1627
1628 int bnxt_free_all_hwrm_ring_grps(struct bnxt *bp)
1629 {
1630         uint16_t idx;
1631         int rc = 0;
1632
1633         for (idx = 0; idx < bp->rx_cp_nr_rings; idx++) {
1634
1635                 if (bp->grp_info[idx].fw_grp_id == INVALID_HW_RING_ID)
1636                         continue;
1637
1638                 rc = bnxt_hwrm_ring_grp_free(bp, idx);
1639
1640                 if (rc)
1641                         return rc;
1642         }
1643         return rc;
1644 }
1645
1646 static void bnxt_free_cp_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1647                                 unsigned int idx __rte_unused)
1648 {
1649         struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
1650
1651         bnxt_hwrm_ring_free(bp, cp_ring,
1652                         HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL);
1653         cp_ring->fw_ring_id = INVALID_HW_RING_ID;
1654         memset(cpr->cp_desc_ring, 0, cpr->cp_ring_struct->ring_size *
1655                         sizeof(*cpr->cp_desc_ring));
1656         cpr->cp_raw_cons = 0;
1657 }
1658
1659 int bnxt_free_all_hwrm_rings(struct bnxt *bp)
1660 {
1661         unsigned int i;
1662         int rc = 0;
1663
1664         for (i = 0; i < bp->tx_cp_nr_rings; i++) {
1665                 struct bnxt_tx_queue *txq = bp->tx_queues[i];
1666                 struct bnxt_tx_ring_info *txr = txq->tx_ring;
1667                 struct bnxt_ring *ring = txr->tx_ring_struct;
1668                 struct bnxt_cp_ring_info *cpr = txq->cp_ring;
1669                 unsigned int idx = bp->rx_cp_nr_rings + i + 1;
1670
1671                 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
1672                         bnxt_hwrm_ring_free(bp, ring,
1673                                         HWRM_RING_FREE_INPUT_RING_TYPE_TX);
1674                         ring->fw_ring_id = INVALID_HW_RING_ID;
1675                         memset(txr->tx_desc_ring, 0,
1676                                         txr->tx_ring_struct->ring_size *
1677                                         sizeof(*txr->tx_desc_ring));
1678                         memset(txr->tx_buf_ring, 0,
1679                                         txr->tx_ring_struct->ring_size *
1680                                         sizeof(*txr->tx_buf_ring));
1681                         txr->tx_prod = 0;
1682                         txr->tx_cons = 0;
1683                 }
1684                 if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) {
1685                         bnxt_free_cp_ring(bp, cpr, idx);
1686                         cpr->cp_ring_struct->fw_ring_id = INVALID_HW_RING_ID;
1687                 }
1688         }
1689
1690         for (i = 0; i < bp->rx_cp_nr_rings; i++) {
1691                 struct bnxt_rx_queue *rxq = bp->rx_queues[i];
1692                 struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
1693                 struct bnxt_ring *ring = rxr->rx_ring_struct;
1694                 struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
1695                 unsigned int idx = i + 1;
1696
1697                 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
1698                         bnxt_hwrm_ring_free(bp, ring,
1699                                         HWRM_RING_FREE_INPUT_RING_TYPE_RX);
1700                         ring->fw_ring_id = INVALID_HW_RING_ID;
1701                         bp->grp_info[idx].rx_fw_ring_id = INVALID_HW_RING_ID;
1702                         memset(rxr->rx_desc_ring, 0,
1703                                         rxr->rx_ring_struct->ring_size *
1704                                         sizeof(*rxr->rx_desc_ring));
1705                         memset(rxr->rx_buf_ring, 0,
1706                                         rxr->rx_ring_struct->ring_size *
1707                                         sizeof(*rxr->rx_buf_ring));
1708                         rxr->rx_prod = 0;
1709                 }
1710                 ring = rxr->ag_ring_struct;
1711                 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
1712                         bnxt_hwrm_ring_free(bp, ring,
1713                                             HWRM_RING_FREE_INPUT_RING_TYPE_RX);
1714                         ring->fw_ring_id = INVALID_HW_RING_ID;
1715                         memset(rxr->ag_buf_ring, 0,
1716                                rxr->ag_ring_struct->ring_size *
1717                                sizeof(*rxr->ag_buf_ring));
1718                         rxr->ag_prod = 0;
1719                         bp->grp_info[i].ag_fw_ring_id = INVALID_HW_RING_ID;
1720                 }
1721                 if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) {
1722                         bnxt_free_cp_ring(bp, cpr, idx);
1723                         bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
1724                         cpr->cp_ring_struct->fw_ring_id = INVALID_HW_RING_ID;
1725                 }
1726         }
1727
1728         /* Default completion ring */
1729         {
1730                 struct bnxt_cp_ring_info *cpr = bp->def_cp_ring;
1731
1732                 if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) {
1733                         bnxt_free_cp_ring(bp, cpr, 0);
1734                         cpr->cp_ring_struct->fw_ring_id = INVALID_HW_RING_ID;
1735                 }
1736         }
1737
1738         return rc;
1739 }
1740
1741 int bnxt_alloc_all_hwrm_ring_grps(struct bnxt *bp)
1742 {
1743         uint16_t i;
1744         int rc = 0;
1745
1746         for (i = 0; i < bp->rx_cp_nr_rings; i++) {
1747                 rc = bnxt_hwrm_ring_grp_alloc(bp, i);
1748                 if (rc)
1749                         return rc;
1750         }
1751         return rc;
1752 }
1753
1754 void bnxt_free_hwrm_resources(struct bnxt *bp)
1755 {
1756         /* Release HWRM command/response buffers */
1757         rte_free(bp->hwrm_cmd_resp_addr);
1758         rte_free(bp->hwrm_short_cmd_req_addr);
1759         bp->hwrm_cmd_resp_addr = NULL;
1760         bp->hwrm_short_cmd_req_addr = NULL;
1761         bp->hwrm_cmd_resp_dma_addr = 0;
1762         bp->hwrm_short_cmd_req_dma_addr = 0;
1763 }
1764
1765 int bnxt_alloc_hwrm_resources(struct bnxt *bp)
1766 {
1767         struct rte_pci_device *pdev = bp->pdev;
1768         char type[RTE_MEMZONE_NAMESIZE];
1769
1770         snprintf(type, sizeof(type), "bnxt_hwrm_%04x:%02x:%02x:%02x",
1771                 pdev->addr.domain, pdev->addr.bus, pdev->addr.devid,
1772                 pdev->addr.function);
1772         bp->max_resp_len = HWRM_MAX_RESP_LEN;
1773         bp->hwrm_cmd_resp_addr = rte_malloc(type, bp->max_resp_len, 0);
1774         if (bp->hwrm_cmd_resp_addr == NULL)
1775                 return -ENOMEM;
1776         rte_mem_lock_page(bp->hwrm_cmd_resp_addr);
1777         bp->hwrm_cmd_resp_dma_addr =
1778                 rte_mem_virt2iova(bp->hwrm_cmd_resp_addr);
1779         if (bp->hwrm_cmd_resp_dma_addr == 0) {
1780                 RTE_LOG(ERR, PMD,
1781                         "unable to map response address to physical memory\n");
1782                 return -ENOMEM;
1783         }
1784         rte_spinlock_init(&bp->hwrm_lock);
1785
1786         return 0;
1787 }
1788
1789 int bnxt_clear_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1790 {
1791         struct bnxt_filter_info *filter;
1792         int rc = 0;
1793
1794         STAILQ_FOREACH(filter, &vnic->filter, next) {
1795                 if (filter->filter_type == HWRM_CFA_EM_FILTER)
1796                         rc = bnxt_hwrm_clear_em_filter(bp, filter);
1797                 else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
1798                         rc = bnxt_hwrm_clear_ntuple_filter(bp, filter);
1799                 else
1800                         rc = bnxt_hwrm_clear_l2_filter(bp, filter);
1801                 /* Keep clearing the remaining filters even if one fails. */
1803         }
1804         return rc;
1805 }
1806
1807 static int
1808 bnxt_clear_hwrm_vnic_flows(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1809 {
1810         struct bnxt_filter_info *filter;
1811         struct rte_flow *flow;
1812         int rc = 0;
1813
1814         STAILQ_FOREACH(flow, &vnic->flow_list, next) {
1815                 filter = flow->filter;
1816                 RTE_LOG(DEBUG, PMD, "filter type %d\n", filter->filter_type);
1817                 if (filter->filter_type == HWRM_CFA_EM_FILTER)
1818                         rc = bnxt_hwrm_clear_em_filter(bp, filter);
1819                 else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
1820                         rc = bnxt_hwrm_clear_ntuple_filter(bp, filter);
1821                 else
1822                         rc = bnxt_hwrm_clear_l2_filter(bp, filter);
1823
1824                 STAILQ_REMOVE(&vnic->flow_list, flow, rte_flow, next);
1825                 rte_free(flow);
1826                 /* Keep clearing the remaining flows even if one fails. */
1828         }
1829         return rc;
1830 }
1831
1832 int bnxt_set_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1833 {
1834         struct bnxt_filter_info *filter;
1835         int rc = 0;
1836
1837         STAILQ_FOREACH(filter, &vnic->filter, next) {
1838                 if (filter->filter_type == HWRM_CFA_EM_FILTER)
1839                         rc = bnxt_hwrm_set_em_filter(bp, filter->dst_id,
1840                                                      filter);
1841                 else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
1842                         rc = bnxt_hwrm_set_ntuple_filter(bp, filter->dst_id,
1843                                                          filter);
1844                 else
1845                         rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id,
1846                                                      filter);
1847                 if (rc)
1848                         break;
1849         }
1850         return rc;
1851 }
1852
1853 void bnxt_free_tunnel_ports(struct bnxt *bp)
1854 {
1855         if (bp->vxlan_port_cnt)
1856                 bnxt_hwrm_tunnel_dst_port_free(bp, bp->vxlan_fw_dst_port_id,
1857                         HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN);
1858         bp->vxlan_port = 0;
1859         if (bp->geneve_port_cnt)
1860                 bnxt_hwrm_tunnel_dst_port_free(bp, bp->geneve_fw_dst_port_id,
1861                         HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_GENEVE);
1862         bp->geneve_port = 0;
1863 }
1864
1865 void bnxt_free_all_hwrm_resources(struct bnxt *bp)
1866 {
1867         int i;
1868
1869         if (bp->vnic_info == NULL)
1870                 return;
1871
1872         /*
1873          * Cleanup VNICs in reverse order, to make sure the L2 filter
1874          * from vnic0 is last to be cleaned up.
1875          */
1876         for (i = bp->nr_vnics - 1; i >= 0; i--) {
1877                 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
1878
1879                 bnxt_clear_hwrm_vnic_flows(bp, vnic);
1880
1881                 bnxt_clear_hwrm_vnic_filters(bp, vnic);
1882
1883                 bnxt_hwrm_vnic_ctx_free(bp, vnic);
1884
1885                 bnxt_hwrm_vnic_tpa_cfg(bp, vnic, false);
1886
1887                 bnxt_hwrm_vnic_free(bp, vnic);
1888         }
1889         /* Ring resources */
1890         bnxt_free_all_hwrm_rings(bp);
1891         bnxt_free_all_hwrm_ring_grps(bp);
1892         bnxt_free_all_hwrm_stat_ctxs(bp);
1893         bnxt_free_tunnel_ports(bp);
1894 }
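/*
 * The per-VNIC teardown above runs in a fixed order: flows first,
 * then filters, then the RSS/COS/LB context, then TPA is disabled,
 * and finally the VNIC itself is freed; ring, ring-group,
 * stat-context and tunnel-port resources are released afterwards.
 */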
1895
1896 static uint16_t bnxt_parse_eth_link_duplex(uint32_t conf_link_speed)
1897 {
1898         uint8_t hw_link_duplex = HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;
1899
1900         if ((conf_link_speed & ETH_LINK_SPEED_FIXED) == ETH_LINK_SPEED_AUTONEG)
1901                 return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;
1902
1903         switch (conf_link_speed) {
1904         case ETH_LINK_SPEED_10M_HD:
1905         case ETH_LINK_SPEED_100M_HD:
1906                 return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF;
1907         }
1908         return hw_link_duplex;
1909 }
1910
1911 static uint16_t bnxt_check_eth_link_autoneg(uint32_t conf_link)
1912 {
1913         return (conf_link & ETH_LINK_SPEED_FIXED) ? 0 : 1;
1914 }
1915
1916 static uint16_t bnxt_parse_eth_link_speed(uint32_t conf_link_speed)
1917 {
1918         uint16_t eth_link_speed = 0;
1919
1920         if (conf_link_speed == ETH_LINK_SPEED_AUTONEG)
1921                 return ETH_LINK_SPEED_AUTONEG;
1922
1923         switch (conf_link_speed & ~ETH_LINK_SPEED_FIXED) {
1924         case ETH_LINK_SPEED_100M:
1925         case ETH_LINK_SPEED_100M_HD:
1926                 eth_link_speed =
1927                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_100MB;
1928                 break;
1929         case ETH_LINK_SPEED_1G:
1930                 eth_link_speed =
1931                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_1GB;
1932                 break;
1933         case ETH_LINK_SPEED_2_5G:
1934                 eth_link_speed =
1935                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_2_5GB;
1936                 break;
1937         case ETH_LINK_SPEED_10G:
1938                 eth_link_speed =
1939                         HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_10GB;
1940                 break;
1941         case ETH_LINK_SPEED_20G:
1942                 eth_link_speed =
1943                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_20GB;
1944                 break;
1945         case ETH_LINK_SPEED_25G:
1946                 eth_link_speed =
1947                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_25GB;
1948                 break;
1949         case ETH_LINK_SPEED_40G:
1950                 eth_link_speed =
1951                         HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_40GB;
1952                 break;
1953         case ETH_LINK_SPEED_50G:
1954                 eth_link_speed =
1955                         HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_50GB;
1956                 break;
1957         default:
1958                 RTE_LOG(ERR, PMD,
1959                         "Unsupported link speed %u; default to AUTO\n",
1960                         conf_link_speed);
1961                 break;
1962         }
1963         return eth_link_speed;
1964 }
1965
1966 #define BNXT_SUPPORTED_SPEEDS (ETH_LINK_SPEED_100M | ETH_LINK_SPEED_100M_HD | \
1967                 ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G | \
1968                 ETH_LINK_SPEED_10G | ETH_LINK_SPEED_20G | ETH_LINK_SPEED_25G | \
1969                 ETH_LINK_SPEED_40G | ETH_LINK_SPEED_50G)
1970
1971 static int bnxt_valid_link_speed(uint32_t link_speed, uint16_t port_id)
1972 {
1973         uint32_t one_speed;
1974
1975         if (link_speed == ETH_LINK_SPEED_AUTONEG)
1976                 return 0;
1977
1978         if (link_speed & ETH_LINK_SPEED_FIXED) {
1979                 one_speed = link_speed & ~ETH_LINK_SPEED_FIXED;
1980
1981                 if (one_speed & (one_speed - 1)) {
1982                         RTE_LOG(ERR, PMD,
1983                                 "Invalid advertised speeds (%u) for port %u\n",
1984                                 link_speed, port_id);
1985                         return -EINVAL;
1986                 }
1987                 if ((one_speed & BNXT_SUPPORTED_SPEEDS) != one_speed) {
1988                         RTE_LOG(ERR, PMD,
1989                                 "Unsupported advertised speed (%u) for port %u\n",
1990                                 link_speed, port_id);
1991                         return -EINVAL;
1992                 }
1993         } else {
1994                 if (!(link_speed & BNXT_SUPPORTED_SPEEDS)) {
1995                         RTE_LOG(ERR, PMD,
1996                                 "Unsupported advertised speeds (%u) for port %u\n",
1997                                 link_speed, port_id);
1998                         return -EINVAL;
1999                 }
2000         }
2001         return 0;
2002 }
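/*
 * Note on the single-bit test above (illustration only): a valid
 * fixed-speed request has exactly one speed bit set, which
 * "one_speed & (one_speed - 1)" checks. E.g. 0x8 & 0x7 == 0 passes,
 * while (0x8 | 0x2) gives 0xa & 0x9 == 0x8 != 0 and is rejected.
 */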
2003
2004 static uint16_t
2005 bnxt_parse_eth_link_speed_mask(struct bnxt *bp, uint32_t link_speed)
2006 {
2007         uint16_t ret = 0;
2008
2009         if (link_speed == ETH_LINK_SPEED_AUTONEG) {
2010                 if (bp->link_info.support_speeds)
2011                         return bp->link_info.support_speeds;
2012                 link_speed = BNXT_SUPPORTED_SPEEDS;
2013         }
2014
2015         if (link_speed & ETH_LINK_SPEED_100M)
2016                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
2017         if (link_speed & ETH_LINK_SPEED_100M_HD)
2018                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
2019         if (link_speed & ETH_LINK_SPEED_1G)
2020                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_1GB;
2021         if (link_speed & ETH_LINK_SPEED_2_5G)
2022                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_2_5GB;
2023         if (link_speed & ETH_LINK_SPEED_10G)
2024                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_10GB;
2025         if (link_speed & ETH_LINK_SPEED_20G)
2026                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_20GB;
2027         if (link_speed & ETH_LINK_SPEED_25G)
2028                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_25GB;
2029         if (link_speed & ETH_LINK_SPEED_40G)
2030                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_40GB;
2031         if (link_speed & ETH_LINK_SPEED_50G)
2032                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_50GB;
2033         return ret;
2034 }
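/*
 * Example (illustration only): link_speeds of
 * ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G yields
 * ..._AUTO_LINK_SPEED_MASK_1GB | ..._AUTO_LINK_SPEED_MASK_10GB;
 * note both 100M variants map onto the same _100MB mask bit, since
 * the mask carries no duplex information.
 */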
2035
2036 static uint32_t bnxt_parse_hw_link_speed(uint16_t hw_link_speed)
2037 {
2038         uint32_t eth_link_speed = ETH_SPEED_NUM_NONE;
2039
2040         switch (hw_link_speed) {
2041         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100MB:
2042                 eth_link_speed = ETH_SPEED_NUM_100M;
2043                 break;
2044         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_1GB:
2045                 eth_link_speed = ETH_SPEED_NUM_1G;
2046                 break;
2047         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2_5GB:
2048                 eth_link_speed = ETH_SPEED_NUM_2_5G;
2049                 break;
2050         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10GB:
2051                 eth_link_speed = ETH_SPEED_NUM_10G;
2052                 break;
2053         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_20GB:
2054                 eth_link_speed = ETH_SPEED_NUM_20G;
2055                 break;
2056         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_25GB:
2057                 eth_link_speed = ETH_SPEED_NUM_25G;
2058                 break;
2059         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_40GB:
2060                 eth_link_speed = ETH_SPEED_NUM_40G;
2061                 break;
2062         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_50GB:
2063                 eth_link_speed = ETH_SPEED_NUM_50G;
2064                 break;
2065         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2GB:
2066         default:
2067                 RTE_LOG(ERR, PMD, "HWRM link speed %d not defined\n",
2068                         hw_link_speed);
2069                 break;
2070         }
2071         return eth_link_speed;
2072 }
2073
2074 static uint16_t bnxt_parse_hw_link_duplex(uint16_t hw_link_duplex)
2075 {
2076         uint16_t eth_link_duplex = ETH_LINK_FULL_DUPLEX;
2077
2078         switch (hw_link_duplex) {
2079         case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH:
2080         case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_FULL:
2081                 eth_link_duplex = ETH_LINK_FULL_DUPLEX;
2082                 break;
2083         case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF:
2084                 eth_link_duplex = ETH_LINK_HALF_DUPLEX;
2085                 break;
2086         default:
2087                 RTE_LOG(ERR, PMD, "HWRM link duplex %d not defined\n",
2088                         hw_link_duplex);
2089                 break;
2090         }
2091         return eth_link_duplex;
2092 }
2093
2094 int bnxt_get_hwrm_link_config(struct bnxt *bp, struct rte_eth_link *link)
2095 {
2096         int rc = 0;
2097         struct bnxt_link_info *link_info = &bp->link_info;
2098
2099         rc = bnxt_hwrm_port_phy_qcfg(bp, link_info);
2100         if (rc) {
2101                 RTE_LOG(ERR, PMD,
2102                         "Get link config failed with rc %d\n", rc);
2103                 goto exit;
2104         }
2105         if (link_info->link_speed)
2106                 link->link_speed =
2107                         bnxt_parse_hw_link_speed(link_info->link_speed);
2108         else
2109                 link->link_speed = ETH_SPEED_NUM_NONE;
2110         link->link_duplex = bnxt_parse_hw_link_duplex(link_info->duplex);
2111         link->link_status = link_info->link_up;
2112         link->link_autoneg = link_info->auto_mode ==
2113                 HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_NONE ?
2114                 ETH_LINK_FIXED : ETH_LINK_AUTONEG;
2115 exit:
2116         return rc;
2117 }
2118
2119 int bnxt_set_hwrm_link_config(struct bnxt *bp, bool link_up)
2120 {
2121         int rc = 0;
2122         struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
2123         struct bnxt_link_info link_req;
2124         uint16_t speed, autoneg;
2125
2126         if (BNXT_NPAR_PF(bp) || BNXT_VF(bp))
2127                 return 0;
2128
2129         rc = bnxt_valid_link_speed(dev_conf->link_speeds,
2130                         bp->eth_dev->data->port_id);
2131         if (rc)
2132                 goto error;
2133
2134         memset(&link_req, 0, sizeof(link_req));
2135         link_req.link_up = link_up;
2136         if (!link_up)
2137                 goto port_phy_cfg;
2138
2139         autoneg = bnxt_check_eth_link_autoneg(dev_conf->link_speeds);
2140         speed = bnxt_parse_eth_link_speed(dev_conf->link_speeds);
2141         link_req.phy_flags = HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESET_PHY;
2142         /* Autoneg can be used only when the FW allows it */
2143         if (autoneg == 1 && !(bp->link_info.auto_link_speed ||
2144                                 bp->link_info.force_link_speed)) {
2145                 link_req.phy_flags |=
2146                                 HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESTART_AUTONEG;
2147                 link_req.auto_link_speed_mask =
2148                         bnxt_parse_eth_link_speed_mask(bp,
2149                                                        dev_conf->link_speeds);
2150         } else {
2151                 if (bp->link_info.phy_type ==
2152                     HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASET ||
2153                     bp->link_info.phy_type ==
2154                     HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASETE ||
2155                     bp->link_info.media_type ==
2156                     HWRM_PORT_PHY_QCFG_OUTPUT_MEDIA_TYPE_TP) {
2157                         RTE_LOG(ERR, PMD, "10GBase-T devices must autoneg\n");
2158                         return -EINVAL;
2159                 }
2160
2161                 link_req.phy_flags |= HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE;
2162                 /* If user wants a particular speed try that first. */
2163                 if (speed)
2164                         link_req.link_speed = speed;
2165                 else if (bp->link_info.force_link_speed)
2166                         link_req.link_speed = bp->link_info.force_link_speed;
2167                 else
2168                         link_req.link_speed = bp->link_info.auto_link_speed;
2169         }
2170         link_req.duplex = bnxt_parse_eth_link_duplex(dev_conf->link_speeds);
2171         link_req.auto_pause = bp->link_info.auto_pause;
2172         link_req.force_pause = bp->link_info.force_pause;
2173
2174 port_phy_cfg:
2175         rc = bnxt_hwrm_port_phy_cfg(bp, &link_req);
2176         if (rc) {
2177                 RTE_LOG(ERR, PMD,
2178                         "Set link config failed with rc %d\n", rc);
2179         }
2180
2181 error:
2182         return rc;
2183 }
2184
2185 /* JIRA 22088 */
2186 int bnxt_hwrm_func_qcfg(struct bnxt *bp)
2187 {
2188         struct hwrm_func_qcfg_input req = {0};
2189         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
2190         int rc = 0;
2191
2192         HWRM_PREP(req, FUNC_QCFG);
2193         req.fid = rte_cpu_to_le_16(0xffff);
2194
2195         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2196
2197         HWRM_CHECK_RESULT();
2198
2199         /* Hardcoded 12-bit (0xfff) VLAN ID mask */
2200         bp->vlan = rte_le_to_cpu_16(resp->vlan) & 0xfff;
2201
2202         switch (resp->port_partition_type) {
2203         case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_0:
2204         case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_5:
2205         case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR2_0:
2206                 bp->port_partition_type = resp->port_partition_type;
2207                 break;
2208         default:
2209                 bp->port_partition_type = 0;
2210                 break;
2211         }
2212
2213         HWRM_UNLOCK();
2214
2215         return rc;
2216 }
2217
2218 static void copy_func_cfg_to_qcaps(struct hwrm_func_cfg_input *fcfg,
2219                                    struct hwrm_func_qcaps_output *qcaps)
2220 {
2221         qcaps->max_rsscos_ctx = fcfg->num_rsscos_ctxs;
2222         memcpy(qcaps->mac_address, fcfg->dflt_mac_addr,
2223                sizeof(qcaps->mac_address));
2224         qcaps->max_l2_ctxs = fcfg->num_l2_ctxs;
2225         qcaps->max_rx_rings = fcfg->num_rx_rings;
2226         qcaps->max_tx_rings = fcfg->num_tx_rings;
2227         qcaps->max_cmpl_rings = fcfg->num_cmpl_rings;
2228         qcaps->max_stat_ctx = fcfg->num_stat_ctxs;
2229         qcaps->max_vfs = 0;
2230         qcaps->first_vf_id = 0;
2231         qcaps->max_vnics = fcfg->num_vnics;
2232         qcaps->max_decap_records = 0;
2233         qcaps->max_encap_records = 0;
2234         qcaps->max_tx_wm_flows = 0;
2235         qcaps->max_tx_em_flows = 0;
2236         qcaps->max_rx_wm_flows = 0;
2237         qcaps->max_rx_em_flows = 0;
2238         qcaps->max_flow_id = 0;
2239         qcaps->max_mcast_filters = fcfg->num_mcast_filters;
2240         qcaps->max_sp_tx_rings = 0;
2241         qcaps->max_hw_ring_grps = fcfg->num_hw_ring_grps;
2242 }
2243
2244 static int bnxt_hwrm_pf_func_cfg(struct bnxt *bp, int tx_rings)
2245 {
2246         struct hwrm_func_cfg_input req = {0};
2247         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2248         int rc;
2249
2250         req.enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_MTU |
2251                         HWRM_FUNC_CFG_INPUT_ENABLES_MRU |
2252                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS |
2253                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_STAT_CTXS |
2254                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
2255                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_TX_RINGS |
2256                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RX_RINGS |
2257                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_L2_CTXS |
2258                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS |
2259                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS);
2260         req.flags = rte_cpu_to_le_32(bp->pf.func_cfg_flags);
2261         req.mtu = rte_cpu_to_le_16(BNXT_MAX_MTU);
2262         req.mru = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
2263                                    ETHER_CRC_LEN + VLAN_TAG_SIZE);
2264         req.num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx);
2265         req.num_stat_ctxs = rte_cpu_to_le_16(bp->max_stat_ctx);
2266         req.num_cmpl_rings = rte_cpu_to_le_16(bp->max_cp_rings);
2267         req.num_tx_rings = rte_cpu_to_le_16(tx_rings);
2268         req.num_rx_rings = rte_cpu_to_le_16(bp->max_rx_rings);
2269         req.num_l2_ctxs = rte_cpu_to_le_16(bp->max_l2_ctx);
2270         req.num_vnics = rte_cpu_to_le_16(bp->max_vnics);
2271         req.num_hw_ring_grps = rte_cpu_to_le_16(bp->max_ring_grps);
2272         req.fid = rte_cpu_to_le_16(0xffff);
2273
2274         HWRM_PREP(req, FUNC_CFG);
2275
2276         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2277
2278         HWRM_CHECK_RESULT();
2279         HWRM_UNLOCK();
2280
2281         return rc;
2282 }
2283
2284 static void populate_vf_func_cfg_req(struct bnxt *bp,
2285                                      struct hwrm_func_cfg_input *req,
2286                                      int num_vfs)
2287 {
2288         req->enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_MTU |
2289                         HWRM_FUNC_CFG_INPUT_ENABLES_MRU |
2290                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS |
2291                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_STAT_CTXS |
2292                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
2293                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_TX_RINGS |
2294                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RX_RINGS |
2295                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_L2_CTXS |
2296                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS |
2297                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS);
2298
2299         req->mtu = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
2300                                     ETHER_CRC_LEN + VLAN_TAG_SIZE);
2301         req->mru = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
2302                                     ETHER_CRC_LEN + VLAN_TAG_SIZE);
2303         req->num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx /
2304                                                 (num_vfs + 1));
2305         req->num_stat_ctxs = rte_cpu_to_le_16(bp->max_stat_ctx / (num_vfs + 1));
2306         req->num_cmpl_rings = rte_cpu_to_le_16(bp->max_cp_rings /
2307                                                (num_vfs + 1));
2308         req->num_tx_rings = rte_cpu_to_le_16(bp->max_tx_rings / (num_vfs + 1));
2309         req->num_rx_rings = rte_cpu_to_le_16(bp->max_rx_rings / (num_vfs + 1));
2310         req->num_l2_ctxs = rte_cpu_to_le_16(bp->max_l2_ctx / (num_vfs + 1));
2311         /* TODO: For now, do not support VMDq/RFS on VFs. */
2312         req->num_vnics = rte_cpu_to_le_16(1);
2313         req->num_hw_ring_grps = rte_cpu_to_le_16(bp->max_ring_grps /
2314                                                  (num_vfs + 1));
2315 }
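/*
 * Example of the even split above (illustrative numbers only): with
 * max_tx_rings = 16 and num_vfs = 3, each function (PF plus three
 * VFs) is offered 16 / (3 + 1) = 4 TX rings; every resource except
 * num_vnics (pinned to 1 per the TODO) uses the same
 * divide-by-(num_vfs + 1) rule.
 */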
2316
2317 static void add_random_mac_if_needed(struct bnxt *bp,
2318                                      struct hwrm_func_cfg_input *cfg_req,
2319                                      int vf)
2320 {
2321         struct ether_addr mac;
2322
2323         if (bnxt_hwrm_func_qcfg_vf_default_mac(bp, vf, &mac))
2324                 return;
2325
2326         if (memcmp(mac.addr_bytes, "\x00\x00\x00\x00\x00\x00", ETHER_ADDR_LEN) == 0) {
2327                 cfg_req->enables |=
2328                 rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
2329                 eth_random_addr(cfg_req->dflt_mac_addr);
2330                 bp->pf.vf_info[vf].random_mac = true;
2331         } else {
2332                 memcpy(cfg_req->dflt_mac_addr, mac.addr_bytes, ETHER_ADDR_LEN);
2333         }
2334 }
2335
2336 static void reserve_resources_from_vf(struct bnxt *bp,
2337                                       struct hwrm_func_cfg_input *cfg_req,
2338                                       int vf)
2339 {
2340         struct hwrm_func_qcaps_input req = {0};
2341         struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
2342         int rc;
2343
2344         /* Get the actual allocated values now */
2345         HWRM_PREP(req, FUNC_QCAPS);
2346         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2347         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2348
2349         if (rc) {
2350                 RTE_LOG(ERR, PMD, "hwrm_func_qcaps failed rc:%d\n", rc);
2351                 copy_func_cfg_to_qcaps(cfg_req, resp);
2352         } else if (resp->error_code) {
2353                 rc = rte_le_to_cpu_16(resp->error_code);
2354                 RTE_LOG(ERR, PMD, "hwrm_func_qcaps error %d\n", rc);
2355                 copy_func_cfg_to_qcaps(cfg_req, resp);
2356         }
2357
2358         bp->max_rsscos_ctx -= rte_le_to_cpu_16(resp->max_rsscos_ctx);
2359         bp->max_stat_ctx -= rte_le_to_cpu_16(resp->max_stat_ctx);
2360         bp->max_cp_rings -= rte_le_to_cpu_16(resp->max_cmpl_rings);
2361         bp->max_tx_rings -= rte_le_to_cpu_16(resp->max_tx_rings);
2362         bp->max_rx_rings -= rte_le_to_cpu_16(resp->max_rx_rings);
2363         bp->max_l2_ctx -= rte_le_to_cpu_16(resp->max_l2_ctxs);
2364         /*
2365          * TODO: While VMDq is not supported with VFs, max_vnics is always
2366          * forced to 1, so the adjustment below stays disabled:
2367          */
2368         //bp->max_vnics -= rte_le_to_cpu_16(resp->max_vnics);
2369         bp->max_ring_grps -= rte_le_to_cpu_16(resp->max_hw_ring_grps);
2370
2371         HWRM_UNLOCK();
2372 }
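/*
 * Example of the bookkeeping above (illustrative numbers, a sketch
 * only): if the PF starts with max_tx_rings = 16 and each VF's
 * FUNC_QCAPS reports max_tx_rings = 4, then after three VFs are
 * processed the PF's remaining budget is 16 - 3 * 4 = 4 TX rings;
 * the same subtraction is applied to the other resource counters.
 */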
2373
2374 int bnxt_hwrm_func_qcfg_current_vf_vlan(struct bnxt *bp, int vf)
2375 {
2376         struct hwrm_func_qcfg_input req = {0};
2377         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
2378         int rc;
2379
2380         /* Query the VF's current default VLAN */
2381         HWRM_PREP(req, FUNC_QCFG);
2382         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2383         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2384         if (rc) {
2385                 RTE_LOG(ERR, PMD, "hwrm_func_qcfg failed rc:%d\n", rc);
2386                 return -1;
2387         } else if (resp->error_code) {
2388                 rc = rte_le_to_cpu_16(resp->error_code);
2389                 RTE_LOG(ERR, PMD, "hwrm_func_qcfg error %d\n", rc);
2390                 return -1;
2391         }
2392         rc = rte_le_to_cpu_16(resp->vlan);
2393
2394         HWRM_UNLOCK();
2395
2396         return rc;
2397 }
2398
2399 static int update_pf_resource_max(struct bnxt *bp)
2400 {
2401         struct hwrm_func_qcfg_input req = {0};
2402         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
2403         int rc;
2404
2405         /* And copy the allocated numbers into the pf struct */
2406         HWRM_PREP(req, FUNC_QCFG);
2407         req.fid = rte_cpu_to_le_16(0xffff);
2408         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2409         HWRM_CHECK_RESULT();
2410
2411         /* Only TX ring value reflects actual allocation? TODO */
2412         bp->max_tx_rings = rte_le_to_cpu_16(resp->alloc_tx_rings);
2413         bp->pf.evb_mode = resp->evb_mode;
2414
2415         HWRM_UNLOCK();
2416
2417         return rc;
2418 }
2419
2420 int bnxt_hwrm_allocate_pf_only(struct bnxt *bp)
2421 {
2422         int rc;
2423
2424         if (!BNXT_PF(bp)) {
2425                 RTE_LOG(ERR, PMD, "Attempt to allocate VFs on a VF!\n");
2426                 return -1;
2427         }
2428
2429         rc = bnxt_hwrm_func_qcaps(bp);
2430         if (rc)
2431                 return rc;
2432
2433         bp->pf.func_cfg_flags &=
2434                 ~(HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE |
2435                   HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE);
2436         bp->pf.func_cfg_flags |=
2437                 HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE;
2438         rc = bnxt_hwrm_pf_func_cfg(bp, bp->max_tx_rings);
2439         return rc;
2440 }
2441
2442 int bnxt_hwrm_allocate_vfs(struct bnxt *bp, int num_vfs)
2443 {
2444         struct hwrm_func_cfg_input req = {0};
2445         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2446         int i;
2447         size_t sz;
2448         int rc = 0;
2449         size_t req_buf_sz;
2450
2451         if (!BNXT_PF(bp)) {
2452                 RTE_LOG(ERR, PMD, "Attempt to allocate VFs on a VF!\n");
2453                 return -1;
2454         }
2455
2456         rc = bnxt_hwrm_func_qcaps(bp);
2457
2458         if (rc)
2459                 return rc;
2460
2461         bp->pf.active_vfs = num_vfs;
2462
2463         /*
2464          * First, configure the PF to only use one TX ring.  This ensures that
2465          * there are enough rings for all VFs.
2466          *
2467          * If we don't do this, when we call func_alloc() later, we will lock
2468          * extra rings to the PF that won't be available during func_cfg() of
2469          * the VFs.
2470          *
2471          * This has been fixed in firmware versions above 20.6.54.
2472          */
2473         bp->pf.func_cfg_flags &=
2474                 ~(HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE |
2475                   HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE);
2476         bp->pf.func_cfg_flags |=
2477                 HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE;
2478         rc = bnxt_hwrm_pf_func_cfg(bp, 1);
2479         if (rc)
2480                 return rc;
2481
2482         /*
2483          * Now, create and register a buffer to hold forwarded VF requests
2484          */
2485         req_buf_sz = num_vfs * HWRM_MAX_REQ_LEN;
2486         bp->pf.vf_req_buf = rte_malloc("bnxt_vf_fwd", req_buf_sz,
2487                 page_roundup(req_buf_sz));
2488         if (bp->pf.vf_req_buf == NULL) {
2489                 rc = -ENOMEM;
2490                 goto error_free;
2491         }
2492         for (sz = 0; sz < req_buf_sz; sz += getpagesize())
2493                 rte_mem_lock_page(((char *)bp->pf.vf_req_buf) + sz);
2494         for (i = 0; i < num_vfs; i++)
2495                 bp->pf.vf_info[i].req_buf = ((char *)bp->pf.vf_req_buf) +
2496                                         (i * HWRM_MAX_REQ_LEN);
2497
2498         rc = bnxt_hwrm_func_buf_rgtr(bp);
2499         if (rc)
2500                 goto error_free;
2501
2502         populate_vf_func_cfg_req(bp, &req, num_vfs);
2503
2504         bp->pf.active_vfs = 0;
2505         for (i = 0; i < num_vfs; i++) {
2506                 add_random_mac_if_needed(bp, &req, i);
2507
2508                 HWRM_PREP(req, FUNC_CFG);
2509                 req.flags = rte_cpu_to_le_32(bp->pf.vf_info[i].func_cfg_flags);
2510                 req.fid = rte_cpu_to_le_16(bp->pf.vf_info[i].fid);
2511                 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2512
2513                 /* Clear enable flag for next pass */
2514                 req.enables &= ~rte_cpu_to_le_32(
2515                                 HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
2516
2517                 if (rc || resp->error_code) {
2518                         RTE_LOG(ERR, PMD,
2519                                 "Failed to initialize VF %d\n", i);
2520                         RTE_LOG(ERR, PMD,
2521                                 "Not all VFs available. (%d, %d)\n",
2522                                 rc, resp->error_code);
2523                         HWRM_UNLOCK();
2524                         break;
2525                 }
2526
2527                 HWRM_UNLOCK();
2528
2529                 reserve_resources_from_vf(bp, &req, i);
2530                 bp->pf.active_vfs++;
2531                 bnxt_hwrm_func_clr_stats(bp, bp->pf.vf_info[i].fid);
2532         }
2533
2534         /*
2535          * Now configure the PF to use "the rest" of the resources.
2536          * We use STD_TX_RING_MODE here, which limits the number of TX
2537          * rings available to the PF but allows QoS to function properly.
2538          * Without it, the PF rings would break bandwidth settings.
2539          */
2540         rc = bnxt_hwrm_pf_func_cfg(bp, bp->max_tx_rings);
2541         if (rc)
2542                 goto error_free;
2543
2544         rc = update_pf_resource_max(bp);
2545         if (rc)
2546                 goto error_free;
2547
2548         return rc;
2549
2550 error_free:
2551         bnxt_hwrm_func_buf_unrgtr(bp);
2552         return rc;
2553 }
2554
2555 int bnxt_hwrm_pf_evb_mode(struct bnxt *bp)
2556 {
2557         struct hwrm_func_cfg_input req = {0};
2558         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2559         int rc;
2560
2561         HWRM_PREP(req, FUNC_CFG);
2562
2563         req.fid = rte_cpu_to_le_16(0xffff);
2564         req.enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_EVB_MODE);
2565         req.evb_mode = bp->pf.evb_mode;
2566
2567         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2568         HWRM_CHECK_RESULT();
2569         HWRM_UNLOCK();
2570
2571         return rc;
2572 }
2573
2574 int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, uint16_t port,
2575                                 uint8_t tunnel_type)
2576 {
2577         struct hwrm_tunnel_dst_port_alloc_input req = {0};
2578         struct hwrm_tunnel_dst_port_alloc_output *resp = bp->hwrm_cmd_resp_addr;
2579         int rc = 0;
2580
2581         HWRM_PREP(req, TUNNEL_DST_PORT_ALLOC);
2582         req.tunnel_type = tunnel_type;
2583         req.tunnel_dst_port_val = port;
2584         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2585         HWRM_CHECK_RESULT();
2586
2587         switch (tunnel_type) {
2588         case HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN:
2589                 bp->vxlan_fw_dst_port_id = resp->tunnel_dst_port_id;
2590                 bp->vxlan_port = port;
2591                 break;
2592         case HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_GENEVE:
2593                 bp->geneve_fw_dst_port_id = resp->tunnel_dst_port_id;
2594                 bp->geneve_port = port;
2595                 break;
2596         default:
2597                 break;
2598         }
2599
2600         HWRM_UNLOCK();
2601
2602         return rc;
2603 }
2604
2605 int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, uint16_t port,
2606                                 uint8_t tunnel_type)
2607 {
2608         struct hwrm_tunnel_dst_port_free_input req = {0};
2609         struct hwrm_tunnel_dst_port_free_output *resp = bp->hwrm_cmd_resp_addr;
2610         int rc = 0;
2611
2612         HWRM_PREP(req, TUNNEL_DST_PORT_FREE);
2613
2614         req.tunnel_type = tunnel_type;
2615         req.tunnel_dst_port_id = rte_cpu_to_be_16(port);
2616         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2617
2618         HWRM_CHECK_RESULT();
2619         HWRM_UNLOCK();
2620
2621         return rc;
2622 }
2623
2624 int bnxt_hwrm_func_cfg_vf_set_flags(struct bnxt *bp, uint16_t vf,
2625                                         uint32_t flags)
2626 {
2627         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2628         struct hwrm_func_cfg_input req = {0};
2629         int rc;
2630
2631         HWRM_PREP(req, FUNC_CFG);
2632
2633         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2634         req.flags = rte_cpu_to_le_32(flags);
2635         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2636
2637         HWRM_CHECK_RESULT();
2638         HWRM_UNLOCK();
2639
2640         return rc;
2641 }
2642
2643 void vf_vnic_set_rxmask_cb(struct bnxt_vnic_info *vnic, void *flagp)
2644 {
2645         uint32_t *flag = flagp;
2646
2647         vnic->flags = *flag;
2648 }
2649
2650 int bnxt_set_rx_mask_no_vlan(struct bnxt *bp, struct bnxt_vnic_info *vnic)
2651 {
2652         return bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
2653 }
2654
2655 int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp)
2656 {
2657         int rc = 0;
2658         struct hwrm_func_buf_rgtr_input req = {.req_type = 0 };
2659         struct hwrm_func_buf_rgtr_output *resp = bp->hwrm_cmd_resp_addr;
2660
2661         HWRM_PREP(req, FUNC_BUF_RGTR);
2662
2663         req.req_buf_num_pages = rte_cpu_to_le_16(1);
2664         req.req_buf_page_size = rte_cpu_to_le_16(
2665                          page_getenum(bp->pf.active_vfs * HWRM_MAX_REQ_LEN));
2666         req.req_buf_len = rte_cpu_to_le_16(HWRM_MAX_REQ_LEN);
2667         req.req_buf_page_addr[0] =
2668                 rte_cpu_to_le_64(rte_mem_virt2iova(bp->pf.vf_req_buf));
2669         if (req.req_buf_page_addr[0] == 0) {
2670                 RTE_LOG(ERR, PMD,
2671                         "unable to map buffer address to physical memory\n");
2672                 return -ENOMEM;
2673         }
2674
2675         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2676
2677         HWRM_CHECK_RESULT();
2678         HWRM_UNLOCK();
2679
2680         return rc;
2681 }
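/*
 * Example of the buffer registration above (a sketch, assuming
 * HWRM_MAX_REQ_LEN is 128 bytes): with 64 active VFs the forwarded-
 * request buffer spans 64 * 128 = 8192 bytes, so page_getenum()
 * returns 13 (2^13 == 8192) and one 8 KiB page is registered, with
 * req_buf_len left at HWRM_MAX_REQ_LEN per request slot.
 */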
2682
2683 int bnxt_hwrm_func_buf_unrgtr(struct bnxt *bp)
2684 {
2685         int rc = 0;
2686         struct hwrm_func_buf_unrgtr_input req = {.req_type = 0 };
2687         struct hwrm_func_buf_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;
2688
2689         HWRM_PREP(req, FUNC_BUF_UNRGTR);
2690
2691         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2692
2693         HWRM_CHECK_RESULT();
2694         HWRM_UNLOCK();
2695
2696         return rc;
2697 }
2698
2699 int bnxt_hwrm_func_cfg_def_cp(struct bnxt *bp)
2700 {
2701         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2702         struct hwrm_func_cfg_input req = {0};
2703         int rc;
2704
2705         HWRM_PREP(req, FUNC_CFG);
2706
2707         req.fid = rte_cpu_to_le_16(0xffff);
2708         req.flags = rte_cpu_to_le_32(bp->pf.func_cfg_flags);
2709         req.enables = rte_cpu_to_le_32(
2710                         HWRM_FUNC_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
2711         req.async_event_cr = rte_cpu_to_le_16(
2712                         bp->def_cp_ring->cp_ring_struct->fw_ring_id);
2713         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2714
2715         HWRM_CHECK_RESULT();
2716         HWRM_UNLOCK();
2717
2718         return rc;
2719 }
2720
2721 int bnxt_hwrm_vf_func_cfg_def_cp(struct bnxt *bp)
2722 {
2723         struct hwrm_func_vf_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2724         struct hwrm_func_vf_cfg_input req = {0};
2725         int rc;
2726
2727         HWRM_PREP(req, FUNC_VF_CFG);
2728
2729         req.enables = rte_cpu_to_le_32(
2730                         HWRM_FUNC_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
2731         req.async_event_cr = rte_cpu_to_le_16(
2732                         bp->def_cp_ring->cp_ring_struct->fw_ring_id);
2733         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2734
2735         HWRM_CHECK_RESULT();
2736         HWRM_UNLOCK();
2737
2738         return rc;
2739 }
2740
2741 int bnxt_hwrm_set_default_vlan(struct bnxt *bp, int vf, uint8_t is_vf)
2742 {
2743         struct hwrm_func_cfg_input req = {0};
2744         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2745         uint16_t dflt_vlan, fid;
2746         uint32_t func_cfg_flags;
2747         int rc = 0;
2748
2749         HWRM_PREP(req, FUNC_CFG);
2750
2751         if (is_vf) {
2752                 dflt_vlan = bp->pf.vf_info[vf].dflt_vlan;
2753                 fid = bp->pf.vf_info[vf].fid;
2754                 func_cfg_flags = bp->pf.vf_info[vf].func_cfg_flags;
2755         } else {
2756                 fid = rte_cpu_to_le_16(0xffff);
2757                 func_cfg_flags = bp->pf.func_cfg_flags;
2758                 dflt_vlan = bp->vlan;
2759         }
2760
2761         req.flags = rte_cpu_to_le_32(func_cfg_flags);
2762         req.fid = rte_cpu_to_le_16(fid);
2763         req.enables |= rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_VLAN);
2764         req.dflt_vlan = rte_cpu_to_le_16(dflt_vlan);
2765
2766         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2767
2768         HWRM_CHECK_RESULT();
2769         HWRM_UNLOCK();
2770
2771         return rc;
2772 }
2773
2774 int bnxt_hwrm_func_bw_cfg(struct bnxt *bp, uint16_t vf,
2775                         uint16_t max_bw, uint16_t enables)
2776 {
2777         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2778         struct hwrm_func_cfg_input req = {0};
2779         int rc;
2780
2781         HWRM_PREP(req, FUNC_CFG);
2782
2783         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2784         req.enables |= rte_cpu_to_le_32(enables);
2785         req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
2786         req.max_bw = rte_cpu_to_le_32(max_bw);
2787         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2788
2789         HWRM_CHECK_RESULT();
2790         HWRM_UNLOCK();
2791
2792         return rc;
2793 }
2794
2795 int bnxt_hwrm_set_vf_vlan(struct bnxt *bp, int vf)
2796 {
2797         struct hwrm_func_cfg_input req = {0};
2798         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2799         int rc = 0;
2800
2801         HWRM_PREP(req, FUNC_CFG);
2802
2803         req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
2804         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2805         req.enables |= rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_VLAN);
2806         req.dflt_vlan = rte_cpu_to_le_16(bp->pf.vf_info[vf].dflt_vlan);
2807
2808         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2809
2810         HWRM_CHECK_RESULT();
2811         HWRM_UNLOCK();
2812
2813         return rc;
2814 }
2815
2816 int bnxt_hwrm_reject_fwd_resp(struct bnxt *bp, uint16_t target_id,
2817                               void *encaped, size_t ec_size)
2818 {
2819         int rc = 0;
2820         struct hwrm_reject_fwd_resp_input req = {.req_type = 0};
2821         struct hwrm_reject_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
2822
2823         if (ec_size > sizeof(req.encap_request))
2824                 return -1;
2825
2826         HWRM_PREP(req, REJECT_FWD_RESP);
2827
2828         req.encap_resp_target_id = rte_cpu_to_le_16(target_id);
2829         memcpy(req.encap_request, encaped, ec_size);
2830
2831         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2832
2833         HWRM_CHECK_RESULT();
2834         HWRM_UNLOCK();
2835
2836         return rc;
2837 }
2838
2839 int bnxt_hwrm_func_qcfg_vf_default_mac(struct bnxt *bp, uint16_t vf,
2840                                        struct ether_addr *mac)
2841 {
2842         struct hwrm_func_qcfg_input req = {0};
2843         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
2844         int rc;
2845
2846         HWRM_PREP(req, FUNC_QCFG);
2847
2848         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2849         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2850
2851         HWRM_CHECK_RESULT();
2852
2853         memcpy(mac->addr_bytes, resp->mac_address, ETHER_ADDR_LEN);
2854
2855         HWRM_UNLOCK();
2856
2857         return rc;
2858 }
2859
2860 int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, uint16_t target_id,
2861                             void *encaped, size_t ec_size)
2862 {
2863         int rc = 0;
2864         struct hwrm_exec_fwd_resp_input req = {.req_type = 0};
2865         struct hwrm_exec_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
2866
2867         if (ec_size > sizeof(req.encap_request))
2868                 return -1;
2869
2870         HWRM_PREP(req, EXEC_FWD_RESP);
2871
2872         req.encap_resp_target_id = rte_cpu_to_le_16(target_id);
2873         memcpy(req.encap_request, encaped, ec_size);
2874
2875         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2876
2877         HWRM_CHECK_RESULT();
2878         HWRM_UNLOCK();
2879
2880         return rc;
2881 }
2882
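/*
 * Hedged usage sketch (not code from this driver): when the PF pulls a
 * forwarded VF command out of the forward-response buffer, it either
 * executes or rejects it. "fwd_cmd", "req_len", "fw_vf_id" and the policy
 * hook are hypothetical names.
 *
 *	void *fwd_cmd = ...;		message copied from the VF request area
 *	size_t req_len = ...;
 *
 *	if (vf_request_is_allowed(bp, fwd_cmd))
 *		rc = bnxt_hwrm_exec_fwd_resp(bp, fw_vf_id, fwd_cmd, req_len);
 *	else
 *		rc = bnxt_hwrm_reject_fwd_resp(bp, fw_vf_id, fwd_cmd, req_len);
 */
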
2883 int bnxt_hwrm_ctx_qstats(struct bnxt *bp, uint32_t cid, int idx,
2884                          struct rte_eth_stats *stats, uint8_t rx)
2885 {
2886         int rc = 0;
2887         struct hwrm_stat_ctx_query_input req = {.req_type = 0};
2888         struct hwrm_stat_ctx_query_output *resp = bp->hwrm_cmd_resp_addr;
2889
2890         HWRM_PREP(req, STAT_CTX_QUERY);
2891
2892         req.stat_ctx_id = rte_cpu_to_le_32(cid);
2893
2894         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2895
2896         HWRM_CHECK_RESULT();
2897
2898         if (rx) {
2899                 stats->q_ipackets[idx] = rte_le_to_cpu_64(resp->rx_ucast_pkts);
2900                 stats->q_ipackets[idx] += rte_le_to_cpu_64(resp->rx_mcast_pkts);
2901                 stats->q_ipackets[idx] += rte_le_to_cpu_64(resp->rx_bcast_pkts);
2902                 stats->q_ibytes[idx] = rte_le_to_cpu_64(resp->rx_ucast_bytes);
2903                 stats->q_ibytes[idx] += rte_le_to_cpu_64(resp->rx_mcast_bytes);
2904                 stats->q_ibytes[idx] += rte_le_to_cpu_64(resp->rx_bcast_bytes);
2905                 stats->q_errors[idx] = rte_le_to_cpu_64(resp->rx_err_pkts);
2906                 stats->q_errors[idx] += rte_le_to_cpu_64(resp->rx_drop_pkts);
2907         } else {
2908                 stats->q_opackets[idx] = rte_le_to_cpu_64(resp->tx_ucast_pkts);
2909                 stats->q_opackets[idx] += rte_le_to_cpu_64(resp->tx_mcast_pkts);
2910                 stats->q_opackets[idx] += rte_le_to_cpu_64(resp->tx_bcast_pkts);
2911                 stats->q_obytes[idx] = rte_le_to_cpu_64(resp->tx_ucast_bytes);
2912                 stats->q_obytes[idx] += rte_le_to_cpu_64(resp->tx_mcast_bytes);
2913                 stats->q_obytes[idx] += rte_le_to_cpu_64(resp->tx_bcast_bytes);
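                /*
                 * Note: rte_eth_stats documents q_errors as an Rx drop
                 * counter; Tx errors are folded into the same slot here.
                 */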
2914                 stats->q_errors[idx] += rte_le_to_cpu_64(resp->tx_err_pkts);
2915         }
2916
2918         HWRM_UNLOCK();
2919
2920         return rc;
2921 }
2922
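/*
 * Illustrative caller sketch, assuming the ring layout used elsewhere in
 * this PMD (bp->rx_cp_nr_rings, bp->rx_queues, cpr->hw_stats_ctx_id):
 * per-queue stats are gathered by walking the Rx completion rings.
 *
 *	struct rte_eth_stats stats = { 0 };
 *	unsigned int i;
 *
 *	for (i = 0; i < bp->rx_cp_nr_rings; i++) {
 *		struct bnxt_rx_queue *rxq = bp->rx_queues[i];
 *		struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
 *
 *		bnxt_hwrm_ctx_qstats(bp, cpr->hw_stats_ctx_id, i, &stats, 1);
 *	}
 */
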
2923 int bnxt_hwrm_port_qstats(struct bnxt *bp)
2924 {
2925         struct hwrm_port_qstats_input req = {0};
2926         struct hwrm_port_qstats_output *resp = bp->hwrm_cmd_resp_addr;
2927         struct bnxt_pf_info *pf = &bp->pf;
2928         int rc;
2929
2930         if (!(bp->flags & BNXT_FLAG_PORT_STATS))
2931                 return 0;
2932
2933         HWRM_PREP(req, PORT_QSTATS);
2934
2935         req.port_id = rte_cpu_to_le_16(pf->port_id);
2936         req.tx_stat_host_addr = rte_cpu_to_le_64(bp->hw_tx_port_stats_map);
2937         req.rx_stat_host_addr = rte_cpu_to_le_64(bp->hw_rx_port_stats_map);
2938         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2939
2940         HWRM_CHECK_RESULT();
2941         HWRM_UNLOCK();
2942
2943         return rc;
2944 }
2945
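/*
 * Hedged sketch: a successful PORT_QSTATS leaves the counters DMAed into
 * the buffers mapped at hw_rx_port_stats_map/hw_tx_port_stats_map, so a
 * caller reads them from host memory afterwards, e.g. (field names as
 * assumed from hsi_struct_def_dpdk.h):
 *
 *	struct rx_port_stats *prx = (void *)bp->hw_rx_port_stats;
 *	uint64_t ipackets;
 *
 *	if (bnxt_hwrm_port_qstats(bp) == 0)
 *		ipackets = rte_le_to_cpu_64(prx->rx_ucast_frames) +
 *			   rte_le_to_cpu_64(prx->rx_mcast_frames) +
 *			   rte_le_to_cpu_64(prx->rx_bcast_frames);
 */
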
2946 int bnxt_hwrm_port_clr_stats(struct bnxt *bp)
2947 {
2948         struct hwrm_port_clr_stats_input req = {0};
2949         struct hwrm_port_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;
2950         struct bnxt_pf_info *pf = &bp->pf;
2951         int rc;
2952
2953         if (!(bp->flags & BNXT_FLAG_PORT_STATS))
2954                 return 0;
2955
2956         HWRM_PREP(req, PORT_CLR_STATS);
2957
2958         req.port_id = rte_cpu_to_le_16(pf->port_id);
2959         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2960
2961         HWRM_CHECK_RESULT();
2962         HWRM_UNLOCK();
2963
2964         return rc;
2965 }
2966
2967 int bnxt_hwrm_port_led_qcaps(struct bnxt *bp)
2968 {
2969         struct hwrm_port_led_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
2970         struct hwrm_port_led_qcaps_input req = {0};
2971         int rc;
2972
2973         if (BNXT_VF(bp))
2974                 return 0;
2975
2976         HWRM_PREP(req, PORT_LED_QCAPS);
2977         req.port_id = bp->pf.port_id;
2978         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2979
2980         HWRM_CHECK_RESULT();
2981
2982         if (resp->num_leds > 0 && resp->num_leds < BNXT_MAX_LED) {
2983                 unsigned int i;
2984
2985                 bp->num_leds = resp->num_leds;
2986                 memcpy(bp->leds, &resp->led0_id,
2987                         sizeof(bp->leds[0]) * bp->num_leds);
2988                 for (i = 0; i < bp->num_leds; i++) {
2989                         struct bnxt_led_info *led = &bp->leds[i];
2990
2991                         uint16_t caps = led->led_state_caps;
2992
2993                         if (!led->led_group_id ||
2994                                 !BNXT_LED_ALT_BLINK_CAP(caps)) {
2995                                 bp->num_leds = 0;
2996                                 break;
2997                         }
2998                 }
2999         }
3000
3001         HWRM_UNLOCK();
3002
3003         return rc;
3004 }
3005
3006 int bnxt_hwrm_port_led_cfg(struct bnxt *bp, bool led_on)
3007 {
3008         struct hwrm_port_led_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3009         struct hwrm_port_led_cfg_input req = {0};
3010         struct bnxt_led_cfg *led_cfg;
3011         uint8_t led_state = HWRM_PORT_LED_QCFG_OUTPUT_LED0_STATE_DEFAULT;
3012         uint16_t duration = 0;
3013         int rc, i;
3014
3015         if (!bp->num_leds || BNXT_VF(bp))
3016                 return -EOPNOTSUPP;
3017
3018         HWRM_PREP(req, PORT_LED_CFG);
3019
3020         if (led_on) {
3021                 led_state = HWRM_PORT_LED_CFG_INPUT_LED0_STATE_BLINKALT;
3022                 duration = rte_cpu_to_le_16(500);
3023         }
3024         req.port_id = bp->pf.port_id;
3025         req.num_leds = bp->num_leds;
3026         led_cfg = (struct bnxt_led_cfg *)&req.led0_id;
3027         for (i = 0; i < bp->num_leds; i++, led_cfg++) {
3028                 req.enables |= BNXT_LED_DFLT_ENABLES(i);
3029                 led_cfg->led_id = bp->leds[i].led_id;
3030                 led_cfg->led_state = led_state;
3031                 led_cfg->led_blink_on = duration;
3032                 led_cfg->led_blink_off = duration;
3033                 led_cfg->led_group_id = bp->leds[i].led_group_id;
3034         }
3035
3036         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3037
3038         HWRM_CHECK_RESULT();
3039         HWRM_UNLOCK();
3040
3041         return rc;
3042 }
3043
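/*
 * Hedged usage sketch: the rte_eth_dev LED callbacks reduce to this
 * helper, roughly:
 *
 *	static int bnxt_dev_led_on_op(struct rte_eth_dev *dev)
 *	{
 *		struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
 *
 *		return bnxt_hwrm_port_led_cfg(bp, true);
 *	}
 */
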
3044 int bnxt_hwrm_nvm_get_dir_info(struct bnxt *bp, uint32_t *entries,
3045                                uint32_t *length)
3046 {
3047         int rc;
3048         struct hwrm_nvm_get_dir_info_input req = {0};
3049         struct hwrm_nvm_get_dir_info_output *resp = bp->hwrm_cmd_resp_addr;
3050
3051         HWRM_PREP(req, NVM_GET_DIR_INFO);
3052
3053         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3054
3055         HWRM_CHECK_RESULT();
3056         HWRM_UNLOCK();
3057
3058         if (!rc) {
3059                 *entries = rte_le_to_cpu_32(resp->entries);
3060                 *length = rte_le_to_cpu_32(resp->entry_length);
3061         }
3062         return rc;
3063 }
3064
3065 int bnxt_get_nvram_directory(struct bnxt *bp, uint32_t len, uint8_t *data)
3066 {
3067         int rc;
3068         uint32_t dir_entries;
3069         uint32_t entry_length;
3070         uint8_t *buf;
3071         size_t buflen;
3072         rte_iova_t dma_handle;
3073         struct hwrm_nvm_get_dir_entries_input req = {0};
3074         struct hwrm_nvm_get_dir_entries_output *resp = bp->hwrm_cmd_resp_addr;
3075
3076         rc = bnxt_hwrm_nvm_get_dir_info(bp, &dir_entries, &entry_length);
3077         if (rc != 0)
3078                 return rc;
3079
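        /*
         * Note: the uint8_t stores below keep only the low byte of each
         * 32-bit count.
         */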
3080         *data++ = dir_entries;
3081         *data++ = entry_length;
3082         len -= 2;
3083         memset(data, 0xff, len);
3084
3085         buflen = dir_entries * entry_length;
        buf = rte_malloc("nvm_dir", buflen, 0);
        if (buf == NULL)
                return -ENOMEM;
        rte_mem_lock_page(buf);
        dma_handle = rte_mem_virt2iova(buf);
        if (dma_handle == 0) {
                rte_free(buf);
                RTE_LOG(ERR, PMD,
                        "unable to map response address to physical memory\n");
                return -ENOMEM;
        }
3096         HWRM_PREP(req, NVM_GET_DIR_ENTRIES);
3097         req.host_dest_addr = rte_cpu_to_le_64(dma_handle);
3098         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3099
3100         HWRM_CHECK_RESULT();
3101         HWRM_UNLOCK();
3102
3103         if (rc == 0)
3104                 memcpy(data, buf, len > buflen ? buflen : len);
3105
3106         rte_free(buf);
3107
3108         return rc;
3109 }
3110
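/*
 * Illustrative sketch of the expected calling sequence: the buffer is
 * sized from NVM_GET_DIR_INFO before the directory is fetched. The two
 * leading bytes mirror the header written by bnxt_get_nvram_directory().
 *
 *	uint32_t entries, entry_length, len;
 *	uint8_t *data;
 *
 *	if (bnxt_hwrm_nvm_get_dir_info(bp, &entries, &entry_length) != 0)
 *		return -EIO;
 *	len = 2 + entries * entry_length;
 *	data = rte_zmalloc("nvm_dir_copy", len, 0);
 *	if (data != NULL)
 *		rc = bnxt_get_nvram_directory(bp, len, data);
 */
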
3111 int bnxt_hwrm_get_nvram_item(struct bnxt *bp, uint32_t index,
3112                              uint32_t offset, uint32_t length,
3113                              uint8_t *data)
3114 {
3115         int rc;
3116         uint8_t *buf;
3117         rte_iova_t dma_handle;
3118         struct hwrm_nvm_read_input req = {0};
3119         struct hwrm_nvm_read_output *resp = bp->hwrm_cmd_resp_addr;
3120
        buf = rte_malloc("nvm_item", length, 0);
        if (!buf)
                return -ENOMEM;
        rte_mem_lock_page(buf);

        dma_handle = rte_mem_virt2iova(buf);
        if (dma_handle == 0) {
                rte_free(buf);
                RTE_LOG(ERR, PMD,
                        "unable to map response address to physical memory\n");
                return -ENOMEM;
        }
3132         HWRM_PREP(req, NVM_READ);
3133         req.host_dest_addr = rte_cpu_to_le_64(dma_handle);
3134         req.dir_idx = rte_cpu_to_le_16(index);
3135         req.offset = rte_cpu_to_le_32(offset);
3136         req.len = rte_cpu_to_le_32(length);
3137         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3138         HWRM_CHECK_RESULT();
3139         HWRM_UNLOCK();
3140         if (rc == 0)
3141                 memcpy(data, buf, length);
3142
3143         rte_free(buf);
3144         return rc;
3145 }
3146
3147 int bnxt_hwrm_erase_nvram_directory(struct bnxt *bp, uint8_t index)
3148 {
3149         int rc;
3150         struct hwrm_nvm_erase_dir_entry_input req = {0};
3151         struct hwrm_nvm_erase_dir_entry_output *resp = bp->hwrm_cmd_resp_addr;
3152
3153         HWRM_PREP(req, NVM_ERASE_DIR_ENTRY);
3154         req.dir_idx = rte_cpu_to_le_16(index);
3155         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3156         HWRM_CHECK_RESULT();
3157         HWRM_UNLOCK();
3158
3159         return rc;
3160 }
3161
3163 int bnxt_hwrm_flash_nvram(struct bnxt *bp, uint16_t dir_type,
3164                           uint16_t dir_ordinal, uint16_t dir_ext,
3165                           uint16_t dir_attr, const uint8_t *data,
3166                           size_t data_len)
3167 {
3168         int rc;
3169         struct hwrm_nvm_write_input req = {0};
3170         struct hwrm_nvm_write_output *resp = bp->hwrm_cmd_resp_addr;
3171         rte_iova_t dma_handle;
3172         uint8_t *buf;
3173
        /*
         * Stage the payload before HWRM_PREP() so that an allocation or
         * mapping failure cannot return with the HWRM lock held.
         */
        buf = rte_malloc("nvm_write", data_len, 0);
        if (!buf)
                return -ENOMEM;
        rte_mem_lock_page(buf);

        dma_handle = rte_mem_virt2iova(buf);
        if (dma_handle == 0) {
                rte_free(buf);
                RTE_LOG(ERR, PMD,
                        "unable to map source address to physical memory\n");
                return -ENOMEM;
        }
        memcpy(buf, data, data_len);

        HWRM_PREP(req, NVM_WRITE);

        req.dir_type = rte_cpu_to_le_16(dir_type);
        req.dir_ordinal = rte_cpu_to_le_16(dir_ordinal);
        req.dir_ext = rte_cpu_to_le_16(dir_ext);
        req.dir_attr = rte_cpu_to_le_16(dir_attr);
        req.dir_data_length = rte_cpu_to_le_32(data_len);
        req.host_src_addr = rte_cpu_to_le_64(dma_handle);
3195
3196         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3197
3198         HWRM_CHECK_RESULT();
3199         HWRM_UNLOCK();
3200
3201         rte_free(buf);
3202         return rc;
3203 }
3204
3205 static void
3206 bnxt_vnic_count(struct bnxt_vnic_info *vnic __rte_unused, void *cbdata)
3207 {
3208         uint32_t *count = cbdata;
3209
3210         *count = *count + 1;
3211 }
3212
3213 static int bnxt_vnic_count_hwrm_stub(struct bnxt *bp __rte_unused,
3214                                      struct bnxt_vnic_info *vnic __rte_unused)
3215 {
3216         return 0;
3217 }
3218
3219 int bnxt_vf_vnic_count(struct bnxt *bp, uint16_t vf)
3220 {
3221         uint32_t count = 0;
3222
3223         bnxt_hwrm_func_vf_vnic_query_and_config(bp, vf, bnxt_vnic_count,
3224             &count, bnxt_vnic_count_hwrm_stub);
3225
3226         return count;
3227 }
3228
3229 static int bnxt_hwrm_func_vf_vnic_query(struct bnxt *bp, uint16_t vf,
3230                                         uint16_t *vnic_ids)
3231 {
3232         struct hwrm_func_vf_vnic_ids_query_input req = {0};
3233         struct hwrm_func_vf_vnic_ids_query_output *resp =
3234                                                 bp->hwrm_cmd_resp_addr;
3235         int rc;
3236
3237         /* First query all VNIC ids */
3238         HWRM_PREP(req, FUNC_VF_VNIC_IDS_QUERY);
3239
3240         req.vf_id = rte_cpu_to_le_16(bp->pf.first_vf_id + vf);
3241         req.max_vnic_id_cnt = rte_cpu_to_le_32(bp->pf.total_vnics);
3242         req.vnic_id_tbl_addr = rte_cpu_to_le_64(rte_mem_virt2iova(vnic_ids));
3243
3244         if (req.vnic_id_tbl_addr == 0) {
3245                 HWRM_UNLOCK();
3246                 RTE_LOG(ERR, PMD,
3247                 "unable to map VNIC ID table address to physical memory\n");
3248                 return -ENOMEM;
3249         }
3250         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3251         if (rc) {
3252                 HWRM_UNLOCK();
3253                 RTE_LOG(ERR, PMD, "hwrm_func_vf_vnic_query failed rc:%d\n", rc);
3254                 return -1;
3255         } else if (resp->error_code) {
3256                 rc = rte_le_to_cpu_16(resp->error_code);
3257                 HWRM_UNLOCK();
3258                 RTE_LOG(ERR, PMD, "hwrm_func_vf_vnic_query error %d\n", rc);
3259                 return -1;
3260         }
3261         rc = rte_le_to_cpu_32(resp->vnic_id_cnt);
3262
3263         HWRM_UNLOCK();
3264
3265         return rc;
3266 }
3267
3268 /*
 * This function queries the VNIC IDs for a specified VF. It then calls
3270  * the vnic_cb to update the necessary field in vnic_info with cbdata.
3271  * Then it calls the hwrm_cb function to program this new vnic configuration.
3272  */
3273 int bnxt_hwrm_func_vf_vnic_query_and_config(struct bnxt *bp, uint16_t vf,
3274         void (*vnic_cb)(struct bnxt_vnic_info *, void *), void *cbdata,
3275         int (*hwrm_cb)(struct bnxt *bp, struct bnxt_vnic_info *vnic))
3276 {
3277         struct bnxt_vnic_info vnic;
3278         int rc = 0;
3279         int i, num_vnic_ids;
3280         uint16_t *vnic_ids;
3281         size_t vnic_id_sz;
3282         size_t sz;
3283
3284         /* First query all VNIC ids */
3285         vnic_id_sz = bp->pf.total_vnics * sizeof(*vnic_ids);
3286         vnic_ids = rte_malloc("bnxt_hwrm_vf_vnic_ids_query", vnic_id_sz,
3287                         RTE_CACHE_LINE_SIZE);
3288         if (vnic_ids == NULL) {
3289                 rc = -ENOMEM;
3290                 return rc;
3291         }
3292         for (sz = 0; sz < vnic_id_sz; sz += getpagesize())
3293                 rte_mem_lock_page(((char *)vnic_ids) + sz);
3294
        num_vnic_ids = bnxt_hwrm_func_vf_vnic_query(bp, vf, vnic_ids);

        if (num_vnic_ids < 0) {
                rte_free(vnic_ids);
                return num_vnic_ids;
        }
3299
        /* Retrieve each VNIC, let vnic_cb update it, then reprogram it. */
3301
3302         for (i = 0; i < num_vnic_ids; i++) {
3303                 memset(&vnic, 0, sizeof(struct bnxt_vnic_info));
3304                 vnic.fw_vnic_id = rte_le_to_cpu_16(vnic_ids[i]);
3305                 rc = bnxt_hwrm_vnic_qcfg(bp, &vnic, bp->pf.first_vf_id + vf);
3306                 if (rc)
3307                         break;
3308                 if (vnic.mru <= 4)      /* Indicates unallocated */
3309                         continue;
3310
3311                 vnic_cb(&vnic, cbdata);
3312
3313                 rc = hwrm_cb(bp, &vnic);
3314                 if (rc)
3315                         break;
3316         }
3317
3318         rte_free(vnic_ids);
3319
3320         return rc;
3321 }
3322
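/*
 * Hedged usage sketch: a per-VF VNIC attribute is changed by pairing a
 * small updater callback with bnxt_hwrm_vnic_cfg() as the programming
 * step. "bnxt_enable_bd_stall" is a hypothetical callback.
 *
 *	static void bnxt_enable_bd_stall(struct bnxt_vnic_info *vnic,
 *					 void *cbdata __rte_unused)
 *	{
 *		vnic->bd_stall = true;
 *	}
 *
 *	rc = bnxt_hwrm_func_vf_vnic_query_and_config(bp, vf,
 *			bnxt_enable_bd_stall, NULL, bnxt_hwrm_vnic_cfg);
 */
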
3323 int bnxt_hwrm_func_cfg_vf_set_vlan_anti_spoof(struct bnxt *bp, uint16_t vf,
3324                                               bool on)
3325 {
3326         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3327         struct hwrm_func_cfg_input req = {0};
3328         int rc;
3329
3330         HWRM_PREP(req, FUNC_CFG);
3331
3332         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
3333         req.enables |= rte_cpu_to_le_32(
3334                         HWRM_FUNC_CFG_INPUT_ENABLES_VLAN_ANTISPOOF_MODE);
3335         req.vlan_antispoof_mode = on ?
3336                 HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_VALIDATE_VLAN :
3337                 HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_NOCHECK;
3338         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3339
3340         HWRM_CHECK_RESULT();
3341         HWRM_UNLOCK();
3342
3343         return rc;
3344 }
3345
3346 int bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(struct bnxt *bp, int vf)
3347 {
3348         struct bnxt_vnic_info vnic;
3349         uint16_t *vnic_ids;
3350         size_t vnic_id_sz;
3351         int num_vnic_ids, i;
3352         size_t sz;
3353         int rc;
3354
3355         vnic_id_sz = bp->pf.total_vnics * sizeof(*vnic_ids);
3356         vnic_ids = rte_malloc("bnxt_hwrm_vf_vnic_ids_query", vnic_id_sz,
3357                         RTE_CACHE_LINE_SIZE);
3358         if (vnic_ids == NULL) {
3359                 rc = -ENOMEM;
3360                 return rc;
3361         }
3362
3363         for (sz = 0; sz < vnic_id_sz; sz += getpagesize())
3364                 rte_mem_lock_page(((char *)vnic_ids) + sz);
3365
3366         rc = bnxt_hwrm_func_vf_vnic_query(bp, vf, vnic_ids);
3367         if (rc <= 0)
3368                 goto exit;
3369         num_vnic_ids = rc;
3370
3371         /*
3372          * Loop through to find the default VNIC ID.
3373          * TODO: The easier way would be to obtain the resp->dflt_vnic_id
3374          * by sending the hwrm_func_qcfg command to the firmware.
3375          */
3376         for (i = 0; i < num_vnic_ids; i++) {
3377                 memset(&vnic, 0, sizeof(struct bnxt_vnic_info));
3378                 vnic.fw_vnic_id = rte_le_to_cpu_16(vnic_ids[i]);
3379                 rc = bnxt_hwrm_vnic_qcfg(bp, &vnic,
3380                                         bp->pf.first_vf_id + vf);
3381                 if (rc)
3382                         goto exit;
3383                 if (vnic.func_default) {
3384                         rte_free(vnic_ids);
3385                         return vnic.fw_vnic_id;
3386                 }
3387         }
3388         /* Could not find a default VNIC. */
3389         RTE_LOG(ERR, PMD, "No default VNIC\n");
3390 exit:
3391         rte_free(vnic_ids);
3392         return -1;
3393 }
3394
3395 int bnxt_hwrm_set_em_filter(struct bnxt *bp,
3396                          uint16_t dst_id,
3397                          struct bnxt_filter_info *filter)
3398 {
3399         int rc = 0;
3400         struct hwrm_cfa_em_flow_alloc_input req = {.req_type = 0 };
3401         struct hwrm_cfa_em_flow_alloc_output *resp = bp->hwrm_cmd_resp_addr;
3402         uint32_t enables = 0;
3403
3404         if (filter->fw_em_filter_id != UINT64_MAX)
3405                 bnxt_hwrm_clear_em_filter(bp, filter);
3406
3407         HWRM_PREP(req, CFA_EM_FLOW_ALLOC);
3408
3409         req.flags = rte_cpu_to_le_32(filter->flags);
3410
3411         enables = filter->enables |
3412               HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_ID;
3413         req.dst_id = rte_cpu_to_le_16(dst_id);
3414
3415         if (filter->ip_addr_type) {
3416                 req.ip_addr_type = filter->ip_addr_type;
3417                 enables |= HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IPADDR_TYPE;
3418         }
3419         if (enables &
3420             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_L2_FILTER_ID)
3421                 req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);
3422         if (enables &
3423             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_MACADDR)
3424                 memcpy(req.src_macaddr, filter->src_macaddr,
3425                        ETHER_ADDR_LEN);
3426         if (enables &
3427             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_MACADDR)
3428                 memcpy(req.dst_macaddr, filter->dst_macaddr,
3429                        ETHER_ADDR_LEN);
3430         if (enables &
3431             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_OVLAN_VID)
3432                 req.ovlan_vid = filter->l2_ovlan;
3433         if (enables &
3434             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IVLAN_VID)
3435                 req.ivlan_vid = filter->l2_ivlan;
3436         if (enables &
3437             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_ETHERTYPE)
3438                 req.ethertype = rte_cpu_to_be_16(filter->ethertype);
3439         if (enables &
3440             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IP_PROTOCOL)
3441                 req.ip_protocol = filter->ip_protocol;
3442         if (enables &
3443             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_IPADDR)
3444                 req.src_ipaddr[0] = rte_cpu_to_be_32(filter->src_ipaddr[0]);
3445         if (enables &
3446             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_IPADDR)
3447                 req.dst_ipaddr[0] = rte_cpu_to_be_32(filter->dst_ipaddr[0]);
3448         if (enables &
3449             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_PORT)
3450                 req.src_port = rte_cpu_to_be_16(filter->src_port);
3451         if (enables &
3452             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_PORT)
3453                 req.dst_port = rte_cpu_to_be_16(filter->dst_port);
3454         if (enables &
3455             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_MIRROR_VNIC_ID)
3456                 req.mirror_vnic_id = filter->mirror_vnic_id;
3457
3458         req.enables = rte_cpu_to_le_32(enables);
3459
3460         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3461
3462         HWRM_CHECK_RESULT();
3463
3464         filter->fw_em_filter_id = rte_le_to_cpu_64(resp->em_filter_id);
3465         HWRM_UNLOCK();
3466
3467         return rc;
3468 }
3469
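/*
 * Illustrative sketch (values are examples only): steering an IPv4/TCP
 * 4-tuple to a destination VNIC with an exact-match flow. Real callers
 * derive these fields from an rte_flow request.
 *
 *	filter->flags = HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_PATH_RX;
 *	filter->enables =
 *		HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_ETHERTYPE |
 *		HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IP_PROTOCOL |
 *		HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_IPADDR |
 *		HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_IPADDR |
 *		HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_PORT |
 *		HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_PORT;
 *	filter->ethertype = 0x0800;	IPv4
 *	filter->ip_protocol = 6;	TCP
 *	filter->src_ipaddr[0] = src_ip;
 *	filter->dst_ipaddr[0] = dst_ip;
 *	filter->src_port = src_port;
 *	filter->dst_port = dst_port;
 *	rc = bnxt_hwrm_set_em_filter(bp, vnic->fw_vnic_id, filter);
 */
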
3470 int bnxt_hwrm_clear_em_filter(struct bnxt *bp, struct bnxt_filter_info *filter)
3471 {
3472         int rc = 0;
3473         struct hwrm_cfa_em_flow_free_input req = {.req_type = 0 };
3474         struct hwrm_cfa_em_flow_free_output *resp = bp->hwrm_cmd_resp_addr;
3475
3476         if (filter->fw_em_filter_id == UINT64_MAX)
3477                 return 0;
3478
        RTE_LOG(DEBUG, PMD, "Clear EM filter\n");
3480         HWRM_PREP(req, CFA_EM_FLOW_FREE);
3481
3482         req.em_filter_id = rte_cpu_to_le_64(filter->fw_em_filter_id);
3483
3484         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3485
3486         HWRM_CHECK_RESULT();
3487         HWRM_UNLOCK();
3488
        filter->fw_em_filter_id = UINT64_MAX;
        filter->fw_l2_filter_id = UINT64_MAX;
3491
3492         return 0;
3493 }
3494
3495 int bnxt_hwrm_set_ntuple_filter(struct bnxt *bp,
3496                          uint16_t dst_id,
3497                          struct bnxt_filter_info *filter)
3498 {
3499         int rc = 0;
3500         struct hwrm_cfa_ntuple_filter_alloc_input req = {.req_type = 0 };
3501         struct hwrm_cfa_ntuple_filter_alloc_output *resp =
3502                                                 bp->hwrm_cmd_resp_addr;
3503         uint32_t enables = 0;
3504
3505         if (filter->fw_ntuple_filter_id != UINT64_MAX)
3506                 bnxt_hwrm_clear_ntuple_filter(bp, filter);
3507
3508         HWRM_PREP(req, CFA_NTUPLE_FILTER_ALLOC);
3509
3510         req.flags = rte_cpu_to_le_32(filter->flags);
3511
3512         enables = filter->enables |
3513               HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_ID;
3514         req.dst_id = rte_cpu_to_le_16(dst_id);
3515
3517         if (filter->ip_addr_type) {
3518                 req.ip_addr_type = filter->ip_addr_type;
3519                 enables |=
3520                         HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_IPADDR_TYPE;
3521         }
3522         if (enables &
3523             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID)
3524                 req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);
3525         if (enables &
3526             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_MACADDR)
3527                 memcpy(req.src_macaddr, filter->src_macaddr,
3528                        ETHER_ADDR_LEN);
        /*
         * if (enables &
         *     HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_MACADDR)
         *         memcpy(req.dst_macaddr, filter->dst_macaddr,
         *                ETHER_ADDR_LEN);
         */
3533         if (enables &
3534             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_ETHERTYPE)
3535                 req.ethertype = rte_cpu_to_be_16(filter->ethertype);
3536         if (enables &
3537             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_IP_PROTOCOL)
3538                 req.ip_protocol = filter->ip_protocol;
3539         if (enables &
3540             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_IPADDR)
3541                 req.src_ipaddr[0] = rte_cpu_to_le_32(filter->src_ipaddr[0]);
3542         if (enables &
3543             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_IPADDR_MASK)
3544                 req.src_ipaddr_mask[0] =
3545                         rte_cpu_to_le_32(filter->src_ipaddr_mask[0]);
3546         if (enables &
3547             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_IPADDR)
3548                 req.dst_ipaddr[0] = rte_cpu_to_le_32(filter->dst_ipaddr[0]);
3549         if (enables &
3550             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_IPADDR_MASK)
3551                 req.dst_ipaddr_mask[0] =
                        rte_cpu_to_le_32(filter->dst_ipaddr_mask[0]);
3553         if (enables &
3554             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_PORT)
3555                 req.src_port = rte_cpu_to_le_16(filter->src_port);
3556         if (enables &
3557             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_PORT_MASK)
3558                 req.src_port_mask = rte_cpu_to_le_16(filter->src_port_mask);
3559         if (enables &
3560             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_PORT)
3561                 req.dst_port = rte_cpu_to_le_16(filter->dst_port);
3562         if (enables &
3563             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_PORT_MASK)
3564                 req.dst_port_mask = rte_cpu_to_le_16(filter->dst_port_mask);
3565         if (enables &
3566             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_MIRROR_VNIC_ID)
3567                 req.mirror_vnic_id = filter->mirror_vnic_id;
3568
3569         req.enables = rte_cpu_to_le_32(enables);
3570
3571         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3572
3573         HWRM_CHECK_RESULT();
3574
3575         filter->fw_ntuple_filter_id = rte_le_to_cpu_64(resp->ntuple_filter_id);
3576         HWRM_UNLOCK();
3577
3578         return rc;
3579 }
3580
3581 int bnxt_hwrm_clear_ntuple_filter(struct bnxt *bp,
3582                                 struct bnxt_filter_info *filter)
3583 {
3584         int rc = 0;
3585         struct hwrm_cfa_ntuple_filter_free_input req = {.req_type = 0 };
3586         struct hwrm_cfa_ntuple_filter_free_output *resp =
3587                                                 bp->hwrm_cmd_resp_addr;
3588
3589         if (filter->fw_ntuple_filter_id == UINT64_MAX)
3590                 return 0;
3591
3592         HWRM_PREP(req, CFA_NTUPLE_FILTER_FREE);
3593
3594         req.ntuple_filter_id = rte_cpu_to_le_64(filter->fw_ntuple_filter_id);
3595
3596         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3597
3598         HWRM_CHECK_RESULT();
3599         HWRM_UNLOCK();
3600
        filter->fw_ntuple_filter_id = UINT64_MAX;
3602
3603         return 0;
3604 }