New upstream version 16.11.8
[deb_dpdk.git] / drivers / net / bnxt / bnxt_hwrm.c
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) Broadcom Limited.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Broadcom Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33
34 #include <rte_byteorder.h>
35 #include <rte_common.h>
36 #include <rte_cycles.h>
37 #include <rte_malloc.h>
38 #include <rte_memzone.h>
39 #include <rte_version.h>
40
41 #include "bnxt.h"
42 #include "bnxt_cpr.h"
43 #include "bnxt_filter.h"
44 #include "bnxt_hwrm.h"
45 #include "bnxt_rxq.h"
46 #include "bnxt_rxr.h"
47 #include "bnxt_ring.h"
48 #include "bnxt_txq.h"
49 #include "bnxt_txr.h"
50 #include "bnxt_vnic.h"
51 #include "hsi_struct_def_dpdk.h"
52
53 #define HWRM_CMD_TIMEOUT                2000
54
55 /*
56  * HWRM Functions (sent to HWRM)
57  * These are named bnxt_hwrm_*() and return -1 if bnxt_hwrm_send_message()
58  * fails (ie: a timeout), and a positive non-zero HWRM error code if the HWRM
59  * command was failed by the ChiMP.
60  */
61
/*
 * Write an HWRM request into the BAR0 communication channel, ring the
 * doorbell, and poll for the firmware's response.
 *
 * Caller must hold bp->hwrm_lock (hence "_locked") since the channel and
 * the single response buffer are shared per device.
 *
 * Returns 0 once a response with a valid key arrived, -1 if the firmware
 * did not respond within HWRM_CMD_TIMEOUT polls (600us each, ~1.2s total).
 * A return of 0 only means a response arrived; the HWRM status inside the
 * response must still be checked by the caller (see HWRM_CHECK_RESULT).
 */
static int bnxt_hwrm_send_message_locked(struct bnxt *bp, void *msg,
					uint32_t msg_len)
{
	unsigned int i;
	struct input *req = msg;
	struct output *resp = bp->hwrm_cmd_resp_addr;
	uint32_t *data = msg;
	uint8_t *bar;
	uint8_t *valid;

	/* Write request msg to hwrm channel */
	/* NOTE(review): copies in 32-bit words -- assumes msg_len is a
	 * multiple of 4 (true if all HWRM request structs are padded to a
	 * 4-byte multiple; TODO confirm). */
	for (i = 0; i < msg_len; i += 4) {
		bar = (uint8_t *)bp->bar0 + i;
		*(volatile uint32_t *)bar = *data;
		data++;
	}

	/* Zero the rest of the request space */
	for (; i < bp->max_req_len; i += 4) {
		bar = (uint8_t *)bp->bar0 + i;
		*(volatile uint32_t *)bar = 0;
	}

	/* Ring channel doorbell */
	/* Offset 0x100 in BAR0 is the communication-channel trigger register. */
	bar = (uint8_t *)bp->bar0 + 0x100;
	*(volatile uint32_t *)bar = 1;

	/* Poll for the valid bit */
	for (i = 0; i < HWRM_CMD_TIMEOUT; i++) {
		/* Sanity check on the resp->resp_len */
		rte_rmb();
		if (resp->resp_len && resp->resp_len <=
				bp->max_resp_len) {
			/* Last byte of resp contains the valid key */
			valid = (uint8_t *)resp + resp->resp_len - 1;
			if (*valid == HWRM_RESP_VALID_KEY)
				break;
		}
		rte_delay_us(600);
	}

	if (i >= HWRM_CMD_TIMEOUT) {
		/* NOTE(review): req->req_type is little-endian here; on a
		 * big-endian host the logged value would be byte-swapped. */
		RTE_LOG(ERR, PMD, "Error sending msg %x\n",
			req->req_type);
		goto err_ret;
	}
	return 0;

err_ret:
	return -1;
}
113
114 static int bnxt_hwrm_send_message(struct bnxt *bp, void *msg, uint32_t msg_len)
115 {
116         int rc;
117
118         rte_spinlock_lock(&bp->hwrm_lock);
119         rc = bnxt_hwrm_send_message_locked(bp, msg, msg_len);
120         rte_spinlock_unlock(&bp->hwrm_lock);
121         return rc;
122 }
123
/*
 * Initialize the common header of HWRM request 'req' (type, completion
 * ring, sequence id, target, response DMA address) and clear the shared
 * response buffer.  Implicitly uses a 'struct bnxt *bp' in the caller's
 * scope.  The 'resp' argument is accepted for call-site symmetry but is
 * not referenced.
 *
 * Wrapped in do { } while (0) so the multi-statement expansion behaves
 * as a single statement inside unbraced if/else bodies.
 */
#define HWRM_PREP(req, type, cr, resp) \
	do { \
		memset(bp->hwrm_cmd_resp_addr, 0, bp->max_resp_len); \
		req.req_type = rte_cpu_to_le_16(HWRM_##type); \
		req.cmpl_ring = rte_cpu_to_le_16(cr); \
		req.seq_id = rte_cpu_to_le_16(bp->hwrm_cmd_seq++); \
		req.target_id = rte_cpu_to_le_16(0xffff); \
		req.resp_addr = rte_cpu_to_le_64(bp->hwrm_cmd_resp_dma_addr); \
	} while (0)
131
/*
 * Check the outcome of an HWRM exchange.  Implicitly uses locals 'rc'
 * (return of bnxt_hwrm_send_message*) and 'resp' (response buffer) from
 * the caller's scope, and RETURNS FROM THE ENCLOSING FUNCTION on error:
 * -EACCES for a resource-access-denied HWRM status, -EINVAL for any
 * other positive code, rc unchanged if already negative.
 *
 * Because it returns directly, it must not be used while holding a lock
 * or owning a resource that still needs releasing.
 */
#define HWRM_CHECK_RESULT \
	{ \
		if (rc) { \
			RTE_LOG(ERR, PMD, "%s failed rc:%d\n", \
				__func__, rc); \
			if (rc == HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED) \
				rc = -EACCES; \
			else if (rc > 0) \
				rc = -EINVAL; \
			return rc; \
		} \
		if (resp->error_code) { \
			rc = rte_le_to_cpu_16(resp->error_code); \
			RTE_LOG(ERR, PMD, "%s error %d\n", __func__, rc); \
			if (rc == HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED) \
				rc = -EACCES; \
			else if (rc > 0) \
				rc = -EINVAL; \
			return rc; \
		} \
	}
153
/*
 * Clear the RX mask of a VNIC: sends CFA_L2_SET_RX_MASK with mask = 0,
 * disabling promiscuous/all-multicast reception for that VNIC.
 * Returns 0 on success or a negative errno (via HWRM_CHECK_RESULT).
 */
int bnxt_hwrm_cfa_l2_clear_rx_mask(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
	int rc = 0;
	struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
	struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, CFA_L2_SET_RX_MASK, -1, resp);
	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
	req.mask = 0;	/* 0 needs no endian conversion */

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	return rc;
}
170
/*
 * Program the RX mask of a VNIC from vnic->flags: enables promiscuous
 * and/or all-multicast reception as requested.
 * Returns 0 on success or a negative errno (via HWRM_CHECK_RESULT).
 */
int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
	int rc = 0;
	struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
	struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;
	uint32_t mask = 0;

	HWRM_PREP(req, CFA_L2_SET_RX_MASK, -1, resp);
	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);

	/* FIXME add multicast flag, when multicast adding options is supported
	 * by ethtool.
	 */
	if (vnic->flags & BNXT_VNIC_INFO_PROMISC)
		mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_PROMISCUOUS;
	if (vnic->flags & BNXT_VNIC_INFO_ALLMULTI)
		mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ALL_MCAST;
	req.mask = rte_cpu_to_le_32(mask);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	return rc;
}
196
197 int bnxt_hwrm_clear_filter(struct bnxt *bp,
198                            struct bnxt_filter_info *filter)
199 {
200         int rc = 0;
201         struct hwrm_cfa_l2_filter_free_input req = {.req_type = 0 };
202         struct hwrm_cfa_l2_filter_free_output *resp = bp->hwrm_cmd_resp_addr;
203
204         if (filter->fw_l2_filter_id == UINT64_MAX)
205                 return 0;
206
207         HWRM_PREP(req, CFA_L2_FILTER_FREE, -1, resp);
208
209         req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);
210
211         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
212
213         HWRM_CHECK_RESULT;
214
215         filter->fw_l2_filter_id = -1;
216
217         return 0;
218 }
219
/*
 * Allocate an L2 filter in firmware directing matching traffic to the
 * given VNIC.  If the filter already has a firmware id, the old filter
 * is freed first.  On success filter->fw_l2_filter_id holds the new id.
 * Returns 0 on success or a negative errno (via HWRM_CHECK_RESULT).
 */
int bnxt_hwrm_set_filter(struct bnxt *bp,
			 struct bnxt_vnic_info *vnic,
			 struct bnxt_filter_info *filter)
{
	int rc = 0;
	struct hwrm_cfa_l2_filter_alloc_input req = {.req_type = 0 };
	struct hwrm_cfa_l2_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
	uint32_t enables = 0;

	/* NOTE(review): failure of the free is ignored here; the alloc
	 * below proceeds regardless -- confirm this is intentional. */
	if (filter->fw_l2_filter_id != UINT64_MAX)
		bnxt_hwrm_clear_filter(bp, filter);

	HWRM_PREP(req, CFA_L2_FILTER_ALLOC, -1, resp);

	req.flags = rte_cpu_to_le_32(filter->flags);

	/* DST_ID is always enabled; the remaining fields are copied only
	 * when the caller set the corresponding enable bit. */
	enables = filter->enables |
	      HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_DST_ID;
	req.dst_id = rte_cpu_to_le_16(vnic->fw_vnic_id);

	if (enables &
	    HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR)
		memcpy(req.l2_addr, filter->l2_addr,
		       ETHER_ADDR_LEN);
	if (enables &
	    HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK)
		memcpy(req.l2_addr_mask, filter->l2_addr_mask,
		       ETHER_ADDR_LEN);
	if (enables &
	    HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN)
		req.l2_ovlan = filter->l2_ovlan;
	if (enables &
	    HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN_MASK)
		req.l2_ovlan_mask = filter->l2_ovlan_mask;

	req.enables = rte_cpu_to_le_32(enables);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	filter->fw_l2_filter_id = rte_le_to_cpu_64(resp->l2_filter_id);

	return rc;
}
265
/*
 * Forward an encapsulated HWRM command (e.g. one received from a VF) to
 * the firmware for execution via EXEC_FWD_RESP.  fwd_cmd must point at
 * least sizeof(req.encap_request) readable bytes.
 * Returns 0 on success or a negative errno (via HWRM_CHECK_RESULT).
 */
int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, void *fwd_cmd)
{
	int rc;
	struct hwrm_exec_fwd_resp_input req = {.req_type = 0 };
	struct hwrm_exec_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, EXEC_FWD_RESP, -1, resp);

	memcpy(req.encap_request, fwd_cmd,
	       sizeof(req.encap_request));

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	return rc;
}
283
/*
 * Query this function's capabilities (FUNC_QCAPS with fid 0xffff = self)
 * and cache the resource limits in bp->pf or bp->vf depending on whether
 * the device is a physical or virtual function.
 * Returns 0 on success or a negative errno (via HWRM_CHECK_RESULT).
 */
int bnxt_hwrm_func_qcaps(struct bnxt *bp)
{
	int rc = 0;
	struct hwrm_func_qcaps_input req = {.req_type = 0 };
	struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, FUNC_QCAPS, -1, resp);

	/* 0xffff = query the calling function itself */
	req.fid = rte_cpu_to_le_16(0xffff);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	bp->max_ring_grps = rte_le_to_cpu_32(resp->max_hw_ring_grps);
	if (BNXT_PF(bp)) {
		struct bnxt_pf_info *pf = &bp->pf;

		pf->fw_fid = rte_le_to_cpu_32(resp->fid);
		pf->port_id = resp->port_id;
		memcpy(pf->mac_addr, resp->mac_address, ETHER_ADDR_LEN);
		pf->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx);
		pf->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings);
		pf->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings);
		pf->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings);
		pf->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
		pf->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
		pf->first_vf_id = rte_le_to_cpu_16(resp->first_vf_id);
		pf->max_vfs = rte_le_to_cpu_16(resp->max_vfs);
	} else {
		struct bnxt_vf_info *vf = &bp->vf;

		vf->fw_fid = rte_le_to_cpu_32(resp->fid);
		/* NOTE(review): PF branch passes resp->mac_address, VF
		 * branch &resp->mac_address -- same address either way if
		 * mac_address is an array; confirm and unify. */
		memcpy(vf->mac_addr, &resp->mac_address, ETHER_ADDR_LEN);
		vf->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx);
		vf->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings);
		vf->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings);
		vf->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings);
		vf->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
		vf->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
	}

	return rc;
}
328
/*
 * Ask the firmware to reset this function's state (FUNC_RESET with no
 * optional fields enabled).
 * Returns 0 on success or a negative errno (via HWRM_CHECK_RESULT).
 */
int bnxt_hwrm_func_reset(struct bnxt *bp)
{
	int rc = 0;
	struct hwrm_func_reset_input req = {.req_type = 0 };
	struct hwrm_func_reset_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, FUNC_RESET, -1, resp);

	req.enables = rte_cpu_to_le_32(0);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	return rc;
}
345
/*
 * Register this driver with the firmware (FUNC_DRV_RGTR): reports the
 * DPDK version, the VF request-forwarding bitmap, and requests async
 * event forwarding.  Idempotent: returns 0 immediately if the
 * BNXT_FLAG_REGISTERED flag is already set; sets it on success.
 * Returns 0 on success or a negative errno (via HWRM_CHECK_RESULT).
 */
int bnxt_hwrm_func_driver_register(struct bnxt *bp, uint32_t flags,
				   uint32_t *vf_req_fwd)
{
	int rc;
	struct hwrm_func_drv_rgtr_input req = {.req_type = 0 };
	struct hwrm_func_drv_rgtr_output *resp = bp->hwrm_cmd_resp_addr;

	if (bp->flags & BNXT_FLAG_REGISTERED)
		return 0;

	HWRM_PREP(req, FUNC_DRV_RGTR, -1, resp);
	/* NOTE(review): flags/enables/version fields are written without
	 * rte_cpu_to_le_* -- benign on little-endian hosts only; confirm. */
	req.flags = flags;
	req.enables = HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VER |
			HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_ASYNC_EVENT_FWD;
	req.ver_maj = RTE_VER_YEAR;
	req.ver_min = RTE_VER_MONTH;
	req.ver_upd = RTE_VER_MINOR;

	memcpy(req.vf_req_fwd, vf_req_fwd, sizeof(req.vf_req_fwd));

	req.async_event_fwd[0] |= rte_cpu_to_le_32(0x1);   /* TODO: Use MACRO */

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	bp->flags |= BNXT_FLAG_REGISTERED;

	return rc;
}
376
377 int bnxt_hwrm_ver_get(struct bnxt *bp)
378 {
379         int rc = 0;
380         struct hwrm_ver_get_input req = {.req_type = 0 };
381         struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr;
382         uint32_t my_version;
383         uint32_t fw_version;
384         uint16_t max_resp_len;
385         char type[RTE_MEMZONE_NAMESIZE];
386
387         HWRM_PREP(req, VER_GET, -1, resp);
388
389         req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
390         req.hwrm_intf_min = HWRM_VERSION_MINOR;
391         req.hwrm_intf_upd = HWRM_VERSION_UPDATE;
392
393         /*
394          * Hold the lock since we may be adjusting the response pointers.
395          */
396         rte_spinlock_lock(&bp->hwrm_lock);
397         rc = bnxt_hwrm_send_message_locked(bp, &req, sizeof(req));
398
399         HWRM_CHECK_RESULT;
400
401         RTE_LOG(INFO, PMD, "%d.%d.%d:%d.%d.%d\n",
402                 resp->hwrm_intf_maj, resp->hwrm_intf_min,
403                 resp->hwrm_intf_upd,
404                 resp->hwrm_fw_maj, resp->hwrm_fw_min, resp->hwrm_fw_bld);
405         RTE_LOG(INFO, PMD, "Driver HWRM version: %d.%d.%d\n",
406                 HWRM_VERSION_MAJOR, HWRM_VERSION_MINOR, HWRM_VERSION_UPDATE);
407
408         my_version = HWRM_VERSION_MAJOR << 16;
409         my_version |= HWRM_VERSION_MINOR << 8;
410         my_version |= HWRM_VERSION_UPDATE;
411
412         fw_version = resp->hwrm_intf_maj << 16;
413         fw_version |= resp->hwrm_intf_min << 8;
414         fw_version |= resp->hwrm_intf_upd;
415
416         if (resp->hwrm_intf_maj != HWRM_VERSION_MAJOR) {
417                 RTE_LOG(ERR, PMD, "Unsupported firmware API version\n");
418                 rc = -EINVAL;
419                 goto error;
420         }
421
422         if (my_version != fw_version) {
423                 RTE_LOG(INFO, PMD, "BNXT Driver/HWRM API mismatch.\n");
424                 if (my_version < fw_version) {
425                         RTE_LOG(INFO, PMD,
426                                 "Firmware API version is newer than driver.\n");
427                         RTE_LOG(INFO, PMD,
428                                 "The driver may be missing features.\n");
429                 } else {
430                         RTE_LOG(INFO, PMD,
431                                 "Firmware API version is older than driver.\n");
432                         RTE_LOG(INFO, PMD,
433                                 "Not all driver features may be functional.\n");
434                 }
435         }
436
437         if (bp->max_req_len > resp->max_req_win_len) {
438                 RTE_LOG(ERR, PMD, "Unsupported request length\n");
439                 rc = -EINVAL;
440         }
441         bp->max_req_len = resp->max_req_win_len;
442         max_resp_len = resp->max_resp_len;
443         if (bp->max_resp_len != max_resp_len) {
444                 sprintf(type, "bnxt_hwrm_%04x:%02x:%02x:%02x",
445                         bp->pdev->addr.domain, bp->pdev->addr.bus,
446                         bp->pdev->addr.devid, bp->pdev->addr.function);
447
448                 rte_free(bp->hwrm_cmd_resp_addr);
449
450                 bp->hwrm_cmd_resp_addr = rte_malloc(type, max_resp_len, 0);
451                 if (bp->hwrm_cmd_resp_addr == NULL) {
452                         rc = -ENOMEM;
453                         goto error;
454                 }
455                 bp->hwrm_cmd_resp_dma_addr =
456                         rte_malloc_virt2phy(bp->hwrm_cmd_resp_addr);
457                 bp->max_resp_len = max_resp_len;
458         }
459
460 error:
461         rte_spinlock_unlock(&bp->hwrm_lock);
462         return rc;
463 }
464
/*
 * Unregister this driver from the firmware (FUNC_DRV_UNRGTR).
 * Idempotent: returns 0 immediately if the driver was never registered;
 * clears BNXT_FLAG_REGISTERED on success.
 * Returns 0 on success or a negative errno (via HWRM_CHECK_RESULT).
 */
int bnxt_hwrm_func_driver_unregister(struct bnxt *bp, uint32_t flags)
{
	int rc;
	struct hwrm_func_drv_unrgtr_input req = {.req_type = 0 };
	struct hwrm_func_drv_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;

	if (!(bp->flags & BNXT_FLAG_REGISTERED))
		return 0;

	HWRM_PREP(req, FUNC_DRV_UNRGTR, -1, resp);
	req.flags = flags;

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	bp->flags &= ~BNXT_FLAG_REGISTERED;

	return rc;
}
485
/*
 * Configure the PHY (PORT_PHY_CFG) from the requested link settings:
 * either bring the link up with forced-speed or autoneg parameters, or
 * force the link down when conf->link_up is 0.
 * Returns 0 on success or a negative errno (via HWRM_CHECK_RESULT).
 */
static int bnxt_hwrm_port_phy_cfg(struct bnxt *bp, struct bnxt_link_info *conf)
{
	int rc = 0;
	struct hwrm_port_phy_cfg_input req = {0};
	struct hwrm_port_phy_cfg_output *resp = bp->hwrm_cmd_resp_addr;
	uint32_t enables = 0;

	HWRM_PREP(req, PORT_PHY_CFG, -1, resp);

	if (conf->link_up) {
		/* Setting Fixed Speed. But AutoNeg is ON, So disable it */
		if (bp->link_info.auto_mode && conf->link_speed) {
			req.auto_mode = HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_NONE;
			RTE_LOG(DEBUG, PMD, "Disabling AutoNeg\n");
		}

		req.flags = rte_cpu_to_le_32(conf->phy_flags);
		req.force_link_speed = rte_cpu_to_le_16(conf->link_speed);
		enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_MODE;
		/*
		 * Note, ChiMP FW 20.2.1 and 20.2.2 return an error when we set
		 * any auto mode, even "none".
		 */
		if (!conf->link_speed) {
			/* No speeds specified. Enable AutoNeg - all speeds */
			req.auto_mode =
				HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_ALL_SPEEDS;
		}
		/* AutoNeg - Advertise speeds specified. */
		if (conf->auto_link_speed_mask &&
		    !(conf->phy_flags & HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE)) {
			req.auto_mode =
				HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_SPEED_MASK;
			/* NOTE(review): mask written without endian
			 * conversion, unlike link_speed above -- confirm. */
			req.auto_link_speed_mask =
				conf->auto_link_speed_mask;
			enables |=
			HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED_MASK;
		}

		req.auto_duplex = conf->duplex;
		enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_DUPLEX;
		req.auto_pause = conf->auto_pause;
		req.force_pause = conf->force_pause;
		/* Set force_pause if there is no auto or if there is a force */
		if (req.auto_pause && !req.force_pause)
			enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_PAUSE;
		else
			enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_FORCE_PAUSE;

		req.enables = rte_cpu_to_le_32(enables);
	} else {
		req.flags =
		rte_cpu_to_le_32(HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE_LINK_DOWN);
		RTE_LOG(INFO, PMD, "Force Link Down\n");
	}

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	return rc;
}
548
/*
 * Query the current PHY/link state (PORT_PHY_QCFG) and copy the fields
 * of interest into *link_info, converting multi-byte values from
 * little-endian.  link_info->link_up is derived from the link status.
 * Returns 0 on success or a negative errno (via HWRM_CHECK_RESULT).
 */
static int bnxt_hwrm_port_phy_qcfg(struct bnxt *bp,
				   struct bnxt_link_info *link_info)
{
	int rc = 0;
	struct hwrm_port_phy_qcfg_input req = {0};
	struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, PORT_PHY_QCFG, -1, resp);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	link_info->phy_link_status = resp->link;
	link_info->link_up =
		(link_info->phy_link_status ==
		 HWRM_PORT_PHY_QCFG_OUTPUT_LINK_LINK) ? 1 : 0;
	link_info->link_speed = rte_le_to_cpu_16(resp->link_speed);
	link_info->duplex = resp->duplex;
	link_info->pause = resp->pause;
	link_info->auto_pause = resp->auto_pause;
	link_info->force_pause = resp->force_pause;
	link_info->auto_mode = resp->auto_mode;
	link_info->phy_type = resp->phy_type;
	link_info->media_type = resp->media_type;

	link_info->support_speeds = rte_le_to_cpu_16(resp->support_speeds);
	link_info->auto_link_speed = rte_le_to_cpu_16(resp->auto_link_speed);
	link_info->preemphasis = rte_le_to_cpu_32(resp->preemphasis);
	link_info->force_link_speed = rte_le_to_cpu_16(resp->force_link_speed);
	link_info->phy_ver[0] = resp->phy_maj;
	link_info->phy_ver[1] = resp->phy_min;
	link_info->phy_ver[2] = resp->phy_bld;

	return rc;
}
585
/*
 * Query the CoS queue configuration (QUEUE_QPORTCFG) and cache the id
 * and service profile of each of the 8 queues in bp->cos_queue[].
 * Returns 0 on success or a negative errno (via HWRM_CHECK_RESULT).
 */
int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
{
	int rc = 0;
	struct hwrm_queue_qportcfg_input req = {.req_type = 0 };
	struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, QUEUE_QPORTCFG, -1, resp);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

/* Token-pasting helper: expands x into the numbered response fields
 * queue_id<x> / queue_id<x>_service_profile. */
#define GET_QUEUE_INFO(x) \
	bp->cos_queue[x].id = resp->queue_id##x; \
	bp->cos_queue[x].profile = resp->queue_id##x##_service_profile

	GET_QUEUE_INFO(0);
	GET_QUEUE_INFO(1);
	GET_QUEUE_INFO(2);
	GET_QUEUE_INFO(3);
	GET_QUEUE_INFO(4);
	GET_QUEUE_INFO(5);
	GET_QUEUE_INFO(6);
	GET_QUEUE_INFO(7);

	return rc;
}
613
/*
 * Allocate a firmware ring (TX, RX or completion) described by *ring.
 * On success the firmware ring id is stored in ring->fw_ring_id.
 *
 * map_index associates the ring with a doorbell; stats_ctx_id (if not
 * INVALID_STATS_CTX_ID) attaches a statistics context; cmpl_ring_id is
 * the completion ring for TX/RX rings.
 *
 * Returns 0 on success, the HWRM error code / -1 on failure.  Error
 * handling is done inline (not via HWRM_CHECK_RESULT) so the failing
 * ring type can be named in the log.
 */
int bnxt_hwrm_ring_alloc(struct bnxt *bp,
			 struct bnxt_ring *ring,
			 uint32_t ring_type, uint32_t map_index,
			 uint32_t stats_ctx_id, uint32_t cmpl_ring_id)
{
	int rc = 0;
	struct hwrm_ring_alloc_input req = {.req_type = 0 };
	struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, RING_ALLOC, -1, resp);

	req.enables = rte_cpu_to_le_32(0);

	req.page_tbl_addr = rte_cpu_to_le_64(ring->bd_dma);
	req.fbo = rte_cpu_to_le_32(0);
	/* Association of ring index with doorbell index */
	req.logical_id = rte_cpu_to_le_16(map_index);

	switch (ring_type) {
	case HWRM_RING_ALLOC_INPUT_RING_TYPE_TX:
		/* TX rings additionally carry a CoS queue id, then share
		 * the RX setup below. */
		req.queue_id = bp->cos_queue[0].id;
		/* FALLTHROUGH */
	case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX:
		req.ring_type = ring_type;
		req.cmpl_ring_id = rte_cpu_to_le_16(cmpl_ring_id);
		req.length = rte_cpu_to_le_32(ring->ring_size);
		req.stat_ctx_id = rte_cpu_to_le_16(stats_ctx_id);
		if (stats_ctx_id != INVALID_STATS_CTX_ID)
			req.enables =
			rte_cpu_to_le_32(rte_le_to_cpu_32(req.enables) |
			HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID);
		break;
	case HWRM_RING_ALLOC_INPUT_RING_TYPE_CMPL:
		req.ring_type = ring_type;
		/*
		 * TODO: Some HWRM versions crash with
		 * HWRM_RING_ALLOC_INPUT_INT_MODE_POLL
		 */
		req.int_mode = HWRM_RING_ALLOC_INPUT_INT_MODE_MSIX;
		req.length = rte_cpu_to_le_32(ring->ring_size);
		break;
	default:
		RTE_LOG(ERR, PMD, "hwrm alloc invalid ring type %d\n",
			ring_type);
		return -1;
	}

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	if (rc || resp->error_code) {
		if (rc == 0 && resp->error_code)
			rc = rte_le_to_cpu_16(resp->error_code);
		/* NOTE(review): switch below uses HWRM_RING_FREE_* ring-type
		 * constants in an alloc path -- presumably identical values
		 * to the ALLOC ones; confirm against hsi_struct_def_dpdk.h. */
		switch (ring_type) {
		case HWRM_RING_FREE_INPUT_RING_TYPE_CMPL:
			RTE_LOG(ERR, PMD,
				"hwrm_ring_alloc cp failed. rc:%d\n", rc);
			return rc;
		case HWRM_RING_FREE_INPUT_RING_TYPE_RX:
			RTE_LOG(ERR, PMD,
				"hwrm_ring_alloc rx failed. rc:%d\n", rc);
			return rc;
		case HWRM_RING_FREE_INPUT_RING_TYPE_TX:
			RTE_LOG(ERR, PMD,
				"hwrm_ring_alloc tx failed. rc:%d\n", rc);
			return rc;
		default:
			RTE_LOG(ERR, PMD, "Invalid ring. rc:%d\n", rc);
			return rc;
		}
	}

	ring->fw_ring_id = rte_le_to_cpu_16(resp->ring_id);
	return rc;
}
688
/*
 * Free a firmware ring identified by ring->fw_ring_id.  Errors are
 * handled inline (not via HWRM_CHECK_RESULT) so the ring type can be
 * named in the log.  Returns 0 on success, the HWRM/transport error
 * code otherwise.  Note: ring->fw_ring_id is not reset here; callers
 * are responsible for invalidating it.
 */
int bnxt_hwrm_ring_free(struct bnxt *bp,
			struct bnxt_ring *ring, uint32_t ring_type)
{
	int rc;
	struct hwrm_ring_free_input req = {.req_type = 0 };
	struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, RING_FREE, -1, resp);

	req.ring_type = ring_type;
	req.ring_id = rte_cpu_to_le_16(ring->fw_ring_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	if (rc || resp->error_code) {
		if (rc == 0 && resp->error_code)
			rc = rte_le_to_cpu_16(resp->error_code);

		switch (ring_type) {
		case HWRM_RING_FREE_INPUT_RING_TYPE_CMPL:
			RTE_LOG(ERR, PMD, "hwrm_ring_free cp failed. rc:%d\n",
				rc);
			return rc;
		case HWRM_RING_FREE_INPUT_RING_TYPE_RX:
			RTE_LOG(ERR, PMD, "hwrm_ring_free rx failed. rc:%d\n",
				rc);
			return rc;
		case HWRM_RING_FREE_INPUT_RING_TYPE_TX:
			RTE_LOG(ERR, PMD, "hwrm_ring_free tx failed. rc:%d\n",
				rc);
			return rc;
		default:
			RTE_LOG(ERR, PMD, "Invalid ring, rc:%d\n", rc);
			return rc;
		}
	}
	return 0;
}
727
/*
 * Allocate a hardware ring group from the rings already recorded in
 * bp->grp_info[idx] (completion, RX, aggregation, stats context).  On
 * success the firmware group id is stored back into grp_info[idx].
 * Returns 0 on success or a negative errno (via HWRM_CHECK_RESULT).
 */
int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp, unsigned int idx)
{
	int rc = 0;
	struct hwrm_ring_grp_alloc_input req = {.req_type = 0 };
	struct hwrm_ring_grp_alloc_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, RING_GRP_ALLOC, -1, resp);

	req.cr = rte_cpu_to_le_16(bp->grp_info[idx].cp_fw_ring_id);
	req.rr = rte_cpu_to_le_16(bp->grp_info[idx].rx_fw_ring_id);
	req.ar = rte_cpu_to_le_16(bp->grp_info[idx].ag_fw_ring_id);
	req.sc = rte_cpu_to_le_16(bp->grp_info[idx].fw_stats_ctx);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	bp->grp_info[idx].fw_grp_id =
	    rte_le_to_cpu_16(resp->ring_group_id);

	return rc;
}
750
/*
 * Free the hardware ring group recorded in bp->grp_info[idx] and mark
 * the slot invalid (INVALID_HW_RING_ID) on success.
 * Returns 0 on success or a negative errno (via HWRM_CHECK_RESULT).
 */
int bnxt_hwrm_ring_grp_free(struct bnxt *bp, unsigned int idx)
{
	int rc;
	struct hwrm_ring_grp_free_input req = {.req_type = 0 };
	struct hwrm_ring_grp_free_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, RING_GRP_FREE, -1, resp);

	req.ring_group_id = rte_cpu_to_le_16(bp->grp_info[idx].fw_grp_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	bp->grp_info[idx].fw_grp_id = INVALID_HW_RING_ID;
	return rc;
}
768
769 int bnxt_hwrm_stat_clear(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
770 {
771         int rc = 0;
772         struct hwrm_stat_ctx_clr_stats_input req = {.req_type = 0 };
773         struct hwrm_stat_ctx_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;
774
775         HWRM_PREP(req, STAT_CTX_CLR_STATS, -1, resp);
776
777         if (cpr->hw_stats_ctx_id == (uint32_t)HWRM_NA_SIGNATURE)
778                 return rc;
779
780         req.stat_ctx_id = rte_cpu_to_le_16(cpr->hw_stats_ctx_id);
781         req.seq_id = rte_cpu_to_le_16(bp->hwrm_cmd_seq++);
782
783         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
784
785         HWRM_CHECK_RESULT;
786
787         return rc;
788 }
789
790 int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp,
791                              struct bnxt_cp_ring_info *cpr, unsigned int idx)
792 {
793         int rc;
794         struct hwrm_stat_ctx_alloc_input req = {.req_type = 0 };
795         struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr;
796
797         HWRM_PREP(req, STAT_CTX_ALLOC, -1, resp);
798
799         req.update_period_ms = rte_cpu_to_le_32(1000);
800
801         req.seq_id = rte_cpu_to_le_16(bp->hwrm_cmd_seq++);
802         req.stats_dma_addr =
803             rte_cpu_to_le_64(cpr->hw_stats_map);
804
805         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
806
807         HWRM_CHECK_RESULT;
808
809         cpr->hw_stats_ctx_id = rte_le_to_cpu_16(resp->stat_ctx_id);
810         //Tx rings don't need grp_info entry. It is a Rx only attribute.
811         if (idx)
812                 bp->grp_info[idx].fw_stats_ctx = cpr->hw_stats_ctx_id;
813
814         return rc;
815 }
816
817 int bnxt_hwrm_stat_ctx_free(struct bnxt *bp,
818                             struct bnxt_cp_ring_info *cpr, unsigned int idx)
819 {
820         int rc;
821         struct hwrm_stat_ctx_free_input req = {.req_type = 0 };
822         struct hwrm_stat_ctx_free_output *resp = bp->hwrm_cmd_resp_addr;
823
824         HWRM_PREP(req, STAT_CTX_FREE, -1, resp);
825
826         req.stat_ctx_id = rte_cpu_to_le_16(cpr->hw_stats_ctx_id);
827         req.seq_id = rte_cpu_to_le_16(bp->hwrm_cmd_seq++);
828
829         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
830
831         HWRM_CHECK_RESULT;
832
833         cpr->hw_stats_ctx_id = HWRM_NA_SIGNATURE;
834         //Tx rings don't have a grp_info entry. It is a Rx only attribute.
835         if (idx)
836                 bp->grp_info[idx].fw_stats_ctx = cpr->hw_stats_ctx_id;
837
838         return rc;
839 }
840
/*
 * Allocate a VNIC in firmware (HWRM_VNIC_ALLOC) and record the returned
 * id in vnic->fw_vnic_id.  Before issuing the command, the FW ring group
 * ids for [start_grp_id, end_grp_id) are copied into vnic->fw_grp_ids.
 *
 * Returns 0 on success or the HWRM error code.
 */
int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
	int rc = 0, i, j;
	struct hwrm_vnic_alloc_input req = { 0 };
	struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr;

	/* map ring groups to this vnic */
	for (i = vnic->start_grp_id, j = 0; i < vnic->end_grp_id; i++, j++) {
		if (bp->grp_info[i].fw_grp_id == (uint16_t)HWRM_NA_SIGNATURE) {
			/* NOTE(review): the loop copies (end - start) entries
			 * but the message reports (end - start) + 1 as the
			 * required count; one of the two looks off by one —
			 * confirm the intended group range.
			 */
			RTE_LOG(ERR, PMD,
				"Not enough ring groups avail:%x req:%x\n", j,
				(vnic->end_grp_id - vnic->start_grp_id) + 1);
			break;
		}
		vnic->fw_grp_ids[j] = bp->grp_info[i].fw_grp_id;
	}

	/* No RSS/COS/LB context is bound to this VNIC yet. */
	vnic->fw_rss_cos_lb_ctx = (uint16_t)HWRM_NA_SIGNATURE;
	vnic->ctx_is_rss_cos_lb = HW_CONTEXT_NONE;

	HWRM_PREP(req, VNIC_ALLOC, -1, resp);

	if (vnic->func_default)
		req.flags =
			rte_cpu_to_le_32(HWRM_VNIC_ALLOC_INPUT_FLAGS_DEFAULT);
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	vnic->fw_vnic_id = rte_le_to_cpu_16(resp->vnic_id);
	RTE_LOG(DEBUG, PMD, "VNIC ID %x\n", vnic->fw_vnic_id);
	return rc;
}
874
875 int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic)
876 {
877         int rc = 0;
878         struct hwrm_vnic_cfg_input req = {.req_type = 0 };
879         struct hwrm_vnic_cfg_output *resp = bp->hwrm_cmd_resp_addr;
880
881         if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
882                 RTE_LOG(DEBUG, PMD, "VNIC ID %x\n", vnic->fw_vnic_id);
883                 return rc;
884         }
885
886         HWRM_PREP(req, VNIC_CFG, -1, resp);
887
888         /* Only RSS support for now TBD: COS & LB */
889         req.enables =
890             rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_ENABLES_DFLT_RING_GRP |
891                              HWRM_VNIC_CFG_INPUT_ENABLES_RSS_RULE |
892                              HWRM_VNIC_CFG_INPUT_ENABLES_MRU);
893         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
894         req.dflt_ring_grp =
895                 rte_cpu_to_le_16(bp->grp_info[vnic->start_grp_id].fw_grp_id);
896         req.rss_rule = rte_cpu_to_le_16(vnic->fw_rss_cos_lb_ctx);
897         req.cos_rule = rte_cpu_to_le_16(0xffff);
898         req.lb_rule = rte_cpu_to_le_16(0xffff);
899         req.mru = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
900                                    ETHER_CRC_LEN + VLAN_TAG_SIZE);
901         if (vnic->func_default)
902                 req.flags = 1;
903         if (vnic->vlan_strip)
904                 req.flags |=
905                     rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_VLAN_STRIP_MODE);
906
907         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
908
909         HWRM_CHECK_RESULT;
910
911         return rc;
912 }
913
/*
 * Allocate an RSS/COS/LB context for the VNIC
 * (HWRM_VNIC_RSS_COS_LB_CTX_ALLOC) and store the FW context id in
 * vnic->fw_rss_cos_lb_ctx.
 *
 * Returns 0 on success or the HWRM error code.
 */
int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
	int rc = 0;
	struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {.req_type = 0 };
	struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp =
						bp->hwrm_cmd_resp_addr;

	/* The request carries no parameters beyond the HWRM header. */
	HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_ALLOC, -1, resp);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	vnic->fw_rss_cos_lb_ctx = rte_le_to_cpu_16(resp->rss_cos_lb_ctx_id);
	RTE_LOG(DEBUG, PMD, "VNIC RSS Rule %x\n", vnic->fw_rss_cos_lb_ctx);

	return rc;
}
932
933 int bnxt_hwrm_vnic_ctx_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
934 {
935         int rc = 0;
936         struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {.req_type = 0 };
937         struct hwrm_vnic_rss_cos_lb_ctx_free_output *resp =
938                                                 bp->hwrm_cmd_resp_addr;
939
940         if (vnic->fw_rss_cos_lb_ctx == 0xffff) {
941                 RTE_LOG(DEBUG, PMD,
942                         "VNIC RSS Rule %x\n", vnic->fw_rss_cos_lb_ctx);
943                 return rc;
944         }
945
946         HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_FREE, -1, resp);
947
948         req.rss_cos_lb_ctx_id = rte_cpu_to_le_16(vnic->fw_rss_cos_lb_ctx);
949
950         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
951
952         HWRM_CHECK_RESULT;
953
954         vnic->fw_rss_cos_lb_ctx = INVALID_HW_RING_ID;
955
956         return rc;
957 }
958
/*
 * Free the VNIC in firmware (HWRM_VNIC_FREE) and invalidate
 * vnic->fw_vnic_id.  A no-op if the VNIC was never allocated.
 *
 * Returns 0 on success or the HWRM error code.
 */
int bnxt_hwrm_vnic_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
	int rc = 0;
	struct hwrm_vnic_free_input req = {.req_type = 0 };
	struct hwrm_vnic_free_output *resp = bp->hwrm_cmd_resp_addr;

	if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
		RTE_LOG(DEBUG, PMD, "VNIC FREE ID %x\n", vnic->fw_vnic_id);
		return rc;
	}

	HWRM_PREP(req, VNIC_FREE, -1, resp);

	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	vnic->fw_vnic_id = INVALID_HW_RING_ID;
	return rc;
}
981
/*
 * Program the VNIC's RSS configuration (HWRM_VNIC_RSS_CFG): hash type,
 * the DMA addresses of the ring-group indirection table and hash key,
 * and the RSS context id obtained from bnxt_hwrm_vnic_ctx_alloc().
 *
 * Returns 0 on success or the HWRM error code.
 */
int bnxt_hwrm_vnic_rss_cfg(struct bnxt *bp,
			   struct bnxt_vnic_info *vnic)
{
	int rc = 0;
	struct hwrm_vnic_rss_cfg_input req = {.req_type = 0 };
	struct hwrm_vnic_rss_cfg_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, VNIC_RSS_CFG, -1, resp);

	req.hash_type = rte_cpu_to_le_32(vnic->hash_type);

	/* FW reads the indirection table and hash key directly via DMA. */
	req.ring_grp_tbl_addr =
	    rte_cpu_to_le_64(vnic->rss_table_dma_addr);
	req.hash_key_tbl_addr =
	    rte_cpu_to_le_64(vnic->rss_hash_key_dma_addr);
	req.rss_ctx_idx = rte_cpu_to_le_16(vnic->fw_rss_cos_lb_ctx);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	return rc;
}
1005
1006 /*
1007  * HWRM utility functions
1008  */
1009
1010 int bnxt_clear_all_hwrm_stat_ctxs(struct bnxt *bp)
1011 {
1012         unsigned int i;
1013         int rc = 0;
1014
1015         for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
1016                 struct bnxt_tx_queue *txq;
1017                 struct bnxt_rx_queue *rxq;
1018                 struct bnxt_cp_ring_info *cpr;
1019
1020                 if (i >= bp->rx_cp_nr_rings) {
1021                         txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
1022                         cpr = txq->cp_ring;
1023                 } else {
1024                         rxq = bp->rx_queues[i];
1025                         cpr = rxq->cp_ring;
1026                 }
1027
1028                 rc = bnxt_hwrm_stat_clear(bp, cpr);
1029                 if (rc)
1030                         return rc;
1031         }
1032         return 0;
1033 }
1034
1035 int bnxt_free_all_hwrm_stat_ctxs(struct bnxt *bp)
1036 {
1037         int rc;
1038         unsigned int i;
1039         struct bnxt_cp_ring_info *cpr;
1040
1041         for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
1042                 unsigned int idx = i + 1;
1043
1044                 if (i >= bp->rx_cp_nr_rings) {
1045                         cpr = bp->tx_queues[i - bp->rx_cp_nr_rings]->cp_ring;
1046                         //Tx rings don't have a grp_info entry.
1047                         idx = 0;
1048                 } else {
1049                         cpr = bp->rx_queues[i]->cp_ring;
1050                 }
1051                 if (cpr->hw_stats_ctx_id != HWRM_NA_SIGNATURE) {
1052                         rc = bnxt_hwrm_stat_ctx_free(bp, cpr, idx);
1053                         if (rc)
1054                                 return rc;
1055                 }
1056         }
1057         return 0;
1058 }
1059
1060 int bnxt_alloc_all_hwrm_stat_ctxs(struct bnxt *bp)
1061 {
1062         unsigned int i;
1063         int rc = 0;
1064
1065         for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
1066                 struct bnxt_tx_queue *txq;
1067                 struct bnxt_rx_queue *rxq;
1068                 struct bnxt_cp_ring_info *cpr;
1069                 unsigned int idx = i + 1;
1070
1071                 if (i >= bp->rx_cp_nr_rings) {
1072                         txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
1073                         cpr = txq->cp_ring;
1074                         //Tx rings don't need grp_info entry.
1075                         idx = 0;
1076                 } else {
1077                         rxq = bp->rx_queues[i];
1078                         cpr = rxq->cp_ring;
1079                 }
1080
1081                 rc = bnxt_hwrm_stat_ctx_alloc(bp, cpr, idx);
1082
1083                 if (rc)
1084                         return rc;
1085         }
1086         return rc;
1087 }
1088
1089 int bnxt_free_all_hwrm_ring_grps(struct bnxt *bp)
1090 {
1091         uint16_t i;
1092         uint32_t rc = 0;
1093
1094         for (i = 0; i < bp->rx_cp_nr_rings; i++) {
1095                 unsigned int idx = i + 1;
1096
1097                 if (bp->grp_info[idx].fw_grp_id == INVALID_HW_RING_ID) {
1098                         RTE_LOG(ERR, PMD,
1099                                 "Attempt to free invalid ring group %d\n",
1100                                 idx);
1101                         continue;
1102                 }
1103
1104                 rc = bnxt_hwrm_ring_grp_free(bp, idx);
1105
1106                 if (rc)
1107                         return rc;
1108         }
1109         return rc;
1110 }
1111
1112 static void bnxt_free_cp_ring(struct bnxt *bp,
1113                               struct bnxt_cp_ring_info *cpr)
1114 {
1115         struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
1116
1117         bnxt_hwrm_ring_free(bp, cp_ring,
1118                         HWRM_RING_FREE_INPUT_RING_TYPE_CMPL);
1119         cp_ring->fw_ring_id = INVALID_HW_RING_ID;
1120         memset(cpr->cp_desc_ring, 0, cpr->cp_ring_struct->ring_size *
1121                         sizeof(*cpr->cp_desc_ring));
1122         cpr->cp_raw_cons = 0;
1123 }
1124
/*
 * Tear down every FW ring owned by the port: all Tx rings, all Rx rings
 * (with their grp_info bookkeeping), their completion rings, and finally
 * the default completion ring.  Host-side descriptor/buffer memory and
 * producer/consumer indices are reset so the rings can be re-created.
 *
 * Always returns 0 (bnxt_hwrm_ring_free results are not propagated).
 */
int bnxt_free_all_hwrm_rings(struct bnxt *bp)
{
	unsigned int i;
	int rc = 0;

	/* Tx rings first; they have no grp_info entries to clear. */
	for (i = 0; i < bp->tx_cp_nr_rings; i++) {
		struct bnxt_tx_queue *txq = bp->tx_queues[i];
		struct bnxt_tx_ring_info *txr = txq->tx_ring;
		struct bnxt_ring *ring = txr->tx_ring_struct;
		struct bnxt_cp_ring_info *cpr = txq->cp_ring;

		if (ring->fw_ring_id != INVALID_HW_RING_ID) {
			bnxt_hwrm_ring_free(bp, ring,
					HWRM_RING_FREE_INPUT_RING_TYPE_TX);
			ring->fw_ring_id = INVALID_HW_RING_ID;
			/* Wipe descriptors and SW buffer ring, reset indices. */
			memset(txr->tx_desc_ring, 0,
					txr->tx_ring_struct->ring_size *
					sizeof(*txr->tx_desc_ring));
			memset(txr->tx_buf_ring, 0,
					txr->tx_ring_struct->ring_size *
					sizeof(*txr->tx_buf_ring));
			txr->tx_prod = 0;
			txr->tx_cons = 0;
		}
		if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID)
			bnxt_free_cp_ring(bp, cpr);
	}

	/* Rx rings; grp_info[i + 1] tracks each Rx ring (0 is default). */
	for (i = 0; i < bp->rx_cp_nr_rings; i++) {
		struct bnxt_rx_queue *rxq = bp->rx_queues[i];
		struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
		struct bnxt_ring *ring = rxr->rx_ring_struct;
		struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
		unsigned int idx = i + 1;

		if (ring->fw_ring_id != INVALID_HW_RING_ID) {
			bnxt_hwrm_ring_free(bp, ring,
					HWRM_RING_FREE_INPUT_RING_TYPE_RX);
			ring->fw_ring_id = INVALID_HW_RING_ID;
			bp->grp_info[idx].rx_fw_ring_id = INVALID_HW_RING_ID;
			memset(rxr->rx_desc_ring, 0,
					rxr->rx_ring_struct->ring_size *
					sizeof(*rxr->rx_desc_ring));
			memset(rxr->rx_buf_ring, 0,
					rxr->rx_ring_struct->ring_size *
					sizeof(*rxr->rx_buf_ring));
			rxr->rx_prod = 0;
		}
		if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID)
			bnxt_free_cp_ring(bp, cpr);
		/* Cleared unconditionally, unlike rx_fw_ring_id above. */
		bp->grp_info[idx].cp_fw_ring_id = INVALID_HW_RING_ID;
	}

	/* Default completion ring */
	{
		struct bnxt_cp_ring_info *cpr = bp->def_cp_ring;

		if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID)
			bnxt_free_cp_ring(bp, cpr);
		bp->grp_info[0].cp_fw_ring_id = INVALID_HW_RING_ID;
	}

	return rc;
}
1189
1190 int bnxt_alloc_all_hwrm_ring_grps(struct bnxt *bp)
1191 {
1192         uint16_t i;
1193         uint32_t rc = 0;
1194
1195         for (i = 0; i < bp->rx_cp_nr_rings; i++) {
1196                 unsigned int idx = i + 1;
1197
1198                 if (bp->grp_info[idx].cp_fw_ring_id == INVALID_HW_RING_ID ||
1199                     bp->grp_info[idx].rx_fw_ring_id == INVALID_HW_RING_ID)
1200                         continue;
1201
1202                 rc = bnxt_hwrm_ring_grp_alloc(bp, idx);
1203
1204                 if (rc)
1205                         return rc;
1206         }
1207         return rc;
1208 }
1209
1210 void bnxt_free_hwrm_resources(struct bnxt *bp)
1211 {
1212         /* Release memzone */
1213         rte_free(bp->hwrm_cmd_resp_addr);
1214         bp->hwrm_cmd_resp_addr = NULL;
1215         bp->hwrm_cmd_resp_dma_addr = 0;
1216 }
1217
1218 int bnxt_alloc_hwrm_resources(struct bnxt *bp)
1219 {
1220         struct rte_pci_device *pdev = bp->pdev;
1221         char type[RTE_MEMZONE_NAMESIZE];
1222
1223         sprintf(type, "bnxt_hwrm_%04x:%02x:%02x:%02x", pdev->addr.domain,
1224                 pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
1225         bp->max_req_len = HWRM_MAX_REQ_LEN;
1226         bp->max_resp_len = HWRM_MAX_RESP_LEN;
1227         bp->hwrm_cmd_resp_addr = rte_malloc(type, bp->max_resp_len, 0);
1228         if (bp->hwrm_cmd_resp_addr == NULL)
1229                 return -ENOMEM;
1230         bp->hwrm_cmd_resp_dma_addr =
1231                 rte_malloc_virt2phy(bp->hwrm_cmd_resp_addr);
1232         rte_spinlock_init(&bp->hwrm_lock);
1233
1234         return 0;
1235 }
1236
1237 int bnxt_clear_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1238 {
1239         struct bnxt_filter_info *filter;
1240         int rc = 0;
1241
1242         STAILQ_FOREACH(filter, &vnic->filter, next) {
1243                 rc = bnxt_hwrm_clear_filter(bp, filter);
1244                 if (rc)
1245                         break;
1246         }
1247         return rc;
1248 }
1249
1250 int bnxt_set_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1251 {
1252         struct bnxt_filter_info *filter;
1253         int rc = 0;
1254
1255         STAILQ_FOREACH(filter, &vnic->filter, next) {
1256                 rc = bnxt_hwrm_set_filter(bp, vnic, filter);
1257                 if (rc)
1258                         break;
1259         }
1260         return rc;
1261 }
1262
1263 void bnxt_free_all_hwrm_resources(struct bnxt *bp)
1264 {
1265         struct bnxt_vnic_info *vnic;
1266         unsigned int i;
1267
1268         if (bp->vnic_info == NULL)
1269                 return;
1270
1271         vnic = &bp->vnic_info[0];
1272         bnxt_hwrm_cfa_l2_clear_rx_mask(bp, vnic);
1273
1274         /* VNIC resources */
1275         for (i = 0; i < bp->nr_vnics; i++) {
1276                 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
1277
1278                 bnxt_clear_hwrm_vnic_filters(bp, vnic);
1279
1280                 bnxt_hwrm_vnic_ctx_free(bp, vnic);
1281                 bnxt_hwrm_vnic_free(bp, vnic);
1282
1283                 rte_free(vnic->fw_grp_ids);
1284         }
1285         /* Ring resources */
1286         bnxt_free_all_hwrm_rings(bp);
1287         bnxt_free_all_hwrm_ring_grps(bp);
1288         bnxt_free_all_hwrm_stat_ctxs(bp);
1289 }
1290
1291 static uint16_t bnxt_parse_eth_link_duplex(uint32_t conf_link_speed)
1292 {
1293         uint8_t hw_link_duplex = HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;
1294
1295         if ((conf_link_speed & ETH_LINK_SPEED_FIXED) == ETH_LINK_SPEED_AUTONEG)
1296                 return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;
1297
1298         switch (conf_link_speed) {
1299         case ETH_LINK_SPEED_10M_HD:
1300         case ETH_LINK_SPEED_100M_HD:
1301                 return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF;
1302         }
1303         return hw_link_duplex;
1304 }
1305
1306 static uint16_t bnxt_check_eth_link_autoneg(uint32_t conf_link)
1307 {
1308         return (conf_link & ETH_LINK_SPEED_FIXED) ? 0 : 1;
1309 }
1310
/*
 * Map an rte_eth link_speeds value to the HWRM PHY-config speed code.
 * Returns 0 (== ETH_LINK_SPEED_AUTONEG) for autoneg or any unsupported
 * speed, which callers treat as "no specific speed requested".
 */
static uint16_t bnxt_parse_eth_link_speed(uint32_t conf_link_speed)
{
	uint16_t eth_link_speed = 0;

	if (conf_link_speed == ETH_LINK_SPEED_AUTONEG)
		return ETH_LINK_SPEED_AUTONEG;

	switch (conf_link_speed & ~ETH_LINK_SPEED_FIXED) {
	case ETH_LINK_SPEED_100M:
	case ETH_LINK_SPEED_100M_HD:
		eth_link_speed =
			HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_100MB;
		break;
	case ETH_LINK_SPEED_1G:
		eth_link_speed =
			HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_1GB;
		break;
	case ETH_LINK_SPEED_2_5G:
		eth_link_speed =
			HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_2_5GB;
		break;
	case ETH_LINK_SPEED_10G:
		/* NOTE(review): 10G/40G/50G use FORCE_LINK_SPEED_* while the
		 * other cases use AUTO_LINK_SPEED_*; presumably the two enums
		 * share values in the HSI header — confirm against
		 * hsi_struct_def_dpdk.h.
		 */
		eth_link_speed =
			HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_10GB;
		break;
	case ETH_LINK_SPEED_20G:
		eth_link_speed =
			HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_20GB;
		break;
	case ETH_LINK_SPEED_25G:
		eth_link_speed =
			HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_25GB;
		break;
	case ETH_LINK_SPEED_40G:
		eth_link_speed =
			HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_40GB;
		break;
	case ETH_LINK_SPEED_50G:
		eth_link_speed =
			HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_50GB;
		break;
	default:
		RTE_LOG(ERR, PMD,
			"Unsupported link speed %d; default to AUTO\n",
			conf_link_speed);
		break;
	}
	return eth_link_speed;
}
1360
1361 #define BNXT_SUPPORTED_SPEEDS (ETH_LINK_SPEED_100M | ETH_LINK_SPEED_100M_HD | \
1362                 ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G | \
1363                 ETH_LINK_SPEED_10G | ETH_LINK_SPEED_20G | ETH_LINK_SPEED_25G | \
1364                 ETH_LINK_SPEED_40G | ETH_LINK_SPEED_50G)
1365
1366 static int bnxt_valid_link_speed(uint32_t link_speed, uint8_t port_id)
1367 {
1368         uint32_t one_speed;
1369
1370         if (link_speed == ETH_LINK_SPEED_AUTONEG)
1371                 return 0;
1372
1373         if (link_speed & ETH_LINK_SPEED_FIXED) {
1374                 one_speed = link_speed & ~ETH_LINK_SPEED_FIXED;
1375
1376                 if (one_speed & (one_speed - 1)) {
1377                         RTE_LOG(ERR, PMD,
1378                                 "Invalid advertised speeds (%u) for port %u\n",
1379                                 link_speed, port_id);
1380                         return -EINVAL;
1381                 }
1382                 if ((one_speed & BNXT_SUPPORTED_SPEEDS) != one_speed) {
1383                         RTE_LOG(ERR, PMD,
1384                                 "Unsupported advertised speed (%u) for port %u\n",
1385                                 link_speed, port_id);
1386                         return -EINVAL;
1387                 }
1388         } else {
1389                 if (!(link_speed & BNXT_SUPPORTED_SPEEDS)) {
1390                         RTE_LOG(ERR, PMD,
1391                                 "Unsupported advertised speeds (%u) for port %u\n",
1392                                 link_speed, port_id);
1393                         return -EINVAL;
1394                 }
1395         }
1396         return 0;
1397 }
1398
/*
 * Build the HWRM auto-link speed advertisement mask from an rte_eth
 * link_speeds word.  For autoneg, prefer the speeds the FW reported as
 * supported; otherwise advertise everything the PMD supports.
 */
static uint16_t
bnxt_parse_eth_link_speed_mask(struct bnxt *bp, uint32_t link_speed)
{
	uint16_t ret = 0;

	if (link_speed == ETH_LINK_SPEED_AUTONEG) {
		if (bp->link_info.support_speeds)
			return bp->link_info.support_speeds;
		link_speed = BNXT_SUPPORTED_SPEEDS;
	}

	if (link_speed & ETH_LINK_SPEED_100M)
		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
	/* NOTE(review): 100M half-duplex maps to the same 100MB mask bit as
	 * full duplex; if the HSI header defines a separate 100MBHD mask bit,
	 * this loses the HD distinction — confirm intended.
	 */
	if (link_speed & ETH_LINK_SPEED_100M_HD)
		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
	if (link_speed & ETH_LINK_SPEED_1G)
		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_1GB;
	if (link_speed & ETH_LINK_SPEED_2_5G)
		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_2_5GB;
	if (link_speed & ETH_LINK_SPEED_10G)
		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_10GB;
	if (link_speed & ETH_LINK_SPEED_20G)
		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_20GB;
	if (link_speed & ETH_LINK_SPEED_25G)
		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_25GB;
	if (link_speed & ETH_LINK_SPEED_40G)
		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_40GB;
	if (link_speed & ETH_LINK_SPEED_50G)
		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_50GB;
	return ret;
}
1430
1431 static uint32_t bnxt_parse_hw_link_speed(uint16_t hw_link_speed)
1432 {
1433         uint32_t eth_link_speed = ETH_SPEED_NUM_NONE;
1434
1435         switch (hw_link_speed) {
1436         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100MB:
1437                 eth_link_speed = ETH_SPEED_NUM_100M;
1438                 break;
1439         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_1GB:
1440                 eth_link_speed = ETH_SPEED_NUM_1G;
1441                 break;
1442         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2_5GB:
1443                 eth_link_speed = ETH_SPEED_NUM_2_5G;
1444                 break;
1445         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10GB:
1446                 eth_link_speed = ETH_SPEED_NUM_10G;
1447                 break;
1448         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_20GB:
1449                 eth_link_speed = ETH_SPEED_NUM_20G;
1450                 break;
1451         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_25GB:
1452                 eth_link_speed = ETH_SPEED_NUM_25G;
1453                 break;
1454         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_40GB:
1455                 eth_link_speed = ETH_SPEED_NUM_40G;
1456                 break;
1457         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_50GB:
1458                 eth_link_speed = ETH_SPEED_NUM_50G;
1459                 break;
1460         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2GB:
1461         default:
1462                 RTE_LOG(ERR, PMD, "HWRM link speed %d not defined\n",
1463                         hw_link_speed);
1464                 break;
1465         }
1466         return eth_link_speed;
1467 }
1468
1469 static uint16_t bnxt_parse_hw_link_duplex(uint16_t hw_link_duplex)
1470 {
1471         uint16_t eth_link_duplex = ETH_LINK_FULL_DUPLEX;
1472
1473         switch (hw_link_duplex) {
1474         case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH:
1475         case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_FULL:
1476                 eth_link_duplex = ETH_LINK_FULL_DUPLEX;
1477                 break;
1478         case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF:
1479                 eth_link_duplex = ETH_LINK_HALF_DUPLEX;
1480                 break;
1481         default:
1482                 RTE_LOG(ERR, PMD, "HWRM link duplex %d not defined\n",
1483                         hw_link_duplex);
1484                 break;
1485         }
1486         return eth_link_duplex;
1487 }
1488
/*
 * Query the PHY via HWRM and fill *link with the current speed, duplex,
 * status and autoneg mode in rte_eth terms.
 *
 * Returns 0 on success or the HWRM error code (link is left untouched
 * on failure).
 */
int bnxt_get_hwrm_link_config(struct bnxt *bp, struct rte_eth_link *link)
{
	int rc = 0;
	struct bnxt_link_info *link_info = &bp->link_info;

	rc = bnxt_hwrm_port_phy_qcfg(bp, link_info);
	if (rc) {
		RTE_LOG(ERR, PMD,
			"Get link config failed with rc %d\n", rc);
		goto exit;
	}
	/* A zero HW speed code means no link; report NONE directly. */
	if (link_info->link_speed)
		link->link_speed =
			bnxt_parse_hw_link_speed(link_info->link_speed);
	else
		link->link_speed = ETH_SPEED_NUM_NONE;
	link->link_duplex = bnxt_parse_hw_link_duplex(link_info->duplex);
	link->link_status = link_info->link_up;
	link->link_autoneg = link_info->auto_mode ==
		HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_NONE ?
		ETH_LINK_FIXED : ETH_LINK_AUTONEG;
exit:
	return rc;
}
1513
/*
 * Program the PHY from dev_conf->link_speeds, or force the link down
 * when link_up is false.  NPAR PFs and VFs do not own the PHY and are
 * silently skipped.
 *
 * Returns 0 on success, -EINVAL for invalid/forced-BASE-T configs, or
 * the HWRM error code.
 */
int bnxt_set_hwrm_link_config(struct bnxt *bp, bool link_up)
{
	int rc = 0;
	struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
	struct bnxt_link_info link_req;
	uint16_t speed, autoneg;

	if (BNXT_NPAR_PF(bp) || BNXT_VF(bp))
		return 0;

	rc = bnxt_valid_link_speed(dev_conf->link_speeds,
			bp->eth_dev->data->port_id);
	if (rc)
		goto error;

	memset(&link_req, 0, sizeof(link_req));
	link_req.link_up = link_up;
	/* Forcing link down: send the request with all-zero flags/speed. */
	if (!link_up)
		goto port_phy_cfg;

	autoneg = bnxt_check_eth_link_autoneg(dev_conf->link_speeds);
	speed = bnxt_parse_eth_link_speed(dev_conf->link_speeds);
	link_req.phy_flags = HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESET_PHY;
	/* Autoneg can be done only when the FW allows */
	if (autoneg == 1 && !(bp->link_info.auto_link_speed ||
				bp->link_info.force_link_speed)) {
		link_req.phy_flags |=
				HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESTART_AUTONEG;
		link_req.auto_link_speed_mask =
			bnxt_parse_eth_link_speed_mask(bp,
						       dev_conf->link_speeds);
	} else {
		/* Forced-speed path; BASE-T media cannot have its speed
		 * forced and must autonegotiate.
		 */
		if (bp->link_info.phy_type ==
		    HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASET ||
		    bp->link_info.phy_type ==
		    HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASETE ||
		    bp->link_info.media_type ==
		    HWRM_PORT_PHY_QCFG_OUTPUT_MEDIA_TYPE_TP) {
			RTE_LOG(ERR, PMD, "10GBase-T devices must autoneg\n");
			return -EINVAL;
		}

		link_req.phy_flags |= HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE;
		/* If user wants a particular speed try that first. */
		if (speed)
			link_req.link_speed = speed;
		else if (bp->link_info.force_link_speed)
			link_req.link_speed = bp->link_info.force_link_speed;
		else
			link_req.link_speed = bp->link_info.auto_link_speed;
	}
	link_req.duplex = bnxt_parse_eth_link_duplex(dev_conf->link_speeds);
	link_req.auto_pause = bp->link_info.auto_pause;
	link_req.force_pause = bp->link_info.force_pause;

port_phy_cfg:
	rc = bnxt_hwrm_port_phy_cfg(bp, &link_req);
	if (rc) {
		RTE_LOG(ERR, PMD,
			"Set link config failed with rc %d\n", rc);
	}

error:
	return rc;
}
1579
/*
 * Query this function's configuration (HWRM_FUNC_QCFG, fid 0xffff =
 * "self") and cache the VF VLAN and the NPAR port-partition type.
 * Originally added for JIRA 22088.
 *
 * Returns 0 on success or the HWRM error code.
 */
int bnxt_hwrm_func_qcfg(struct bnxt *bp)
{
	struct hwrm_func_qcfg_input req = {0};
	struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
	int rc = 0;

	HWRM_PREP(req, FUNC_QCFG, -1, resp);
	/* 0xffff asks the FW about the calling function itself. */
	req.fid = rte_cpu_to_le_16(0xffff);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	if (BNXT_VF(bp)) {
		struct bnxt_vf_info *vf = &bp->vf;

		/* Hard Coded.. 0xfff VLAN ID mask */
		vf->vlan = rte_le_to_cpu_16(resp->vlan) & 0xfff;
	}

	/* Remember whether the port is NPAR-partitioned. */
	switch (resp->port_partition_type) {
	case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_0:
	case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_5:
	case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR2_0:
		bp->port_partition_type = resp->port_partition_type;
		break;
	default:
		bp->port_partition_type = 0;
		break;
	}

	return rc;
}