New upstream version 16.11.9
[deb_dpdk.git] / drivers / net / bnxt / bnxt_hwrm.c
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) Broadcom Limited.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Broadcom Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33
#include <stdio.h>

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_malloc.h>
#include <rte_memzone.h>
#include <rte_version.h>
40
41 #include "bnxt.h"
42 #include "bnxt_cpr.h"
43 #include "bnxt_filter.h"
44 #include "bnxt_hwrm.h"
45 #include "bnxt_rxq.h"
46 #include "bnxt_rxr.h"
47 #include "bnxt_ring.h"
48 #include "bnxt_txq.h"
49 #include "bnxt_txr.h"
50 #include "bnxt_vnic.h"
51 #include "hsi_struct_def_dpdk.h"
52
53 #define HWRM_CMD_TIMEOUT                6000000
54
55 /*
56  * HWRM Functions (sent to HWRM)
57  * These are named bnxt_hwrm_*() and return -1 if bnxt_hwrm_send_message()
58  * fails (ie: a timeout), and a positive non-zero HWRM error code if the HWRM
59  * command was failed by the ChiMP.
60  */
61
/*
 * Write an HWRM request into the channel in BAR0, ring the doorbell and
 * busy-poll for the firmware's response.  Caller must hold
 * bp->hwrm_lock (see bnxt_hwrm_send_message()).
 *
 * msg/msg_len: the fully built request (struct input header + payload).
 *
 * Returns 0 on success, -1 if no valid response arrived within
 * HWRM_CMD_TIMEOUT iterations (~microseconds, one rte_delay_us(1) per
 * loop).  The response itself is left in bp->hwrm_cmd_resp_addr for the
 * caller to inspect.
 */
static int bnxt_hwrm_send_message_locked(struct bnxt *bp, void *msg,
					uint32_t msg_len)
{
	unsigned int i;
	struct input *req = msg;
	struct output *resp = bp->hwrm_cmd_resp_addr;
	uint32_t *data = msg;
	uint8_t *bar;
	uint8_t *valid;

	/* Write request msg to hwrm channel, 32 bits at a time */
	for (i = 0; i < msg_len; i += 4) {
		bar = (uint8_t *)bp->bar0 + i;
		*(volatile uint32_t *)bar = *data;
		data++;
	}

	/* Zero the rest of the request space so leftover bytes from a
	 * previous, longer command cannot be misread by the firmware.
	 */
	for (; i < bp->max_req_len; i += 4) {
		bar = (uint8_t *)bp->bar0 + i;
		*(volatile uint32_t *)bar = 0;
	}

	/* Ring channel doorbell (register at BAR0 offset 0x100) */
	bar = (uint8_t *)bp->bar0 + 0x100;
	*(volatile uint32_t *)bar = 1;

	/* Poll for the valid bit: the firmware DMAs the response into
	 * bp->hwrm_cmd_resp_addr and writes HWRM_RESP_VALID_KEY into the
	 * last byte only after the rest of the response is in place.
	 */
	for (i = 0; i < HWRM_CMD_TIMEOUT; i++) {
		/* Sanity check on the resp->resp_len */
		rte_rmb();
		if (resp->resp_len && resp->resp_len <=
				bp->max_resp_len) {
			/* Last byte of resp contains the valid key */
			valid = (uint8_t *)resp + resp->resp_len - 1;
			if (*valid == HWRM_RESP_VALID_KEY)
				break;
		}
		rte_delay_us(1);
	}

	if (i >= HWRM_CMD_TIMEOUT) {
		RTE_LOG(ERR, PMD, "Error sending msg %x\n",
			req->req_type);
		goto err_ret;
	}
	return 0;

err_ret:
	return -1;
}
113
114 static int bnxt_hwrm_send_message(struct bnxt *bp, void *msg, uint32_t msg_len)
115 {
116         int rc;
117
118         rte_spinlock_lock(&bp->hwrm_lock);
119         rc = bnxt_hwrm_send_message_locked(bp, msg, msg_len);
120         rte_spinlock_unlock(&bp->hwrm_lock);
121         return rc;
122 }
123
/*
 * Initialize the common HWRM request header in 'req' and clear the
 * shared response buffer.  Must run before filling command-specific
 * fields.  'resp' is unused but kept for call-site uniformity.
 *
 * Wrapped in do/while(0) so the multi-statement macro expands to a
 * single statement and is safe inside unbraced if/else bodies
 * (CERT PRE10-C).
 */
#define HWRM_PREP(req, type, cr, resp) \
	do { \
		memset(bp->hwrm_cmd_resp_addr, 0, bp->max_resp_len); \
		req.req_type = rte_cpu_to_le_16(HWRM_##type); \
		req.cmpl_ring = rte_cpu_to_le_16(cr); \
		req.seq_id = rte_cpu_to_le_16(bp->hwrm_cmd_seq++); \
		req.target_id = rte_cpu_to_le_16(0xffff); \
		req.resp_addr = rte_cpu_to_le_64(bp->hwrm_cmd_resp_dma_addr); \
	} while (0)
131
/*
 * Check the outcome of an HWRM command: a transport failure ('rc' from
 * bnxt_hwrm_send_message()) or a firmware error code in the response.
 * Either case logs, maps the code to a negative errno (-EACCES for
 * access-denied, -EINVAL for any other positive code) and returns from
 * the *enclosing* function.
 *
 * Requires 'rc' and 'resp' in scope.  Wrapped in do/while(0) instead of
 * a bare brace block so that "HWRM_CHECK_RESULT;" is one statement and
 * cannot break if/else nesting (CERT PRE10-C).
 */
#define HWRM_CHECK_RESULT \
	do { \
		if (rc) { \
			RTE_LOG(ERR, PMD, "%s failed rc:%d\n", \
				__func__, rc); \
			if (rc == HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED) \
				rc = -EACCES; \
			else if (rc > 0) \
				rc = -EINVAL; \
			return rc; \
		} \
		if (resp->error_code) { \
			rc = rte_le_to_cpu_16(resp->error_code); \
			RTE_LOG(ERR, PMD, "%s error %d\n", __func__, rc); \
			if (rc == HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED) \
				rc = -EACCES; \
			else if (rc > 0) \
				rc = -EINVAL; \
			return rc; \
		} \
	} while (0)
153
154 int bnxt_hwrm_cfa_l2_clear_rx_mask(struct bnxt *bp, struct bnxt_vnic_info *vnic)
155 {
156         int rc = 0;
157         struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
158         struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;
159
160         HWRM_PREP(req, CFA_L2_SET_RX_MASK, -1, resp);
161         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
162         req.mask = 0;
163
164         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
165
166         HWRM_CHECK_RESULT;
167
168         return rc;
169 }
170
171 int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, struct bnxt_vnic_info *vnic)
172 {
173         int rc = 0;
174         struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
175         struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;
176         uint32_t mask = 0;
177
178         HWRM_PREP(req, CFA_L2_SET_RX_MASK, -1, resp);
179         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
180
181         /* FIXME add multicast flag, when multicast adding options is supported
182          * by ethtool.
183          */
184         if (vnic->flags & BNXT_VNIC_INFO_PROMISC)
185                 mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_PROMISCUOUS;
186         if (vnic->flags & BNXT_VNIC_INFO_ALLMULTI)
187                 mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ALL_MCAST;
188         req.mask = rte_cpu_to_le_32(mask);
189
190         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
191
192         HWRM_CHECK_RESULT;
193
194         return rc;
195 }
196
197 int bnxt_hwrm_clear_filter(struct bnxt *bp,
198                            struct bnxt_filter_info *filter)
199 {
200         int rc = 0;
201         struct hwrm_cfa_l2_filter_free_input req = {.req_type = 0 };
202         struct hwrm_cfa_l2_filter_free_output *resp = bp->hwrm_cmd_resp_addr;
203
204         if (filter->fw_l2_filter_id == UINT64_MAX)
205                 return 0;
206
207         HWRM_PREP(req, CFA_L2_FILTER_FREE, -1, resp);
208
209         req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);
210
211         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
212
213         HWRM_CHECK_RESULT;
214
215         filter->fw_l2_filter_id = -1;
216
217         return 0;
218 }
219
/*
 * Allocate an L2 receive filter that steers matching traffic to 'vnic'.
 *
 * If a filter is already present (fw_l2_filter_id != UINT64_MAX) it is
 * freed first, giving "replace" semantics.  Only the fields flagged in
 * filter->enables are copied into the request; DST_ID is always forced
 * on so the firmware binds the filter to vnic->fw_vnic_id.
 *
 * On success the firmware-assigned id is stored in
 * filter->fw_l2_filter_id.  Returns 0 or a negative errno.
 */
int bnxt_hwrm_set_filter(struct bnxt *bp,
			 struct bnxt_vnic_info *vnic,
			 struct bnxt_filter_info *filter)
{
	int rc = 0;
	struct hwrm_cfa_l2_filter_alloc_input req = {.req_type = 0 };
	struct hwrm_cfa_l2_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
	uint32_t enables = 0;

	/* Replace semantics: drop any existing filter first. */
	if (filter->fw_l2_filter_id != UINT64_MAX)
		bnxt_hwrm_clear_filter(bp, filter);

	HWRM_PREP(req, CFA_L2_FILTER_ALLOC, -1, resp);

	req.flags = rte_cpu_to_le_32(filter->flags);
	/* Always match on the outermost L2 header. */
	req.flags |=
	rte_cpu_to_le_32(HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_OUTERMOST);

	enables = filter->enables |
	      HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_DST_ID;
	req.dst_id = rte_cpu_to_le_16(vnic->fw_vnic_id);

	/* Copy only the match fields the caller enabled. */
	if (enables &
	    HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR)
		memcpy(req.l2_addr, filter->l2_addr,
		       ETHER_ADDR_LEN);
	if (enables &
	    HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK)
		memcpy(req.l2_addr_mask, filter->l2_addr_mask,
		       ETHER_ADDR_LEN);
	if (enables &
	    HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN)
		req.l2_ovlan = filter->l2_ovlan;
	if (enables &
	    HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN_MASK)
		req.l2_ovlan_mask = filter->l2_ovlan_mask;

	req.enables = rte_cpu_to_le_32(enables);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	filter->fw_l2_filter_id = rte_le_to_cpu_64(resp->l2_filter_id);

	return rc;
}
267
268 int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, void *fwd_cmd)
269 {
270         int rc;
271         struct hwrm_exec_fwd_resp_input req = {.req_type = 0 };
272         struct hwrm_exec_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
273
274         HWRM_PREP(req, EXEC_FWD_RESP, -1, resp);
275
276         memcpy(req.encap_request, fwd_cmd,
277                sizeof(req.encap_request));
278
279         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
280
281         HWRM_CHECK_RESULT;
282
283         return rc;
284 }
285
/*
 * Query this function's capabilities and resource limits from the
 * firmware and cache them in bp->pf or bp->vf depending on whether the
 * device is a physical or virtual function.
 *
 * Returns 0 on success or a negative errno.
 */
int bnxt_hwrm_func_qcaps(struct bnxt *bp)
{
	int rc = 0;
	struct hwrm_func_qcaps_input req = {.req_type = 0 };
	struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, FUNC_QCAPS, -1, resp);

	/* 0xffff: query the calling function itself. */
	req.fid = rte_cpu_to_le_16(0xffff);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	bp->max_ring_grps = rte_le_to_cpu_32(resp->max_hw_ring_grps);
	if (BNXT_PF(bp)) {
		struct bnxt_pf_info *pf = &bp->pf;

		pf->fw_fid = rte_le_to_cpu_32(resp->fid);
		pf->port_id = resp->port_id;
		memcpy(pf->mac_addr, resp->mac_address, ETHER_ADDR_LEN);
		pf->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx);
		pf->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings);
		pf->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings);
		pf->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings);
		pf->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
		pf->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
		pf->first_vf_id = rte_le_to_cpu_16(resp->first_vf_id);
		pf->max_vfs = rte_le_to_cpu_16(resp->max_vfs);
	} else {
		struct bnxt_vf_info *vf = &bp->vf;

		vf->fw_fid = rte_le_to_cpu_32(resp->fid);
		/* NOTE(review): the PF branch passes resp->mac_address
		 * (array decay) while this one passes &resp->mac_address;
		 * both yield the same address if mac_address is an array,
		 * but the inconsistency is worth confirming.
		 */
		memcpy(vf->mac_addr, &resp->mac_address, ETHER_ADDR_LEN);
		vf->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx);
		vf->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings);
		vf->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings);
		vf->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings);
		vf->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
		vf->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
	}

	return rc;
}
330
331 int bnxt_hwrm_func_reset(struct bnxt *bp)
332 {
333         int rc = 0;
334         struct hwrm_func_reset_input req = {.req_type = 0 };
335         struct hwrm_func_reset_output *resp = bp->hwrm_cmd_resp_addr;
336
337         HWRM_PREP(req, FUNC_RESET, -1, resp);
338
339         req.enables = rte_cpu_to_le_32(0);
340
341         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
342
343         HWRM_CHECK_RESULT;
344
345         return rc;
346 }
347
/*
 * Register the driver with the firmware, enabling async-event delivery
 * and forwarding of the VF requests listed in vf_req_fwd.  Sets
 * BNXT_FLAG_REGISTERED so registration happens at most once; the DPDK
 * release numbers (year.month.minor) are reported as the driver
 * version.
 *
 * Returns 0 on success (or if already registered), negative errno on
 * failure.
 */
int bnxt_hwrm_func_driver_register(struct bnxt *bp, uint32_t flags,
				   uint32_t *vf_req_fwd)
{
	int rc;
	struct hwrm_func_drv_rgtr_input req = {.req_type = 0 };
	struct hwrm_func_drv_rgtr_output *resp = bp->hwrm_cmd_resp_addr;

	/* Already registered: nothing to do. */
	if (bp->flags & BNXT_FLAG_REGISTERED)
		return 0;

	HWRM_PREP(req, FUNC_DRV_RGTR, -1, resp);
	/* NOTE(review): flags/enables skip rte_cpu_to_le_32 unlike most
	 * request fields — harmless on little-endian hosts; confirm for
	 * big-endian targets.
	 */
	req.flags = flags;
	req.enables = HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VER |
			HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_ASYNC_EVENT_FWD;
	req.ver_maj = RTE_VER_YEAR;
	req.ver_min = RTE_VER_MONTH;
	req.ver_upd = RTE_VER_MINOR;

	memcpy(req.vf_req_fwd, vf_req_fwd, sizeof(req.vf_req_fwd));

	/* Bit 0 presumably enables forwarding of async event id 0. */
	req.async_event_fwd[0] |= rte_cpu_to_le_32(0x1);   /* TODO: Use MACRO */

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	bp->flags |= BNXT_FLAG_REGISTERED;

	return rc;
}
378
379 int bnxt_hwrm_ver_get(struct bnxt *bp)
380 {
381         int rc = 0;
382         struct hwrm_ver_get_input req = {.req_type = 0 };
383         struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr;
384         uint32_t fw_version;
385         uint16_t max_resp_len;
386         char type[RTE_MEMZONE_NAMESIZE];
387
388         HWRM_PREP(req, VER_GET, -1, resp);
389
390         req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
391         req.hwrm_intf_min = HWRM_VERSION_MINOR;
392         req.hwrm_intf_upd = HWRM_VERSION_UPDATE;
393
394         /*
395          * Hold the lock since we may be adjusting the response pointers.
396          */
397         rte_spinlock_lock(&bp->hwrm_lock);
398         rc = bnxt_hwrm_send_message_locked(bp, &req, sizeof(req));
399
400         HWRM_CHECK_RESULT;
401
402         RTE_LOG(INFO, PMD, "%d.%d.%d:%d.%d.%d\n",
403                 resp->hwrm_intf_maj, resp->hwrm_intf_min,
404                 resp->hwrm_intf_upd,
405                 resp->hwrm_fw_maj, resp->hwrm_fw_min, resp->hwrm_fw_bld);
406         RTE_LOG(INFO, PMD, "Driver HWRM version: %d.%d.%d\n",
407                 HWRM_VERSION_MAJOR, HWRM_VERSION_MINOR, HWRM_VERSION_UPDATE);
408
409         fw_version = resp->hwrm_intf_maj << 16;
410         fw_version |= resp->hwrm_intf_min << 8;
411         fw_version |= resp->hwrm_intf_upd;
412
413         if (resp->hwrm_intf_maj != HWRM_VERSION_MAJOR) {
414                 RTE_LOG(ERR, PMD, "Unsupported firmware API version\n");
415                 rc = -EINVAL;
416                 goto error;
417         }
418
419         if (bp->max_req_len > resp->max_req_win_len) {
420                 RTE_LOG(ERR, PMD, "Unsupported request length\n");
421                 rc = -EINVAL;
422         }
423         bp->max_req_len = resp->max_req_win_len;
424         max_resp_len = resp->max_resp_len;
425         if (bp->max_resp_len != max_resp_len) {
426                 sprintf(type, "bnxt_hwrm_%04x:%02x:%02x:%02x",
427                         bp->pdev->addr.domain, bp->pdev->addr.bus,
428                         bp->pdev->addr.devid, bp->pdev->addr.function);
429
430                 rte_free(bp->hwrm_cmd_resp_addr);
431
432                 bp->hwrm_cmd_resp_addr = rte_malloc(type, max_resp_len, 0);
433                 if (bp->hwrm_cmd_resp_addr == NULL) {
434                         rc = -ENOMEM;
435                         goto error;
436                 }
437                 bp->hwrm_cmd_resp_dma_addr =
438                         rte_malloc_virt2phy(bp->hwrm_cmd_resp_addr);
439                 bp->max_resp_len = max_resp_len;
440         }
441
442 error:
443         rte_spinlock_unlock(&bp->hwrm_lock);
444         return rc;
445 }
446
447 int bnxt_hwrm_func_driver_unregister(struct bnxt *bp, uint32_t flags)
448 {
449         int rc;
450         struct hwrm_func_drv_unrgtr_input req = {.req_type = 0 };
451         struct hwrm_func_drv_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;
452
453         if (!(bp->flags & BNXT_FLAG_REGISTERED))
454                 return 0;
455
456         HWRM_PREP(req, FUNC_DRV_UNRGTR, -1, resp);
457         req.flags = flags;
458
459         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
460
461         HWRM_CHECK_RESULT;
462
463         bp->flags &= ~BNXT_FLAG_REGISTERED;
464
465         return rc;
466 }
467
/*
 * Program the PHY from 'conf': either bring the link up with the
 * requested speed/autoneg/pause settings, or force it down.
 *
 * Returns 0 on success or a negative errno.
 */
static int bnxt_hwrm_port_phy_cfg(struct bnxt *bp, struct bnxt_link_info *conf)
{
	int rc = 0;
	struct hwrm_port_phy_cfg_input req = {0};
	struct hwrm_port_phy_cfg_output *resp = bp->hwrm_cmd_resp_addr;
	uint32_t enables = 0;

	HWRM_PREP(req, PORT_PHY_CFG, -1, resp);

	if (conf->link_up) {
		/* Setting Fixed Speed. But AutoNeg is ON, So disable it */
		if (bp->link_info.auto_mode && conf->link_speed) {
			req.auto_mode = HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_NONE;
			RTE_LOG(DEBUG, PMD, "Disabling AutoNeg\n");
		}

		req.flags = rte_cpu_to_le_32(conf->phy_flags);
		req.force_link_speed = rte_cpu_to_le_16(conf->link_speed);
		enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_MODE;
		/*
		 * Note, ChiMP FW 20.2.1 and 20.2.2 return an error when we set
		 * any auto mode, even "none".
		 */
		if (!conf->link_speed) {
			/* No speeds specified. Enable AutoNeg - all speeds */
			req.auto_mode =
				HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_ALL_SPEEDS;
		}
		/* AutoNeg - Advertise only the speeds in the caller's mask
		 * (skipped when a forced speed was requested via phy_flags).
		 */
		if (conf->auto_link_speed_mask &&
		    !(conf->phy_flags & HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE)) {
			req.auto_mode =
				HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_SPEED_MASK;
			req.auto_link_speed_mask =
				conf->auto_link_speed_mask;
			enables |=
			HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED_MASK;
		}

		req.auto_duplex = conf->duplex;
		enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_DUPLEX;
		req.auto_pause = conf->auto_pause;
		req.force_pause = conf->force_pause;
		/* Set force_pause if there is no auto or if there is a force */
		if (req.auto_pause && !req.force_pause)
			enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_PAUSE;
		else
			enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_FORCE_PAUSE;

		req.enables = rte_cpu_to_le_32(enables);
	} else {
		/* Link down requested: only the force-down flag is sent. */
		req.flags =
		rte_cpu_to_le_32(HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE_LINK_DOWN);
		RTE_LOG(INFO, PMD, "Force Link Down\n");
	}

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	return rc;
}
530
/*
 * Read the current PHY state from the firmware and copy it into
 * 'link_info': link status, speed, duplex, pause settings, supported
 * speeds and PHY firmware version.
 *
 * Returns 0 on success or a negative errno.
 */
static int bnxt_hwrm_port_phy_qcfg(struct bnxt *bp,
				   struct bnxt_link_info *link_info)
{
	int rc = 0;
	struct hwrm_port_phy_qcfg_input req = {0};
	struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, PORT_PHY_QCFG, -1, resp);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	link_info->phy_link_status = resp->link;
	/* Collapse the firmware's link state into a boolean. */
	link_info->link_up =
		(link_info->phy_link_status ==
		 HWRM_PORT_PHY_QCFG_OUTPUT_LINK_LINK) ? 1 : 0;
	link_info->link_speed = rte_le_to_cpu_16(resp->link_speed);
	link_info->duplex = resp->duplex;
	link_info->pause = resp->pause;
	link_info->auto_pause = resp->auto_pause;
	link_info->force_pause = resp->force_pause;
	link_info->auto_mode = resp->auto_mode;
	link_info->phy_type = resp->phy_type;
	link_info->media_type = resp->media_type;

	link_info->support_speeds = rte_le_to_cpu_16(resp->support_speeds);
	link_info->auto_link_speed = rte_le_to_cpu_16(resp->auto_link_speed);
	link_info->preemphasis = rte_le_to_cpu_32(resp->preemphasis);
	link_info->force_link_speed = rte_le_to_cpu_16(resp->force_link_speed);
	link_info->phy_ver[0] = resp->phy_maj;
	link_info->phy_ver[1] = resp->phy_min;
	link_info->phy_ver[2] = resp->phy_bld;

	return rc;
}
567
/*
 * Query the CoS queue configuration for this port and cache the eight
 * queue id / service-profile pairs in bp->cos_queue[].
 *
 * Returns 0 on success or a negative errno.
 */
int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
{
	int rc = 0;
	struct hwrm_queue_qportcfg_input req = {.req_type = 0 };
	struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, QUEUE_QPORTCFG, -1, resp);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

/* Token-pasting helper: copies resp->queue_idN and its service profile
 * into bp->cos_queue[N].
 */
#define GET_QUEUE_INFO(x) \
	bp->cos_queue[x].id = resp->queue_id##x; \
	bp->cos_queue[x].profile = resp->queue_id##x##_service_profile

	GET_QUEUE_INFO(0);
	GET_QUEUE_INFO(1);
	GET_QUEUE_INFO(2);
	GET_QUEUE_INFO(3);
	GET_QUEUE_INFO(4);
	GET_QUEUE_INFO(5);
	GET_QUEUE_INFO(6);
	GET_QUEUE_INFO(7);

	return rc;
}
595
/*
 * Allocate a firmware ring of the given type (TX, RX or completion)
 * backed by the descriptor memory at ring->bd_dma.
 *
 * ring_type:     one of HWRM_RING_ALLOC_INPUT_RING_TYPE_{TX,RX,CMPL}.
 * map_index:     logical id associating the ring with a doorbell.
 * stats_ctx_id:  stats context to bind, or INVALID_STATS_CTX_ID.
 * cmpl_ring_id:  completion ring for TX/RX rings.
 *
 * On success the firmware ring id is stored in ring->fw_ring_id.
 * Errors are decoded inline (not via HWRM_CHECK_RESULT) so a
 * per-ring-type message can be logged; returns the HWRM error code or
 * -1 for an invalid type.
 */
int bnxt_hwrm_ring_alloc(struct bnxt *bp,
			 struct bnxt_ring *ring,
			 uint32_t ring_type, uint32_t map_index,
			 uint32_t stats_ctx_id, uint32_t cmpl_ring_id)
{
	int rc = 0;
	struct hwrm_ring_alloc_input req = {.req_type = 0 };
	struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, RING_ALLOC, -1, resp);

	req.enables = rte_cpu_to_le_32(0);

	req.page_tbl_addr = rte_cpu_to_le_64(ring->bd_dma);
	req.fbo = rte_cpu_to_le_32(0);
	/* Association of ring index with doorbell index */
	req.logical_id = rte_cpu_to_le_16(map_index);

	switch (ring_type) {
	case HWRM_RING_ALLOC_INPUT_RING_TYPE_TX:
		/* TX rings additionally carry a CoS queue id, then share
		 * the RX setup below.
		 */
		req.queue_id = bp->cos_queue[0].id;
		/* FALLTHROUGH */
	case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX:
		req.ring_type = ring_type;
		req.cmpl_ring_id = rte_cpu_to_le_16(cmpl_ring_id);
		req.length = rte_cpu_to_le_32(ring->ring_size);
		req.stat_ctx_id = rte_cpu_to_le_16(stats_ctx_id);
		if (stats_ctx_id != INVALID_STATS_CTX_ID)
			req.enables =
			rte_cpu_to_le_32(rte_le_to_cpu_32(req.enables) |
			HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID);
		break;
	case HWRM_RING_ALLOC_INPUT_RING_TYPE_CMPL:
		req.ring_type = ring_type;
		/*
		 * TODO: Some HWRM versions crash with
		 * HWRM_RING_ALLOC_INPUT_INT_MODE_POLL
		 */
		req.int_mode = HWRM_RING_ALLOC_INPUT_INT_MODE_MSIX;
		req.length = rte_cpu_to_le_32(ring->ring_size);
		break;
	default:
		RTE_LOG(ERR, PMD, "hwrm alloc invalid ring type %d\n",
			ring_type);
		return -1;
	}

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	/* Decode errors manually so each ring type logs its own message. */
	if (rc || resp->error_code) {
		if (rc == 0 && resp->error_code)
			rc = rte_le_to_cpu_16(resp->error_code);
		switch (ring_type) {
		case HWRM_RING_FREE_INPUT_RING_TYPE_CMPL:
			RTE_LOG(ERR, PMD,
				"hwrm_ring_alloc cp failed. rc:%d\n", rc);
			return rc;
		case HWRM_RING_FREE_INPUT_RING_TYPE_RX:
			RTE_LOG(ERR, PMD,
				"hwrm_ring_alloc rx failed. rc:%d\n", rc);
			return rc;
		case HWRM_RING_FREE_INPUT_RING_TYPE_TX:
			RTE_LOG(ERR, PMD,
				"hwrm_ring_alloc tx failed. rc:%d\n", rc);
			return rc;
		default:
			RTE_LOG(ERR, PMD, "Invalid ring. rc:%d\n", rc);
			return rc;
		}
	}

	ring->fw_ring_id = rte_le_to_cpu_16(resp->ring_id);
	return rc;
}
670
/*
 * Free the firmware ring identified by ring->fw_ring_id.
 *
 * Errors are decoded inline (not via HWRM_CHECK_RESULT) so a
 * per-ring-type message can be logged; returns 0 on success or the
 * HWRM error code.
 */
int bnxt_hwrm_ring_free(struct bnxt *bp,
			struct bnxt_ring *ring, uint32_t ring_type)
{
	int rc;
	struct hwrm_ring_free_input req = {.req_type = 0 };
	struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, RING_FREE, -1, resp);

	req.ring_type = ring_type;
	req.ring_id = rte_cpu_to_le_16(ring->fw_ring_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	/* Decode errors manually so each ring type logs its own message. */
	if (rc || resp->error_code) {
		if (rc == 0 && resp->error_code)
			rc = rte_le_to_cpu_16(resp->error_code);

		switch (ring_type) {
		case HWRM_RING_FREE_INPUT_RING_TYPE_CMPL:
			RTE_LOG(ERR, PMD, "hwrm_ring_free cp failed. rc:%d\n",
				rc);
			return rc;
		case HWRM_RING_FREE_INPUT_RING_TYPE_RX:
			RTE_LOG(ERR, PMD, "hwrm_ring_free rx failed. rc:%d\n",
				rc);
			return rc;
		case HWRM_RING_FREE_INPUT_RING_TYPE_TX:
			RTE_LOG(ERR, PMD, "hwrm_ring_free tx failed. rc:%d\n",
				rc);
			return rc;
		default:
			RTE_LOG(ERR, PMD, "Invalid ring, rc:%d\n", rc);
			return rc;
		}
	}
	return 0;
}
709
710 int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp, unsigned int idx)
711 {
712         int rc = 0;
713         struct hwrm_ring_grp_alloc_input req = {.req_type = 0 };
714         struct hwrm_ring_grp_alloc_output *resp = bp->hwrm_cmd_resp_addr;
715
716         HWRM_PREP(req, RING_GRP_ALLOC, -1, resp);
717
718         req.cr = rte_cpu_to_le_16(bp->grp_info[idx].cp_fw_ring_id);
719         req.rr = rte_cpu_to_le_16(bp->grp_info[idx].rx_fw_ring_id);
720         req.ar = rte_cpu_to_le_16(bp->grp_info[idx].ag_fw_ring_id);
721         req.sc = rte_cpu_to_le_16(bp->grp_info[idx].fw_stats_ctx);
722
723         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
724
725         HWRM_CHECK_RESULT;
726
727         bp->grp_info[idx].fw_grp_id =
728             rte_le_to_cpu_16(resp->ring_group_id);
729
730         return rc;
731 }
732
733 int bnxt_hwrm_ring_grp_free(struct bnxt *bp, unsigned int idx)
734 {
735         int rc;
736         struct hwrm_ring_grp_free_input req = {.req_type = 0 };
737         struct hwrm_ring_grp_free_output *resp = bp->hwrm_cmd_resp_addr;
738
739         HWRM_PREP(req, RING_GRP_FREE, -1, resp);
740
741         req.ring_group_id = rte_cpu_to_le_16(bp->grp_info[idx].fw_grp_id);
742
743         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
744
745         HWRM_CHECK_RESULT;
746
747         bp->grp_info[idx].fw_grp_id = INVALID_HW_RING_ID;
748         return rc;
749 }
750
751 int bnxt_hwrm_stat_clear(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
752 {
753         int rc = 0;
754         struct hwrm_stat_ctx_clr_stats_input req = {.req_type = 0 };
755         struct hwrm_stat_ctx_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;
756
757         HWRM_PREP(req, STAT_CTX_CLR_STATS, -1, resp);
758
759         if (cpr->hw_stats_ctx_id == (uint32_t)HWRM_NA_SIGNATURE)
760                 return rc;
761
762         req.stat_ctx_id = rte_cpu_to_le_16(cpr->hw_stats_ctx_id);
763         req.seq_id = rte_cpu_to_le_16(bp->hwrm_cmd_seq++);
764
765         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
766
767         HWRM_CHECK_RESULT;
768
769         return rc;
770 }
771
772 int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp,
773                              struct bnxt_cp_ring_info *cpr, unsigned int idx)
774 {
775         int rc;
776         struct hwrm_stat_ctx_alloc_input req = {.req_type = 0 };
777         struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr;
778
779         HWRM_PREP(req, STAT_CTX_ALLOC, -1, resp);
780
781         req.update_period_ms = rte_cpu_to_le_32(1000);
782
783         req.seq_id = rte_cpu_to_le_16(bp->hwrm_cmd_seq++);
784         req.stats_dma_addr =
785             rte_cpu_to_le_64(cpr->hw_stats_map);
786
787         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
788
789         HWRM_CHECK_RESULT;
790
791         cpr->hw_stats_ctx_id = rte_le_to_cpu_16(resp->stat_ctx_id);
792         //Tx rings don't need grp_info entry. It is a Rx only attribute.
793         if (idx)
794                 bp->grp_info[idx].fw_stats_ctx = cpr->hw_stats_ctx_id;
795
796         return rc;
797 }
798
799 int bnxt_hwrm_stat_ctx_free(struct bnxt *bp,
800                             struct bnxt_cp_ring_info *cpr, unsigned int idx)
801 {
802         int rc;
803         struct hwrm_stat_ctx_free_input req = {.req_type = 0 };
804         struct hwrm_stat_ctx_free_output *resp = bp->hwrm_cmd_resp_addr;
805
806         HWRM_PREP(req, STAT_CTX_FREE, -1, resp);
807
808         req.stat_ctx_id = rte_cpu_to_le_16(cpr->hw_stats_ctx_id);
809         req.seq_id = rte_cpu_to_le_16(bp->hwrm_cmd_seq++);
810
811         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
812
813         HWRM_CHECK_RESULT;
814
815         cpr->hw_stats_ctx_id = HWRM_NA_SIGNATURE;
816         //Tx rings don't have a grp_info entry. It is a Rx only attribute.
817         if (idx)
818                 bp->grp_info[idx].fw_stats_ctx = cpr->hw_stats_ctx_id;
819
820         return rc;
821 }
822
/*
 * Allocate a VNIC in firmware and record the ring groups starting at
 * vnic->start_grp_id in vnic->fw_grp_ids[].  On success the firmware
 * VNIC id is stored in vnic->fw_vnic_id.
 */
int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
	int rc = 0, i, j;
	struct hwrm_vnic_alloc_input req = { 0 };
	struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr;

	/* map ring groups to this vnic */
	for (i = vnic->start_grp_id, j = 0; i < vnic->end_grp_id; i++, j++) {
		if (bp->grp_info[i].fw_grp_id == (uint16_t)HWRM_NA_SIGNATURE) {
			/*
			 * NOTE(review): the loop treats end_grp_id as
			 * exclusive, but the logged "req" count below adds 1
			 * (inclusive) -- one of the two looks off by one;
			 * confirm against the grp_id convention used by the
			 * callers.
			 */
			RTE_LOG(ERR, PMD,
				"Not enough ring groups avail:%x req:%x\n", j,
				(vnic->end_grp_id - vnic->start_grp_id) + 1);
			break;
		}
		vnic->fw_grp_ids[j] = bp->grp_info[i].fw_grp_id;
	}

	/* No RSS/COS/LB context yet; one is allocated separately later. */
	vnic->fw_rss_cos_lb_ctx = (uint16_t)HWRM_NA_SIGNATURE;
	vnic->ctx_is_rss_cos_lb = HW_CONTEXT_NONE;

	HWRM_PREP(req, VNIC_ALLOC, -1, resp);

	if (vnic->func_default)
		req.flags =
			rte_cpu_to_le_32(HWRM_VNIC_ALLOC_INPUT_FLAGS_DEFAULT);
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	vnic->fw_vnic_id = rte_le_to_cpu_16(resp->vnic_id);
	RTE_LOG(DEBUG, PMD, "VNIC ID %x\n", vnic->fw_vnic_id);
	return rc;
}
856
857 int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic)
858 {
859         int rc = 0;
860         struct hwrm_vnic_cfg_input req = {.req_type = 0 };
861         struct hwrm_vnic_cfg_output *resp = bp->hwrm_cmd_resp_addr;
862
863         if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
864                 RTE_LOG(DEBUG, PMD, "VNIC ID %x\n", vnic->fw_vnic_id);
865                 return rc;
866         }
867
868         HWRM_PREP(req, VNIC_CFG, -1, resp);
869
870         /* Only RSS support for now TBD: COS & LB */
871         req.enables =
872             rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_ENABLES_DFLT_RING_GRP |
873                              HWRM_VNIC_CFG_INPUT_ENABLES_RSS_RULE |
874                              HWRM_VNIC_CFG_INPUT_ENABLES_MRU);
875         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
876         req.dflt_ring_grp =
877                 rte_cpu_to_le_16(bp->grp_info[vnic->start_grp_id].fw_grp_id);
878         req.rss_rule = rte_cpu_to_le_16(vnic->fw_rss_cos_lb_ctx);
879         req.cos_rule = rte_cpu_to_le_16(0xffff);
880         req.lb_rule = rte_cpu_to_le_16(0xffff);
881         req.mru = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
882                                    ETHER_CRC_LEN + VLAN_TAG_SIZE);
883         /* Configure default VNIC only once. */
884         if (vnic->func_default && !(bp->flags & BNXT_FLAG_DFLT_VNIC_SET)) {
885                 req.flags = 1;
886                 bp->flags |= BNXT_FLAG_DFLT_VNIC_SET;
887         }
888         if (vnic->vlan_strip)
889                 req.flags |=
890                     rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_VLAN_STRIP_MODE);
891
892         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
893
894         HWRM_CHECK_RESULT;
895
896         return rc;
897 }
898
899 int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
900 {
901         int rc = 0;
902         struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {.req_type = 0 };
903         struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp =
904                                                 bp->hwrm_cmd_resp_addr;
905
906         HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_ALLOC, -1, resp);
907
908         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
909
910         HWRM_CHECK_RESULT;
911
912         vnic->fw_rss_cos_lb_ctx = rte_le_to_cpu_16(resp->rss_cos_lb_ctx_id);
913         RTE_LOG(DEBUG, PMD, "VNIC RSS Rule %x\n", vnic->fw_rss_cos_lb_ctx);
914
915         return rc;
916 }
917
918 int bnxt_hwrm_vnic_ctx_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
919 {
920         int rc = 0;
921         struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {.req_type = 0 };
922         struct hwrm_vnic_rss_cos_lb_ctx_free_output *resp =
923                                                 bp->hwrm_cmd_resp_addr;
924
925         if (vnic->fw_rss_cos_lb_ctx == 0xffff) {
926                 RTE_LOG(DEBUG, PMD,
927                         "VNIC RSS Rule %x\n", vnic->fw_rss_cos_lb_ctx);
928                 return rc;
929         }
930
931         HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_FREE, -1, resp);
932
933         req.rss_cos_lb_ctx_id = rte_cpu_to_le_16(vnic->fw_rss_cos_lb_ctx);
934
935         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
936
937         HWRM_CHECK_RESULT;
938
939         vnic->fw_rss_cos_lb_ctx = INVALID_HW_RING_ID;
940
941         return rc;
942 }
943
944 int bnxt_hwrm_vnic_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
945 {
946         int rc = 0;
947         struct hwrm_vnic_free_input req = {.req_type = 0 };
948         struct hwrm_vnic_free_output *resp = bp->hwrm_cmd_resp_addr;
949
950         if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
951                 RTE_LOG(DEBUG, PMD, "VNIC FREE ID %x\n", vnic->fw_vnic_id);
952                 return rc;
953         }
954
955         HWRM_PREP(req, VNIC_FREE, -1, resp);
956
957         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
958
959         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
960
961         HWRM_CHECK_RESULT;
962
963         vnic->fw_vnic_id = INVALID_HW_RING_ID;
964         /* Configure default VNIC again if necessary. */
965         if (vnic->func_default && (bp->flags & BNXT_FLAG_DFLT_VNIC_SET))
966                 bp->flags &= ~BNXT_FLAG_DFLT_VNIC_SET;
967
968         return rc;
969 }
970
971 int bnxt_hwrm_vnic_rss_cfg(struct bnxt *bp,
972                            struct bnxt_vnic_info *vnic)
973 {
974         int rc = 0;
975         struct hwrm_vnic_rss_cfg_input req = {.req_type = 0 };
976         struct hwrm_vnic_rss_cfg_output *resp = bp->hwrm_cmd_resp_addr;
977
978         HWRM_PREP(req, VNIC_RSS_CFG, -1, resp);
979
980         req.hash_type = rte_cpu_to_le_32(vnic->hash_type);
981
982         req.ring_grp_tbl_addr =
983             rte_cpu_to_le_64(vnic->rss_table_dma_addr);
984         req.hash_key_tbl_addr =
985             rte_cpu_to_le_64(vnic->rss_hash_key_dma_addr);
986         req.rss_ctx_idx = rte_cpu_to_le_16(vnic->fw_rss_cos_lb_ctx);
987
988         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
989
990         HWRM_CHECK_RESULT;
991
992         return rc;
993 }
994
995 /*
996  * HWRM utility functions
997  */
998
999 int bnxt_clear_all_hwrm_stat_ctxs(struct bnxt *bp)
1000 {
1001         unsigned int i;
1002         int rc = 0;
1003
1004         for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
1005                 struct bnxt_tx_queue *txq;
1006                 struct bnxt_rx_queue *rxq;
1007                 struct bnxt_cp_ring_info *cpr;
1008
1009                 if (i >= bp->rx_cp_nr_rings) {
1010                         txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
1011                         cpr = txq->cp_ring;
1012                 } else {
1013                         rxq = bp->rx_queues[i];
1014                         cpr = rxq->cp_ring;
1015                 }
1016
1017                 rc = bnxt_hwrm_stat_clear(bp, cpr);
1018                 if (rc)
1019                         return rc;
1020         }
1021         return 0;
1022 }
1023
1024 int bnxt_free_all_hwrm_stat_ctxs(struct bnxt *bp)
1025 {
1026         int rc;
1027         unsigned int i;
1028         struct bnxt_cp_ring_info *cpr;
1029
1030         for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
1031                 unsigned int idx = i + 1;
1032
1033                 if (i >= bp->rx_cp_nr_rings) {
1034                         cpr = bp->tx_queues[i - bp->rx_cp_nr_rings]->cp_ring;
1035                         //Tx rings don't have a grp_info entry.
1036                         idx = 0;
1037                 } else {
1038                         cpr = bp->rx_queues[i]->cp_ring;
1039                 }
1040                 if (cpr->hw_stats_ctx_id != HWRM_NA_SIGNATURE) {
1041                         rc = bnxt_hwrm_stat_ctx_free(bp, cpr, idx);
1042                         if (rc)
1043                                 return rc;
1044                 }
1045         }
1046         return 0;
1047 }
1048
1049 int bnxt_alloc_all_hwrm_stat_ctxs(struct bnxt *bp)
1050 {
1051         unsigned int i;
1052         int rc = 0;
1053
1054         for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
1055                 struct bnxt_tx_queue *txq;
1056                 struct bnxt_rx_queue *rxq;
1057                 struct bnxt_cp_ring_info *cpr;
1058                 unsigned int idx = i + 1;
1059
1060                 if (i >= bp->rx_cp_nr_rings) {
1061                         txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
1062                         cpr = txq->cp_ring;
1063                         //Tx rings don't need grp_info entry.
1064                         idx = 0;
1065                 } else {
1066                         rxq = bp->rx_queues[i];
1067                         cpr = rxq->cp_ring;
1068                 }
1069
1070                 rc = bnxt_hwrm_stat_ctx_alloc(bp, cpr, idx);
1071
1072                 if (rc)
1073                         return rc;
1074         }
1075         return rc;
1076 }
1077
1078 int bnxt_free_all_hwrm_ring_grps(struct bnxt *bp)
1079 {
1080         uint16_t i;
1081         uint32_t rc = 0;
1082
1083         for (i = 0; i < bp->rx_cp_nr_rings; i++) {
1084                 unsigned int idx = i + 1;
1085
1086                 if (bp->grp_info[idx].fw_grp_id == INVALID_HW_RING_ID) {
1087                         RTE_LOG(ERR, PMD,
1088                                 "Attempt to free invalid ring group %d\n",
1089                                 idx);
1090                         continue;
1091                 }
1092
1093                 rc = bnxt_hwrm_ring_grp_free(bp, idx);
1094
1095                 if (rc)
1096                         return rc;
1097         }
1098         return rc;
1099 }
1100
1101 static void bnxt_free_cp_ring(struct bnxt *bp,
1102                               struct bnxt_cp_ring_info *cpr)
1103 {
1104         struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
1105
1106         bnxt_hwrm_ring_free(bp, cp_ring,
1107                         HWRM_RING_FREE_INPUT_RING_TYPE_CMPL);
1108         cp_ring->fw_ring_id = INVALID_HW_RING_ID;
1109         memset(cpr->cp_desc_ring, 0, cpr->cp_ring_struct->ring_size *
1110                         sizeof(*cpr->cp_desc_ring));
1111         cpr->cp_raw_cons = 0;
1112 }
1113
/*
 * Free all Tx and Rx rings (and their completion rings) in firmware and
 * reset the corresponding host-side ring state.  The default completion
 * ring is freed last.  Always returns 0.
 */
int bnxt_free_all_hwrm_rings(struct bnxt *bp)
{
	unsigned int i;
	int rc = 0;

	/* Tx rings and their completion rings. */
	for (i = 0; i < bp->tx_cp_nr_rings; i++) {
		struct bnxt_tx_queue *txq = bp->tx_queues[i];
		struct bnxt_tx_ring_info *txr = txq->tx_ring;
		struct bnxt_ring *ring = txr->tx_ring_struct;
		struct bnxt_cp_ring_info *cpr = txq->cp_ring;

		if (ring->fw_ring_id != INVALID_HW_RING_ID) {
			bnxt_hwrm_ring_free(bp, ring,
					HWRM_RING_FREE_INPUT_RING_TYPE_TX);
			ring->fw_ring_id = INVALID_HW_RING_ID;
			/* Wipe descriptor and buffer rings so a later
			 * re-allocation starts from a clean state.
			 */
			memset(txr->tx_desc_ring, 0,
					txr->tx_ring_struct->ring_size *
					sizeof(*txr->tx_desc_ring));
			memset(txr->tx_buf_ring, 0,
					txr->tx_ring_struct->ring_size *
					sizeof(*txr->tx_buf_ring));
			txr->tx_prod = 0;
			txr->tx_cons = 0;
		}
		if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID)
			bnxt_free_cp_ring(bp, cpr);
	}

	/* Rx rings; ring group entry i + 1 belongs to Rx ring i. */
	for (i = 0; i < bp->rx_cp_nr_rings; i++) {
		struct bnxt_rx_queue *rxq = bp->rx_queues[i];
		struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
		struct bnxt_ring *ring = rxr->rx_ring_struct;
		struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
		unsigned int idx = i + 1;

		if (ring->fw_ring_id != INVALID_HW_RING_ID) {
			bnxt_hwrm_ring_free(bp, ring,
					HWRM_RING_FREE_INPUT_RING_TYPE_RX);
			ring->fw_ring_id = INVALID_HW_RING_ID;
			bp->grp_info[idx].rx_fw_ring_id = INVALID_HW_RING_ID;
			memset(rxr->rx_desc_ring, 0,
					rxr->rx_ring_struct->ring_size *
					sizeof(*rxr->rx_desc_ring));
			memset(rxr->rx_buf_ring, 0,
					rxr->rx_ring_struct->ring_size *
					sizeof(*rxr->rx_buf_ring));
			rxr->rx_prod = 0;
		}
		/* The completion ring is invalidated even when the Rx ring
		 * itself was never allocated in firmware.
		 */
		if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID)
			bnxt_free_cp_ring(bp, cpr);
		bp->grp_info[idx].cp_fw_ring_id = INVALID_HW_RING_ID;
	}

	/* Default completion ring */
	{
		struct bnxt_cp_ring_info *cpr = bp->def_cp_ring;

		if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID)
			bnxt_free_cp_ring(bp, cpr);
		bp->grp_info[0].cp_fw_ring_id = INVALID_HW_RING_ID;
	}

	return rc;
}
1178
1179 int bnxt_alloc_all_hwrm_ring_grps(struct bnxt *bp)
1180 {
1181         uint16_t i;
1182         uint32_t rc = 0;
1183
1184         for (i = 0; i < bp->rx_cp_nr_rings; i++) {
1185                 unsigned int idx = i + 1;
1186
1187                 if (bp->grp_info[idx].cp_fw_ring_id == INVALID_HW_RING_ID ||
1188                     bp->grp_info[idx].rx_fw_ring_id == INVALID_HW_RING_ID)
1189                         continue;
1190
1191                 rc = bnxt_hwrm_ring_grp_alloc(bp, idx);
1192
1193                 if (rc)
1194                         return rc;
1195         }
1196         return rc;
1197 }
1198
1199 void bnxt_free_hwrm_resources(struct bnxt *bp)
1200 {
1201         /* Release memzone */
1202         rte_free(bp->hwrm_cmd_resp_addr);
1203         bp->hwrm_cmd_resp_addr = NULL;
1204         bp->hwrm_cmd_resp_dma_addr = 0;
1205 }
1206
1207 int bnxt_alloc_hwrm_resources(struct bnxt *bp)
1208 {
1209         struct rte_pci_device *pdev = bp->pdev;
1210         char type[RTE_MEMZONE_NAMESIZE];
1211
1212         sprintf(type, "bnxt_hwrm_%04x:%02x:%02x:%02x", pdev->addr.domain,
1213                 pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
1214         bp->max_req_len = HWRM_MAX_REQ_LEN;
1215         bp->max_resp_len = HWRM_MAX_RESP_LEN;
1216         bp->hwrm_cmd_resp_addr = rte_malloc(type, bp->max_resp_len, 0);
1217         if (bp->hwrm_cmd_resp_addr == NULL)
1218                 return -ENOMEM;
1219         bp->hwrm_cmd_resp_dma_addr =
1220                 rte_malloc_virt2phy(bp->hwrm_cmd_resp_addr);
1221         rte_spinlock_init(&bp->hwrm_lock);
1222
1223         return 0;
1224 }
1225
1226 int bnxt_clear_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1227 {
1228         struct bnxt_filter_info *filter;
1229         int rc = 0;
1230
1231         STAILQ_FOREACH(filter, &vnic->filter, next) {
1232                 rc = bnxt_hwrm_clear_filter(bp, filter);
1233                 if (rc)
1234                         break;
1235         }
1236         return rc;
1237 }
1238
1239 int bnxt_set_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1240 {
1241         struct bnxt_filter_info *filter;
1242         int rc = 0;
1243
1244         STAILQ_FOREACH(filter, &vnic->filter, next) {
1245                 rc = bnxt_hwrm_set_filter(bp, vnic, filter);
1246                 if (rc)
1247                         break;
1248         }
1249         return rc;
1250 }
1251
1252 void bnxt_free_all_hwrm_resources(struct bnxt *bp)
1253 {
1254         struct bnxt_vnic_info *vnic;
1255         unsigned int i;
1256
1257         if (bp->vnic_info == NULL)
1258                 return;
1259
1260         vnic = &bp->vnic_info[0];
1261         bnxt_hwrm_cfa_l2_clear_rx_mask(bp, vnic);
1262
1263         /* VNIC resources */
1264         for (i = 0; i < bp->nr_vnics; i++) {
1265                 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
1266
1267                 bnxt_clear_hwrm_vnic_filters(bp, vnic);
1268
1269                 bnxt_hwrm_vnic_ctx_free(bp, vnic);
1270                 bnxt_hwrm_vnic_free(bp, vnic);
1271
1272                 rte_free(vnic->fw_grp_ids);
1273         }
1274         /* Ring resources */
1275         bnxt_free_all_hwrm_rings(bp);
1276         bnxt_free_all_hwrm_ring_grps(bp);
1277         bnxt_free_all_hwrm_stat_ctxs(bp);
1278 }
1279
1280 static uint16_t bnxt_parse_eth_link_duplex(uint32_t conf_link_speed)
1281 {
1282         uint8_t hw_link_duplex = HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;
1283
1284         if ((conf_link_speed & ETH_LINK_SPEED_FIXED) == ETH_LINK_SPEED_AUTONEG)
1285                 return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;
1286
1287         switch (conf_link_speed) {
1288         case ETH_LINK_SPEED_10M_HD:
1289         case ETH_LINK_SPEED_100M_HD:
1290                 return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF;
1291         }
1292         return hw_link_duplex;
1293 }
1294
1295 static uint16_t bnxt_check_eth_link_autoneg(uint32_t conf_link)
1296 {
1297         return (conf_link & ETH_LINK_SPEED_FIXED) ? 0 : 1;
1298 }
1299
1300 static uint16_t bnxt_parse_eth_link_speed(uint32_t conf_link_speed)
1301 {
1302         uint16_t eth_link_speed = 0;
1303
1304         if (conf_link_speed == ETH_LINK_SPEED_AUTONEG)
1305                 return ETH_LINK_SPEED_AUTONEG;
1306
1307         switch (conf_link_speed & ~ETH_LINK_SPEED_FIXED) {
1308         case ETH_LINK_SPEED_100M:
1309         case ETH_LINK_SPEED_100M_HD:
1310                 eth_link_speed =
1311                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_100MB;
1312                 break;
1313         case ETH_LINK_SPEED_1G:
1314                 eth_link_speed =
1315                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_1GB;
1316                 break;
1317         case ETH_LINK_SPEED_2_5G:
1318                 eth_link_speed =
1319                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_2_5GB;
1320                 break;
1321         case ETH_LINK_SPEED_10G:
1322                 eth_link_speed =
1323                         HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_10GB;
1324                 break;
1325         case ETH_LINK_SPEED_20G:
1326                 eth_link_speed =
1327                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_20GB;
1328                 break;
1329         case ETH_LINK_SPEED_25G:
1330                 eth_link_speed =
1331                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_25GB;
1332                 break;
1333         case ETH_LINK_SPEED_40G:
1334                 eth_link_speed =
1335                         HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_40GB;
1336                 break;
1337         case ETH_LINK_SPEED_50G:
1338                 eth_link_speed =
1339                         HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_50GB;
1340                 break;
1341         default:
1342                 RTE_LOG(ERR, PMD,
1343                         "Unsupported link speed %d; default to AUTO\n",
1344                         conf_link_speed);
1345                 break;
1346         }
1347         return eth_link_speed;
1348 }
1349
/* All link speeds the bnxt PMD can advertise (DPDK ETH_LINK_SPEED_* bits). */
#define BNXT_SUPPORTED_SPEEDS (ETH_LINK_SPEED_100M | ETH_LINK_SPEED_100M_HD | \
		ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G | \
		ETH_LINK_SPEED_10G | ETH_LINK_SPEED_20G | ETH_LINK_SPEED_25G | \
		ETH_LINK_SPEED_40G | ETH_LINK_SPEED_50G)
1354
1355 static int bnxt_valid_link_speed(uint32_t link_speed, uint8_t port_id)
1356 {
1357         uint32_t one_speed;
1358
1359         if (link_speed == ETH_LINK_SPEED_AUTONEG)
1360                 return 0;
1361
1362         if (link_speed & ETH_LINK_SPEED_FIXED) {
1363                 one_speed = link_speed & ~ETH_LINK_SPEED_FIXED;
1364
1365                 if (one_speed & (one_speed - 1)) {
1366                         RTE_LOG(ERR, PMD,
1367                                 "Invalid advertised speeds (%u) for port %u\n",
1368                                 link_speed, port_id);
1369                         return -EINVAL;
1370                 }
1371                 if ((one_speed & BNXT_SUPPORTED_SPEEDS) != one_speed) {
1372                         RTE_LOG(ERR, PMD,
1373                                 "Unsupported advertised speed (%u) for port %u\n",
1374                                 link_speed, port_id);
1375                         return -EINVAL;
1376                 }
1377         } else {
1378                 if (!(link_speed & BNXT_SUPPORTED_SPEEDS)) {
1379                         RTE_LOG(ERR, PMD,
1380                                 "Unsupported advertised speeds (%u) for port %u\n",
1381                                 link_speed, port_id);
1382                         return -EINVAL;
1383                 }
1384         }
1385         return 0;
1386 }
1387
1388 static uint16_t
1389 bnxt_parse_eth_link_speed_mask(struct bnxt *bp, uint32_t link_speed)
1390 {
1391         uint16_t ret = 0;
1392
1393         if (link_speed == ETH_LINK_SPEED_AUTONEG) {
1394                 if (bp->link_info.support_speeds)
1395                         return bp->link_info.support_speeds;
1396                 link_speed = BNXT_SUPPORTED_SPEEDS;
1397         }
1398
1399         if (link_speed & ETH_LINK_SPEED_100M)
1400                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
1401         if (link_speed & ETH_LINK_SPEED_100M_HD)
1402                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
1403         if (link_speed & ETH_LINK_SPEED_1G)
1404                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_1GB;
1405         if (link_speed & ETH_LINK_SPEED_2_5G)
1406                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_2_5GB;
1407         if (link_speed & ETH_LINK_SPEED_10G)
1408                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_10GB;
1409         if (link_speed & ETH_LINK_SPEED_20G)
1410                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_20GB;
1411         if (link_speed & ETH_LINK_SPEED_25G)
1412                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_25GB;
1413         if (link_speed & ETH_LINK_SPEED_40G)
1414                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_40GB;
1415         if (link_speed & ETH_LINK_SPEED_50G)
1416                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_50GB;
1417         return ret;
1418 }
1419
1420 static uint32_t bnxt_parse_hw_link_speed(uint16_t hw_link_speed)
1421 {
1422         uint32_t eth_link_speed = ETH_SPEED_NUM_NONE;
1423
1424         switch (hw_link_speed) {
1425         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100MB:
1426                 eth_link_speed = ETH_SPEED_NUM_100M;
1427                 break;
1428         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_1GB:
1429                 eth_link_speed = ETH_SPEED_NUM_1G;
1430                 break;
1431         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2_5GB:
1432                 eth_link_speed = ETH_SPEED_NUM_2_5G;
1433                 break;
1434         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10GB:
1435                 eth_link_speed = ETH_SPEED_NUM_10G;
1436                 break;
1437         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_20GB:
1438                 eth_link_speed = ETH_SPEED_NUM_20G;
1439                 break;
1440         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_25GB:
1441                 eth_link_speed = ETH_SPEED_NUM_25G;
1442                 break;
1443         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_40GB:
1444                 eth_link_speed = ETH_SPEED_NUM_40G;
1445                 break;
1446         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_50GB:
1447                 eth_link_speed = ETH_SPEED_NUM_50G;
1448                 break;
1449         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2GB:
1450         default:
1451                 RTE_LOG(ERR, PMD, "HWRM link speed %d not defined\n",
1452                         hw_link_speed);
1453                 break;
1454         }
1455         return eth_link_speed;
1456 }
1457
1458 static uint16_t bnxt_parse_hw_link_duplex(uint16_t hw_link_duplex)
1459 {
1460         uint16_t eth_link_duplex = ETH_LINK_FULL_DUPLEX;
1461
1462         switch (hw_link_duplex) {
1463         case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH:
1464         case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_FULL:
1465                 eth_link_duplex = ETH_LINK_FULL_DUPLEX;
1466                 break;
1467         case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF:
1468                 eth_link_duplex = ETH_LINK_HALF_DUPLEX;
1469                 break;
1470         default:
1471                 RTE_LOG(ERR, PMD, "HWRM link duplex %d not defined\n",
1472                         hw_link_duplex);
1473                 break;
1474         }
1475         return eth_link_duplex;
1476 }
1477
1478 int bnxt_get_hwrm_link_config(struct bnxt *bp, struct rte_eth_link *link)
1479 {
1480         int rc = 0;
1481         struct bnxt_link_info *link_info = &bp->link_info;
1482
1483         rc = bnxt_hwrm_port_phy_qcfg(bp, link_info);
1484         if (rc) {
1485                 RTE_LOG(ERR, PMD,
1486                         "Get link config failed with rc %d\n", rc);
1487                 goto exit;
1488         }
1489         if (link_info->link_speed)
1490                 link->link_speed =
1491                         bnxt_parse_hw_link_speed(link_info->link_speed);
1492         else
1493                 link->link_speed = ETH_SPEED_NUM_NONE;
1494         link->link_duplex = bnxt_parse_hw_link_duplex(link_info->duplex);
1495         link->link_status = link_info->link_up;
1496         link->link_autoneg = link_info->auto_mode ==
1497                 HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_NONE ?
1498                 ETH_LINK_FIXED : ETH_LINK_AUTONEG;
1499 exit:
1500         return rc;
1501 }
1502
/*
 * Apply the link configuration from dev_conf to the PHY via HWRM.
 * When link_up is false only the link-down request is sent.  NPAR PFs
 * and VFs cannot change PHY settings and return 0 immediately.
 */
int bnxt_set_hwrm_link_config(struct bnxt *bp, bool link_up)
{
	int rc = 0;
	struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
	struct bnxt_link_info link_req;
	uint16_t speed, autoneg;

	if (BNXT_NPAR_PF(bp) || BNXT_VF(bp))
		return 0;

	rc = bnxt_valid_link_speed(dev_conf->link_speeds,
			bp->eth_dev->data->port_id);
	if (rc)
		goto error;

	memset(&link_req, 0, sizeof(link_req));
	link_req.link_up = link_up;
	if (!link_up)
		goto port_phy_cfg;

	autoneg = bnxt_check_eth_link_autoneg(dev_conf->link_speeds);
	speed = bnxt_parse_eth_link_speed(dev_conf->link_speeds);
	link_req.phy_flags = HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESET_PHY;
	/* Autoneg can be done only when the FW allows */
	if (autoneg == 1 && !(bp->link_info.auto_link_speed ||
				bp->link_info.force_link_speed)) {
		link_req.phy_flags |=
				HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESTART_AUTONEG;
		link_req.auto_link_speed_mask =
			bnxt_parse_eth_link_speed_mask(bp,
						       dev_conf->link_speeds);
	} else {
		/* Forced speed: BASE-T/TP PHYs cannot run forced, reject. */
		if (bp->link_info.phy_type ==
		    HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASET ||
		    bp->link_info.phy_type ==
		    HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASETE ||
		    bp->link_info.media_type ==
		    HWRM_PORT_PHY_QCFG_OUTPUT_MEDIA_TYPE_TP) {
			RTE_LOG(ERR, PMD, "10GBase-T devices must autoneg\n");
			return -EINVAL;
		}

		link_req.phy_flags |= HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE;
		/* If user wants a particular speed try that first. */
		if (speed)
			link_req.link_speed = speed;
		else if (bp->link_info.force_link_speed)
			link_req.link_speed = bp->link_info.force_link_speed;
		else
			link_req.link_speed = bp->link_info.auto_link_speed;
	}
	link_req.duplex = bnxt_parse_eth_link_duplex(dev_conf->link_speeds);
	/* Preserve the currently reported pause settings. */
	link_req.auto_pause = bp->link_info.auto_pause;
	link_req.force_pause = bp->link_info.force_pause;

port_phy_cfg:
	rc = bnxt_hwrm_port_phy_cfg(bp, &link_req);
	if (rc) {
		RTE_LOG(ERR, PMD,
			"Set link config failed with rc %d\n", rc);
	}

error:
	return rc;
}
1568
1569 /* JIRA 22088 */
1570 int bnxt_hwrm_func_qcfg(struct bnxt *bp)
1571 {
1572         struct hwrm_func_qcfg_input req = {0};
1573         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
1574         int rc = 0;
1575
1576         HWRM_PREP(req, FUNC_QCFG, -1, resp);
1577         req.fid = rte_cpu_to_le_16(0xffff);
1578
1579         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1580
1581         HWRM_CHECK_RESULT;
1582
1583         if (BNXT_VF(bp)) {
1584                 struct bnxt_vf_info *vf = &bp->vf;
1585
1586                 /* Hard Coded.. 0xfff VLAN ID mask */
1587                 vf->vlan = rte_le_to_cpu_16(resp->vlan) & 0xfff;
1588         }
1589
1590         switch (resp->port_partition_type) {
1591         case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_0:
1592         case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_5:
1593         case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR2_0:
1594                 bp->port_partition_type = resp->port_partition_type;
1595                 break;
1596         default:
1597                 bp->port_partition_type = 0;
1598                 break;
1599         }
1600
1601         return rc;
1602 }