8ff4c15d47e6c1dfc9710558c559a5df7cbff7f0
[deb_dpdk.git] / drivers / net / bnxt / bnxt_hwrm.c
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) Broadcom Limited.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Broadcom Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33
34 #include <rte_byteorder.h>
35 #include <rte_common.h>
36 #include <rte_cycles.h>
37 #include <rte_malloc.h>
38 #include <rte_memzone.h>
39 #include <rte_version.h>
40
41 #include "bnxt.h"
42 #include "bnxt_cpr.h"
43 #include "bnxt_filter.h"
44 #include "bnxt_hwrm.h"
45 #include "bnxt_rxq.h"
46 #include "bnxt_rxr.h"
47 #include "bnxt_ring.h"
48 #include "bnxt_txq.h"
49 #include "bnxt_txr.h"
50 #include "bnxt_vnic.h"
51 #include "hsi_struct_def_dpdk.h"
52
53 #define HWRM_CMD_TIMEOUT                2000
54
55 /*
56  * HWRM Functions (sent to HWRM)
57  * These are named bnxt_hwrm_*() and return -1 if bnxt_hwrm_send_message()
58  * fails (ie: a timeout), and a positive non-zero HWRM error code if the HWRM
59  * command was failed by the ChiMP.
60  */
61
/*
 * Write a fully-prepared HWRM request (see HWRM_PREP) into the BAR0
 * communication channel, ring the doorbell and busy-poll for completion.
 *
 * The caller MUST hold bp->hwrm_lock: this routine uses the single shared
 * request channel and the single response buffer at bp->hwrm_cmd_resp_addr.
 *
 * Returns 0 once a response with a valid key is seen, or -1 if no valid
 * response arrives within HWRM_CMD_TIMEOUT polls (600us apart).  HWRM-level
 * errors are NOT checked here; callers inspect resp->error_code themselves
 * (typically via HWRM_CHECK_RESULT).
 */
static int bnxt_hwrm_send_message_locked(struct bnxt *bp, void *msg,
					uint32_t msg_len)
{
	unsigned int i;
	struct input *req = msg;
	struct output *resp = bp->hwrm_cmd_resp_addr;
	uint32_t *data = msg;
	uint8_t *bar;
	uint8_t *valid;

	/* Write request msg to hwrm channel, one 32-bit word at a time */
	for (i = 0; i < msg_len; i += 4) {
		bar = (uint8_t *)bp->bar0 + i;
		*(volatile uint32_t *)bar = *data;
		data++;
	}

	/* Zero the rest of the request space so stale words of a previous,
	 * longer request cannot be interpreted as part of this one. */
	for (; i < bp->max_req_len; i += 4) {
		bar = (uint8_t *)bp->bar0 + i;
		*(volatile uint32_t *)bar = 0;
	}

	/* Ring channel doorbell (fixed offset 0x100 in BAR0) */
	bar = (uint8_t *)bp->bar0 + 0x100;
	*(volatile uint32_t *)bar = 1;

	/* Poll for the valid bit */
	for (i = 0; i < HWRM_CMD_TIMEOUT; i++) {
		/* Sanity check on the resp->resp_len before using it to
		 * index the response buffer */
		rte_rmb();
		if (resp->resp_len && resp->resp_len <=
				bp->max_resp_len) {
			/* Last byte of resp contains the valid key */
			valid = (uint8_t *)resp + resp->resp_len - 1;
			if (*valid == HWRM_RESP_VALID_KEY)
				break;
		}
		rte_delay_us(600);
	}

	if (i >= HWRM_CMD_TIMEOUT) {
		RTE_LOG(ERR, PMD, "Error sending msg %x\n",
			req->req_type);
		goto err_ret;
	}
	return 0;

err_ret:
	return -1;
}
113
114 static int bnxt_hwrm_send_message(struct bnxt *bp, void *msg, uint32_t msg_len)
115 {
116         int rc;
117
118         rte_spinlock_lock(&bp->hwrm_lock);
119         rc = bnxt_hwrm_send_message_locked(bp, msg, msg_len);
120         rte_spinlock_unlock(&bp->hwrm_lock);
121         return rc;
122 }
123
/*
 * Initialize the common header of HWRM request 'req' and clear the shared
 * response buffer.  Relies on locals named 'bp' being in scope.  Wrapped in
 * do { } while (0) so it expands to a single statement and is safe inside
 * an unbraced if/else.
 */
#define HWRM_PREP(req, type, cr, resp) \
	do { \
		memset(bp->hwrm_cmd_resp_addr, 0, bp->max_resp_len); \
		req.req_type = rte_cpu_to_le_16(HWRM_##type); \
		req.cmpl_ring = rte_cpu_to_le_16(cr); \
		req.seq_id = rte_cpu_to_le_16(bp->hwrm_cmd_seq++); \
		req.target_id = rte_cpu_to_le_16(0xffff); \
		req.resp_addr = rte_cpu_to_le_64(bp->hwrm_cmd_resp_dma_addr); \
	} while (0)
131
/*
 * Check the outcome of an HWRM command: returns from the enclosing function
 * with the transport rc on send failure, or with the (le16) HWRM error code
 * if the firmware rejected the command.  Relies on locals named 'rc' and
 * 'resp' being in scope.  Wrapped in do { } while (0) so the trailing
 * semicolon at the use site does not create an empty statement that would
 * break if/else pairing.
 */
#define HWRM_CHECK_RESULT \
	do { \
		if (rc) { \
			RTE_LOG(ERR, PMD, "%s failed rc:%d\n", \
				__func__, rc); \
			return rc; \
		} \
		if (resp->error_code) { \
			rc = rte_le_to_cpu_16(resp->error_code); \
			RTE_LOG(ERR, PMD, "%s error %d\n", __func__, rc); \
			return rc; \
		} \
	} while (0)
145
146 int bnxt_hwrm_cfa_l2_clear_rx_mask(struct bnxt *bp, struct bnxt_vnic_info *vnic)
147 {
148         int rc = 0;
149         struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
150         struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;
151
152         HWRM_PREP(req, CFA_L2_SET_RX_MASK, -1, resp);
153         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
154         req.mask = 0;
155
156         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
157
158         HWRM_CHECK_RESULT;
159
160         return rc;
161 }
162
163 int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, struct bnxt_vnic_info *vnic)
164 {
165         int rc = 0;
166         struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
167         struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;
168         uint32_t mask = 0;
169
170         HWRM_PREP(req, CFA_L2_SET_RX_MASK, -1, resp);
171         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
172
173         /* FIXME add multicast flag, when multicast adding options is supported
174          * by ethtool.
175          */
176         if (vnic->flags & BNXT_VNIC_INFO_PROMISC)
177                 mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_PROMISCUOUS;
178         if (vnic->flags & BNXT_VNIC_INFO_ALLMULTI)
179                 mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ALL_MCAST;
180         req.mask = rte_cpu_to_le_32(mask);
181
182         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
183
184         HWRM_CHECK_RESULT;
185
186         return rc;
187 }
188
189 int bnxt_hwrm_clear_filter(struct bnxt *bp,
190                            struct bnxt_filter_info *filter)
191 {
192         int rc = 0;
193         struct hwrm_cfa_l2_filter_free_input req = {.req_type = 0 };
194         struct hwrm_cfa_l2_filter_free_output *resp = bp->hwrm_cmd_resp_addr;
195
196         if (filter->fw_l2_filter_id == UINT64_MAX)
197                 return 0;
198
199         HWRM_PREP(req, CFA_L2_FILTER_FREE, -1, resp);
200
201         req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);
202
203         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
204
205         HWRM_CHECK_RESULT;
206
207         filter->fw_l2_filter_id = -1;
208
209         return 0;
210 }
211
212 int bnxt_hwrm_set_filter(struct bnxt *bp,
213                          struct bnxt_vnic_info *vnic,
214                          struct bnxt_filter_info *filter)
215 {
216         int rc = 0;
217         struct hwrm_cfa_l2_filter_alloc_input req = {.req_type = 0 };
218         struct hwrm_cfa_l2_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
219         uint32_t enables = 0;
220
221         if (filter->fw_l2_filter_id != UINT64_MAX)
222                 bnxt_hwrm_clear_filter(bp, filter);
223
224         HWRM_PREP(req, CFA_L2_FILTER_ALLOC, -1, resp);
225
226         req.flags = rte_cpu_to_le_32(filter->flags);
227
228         enables = filter->enables |
229               HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_DST_ID;
230         req.dst_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
231
232         if (enables &
233             HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR)
234                 memcpy(req.l2_addr, filter->l2_addr,
235                        ETHER_ADDR_LEN);
236         if (enables &
237             HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK)
238                 memcpy(req.l2_addr_mask, filter->l2_addr_mask,
239                        ETHER_ADDR_LEN);
240         if (enables &
241             HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN)
242                 req.l2_ovlan = filter->l2_ovlan;
243         if (enables &
244             HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN_MASK)
245                 req.l2_ovlan_mask = filter->l2_ovlan_mask;
246
247         req.enables = rte_cpu_to_le_32(enables);
248
249         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
250
251         HWRM_CHECK_RESULT;
252
253         filter->fw_l2_filter_id = rte_le_to_cpu_64(resp->l2_filter_id);
254
255         return rc;
256 }
257
258 int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, void *fwd_cmd)
259 {
260         int rc;
261         struct hwrm_exec_fwd_resp_input req = {.req_type = 0 };
262         struct hwrm_exec_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
263
264         HWRM_PREP(req, EXEC_FWD_RESP, -1, resp);
265
266         memcpy(req.encap_request, fwd_cmd,
267                sizeof(req.encap_request));
268
269         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
270
271         HWRM_CHECK_RESULT;
272
273         return rc;
274 }
275
276 int bnxt_hwrm_func_qcaps(struct bnxt *bp)
277 {
278         int rc = 0;
279         struct hwrm_func_qcaps_input req = {.req_type = 0 };
280         struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
281
282         HWRM_PREP(req, FUNC_QCAPS, -1, resp);
283
284         req.fid = rte_cpu_to_le_16(0xffff);
285
286         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
287
288         HWRM_CHECK_RESULT;
289
290         bp->max_ring_grps = rte_le_to_cpu_32(resp->max_hw_ring_grps);
291         if (BNXT_PF(bp)) {
292                 struct bnxt_pf_info *pf = &bp->pf;
293
294                 pf->fw_fid = rte_le_to_cpu_32(resp->fid);
295                 pf->port_id = resp->port_id;
296                 memcpy(pf->mac_addr, resp->mac_address, ETHER_ADDR_LEN);
297                 pf->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx);
298                 pf->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings);
299                 pf->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings);
300                 pf->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings);
301                 pf->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
302                 pf->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
303                 pf->first_vf_id = rte_le_to_cpu_16(resp->first_vf_id);
304                 pf->max_vfs = rte_le_to_cpu_16(resp->max_vfs);
305         } else {
306                 struct bnxt_vf_info *vf = &bp->vf;
307
308                 vf->fw_fid = rte_le_to_cpu_32(resp->fid);
309                 memcpy(vf->mac_addr, &resp->mac_address, ETHER_ADDR_LEN);
310                 vf->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx);
311                 vf->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings);
312                 vf->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings);
313                 vf->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings);
314                 vf->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
315                 vf->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
316         }
317
318         return rc;
319 }
320
321 int bnxt_hwrm_func_reset(struct bnxt *bp)
322 {
323         int rc = 0;
324         struct hwrm_func_reset_input req = {.req_type = 0 };
325         struct hwrm_func_reset_output *resp = bp->hwrm_cmd_resp_addr;
326
327         HWRM_PREP(req, FUNC_RESET, -1, resp);
328
329         req.enables = rte_cpu_to_le_32(0);
330
331         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
332
333         HWRM_CHECK_RESULT;
334
335         return rc;
336 }
337
338 int bnxt_hwrm_func_driver_register(struct bnxt *bp, uint32_t flags,
339                                    uint32_t *vf_req_fwd)
340 {
341         int rc;
342         struct hwrm_func_drv_rgtr_input req = {.req_type = 0 };
343         struct hwrm_func_drv_rgtr_output *resp = bp->hwrm_cmd_resp_addr;
344
345         if (bp->flags & BNXT_FLAG_REGISTERED)
346                 return 0;
347
348         HWRM_PREP(req, FUNC_DRV_RGTR, -1, resp);
349         req.flags = flags;
350         req.enables = HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VER |
351                         HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_ASYNC_EVENT_FWD;
352         req.ver_maj = RTE_VER_YEAR;
353         req.ver_min = RTE_VER_MONTH;
354         req.ver_upd = RTE_VER_MINOR;
355
356         memcpy(req.vf_req_fwd, vf_req_fwd, sizeof(req.vf_req_fwd));
357
358         req.async_event_fwd[0] |= rte_cpu_to_le_32(0x1);   /* TODO: Use MACRO */
359
360         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
361
362         HWRM_CHECK_RESULT;
363
364         bp->flags |= BNXT_FLAG_REGISTERED;
365
366         return rc;
367 }
368
369 int bnxt_hwrm_ver_get(struct bnxt *bp)
370 {
371         int rc = 0;
372         struct hwrm_ver_get_input req = {.req_type = 0 };
373         struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr;
374         uint32_t my_version;
375         uint32_t fw_version;
376         uint16_t max_resp_len;
377         char type[RTE_MEMZONE_NAMESIZE];
378
379         HWRM_PREP(req, VER_GET, -1, resp);
380
381         req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
382         req.hwrm_intf_min = HWRM_VERSION_MINOR;
383         req.hwrm_intf_upd = HWRM_VERSION_UPDATE;
384
385         /*
386          * Hold the lock since we may be adjusting the response pointers.
387          */
388         rte_spinlock_lock(&bp->hwrm_lock);
389         rc = bnxt_hwrm_send_message_locked(bp, &req, sizeof(req));
390
391         HWRM_CHECK_RESULT;
392
393         RTE_LOG(INFO, PMD, "%d.%d.%d:%d.%d.%d\n",
394                 resp->hwrm_intf_maj, resp->hwrm_intf_min,
395                 resp->hwrm_intf_upd,
396                 resp->hwrm_fw_maj, resp->hwrm_fw_min, resp->hwrm_fw_bld);
397         RTE_LOG(INFO, PMD, "Driver HWRM version: %d.%d.%d\n",
398                 HWRM_VERSION_MAJOR, HWRM_VERSION_MINOR, HWRM_VERSION_UPDATE);
399
400         my_version = HWRM_VERSION_MAJOR << 16;
401         my_version |= HWRM_VERSION_MINOR << 8;
402         my_version |= HWRM_VERSION_UPDATE;
403
404         fw_version = resp->hwrm_intf_maj << 16;
405         fw_version |= resp->hwrm_intf_min << 8;
406         fw_version |= resp->hwrm_intf_upd;
407
408         if (resp->hwrm_intf_maj != HWRM_VERSION_MAJOR) {
409                 RTE_LOG(ERR, PMD, "Unsupported firmware API version\n");
410                 rc = -EINVAL;
411                 goto error;
412         }
413
414         if (my_version != fw_version) {
415                 RTE_LOG(INFO, PMD, "BNXT Driver/HWRM API mismatch.\n");
416                 if (my_version < fw_version) {
417                         RTE_LOG(INFO, PMD,
418                                 "Firmware API version is newer than driver.\n");
419                         RTE_LOG(INFO, PMD,
420                                 "The driver may be missing features.\n");
421                 } else {
422                         RTE_LOG(INFO, PMD,
423                                 "Firmware API version is older than driver.\n");
424                         RTE_LOG(INFO, PMD,
425                                 "Not all driver features may be functional.\n");
426                 }
427         }
428
429         if (bp->max_req_len > resp->max_req_win_len) {
430                 RTE_LOG(ERR, PMD, "Unsupported request length\n");
431                 rc = -EINVAL;
432         }
433         bp->max_req_len = resp->max_req_win_len;
434         max_resp_len = resp->max_resp_len;
435         if (bp->max_resp_len != max_resp_len) {
436                 sprintf(type, "bnxt_hwrm_%04x:%02x:%02x:%02x",
437                         bp->pdev->addr.domain, bp->pdev->addr.bus,
438                         bp->pdev->addr.devid, bp->pdev->addr.function);
439
440                 rte_free(bp->hwrm_cmd_resp_addr);
441
442                 bp->hwrm_cmd_resp_addr = rte_malloc(type, max_resp_len, 0);
443                 if (bp->hwrm_cmd_resp_addr == NULL) {
444                         rc = -ENOMEM;
445                         goto error;
446                 }
447                 bp->hwrm_cmd_resp_dma_addr =
448                         rte_malloc_virt2phy(bp->hwrm_cmd_resp_addr);
449                 bp->max_resp_len = max_resp_len;
450         }
451
452 error:
453         rte_spinlock_unlock(&bp->hwrm_lock);
454         return rc;
455 }
456
457 int bnxt_hwrm_func_driver_unregister(struct bnxt *bp, uint32_t flags)
458 {
459         int rc;
460         struct hwrm_func_drv_unrgtr_input req = {.req_type = 0 };
461         struct hwrm_func_drv_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;
462
463         if (!(bp->flags & BNXT_FLAG_REGISTERED))
464                 return 0;
465
466         HWRM_PREP(req, FUNC_DRV_UNRGTR, -1, resp);
467         req.flags = flags;
468
469         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
470
471         HWRM_CHECK_RESULT;
472
473         bp->flags &= ~BNXT_FLAG_REGISTERED;
474
475         return rc;
476 }
477
/*
 * Program the port PHY (PORT_PHY_CFG) from 'conf'.  When conf->link_up is
 * set, speed/duplex/pause and the autoneg mode are configured; otherwise
 * the link is forced down.  Returns 0 on success or an HWRM error code.
 *
 * The auto_mode / enables interplay below is order-sensitive: later
 * assignments deliberately override earlier ones depending on whether a
 * fixed speed or an advertised-speed mask was requested.
 */
static int bnxt_hwrm_port_phy_cfg(struct bnxt *bp, struct bnxt_link_info *conf)
{
	int rc = 0;
	struct hwrm_port_phy_cfg_input req = {0};
	struct hwrm_port_phy_cfg_output *resp = bp->hwrm_cmd_resp_addr;
	uint32_t enables = 0;

	HWRM_PREP(req, PORT_PHY_CFG, -1, resp);

	if (conf->link_up) {
		/* Setting Fixed Speed. But AutoNeg is ON, So disable it */
		if (bp->link_info.auto_mode && conf->link_speed) {
			req.auto_mode = HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_NONE;
			RTE_LOG(DEBUG, PMD, "Disabling AutoNeg\n");
		}

		req.flags = rte_cpu_to_le_32(conf->phy_flags);
		req.force_link_speed = rte_cpu_to_le_16(conf->link_speed);
		enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_MODE;
		/*
		 * Note, ChiMP FW 20.2.1 and 20.2.2 return an error when we set
		 * any auto mode, even "none".
		 */
		if (!conf->link_speed) {
			/* No speeds specified. Enable AutoNeg - all speeds */
			req.auto_mode =
				HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_ALL_SPEEDS;
		}
		/* AutoNeg - Advertise speeds specified. */
		if (conf->auto_link_speed_mask &&
		    !(conf->phy_flags & HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE)) {
			req.auto_mode =
				HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_SPEED_MASK;
			req.auto_link_speed_mask =
				conf->auto_link_speed_mask;
			enables |=
			HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED_MASK;
		}

		req.auto_duplex = conf->duplex;
		enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_DUPLEX;
		req.auto_pause = conf->auto_pause;
		req.force_pause = conf->force_pause;
		/* Set force_pause if there is no auto or if there is a force */
		if (req.auto_pause && !req.force_pause)
			enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_PAUSE;
		else
			enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_FORCE_PAUSE;

		req.enables = rte_cpu_to_le_32(enables);
	} else {
		/* Link down requested: force it rather than configuring PHY */
		req.flags =
		rte_cpu_to_le_32(HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE_LINK_DOWN);
		RTE_LOG(INFO, PMD, "Force Link Down\n");
	}

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	return rc;
}
540
541 static int bnxt_hwrm_port_phy_qcfg(struct bnxt *bp,
542                                    struct bnxt_link_info *link_info)
543 {
544         int rc = 0;
545         struct hwrm_port_phy_qcfg_input req = {0};
546         struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
547
548         HWRM_PREP(req, PORT_PHY_QCFG, -1, resp);
549
550         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
551
552         HWRM_CHECK_RESULT;
553
554         link_info->phy_link_status = resp->link;
555         link_info->link_up =
556                 (link_info->phy_link_status ==
557                  HWRM_PORT_PHY_QCFG_OUTPUT_LINK_LINK) ? 1 : 0;
558         link_info->link_speed = rte_le_to_cpu_16(resp->link_speed);
559         link_info->duplex = resp->duplex;
560         link_info->pause = resp->pause;
561         link_info->auto_pause = resp->auto_pause;
562         link_info->force_pause = resp->force_pause;
563         link_info->auto_mode = resp->auto_mode;
564         link_info->phy_type = resp->phy_type;
565         link_info->media_type = resp->media_type;
566
567         link_info->support_speeds = rte_le_to_cpu_16(resp->support_speeds);
568         link_info->auto_link_speed = rte_le_to_cpu_16(resp->auto_link_speed);
569         link_info->preemphasis = rte_le_to_cpu_32(resp->preemphasis);
570         link_info->force_link_speed = rte_le_to_cpu_16(resp->force_link_speed);
571         link_info->phy_ver[0] = resp->phy_maj;
572         link_info->phy_ver[1] = resp->phy_min;
573         link_info->phy_ver[2] = resp->phy_bld;
574
575         return rc;
576 }
577
578 int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
579 {
580         int rc = 0;
581         struct hwrm_queue_qportcfg_input req = {.req_type = 0 };
582         struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr;
583
584         HWRM_PREP(req, QUEUE_QPORTCFG, -1, resp);
585
586         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
587
588         HWRM_CHECK_RESULT;
589
590 #define GET_QUEUE_INFO(x) \
591         bp->cos_queue[x].id = resp->queue_id##x; \
592         bp->cos_queue[x].profile = resp->queue_id##x##_service_profile
593
594         GET_QUEUE_INFO(0);
595         GET_QUEUE_INFO(1);
596         GET_QUEUE_INFO(2);
597         GET_QUEUE_INFO(3);
598         GET_QUEUE_INFO(4);
599         GET_QUEUE_INFO(5);
600         GET_QUEUE_INFO(6);
601         GET_QUEUE_INFO(7);
602
603         return rc;
604 }
605
/*
 * Allocate a hardware ring (RING_ALLOC) of the given type (TX, RX or
 * completion) and store the firmware ring id in ring->fw_ring_id.
 *
 * map_index associates the ring with a doorbell; stats_ctx_id may be
 * INVALID_STATS_CTX_ID for rings without statistics.  Errors are checked
 * inline (not via HWRM_CHECK_RESULT) so each ring type can log its own
 * message.  Returns 0 on success, an HWRM error code, or -1 for an
 * unknown ring type.
 */
int bnxt_hwrm_ring_alloc(struct bnxt *bp,
			 struct bnxt_ring *ring,
			 uint32_t ring_type, uint32_t map_index,
			 uint32_t stats_ctx_id, uint32_t cmpl_ring_id)
{
	int rc = 0;
	struct hwrm_ring_alloc_input req = {.req_type = 0 };
	struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, RING_ALLOC, -1, resp);

	req.enables = rte_cpu_to_le_32(0);

	req.page_tbl_addr = rte_cpu_to_le_64(ring->bd_dma);
	req.fbo = rte_cpu_to_le_32(0);
	/* Association of ring index with doorbell index */
	req.logical_id = rte_cpu_to_le_16(map_index);

	switch (ring_type) {
	case HWRM_RING_ALLOC_INPUT_RING_TYPE_TX:
		/* TX rings additionally carry a CoS queue id, then share the
		 * rest of the setup with RX rings via the fallthrough. */
		req.queue_id = bp->cos_queue[0].id;
		/* FALLTHROUGH */
	case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX:
		req.ring_type = ring_type;
		req.cmpl_ring_id = rte_cpu_to_le_16(cmpl_ring_id);
		req.length = rte_cpu_to_le_32(ring->ring_size);
		req.stat_ctx_id = rte_cpu_to_le_16(stats_ctx_id);
		if (stats_ctx_id != INVALID_STATS_CTX_ID)
			req.enables =
			rte_cpu_to_le_32(rte_le_to_cpu_32(req.enables) |
			HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID);
		break;
	case HWRM_RING_ALLOC_INPUT_RING_TYPE_CMPL:
		req.ring_type = ring_type;
		/*
		 * TODO: Some HWRM versions crash with
		 * HWRM_RING_ALLOC_INPUT_INT_MODE_POLL
		 */
		req.int_mode = HWRM_RING_ALLOC_INPUT_INT_MODE_MSIX;
		req.length = rte_cpu_to_le_32(ring->ring_size);
		break;
	default:
		RTE_LOG(ERR, PMD, "hwrm alloc invalid ring type %d\n",
			ring_type);
		return -1;
	}

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	/* Per-ring-type error reporting instead of HWRM_CHECK_RESULT */
	if (rc || resp->error_code) {
		if (rc == 0 && resp->error_code)
			rc = rte_le_to_cpu_16(resp->error_code);
		switch (ring_type) {
		case HWRM_RING_FREE_INPUT_RING_TYPE_CMPL:
			RTE_LOG(ERR, PMD,
				"hwrm_ring_alloc cp failed. rc:%d\n", rc);
			return rc;
		case HWRM_RING_FREE_INPUT_RING_TYPE_RX:
			RTE_LOG(ERR, PMD,
				"hwrm_ring_alloc rx failed. rc:%d\n", rc);
			return rc;
		case HWRM_RING_FREE_INPUT_RING_TYPE_TX:
			RTE_LOG(ERR, PMD,
				"hwrm_ring_alloc tx failed. rc:%d\n", rc);
			return rc;
		default:
			RTE_LOG(ERR, PMD, "Invalid ring. rc:%d\n", rc);
			return rc;
		}
	}

	ring->fw_ring_id = rte_le_to_cpu_16(resp->ring_id);
	return rc;
}
680
681 int bnxt_hwrm_ring_free(struct bnxt *bp,
682                         struct bnxt_ring *ring, uint32_t ring_type)
683 {
684         int rc;
685         struct hwrm_ring_free_input req = {.req_type = 0 };
686         struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr;
687
688         HWRM_PREP(req, RING_FREE, -1, resp);
689
690         req.ring_type = ring_type;
691         req.ring_id = rte_cpu_to_le_16(ring->fw_ring_id);
692
693         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
694
695         if (rc || resp->error_code) {
696                 if (rc == 0 && resp->error_code)
697                         rc = rte_le_to_cpu_16(resp->error_code);
698
699                 switch (ring_type) {
700                 case HWRM_RING_FREE_INPUT_RING_TYPE_CMPL:
701                         RTE_LOG(ERR, PMD, "hwrm_ring_free cp failed. rc:%d\n",
702                                 rc);
703                         return rc;
704                 case HWRM_RING_FREE_INPUT_RING_TYPE_RX:
705                         RTE_LOG(ERR, PMD, "hwrm_ring_free rx failed. rc:%d\n",
706                                 rc);
707                         return rc;
708                 case HWRM_RING_FREE_INPUT_RING_TYPE_TX:
709                         RTE_LOG(ERR, PMD, "hwrm_ring_free tx failed. rc:%d\n",
710                                 rc);
711                         return rc;
712                 default:
713                         RTE_LOG(ERR, PMD, "Invalid ring, rc:%d\n", rc);
714                         return rc;
715                 }
716         }
717         return 0;
718 }
719
720 int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp, unsigned int idx)
721 {
722         int rc = 0;
723         struct hwrm_ring_grp_alloc_input req = {.req_type = 0 };
724         struct hwrm_ring_grp_alloc_output *resp = bp->hwrm_cmd_resp_addr;
725
726         HWRM_PREP(req, RING_GRP_ALLOC, -1, resp);
727
728         req.cr = rte_cpu_to_le_16(bp->grp_info[idx].cp_fw_ring_id);
729         req.rr = rte_cpu_to_le_16(bp->grp_info[idx].rx_fw_ring_id);
730         req.ar = rte_cpu_to_le_16(bp->grp_info[idx].ag_fw_ring_id);
731         req.sc = rte_cpu_to_le_16(bp->grp_info[idx].fw_stats_ctx);
732
733         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
734
735         HWRM_CHECK_RESULT;
736
737         bp->grp_info[idx].fw_grp_id =
738             rte_le_to_cpu_16(resp->ring_group_id);
739
740         return rc;
741 }
742
743 int bnxt_hwrm_ring_grp_free(struct bnxt *bp, unsigned int idx)
744 {
745         int rc;
746         struct hwrm_ring_grp_free_input req = {.req_type = 0 };
747         struct hwrm_ring_grp_free_output *resp = bp->hwrm_cmd_resp_addr;
748
749         HWRM_PREP(req, RING_GRP_FREE, -1, resp);
750
751         req.ring_group_id = rte_cpu_to_le_16(bp->grp_info[idx].fw_grp_id);
752
753         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
754
755         HWRM_CHECK_RESULT;
756
757         bp->grp_info[idx].fw_grp_id = INVALID_HW_RING_ID;
758         return rc;
759 }
760
761 int bnxt_hwrm_stat_clear(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
762 {
763         int rc = 0;
764         struct hwrm_stat_ctx_clr_stats_input req = {.req_type = 0 };
765         struct hwrm_stat_ctx_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;
766
767         HWRM_PREP(req, STAT_CTX_CLR_STATS, -1, resp);
768
769         if (cpr->hw_stats_ctx_id == (uint32_t)HWRM_NA_SIGNATURE)
770                 return rc;
771
772         req.stat_ctx_id = rte_cpu_to_le_16(cpr->hw_stats_ctx_id);
773         req.seq_id = rte_cpu_to_le_16(bp->hwrm_cmd_seq++);
774
775         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
776
777         HWRM_CHECK_RESULT;
778
779         return rc;
780 }
781
782 int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp,
783                              struct bnxt_cp_ring_info *cpr, unsigned int idx)
784 {
785         int rc;
786         struct hwrm_stat_ctx_alloc_input req = {.req_type = 0 };
787         struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr;
788
789         HWRM_PREP(req, STAT_CTX_ALLOC, -1, resp);
790
791         req.update_period_ms = rte_cpu_to_le_32(1000);
792
793         req.seq_id = rte_cpu_to_le_16(bp->hwrm_cmd_seq++);
794         req.stats_dma_addr =
795             rte_cpu_to_le_64(cpr->hw_stats_map);
796
797         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
798
799         HWRM_CHECK_RESULT;
800
801         cpr->hw_stats_ctx_id = rte_le_to_cpu_16(resp->stat_ctx_id);
802         //Tx rings don't need grp_info entry. It is a Rx only attribute.
803         if (idx)
804                 bp->grp_info[idx].fw_stats_ctx = cpr->hw_stats_ctx_id;
805
806         return rc;
807 }
808
809 int bnxt_hwrm_stat_ctx_free(struct bnxt *bp,
810                             struct bnxt_cp_ring_info *cpr, unsigned int idx)
811 {
812         int rc;
813         struct hwrm_stat_ctx_free_input req = {.req_type = 0 };
814         struct hwrm_stat_ctx_free_output *resp = bp->hwrm_cmd_resp_addr;
815
816         HWRM_PREP(req, STAT_CTX_FREE, -1, resp);
817
818         req.stat_ctx_id = rte_cpu_to_le_16(cpr->hw_stats_ctx_id);
819         req.seq_id = rte_cpu_to_le_16(bp->hwrm_cmd_seq++);
820
821         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
822
823         HWRM_CHECK_RESULT;
824
825         cpr->hw_stats_ctx_id = HWRM_NA_SIGNATURE;
826         //Tx rings don't have a grp_info entry. It is a Rx only attribute.
827         if (idx)
828                 bp->grp_info[idx].fw_stats_ctx = cpr->hw_stats_ctx_id;
829
830         return rc;
831 }
832
833 int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
834 {
835         int rc = 0, i, j;
836         struct hwrm_vnic_alloc_input req = { 0 };
837         struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr;
838
839         /* map ring groups to this vnic */
840         for (i = vnic->start_grp_id, j = 0; i <= vnic->end_grp_id; i++, j++) {
841                 if (bp->grp_info[i].fw_grp_id == (uint16_t)HWRM_NA_SIGNATURE) {
842                         RTE_LOG(ERR, PMD,
843                                 "Not enough ring groups avail:%x req:%x\n", j,
844                                 (vnic->end_grp_id - vnic->start_grp_id) + 1);
845                         break;
846                 }
847                 vnic->fw_grp_ids[j] = bp->grp_info[i].fw_grp_id;
848         }
849
850         vnic->fw_rss_cos_lb_ctx = (uint16_t)HWRM_NA_SIGNATURE;
851         vnic->ctx_is_rss_cos_lb = HW_CONTEXT_NONE;
852
853         HWRM_PREP(req, VNIC_ALLOC, -1, resp);
854
855         if (vnic->func_default)
856                 req.flags = HWRM_VNIC_ALLOC_INPUT_FLAGS_DEFAULT;
857         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
858
859         HWRM_CHECK_RESULT;
860
861         vnic->fw_vnic_id = rte_le_to_cpu_16(resp->vnic_id);
862         RTE_LOG(DEBUG, PMD, "VNIC ID %x\n", vnic->fw_vnic_id);
863         return rc;
864 }
865
866 int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic)
867 {
868         int rc = 0;
869         struct hwrm_vnic_cfg_input req = {.req_type = 0 };
870         struct hwrm_vnic_cfg_output *resp = bp->hwrm_cmd_resp_addr;
871
872         if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
873                 RTE_LOG(DEBUG, PMD, "VNIC ID %x\n", vnic->fw_vnic_id);
874                 return rc;
875         }
876
877         HWRM_PREP(req, VNIC_CFG, -1, resp);
878
879         /* Only RSS support for now TBD: COS & LB */
880         req.enables =
881             rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_ENABLES_DFLT_RING_GRP |
882                              HWRM_VNIC_CFG_INPUT_ENABLES_RSS_RULE |
883                              HWRM_VNIC_CFG_INPUT_ENABLES_MRU);
884         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
885         req.dflt_ring_grp =
886                 rte_cpu_to_le_16(bp->grp_info[vnic->start_grp_id].fw_grp_id);
887         req.rss_rule = rte_cpu_to_le_16(vnic->fw_rss_cos_lb_ctx);
888         req.cos_rule = rte_cpu_to_le_16(0xffff);
889         req.lb_rule = rte_cpu_to_le_16(0xffff);
890         req.mru = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
891                                    ETHER_CRC_LEN + VLAN_TAG_SIZE);
892         if (vnic->func_default)
893                 req.flags = 1;
894         if (vnic->vlan_strip)
895                 req.flags |=
896                     rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_VLAN_STRIP_MODE);
897
898         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
899
900         HWRM_CHECK_RESULT;
901
902         return rc;
903 }
904
905 int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
906 {
907         int rc = 0;
908         struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {.req_type = 0 };
909         struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp =
910                                                 bp->hwrm_cmd_resp_addr;
911
912         HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_ALLOC, -1, resp);
913
914         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
915
916         HWRM_CHECK_RESULT;
917
918         vnic->fw_rss_cos_lb_ctx = rte_le_to_cpu_16(resp->rss_cos_lb_ctx_id);
919         RTE_LOG(DEBUG, PMD, "VNIC RSS Rule %x\n", vnic->fw_rss_cos_lb_ctx);
920
921         return rc;
922 }
923
924 int bnxt_hwrm_vnic_ctx_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
925 {
926         int rc = 0;
927         struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {.req_type = 0 };
928         struct hwrm_vnic_rss_cos_lb_ctx_free_output *resp =
929                                                 bp->hwrm_cmd_resp_addr;
930
931         if (vnic->fw_rss_cos_lb_ctx == 0xffff) {
932                 RTE_LOG(DEBUG, PMD,
933                         "VNIC RSS Rule %x\n", vnic->fw_rss_cos_lb_ctx);
934                 return rc;
935         }
936
937         HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_FREE, -1, resp);
938
939         req.rss_cos_lb_ctx_id = rte_cpu_to_le_16(vnic->fw_rss_cos_lb_ctx);
940
941         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
942
943         HWRM_CHECK_RESULT;
944
945         vnic->fw_rss_cos_lb_ctx = INVALID_HW_RING_ID;
946
947         return rc;
948 }
949
950 int bnxt_hwrm_vnic_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
951 {
952         int rc = 0;
953         struct hwrm_vnic_free_input req = {.req_type = 0 };
954         struct hwrm_vnic_free_output *resp = bp->hwrm_cmd_resp_addr;
955
956         if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
957                 RTE_LOG(DEBUG, PMD, "VNIC FREE ID %x\n", vnic->fw_vnic_id);
958                 return rc;
959         }
960
961         HWRM_PREP(req, VNIC_FREE, -1, resp);
962
963         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
964
965         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
966
967         HWRM_CHECK_RESULT;
968
969         vnic->fw_vnic_id = INVALID_HW_RING_ID;
970         return rc;
971 }
972
973 int bnxt_hwrm_vnic_rss_cfg(struct bnxt *bp,
974                            struct bnxt_vnic_info *vnic)
975 {
976         int rc = 0;
977         struct hwrm_vnic_rss_cfg_input req = {.req_type = 0 };
978         struct hwrm_vnic_rss_cfg_output *resp = bp->hwrm_cmd_resp_addr;
979
980         HWRM_PREP(req, VNIC_RSS_CFG, -1, resp);
981
982         req.hash_type = rte_cpu_to_le_32(vnic->hash_type);
983
984         req.ring_grp_tbl_addr =
985             rte_cpu_to_le_64(vnic->rss_table_dma_addr);
986         req.hash_key_tbl_addr =
987             rte_cpu_to_le_64(vnic->rss_hash_key_dma_addr);
988         req.rss_ctx_idx = rte_cpu_to_le_16(vnic->fw_rss_cos_lb_ctx);
989
990         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
991
992         HWRM_CHECK_RESULT;
993
994         return rc;
995 }
996
997 /*
998  * HWRM utility functions
999  */
1000
1001 int bnxt_clear_all_hwrm_stat_ctxs(struct bnxt *bp)
1002 {
1003         unsigned int i;
1004         int rc = 0;
1005
1006         for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
1007                 struct bnxt_tx_queue *txq;
1008                 struct bnxt_rx_queue *rxq;
1009                 struct bnxt_cp_ring_info *cpr;
1010
1011                 if (i >= bp->rx_cp_nr_rings) {
1012                         txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
1013                         cpr = txq->cp_ring;
1014                 } else {
1015                         rxq = bp->rx_queues[i];
1016                         cpr = rxq->cp_ring;
1017                 }
1018
1019                 rc = bnxt_hwrm_stat_clear(bp, cpr);
1020                 if (rc)
1021                         return rc;
1022         }
1023         return 0;
1024 }
1025
1026 int bnxt_free_all_hwrm_stat_ctxs(struct bnxt *bp)
1027 {
1028         int rc;
1029         unsigned int i;
1030         struct bnxt_cp_ring_info *cpr;
1031
1032         for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
1033                 unsigned int idx = i + 1;
1034
1035                 if (i >= bp->rx_cp_nr_rings) {
1036                         cpr = bp->tx_queues[i - bp->rx_cp_nr_rings]->cp_ring;
1037                         //Tx rings don't have a grp_info entry.
1038                         idx = 0;
1039                 } else {
1040                         cpr = bp->rx_queues[i]->cp_ring;
1041                 }
1042                 if (cpr->hw_stats_ctx_id != HWRM_NA_SIGNATURE) {
1043                         rc = bnxt_hwrm_stat_ctx_free(bp, cpr, idx);
1044                         if (rc)
1045                                 return rc;
1046                 }
1047         }
1048         return 0;
1049 }
1050
1051 int bnxt_alloc_all_hwrm_stat_ctxs(struct bnxt *bp)
1052 {
1053         unsigned int i;
1054         int rc = 0;
1055
1056         for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
1057                 struct bnxt_tx_queue *txq;
1058                 struct bnxt_rx_queue *rxq;
1059                 struct bnxt_cp_ring_info *cpr;
1060                 unsigned int idx = i + 1;
1061
1062                 if (i >= bp->rx_cp_nr_rings) {
1063                         txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
1064                         cpr = txq->cp_ring;
1065                         //Tx rings don't need grp_info entry.
1066                         idx = 0;
1067                 } else {
1068                         rxq = bp->rx_queues[i];
1069                         cpr = rxq->cp_ring;
1070                 }
1071
1072                 rc = bnxt_hwrm_stat_ctx_alloc(bp, cpr, idx);
1073
1074                 if (rc)
1075                         return rc;
1076         }
1077         return rc;
1078 }
1079
1080 int bnxt_free_all_hwrm_ring_grps(struct bnxt *bp)
1081 {
1082         uint16_t i;
1083         uint32_t rc = 0;
1084
1085         for (i = 0; i < bp->rx_cp_nr_rings; i++) {
1086                 unsigned int idx = i + 1;
1087
1088                 if (bp->grp_info[idx].fw_grp_id == INVALID_HW_RING_ID) {
1089                         RTE_LOG(ERR, PMD,
1090                                 "Attempt to free invalid ring group %d\n",
1091                                 idx);
1092                         continue;
1093                 }
1094
1095                 rc = bnxt_hwrm_ring_grp_free(bp, idx);
1096
1097                 if (rc)
1098                         return rc;
1099         }
1100         return rc;
1101 }
1102
1103 static void bnxt_free_cp_ring(struct bnxt *bp,
1104                               struct bnxt_cp_ring_info *cpr)
1105 {
1106         struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
1107
1108         bnxt_hwrm_ring_free(bp, cp_ring,
1109                         HWRM_RING_FREE_INPUT_RING_TYPE_CMPL);
1110         cp_ring->fw_ring_id = INVALID_HW_RING_ID;
1111         memset(cpr->cp_desc_ring, 0, cpr->cp_ring_struct->ring_size *
1112                         sizeof(*cpr->cp_desc_ring));
1113         cpr->cp_raw_cons = 0;
1114 }
1115
/*
 * Tear down every firmware ring: all Tx rings with their completion
 * rings, all Rx rings with their completion rings and ring-group
 * references, and finally the default completion ring.  Host-side
 * descriptor/buffer memory is zeroed and producer/consumer indices
 * reset so the rings can be re-allocated cleanly.
 */
int bnxt_free_all_hwrm_rings(struct bnxt *bp)
{
	unsigned int i;
	int rc = 0;

	/* Tx rings first: each queue owns a Tx ring and a completion ring. */
	for (i = 0; i < bp->tx_cp_nr_rings; i++) {
		struct bnxt_tx_queue *txq = bp->tx_queues[i];
		struct bnxt_tx_ring_info *txr = txq->tx_ring;
		struct bnxt_ring *ring = txr->tx_ring_struct;
		struct bnxt_cp_ring_info *cpr = txq->cp_ring;

		/* Only rings known to firmware need an explicit free. */
		if (ring->fw_ring_id != INVALID_HW_RING_ID) {
			bnxt_hwrm_ring_free(bp, ring,
					HWRM_RING_FREE_INPUT_RING_TYPE_TX);
			ring->fw_ring_id = INVALID_HW_RING_ID;
			/* Wipe descriptors and buffers, rewind indices. */
			memset(txr->tx_desc_ring, 0,
					txr->tx_ring_struct->ring_size *
					sizeof(*txr->tx_desc_ring));
			memset(txr->tx_buf_ring, 0,
					txr->tx_ring_struct->ring_size *
					sizeof(*txr->tx_buf_ring));
			txr->tx_prod = 0;
			txr->tx_cons = 0;
		}
		if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID)
			bnxt_free_cp_ring(bp, cpr);
	}

	/* Rx rings: grp_info slot i + 1 tracks ring ids for each Rx queue. */
	for (i = 0; i < bp->rx_cp_nr_rings; i++) {
		struct bnxt_rx_queue *rxq = bp->rx_queues[i];
		struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
		struct bnxt_ring *ring = rxr->rx_ring_struct;
		struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
		unsigned int idx = i + 1;

		if (ring->fw_ring_id != INVALID_HW_RING_ID) {
			bnxt_hwrm_ring_free(bp, ring,
					HWRM_RING_FREE_INPUT_RING_TYPE_RX);
			ring->fw_ring_id = INVALID_HW_RING_ID;
			bp->grp_info[idx].rx_fw_ring_id = INVALID_HW_RING_ID;
			memset(rxr->rx_desc_ring, 0,
					rxr->rx_ring_struct->ring_size *
					sizeof(*rxr->rx_desc_ring));
			memset(rxr->rx_buf_ring, 0,
					rxr->rx_ring_struct->ring_size *
					sizeof(*rxr->rx_buf_ring));
			rxr->rx_prod = 0;
		}
		if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID)
			bnxt_free_cp_ring(bp, cpr);
		/* Unconditionally clear the group's cp ring reference. */
		bp->grp_info[idx].cp_fw_ring_id = INVALID_HW_RING_ID;
	}

	/* Default completion ring */
	{
		struct bnxt_cp_ring_info *cpr = bp->def_cp_ring;

		if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID)
			bnxt_free_cp_ring(bp, cpr);
		bp->grp_info[0].cp_fw_ring_id = INVALID_HW_RING_ID;
	}

	return rc;
}
1180
1181 int bnxt_alloc_all_hwrm_ring_grps(struct bnxt *bp)
1182 {
1183         uint16_t i;
1184         uint32_t rc = 0;
1185
1186         for (i = 0; i < bp->rx_cp_nr_rings; i++) {
1187                 unsigned int idx = i + 1;
1188
1189                 if (bp->grp_info[idx].cp_fw_ring_id == INVALID_HW_RING_ID ||
1190                     bp->grp_info[idx].rx_fw_ring_id == INVALID_HW_RING_ID)
1191                         continue;
1192
1193                 rc = bnxt_hwrm_ring_grp_alloc(bp, idx);
1194
1195                 if (rc)
1196                         return rc;
1197         }
1198         return rc;
1199 }
1200
1201 void bnxt_free_hwrm_resources(struct bnxt *bp)
1202 {
1203         /* Release memzone */
1204         rte_free(bp->hwrm_cmd_resp_addr);
1205         bp->hwrm_cmd_resp_addr = NULL;
1206         bp->hwrm_cmd_resp_dma_addr = 0;
1207 }
1208
1209 int bnxt_alloc_hwrm_resources(struct bnxt *bp)
1210 {
1211         struct rte_pci_device *pdev = bp->pdev;
1212         char type[RTE_MEMZONE_NAMESIZE];
1213
1214         sprintf(type, "bnxt_hwrm_%04x:%02x:%02x:%02x", pdev->addr.domain,
1215                 pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
1216         bp->max_req_len = HWRM_MAX_REQ_LEN;
1217         bp->max_resp_len = HWRM_MAX_RESP_LEN;
1218         bp->hwrm_cmd_resp_addr = rte_malloc(type, bp->max_resp_len, 0);
1219         if (bp->hwrm_cmd_resp_addr == NULL)
1220                 return -ENOMEM;
1221         bp->hwrm_cmd_resp_dma_addr =
1222                 rte_malloc_virt2phy(bp->hwrm_cmd_resp_addr);
1223         rte_spinlock_init(&bp->hwrm_lock);
1224
1225         return 0;
1226 }
1227
1228 int bnxt_clear_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1229 {
1230         struct bnxt_filter_info *filter;
1231         int rc = 0;
1232
1233         STAILQ_FOREACH(filter, &vnic->filter, next) {
1234                 rc = bnxt_hwrm_clear_filter(bp, filter);
1235                 if (rc)
1236                         break;
1237         }
1238         return rc;
1239 }
1240
1241 int bnxt_set_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1242 {
1243         struct bnxt_filter_info *filter;
1244         int rc = 0;
1245
1246         STAILQ_FOREACH(filter, &vnic->filter, next) {
1247                 rc = bnxt_hwrm_set_filter(bp, vnic, filter);
1248                 if (rc)
1249                         break;
1250         }
1251         return rc;
1252 }
1253
1254 void bnxt_free_all_hwrm_resources(struct bnxt *bp)
1255 {
1256         struct bnxt_vnic_info *vnic;
1257         unsigned int i;
1258
1259         if (bp->vnic_info == NULL)
1260                 return;
1261
1262         vnic = &bp->vnic_info[0];
1263         bnxt_hwrm_cfa_l2_clear_rx_mask(bp, vnic);
1264
1265         /* VNIC resources */
1266         for (i = 0; i < bp->nr_vnics; i++) {
1267                 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
1268
1269                 bnxt_clear_hwrm_vnic_filters(bp, vnic);
1270
1271                 bnxt_hwrm_vnic_ctx_free(bp, vnic);
1272                 bnxt_hwrm_vnic_free(bp, vnic);
1273         }
1274         /* Ring resources */
1275         bnxt_free_all_hwrm_rings(bp);
1276         bnxt_free_all_hwrm_ring_grps(bp);
1277         bnxt_free_all_hwrm_stat_ctxs(bp);
1278 }
1279
1280 static uint16_t bnxt_parse_eth_link_duplex(uint32_t conf_link_speed)
1281 {
1282         uint8_t hw_link_duplex = HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;
1283
1284         if ((conf_link_speed & ETH_LINK_SPEED_FIXED) == ETH_LINK_SPEED_AUTONEG)
1285                 return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;
1286
1287         switch (conf_link_speed) {
1288         case ETH_LINK_SPEED_10M_HD:
1289         case ETH_LINK_SPEED_100M_HD:
1290                 return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF;
1291         }
1292         return hw_link_duplex;
1293 }
1294
1295 static uint16_t bnxt_check_eth_link_autoneg(uint32_t conf_link)
1296 {
1297         return (conf_link & ETH_LINK_SPEED_FIXED) ? 0 : 1;
1298 }
1299
1300 static uint16_t bnxt_parse_eth_link_speed(uint32_t conf_link_speed)
1301 {
1302         uint16_t eth_link_speed = 0;
1303
1304         if (conf_link_speed == ETH_LINK_SPEED_AUTONEG)
1305                 return ETH_LINK_SPEED_AUTONEG;
1306
1307         switch (conf_link_speed & ~ETH_LINK_SPEED_FIXED) {
1308         case ETH_LINK_SPEED_100M:
1309         case ETH_LINK_SPEED_100M_HD:
1310                 eth_link_speed =
1311                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_100MB;
1312                 break;
1313         case ETH_LINK_SPEED_1G:
1314                 eth_link_speed =
1315                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_1GB;
1316                 break;
1317         case ETH_LINK_SPEED_2_5G:
1318                 eth_link_speed =
1319                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_2_5GB;
1320                 break;
1321         case ETH_LINK_SPEED_10G:
1322                 eth_link_speed =
1323                         HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_10GB;
1324                 break;
1325         case ETH_LINK_SPEED_20G:
1326                 eth_link_speed =
1327                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_20GB;
1328                 break;
1329         case ETH_LINK_SPEED_25G:
1330                 eth_link_speed =
1331                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_25GB;
1332                 break;
1333         case ETH_LINK_SPEED_40G:
1334                 eth_link_speed =
1335                         HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_40GB;
1336                 break;
1337         case ETH_LINK_SPEED_50G:
1338                 eth_link_speed =
1339                         HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_50GB;
1340                 break;
1341         default:
1342                 RTE_LOG(ERR, PMD,
1343                         "Unsupported link speed %d; default to AUTO\n",
1344                         conf_link_speed);
1345                 break;
1346         }
1347         return eth_link_speed;
1348 }
1349
1350 #define BNXT_SUPPORTED_SPEEDS (ETH_LINK_SPEED_100M | ETH_LINK_SPEED_100M_HD | \
1351                 ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G | \
1352                 ETH_LINK_SPEED_10G | ETH_LINK_SPEED_20G | ETH_LINK_SPEED_25G | \
1353                 ETH_LINK_SPEED_40G | ETH_LINK_SPEED_50G)
1354
1355 static int bnxt_valid_link_speed(uint32_t link_speed, uint8_t port_id)
1356 {
1357         uint32_t one_speed;
1358
1359         if (link_speed == ETH_LINK_SPEED_AUTONEG)
1360                 return 0;
1361
1362         if (link_speed & ETH_LINK_SPEED_FIXED) {
1363                 one_speed = link_speed & ~ETH_LINK_SPEED_FIXED;
1364
1365                 if (one_speed & (one_speed - 1)) {
1366                         RTE_LOG(ERR, PMD,
1367                                 "Invalid advertised speeds (%u) for port %u\n",
1368                                 link_speed, port_id);
1369                         return -EINVAL;
1370                 }
1371                 if ((one_speed & BNXT_SUPPORTED_SPEEDS) != one_speed) {
1372                         RTE_LOG(ERR, PMD,
1373                                 "Unsupported advertised speed (%u) for port %u\n",
1374                                 link_speed, port_id);
1375                         return -EINVAL;
1376                 }
1377         } else {
1378                 if (!(link_speed & BNXT_SUPPORTED_SPEEDS)) {
1379                         RTE_LOG(ERR, PMD,
1380                                 "Unsupported advertised speeds (%u) for port %u\n",
1381                                 link_speed, port_id);
1382                         return -EINVAL;
1383                 }
1384         }
1385         return 0;
1386 }
1387
1388 static uint16_t
1389 bnxt_parse_eth_link_speed_mask(struct bnxt *bp, uint32_t link_speed)
1390 {
1391         uint16_t ret = 0;
1392
1393         if (link_speed == ETH_LINK_SPEED_AUTONEG) {
1394                 if (bp->link_info.support_speeds)
1395                         return bp->link_info.support_speeds;
1396                 link_speed = BNXT_SUPPORTED_SPEEDS;
1397         }
1398
1399         if (link_speed & ETH_LINK_SPEED_100M)
1400                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
1401         if (link_speed & ETH_LINK_SPEED_100M_HD)
1402                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
1403         if (link_speed & ETH_LINK_SPEED_1G)
1404                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_1GB;
1405         if (link_speed & ETH_LINK_SPEED_2_5G)
1406                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_2_5GB;
1407         if (link_speed & ETH_LINK_SPEED_10G)
1408                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_10GB;
1409         if (link_speed & ETH_LINK_SPEED_20G)
1410                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_20GB;
1411         if (link_speed & ETH_LINK_SPEED_25G)
1412                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_25GB;
1413         if (link_speed & ETH_LINK_SPEED_40G)
1414                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_40GB;
1415         if (link_speed & ETH_LINK_SPEED_50G)
1416                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_50GB;
1417         return ret;
1418 }
1419
1420 static uint32_t bnxt_parse_hw_link_speed(uint16_t hw_link_speed)
1421 {
1422         uint32_t eth_link_speed = ETH_SPEED_NUM_NONE;
1423
1424         switch (hw_link_speed) {
1425         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100MB:
1426                 eth_link_speed = ETH_SPEED_NUM_100M;
1427                 break;
1428         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_1GB:
1429                 eth_link_speed = ETH_SPEED_NUM_1G;
1430                 break;
1431         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2_5GB:
1432                 eth_link_speed = ETH_SPEED_NUM_2_5G;
1433                 break;
1434         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10GB:
1435                 eth_link_speed = ETH_SPEED_NUM_10G;
1436                 break;
1437         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_20GB:
1438                 eth_link_speed = ETH_SPEED_NUM_20G;
1439                 break;
1440         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_25GB:
1441                 eth_link_speed = ETH_SPEED_NUM_25G;
1442                 break;
1443         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_40GB:
1444                 eth_link_speed = ETH_SPEED_NUM_40G;
1445                 break;
1446         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_50GB:
1447                 eth_link_speed = ETH_SPEED_NUM_50G;
1448                 break;
1449         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2GB:
1450         default:
1451                 RTE_LOG(ERR, PMD, "HWRM link speed %d not defined\n",
1452                         hw_link_speed);
1453                 break;
1454         }
1455         return eth_link_speed;
1456 }
1457
1458 static uint16_t bnxt_parse_hw_link_duplex(uint16_t hw_link_duplex)
1459 {
1460         uint16_t eth_link_duplex = ETH_LINK_FULL_DUPLEX;
1461
1462         switch (hw_link_duplex) {
1463         case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH:
1464         case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_FULL:
1465                 eth_link_duplex = ETH_LINK_FULL_DUPLEX;
1466                 break;
1467         case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF:
1468                 eth_link_duplex = ETH_LINK_HALF_DUPLEX;
1469                 break;
1470         default:
1471                 RTE_LOG(ERR, PMD, "HWRM link duplex %d not defined\n",
1472                         hw_link_duplex);
1473                 break;
1474         }
1475         return eth_link_duplex;
1476 }
1477
1478 int bnxt_get_hwrm_link_config(struct bnxt *bp, struct rte_eth_link *link)
1479 {
1480         int rc = 0;
1481         struct bnxt_link_info *link_info = &bp->link_info;
1482
1483         rc = bnxt_hwrm_port_phy_qcfg(bp, link_info);
1484         if (rc) {
1485                 RTE_LOG(ERR, PMD,
1486                         "Get link config failed with rc %d\n", rc);
1487                 goto exit;
1488         }
1489         if (link_info->link_speed)
1490                 link->link_speed =
1491                         bnxt_parse_hw_link_speed(link_info->link_speed);
1492         else
1493                 link->link_speed = ETH_SPEED_NUM_NONE;
1494         link->link_duplex = bnxt_parse_hw_link_duplex(link_info->duplex);
1495         link->link_status = link_info->link_up;
1496         link->link_autoneg = link_info->auto_mode ==
1497                 HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_NONE ?
1498                 ETH_LINK_FIXED : ETH_LINK_AUTONEG;
1499 exit:
1500         return rc;
1501 }
1502
/*
 * Apply the application's link configuration (speed/duplex/pause) to the
 * PHY via HWRM, or force the link down when link_up is false.
 * No-ops on VFs and NPAR PFs, which do not own the PHY.
 * Returns 0 on success or a negative error code.
 */
int bnxt_set_hwrm_link_config(struct bnxt *bp, bool link_up)
{
	int rc = 0;
	struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
	struct bnxt_link_info link_req;
	uint16_t speed, autoneg;

	if (BNXT_NPAR_PF(bp) || BNXT_VF(bp))
		return 0;

	rc = bnxt_valid_link_speed(dev_conf->link_speeds,
			bp->eth_dev->data->port_id);
	if (rc)
		goto error;

	memset(&link_req, 0, sizeof(link_req));
	link_req.link_up = link_up;
	/* For link-down, an empty request with link_up clear suffices. */
	if (!link_up)
		goto port_phy_cfg;

	autoneg = bnxt_check_eth_link_autoneg(dev_conf->link_speeds);
	speed = bnxt_parse_eth_link_speed(dev_conf->link_speeds);
	link_req.phy_flags = HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESET_PHY;
	/* Autoneg can be done only when the FW allows */
	if (autoneg == 1 && !(bp->link_info.auto_link_speed ||
				bp->link_info.force_link_speed)) {
		link_req.phy_flags |=
				HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESTART_AUTONEG;
		link_req.auto_link_speed_mask =
			bnxt_parse_eth_link_speed_mask(bp,
						       dev_conf->link_speeds);
	} else {
		/* Forced-speed path.  BaseT/TP media cannot run forced at
		 * 10G and above, so reject the request outright.
		 */
		if (bp->link_info.phy_type ==
		    HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASET ||
		    bp->link_info.phy_type ==
		    HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASETE ||
		    bp->link_info.media_type ==
		    HWRM_PORT_PHY_QCFG_OUTPUT_MEDIA_TYPE_TP) {
			RTE_LOG(ERR, PMD, "10GBase-T devices must autoneg\n");
			return -EINVAL;
		}

		link_req.phy_flags |= HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE;
		/* If user wants a particular speed try that first. */
		if (speed)
			link_req.link_speed = speed;
		else if (bp->link_info.force_link_speed)
			link_req.link_speed = bp->link_info.force_link_speed;
		else
			link_req.link_speed = bp->link_info.auto_link_speed;
	}
	link_req.duplex = bnxt_parse_eth_link_duplex(dev_conf->link_speeds);
	/* Preserve the currently negotiated pause configuration. */
	link_req.auto_pause = bp->link_info.auto_pause;
	link_req.force_pause = bp->link_info.force_pause;

port_phy_cfg:
	rc = bnxt_hwrm_port_phy_cfg(bp, &link_req);
	if (rc) {
		RTE_LOG(ERR, PMD,
			"Set link config failed with rc %d\n", rc);
	}

error:
	return rc;
}
1568
1569 /* JIRA 22088 */
1570 int bnxt_hwrm_func_qcfg(struct bnxt *bp)
1571 {
1572         struct hwrm_func_qcfg_input req = {0};
1573         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
1574         int rc = 0;
1575
1576         HWRM_PREP(req, FUNC_QCFG, -1, resp);
1577         req.fid = rte_cpu_to_le_16(0xffff);
1578
1579         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1580
1581         HWRM_CHECK_RESULT;
1582
1583         if (BNXT_VF(bp)) {
1584                 struct bnxt_vf_info *vf = &bp->vf;
1585
1586                 /* Hard Coded.. 0xfff VLAN ID mask */
1587                 vf->vlan = rte_le_to_cpu_16(resp->vlan) & 0xfff;
1588         }
1589
1590         switch (resp->port_partition_type) {
1591         case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_0:
1592         case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_5:
1593         case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR2_0:
1594                 bp->port_partition_type = resp->port_partition_type;
1595                 break;
1596         default:
1597                 bp->port_partition_type = 0;
1598                 break;
1599         }
1600
1601         return rc;
1602 }