New upstream version 16.11.4
[deb_dpdk.git] / drivers / net / bnxt / bnxt_ethdev.c
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) Broadcom Limited.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Broadcom Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33
34 #include <inttypes.h>
35 #include <stdbool.h>
36
37 #include <rte_dev.h>
38 #include <rte_ethdev.h>
39 #include <rte_malloc.h>
40 #include <rte_cycles.h>
41
42 #include "bnxt.h"
43 #include "bnxt_cpr.h"
44 #include "bnxt_filter.h"
45 #include "bnxt_hwrm.h"
46 #include "bnxt_irq.h"
47 #include "bnxt_ring.h"
48 #include "bnxt_rxq.h"
49 #include "bnxt_rxr.h"
50 #include "bnxt_stats.h"
51 #include "bnxt_txq.h"
52 #include "bnxt_txr.h"
53 #include "bnxt_vnic.h"
54 #include "hsi_struct_def_dpdk.h"
55
56 #define DRV_MODULE_NAME         "bnxt"
57 static const char bnxt_version[] =
58         "Broadcom Cumulus driver " DRV_MODULE_NAME "\n";
59
60 #define PCI_VENDOR_ID_BROADCOM 0x14E4
61
62 #define BROADCOM_DEV_ID_57301 0x16c8
63 #define BROADCOM_DEV_ID_57302 0x16c9
64 #define BROADCOM_DEV_ID_57304_PF 0x16ca
65 #define BROADCOM_DEV_ID_57304_VF 0x16cb
66 #define BROADCOM_DEV_ID_NS2 0x16cd
67 #define BROADCOM_DEV_ID_57402 0x16d0
68 #define BROADCOM_DEV_ID_57404 0x16d1
69 #define BROADCOM_DEV_ID_57406_PF 0x16d2
70 #define BROADCOM_DEV_ID_57406_VF 0x16d3
71 #define BROADCOM_DEV_ID_57402_MF 0x16d4
72 #define BROADCOM_DEV_ID_57407_RJ45 0x16d5
73 #define BROADCOM_DEV_ID_5741X_VF 0x16dc
74 #define BROADCOM_DEV_ID_5731X_VF 0x16e1
75 #define BROADCOM_DEV_ID_57404_MF 0x16e7
76 #define BROADCOM_DEV_ID_57406_MF 0x16e8
77 #define BROADCOM_DEV_ID_57407_SFP 0x16e9
78 #define BROADCOM_DEV_ID_57407_MF 0x16ea
79
/*
 * PCI ID table: every Broadcom NetXtreme-C/E device this PMD claims
 * (PF, VF and multi-function variants), terminated by a zeroed
 * sentinel entry as required by the rte_pci_id convention.
 */
static struct rte_pci_id bnxt_pci_id_map[] = {
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57301) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57302) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57304_PF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57304_VF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_NS2) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57402) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57404) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57406_PF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57406_VF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57402_MF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57407_RJ45) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57404_MF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57406_MF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57407_SFP) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57407_MF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_5741X_VF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_5731X_VF) },
	{ .vendor_id = 0, /* sentinel */ },
};
100
101 #define BNXT_ETH_RSS_SUPPORT (  \
102         ETH_RSS_IPV4 |          \
103         ETH_RSS_NONFRAG_IPV4_TCP |      \
104         ETH_RSS_NONFRAG_IPV4_UDP |      \
105         ETH_RSS_IPV6 |          \
106         ETH_RSS_NONFRAG_IPV6_TCP |      \
107         ETH_RSS_NONFRAG_IPV6_UDP)
108
109 static void bnxt_print_link_info(struct rte_eth_dev *eth_dev);
110
111 /***********************/
112
113 /*
114  * High level utility functions
115  */
116
/*
 * Release all per-port software resources, roughly in reverse order of
 * bnxt_alloc_mem(): filters first, then VNIC attributes/memory, then
 * stats, TX/RX rings, and finally the default completion ring.
 */
static void bnxt_free_mem(struct bnxt *bp)
{
	bnxt_free_filter_mem(bp);
	bnxt_free_vnic_attributes(bp);
	bnxt_free_vnic_mem(bp);

	bnxt_free_stats(bp);
	bnxt_free_tx_rings(bp);
	bnxt_free_rx_rings(bp);
	bnxt_free_def_cp_ring(bp);
}
128
/*
 * Allocate the per-port software resources: the default completion
 * ring, VNIC memory and attributes, and filter memory.
 *
 * Returns 0 on success or the first failing helper's error code; on
 * any failure everything allocated so far is freed via bnxt_free_mem()
 * (which tolerates partially-initialized state).
 */
static int bnxt_alloc_mem(struct bnxt *bp)
{
	int rc;

	/* Default completion ring */
	rc = bnxt_init_def_ring_struct(bp, SOCKET_ID_ANY);
	if (rc)
		goto alloc_mem_err;

	rc = bnxt_alloc_rings(bp, 0, NULL, NULL,
			      bp->def_cp_ring, "def_cp");
	if (rc)
		goto alloc_mem_err;

	rc = bnxt_alloc_vnic_mem(bp);
	if (rc)
		goto alloc_mem_err;

	rc = bnxt_alloc_vnic_attributes(bp);
	if (rc)
		goto alloc_mem_err;

	rc = bnxt_alloc_filter_mem(bp);
	if (rc)
		goto alloc_mem_err;

	return 0;

alloc_mem_err:
	/* goto-based cleanup: undo everything allocated above. */
	bnxt_free_mem(bp);
	return rc;
}
161
/*
 * Program the NIC for operation through HWRM: allocate stat contexts,
 * rings and ring groups, configure RX multi-queue mode, then set up
 * each VNIC (allocation, context, config, L2 filters, and the RSS
 * indirection table), apply the RX mask on the default VNIC, and
 * finally bring the link up if firmware reports it down.
 *
 * Returns 0 on success; on any failure all HWRM resources acquired so
 * far are released and the error code is returned.
 */
static int bnxt_init_chip(struct bnxt *bp)
{
	unsigned int i, rss_idx, fw_idx;
	struct rte_eth_link new;
	int rc;

	rc = bnxt_alloc_all_hwrm_stat_ctxs(bp);
	if (rc) {
		RTE_LOG(ERR, PMD, "HWRM stat ctx alloc failure rc: %x\n", rc);
		goto err_out;
	}

	rc = bnxt_alloc_hwrm_rings(bp);
	if (rc) {
		RTE_LOG(ERR, PMD, "HWRM ring alloc failure rc: %x\n", rc);
		goto err_out;
	}

	rc = bnxt_alloc_all_hwrm_ring_grps(bp);
	if (rc) {
		RTE_LOG(ERR, PMD, "HWRM ring grp alloc failure: %x\n", rc);
		goto err_out;
	}

	rc = bnxt_mq_rx_configure(bp);
	if (rc) {
		RTE_LOG(ERR, PMD, "MQ mode configure failure rc: %x\n", rc);
		goto err_out;
	}

	/* VNIC configuration */
	for (i = 0; i < bp->nr_vnics; i++) {
		struct bnxt_vnic_info *vnic = &bp->vnic_info[i];

		rc = bnxt_hwrm_vnic_alloc(bp, vnic);
		if (rc) {
			RTE_LOG(ERR, PMD, "HWRM vnic alloc failure rc: %x\n",
				rc);
			goto err_out;
		}

		rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic);
		if (rc) {
			RTE_LOG(ERR, PMD,
				"HWRM vnic ctx alloc failure rc: %x\n", rc);
			goto err_out;
		}

		rc = bnxt_hwrm_vnic_cfg(bp, vnic);
		if (rc) {
			RTE_LOG(ERR, PMD, "HWRM vnic cfg failure rc: %x\n", rc);
			goto err_out;
		}

		rc = bnxt_set_hwrm_vnic_filters(bp, vnic);
		if (rc) {
			RTE_LOG(ERR, PMD, "HWRM vnic filter failure rc: %x\n",
				rc);
			goto err_out;
		}
		if (vnic->rss_table && vnic->hash_type) {
			/*
			 * Fill the RSS hash & redirection table with
			 * ring group ids for all VNICs
			 */
			for (rss_idx = 0, fw_idx = 0;
			     rss_idx < HW_HASH_INDEX_SIZE;
			     rss_idx++, fw_idx++) {
				/*
				 * Wrap fw_idx back to the first ring group
				 * once the valid entries are exhausted, so
				 * the whole table is populated.
				 */
				if (vnic->fw_grp_ids[fw_idx] ==
				    INVALID_HW_RING_ID)
					fw_idx = 0;
				vnic->rss_table[rss_idx] =
						vnic->fw_grp_ids[fw_idx];
			}
			rc = bnxt_hwrm_vnic_rss_cfg(bp, vnic);
			if (rc) {
				RTE_LOG(ERR, PMD,
					"HWRM vnic set RSS failure rc: %x\n",
					rc);
				goto err_out;
			}
		}
	}
	/* Apply the RX mask (promisc/allmulti flags) on the default VNIC. */
	rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, &bp->vnic_info[0]);
	if (rc) {
		RTE_LOG(ERR, PMD,
			"HWRM cfa l2 rx mask failure rc: %x\n", rc);
		goto err_out;
	}

	rc = bnxt_get_hwrm_link_config(bp, &new);
	if (rc) {
		RTE_LOG(ERR, PMD, "HWRM Get link config failure rc: %x\n", rc);
		goto err_out;
	}

	/* Only force the link up when firmware reports it down. */
	if (!bp->link_info.link_up) {
		rc = bnxt_set_hwrm_link_config(bp, true);
		if (rc) {
			RTE_LOG(ERR, PMD,
				"HWRM link config failure rc: %x\n", rc);
			goto err_out;
		}
	}
	bnxt_print_link_info(bp->eth_dev);

	return 0;

err_out:
	/* Unwind: release every HWRM resource acquired above. */
	bnxt_free_all_hwrm_resources(bp);

	return rc;
}
275
/*
 * Tear down the chip state set up by bnxt_init_chip()/bnxt_init_nic():
 * release all HWRM resources, then drop the software filter and VNIC
 * lists.  Always returns 0.
 */
static int bnxt_shutdown_nic(struct bnxt *bp)
{
	bnxt_free_all_hwrm_resources(bp);
	bnxt_free_all_filters(bp);
	bnxt_free_all_vnics(bp);
	return 0;
}
283
/*
 * Initialize the per-port software state (ring groups, VNICs and
 * filter lists) and then program the chip via HWRM.
 *
 * Returns 0 on success or the error code from bnxt_init_chip().
 */
static int bnxt_init_nic(struct bnxt *bp)
{
	bnxt_init_ring_grps(bp);
	bnxt_init_vnics(bp);
	bnxt_init_filters(bp);

	/* Chip programming is the only step that can fail here. */
	return bnxt_init_chip(bp);
}
298
299 /*
300  * Device configuration and status function
301  */
302
/*
 * dev_infos_get ethdev op: report device capabilities (queue counts,
 * offloads, default ring thresholds) and derive the advertised VMDq
 * pool/queue resources from the available VNIC and RX queue counts.
 */
static void bnxt_dev_info_get_op(struct rte_eth_dev *eth_dev,
				  struct rte_eth_dev_info *dev_info)
{
	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
	uint16_t max_vnics, i, j, vpool, vrxq;

	/* MAC Specifics */
	dev_info->max_mac_addrs = MAX_NUM_MAC_ADDR;
	dev_info->max_hash_mac_addrs = 0;

	/* PF/VF specifics */
	if (BNXT_PF(bp)) {
		dev_info->max_rx_queues = bp->pf.max_rx_rings;
		dev_info->max_tx_queues = bp->pf.max_tx_rings;
		dev_info->max_vfs = bp->pf.active_vfs;
		dev_info->reta_size = bp->pf.max_rsscos_ctx;
		max_vnics = bp->pf.max_vnics;
	} else {
		/*
		 * NOTE(review): max_vfs is left untouched on the VF path —
		 * presumably intentional (a VF has no VFs), but confirm the
		 * caller zeroes dev_info beforehand.
		 */
		dev_info->max_rx_queues = bp->vf.max_rx_rings;
		dev_info->max_tx_queues = bp->vf.max_tx_rings;
		dev_info->reta_size = bp->vf.max_rsscos_ctx;
		max_vnics = bp->vf.max_vnics;
	}

	/* Fast path specifics */
	dev_info->min_rx_bufsize = 1;
	dev_info->max_rx_pktlen = BNXT_MAX_MTU + ETHER_HDR_LEN + ETHER_CRC_LEN
				  + VLAN_TAG_SIZE;
	dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP |
					DEV_RX_OFFLOAD_IPV4_CKSUM |
					DEV_RX_OFFLOAD_UDP_CKSUM |
					DEV_RX_OFFLOAD_TCP_CKSUM |
					DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM;
	dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT |
					DEV_TX_OFFLOAD_IPV4_CKSUM |
					DEV_TX_OFFLOAD_TCP_CKSUM |
					DEV_TX_OFFLOAD_UDP_CKSUM |
					DEV_TX_OFFLOAD_TCP_TSO;

	/* *INDENT-OFF* */
	dev_info->default_rxconf = (struct rte_eth_rxconf) {
		.rx_thresh = {
			.pthresh = 8,
			.hthresh = 8,
			.wthresh = 0,
		},
		.rx_free_thresh = 32,
		.rx_drop_en = 0,
	};

	dev_info->default_txconf = (struct rte_eth_txconf) {
		.tx_thresh = {
			.pthresh = 32,
			.hthresh = 0,
			.wthresh = 0,
		},
		.tx_free_thresh = 32,
		.tx_rs_thresh = 32,
		.txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
			     ETH_TXQ_FLAGS_NOOFFLOADS,
	};
	eth_dev->data->dev_conf.intr_conf.lsc = 1;

	/* *INDENT-ON* */

	/*
	 * TODO: default_rxconf, default_txconf, rx_desc_lim, and tx_desc_lim
	 *       need further investigation.
	 */

	/* VMDq resources */
	/*
	 * Find the largest pool count (64, 32, 16, 8) and queue count
	 * (128 down to 8, halving each step) the hardware can actually
	 * back with VNICs and RX queues; fall back to 0/0 when VMDq
	 * cannot be supported at all.
	 */
	vpool = 64; /* ETH_64_POOLS */
	vrxq = 128; /* ETH_VMDQ_DCB_NUM_QUEUES */
	for (i = 0; i < 4; vpool >>= 1, i++) {
		if (max_vnics > vpool) {
			for (j = 0; j < 5; vrxq >>= 1, j++) {
				if (dev_info->max_rx_queues > vrxq) {
					if (vpool > vrxq)
						vpool = vrxq;
					goto found;
				}
			}
			/* Not enough resources to support VMDq */
			break;
		}
	}
	/* Not enough resources to support VMDq */
	vpool = 0;
	vrxq = 0;
found:
	dev_info->max_vmdq_pools = vpool;
	dev_info->vmdq_queue_num = vrxq;

	dev_info->vmdq_pool_base = 0;
	dev_info->vmdq_queue_base = 0;
}
399
400 /* Configure the device based on the configuration provided */
/*
 * dev_configure ethdev op: cache the queue arrays and queue counts
 * requested by the application, and derive the MTU from the maximum RX
 * packet length when jumbo frames are enabled.  Always returns 0.
 */
static int bnxt_dev_configure_op(struct rte_eth_dev *eth_dev)
{
	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;

	bp->rx_queues = (void *)eth_dev->data->rx_queues;
	bp->tx_queues = (void *)eth_dev->data->tx_queues;

	/* Inherit new configurations */
	bp->rx_nr_rings = eth_dev->data->nb_rx_queues;
	bp->tx_nr_rings = eth_dev->data->nb_tx_queues;
	/* One completion ring per RX/TX ring. */
	bp->rx_cp_nr_rings = bp->rx_nr_rings;
	bp->tx_cp_nr_rings = bp->tx_nr_rings;

	if (eth_dev->data->dev_conf.rxmode.jumbo_frame)
		eth_dev->data->mtu =
				eth_dev->data->dev_conf.rxmode.max_rx_pkt_len -
				ETHER_HDR_LEN - ETHER_CRC_LEN - VLAN_TAG_SIZE;
	return 0;
}
420
421 static void bnxt_print_link_info(struct rte_eth_dev *eth_dev)
422 {
423         struct rte_eth_link *link = &eth_dev->data->dev_link;
424
425         if (link->link_status)
426                 RTE_LOG(INFO, PMD, "Port %d Link Up - speed %u Mbps - %s\n",
427                         (uint8_t)(eth_dev->data->port_id),
428                         (uint32_t)link->link_speed,
429                         (link->link_duplex == ETH_LINK_FULL_DUPLEX) ?
430                         ("full-duplex") : ("half-duplex\n"));
431         else
432                 RTE_LOG(INFO, PMD, "Port %d Link Down\n",
433                         (uint8_t)(eth_dev->data->port_id));
434 }
435
/*
 * LSC (link state change) interrupt hook: simply log the current link
 * state.  Always returns 0.
 */
static int bnxt_dev_lsc_intr_setup(struct rte_eth_dev *eth_dev)
{
	bnxt_print_link_info(eth_dev);
	return 0;
}
441
/*
 * dev_start ethdev op: reset the function via HWRM, set up and request
 * interrupts, allocate driver memory, program the chip, enable
 * interrupts and kick off a blocking link update.
 *
 * Returns 0 on success; on failure everything brought up here is torn
 * down again and the error code is returned.
 */
static int bnxt_dev_start_op(struct rte_eth_dev *eth_dev)
{
	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
	int rc;

	bp->dev_stopped = 0;
	rc = bnxt_hwrm_func_reset(bp);
	if (rc) {
		RTE_LOG(ERR, PMD, "hwrm chip reset failure rc: %x\n", rc);
		rc = -1;
		goto error;
	}

	rc = bnxt_setup_int(bp);
	if (rc)
		goto error;

	rc = bnxt_alloc_mem(bp);
	if (rc)
		goto error;

	rc = bnxt_request_int(bp);
	if (rc)
		goto error;

	rc = bnxt_init_nic(bp);
	if (rc)
		goto error;

	bnxt_enable_int(bp);

	/* wait_to_complete=1: block until the link is resolved. */
	bnxt_link_update_op(eth_dev, 1);
	return 0;

error:
	/*
	 * Single cleanup path for all failures above; each helper
	 * tolerates state that was never set up.
	 */
	bnxt_shutdown_nic(bp);
	bnxt_disable_int(bp);
	bnxt_free_int(bp);
	bnxt_free_tx_mbufs(bp);
	bnxt_free_rx_mbufs(bp);
	bnxt_free_mem(bp);
	return rc;
}
485
486 static int bnxt_dev_set_link_up_op(struct rte_eth_dev *eth_dev)
487 {
488         struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
489         int rc = 0;
490
491         if (!bp->link_info.link_up)
492                 rc = bnxt_set_hwrm_link_config(bp, true);
493         if (!rc)
494                 eth_dev->data->dev_link.link_status = 1;
495
496         bnxt_print_link_info(eth_dev);
497         return 0;
498 }
499
/*
 * dev_set_link_down ethdev op: mark the cached link state down, ask
 * firmware to drop the link, and clear the driver's link-up flag.
 * Always returns 0 (the HWRM result is deliberately ignored).
 */
static int bnxt_dev_set_link_down_op(struct rte_eth_dev *eth_dev)
{
	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;

	eth_dev->data->dev_link.link_status = 0;
	bnxt_set_hwrm_link_config(bp, false);
	bp->link_info.link_up = 0;

	return 0;
}
510
511 /* Unload the driver, release resources */
/* Unload the driver, release resources */
/*
 * dev_stop ethdev op: mark the link down, force the PHY link down,
 * tear down interrupts and chip state, and flag the port as stopped so
 * bnxt_dev_close_op() knows it need not stop it again.
 */
static void bnxt_dev_stop_op(struct rte_eth_dev *eth_dev)
{
	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;

	if (bp->eth_dev->data->dev_started) {
		/* TBD: STOP HW queues DMA */
		eth_dev->data->dev_link.link_status = 0;
	}
	bnxt_set_hwrm_link_config(bp, false);
	bnxt_disable_int(bp);
	bnxt_free_int(bp);
	bnxt_shutdown_nic(bp);
	bp->dev_stopped = 1;
}
526
527 static void bnxt_dev_close_op(struct rte_eth_dev *eth_dev)
528 {
529         struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
530
531         if (bp->dev_stopped == 0)
532                 bnxt_dev_stop_op(eth_dev);
533
534         bnxt_free_tx_mbufs(bp);
535         bnxt_free_rx_mbufs(bp);
536         bnxt_free_mem(bp);
537         if (eth_dev->data->mac_addrs != NULL) {
538                 rte_free(eth_dev->data->mac_addrs);
539                 eth_dev->data->mac_addrs = NULL;
540         }
541         if (bp->grp_info != NULL) {
542                 rte_free(bp->grp_info);
543                 bp->grp_info = NULL;
544         }
545 }
546
/*
 * mac_addr_remove ethdev op: walk every VNIC in each filter-flow pool
 * selected by the MAC's pool mask and remove the L2 filter(s) bound to
 * the given MAC table index.  Removed filters are cleared in hardware,
 * wiped, and returned to the free-filter list for reuse.
 */
static void bnxt_mac_addr_remove_op(struct rte_eth_dev *eth_dev,
				    uint32_t index)
{
	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
	/* Which pools this MAC index was added to. */
	uint64_t pool_mask = eth_dev->data->mac_pool_sel[index];
	struct bnxt_vnic_info *vnic;
	struct bnxt_filter_info *filter, *temp_filter;
	uint32_t pool = RTE_MIN(MAX_FF_POOLS, ETH_64_POOLS);
	uint32_t i;

	/*
	 * Loop through all VNICs from the specified filter flow pools to
	 * remove the corresponding MAC addr filter
	 */
	for (i = 0; i < pool; i++) {
		if (!(pool_mask & (1ULL << i)))
			continue;

		STAILQ_FOREACH(vnic, &bp->ff_pool[i], next) {
			/*
			 * Manual walk with a saved next pointer because the
			 * current node may be unlinked mid-iteration.
			 */
			filter = STAILQ_FIRST(&vnic->filter);
			while (filter) {
				temp_filter = STAILQ_NEXT(filter, next);
				if (filter->mac_index == index) {
					STAILQ_REMOVE(&vnic->filter, filter,
						      bnxt_filter_info, next);
					bnxt_hwrm_clear_filter(bp, filter);
					filter->mac_index = INVALID_MAC_INDEX;
					memset(&filter->l2_addr, 0,
					       ETHER_ADDR_LEN);
					STAILQ_INSERT_TAIL(
							&bp->free_filter_list,
							filter, next);
				}
				filter = temp_filter;
			}
		}
	}
}
585
/*
 * mac_addr_add ethdev op: attach a new L2 filter for the given MAC
 * address to the first VNIC of the requested pool.  Rejected (with a
 * log message only — the op returns void) on VF ports, when the pool
 * has no VNIC, when the index is already in use, or when no free
 * filter is available.
 */
static void bnxt_mac_addr_add_op(struct rte_eth_dev *eth_dev,
				 struct ether_addr *mac_addr,
				 uint32_t index, uint32_t pool)
{
	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
	struct bnxt_vnic_info *vnic = STAILQ_FIRST(&bp->ff_pool[pool]);
	struct bnxt_filter_info *filter;

	if (BNXT_VF(bp)) {
		RTE_LOG(ERR, PMD, "Cannot add MAC address to a VF interface\n");
		return;
	}

	if (!vnic) {
		RTE_LOG(ERR, PMD, "VNIC not found for pool %d!\n", pool);
		return;
	}
	/* Attach requested MAC address to the new l2_filter */
	STAILQ_FOREACH(filter, &vnic->filter, next) {
		if (filter->mac_index == index) {
			RTE_LOG(ERR, PMD,
				"MAC addr already existed for pool %d\n", pool);
			return;
		}
	}
	filter = bnxt_alloc_filter(bp);
	if (!filter) {
		RTE_LOG(ERR, PMD, "L2 filter alloc failed\n");
		return;
	}
	STAILQ_INSERT_TAIL(&vnic->filter, filter, next);
	filter->mac_index = index;
	memcpy(filter->l2_addr, mac_addr, ETHER_ADDR_LEN);
	bnxt_hwrm_set_filter(bp, vnic, filter);
}
621
/*
 * link_update ethdev op: poll firmware for the current link state.
 * When wait_to_complete is set, keep polling (sleeping
 * BNXT_LINK_WAIT_INTERVAL ms between attempts, up to
 * BNXT_LINK_WAIT_CNT times) until the link reports up or the retries
 * run out.  The cached dev_link is rewritten — and the new state
 * logged — only when status or speed actually changed.
 *
 * Returns 0 on success or the HWRM error code; on error a fallback
 * 100M/full-duplex state is recorded.
 */
int bnxt_link_update_op(struct rte_eth_dev *eth_dev, int wait_to_complete)
{
	int rc = 0;
	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
	struct rte_eth_link new;
	unsigned int cnt = BNXT_LINK_WAIT_CNT;

	memset(&new, 0, sizeof(new));
	do {
		/* Retrieve link info from hardware */
		rc = bnxt_get_hwrm_link_config(bp, &new);
		if (rc) {
			new.link_speed = ETH_LINK_SPEED_100M;
			new.link_duplex = ETH_LINK_FULL_DUPLEX;
			RTE_LOG(ERR, PMD,
				"Failed to retrieve link rc = 0x%x!", rc);
			goto out;
		}
		/*
		 * NOTE(review): the delay runs even when the link is already
		 * up or wait_to_complete is 0 — looks like it could be
		 * skipped in those cases; confirm before changing.
		 */
		rte_delay_ms(BNXT_LINK_WAIT_INTERVAL);

		if (!wait_to_complete)
			break;
	} while (!new.link_status && cnt--);

out:
	/* Timed out or success */
	if (new.link_status != eth_dev->data->dev_link.link_status ||
	    new.link_speed != eth_dev->data->dev_link.link_speed) {
		memcpy(&eth_dev->data->dev_link, &new,
			sizeof(struct rte_eth_link));
		bnxt_print_link_info(eth_dev);
	}

	return rc;
}
657
658 static void bnxt_promiscuous_enable_op(struct rte_eth_dev *eth_dev)
659 {
660         struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
661         struct bnxt_vnic_info *vnic;
662
663         if (bp->vnic_info == NULL)
664                 return;
665
666         vnic = &bp->vnic_info[0];
667
668         vnic->flags |= BNXT_VNIC_INFO_PROMISC;
669         bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic);
670 }
671
672 static void bnxt_promiscuous_disable_op(struct rte_eth_dev *eth_dev)
673 {
674         struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
675         struct bnxt_vnic_info *vnic;
676
677         if (bp->vnic_info == NULL)
678                 return;
679
680         vnic = &bp->vnic_info[0];
681
682         vnic->flags &= ~BNXT_VNIC_INFO_PROMISC;
683         bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic);
684 }
685
686 static void bnxt_allmulticast_enable_op(struct rte_eth_dev *eth_dev)
687 {
688         struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
689         struct bnxt_vnic_info *vnic;
690
691         if (bp->vnic_info == NULL)
692                 return;
693
694         vnic = &bp->vnic_info[0];
695
696         vnic->flags |= BNXT_VNIC_INFO_ALLMULTI;
697         bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic);
698 }
699
700 static void bnxt_allmulticast_disable_op(struct rte_eth_dev *eth_dev)
701 {
702         struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
703         struct bnxt_vnic_info *vnic;
704
705         if (bp->vnic_info == NULL)
706                 return;
707
708         vnic = &bp->vnic_info[0];
709
710         vnic->flags &= ~BNXT_VNIC_INFO_ALLMULTI;
711         bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic);
712 }
713
/*
 * reta_update ethdev op: copy the caller-supplied redirection table
 * into every RSS VNIC and re-program it via HWRM.
 *
 * Returns 0 on success; -EINVAL when RSS is not configured or when
 * reta_size differs from the hardware table size.
 */
static int bnxt_reta_update_op(struct rte_eth_dev *eth_dev,
			    struct rte_eth_rss_reta_entry64 *reta_conf,
			    uint16_t reta_size)
{
	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
	struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
	struct bnxt_vnic_info *vnic;
	int i;

	if (!(dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG))
		return -EINVAL;

	if (reta_size != HW_HASH_INDEX_SIZE) {
		RTE_LOG(ERR, PMD, "The configured hash table lookup size "
			"(%d) must equal the size supported by the hardware "
			"(%d)\n", reta_size, HW_HASH_INDEX_SIZE);
		return -EINVAL;
	}
	/* Update the RSS VNIC(s) */
	for (i = 0; i < MAX_FF_POOLS; i++) {
		STAILQ_FOREACH(vnic, &bp->ff_pool[i], next) {
			/*
			 * NOTE(review): reta_size counts table entries, while
			 * memcpy copies reta_size *bytes* from an array of
			 * rte_eth_rss_reta_entry64 structs — this looks like
			 * a unit mismatch; verify against the rss_table
			 * layout before relying on it.
			 */
			memcpy(vnic->rss_table, reta_conf, reta_size);

			bnxt_hwrm_vnic_rss_cfg(bp, vnic);
		}
	}
	return 0;
}
742
/*
 * reta_query ethdev op: report the redirection table of the default
 * VNIC (RSS configuration is the same across VNICs).
 *
 * Returns 0 on success; -EINVAL when no VNIC/RSS table exists or when
 * reta_size differs from the hardware table size.
 */
static int bnxt_reta_query_op(struct rte_eth_dev *eth_dev,
			      struct rte_eth_rss_reta_entry64 *reta_conf,
			      uint16_t reta_size)
{
	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
	struct bnxt_vnic_info *vnic = &bp->vnic_info[0];

	/* Retrieve from the default VNIC */
	if (!vnic)
		return -EINVAL;
	if (!vnic->rss_table)
		return -EINVAL;

	if (reta_size != HW_HASH_INDEX_SIZE) {
		RTE_LOG(ERR, PMD, "The configured hash table lookup size "
			"(%d) must equal the size supported by the hardware "
			"(%d)\n", reta_size, HW_HASH_INDEX_SIZE);
		return -EINVAL;
	}
	/* EW - need to revisit here copying from u64 to u16 */
	memcpy(reta_conf, vnic->rss_table, reta_size);

	/*
	 * NOTE(review): performing LSC interrupt setup inside a reta query
	 * is unusual — confirm this block is intentional and not a merge
	 * artifact before moving or removing it.
	 */
	if (rte_intr_allow_others(&eth_dev->pci_dev->intr_handle)) {
		if (eth_dev->data->dev_conf.intr_conf.lsc != 0)
			bnxt_dev_lsc_intr_setup(eth_dev);
	}

	return 0;
}
772
/*
 * rss_hash_update ethdev op: translate the requested rss_hf bits into
 * HWRM hash-type flags, optionally install a new hash key, and
 * re-program every RSS VNIC.
 *
 * Returns 0 on success; -EINVAL when the request contradicts the RSS
 * enablement chosen at dev_configure time.
 */
static int bnxt_rss_hash_update_op(struct rte_eth_dev *eth_dev,
				   struct rte_eth_rss_conf *rss_conf)
{
	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
	struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
	struct bnxt_vnic_info *vnic;
	uint16_t hash_type = 0;
	int i;

	/*
	 * If RSS enablement were different than dev_configure,
	 * then return -EINVAL
	 */
	if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) {
		if (!rss_conf->rss_hf)
			return -EINVAL;
	} else {
		if (rss_conf->rss_hf & BNXT_ETH_RSS_SUPPORT)
			return -EINVAL;
	}
	/* Map each supported rss_hf bit to the HWRM hash-type flag. */
	if (rss_conf->rss_hf & ETH_RSS_IPV4)
		hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4;
	if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
		hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4;
	if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
		hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4;
	if (rss_conf->rss_hf & ETH_RSS_IPV6)
		hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6;
	if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV6_TCP)
		hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6;
	if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV6_UDP)
		hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6;

	/* Update the RSS VNIC(s) */
	for (i = 0; i < MAX_FF_POOLS; i++) {
		STAILQ_FOREACH(vnic, &bp->ff_pool[i], next) {
			vnic->hash_type = hash_type;

			/*
			 * Use the supplied key if the key length is
			 * acceptable and the rss_key is not NULL
			 */
			if (rss_conf->rss_key &&
			    rss_conf->rss_key_len <= HW_HASH_KEY_SIZE)
				memcpy(vnic->rss_hash_key, rss_conf->rss_key,
				       rss_conf->rss_key_len);

			bnxt_hwrm_vnic_rss_cfg(bp, vnic);
		}
	}
	return 0;
}
825
826 static int bnxt_rss_hash_conf_get_op(struct rte_eth_dev *eth_dev,
827                                      struct rte_eth_rss_conf *rss_conf)
828 {
829         struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
830         struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
831         int len;
832         uint32_t hash_types;
833
834         /* RSS configuration is the same for all VNICs */
835         if (vnic && vnic->rss_hash_key) {
836                 if (rss_conf->rss_key) {
837                         len = rss_conf->rss_key_len <= HW_HASH_KEY_SIZE ?
838                               rss_conf->rss_key_len : HW_HASH_KEY_SIZE;
839                         memcpy(rss_conf->rss_key, vnic->rss_hash_key, len);
840                 }
841
842                 hash_types = vnic->hash_type;
843                 rss_conf->rss_hf = 0;
844                 if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4) {
845                         rss_conf->rss_hf |= ETH_RSS_IPV4;
846                         hash_types &= ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4;
847                 }
848                 if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4) {
849                         rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
850                         hash_types &=
851                                 ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4;
852                 }
853                 if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4) {
854                         rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
855                         hash_types &=
856                                 ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4;
857                 }
858                 if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6) {
859                         rss_conf->rss_hf |= ETH_RSS_IPV6;
860                         hash_types &= ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6;
861                 }
862                 if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6) {
863                         rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
864                         hash_types &=
865                                 ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6;
866                 }
867                 if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6) {
868                         rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
869                         hash_types &=
870                                 ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6;
871                 }
872                 if (hash_types) {
873                         RTE_LOG(ERR, PMD,
874                                 "Unknwon RSS config from firmware (%08x), RSS disabled",
875                                 vnic->hash_type);
876                         return -ENOTSUP;
877                 }
878         } else {
879                 rss_conf->rss_hf = 0;
880         }
881         return 0;
882 }
883
884 static int bnxt_flow_ctrl_get_op(struct rte_eth_dev *dev,
885                                struct rte_eth_fc_conf *fc_conf __rte_unused)
886 {
887         struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
888         struct rte_eth_link link_info;
889         int rc;
890
891         rc = bnxt_get_hwrm_link_config(bp, &link_info);
892         if (rc)
893                 return rc;
894
895         memset(fc_conf, 0, sizeof(*fc_conf));
896         if (bp->link_info.auto_pause)
897                 fc_conf->autoneg = 1;
898         switch (bp->link_info.pause) {
899         case 0:
900                 fc_conf->mode = RTE_FC_NONE;
901                 break;
902         case HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_TX:
903                 fc_conf->mode = RTE_FC_TX_PAUSE;
904                 break;
905         case HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_RX:
906                 fc_conf->mode = RTE_FC_RX_PAUSE;
907                 break;
908         case (HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_TX |
909                         HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_RX):
910                 fc_conf->mode = RTE_FC_FULL;
911                 break;
912         }
913         return 0;
914 }
915
916 static int bnxt_flow_ctrl_set_op(struct rte_eth_dev *dev,
917                                struct rte_eth_fc_conf *fc_conf)
918 {
919         struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
920
921         if (BNXT_NPAR_PF(bp) || BNXT_VF(bp)) {
922                 RTE_LOG(ERR, PMD, "Flow Control Settings cannot be modified\n");
923                 return -ENOTSUP;
924         }
925
926         switch (fc_conf->mode) {
927         case RTE_FC_NONE:
928                 bp->link_info.auto_pause = 0;
929                 bp->link_info.force_pause = 0;
930                 break;
931         case RTE_FC_RX_PAUSE:
932                 if (fc_conf->autoneg) {
933                         bp->link_info.auto_pause =
934                                         HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_RX;
935                         bp->link_info.force_pause = 0;
936                 } else {
937                         bp->link_info.auto_pause = 0;
938                         bp->link_info.force_pause =
939                                         HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_RX;
940                 }
941                 break;
942         case RTE_FC_TX_PAUSE:
943                 if (fc_conf->autoneg) {
944                         bp->link_info.auto_pause =
945                                         HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_TX;
946                         bp->link_info.force_pause = 0;
947                 } else {
948                         bp->link_info.auto_pause = 0;
949                         bp->link_info.force_pause =
950                                         HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_TX;
951                 }
952                 break;
953         case RTE_FC_FULL:
954                 if (fc_conf->autoneg) {
955                         bp->link_info.auto_pause =
956                                         HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_TX |
957                                         HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_RX;
958                         bp->link_info.force_pause = 0;
959                 } else {
960                         bp->link_info.auto_pause = 0;
961                         bp->link_info.force_pause =
962                                         HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_TX |
963                                         HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_RX;
964                 }
965                 break;
966         }
967         return bnxt_set_hwrm_link_config(bp, true);
968 }
969
970 /*
971  * Initialization
972  */
973
974 static struct eth_dev_ops bnxt_dev_ops = {
975         .dev_infos_get = bnxt_dev_info_get_op,
976         .dev_close = bnxt_dev_close_op,
977         .dev_configure = bnxt_dev_configure_op,
978         .dev_start = bnxt_dev_start_op,
979         .dev_stop = bnxt_dev_stop_op,
980         .dev_set_link_up = bnxt_dev_set_link_up_op,
981         .dev_set_link_down = bnxt_dev_set_link_down_op,
982         .stats_get = bnxt_stats_get_op,
983         .stats_reset = bnxt_stats_reset_op,
984         .rx_queue_setup = bnxt_rx_queue_setup_op,
985         .rx_queue_release = bnxt_rx_queue_release_op,
986         .tx_queue_setup = bnxt_tx_queue_setup_op,
987         .tx_queue_release = bnxt_tx_queue_release_op,
988         .reta_update = bnxt_reta_update_op,
989         .reta_query = bnxt_reta_query_op,
990         .rss_hash_update = bnxt_rss_hash_update_op,
991         .rss_hash_conf_get = bnxt_rss_hash_conf_get_op,
992         .link_update = bnxt_link_update_op,
993         .promiscuous_enable = bnxt_promiscuous_enable_op,
994         .promiscuous_disable = bnxt_promiscuous_disable_op,
995         .allmulticast_enable = bnxt_allmulticast_enable_op,
996         .allmulticast_disable = bnxt_allmulticast_disable_op,
997         .mac_addr_add = bnxt_mac_addr_add_op,
998         .mac_addr_remove = bnxt_mac_addr_remove_op,
999         .flow_ctrl_get = bnxt_flow_ctrl_get_op,
1000         .flow_ctrl_set = bnxt_flow_ctrl_set_op,
1001 };
1002
1003 static bool bnxt_vf_pciid(uint16_t id)
1004 {
1005         if (id == BROADCOM_DEV_ID_57304_VF ||
1006             id == BROADCOM_DEV_ID_57406_VF ||
1007             id == BROADCOM_DEV_ID_5731X_VF ||
1008             id == BROADCOM_DEV_ID_5741X_VF)
1009                 return true;
1010         return false;
1011 }
1012
1013 static int bnxt_init_board(struct rte_eth_dev *eth_dev)
1014 {
1015         int rc;
1016         struct bnxt *bp = eth_dev->data->dev_private;
1017
1018         /* enable device (incl. PCI PM wakeup), and bus-mastering */
1019         if (!eth_dev->pci_dev->mem_resource[0].addr) {
1020                 RTE_LOG(ERR, PMD,
1021                         "Cannot find PCI device base address, aborting\n");
1022                 rc = -ENODEV;
1023                 goto init_err_disable;
1024         }
1025
1026         bp->eth_dev = eth_dev;
1027         bp->pdev = eth_dev->pci_dev;
1028
1029         bp->bar0 = (void *)eth_dev->pci_dev->mem_resource[0].addr;
1030         if (!bp->bar0) {
1031                 RTE_LOG(ERR, PMD, "Cannot map device registers, aborting\n");
1032                 rc = -ENOMEM;
1033                 goto init_err_release;
1034         }
1035         return 0;
1036
1037 init_err_release:
1038         if (bp->bar0)
1039                 bp->bar0 = NULL;
1040
1041 init_err_disable:
1042
1043         return rc;
1044 }
1045
/*
 * Per-device initialization entry point, invoked once per probed port.
 *
 * Sequence: map BAR0, install the ops/burst function pointers, allocate the
 * HWRM channel, then run the ordered firmware handshake (ver_get,
 * queue_qportcfg, func_qcfg, func_qcaps) before allocating the MAC address
 * table and ring-group table and registering the driver with firmware.
 * On any failure after board init, the device is torn back down via the
 * driver's own uninit callback.
 *
 * Returns 0 on success or a negative errno on failure.
 */
static int
bnxt_dev_init(struct rte_eth_dev *eth_dev)
{
	static int version_printed;
	struct bnxt *bp;
	int rc;

	/* Print the driver banner only once across all ports */
	if (version_printed++ == 0)
		RTE_LOG(INFO, PMD, "%s", bnxt_version);

	rte_eth_copy_pci_info(eth_dev, eth_dev->pci_dev);
	bp = eth_dev->data->dev_private;

	/* VF devices take a different resource/config path later on */
	if (bnxt_vf_pciid(eth_dev->pci_dev->id.device_id))
		bp->flags |= BNXT_FLAG_VF;

	rc = bnxt_init_board(eth_dev);
	if (rc) {
		RTE_LOG(ERR, PMD,
			"Board initialization failed rc: %x\n", rc);
		goto error;
	}
	eth_dev->dev_ops = &bnxt_dev_ops;
	eth_dev->rx_pkt_burst = &bnxt_recv_pkts;
	eth_dev->tx_pkt_burst = &bnxt_xmit_pkts;

	/* Allocate the DMA-able HWRM command/response channel */
	rc = bnxt_alloc_hwrm_resources(bp);
	if (rc) {
		RTE_LOG(ERR, PMD,
			"hwrm resource allocation failure rc: %x\n", rc);
		goto error_free;
	}
	/* ver_get must be the first HWRM command sent to firmware */
	rc = bnxt_hwrm_ver_get(bp);
	if (rc)
		goto error_free;
	bnxt_hwrm_queue_qportcfg(bp);

	bnxt_hwrm_func_qcfg(bp);

	/* Get the MAX capabilities for this function */
	rc = bnxt_hwrm_func_qcaps(bp);
	if (rc) {
		RTE_LOG(ERR, PMD, "hwrm query capability failure rc: %x\n", rc);
		goto error_free;
	}
	eth_dev->data->mac_addrs = rte_zmalloc("bnxt_mac_addr_tbl",
					ETHER_ADDR_LEN * MAX_NUM_MAC_ADDR, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		RTE_LOG(ERR, PMD,
			"Failed to alloc %u bytes needed to store MAC addr tbl",
			ETHER_ADDR_LEN * MAX_NUM_MAC_ADDR);
		rc = -ENOMEM;
		goto error_free;
	}
	/* Copy the permanent MAC from the qcap response address now. */
	if (BNXT_PF(bp))
		memcpy(bp->mac_addr, bp->pf.mac_addr, sizeof(bp->mac_addr));
	else
		memcpy(bp->mac_addr, bp->vf.mac_addr, sizeof(bp->mac_addr));
	memcpy(&eth_dev->data->mac_addrs[0], bp->mac_addr, ETHER_ADDR_LEN);
	/* Ring-group table sized from the qcaps-reported maximum */
	bp->grp_info = rte_zmalloc("bnxt_grp_info",
				sizeof(*bp->grp_info) * bp->max_ring_grps, 0);
	if (!bp->grp_info) {
		RTE_LOG(ERR, PMD,
			"Failed to alloc %zu bytes needed to store group info table\n",
			sizeof(*bp->grp_info) * bp->max_ring_grps);
		rc = -ENOMEM;
		goto error_free;
	}

	rc = bnxt_hwrm_func_driver_register(bp, 0,
					    bp->pf.vf_req_fwd);
	if (rc) {
		RTE_LOG(ERR, PMD,
			"Failed to register driver");
		rc = -EBUSY;
		goto error_free;
	}

	RTE_LOG(INFO, PMD,
		DRV_MODULE_NAME " found at mem %" PRIx64 ", node addr %pM\n",
		eth_dev->pci_dev->mem_resource[0].phys_addr,
		eth_dev->pci_dev->mem_resource[0].addr);

	bp->dev_stopped = 0;

	return 0;

error_free:
	/* Full teardown via our own uninit op; frees whatever was allocated */
	eth_dev->driver->eth_dev_uninit(eth_dev);
error:
	return rc;
}
1139
1140 static int
1141 bnxt_dev_uninit(struct rte_eth_dev *eth_dev) {
1142         struct bnxt *bp = eth_dev->data->dev_private;
1143         int rc;
1144
1145         if (eth_dev->data->mac_addrs != NULL) {
1146                 rte_free(eth_dev->data->mac_addrs);
1147                 eth_dev->data->mac_addrs = NULL;
1148         }
1149         if (bp->grp_info != NULL) {
1150                 rte_free(bp->grp_info);
1151                 bp->grp_info = NULL;
1152         }
1153         rc = bnxt_hwrm_func_driver_unregister(bp, 0);
1154         bnxt_free_hwrm_resources(bp);
1155         if (bp->dev_stopped == 0)
1156                 bnxt_dev_close_op(eth_dev);
1157         eth_dev->dev_ops = NULL;
1158         eth_dev->rx_pkt_burst = NULL;
1159         eth_dev->tx_pkt_burst = NULL;
1160
1161         return rc;
1162 }
1163
/*
 * PCI/ethdev driver registration structure: binds the bnxt PCI ID table and
 * the init/uninit callbacks, and sizes the per-port private data area.
 * INTR_LSC advertises link-state-change interrupt support; DETACHABLE
 * allows hot-unplug via the generic probe/remove helpers.
 */
static struct eth_driver bnxt_rte_pmd = {
	.pci_drv = {
		    .id_table = bnxt_pci_id_map,
		    .drv_flags = RTE_PCI_DRV_NEED_MAPPING |
			    RTE_PCI_DRV_DETACHABLE | RTE_PCI_DRV_INTR_LSC,
		    .probe = rte_eth_dev_pci_probe,
		    .remove = rte_eth_dev_pci_remove
		    },
	.eth_dev_init = bnxt_dev_init,
	.eth_dev_uninit = bnxt_dev_uninit,
	.dev_private_size = sizeof(struct bnxt),
};
1176
1177 RTE_PMD_REGISTER_PCI(net_bnxt, bnxt_rte_pmd.pci_drv);
1178 RTE_PMD_REGISTER_PCI_TABLE(net_bnxt, bnxt_pci_id_map);