/*
 * Copyright (c) 2016 QLogic Corporation.
 * All rights reserved.
 * www.qlogic.com
 *
 * See LICENSE.qede_pmd for copyright and licensing details.
 */

#include "qede_ethdev.h"
#include <rte_alarm.h>
#include <rte_version.h>

/* Globals */
static const struct qed_eth_ops *qed_ops;
static const char *drivername = "qede pmd";
static int64_t timer_period = 1;

struct rte_qede_xstats_name_off {
        char name[RTE_ETH_XSTATS_NAME_SIZE];
        uint64_t offset;
};

static const struct rte_qede_xstats_name_off qede_xstats_strings[] = {
        {"rx_unicast_bytes", offsetof(struct ecore_eth_stats, rx_ucast_bytes)},
        {"rx_multicast_bytes",
                offsetof(struct ecore_eth_stats, rx_mcast_bytes)},
        {"rx_broadcast_bytes",
                offsetof(struct ecore_eth_stats, rx_bcast_bytes)},
        {"rx_unicast_packets", offsetof(struct ecore_eth_stats, rx_ucast_pkts)},
        {"rx_multicast_packets",
                offsetof(struct ecore_eth_stats, rx_mcast_pkts)},
        {"rx_broadcast_packets",
                offsetof(struct ecore_eth_stats, rx_bcast_pkts)},

        {"tx_unicast_bytes", offsetof(struct ecore_eth_stats, tx_ucast_bytes)},
        {"tx_multicast_bytes",
                offsetof(struct ecore_eth_stats, tx_mcast_bytes)},
        {"tx_broadcast_bytes",
                offsetof(struct ecore_eth_stats, tx_bcast_bytes)},
        {"tx_unicast_packets", offsetof(struct ecore_eth_stats, tx_ucast_pkts)},
        {"tx_multicast_packets",
                offsetof(struct ecore_eth_stats, tx_mcast_pkts)},
        {"tx_broadcast_packets",
                offsetof(struct ecore_eth_stats, tx_bcast_pkts)},

        {"rx_64_byte_packets",
                offsetof(struct ecore_eth_stats, rx_64_byte_packets)},
        {"rx_65_to_127_byte_packets",
                offsetof(struct ecore_eth_stats, rx_65_to_127_byte_packets)},
        {"rx_128_to_255_byte_packets",
                offsetof(struct ecore_eth_stats, rx_128_to_255_byte_packets)},
        {"rx_256_to_511_byte_packets",
                offsetof(struct ecore_eth_stats, rx_256_to_511_byte_packets)},
        {"rx_512_to_1023_byte_packets",
                offsetof(struct ecore_eth_stats, rx_512_to_1023_byte_packets)},
        {"rx_1024_to_1518_byte_packets",
                offsetof(struct ecore_eth_stats, rx_1024_to_1518_byte_packets)},
        {"rx_1519_to_1522_byte_packets",
                offsetof(struct ecore_eth_stats, rx_1519_to_1522_byte_packets)},
        {"rx_1519_to_2047_byte_packets",
                offsetof(struct ecore_eth_stats, rx_1519_to_2047_byte_packets)},
        {"rx_2048_to_4095_byte_packets",
                offsetof(struct ecore_eth_stats, rx_2048_to_4095_byte_packets)},
        {"rx_4096_to_9216_byte_packets",
                offsetof(struct ecore_eth_stats, rx_4096_to_9216_byte_packets)},
        {"rx_9217_to_16383_byte_packets",
                offsetof(struct ecore_eth_stats,
                         rx_9217_to_16383_byte_packets)},
        {"tx_64_byte_packets",
                offsetof(struct ecore_eth_stats, tx_64_byte_packets)},
        {"tx_65_to_127_byte_packets",
                offsetof(struct ecore_eth_stats, tx_65_to_127_byte_packets)},
        {"tx_128_to_255_byte_packets",
                offsetof(struct ecore_eth_stats, tx_128_to_255_byte_packets)},
        {"tx_256_to_511_byte_packets",
                offsetof(struct ecore_eth_stats, tx_256_to_511_byte_packets)},
        {"tx_512_to_1023_byte_packets",
                offsetof(struct ecore_eth_stats, tx_512_to_1023_byte_packets)},
        {"tx_1024_to_1518_byte_packets",
                offsetof(struct ecore_eth_stats, tx_1024_to_1518_byte_packets)},
        {"tx_1519_to_2047_byte_packets",
                offsetof(struct ecore_eth_stats, tx_1519_to_2047_byte_packets)},
        {"tx_2048_to_4095_byte_packets",
                offsetof(struct ecore_eth_stats, tx_2048_to_4095_byte_packets)},
        {"tx_4096_to_9216_byte_packets",
                offsetof(struct ecore_eth_stats, tx_4096_to_9216_byte_packets)},
        {"tx_9217_to_16383_byte_packets",
                offsetof(struct ecore_eth_stats,
                         tx_9217_to_16383_byte_packets)},

        {"rx_mac_ctrl_frames",
                offsetof(struct ecore_eth_stats, rx_mac_crtl_frames)},
        {"tx_mac_control_frames",
                offsetof(struct ecore_eth_stats, tx_mac_ctrl_frames)},
        {"rx_pause_frames", offsetof(struct ecore_eth_stats, rx_pause_frames)},
        {"tx_pause_frames", offsetof(struct ecore_eth_stats, tx_pause_frames)},
        {"rx_priority_flow_control_frames",
                offsetof(struct ecore_eth_stats, rx_pfc_frames)},
        {"tx_priority_flow_control_frames",
                offsetof(struct ecore_eth_stats, tx_pfc_frames)},

        {"rx_crc_errors", offsetof(struct ecore_eth_stats, rx_crc_errors)},
        {"rx_align_errors", offsetof(struct ecore_eth_stats, rx_align_errors)},
        {"rx_carrier_errors",
                offsetof(struct ecore_eth_stats, rx_carrier_errors)},
        {"rx_oversize_packet_errors",
                offsetof(struct ecore_eth_stats, rx_oversize_packets)},
        {"rx_jabber_errors", offsetof(struct ecore_eth_stats, rx_jabbers)},
        {"rx_undersize_packet_errors",
                offsetof(struct ecore_eth_stats, rx_undersize_packets)},
        {"rx_fragments", offsetof(struct ecore_eth_stats, rx_fragments)},
        {"rx_host_buffer_not_available",
                offsetof(struct ecore_eth_stats, no_buff_discards)},
        /* Number of packets discarded because they are bigger than MTU */
        {"rx_packet_too_big_discards",
                offsetof(struct ecore_eth_stats, packet_too_big_discard)},
        {"rx_ttl_zero_discards",
                offsetof(struct ecore_eth_stats, ttl0_discard)},
        {"rx_multi_function_tag_filter_discards",
                offsetof(struct ecore_eth_stats, mftag_filter_discards)},
        {"rx_mac_filter_discards",
                offsetof(struct ecore_eth_stats, mac_filter_discards)},
        {"rx_hw_buffer_truncates",
                offsetof(struct ecore_eth_stats, brb_truncates)},
        {"rx_hw_buffer_discards",
                offsetof(struct ecore_eth_stats, brb_discards)},
        {"tx_lpi_entry_count",
                offsetof(struct ecore_eth_stats, tx_lpi_entry_count)},
        {"tx_total_collisions",
                offsetof(struct ecore_eth_stats, tx_total_collisions)},
        {"tx_error_drop_packets",
                offsetof(struct ecore_eth_stats, tx_err_drop_pkts)},

        {"rx_mac_bytes", offsetof(struct ecore_eth_stats, rx_mac_bytes)},
        {"rx_mac_unicast_packets",
                offsetof(struct ecore_eth_stats, rx_mac_uc_packets)},
        {"rx_mac_multicast_packets",
                offsetof(struct ecore_eth_stats, rx_mac_mc_packets)},
        {"rx_mac_broadcast_packets",
                offsetof(struct ecore_eth_stats, rx_mac_bc_packets)},
        {"rx_mac_frames_ok",
                offsetof(struct ecore_eth_stats, rx_mac_frames_ok)},
        {"tx_mac_bytes", offsetof(struct ecore_eth_stats, tx_mac_bytes)},
        {"tx_mac_unicast_packets",
                offsetof(struct ecore_eth_stats, tx_mac_uc_packets)},
        {"tx_mac_multicast_packets",
                offsetof(struct ecore_eth_stats, tx_mac_mc_packets)},
        {"tx_mac_broadcast_packets",
                offsetof(struct ecore_eth_stats, tx_mac_bc_packets)},

        {"lro_coalesced_packets",
                offsetof(struct ecore_eth_stats, tpa_coalesced_pkts)},
        {"lro_coalesced_events",
                offsetof(struct ecore_eth_stats, tpa_coalesced_events)},
        {"lro_aborts_num",
                offsetof(struct ecore_eth_stats, tpa_aborts_num)},
        {"lro_not_coalesced_packets",
                offsetof(struct ecore_eth_stats, tpa_not_coalesced_pkts)},
        {"lro_coalesced_bytes",
                offsetof(struct ecore_eth_stats, tpa_coalesced_bytes)},
};

static const struct rte_qede_xstats_name_off qede_rxq_xstats_strings[] = {
        {"rx_q_segments",
                offsetof(struct qede_rx_queue, rx_segs)},
        {"rx_q_hw_errors",
                offsetof(struct qede_rx_queue, rx_hw_errors)},
        {"rx_q_allocation_errors",
                offsetof(struct qede_rx_queue, rx_alloc_errors)}
};

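/* Run the slowpath DPC (interrupt bottom-half) for the given HW function. */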
static void qede_interrupt_action(struct ecore_hwfn *p_hwfn)
{
        ecore_int_sp_dpc((osal_int_ptr_t)(p_hwfn));
}

static void
qede_interrupt_handler(__rte_unused struct rte_intr_handle *handle, void *param)
{
        struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param;
        struct qede_dev *qdev = eth_dev->data->dev_private;
        struct ecore_dev *edev = &qdev->edev;

        qede_interrupt_action(ECORE_LEADING_HWFN(edev));
        if (rte_intr_enable(&eth_dev->pci_dev->intr_handle))
                DP_ERR(edev, "rte_intr_enable failed\n");
}

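/* Cache the ecore device info in the adapter and attach the eth ops table. */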
static void
qede_alloc_etherdev(struct qede_dev *qdev, struct qed_dev_eth_info *info)
{
        rte_memcpy(&qdev->dev_info, info, sizeof(*info));
        qdev->num_tc = qdev->dev_info.num_tc;
        qdev->ops = qed_ops;
}

static void qede_print_adapter_info(struct qede_dev *qdev)
{
        struct ecore_dev *edev = &qdev->edev;
        struct qed_dev_info *info = &qdev->dev_info.common;
        static char drv_ver[QEDE_PMD_DRV_VER_STR_SIZE];
        static char ver_str[QEDE_PMD_DRV_VER_STR_SIZE];

        DP_INFO(edev, "*********************************\n");
        DP_INFO(edev, " DPDK version:%s\n", rte_version());
        DP_INFO(edev, " Chip details : %s%d\n",
                  ECORE_IS_BB(edev) ? "BB" : "AH",
                  CHIP_REV_IS_A0(edev) ? 0 : 1);
        snprintf(ver_str, QEDE_PMD_DRV_VER_STR_SIZE, "%d.%d.%d.%d",
                 info->fw_major, info->fw_minor, info->fw_rev, info->fw_eng);
        snprintf(drv_ver, QEDE_PMD_DRV_VER_STR_SIZE, "%s_%s",
                 ver_str, QEDE_PMD_VERSION);
        DP_INFO(edev, " Driver version : %s\n", drv_ver);
        DP_INFO(edev, " Firmware version : %s\n", ver_str);

        snprintf(ver_str, MCP_DRV_VER_STR_SIZE,
                 "%d.%d.%d.%d",
                (info->mfw_rev >> 24) & 0xff,
                (info->mfw_rev >> 16) & 0xff,
                (info->mfw_rev >> 8) & 0xff, (info->mfw_rev) & 0xff);
        DP_INFO(edev, " Management Firmware version : %s\n", ver_str);
        DP_INFO(edev, " Firmware file : %s\n", fw_file);
        DP_INFO(edev, "*********************************\n");
}

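/* Initialize a ucast filter command with the common RX/TX filter flags. */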
static void qede_set_ucast_cmn_params(struct ecore_filter_ucast *ucast)
{
        memset(ucast, 0, sizeof(struct ecore_filter_ucast));
        ucast->is_rx_filter = true;
        ucast->is_tx_filter = true;
        /* ucast->assert_on_error = true; - For debug */
}

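/* Maintain the per-device shadow list of unicast MAC/VLAN filter entries. */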
static int
qede_ucast_filter(struct rte_eth_dev *eth_dev, struct ecore_filter_ucast *ucast,
                  bool add)
{
        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
        struct qede_ucast_entry *tmp = NULL;
        struct qede_ucast_entry *u;
        struct ether_addr *mac_addr;

        mac_addr = (struct ether_addr *)ucast->mac;
        if (add) {
                SLIST_FOREACH(tmp, &qdev->uc_list_head, list) {
                        if ((memcmp(mac_addr, &tmp->mac,
                                    ETHER_ADDR_LEN) == 0) &&
                             ucast->vlan == tmp->vlan) {
                                DP_ERR(edev, "Unicast MAC is already added"
                                       " with vlan = %u, vni = %u\n",
                                       ucast->vlan, ucast->vni);
                                return -EEXIST;
                        }
                }
                u = rte_malloc(NULL, sizeof(struct qede_ucast_entry),
                               RTE_CACHE_LINE_SIZE);
                if (!u) {
                        DP_ERR(edev, "Did not allocate memory for ucast\n");
                        return -ENOMEM;
                }
                ether_addr_copy(mac_addr, &u->mac);
                u->vlan = ucast->vlan;
                SLIST_INSERT_HEAD(&qdev->uc_list_head, u, list);
                qdev->num_uc_addr++;
        } else {
                SLIST_FOREACH(tmp, &qdev->uc_list_head, list) {
                        if ((memcmp(mac_addr, &tmp->mac,
                                    ETHER_ADDR_LEN) == 0) &&
                            ucast->vlan == tmp->vlan)
                                break;
                }
                if (tmp == NULL) {
                        DP_INFO(edev, "Unicast MAC not found\n");
                        return -EINVAL;
                }
                SLIST_REMOVE(&qdev->uc_list_head, tmp, qede_ucast_entry, list);
                qdev->num_uc_addr--;
        }

        return 0;
}

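/* Maintain the per-device shadow list of multicast MAC filter entries. */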
static int
qede_mcast_filter(struct rte_eth_dev *eth_dev, struct ecore_filter_ucast *mcast,
                  bool add)
{
        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
        struct ether_addr *mac_addr;
        struct qede_mcast_entry *tmp = NULL;
        struct qede_mcast_entry *m;

        mac_addr = (struct ether_addr *)mcast->mac;
        if (add) {
                SLIST_FOREACH(tmp, &qdev->mc_list_head, list) {
                        if (memcmp(mac_addr, &tmp->mac, ETHER_ADDR_LEN) == 0) {
                                DP_ERR(edev,
                                        "Multicast MAC is already added\n");
                                return -EEXIST;
                        }
                }
                m = rte_malloc(NULL, sizeof(struct qede_mcast_entry),
                        RTE_CACHE_LINE_SIZE);
                if (!m) {
                        DP_ERR(edev,
                                "Did not allocate memory for mcast\n");
                        return -ENOMEM;
                }
                ether_addr_copy(mac_addr, &m->mac);
                SLIST_INSERT_HEAD(&qdev->mc_list_head, m, list);
                qdev->num_mc_addr++;
        } else {
                SLIST_FOREACH(tmp, &qdev->mc_list_head, list) {
                        if (memcmp(mac_addr, &tmp->mac, ETHER_ADDR_LEN) == 0)
                                break;
                }
                if (tmp == NULL) {
                        DP_INFO(edev, "Multicast MAC not found\n");
                        return -EINVAL;
                }
                SLIST_REMOVE(&qdev->mc_list_head, tmp,
                             qede_mcast_entry, list);
                qdev->num_mc_addr--;
        }

        return 0;
}

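/* Dispatch a MAC filter request to the unicast or multicast path and
 * program the resulting filter(s) via ecore.
 */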
static enum _ecore_status_t
qede_mac_int_ops(struct rte_eth_dev *eth_dev, struct ecore_filter_ucast *ucast,
                 bool add)
{
        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
        enum _ecore_status_t rc;
        struct ecore_filter_mcast mcast;
        struct qede_mcast_entry *tmp;
        uint16_t j = 0;

        /* Multicast */
        if (is_multicast_ether_addr((struct ether_addr *)ucast->mac)) {
                if (add) {
                        if (qdev->num_mc_addr >= ECORE_MAX_MC_ADDRS) {
                                DP_ERR(edev,
                                       "Mcast filter table limit exceeded, "
                                       "Please enable mcast promisc mode\n");
                                return -ECORE_INVAL;
                        }
                }
                rc = qede_mcast_filter(eth_dev, ucast, add);
                if (rc == 0) {
                        DP_INFO(edev, "num_mc_addrs = %u\n", qdev->num_mc_addr);
                        memset(&mcast, 0, sizeof(mcast));
                        mcast.num_mc_addrs = qdev->num_mc_addr;
                        mcast.opcode = ECORE_FILTER_ADD;
                        SLIST_FOREACH(tmp, &qdev->mc_list_head, list) {
                                ether_addr_copy(&tmp->mac,
                                        (struct ether_addr *)&mcast.mac[j]);
                                j++;
                        }
                        rc = ecore_filter_mcast_cmd(edev, &mcast,
                                                    ECORE_SPQ_MODE_CB, NULL);
                }
                if (rc != ECORE_SUCCESS) {
                        DP_ERR(edev, "Failed to add multicast filter"
                               " rc = %d, op = %d\n", rc, add);
                }
        } else { /* Unicast */
                if (add) {
                        if (qdev->num_uc_addr >= qdev->dev_info.num_mac_addrs) {
                                DP_ERR(edev,
                                       "Ucast filter table limit exceeded,"
                                       " Please enable promisc mode\n");
                                return -ECORE_INVAL;
                        }
                }
                rc = qede_ucast_filter(eth_dev, ucast, add);
                if (rc == 0)
                        rc = ecore_filter_ucast_cmd(edev, ucast,
                                                    ECORE_SPQ_MODE_CB, NULL);
                if (rc != ECORE_SUCCESS) {
                        DP_ERR(edev, "MAC filter failed, rc = %d, op = %d\n",
                               rc, add);
                }
        }

        return rc;
}

static void
qede_mac_addr_add(struct rte_eth_dev *eth_dev, struct ether_addr *mac_addr,
                  __rte_unused uint32_t index, __rte_unused uint32_t pool)
{
        struct ecore_filter_ucast ucast;

        qede_set_ucast_cmn_params(&ucast);
        ucast.type = ECORE_FILTER_MAC;
        ether_addr_copy(mac_addr, (struct ether_addr *)&ucast.mac);
        (void)qede_mac_int_ops(eth_dev, &ucast, 1);
}

static void
qede_mac_addr_remove(struct rte_eth_dev *eth_dev, uint32_t index)
{
        struct qede_dev *qdev = eth_dev->data->dev_private;
        struct ecore_dev *edev = &qdev->edev;
        struct ecore_filter_ucast ucast;

        PMD_INIT_FUNC_TRACE(edev);

        if (index >= qdev->dev_info.num_mac_addrs) {
                DP_ERR(edev, "Index %u is above MAC filter limit %u\n",
                       index, qdev->dev_info.num_mac_addrs);
                return;
        }

        qede_set_ucast_cmn_params(&ucast);
        ucast.opcode = ECORE_FILTER_REMOVE;
        ucast.type = ECORE_FILTER_MAC;

        /* Use the index maintained by rte */
        ether_addr_copy(&eth_dev->data->mac_addrs[index],
                        (struct ether_addr *)&ucast.mac);

        ecore_filter_ucast_cmd(edev, &ucast, ECORE_SPQ_MODE_CB, NULL);
}

static void
qede_mac_addr_set(struct rte_eth_dev *eth_dev, struct ether_addr *mac_addr)
{
        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
        struct ecore_filter_ucast ucast;
        int rc;

        if (IS_VF(edev) && !ecore_vf_check_mac(ECORE_LEADING_HWFN(edev),
                                               mac_addr->addr_bytes)) {
                DP_ERR(edev, "Setting MAC address is not allowed\n");
                ether_addr_copy(&qdev->primary_mac,
                                &eth_dev->data->mac_addrs[0]);
                return;
        }

        /* First remove the primary mac */
        qede_set_ucast_cmn_params(&ucast);
        ucast.opcode = ECORE_FILTER_REMOVE;
        ucast.type = ECORE_FILTER_MAC;
        ether_addr_copy(&qdev->primary_mac,
                        (struct ether_addr *)&ucast.mac);
        rc = ecore_filter_ucast_cmd(edev, &ucast, ECORE_SPQ_MODE_CB, NULL);
        if (rc != 0) {
                DP_ERR(edev, "Unable to remove current MAC address,"
                             " reverting to previous default MAC\n");
                ether_addr_copy(&qdev->primary_mac,
                                &eth_dev->data->mac_addrs[0]);
                return;
        }

        /* Add new MAC */
        ucast.opcode = ECORE_FILTER_ADD;
        ether_addr_copy(mac_addr, (struct ether_addr *)&ucast.mac);
        rc = ecore_filter_ucast_cmd(edev, &ucast, ECORE_SPQ_MODE_CB, NULL);
        if (rc != 0)
                DP_ERR(edev, "Unable to add new default mac\n");
        else
                ether_addr_copy(mac_addr, &qdev->primary_mac);
}

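/* Toggle accept-any-vlan on vport 0; the update is skipped when the
 * requested state is already in effect.
 */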
static void qede_config_accept_any_vlan(struct qede_dev *qdev, bool action)
{
        struct ecore_dev *edev = &qdev->edev;
        struct qed_update_vport_params params = {
                .vport_id = 0,
                .accept_any_vlan = action,
                .update_accept_any_vlan_flg = 1,
        };
        int rc;

        /* Proceed only if action actually needs to be performed */
        if (qdev->accept_any_vlan == action)
                return;

        rc = qdev->ops->vport_update(edev, &params);
        if (rc) {
                DP_ERR(edev, "Failed to %s accept-any-vlan\n",
                       action ? "enable" : "disable");
        } else {
                DP_INFO(edev, "%s accept-any-vlan\n",
                        action ? "enabled" : "disabled");
                qdev->accept_any_vlan = action;
        }
}

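/* Enable or disable inner VLAN stripping through a vport update. */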
static int qede_vlan_stripping(struct rte_eth_dev *eth_dev, bool set_stripping)
{
        struct qed_update_vport_params vport_update_params;
        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
        int rc;

        memset(&vport_update_params, 0, sizeof(vport_update_params));
        vport_update_params.vport_id = 0;
        vport_update_params.update_inner_vlan_removal_flg = 1;
        vport_update_params.inner_vlan_removal_flg = set_stripping;
        rc = qdev->ops->vport_update(edev, &vport_update_params);
        if (rc) {
                DP_ERR(edev, "Update V-PORT failed %d\n", rc);
                return rc;
        }

        return 0;
}

static void qede_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask)
{
        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
        struct rte_eth_rxmode *rxmode = &eth_dev->data->dev_conf.rxmode;

        if (mask & ETH_VLAN_STRIP_MASK) {
                if (rxmode->hw_vlan_strip)
                        (void)qede_vlan_stripping(eth_dev, 1);
                else
                        (void)qede_vlan_stripping(eth_dev, 0);
        }

        if (mask & ETH_VLAN_FILTER_MASK) {
                /* VLAN filtering kicks in when a VLAN is added */
                if (rxmode->hw_vlan_filter) {
                        qede_vlan_filter_set(eth_dev, 0, 1);
                } else {
                        if (qdev->configured_vlans > 1) { /* Excluding VLAN0 */
                                DP_NOTICE(edev, false,
                                  " Please remove existing VLAN filters"
                                  " before disabling VLAN filtering\n");
                                /* Signal app that VLAN filtering is still
                                 * enabled
                                 */
                                rxmode->hw_vlan_filter = true;
                        } else {
                                qede_vlan_filter_set(eth_dev, 0, 0);
                        }
                }
        }

        if (mask & ETH_VLAN_EXTEND_MASK)
                DP_INFO(edev, "No offloads are supported with VLAN Q-in-Q"
                        " and classification is based on outer tag only\n");

        DP_INFO(edev, "vlan offload mask %d vlan-strip %d vlan-filter %d\n",
                mask, rxmode->hw_vlan_strip, rxmode->hw_vlan_filter);
}

static int qede_vlan_filter_set(struct rte_eth_dev *eth_dev,
                                uint16_t vlan_id, int on)
{
        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
        struct qed_dev_eth_info *dev_info = &qdev->dev_info;
        struct qede_vlan_entry *tmp = NULL;
        struct qede_vlan_entry *vlan;
        struct ecore_filter_ucast ucast;
        int rc;

        if (on) {
                if (qdev->configured_vlans == dev_info->num_vlan_filters) {
                        DP_INFO(edev, "Reached max VLAN filter limit,"
                                      " enabling accept_any_vlan\n");
                        qede_config_accept_any_vlan(qdev, true);
                        return 0;
                }

                SLIST_FOREACH(tmp, &qdev->vlan_list_head, list) {
                        if (tmp->vid == vlan_id) {
                                DP_ERR(edev, "VLAN %u already configured\n",
                                       vlan_id);
                                return -EEXIST;
                        }
                }

                vlan = rte_malloc(NULL, sizeof(struct qede_vlan_entry),
                                  RTE_CACHE_LINE_SIZE);

                if (!vlan) {
                        DP_ERR(edev, "Did not allocate memory for VLAN\n");
                        return -ENOMEM;
                }

                qede_set_ucast_cmn_params(&ucast);
                ucast.opcode = ECORE_FILTER_ADD;
                ucast.type = ECORE_FILTER_VLAN;
                ucast.vlan = vlan_id;
                rc = ecore_filter_ucast_cmd(edev, &ucast, ECORE_SPQ_MODE_CB,
                                            NULL);
                if (rc != 0) {
                        DP_ERR(edev, "Failed to add VLAN %u rc %d\n", vlan_id,
                               rc);
                        rte_free(vlan);
                } else {
                        vlan->vid = vlan_id;
                        SLIST_INSERT_HEAD(&qdev->vlan_list_head, vlan, list);
                        qdev->configured_vlans++;
                        DP_INFO(edev, "VLAN %u added, configured_vlans %u\n",
                                vlan_id, qdev->configured_vlans);
                }
        } else {
                SLIST_FOREACH(tmp, &qdev->vlan_list_head, list) {
                        if (tmp->vid == vlan_id)
                                break;
                }

                if (!tmp) {
                        if (qdev->configured_vlans == 0) {
                                DP_INFO(edev,
                                        "No VLAN filters configured yet\n");
                                return 0;
                        }

                        DP_ERR(edev, "VLAN %u not configured\n", vlan_id);
                        return -EINVAL;
                }

                SLIST_REMOVE(&qdev->vlan_list_head, tmp, qede_vlan_entry, list);

                qede_set_ucast_cmn_params(&ucast);
                ucast.opcode = ECORE_FILTER_REMOVE;
                ucast.type = ECORE_FILTER_VLAN;
                ucast.vlan = vlan_id;
                rc = ecore_filter_ucast_cmd(edev, &ucast, ECORE_SPQ_MODE_CB,
                                            NULL);
                if (rc != 0) {
                        DP_ERR(edev, "Failed to delete VLAN %u rc %d\n",
                               vlan_id, rc);
                } else {
                        qdev->configured_vlans--;
                        DP_INFO(edev, "VLAN %u removed, configured_vlans %u\n",
                                vlan_id, qdev->configured_vlans);
                }
        }

        return rc;
}

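/* Start vport 0 with default parameters so that subsequent port
 * configuration can proceed.
 */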
static int qede_init_vport(struct qede_dev *qdev)
{
        struct ecore_dev *edev = &qdev->edev;
        struct qed_start_vport_params start = {0};
        int rc;

        start.remove_inner_vlan = 1;
        start.gro_enable = 0;
        start.mtu = ETHER_MTU + QEDE_ETH_OVERHEAD;
        start.vport_id = 0;
        start.drop_ttl0 = false;
        start.clear_stats = 1;
        start.handle_ptp_pkts = 0;

        rc = qdev->ops->vport_start(edev, &start);
        if (rc) {
                DP_ERR(edev, "Start V-PORT failed %d\n", rc);
                return rc;
        }

        DP_INFO(edev,
                "Start vport ramrod passed, vport_id = %d, MTU = %u\n",
                start.vport_id, ETHER_MTU);

        return 0;
}

static int qede_dev_configure(struct rte_eth_dev *eth_dev)
{
        struct qede_dev *qdev = eth_dev->data->dev_private;
        struct ecore_dev *edev = &qdev->edev;
        struct rte_eth_rxmode *rxmode = &eth_dev->data->dev_conf.rxmode;
        int rc;

        PMD_INIT_FUNC_TRACE(edev);

        /* Check requirements for 100G mode */
        if (edev->num_hwfns > 1) {
                if (eth_dev->data->nb_rx_queues < 2 ||
                    eth_dev->data->nb_tx_queues < 2) {
                        DP_NOTICE(edev, false,
                                  "100G mode needs min. 2 RX/TX queues\n");
                        return -EINVAL;
                }

                if ((eth_dev->data->nb_rx_queues % 2 != 0) ||
                    (eth_dev->data->nb_tx_queues % 2 != 0)) {
                        DP_NOTICE(edev, false,
                                  "100G mode needs even no. of RX/TX queues\n");
                        return -EINVAL;
                }
        }

        /* Sanity checks and throw warnings */
        if (rxmode->enable_scatter == 1)
                eth_dev->data->scattered_rx = 1;

        if (rxmode->enable_lro == 1) {
                DP_INFO(edev, "LRO is not supported\n");
                return -EINVAL;
        }

        if (!rxmode->hw_strip_crc)
                DP_INFO(edev, "L2 CRC stripping is always enabled in hw\n");

        if (!rxmode->hw_ip_checksum)
                DP_INFO(edev, "IP/UDP/TCP checksum offload is always enabled "
                              "in hw\n");

        /* Check for the port restart case */
        if (qdev->state != QEDE_DEV_INIT) {
                rc = qdev->ops->vport_stop(edev, 0);
                if (rc != 0)
                        return rc;
                qede_dealloc_fp_resc(eth_dev);
        }

        qdev->fp_num_tx = eth_dev->data->nb_tx_queues;
        qdev->fp_num_rx = eth_dev->data->nb_rx_queues;
        qdev->num_queues = qdev->fp_num_tx + qdev->fp_num_rx;

        /* Fastpath status block should be initialized before sending
         * VPORT-START in the case of VF. Anyway, do it for both VF/PF.
         */
        rc = qede_alloc_fp_resc(qdev);
        if (rc != 0)
                return rc;

        /* Issue VPORT-START with default config values to allow
         * other port configurations early on.
         */
        rc = qede_init_vport(qdev);
        if (rc != 0)
                return rc;

        SLIST_INIT(&qdev->vlan_list_head);

        /* Add primary mac for PF */
        if (IS_PF(edev))
                qede_mac_addr_set(eth_dev, &qdev->primary_mac);

        /* Enable VLAN offloads by default */
        qede_vlan_offload_set(eth_dev, ETH_VLAN_STRIP_MASK  |
                                       ETH_VLAN_FILTER_MASK |
                                       ETH_VLAN_EXTEND_MASK);

        qdev->state = QEDE_DEV_CONFIG;

        DP_INFO(edev, "Allocated RSS=%d TSS=%d (with CoS=%d)\n",
                (int)QEDE_RSS_COUNT(qdev), (int)QEDE_TSS_COUNT(qdev),
                qdev->num_tc);

        return 0;
}

/* Info about HW descriptor ring limitations */
static const struct rte_eth_desc_lim qede_rx_desc_lim = {
        .nb_max = NUM_RX_BDS_MAX,
        .nb_min = 128,
        .nb_align = 128 /* lowest common multiple */
};

static const struct rte_eth_desc_lim qede_tx_desc_lim = {
        .nb_max = NUM_TX_BDS_MAX,
        .nb_min = 256,
        .nb_align = 256
};

static void
qede_dev_info_get(struct rte_eth_dev *eth_dev,
                  struct rte_eth_dev_info *dev_info)
{
        struct qede_dev *qdev = eth_dev->data->dev_private;
        struct ecore_dev *edev = &qdev->edev;
        struct qed_link_output link;
        uint32_t speed_cap = 0;

        PMD_INIT_FUNC_TRACE(edev);

        dev_info->min_rx_bufsize = (uint32_t)(ETHER_MIN_MTU +
                                              QEDE_ETH_OVERHEAD);
        dev_info->max_rx_pktlen = (uint32_t)ETH_TX_MAX_NON_LSO_PKT_LEN;
        dev_info->rx_desc_lim = qede_rx_desc_lim;
        dev_info->tx_desc_lim = qede_tx_desc_lim;
        dev_info->max_rx_queues = (uint16_t)QEDE_MAX_RSS_CNT(qdev);
        dev_info->max_tx_queues = dev_info->max_rx_queues;
        dev_info->max_mac_addrs = qdev->dev_info.num_mac_addrs;
        if (IS_VF(edev))
                dev_info->max_vfs = 0;
        else
                dev_info->max_vfs = (uint16_t)NUM_OF_VFS(&qdev->edev);
        dev_info->driver_name = qdev->drv_ver;
        dev_info->reta_size = ECORE_RSS_IND_TABLE_SIZE;
        dev_info->flow_type_rss_offloads = (uint64_t)QEDE_RSS_OFFLOAD_ALL;

        dev_info->default_txconf = (struct rte_eth_txconf) {
                .txq_flags = QEDE_TXQ_FLAGS,
        };

        dev_info->rx_offload_capa = (DEV_RX_OFFLOAD_VLAN_STRIP |
                                     DEV_RX_OFFLOAD_IPV4_CKSUM |
                                     DEV_RX_OFFLOAD_UDP_CKSUM |
                                     DEV_RX_OFFLOAD_TCP_CKSUM);
        dev_info->tx_offload_capa = (DEV_TX_OFFLOAD_VLAN_INSERT |
                                     DEV_TX_OFFLOAD_IPV4_CKSUM |
                                     DEV_TX_OFFLOAD_UDP_CKSUM |
                                     DEV_TX_OFFLOAD_TCP_CKSUM);

        memset(&link, 0, sizeof(struct qed_link_output));
        qdev->ops->common->get_link(edev, &link);
        if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
                speed_cap |= ETH_LINK_SPEED_1G;
        if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
                speed_cap |= ETH_LINK_SPEED_10G;
        if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G)
                speed_cap |= ETH_LINK_SPEED_25G;
        if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G)
                speed_cap |= ETH_LINK_SPEED_40G;
        if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
                speed_cap |= ETH_LINK_SPEED_50G;
        if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G)
                speed_cap |= ETH_LINK_SPEED_100G;
        dev_info->speed_capa = speed_cap;
}

/* return 0 means link status changed, -1 means not changed */
static int
qede_link_update(struct rte_eth_dev *eth_dev, __rte_unused int wait_to_complete)
{
        struct qede_dev *qdev = eth_dev->data->dev_private;
        struct ecore_dev *edev = &qdev->edev;
        uint16_t link_duplex;
        struct qed_link_output link;
        struct rte_eth_link *curr = &eth_dev->data->dev_link;

        memset(&link, 0, sizeof(struct qed_link_output));
        qdev->ops->common->get_link(edev, &link);

        /* Link Speed */
        curr->link_speed = link.speed;

        /* Link Mode */
        switch (link.duplex) {
        case QEDE_DUPLEX_HALF:
                link_duplex = ETH_LINK_HALF_DUPLEX;
                break;
        case QEDE_DUPLEX_FULL:
                link_duplex = ETH_LINK_FULL_DUPLEX;
                break;
        case QEDE_DUPLEX_UNKNOWN:
        default:
                link_duplex = -1;
        }
        curr->link_duplex = link_duplex;

        /* Link Status */
        curr->link_status = (link.link_up) ? ETH_LINK_UP : ETH_LINK_DOWN;

        /* AN */
        curr->link_autoneg = (link.supported_caps & QEDE_SUPPORTED_AUTONEG) ?
                             ETH_LINK_AUTONEG : ETH_LINK_FIXED;

        DP_INFO(edev, "Link - Speed %u Mode %u AN %u Status %u\n",
                curr->link_speed, curr->link_duplex,
                curr->link_autoneg, curr->link_status);

        /* return 0 means link status changed, -1 means not changed */
        return ((curr->link_status == link.link_up) ? -1 : 0);
}

static void qede_promiscuous_enable(struct rte_eth_dev *eth_dev)
{
        struct qede_dev *qdev = eth_dev->data->dev_private;
        struct ecore_dev *edev = &qdev->edev;

        PMD_INIT_FUNC_TRACE(edev);

        enum qed_filter_rx_mode_type type = QED_FILTER_RX_MODE_TYPE_PROMISC;

        if (rte_eth_allmulticast_get(eth_dev->data->port_id) == 1)
                type |= QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC;

        qed_configure_filter_rx_mode(eth_dev, type);
}

static void qede_promiscuous_disable(struct rte_eth_dev *eth_dev)
{
        struct qede_dev *qdev = eth_dev->data->dev_private;
        struct ecore_dev *edev = &qdev->edev;

        PMD_INIT_FUNC_TRACE(edev);

        if (rte_eth_allmulticast_get(eth_dev->data->port_id) == 1)
                qed_configure_filter_rx_mode(eth_dev,
                                QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC);
        else
                qed_configure_filter_rx_mode(eth_dev,
                                QED_FILTER_RX_MODE_TYPE_REGULAR);
}

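/* Periodic alarm callback that polls the slowpath status blocks of both
 * HW functions and re-arms itself; used on two-engine (100G) devices.
 */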
static void qede_poll_sp_sb_cb(void *param)
{
        struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param;
        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
        int rc;

        qede_interrupt_action(ECORE_LEADING_HWFN(edev));
        qede_interrupt_action(&edev->hwfns[1]);

        rc = rte_eal_alarm_set(timer_period * US_PER_S,
                               qede_poll_sp_sb_cb,
                               (void *)eth_dev);
        if (rc != 0) {
                DP_ERR(edev, "Unable to start periodic"
                             " timer rc %d\n", rc);
                assert(false && "Unable to start periodic timer");
        }
}

static void qede_dev_close(struct rte_eth_dev *eth_dev)
{
        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
        int rc;

        PMD_INIT_FUNC_TRACE(edev);

        /* dev_stop() shall cleanup fp resources in hw but without releasing
         * dma memories and sw structures so that dev_start() can be called
         * by the app without reconfiguration. However, in dev_close() we
         * can release all the resources and the device can be brought up
         * freshly afterwards.
         */
        if (qdev->state != QEDE_DEV_STOP)
                qede_dev_stop(eth_dev);
        else
                DP_INFO(edev, "Device is already stopped\n");

        rc = qdev->ops->vport_stop(edev, 0);
        if (rc != 0)
                DP_ERR(edev, "Failed to stop VPORT\n");

        qede_dealloc_fp_resc(eth_dev);

        qdev->ops->common->slowpath_stop(edev);

        qdev->ops->common->remove(edev);

        rte_intr_disable(&eth_dev->pci_dev->intr_handle);

        rte_intr_callback_unregister(&eth_dev->pci_dev->intr_handle,
                                     qede_interrupt_handler, (void *)eth_dev);

        if (edev->num_hwfns > 1)
                rte_eal_alarm_cancel(qede_poll_sp_sb_cb, (void *)eth_dev);

        qdev->state = QEDE_DEV_INIT; /* Go back to init state */
}

static void
qede_get_stats(struct rte_eth_dev *eth_dev, struct rte_eth_stats *eth_stats)
{
        struct qede_dev *qdev = eth_dev->data->dev_private;
        struct ecore_dev *edev = &qdev->edev;
        struct ecore_eth_stats stats;
        unsigned int i = 0, j = 0, qid;
        unsigned int rxq_stat_cntrs, txq_stat_cntrs;
        struct qede_tx_queue *txq;

        qdev->ops->get_vport_stats(edev, &stats);

        /* RX Stats */
        eth_stats->ipackets = stats.rx_ucast_pkts +
            stats.rx_mcast_pkts + stats.rx_bcast_pkts;

        eth_stats->ibytes = stats.rx_ucast_bytes +
            stats.rx_mcast_bytes + stats.rx_bcast_bytes;

        eth_stats->ierrors = stats.rx_crc_errors +
            stats.rx_align_errors +
            stats.rx_carrier_errors +
            stats.rx_oversize_packets +
            stats.rx_jabbers + stats.rx_undersize_packets;

        eth_stats->rx_nombuf = stats.no_buff_discards;

        eth_stats->imissed = stats.mftag_filter_discards +
            stats.mac_filter_discards +
            stats.no_buff_discards + stats.brb_truncates + stats.brb_discards;

        /* TX stats */
        eth_stats->opackets = stats.tx_ucast_pkts +
            stats.tx_mcast_pkts + stats.tx_bcast_pkts;

        eth_stats->obytes = stats.tx_ucast_bytes +
            stats.tx_mcast_bytes + stats.tx_bcast_bytes;

        eth_stats->oerrors = stats.tx_err_drop_pkts;

        /* Queue stats */
        rxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(qdev),
                               RTE_ETHDEV_QUEUE_STAT_CNTRS);
        txq_stat_cntrs = RTE_MIN(QEDE_TSS_COUNT(qdev),
                               RTE_ETHDEV_QUEUE_STAT_CNTRS);
        if ((rxq_stat_cntrs != QEDE_RSS_COUNT(qdev)) ||
            (txq_stat_cntrs != QEDE_TSS_COUNT(qdev)))
                DP_VERBOSE(edev, ECORE_MSG_DEBUG,
                       "Not all the queue stats will be displayed. Set"
                       " RTE_ETHDEV_QUEUE_STAT_CNTRS config param"
                       " appropriately and retry.\n");

        for (qid = 0; qid < QEDE_QUEUE_CNT(qdev); qid++) {
                if (qdev->fp_array[qid].type & QEDE_FASTPATH_RX) {
                        eth_stats->q_ipackets[i] =
                                *(uint64_t *)(
                                        ((char *)(qdev->fp_array[(qid)].rxq)) +
                                        offsetof(struct qede_rx_queue,
                                        rcv_pkts));
                        eth_stats->q_errors[i] =
                                *(uint64_t *)(
                                        ((char *)(qdev->fp_array[(qid)].rxq)) +
                                        offsetof(struct qede_rx_queue,
                                        rx_hw_errors)) +
                                *(uint64_t *)(
                                        ((char *)(qdev->fp_array[(qid)].rxq)) +
                                        offsetof(struct qede_rx_queue,
                                        rx_alloc_errors));
                        i++;
                }
                if (i == rxq_stat_cntrs)
                        break;
        }

        for (qid = 0; qid < QEDE_QUEUE_CNT(qdev); qid++) {
                if (qdev->fp_array[qid].type & QEDE_FASTPATH_TX) {
                        txq = qdev->fp_array[(qid)].txqs[0];
                        eth_stats->q_opackets[j] =
                                *((uint64_t *)(uintptr_t)
                                        (((uint64_t)(uintptr_t)(txq)) +
                                         offsetof(struct qede_tx_queue,
                                                  xmit_pkts)));
                        j++;
                }
                if (j == txq_stat_cntrs)
                        break;
        }
}

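/* Total xstats count: device-wide stats plus per-RX-queue stats, with the
 * queue count capped at RTE_ETHDEV_QUEUE_STAT_CNTRS.
 */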
static unsigned
qede_get_xstats_count(struct qede_dev *qdev)
{
        return RTE_DIM(qede_xstats_strings) +
                (RTE_DIM(qede_rxq_xstats_strings) *
                 RTE_MIN(QEDE_RSS_COUNT(qdev),
                         RTE_ETHDEV_QUEUE_STAT_CNTRS));
}

static int
qede_get_xstats_names(struct rte_eth_dev *dev,
                      struct rte_eth_xstat_name *xstats_names,
                      __rte_unused unsigned int limit)
{
        struct qede_dev *qdev = dev->data->dev_private;
        const unsigned int stat_cnt = qede_get_xstats_count(qdev);
        unsigned int i, qid, stat_idx = 0;
        unsigned int rxq_stat_cntrs;

        if (xstats_names != NULL) {
                for (i = 0; i < RTE_DIM(qede_xstats_strings); i++) {
                        snprintf(xstats_names[stat_idx].name,
                                sizeof(xstats_names[stat_idx].name),
                                "%s",
                                qede_xstats_strings[i].name);
                        stat_idx++;
                }

                rxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(qdev),
                                         RTE_ETHDEV_QUEUE_STAT_CNTRS);
                for (qid = 0; qid < rxq_stat_cntrs; qid++) {
                        for (i = 0; i < RTE_DIM(qede_rxq_xstats_strings); i++) {
                                /* Splice the queue id into the name after the
                                 * 4-char "rx_q" prefix, e.g. "rx_q0_segments".
                                 */
                                snprintf(xstats_names[stat_idx].name,
                                        sizeof(xstats_names[stat_idx].name),
                                        "%.4s%d%s",
                                        qede_rxq_xstats_strings[i].name, qid,
                                        qede_rxq_xstats_strings[i].name + 4);
                                stat_idx++;
                        }
                }
        }

        return stat_cnt;
}

static int
qede_get_xstats(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
                unsigned int n)
{
        struct qede_dev *qdev = dev->data->dev_private;
        struct ecore_dev *edev = &qdev->edev;
        struct ecore_eth_stats stats;
        const unsigned int num = qede_get_xstats_count(qdev);
        unsigned int i, qid, stat_idx = 0;
        unsigned int rxq_stat_cntrs;

        if (n < num)
                return num;

        qdev->ops->get_vport_stats(edev, &stats);

        for (i = 0; i < RTE_DIM(qede_xstats_strings); i++) {
                xstats[stat_idx].value = *(uint64_t *)(((char *)&stats) +
                                             qede_xstats_strings[i].offset);
                xstats[stat_idx].id = stat_idx;
                stat_idx++;
        }

        rxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(qdev),
                                 RTE_ETHDEV_QUEUE_STAT_CNTRS);
        for (qid = 0; qid < rxq_stat_cntrs; qid++) {
                if (qdev->fp_array[qid].type & QEDE_FASTPATH_RX) {
                        for (i = 0; i < RTE_DIM(qede_rxq_xstats_strings); i++) {
                                xstats[stat_idx].value = *(uint64_t *)(
                                        ((char *)(qdev->fp_array[(qid)].rxq)) +
                                         qede_rxq_xstats_strings[i].offset);
                                xstats[stat_idx].id = stat_idx;
                                stat_idx++;
                        }
                }
        }

        return stat_idx;
}

static void
qede_reset_xstats(struct rte_eth_dev *dev)
{
        struct qede_dev *qdev = dev->data->dev_private;
        struct ecore_dev *edev = &qdev->edev;

        ecore_reset_vport_stats(edev);
}

int qede_dev_set_link_state(struct rte_eth_dev *eth_dev, bool link_up)
{
        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
        struct qed_link_params link_params;
        int rc;

        DP_INFO(edev, "setting link state %d\n", link_up);
        memset(&link_params, 0, sizeof(link_params));
        link_params.link_up = link_up;
        rc = qdev->ops->common->set_link(edev, &link_params);
        if (rc != ECORE_SUCCESS)
                DP_ERR(edev, "Unable to set link state %d\n", link_up);

        return rc;
}

static int qede_dev_set_link_up(struct rte_eth_dev *eth_dev)
{
        return qede_dev_set_link_state(eth_dev, true);
}

static int qede_dev_set_link_down(struct rte_eth_dev *eth_dev)
{
        return qede_dev_set_link_state(eth_dev, false);
}

static void qede_reset_stats(struct rte_eth_dev *eth_dev)
{
        struct qede_dev *qdev = eth_dev->data->dev_private;
        struct ecore_dev *edev = &qdev->edev;

        ecore_reset_vport_stats(edev);
}

static void qede_allmulticast_enable(struct rte_eth_dev *eth_dev)
{
        enum qed_filter_rx_mode_type type =
            QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC;

        if (rte_eth_promiscuous_get(eth_dev->data->port_id) == 1)
                type |= QED_FILTER_RX_MODE_TYPE_PROMISC;

        qed_configure_filter_rx_mode(eth_dev, type);
}

static void qede_allmulticast_disable(struct rte_eth_dev *eth_dev)
{
        if (rte_eth_promiscuous_get(eth_dev->data->port_id) == 1)
                qed_configure_filter_rx_mode(eth_dev,
                                QED_FILTER_RX_MODE_TYPE_PROMISC);
        else
                qed_configure_filter_rx_mode(eth_dev,
                                QED_FILTER_RX_MODE_TYPE_REGULAR);
}

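/* Apply the requested pause/autoneg settings by overriding the pause
 * config of the current link.
 */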
static int qede_flow_ctrl_set(struct rte_eth_dev *eth_dev,
                              struct rte_eth_fc_conf *fc_conf)
{
        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
        struct qed_link_output current_link;
        struct qed_link_params params;

        memset(&current_link, 0, sizeof(current_link));
        qdev->ops->common->get_link(edev, &current_link);

        memset(&params, 0, sizeof(params));
        params.override_flags |= QED_LINK_OVERRIDE_PAUSE_CONFIG;
        if (fc_conf->autoneg) {
                if (!(current_link.supported_caps & QEDE_SUPPORTED_AUTONEG)) {
                        DP_ERR(edev, "Autoneg not supported\n");
                        return -EINVAL;
                }
                params.pause_config |= QED_LINK_PAUSE_AUTONEG_ENABLE;
        }

        /* Pause is assumed to be supported (SUPPORTED_Pause) */
        if (fc_conf->mode == RTE_FC_FULL)
                params.pause_config |= (QED_LINK_PAUSE_TX_ENABLE |
                                        QED_LINK_PAUSE_RX_ENABLE);
        if (fc_conf->mode == RTE_FC_TX_PAUSE)
                params.pause_config |= QED_LINK_PAUSE_TX_ENABLE;
        if (fc_conf->mode == RTE_FC_RX_PAUSE)
                params.pause_config |= QED_LINK_PAUSE_RX_ENABLE;

        params.link_up = true;
        (void)qdev->ops->common->set_link(edev, &params);

        return 0;
}

static int qede_flow_ctrl_get(struct rte_eth_dev *eth_dev,
                              struct rte_eth_fc_conf *fc_conf)
{
        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
        struct qed_link_output current_link;

        memset(&current_link, 0, sizeof(current_link));
        qdev->ops->common->get_link(edev, &current_link);

        if (current_link.pause_config & QED_LINK_PAUSE_AUTONEG_ENABLE)
                fc_conf->autoneg = true;

        if (current_link.pause_config & (QED_LINK_PAUSE_RX_ENABLE |
                                         QED_LINK_PAUSE_TX_ENABLE))
                fc_conf->mode = RTE_FC_FULL;
        else if (current_link.pause_config & QED_LINK_PAUSE_RX_ENABLE)
                fc_conf->mode = RTE_FC_RX_PAUSE;
        else if (current_link.pause_config & QED_LINK_PAUSE_TX_ENABLE)
                fc_conf->mode = RTE_FC_TX_PAUSE;
        else
                fc_conf->mode = RTE_FC_NONE;

        return 0;
}

static const uint32_t *
qede_dev_supported_ptypes_get(struct rte_eth_dev *eth_dev)
{
        static const uint32_t ptypes[] = {
                RTE_PTYPE_L3_IPV4,
                RTE_PTYPE_L3_IPV6,
                RTE_PTYPE_UNKNOWN
        };

        if (eth_dev->rx_pkt_burst == qede_recv_pkts)
                return ptypes;

        return NULL;
}

void qede_init_rss_caps(uint8_t *rss_caps, uint64_t hf)
{
        *rss_caps = 0;
        *rss_caps |= (hf & ETH_RSS_IPV4)              ? ECORE_RSS_IPV4 : 0;
        *rss_caps |= (hf & ETH_RSS_IPV6)              ? ECORE_RSS_IPV6 : 0;
        *rss_caps |= (hf & ETH_RSS_IPV6_EX)           ? ECORE_RSS_IPV6 : 0;
        *rss_caps |= (hf & ETH_RSS_NONFRAG_IPV4_TCP)  ? ECORE_RSS_IPV4_TCP : 0;
        *rss_caps |= (hf & ETH_RSS_NONFRAG_IPV6_TCP)  ? ECORE_RSS_IPV6_TCP : 0;
        *rss_caps |= (hf & ETH_RSS_IPV6_TCP_EX)       ? ECORE_RSS_IPV6_TCP : 0;
}

1291 static int qede_rss_hash_update(struct rte_eth_dev *eth_dev,
1292                                 struct rte_eth_rss_conf *rss_conf)
1293 {
1294         struct qed_update_vport_params vport_update_params;
1295         struct qede_dev *qdev = eth_dev->data->dev_private;
1296         struct ecore_dev *edev = &qdev->edev;
1297         uint32_t *key = (uint32_t *)rss_conf->rss_key;
1298         uint64_t hf = rss_conf->rss_hf;
1299         int i;
1300
1301         memset(&vport_update_params, 0, sizeof(vport_update_params));
1302
1303         if (hf != 0) {
1304                 /* Enable RSS; stage the key first so it takes effect */
1305                 qede_init_rss_caps(&qdev->rss_params.rss_caps, hf);
1306                 if (key)
1307                         memcpy(qdev->rss_params.rss_key, rss_conf->rss_key,
1308                                rss_conf->rss_key_len);
1309                 memcpy(&vport_update_params.rss_params, &qdev->rss_params,
1310                        sizeof(vport_update_params.rss_params));
1311                 vport_update_params.update_rss_flg = 1;
1312                 qdev->rss_enabled = 1;
1313         } else {
1314                 /* Disable RSS */
1315                 qdev->rss_enabled = 0;
1316         }
1317
1318         /* Fail if none of the requested hash types is supported */
1319         if (qdev->rss_params.rss_caps == 0 && hf != 0)
1320                 return -EINVAL;
1321
1322         DP_INFO(edev, "%s\n", (vport_update_params.update_rss_flg) ?
1323                                 "Enabling RSS" : "Disabling RSS");
1324
1325         vport_update_params.vport_id = 0;
1326
1327         return qdev->ops->vport_update(edev, &vport_update_params);
1328 }
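
/*
 * Illustrative sketch (not part of the driver): enabling RSS from an
 * application through the generic API, which reaches qede_rss_hash_update()
 * above. The 40-byte key size is assumed to match qdev->rss_params.rss_key;
 * "port_id" is hypothetical.
 */
#if 0
static int example_rss_enable(uint8_t port_id)
{
        static uint8_t key[40] = { 0x6d, 0x5a /* ... rest of the key ... */ };
        struct rte_eth_rss_conf rss_conf = {
                .rss_key = key,
                .rss_key_len = sizeof(key),
                .rss_hf = ETH_RSS_IPV4 | ETH_RSS_NONFRAG_IPV4_TCP |
                          ETH_RSS_IPV6 | ETH_RSS_NONFRAG_IPV6_TCP,
        };

        return rte_eth_dev_rss_hash_update(port_id, &rss_conf);
}
#endif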
1329
1330 int qede_rss_hash_conf_get(struct rte_eth_dev *eth_dev,
1331                            struct rte_eth_rss_conf *rss_conf)
1332 {
1333         struct qede_dev *qdev = eth_dev->data->dev_private;
1334         uint64_t hf;
1335
1336         if (rss_conf->rss_key_len < sizeof(qdev->rss_params.rss_key))
1337                 return -EINVAL;
1338
1339         if (rss_conf->rss_key)
1340                 memcpy(rss_conf->rss_key, qdev->rss_params.rss_key,
1341                        sizeof(qdev->rss_params.rss_key));
1342
1343         hf = 0;
1344         hf |= (qdev->rss_params.rss_caps & ECORE_RSS_IPV4)     ?
1345                         ETH_RSS_IPV4 : 0;
1346         hf |= (qdev->rss_params.rss_caps & ECORE_RSS_IPV6)     ?
1347                         ETH_RSS_IPV6 : 0;
1348         hf |= (qdev->rss_params.rss_caps & ECORE_RSS_IPV6)     ?
1349                         ETH_RSS_IPV6_EX : 0;
1350         hf |= (qdev->rss_params.rss_caps & ECORE_RSS_IPV4_TCP) ?
1351                         ETH_RSS_NONFRAG_IPV4_TCP : 0;
1352         hf |= (qdev->rss_params.rss_caps & ECORE_RSS_IPV6_TCP) ?
1353                         ETH_RSS_NONFRAG_IPV6_TCP : 0;
1354         hf |= (qdev->rss_params.rss_caps & ECORE_RSS_IPV6_TCP) ?
1355                         ETH_RSS_IPV6_TCP_EX : 0;
1356
1357         rss_conf->rss_hf = hf;
1358
1359         return 0;
1360 }
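
/*
 * Illustrative sketch (not part of the driver): reading the active RSS
 * configuration back. Per the check above, the caller's buffer must cover
 * the full key or the call fails with -EINVAL; stdio is assumed.
 */
#if 0
static void example_rss_query(uint8_t port_id)
{
        uint8_t key[40];
        struct rte_eth_rss_conf rss_conf = {
                .rss_key = key,
                .rss_key_len = sizeof(key),
        };

        if (rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf) == 0)
                printf("active rss_hf: 0x%llx\n",
                       (unsigned long long)rss_conf.rss_hf);
}
#endif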
1361
1362 static int qede_rss_reta_update(struct rte_eth_dev *eth_dev,
1363                                 struct rte_eth_rss_reta_entry64 *reta_conf,
1364                                 uint16_t reta_size)
1365 {
1366         struct qed_update_vport_params vport_update_params;
1367         struct qede_dev *qdev = eth_dev->data->dev_private;
1368         struct ecore_dev *edev = &qdev->edev;
1369         uint16_t i, idx, shift;
1370
1371         if (reta_size > ETH_RSS_RETA_SIZE_128) {
1372                 DP_ERR(edev, "reta_size %d is not supported by hardware\n",
1373                        reta_size);
1374                 return -EINVAL;
1375         }
1376
1377         memset(&vport_update_params, 0, sizeof(vport_update_params));
1378         memcpy(&vport_update_params.rss_params, &qdev->rss_params,
1379                sizeof(vport_update_params.rss_params));
1380
1381         for (i = 0; i < reta_size; i++) {
1382                 idx = i / RTE_RETA_GROUP_SIZE;
1383                 shift = i % RTE_RETA_GROUP_SIZE;
1384                 if (reta_conf[idx].mask & (1ULL << shift)) {
1385                         uint8_t entry = reta_conf[idx].reta[shift];
1386                         qdev->rss_params.rss_ind_table[i] = entry;
1387                 }
1388         }
1389
1390         vport_update_params.update_rss_flg = 1;
1391         vport_update_params.vport_id = 0;
1392
1393         return qdev->ops->vport_update(edev, &vport_update_params);
1394 }
1395
1396 int qede_rss_reta_query(struct rte_eth_dev *eth_dev,
1397                         struct rte_eth_rss_reta_entry64 *reta_conf,
1398                         uint16_t reta_size)
1399 {
1400         struct qede_dev *qdev = eth_dev->data->dev_private;
1401         uint16_t i, idx, shift;
1402
1403         if (reta_size > ETH_RSS_RETA_SIZE_128) {
1404                 struct ecore_dev *edev = &qdev->edev;
1405                 DP_ERR(edev, "reta_size %d is not supported\n", reta_size);
1406                 return -EINVAL;
1407         }
1408
1409         for (i = 0; i < reta_size; i++) {
1410                 idx = i / RTE_RETA_GROUP_SIZE;
1411                 shift = i % RTE_RETA_GROUP_SIZE;
1412                 if (reta_conf[idx].mask & (1ULL << shift)) {
1413                         uint8_t entry = qdev->rss_params.rss_ind_table[i];
1414                         reta_conf[idx].reta[shift] = entry;
1415                 }
1416         }
1417
1418         return 0;
1419 }
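
/*
 * Illustrative sketch (not part of the driver): an application fills
 * reta_conf[] using the same idx/shift decomposition as the two functions
 * above, here spreading the 128 indirection entries round-robin across
 * "nb_queues" (hypothetical, non-zero) RX queues.
 */
#if 0
static int example_reta_spread(uint8_t port_id, uint16_t nb_queues)
{
        struct rte_eth_rss_reta_entry64 reta_conf[ETH_RSS_RETA_SIZE_128 /
                                                  RTE_RETA_GROUP_SIZE];
        uint16_t i, idx, shift;

        memset(reta_conf, 0, sizeof(reta_conf));
        for (i = 0; i < ETH_RSS_RETA_SIZE_128; i++) {
                idx = i / RTE_RETA_GROUP_SIZE;   /* which 64-entry group */
                shift = i % RTE_RETA_GROUP_SIZE; /* slot within the group */
                reta_conf[idx].mask |= 1ULL << shift;
                reta_conf[idx].reta[shift] = i % nb_queues;
        }
        return rte_eth_dev_rss_reta_update(port_id, reta_conf,
                                           ETH_RSS_RETA_SIZE_128);
}
#endif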
1420
1421 int qede_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
1422 {
1423         uint32_t frame_size;
1424         struct qede_dev *qdev = dev->data->dev_private;
1425         struct rte_eth_dev_info dev_info = {0};
1426
1427         qede_dev_info_get(dev, &dev_info);
1428
1429         /* Max frame = MTU + Ethernet header + CRC + 4-byte VLAN tag */
1430         frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + 4;
1431
1432         if ((mtu < ETHER_MIN_MTU) || (frame_size > dev_info.max_rx_pktlen))
1433                 return -EINVAL;
1434
1435         if (!dev->data->scattered_rx &&
1436             frame_size > dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)
1437                 return -EINVAL;
1438
1439         if (frame_size > ETHER_MAX_LEN)
1440                 dev->data->dev_conf.rxmode.jumbo_frame = 1;
1441         else
1442                 dev->data->dev_conf.rxmode.jumbo_frame = 0;
1443
1444         /* update max frame size */
1445         dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
1446         qdev->mtu = mtu;
1447         qede_dev_stop(dev);
1448         qede_dev_start(dev);
1449
1450         return 0;
1451 }
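
/*
 * Illustrative sketch (not part of the driver): with the arithmetic above,
 * an MTU of 9000 implies 9000 + 14 (header) + 4 (CRC) + 4 (VLAN) =
 * 9022-byte frames, which must not exceed dev_info.max_rx_pktlen. Note
 * that qede_set_mtu() restarts the port to apply the change.
 */
#if 0
static int example_set_jumbo_mtu(uint8_t port_id)
{
        return rte_eth_dev_set_mtu(port_id, 9000);
}
#endif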
1452
1453 static const struct eth_dev_ops qede_eth_dev_ops = {
1454         .dev_configure = qede_dev_configure,
1455         .dev_infos_get = qede_dev_info_get,
1456         .rx_queue_setup = qede_rx_queue_setup,
1457         .rx_queue_release = qede_rx_queue_release,
1458         .tx_queue_setup = qede_tx_queue_setup,
1459         .tx_queue_release = qede_tx_queue_release,
1460         .dev_start = qede_dev_start,
1461         .dev_set_link_up = qede_dev_set_link_up,
1462         .dev_set_link_down = qede_dev_set_link_down,
1463         .link_update = qede_link_update,
1464         .promiscuous_enable = qede_promiscuous_enable,
1465         .promiscuous_disable = qede_promiscuous_disable,
1466         .allmulticast_enable = qede_allmulticast_enable,
1467         .allmulticast_disable = qede_allmulticast_disable,
1468         .dev_stop = qede_dev_stop,
1469         .dev_close = qede_dev_close,
1470         .stats_get = qede_get_stats,
1471         .stats_reset = qede_reset_stats,
1472         .xstats_get = qede_get_xstats,
1473         .xstats_reset = qede_reset_xstats,
1474         .xstats_get_names = qede_get_xstats_names,
1475         .mac_addr_add = qede_mac_addr_add,
1476         .mac_addr_remove = qede_mac_addr_remove,
1477         .mac_addr_set = qede_mac_addr_set,
1478         .vlan_offload_set = qede_vlan_offload_set,
1479         .vlan_filter_set = qede_vlan_filter_set,
1480         .flow_ctrl_set = qede_flow_ctrl_set,
1481         .flow_ctrl_get = qede_flow_ctrl_get,
1482         .dev_supported_ptypes_get = qede_dev_supported_ptypes_get,
1483         .rss_hash_update = qede_rss_hash_update,
1484         .rss_hash_conf_get = qede_rss_hash_conf_get,
1485         .reta_update  = qede_rss_reta_update,
1486         .reta_query  = qede_rss_reta_query,
1487         .mtu_set = qede_set_mtu,
1488 };
1489
1490 static const struct eth_dev_ops qede_eth_vf_dev_ops = {
1491         .dev_configure = qede_dev_configure,
1492         .dev_infos_get = qede_dev_info_get,
1493         .rx_queue_setup = qede_rx_queue_setup,
1494         .rx_queue_release = qede_rx_queue_release,
1495         .tx_queue_setup = qede_tx_queue_setup,
1496         .tx_queue_release = qede_tx_queue_release,
1497         .dev_start = qede_dev_start,
1498         .dev_set_link_up = qede_dev_set_link_up,
1499         .dev_set_link_down = qede_dev_set_link_down,
1500         .link_update = qede_link_update,
1501         .promiscuous_enable = qede_promiscuous_enable,
1502         .promiscuous_disable = qede_promiscuous_disable,
1503         .allmulticast_enable = qede_allmulticast_enable,
1504         .allmulticast_disable = qede_allmulticast_disable,
1505         .dev_stop = qede_dev_stop,
1506         .dev_close = qede_dev_close,
1507         .stats_get = qede_get_stats,
1508         .stats_reset = qede_reset_stats,
1509         .xstats_get = qede_get_xstats,
1510         .xstats_reset = qede_reset_xstats,
1511         .xstats_get_names = qede_get_xstats_names,
1512         .vlan_offload_set = qede_vlan_offload_set,
1513         .vlan_filter_set = qede_vlan_filter_set,
1514         .dev_supported_ptypes_get = qede_dev_supported_ptypes_get,
1515         .rss_hash_update = qede_rss_hash_update,
1516         .rss_hash_conf_get = qede_rss_hash_conf_get,
1517         .reta_update  = qede_rss_reta_update,
1518         .reta_query  = qede_rss_reta_query,
1519         .mtu_set = qede_set_mtu,
1520 };
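
/*
 * Note: compared with qede_eth_dev_ops above, the VF table omits the MAC
 * filtering and flow-control callbacks; those paths are PF-controlled, and
 * a VF instead learns its (possibly forced) MAC address from the PF
 * bulletin in qede_common_dev_init() below.
 */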
1521
1522 static void qede_update_pf_params(struct ecore_dev *edev)
1523 {
1524         struct ecore_pf_params pf_params;
1525         /* 32 rx + 32 tx */
1526         memset(&pf_params, 0, sizeof(struct ecore_pf_params));
1527         pf_params.eth_pf_params.num_cons = 64;
1528         qed_ops->common->update_pf_params(edev, &pf_params);
1529 }
1530
1531 static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
1532 {
1533         struct rte_pci_device *pci_dev;
1534         struct rte_pci_addr pci_addr;
1535         struct qede_dev *adapter;
1536         struct ecore_dev *edev;
1537         struct qed_dev_eth_info dev_info;
1538         struct qed_slowpath_params params;
1539         static bool do_once = true;
1540         uint8_t bulletin_change;
1541         uint8_t vf_mac[ETHER_ADDR_LEN];
1542         uint8_t is_mac_forced;
1543         bool is_mac_exist;
1544         /* Fix up ecore debug level */
1545         uint32_t dp_module = ~0 & ~ECORE_MSG_HW;
1546         uint8_t dp_level = ECORE_LEVEL_VERBOSE;
1547         uint32_t max_mac_addrs;
1548         int rc;
1549
1550         /* Extract key data structures */
1551         adapter = eth_dev->data->dev_private;
1552         edev = &adapter->edev;
1553         pci_addr = eth_dev->pci_dev->addr;
1554
1555         PMD_INIT_FUNC_TRACE(edev);
1556
1557         snprintf(edev->name, NAME_SIZE, PCI_SHORT_PRI_FMT ":dpdk-port-%u",
1558                  pci_addr.bus, pci_addr.devid, pci_addr.function,
1559                  eth_dev->data->port_id);
1560
1561         eth_dev->rx_pkt_burst = qede_recv_pkts;
1562         eth_dev->tx_pkt_burst = qede_xmit_pkts;
1563
1564         if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
1565                 DP_NOTICE(edev, false,
1566                           "Skipping device init from secondary process\n");
1567                 return 0;
1568         }
1569
1570         pci_dev = eth_dev->pci_dev;
1571
1572         rte_eth_copy_pci_info(eth_dev, pci_dev);
1573
1574         /* @DPDK */
1575         edev->vendor_id = pci_dev->id.vendor_id;
1576         edev->device_id = pci_dev->id.device_id;
1577
1578         qed_ops = qed_get_eth_ops();
1579         if (!qed_ops) {
1580                 DP_ERR(edev, "Failed to get qed_eth_ops\n");
1581                 return -EINVAL;
1582         }
1583
1584         DP_INFO(edev, "Starting qede probe\n");
1585
1586         rc = qed_ops->common->probe(edev, pci_dev, QED_PROTOCOL_ETH,
1587                                     dp_module, dp_level, is_vf);
1588
1589         if (rc != 0) {
1590                 DP_ERR(edev, "qede probe failed rc %d\n", rc);
1591                 return -ENODEV;
1592         }
1593
1594         qede_update_pf_params(edev);
1595
1596         rte_intr_callback_register(&eth_dev->pci_dev->intr_handle,
1597                                    qede_interrupt_handler, (void *)eth_dev);
1598
1599         if (rte_intr_enable(&eth_dev->pci_dev->intr_handle)) {
1600                 DP_ERR(edev, "rte_intr_enable() failed\n");
1601                 return -ENODEV;
1602         }
1603
1604         /* Start the Slowpath-process */
1605         memset(&params, 0, sizeof(struct qed_slowpath_params));
1606         params.int_mode = ECORE_INT_MODE_MSIX;
1607         params.drv_major = QEDE_PMD_VERSION_MAJOR;
1608         params.drv_minor = QEDE_PMD_VERSION_MINOR;
1609         params.drv_rev = QEDE_PMD_VERSION_REVISION;
1610         params.drv_eng = QEDE_PMD_VERSION_PATCH;
1611         strncpy((char *)params.name, QEDE_PMD_VER_PREFIX,
1612                 QEDE_PMD_DRV_VER_STR_SIZE);
1613
1614         /* For CMT-mode devices, poll periodically for slowpath events.
1615          * This is required because the uio device exposes only one MSI-X
1616          * interrupt vector, while one is needed per engine.
1617          */
1618         if (edev->num_hwfns > 1 && IS_PF(edev)) {
1619                 rc = rte_eal_alarm_set(timer_period * US_PER_S,
1620                                        qede_poll_sp_sb_cb,
1621                                        (void *)eth_dev);
1622                 if (rc != 0) {
1623                         DP_ERR(edev, "Unable to start periodic"
1624                                      " timer rc %d\n", rc);
1625                         return -EINVAL;
1626                 }
1627         }
1628
1629         rc = qed_ops->common->slowpath_start(edev, &params);
1630         if (rc) {
1631                 DP_ERR(edev, "Cannot start slowpath rc = %d\n", rc);
1632                 rte_eal_alarm_cancel(qede_poll_sp_sb_cb,
1633                                      (void *)eth_dev);
1634                 return -ENODEV;
1635         }
1636
1637         rc = qed_ops->fill_dev_info(edev, &dev_info);
1638         if (rc) {
1639                 DP_ERR(edev, "Cannot get device_info rc %d\n", rc);
1640                 qed_ops->common->slowpath_stop(edev);
1641                 qed_ops->common->remove(edev);
1642                 rte_eal_alarm_cancel(qede_poll_sp_sb_cb,
1643                                      (void *)eth_dev);
1644                 return -ENODEV;
1645         }
1646
1647         qede_alloc_etherdev(adapter, &dev_info);
1648
1649         adapter->ops->common->set_id(edev, edev->name, QEDE_PMD_VERSION);
1650
1651         if (!is_vf)
1652                 adapter->dev_info.num_mac_addrs =
1653                         (uint32_t)RESC_NUM(ECORE_LEADING_HWFN(edev),
1654                                             ECORE_MAC);
1655         else
1656                 ecore_vf_get_num_mac_filters(ECORE_LEADING_HWFN(edev),
1657                                              &adapter->dev_info.num_mac_addrs);
1658
1659         /* Allocate memory for storing the MAC addresses */
1660         eth_dev->data->mac_addrs = rte_zmalloc(edev->name,
1661                                         (ETHER_ADDR_LEN *
1662                                         adapter->dev_info.num_mac_addrs),
1663                                         RTE_CACHE_LINE_SIZE);
1664
1665         if (eth_dev->data->mac_addrs == NULL) {
1666                 DP_ERR(edev, "Failed to allocate MAC address\n");
1667                 qed_ops->common->slowpath_stop(edev);
1668                 qed_ops->common->remove(edev);
1669                 rte_eal_alarm_cancel(qede_poll_sp_sb_cb,
1670                                      (void *)eth_dev);
1671                 return -ENOMEM;
1672         }
1673
1674         if (!is_vf) {
1675                 ether_addr_copy((struct ether_addr *)edev->hwfns[0].
1676                                 hw_info.hw_mac_addr,
1677                                 &eth_dev->data->mac_addrs[0]);
1678                 ether_addr_copy(&eth_dev->data->mac_addrs[0],
1679                                 &adapter->primary_mac);
1680         } else {
1681                 ecore_vf_read_bulletin(ECORE_LEADING_HWFN(edev),
1682                                        &bulletin_change);
1683                 if (bulletin_change) {
1684                         is_mac_exist =
1685                             ecore_vf_bulletin_get_forced_mac(
1686                                                 ECORE_LEADING_HWFN(edev),
1687                                                 vf_mac,
1688                                                 &is_mac_forced);
1689                         if (is_mac_exist && is_mac_forced) {
1690                                 DP_INFO(edev, "VF macaddr received from PF\n");
1691                                 ether_addr_copy((struct ether_addr *)&vf_mac,
1692                                                 &eth_dev->data->mac_addrs[0]);
1693                                 ether_addr_copy(&eth_dev->data->mac_addrs[0],
1694                                                 &adapter->primary_mac);
1695                         } else {
1696                                 DP_NOTICE(edev, false,
1697                                           "No VF macaddr assigned\n");
1698                         }
1699                 }
1700         }
1701
1702         eth_dev->dev_ops = (is_vf) ? &qede_eth_vf_dev_ops : &qede_eth_dev_ops;
1703
1704         if (do_once) {
1705                 qede_print_adapter_info(adapter);
1706                 do_once = false;
1707         }
1708
1709         adapter->state = QEDE_DEV_INIT;
1710
1711         DP_NOTICE(edev, false, "MAC address: %02x:%02x:%02x:%02x:%02x:%02x\n",
1712                   adapter->primary_mac.addr_bytes[0],
1713                   adapter->primary_mac.addr_bytes[1],
1714                   adapter->primary_mac.addr_bytes[2],
1715                   adapter->primary_mac.addr_bytes[3],
1716                   adapter->primary_mac.addr_bytes[4],
1717                   adapter->primary_mac.addr_bytes[5]);
1718
1719         return rc;
1720 }
1721
1722 static int qedevf_eth_dev_init(struct rte_eth_dev *eth_dev)
1723 {
1724         return qede_common_dev_init(eth_dev, true);
1725 }
1726
1727 static int qede_eth_dev_init(struct rte_eth_dev *eth_dev)
1728 {
1729         return qede_common_dev_init(eth_dev, false);
1730 }
1731
1732 static int qede_dev_common_uninit(struct rte_eth_dev *eth_dev)
1733 {
1734         /* only uninitialize in the primary process */
1735         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1736                 return 0;
1737
1738         /* safe to close dev here */
1739         qede_dev_close(eth_dev);
1740
1741         eth_dev->dev_ops = NULL;
1742         eth_dev->rx_pkt_burst = NULL;
1743         eth_dev->tx_pkt_burst = NULL;
1744
1745         if (eth_dev->data->mac_addrs)
1746                 rte_free(eth_dev->data->mac_addrs);
1747
1748         eth_dev->data->mac_addrs = NULL;
1749
1750         return 0;
1751 }
1752
1753 static int qede_eth_dev_uninit(struct rte_eth_dev *eth_dev)
1754 {
1755         return qede_dev_common_uninit(eth_dev);
1756 }
1757
1758 static int qedevf_eth_dev_uninit(struct rte_eth_dev *eth_dev)
1759 {
1760         return qede_dev_common_uninit(eth_dev);
1761 }
1762
1763 static struct rte_pci_id pci_id_qedevf_map[] = {
1764 #define QEDEVF_RTE_PCI_DEVICE(dev) RTE_PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, dev)
1765         {
1766                 QEDEVF_RTE_PCI_DEVICE(PCI_DEVICE_ID_NX2_VF)
1767         },
1768         {
1769                 QEDEVF_RTE_PCI_DEVICE(PCI_DEVICE_ID_57980S_IOV)
1770         },
1771         {.vendor_id = 0,}
1772 };
1773
1774 static struct rte_pci_id pci_id_qede_map[] = {
1775 #define QEDE_RTE_PCI_DEVICE(dev) RTE_PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, dev)
1776         {
1777                 QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_NX2_57980E)
1778         },
1779         {
1780                 QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_NX2_57980S)
1781         },
1782         {
1783                 QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_57980S_40)
1784         },
1785         {
1786                 QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_57980S_25)
1787         },
1788         {
1789                 QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_57980S_100)
1790         },
1791         {.vendor_id = 0,}
1792 };
1793
1794 static struct eth_driver rte_qedevf_pmd = {
1795         .pci_drv = {
1796                     .id_table = pci_id_qedevf_map,
1797                     .drv_flags =
1798                     RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
1799                     .probe = rte_eth_dev_pci_probe,
1800                     .remove = rte_eth_dev_pci_remove,
1801                    },
1802         .eth_dev_init = qedevf_eth_dev_init,
1803         .eth_dev_uninit = qedevf_eth_dev_uninit,
1804         .dev_private_size = sizeof(struct qede_dev),
1805 };
1806
1807 static struct eth_driver rte_qede_pmd = {
1808         .pci_drv = {
1809                     .id_table = pci_id_qede_map,
1810                     .drv_flags =
1811                     RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
1812                     .probe = rte_eth_dev_pci_probe,
1813                     .remove = rte_eth_dev_pci_remove,
1814                    },
1815         .eth_dev_init = qede_eth_dev_init,
1816         .eth_dev_uninit = qede_eth_dev_uninit,
1817         .dev_private_size = sizeof(struct qede_dev),
1818 };
1819
1820 RTE_PMD_REGISTER_PCI(net_qede, rte_qede_pmd.pci_drv);
1821 RTE_PMD_REGISTER_PCI_TABLE(net_qede, pci_id_qede_map);
1822 RTE_PMD_REGISTER_PCI(net_qede_vf, rte_qedevf_pmd.pci_drv);
1823 RTE_PMD_REGISTER_PCI_TABLE(net_qede_vf, pci_id_qedevf_map);
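
/*
 * Illustrative usage (not part of the driver): the registrations above
 * expose the PMDs as "net_qede" and "net_qede_vf"; probing can be limited
 * to one adapter with an EAL PCI whitelist, e.g. (hypothetical address):
 *
 *   testpmd -c 0xf -n 4 -w 0000:05:00.0 -- -i
 */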