1 /*
2  * Copyright (c) 2016 QLogic Corporation.
3  * All rights reserved.
4  * www.qlogic.com
5  *
6  * See LICENSE.qede_pmd for copyright and licensing details.
7  */
8
9 #include "qede_ethdev.h"
10 #include <rte_alarm.h>
11 #include <rte_version.h>
12 #include <rte_kvargs.h>
13
14 /* Globals */
15 static const struct qed_eth_ops *qed_ops;
16 static int64_t timer_period = 1;
17
18 /* VXLAN tunnel classification mapping */
19 const struct _qede_vxlan_tunn_types {
20         uint16_t rte_filter_type;
21         enum ecore_filter_ucast_type qede_type;
22         enum ecore_tunn_clss qede_tunn_clss;
23         const char *string;
24 } qede_tunn_types[] = {
25         {
26                 ETH_TUNNEL_FILTER_OMAC,
27                 ECORE_FILTER_MAC,
28                 ECORE_TUNN_CLSS_MAC_VLAN,
29                 "outer-mac"
30         },
31         {
32                 ETH_TUNNEL_FILTER_TENID,
33                 ECORE_FILTER_VNI,
34                 ECORE_TUNN_CLSS_MAC_VNI,
35                 "vni"
36         },
37         {
38                 ETH_TUNNEL_FILTER_IMAC,
39                 ECORE_FILTER_INNER_MAC,
40                 ECORE_TUNN_CLSS_INNER_MAC_VLAN,
41                 "inner-mac"
42         },
43         {
44                 ETH_TUNNEL_FILTER_IVLAN,
45                 ECORE_FILTER_INNER_VLAN,
46                 ECORE_TUNN_CLSS_INNER_MAC_VLAN,
47                 "inner-vlan"
48         },
49         {
50                 ETH_TUNNEL_FILTER_OMAC | ETH_TUNNEL_FILTER_TENID,
51                 ECORE_FILTER_MAC_VNI_PAIR,
52                 ECORE_TUNN_CLSS_MAC_VNI,
53                 "outer-mac and vni"
54         },
55         {
56                 ETH_TUNNEL_FILTER_OMAC | ETH_TUNNEL_FILTER_IMAC,
57                 ECORE_FILTER_UNUSED,
58                 MAX_ECORE_TUNN_CLSS,
59                 "outer-mac and inner-mac"
60         },
61         {
62                 ETH_TUNNEL_FILTER_OMAC | ETH_TUNNEL_FILTER_IVLAN,
63                 ECORE_FILTER_UNUSED,
64                 MAX_ECORE_TUNN_CLSS,
65                 "outer-mac and inner-vlan"
66         },
67         {
68                 ETH_TUNNEL_FILTER_TENID | ETH_TUNNEL_FILTER_IMAC,
69                 ECORE_FILTER_INNER_MAC_VNI_PAIR,
70                 ECORE_TUNN_CLSS_INNER_MAC_VNI,
71                 "vni and inner-mac",
72         },
73         {
74                 ETH_TUNNEL_FILTER_TENID | ETH_TUNNEL_FILTER_IVLAN,
75                 ECORE_FILTER_UNUSED,
76                 MAX_ECORE_TUNN_CLSS,
77                 "vni and inner-vlan",
78         },
79         {
80                 ETH_TUNNEL_FILTER_IMAC | ETH_TUNNEL_FILTER_IVLAN,
81                 ECORE_FILTER_INNER_PAIR,
82                 ECORE_TUNN_CLSS_INNER_MAC_VLAN,
83                 "inner-mac and inner-vlan",
84         },
85         {
86                 ETH_TUNNEL_FILTER_OIP,
87                 ECORE_FILTER_UNUSED,
88                 MAX_ECORE_TUNN_CLSS,
89                 "outer-IP"
90         },
91         {
92                 ETH_TUNNEL_FILTER_IIP,
93                 ECORE_FILTER_UNUSED,
94                 MAX_ECORE_TUNN_CLSS,
95                 "inner-IP"
96         },
97         {
98                 RTE_TUNNEL_FILTER_IMAC_IVLAN,
99                 ECORE_FILTER_UNUSED,
100                 MAX_ECORE_TUNN_CLSS,
101                 "IMAC_IVLAN"
102         },
103         {
104                 RTE_TUNNEL_FILTER_IMAC_IVLAN_TENID,
105                 ECORE_FILTER_UNUSED,
106                 MAX_ECORE_TUNN_CLSS,
107                 "IMAC_IVLAN_TENID"
108         },
109         {
110                 RTE_TUNNEL_FILTER_IMAC_TENID,
111                 ECORE_FILTER_UNUSED,
112                 MAX_ECORE_TUNN_CLSS,
113                 "IMAC_TENID"
114         },
115         {
116                 RTE_TUNNEL_FILTER_OMAC_TENID_IMAC,
117                 ECORE_FILTER_UNUSED,
118                 MAX_ECORE_TUNN_CLSS,
119                 "OMAC_TENID_IMAC"
120         },
121 };
122
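/* Each extended statistic is described by its display name and the byte
 * offset of the corresponding counter inside the ecore statistics structure.
 */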
123 struct rte_qede_xstats_name_off {
124         char name[RTE_ETH_XSTATS_NAME_SIZE];
125         uint64_t offset;
126 };
127
128 static const struct rte_qede_xstats_name_off qede_xstats_strings[] = {
129         {"rx_unicast_bytes",
130                 offsetof(struct ecore_eth_stats_common, rx_ucast_bytes)},
131         {"rx_multicast_bytes",
132                 offsetof(struct ecore_eth_stats_common, rx_mcast_bytes)},
133         {"rx_broadcast_bytes",
134                 offsetof(struct ecore_eth_stats_common, rx_bcast_bytes)},
135         {"rx_unicast_packets",
136                 offsetof(struct ecore_eth_stats_common, rx_ucast_pkts)},
137         {"rx_multicast_packets",
138                 offsetof(struct ecore_eth_stats_common, rx_mcast_pkts)},
139         {"rx_broadcast_packets",
140                 offsetof(struct ecore_eth_stats_common, rx_bcast_pkts)},
141
142         {"tx_unicast_bytes",
143                 offsetof(struct ecore_eth_stats_common, tx_ucast_bytes)},
144         {"tx_multicast_bytes",
145                 offsetof(struct ecore_eth_stats_common, tx_mcast_bytes)},
146         {"tx_broadcast_bytes",
147                 offsetof(struct ecore_eth_stats_common, tx_bcast_bytes)},
148         {"tx_unicast_packets",
149                 offsetof(struct ecore_eth_stats_common, tx_ucast_pkts)},
150         {"tx_multicast_packets",
151                 offsetof(struct ecore_eth_stats_common, tx_mcast_pkts)},
152         {"tx_broadcast_packets",
153                 offsetof(struct ecore_eth_stats_common, tx_bcast_pkts)},
154
155         {"rx_64_byte_packets",
156                 offsetof(struct ecore_eth_stats_common, rx_64_byte_packets)},
157         {"rx_65_to_127_byte_packets",
158                 offsetof(struct ecore_eth_stats_common,
159                          rx_65_to_127_byte_packets)},
160         {"rx_128_to_255_byte_packets",
161                 offsetof(struct ecore_eth_stats_common,
162                          rx_128_to_255_byte_packets)},
163         {"rx_256_to_511_byte_packets",
164                 offsetof(struct ecore_eth_stats_common,
165                          rx_256_to_511_byte_packets)},
166         {"rx_512_to_1023_byte_packets",
167                 offsetof(struct ecore_eth_stats_common,
168                          rx_512_to_1023_byte_packets)},
169         {"rx_1024_to_1518_byte_packets",
170                 offsetof(struct ecore_eth_stats_common,
171                          rx_1024_to_1518_byte_packets)},
172         {"tx_64_byte_packets",
173                 offsetof(struct ecore_eth_stats_common, tx_64_byte_packets)},
174         {"tx_65_to_127_byte_packets",
175                 offsetof(struct ecore_eth_stats_common,
176                          tx_65_to_127_byte_packets)},
177         {"tx_128_to_255_byte_packets",
178                 offsetof(struct ecore_eth_stats_common,
179                          tx_128_to_255_byte_packets)},
180         {"tx_256_to_511_byte_packets",
181                 offsetof(struct ecore_eth_stats_common,
182                          tx_256_to_511_byte_packets)},
183         {"tx_512_to_1023_byte_packets",
184                 offsetof(struct ecore_eth_stats_common,
185                          tx_512_to_1023_byte_packets)},
186         {"tx_1024_to_1518_byte_packets",
187                 offsetof(struct ecore_eth_stats_common,
188                          tx_1024_to_1518_byte_packets)},
189
190         {"rx_mac_crtl_frames",
191                 offsetof(struct ecore_eth_stats_common, rx_mac_crtl_frames)},
192         {"tx_mac_control_frames",
193                 offsetof(struct ecore_eth_stats_common, tx_mac_ctrl_frames)},
194         {"rx_pause_frames",
195                 offsetof(struct ecore_eth_stats_common, rx_pause_frames)},
196         {"tx_pause_frames",
197                 offsetof(struct ecore_eth_stats_common, tx_pause_frames)},
198         {"rx_priority_flow_control_frames",
199                 offsetof(struct ecore_eth_stats_common, rx_pfc_frames)},
200         {"tx_priority_flow_control_frames",
201                 offsetof(struct ecore_eth_stats_common, tx_pfc_frames)},
202
203         {"rx_crc_errors",
204                 offsetof(struct ecore_eth_stats_common, rx_crc_errors)},
205         {"rx_align_errors",
206                 offsetof(struct ecore_eth_stats_common, rx_align_errors)},
207         {"rx_carrier_errors",
208                 offsetof(struct ecore_eth_stats_common, rx_carrier_errors)},
209         {"rx_oversize_packet_errors",
210                 offsetof(struct ecore_eth_stats_common, rx_oversize_packets)},
211         {"rx_jabber_errors",
212                 offsetof(struct ecore_eth_stats_common, rx_jabbers)},
213         {"rx_undersize_packet_errors",
214                 offsetof(struct ecore_eth_stats_common, rx_undersize_packets)},
215         {"rx_fragments", offsetof(struct ecore_eth_stats_common, rx_fragments)},
216         {"rx_host_buffer_not_available",
217                 offsetof(struct ecore_eth_stats_common, no_buff_discards)},
218         /* Number of packets discarded because they are bigger than MTU */
219         {"rx_packet_too_big_discards",
220                 offsetof(struct ecore_eth_stats_common,
221                          packet_too_big_discard)},
222         {"rx_ttl_zero_discards",
223                 offsetof(struct ecore_eth_stats_common, ttl0_discard)},
224         {"rx_multi_function_tag_filter_discards",
225                 offsetof(struct ecore_eth_stats_common, mftag_filter_discards)},
226         {"rx_mac_filter_discards",
227                 offsetof(struct ecore_eth_stats_common, mac_filter_discards)},
228         {"rx_hw_buffer_truncates",
229                 offsetof(struct ecore_eth_stats_common, brb_truncates)},
230         {"rx_hw_buffer_discards",
231                 offsetof(struct ecore_eth_stats_common, brb_discards)},
232         {"tx_error_drop_packets",
233                 offsetof(struct ecore_eth_stats_common, tx_err_drop_pkts)},
234
235         {"rx_mac_bytes", offsetof(struct ecore_eth_stats_common, rx_mac_bytes)},
236         {"rx_mac_unicast_packets",
237                 offsetof(struct ecore_eth_stats_common, rx_mac_uc_packets)},
238         {"rx_mac_multicast_packets",
239                 offsetof(struct ecore_eth_stats_common, rx_mac_mc_packets)},
240         {"rx_mac_broadcast_packets",
241                 offsetof(struct ecore_eth_stats_common, rx_mac_bc_packets)},
242         {"rx_mac_frames_ok",
243                 offsetof(struct ecore_eth_stats_common, rx_mac_frames_ok)},
244         {"tx_mac_bytes", offsetof(struct ecore_eth_stats_common, tx_mac_bytes)},
245         {"tx_mac_unicast_packets",
246                 offsetof(struct ecore_eth_stats_common, tx_mac_uc_packets)},
247         {"tx_mac_multicast_packets",
248                 offsetof(struct ecore_eth_stats_common, tx_mac_mc_packets)},
249         {"tx_mac_broadcast_packets",
250                 offsetof(struct ecore_eth_stats_common, tx_mac_bc_packets)},
251
252         {"lro_coalesced_packets",
253                 offsetof(struct ecore_eth_stats_common, tpa_coalesced_pkts)},
254         {"lro_coalesced_events",
255                 offsetof(struct ecore_eth_stats_common, tpa_coalesced_events)},
256         {"lro_aborts_num",
257                 offsetof(struct ecore_eth_stats_common, tpa_aborts_num)},
258         {"lro_not_coalesced_packets",
259                 offsetof(struct ecore_eth_stats_common,
260                          tpa_not_coalesced_pkts)},
261         {"lro_coalesced_bytes",
262                 offsetof(struct ecore_eth_stats_common,
263                          tpa_coalesced_bytes)},
264 };
265
266 static const struct rte_qede_xstats_name_off qede_bb_xstats_strings[] = {
267         {"rx_1519_to_1522_byte_packets",
268                 offsetof(struct ecore_eth_stats, bb) +
269                 offsetof(struct ecore_eth_stats_bb,
270                          rx_1519_to_1522_byte_packets)},
271         {"rx_1519_to_2047_byte_packets",
272                 offsetof(struct ecore_eth_stats, bb) +
273                 offsetof(struct ecore_eth_stats_bb,
274                          rx_1519_to_2047_byte_packets)},
275         {"rx_2048_to_4095_byte_packets",
276                 offsetof(struct ecore_eth_stats, bb) +
277                 offsetof(struct ecore_eth_stats_bb,
278                          rx_2048_to_4095_byte_packets)},
279         {"rx_4096_to_9216_byte_packets",
280                 offsetof(struct ecore_eth_stats, bb) +
281                 offsetof(struct ecore_eth_stats_bb,
282                          rx_4096_to_9216_byte_packets)},
283         {"rx_9217_to_16383_byte_packets",
284                 offsetof(struct ecore_eth_stats, bb) +
285                 offsetof(struct ecore_eth_stats_bb,
286                          rx_9217_to_16383_byte_packets)},
287
288         {"tx_1519_to_2047_byte_packets",
289                 offsetof(struct ecore_eth_stats, bb) +
290                 offsetof(struct ecore_eth_stats_bb,
291                          tx_1519_to_2047_byte_packets)},
292         {"tx_2048_to_4095_byte_packets",
293                 offsetof(struct ecore_eth_stats, bb) +
294                 offsetof(struct ecore_eth_stats_bb,
295                          tx_2048_to_4095_byte_packets)},
296         {"tx_4096_to_9216_byte_packets",
297                 offsetof(struct ecore_eth_stats, bb) +
298                 offsetof(struct ecore_eth_stats_bb,
299                          tx_4096_to_9216_byte_packets)},
300         {"tx_9217_to_16383_byte_packets",
301                 offsetof(struct ecore_eth_stats, bb) +
302                 offsetof(struct ecore_eth_stats_bb,
303                          tx_9217_to_16383_byte_packets)},
304
305         {"tx_lpi_entry_count",
306                 offsetof(struct ecore_eth_stats, bb) +
307                 offsetof(struct ecore_eth_stats_bb, tx_lpi_entry_count)},
308         {"tx_total_collisions",
309                 offsetof(struct ecore_eth_stats, bb) +
310                 offsetof(struct ecore_eth_stats_bb, tx_total_collisions)},
311 };
312
313 static const struct rte_qede_xstats_name_off qede_ah_xstats_strings[] = {
314         {"rx_1519_to_max_byte_packets",
315                 offsetof(struct ecore_eth_stats, ah) +
316                 offsetof(struct ecore_eth_stats_ah,
317                          rx_1519_to_max_byte_packets)},
318         {"tx_1519_to_max_byte_packets",
319                 offsetof(struct ecore_eth_stats, ah) +
320                 offsetof(struct ecore_eth_stats_ah,
321                          tx_1519_to_max_byte_packets)},
322 };
323
324 static const struct rte_qede_xstats_name_off qede_rxq_xstats_strings[] = {
325         {"rx_q_segments",
326                 offsetof(struct qede_rx_queue, rx_segs)},
327         {"rx_q_hw_errors",
328                 offsetof(struct qede_rx_queue, rx_hw_errors)},
329         {"rx_q_allocation_errors",
330                 offsetof(struct qede_rx_queue, rx_alloc_errors)}
331 };
332
333 static void qede_interrupt_action(struct ecore_hwfn *p_hwfn)
334 {
335         ecore_int_sp_dpc((osal_int_ptr_t)(p_hwfn));
336 }
337
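/* Slowpath interrupt handler: run the ecore slowpath DPC on the leading
 * HW function and re-arm the interrupt line.
 */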
338 static void
339 qede_interrupt_handler(void *param)
340 {
341         struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param;
342         struct qede_dev *qdev = eth_dev->data->dev_private;
343         struct ecore_dev *edev = &qdev->edev;
344
345         qede_interrupt_action(ECORE_LEADING_HWFN(edev));
346         if (rte_intr_enable(eth_dev->intr_handle))
347                 DP_ERR(edev, "rte_intr_enable failed\n");
348 }
349
350 static void
351 qede_alloc_etherdev(struct qede_dev *qdev, struct qed_dev_eth_info *info)
352 {
353         rte_memcpy(&qdev->dev_info, info, sizeof(*info));
354         qdev->ops = qed_ops;
355 }
356
357 #ifdef RTE_LIBRTE_QEDE_DEBUG_INFO
358 static void qede_print_adapter_info(struct qede_dev *qdev)
359 {
360         struct ecore_dev *edev = &qdev->edev;
361         struct qed_dev_info *info = &qdev->dev_info.common;
362         static char drv_ver[QEDE_PMD_DRV_VER_STR_SIZE];
363         static char ver_str[QEDE_PMD_DRV_VER_STR_SIZE];
364
365         DP_INFO(edev, "*********************************\n");
366         DP_INFO(edev, " DPDK version:%s\n", rte_version());
367         DP_INFO(edev, " Chip details : %s %c%d\n",
368                   ECORE_IS_BB(edev) ? "BB" : "AH",
369                   'A' + edev->chip_rev,
370                   (int)edev->chip_metal);
371         snprintf(ver_str, QEDE_PMD_DRV_VER_STR_SIZE, "%d.%d.%d.%d",
372                  info->fw_major, info->fw_minor, info->fw_rev, info->fw_eng);
373         snprintf(drv_ver, QEDE_PMD_DRV_VER_STR_SIZE, "%s_%s",
374                  ver_str, QEDE_PMD_VERSION);
375         DP_INFO(edev, " Driver version : %s\n", drv_ver);
376         DP_INFO(edev, " Firmware version : %s\n", ver_str);
377
378         snprintf(ver_str, MCP_DRV_VER_STR_SIZE,
379                  "%d.%d.%d.%d",
380                 (info->mfw_rev >> 24) & 0xff,
381                 (info->mfw_rev >> 16) & 0xff,
382                 (info->mfw_rev >> 8) & 0xff, (info->mfw_rev) & 0xff);
383         DP_INFO(edev, " Management Firmware version : %s\n", ver_str);
384         DP_INFO(edev, " Firmware file : %s\n", fw_file);
385         DP_INFO(edev, "*********************************\n");
386 }
387 #endif
388
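/* Zero the per-queue software counters (rcv/xmit packets, Rx errors and,
 * optionally, the Rx extended statistics).
 */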
389 static void qede_reset_queue_stats(struct qede_dev *qdev, bool xstats)
390 {
391 #ifdef RTE_LIBRTE_QEDE_DEBUG_DRIVER
392         struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
393 #endif
394         unsigned int i = 0, j = 0, qid;
395         unsigned int rxq_stat_cntrs, txq_stat_cntrs;
396         struct qede_tx_queue *txq;
397
398         DP_VERBOSE(edev, ECORE_MSG_DEBUG, "Clearing queue stats\n");
399
400         rxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(qdev),
401                                RTE_ETHDEV_QUEUE_STAT_CNTRS);
402         txq_stat_cntrs = RTE_MIN(QEDE_TSS_COUNT(qdev),
403                                RTE_ETHDEV_QUEUE_STAT_CNTRS);
404
405         for_each_rss(qid) {
406                 OSAL_MEMSET(((char *)(qdev->fp_array[qid].rxq)) +
407                              offsetof(struct qede_rx_queue, rcv_pkts), 0,
408                             sizeof(uint64_t));
409                 OSAL_MEMSET(((char *)(qdev->fp_array[qid].rxq)) +
410                              offsetof(struct qede_rx_queue, rx_hw_errors), 0,
411                             sizeof(uint64_t));
412                 OSAL_MEMSET(((char *)(qdev->fp_array[qid].rxq)) +
413                              offsetof(struct qede_rx_queue, rx_alloc_errors), 0,
414                             sizeof(uint64_t));
415
416                 if (xstats)
417                         for (j = 0; j < RTE_DIM(qede_rxq_xstats_strings); j++)
418                                 OSAL_MEMSET((((char *)
419                                               (qdev->fp_array[qid].rxq)) +
420                                              qede_rxq_xstats_strings[j].offset),
421                                             0,
422                                             sizeof(uint64_t));
423
424                 i++;
425                 if (i == rxq_stat_cntrs)
426                         break;
427         }
428
429         i = 0;
430
431         for_each_tss(qid) {
432                 txq = qdev->fp_array[qid].txq;
433
434                 OSAL_MEMSET((uint64_t *)(uintptr_t)
435                                 (((uint64_t)(uintptr_t)(txq)) +
436                                  offsetof(struct qede_tx_queue, xmit_pkts)), 0,
437                             sizeof(uint64_t));
438
439                 i++;
440                 if (i == txq_stat_cntrs)
441                         break;
442         }
443 }
444
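/* Start vport 0 on every HW function with the given MTU and reset the
 * vport statistics. FW placement offset is disabled for DPDK.
 */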
445 static int
446 qede_start_vport(struct qede_dev *qdev, uint16_t mtu)
447 {
448         struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
449         struct ecore_sp_vport_start_params params;
450         struct ecore_hwfn *p_hwfn;
451         int rc;
452         int i;
453
454         memset(&params, 0, sizeof(params));
455         params.vport_id = 0;
456         params.mtu = mtu;
457         /* @DPDK - Disable FW placement */
458         params.zero_placement_offset = 1;
459         for_each_hwfn(edev, i) {
460                 p_hwfn = &edev->hwfns[i];
461                 params.concrete_fid = p_hwfn->hw_info.concrete_fid;
462                 params.opaque_fid = p_hwfn->hw_info.opaque_fid;
463                 rc = ecore_sp_vport_start(p_hwfn, &params);
464                 if (rc != ECORE_SUCCESS) {
465                         DP_ERR(edev, "Start V-PORT failed %d\n", rc);
466                         return rc;
467                 }
468         }
469         ecore_reset_vport_stats(edev);
470         if (IS_PF(edev))
471                 qede_reset_queue_stats(qdev, true);
472         DP_INFO(edev, "VPORT started with MTU = %u\n", mtu);
473
474         return 0;
475 }
476
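/* Stop vport 0 on every HW function. */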
477 static int
478 qede_stop_vport(struct ecore_dev *edev)
479 {
480         struct ecore_hwfn *p_hwfn;
481         uint8_t vport_id;
482         int rc;
483         int i;
484
485         vport_id = 0;
486         for_each_hwfn(edev, i) {
487                 p_hwfn = &edev->hwfns[i];
488                 rc = ecore_sp_vport_stop(p_hwfn, p_hwfn->hw_info.opaque_fid,
489                                          vport_id);
490                 if (rc != ECORE_SUCCESS) {
491                         DP_ERR(edev, "Stop V-PORT failed rc = %d\n", rc);
492                         return rc;
493                 }
494         }
495
496         return 0;
497 }
498
499 /* Activate or deactivate vport via vport-update */
500 int qede_activate_vport(struct rte_eth_dev *eth_dev, bool flg)
501 {
502         struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
503         struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
504         struct ecore_sp_vport_update_params params;
505         struct ecore_hwfn *p_hwfn;
506         uint8_t i;
507         int rc = -1;
508
509         memset(&params, 0, sizeof(struct ecore_sp_vport_update_params));
510         params.vport_id = 0;
511         params.update_vport_active_rx_flg = 1;
512         params.update_vport_active_tx_flg = 1;
513         params.vport_active_rx_flg = flg;
514         params.vport_active_tx_flg = flg;
515         if (!qdev->enable_tx_switching) {
516                 if (IS_VF(edev)) {
517                         params.update_tx_switching_flg = 1;
518                         params.tx_switching_flg = !flg;
519                         DP_INFO(edev, "VF tx-switching is disabled\n");
520                 }
521         }
522         for_each_hwfn(edev, i) {
523                 p_hwfn = &edev->hwfns[i];
524                 params.opaque_fid = p_hwfn->hw_info.opaque_fid;
525                 rc = ecore_sp_vport_update(p_hwfn, &params,
526                                 ECORE_SPQ_MODE_EBLOCK, NULL);
527                 if (rc != ECORE_SUCCESS) {
528                         DP_ERR(edev, "Failed to update vport\n");
529                         break;
530                 }
531         }
532         DP_INFO(edev, "vport is %s\n", flg ? "activated" : "deactivated");
533
534         return rc;
535 }
536
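/* Fill the LRO/TPA parameters used in a vport-update. Aggregation start and
 * continuation thresholds are derived from the current MTU.
 */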
537 static void
538 qede_update_sge_tpa_params(struct ecore_sge_tpa_params *sge_tpa_params,
539                            uint16_t mtu, bool enable)
540 {
541         /* Enable LRO in split mode */
542         sge_tpa_params->tpa_ipv4_en_flg = enable;
543         sge_tpa_params->tpa_ipv6_en_flg = enable;
544         sge_tpa_params->tpa_ipv4_tunn_en_flg = enable;
545         sge_tpa_params->tpa_ipv6_tunn_en_flg = enable;
546         /* set if tpa enable changes */
547         sge_tpa_params->update_tpa_en_flg = 1;
548         /* set if tpa parameters should be handled */
549         sge_tpa_params->update_tpa_param_flg = enable;
550
551         sge_tpa_params->max_buffers_per_cqe = 20;
552         /* Enable TPA in split mode. In this mode each TPA segment
553          * starts on a new BD, so there is one BD per segment.
554          */
555         sge_tpa_params->tpa_pkt_split_flg = 1;
556         sge_tpa_params->tpa_hdr_data_split_flg = 0;
557         sge_tpa_params->tpa_gro_consistent_flg = 0;
558         sge_tpa_params->tpa_max_aggs_num = ETH_TPA_MAX_AGGS_NUM;
559         sge_tpa_params->tpa_max_size = 0x7FFF;
560         sge_tpa_params->tpa_min_size_to_start = mtu / 2;
561         sge_tpa_params->tpa_min_size_to_cont = mtu / 2;
562 }
563
564 /* Enable/disable LRO via vport-update */
565 int qede_enable_tpa(struct rte_eth_dev *eth_dev, bool flg)
566 {
567         struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
568         struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
569         struct ecore_sp_vport_update_params params;
570         struct ecore_sge_tpa_params tpa_params;
571         struct ecore_hwfn *p_hwfn;
572         int rc;
573         int i;
574
575         memset(&params, 0, sizeof(struct ecore_sp_vport_update_params));
576         memset(&tpa_params, 0, sizeof(struct ecore_sge_tpa_params));
577         qede_update_sge_tpa_params(&tpa_params, qdev->mtu, flg);
578         params.vport_id = 0;
579         params.sge_tpa_params = &tpa_params;
580         for_each_hwfn(edev, i) {
581                 p_hwfn = &edev->hwfns[i];
582                 params.opaque_fid = p_hwfn->hw_info.opaque_fid;
583                 rc = ecore_sp_vport_update(p_hwfn, &params,
584                                 ECORE_SPQ_MODE_EBLOCK, NULL);
585                 if (rc != ECORE_SUCCESS) {
586                         DP_ERR(edev, "Failed to update LRO\n");
587                         return -1;
588                 }
589         }
590         qdev->enable_lro = flg;
591         DP_INFO(edev, "LRO is %s\n", flg ? "enabled" : "disabled");
592
593         return 0;
594 }
595
596 /* Update MTU via vport-update without doing port restart.
597  * The vport must be deactivated before calling this API.
598  */
599 int qede_update_mtu(struct rte_eth_dev *eth_dev, uint16_t mtu)
600 {
601         struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
602         struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
603         struct ecore_sp_vport_update_params params;
604         struct ecore_hwfn *p_hwfn;
605         int rc;
606         int i;
607
608         memset(&params, 0, sizeof(struct ecore_sp_vport_update_params));
609         params.vport_id = 0;
610         params.mtu = mtu;
612         for_each_hwfn(edev, i) {
613                 p_hwfn = &edev->hwfns[i];
614                 params.opaque_fid = p_hwfn->hw_info.opaque_fid;
615                 rc = ecore_sp_vport_update(p_hwfn, &params,
616                                 ECORE_SPQ_MODE_EBLOCK, NULL);
617                 if (rc != ECORE_SUCCESS) {
618                         DP_ERR(edev, "Failed to update MTU\n");
619                         return -1;
620                 }
621         }
622         DP_INFO(edev, "MTU updated to %u\n", mtu);
623
624         return 0;
625 }
626
627 static void qede_set_ucast_cmn_params(struct ecore_filter_ucast *ucast)
628 {
629         memset(ucast, 0, sizeof(struct ecore_filter_ucast));
630         ucast->is_rx_filter = true;
631         ucast->is_tx_filter = true;
632         /* ucast->assert_on_error = true; - For debug */
633 }
634
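/* Program the Rx/Tx accept flags (unicast, multicast, broadcast and the
 * promiscuous/all-multicast variants) for the given Rx-mode type.
 */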
635 static int
636 qed_configure_filter_rx_mode(struct rte_eth_dev *eth_dev,
637                              enum qed_filter_rx_mode_type type)
638 {
639         struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
640         struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
641         struct ecore_filter_accept_flags flags;
642
643         memset(&flags, 0, sizeof(flags));
644
645         flags.update_rx_mode_config = 1;
646         flags.update_tx_mode_config = 1;
647         flags.rx_accept_filter = ECORE_ACCEPT_UCAST_MATCHED |
648                 ECORE_ACCEPT_MCAST_MATCHED |
649                 ECORE_ACCEPT_BCAST;
650
651         flags.tx_accept_filter = ECORE_ACCEPT_UCAST_MATCHED |
652                 ECORE_ACCEPT_MCAST_MATCHED |
653                 ECORE_ACCEPT_BCAST;
654
655         if (type == QED_FILTER_RX_MODE_TYPE_PROMISC) {
656                 flags.rx_accept_filter |= ECORE_ACCEPT_UCAST_UNMATCHED;
657                 if (IS_VF(edev)) {
658                         flags.tx_accept_filter |= ECORE_ACCEPT_UCAST_UNMATCHED;
659                         DP_INFO(edev, "Enabling Tx unmatched flag for VF\n");
660                 }
661         } else if (type == QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC) {
662                 flags.rx_accept_filter |= ECORE_ACCEPT_MCAST_UNMATCHED;
663         } else if (type == (QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC |
664                                 QED_FILTER_RX_MODE_TYPE_PROMISC)) {
665                 flags.rx_accept_filter |= ECORE_ACCEPT_UCAST_UNMATCHED |
666                         ECORE_ACCEPT_MCAST_UNMATCHED;
667         }
668
669         return ecore_filter_accept_cmd(edev, 0, flags, false, false,
670                         ECORE_SPQ_MODE_CB, NULL);
671 }
672
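/* Enable or disable VXLAN tunnel offload and update the tunnel
 * classification mode on every HW function.
 */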
673 static int
674 qede_vxlan_enable(struct rte_eth_dev *eth_dev, uint8_t clss,
675                   bool enable, bool mask)
676 {
677         struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
678         struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
679         enum _ecore_status_t rc = ECORE_INVAL;
680         struct ecore_ptt *p_ptt;
681         struct ecore_tunnel_info tunn;
682         struct ecore_hwfn *p_hwfn;
683         int i;
684
685         memset(&tunn, 0, sizeof(struct ecore_tunnel_info));
686         tunn.vxlan.b_update_mode = enable;
687         tunn.vxlan.b_mode_enabled = mask;
688         tunn.b_update_rx_cls = true;
689         tunn.b_update_tx_cls = true;
690         tunn.vxlan.tun_cls = clss;
691
692         for_each_hwfn(edev, i) {
693                 p_hwfn = &edev->hwfns[i];
694                 if (IS_PF(edev)) {
695                         p_ptt = ecore_ptt_acquire(p_hwfn);
696                         if (!p_ptt)
697                                 return -EAGAIN;
698                 } else {
699                         p_ptt = NULL;
700                 }
701                 rc = ecore_sp_pf_update_tunn_cfg(p_hwfn, p_ptt,
702                                 &tunn, ECORE_SPQ_MODE_CB, NULL);
703                 if (rc != ECORE_SUCCESS) {
704                         DP_ERR(edev, "Failed to update tunn_clss %u\n",
705                                         tunn.vxlan.tun_cls);
706                         if (IS_PF(edev))
707                                 ecore_ptt_release(p_hwfn, p_ptt);
708                         break;
709                 }
710         }
711
712         if (rc == ECORE_SUCCESS) {
713                 qdev->vxlan.enable = enable;
714                 qdev->vxlan.udp_port = (enable) ? QEDE_VXLAN_DEF_PORT : 0;
715                 DP_INFO(edev, "vxlan is %s\n", enable ? "enabled" : "disabled");
716         }
717
718         return rc;
719 }
720
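/* Add to or remove from the driver's unicast shadow list, rejecting
 * duplicate adds and removals of unknown entries.
 */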
721 static int
722 qede_ucast_filter(struct rte_eth_dev *eth_dev, struct ecore_filter_ucast *ucast,
723                   bool add)
724 {
725         struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
726         struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
727         struct qede_ucast_entry *tmp = NULL;
728         struct qede_ucast_entry *u;
729         struct ether_addr *mac_addr;
730
731         mac_addr  = (struct ether_addr *)ucast->mac;
732         if (add) {
733                 SLIST_FOREACH(tmp, &qdev->uc_list_head, list) {
734                         if ((memcmp(mac_addr, &tmp->mac,
735                                     ETHER_ADDR_LEN) == 0) &&
736                              ucast->vni == tmp->vni &&
737                              ucast->vlan == tmp->vlan) {
738                                 DP_ERR(edev, "Unicast MAC is already added"
739                                        " with vlan = %u, vni = %u\n",
740                                        ucast->vlan,  ucast->vni);
741                                 return -EEXIST;
742                         }
743                 }
744                 u = rte_malloc(NULL, sizeof(struct qede_ucast_entry),
745                                RTE_CACHE_LINE_SIZE);
746                 if (!u) {
747                         DP_ERR(edev, "Did not allocate memory for ucast\n");
748                         return -ENOMEM;
749                 }
750                 ether_addr_copy(mac_addr, &u->mac);
751                 u->vlan = ucast->vlan;
752                 u->vni = ucast->vni;
753                 SLIST_INSERT_HEAD(&qdev->uc_list_head, u, list);
754                 qdev->num_uc_addr++;
755         } else {
756                 SLIST_FOREACH(tmp, &qdev->uc_list_head, list) {
757                         if ((memcmp(mac_addr, &tmp->mac,
758                                     ETHER_ADDR_LEN) == 0) &&
759                             ucast->vlan == tmp->vlan      &&
760                             ucast->vni == tmp->vni)
761                                 break;
762                 }
763                 if (tmp == NULL) {
764                         DP_INFO(edev, "Unicast MAC is not found\n");
765                         return -EINVAL;
766                 }
767                 SLIST_REMOVE(&qdev->uc_list_head, tmp, qede_ucast_entry, list);
768                 qdev->num_uc_addr--;
769         }
770
771         return 0;
772 }
773
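/* Add to or remove from the driver's multicast shadow list. */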
774 static int
775 qede_mcast_filter(struct rte_eth_dev *eth_dev, struct ecore_filter_ucast *mcast,
776                   bool add)
777 {
778         struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
779         struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
780         struct ether_addr *mac_addr;
781         struct qede_mcast_entry *tmp = NULL;
782         struct qede_mcast_entry *m;
783
784         mac_addr  = (struct ether_addr *)mcast->mac;
785         if (add) {
786                 SLIST_FOREACH(tmp, &qdev->mc_list_head, list) {
787                         if (memcmp(mac_addr, &tmp->mac, ETHER_ADDR_LEN) == 0) {
788                                 DP_ERR(edev,
789                                         "Multicast MAC is already added\n");
790                                 return -EEXIST;
791                         }
792                 }
793                 m = rte_malloc(NULL, sizeof(struct qede_mcast_entry),
794                         RTE_CACHE_LINE_SIZE);
795                 if (!m) {
796                         DP_ERR(edev,
797                                 "Did not allocate memory for mcast\n");
798                         return -ENOMEM;
799                 }
800                 ether_addr_copy(mac_addr, &m->mac);
801                 SLIST_INSERT_HEAD(&qdev->mc_list_head, m, list);
802                 qdev->num_mc_addr++;
803         } else {
804                 SLIST_FOREACH(tmp, &qdev->mc_list_head, list) {
805                         if (memcmp(mac_addr, &tmp->mac, ETHER_ADDR_LEN) == 0)
806                                 break;
807                 }
808                 if (tmp == NULL) {
809                         DP_INFO(edev, "Multicast mac is not found\n");
810                         return -EINVAL;
811                 }
812                 SLIST_REMOVE(&qdev->mc_list_head, tmp,
813                              qede_mcast_entry, list);
814                 qdev->num_mc_addr--;
815         }
816
817         return 0;
818 }
819
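/* Dispatch a MAC filter request: multicast addresses are replayed to the
 * device as one mcast command built from the shadow list, unicast addresses
 * are programmed individually.
 */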
820 static enum _ecore_status_t
821 qede_mac_int_ops(struct rte_eth_dev *eth_dev, struct ecore_filter_ucast *ucast,
822                  bool add)
823 {
824         struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
825         struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
826         enum _ecore_status_t rc;
827         struct ecore_filter_mcast mcast;
828         struct qede_mcast_entry *tmp;
829         uint16_t j = 0;
830
831         /* Multicast */
832         if (is_multicast_ether_addr((struct ether_addr *)ucast->mac)) {
833                 if (add) {
834                         if (qdev->num_mc_addr >= ECORE_MAX_MC_ADDRS) {
835                                 DP_ERR(edev,
836                                        "Mcast filter table limit exceeded, "
837                                        "Please enable mcast promisc mode\n");
838                                 return -ECORE_INVAL;
839                         }
840                 }
841                 rc = qede_mcast_filter(eth_dev, ucast, add);
842                 if (rc == 0) {
843                         DP_INFO(edev, "num_mc_addrs = %u\n", qdev->num_mc_addr);
844                         memset(&mcast, 0, sizeof(mcast));
845                         mcast.num_mc_addrs = qdev->num_mc_addr;
846                         mcast.opcode = ECORE_FILTER_ADD;
847                         SLIST_FOREACH(tmp, &qdev->mc_list_head, list) {
848                                 ether_addr_copy(&tmp->mac,
849                                         (struct ether_addr *)&mcast.mac[j]);
850                                 j++;
851                         }
852                         rc = ecore_filter_mcast_cmd(edev, &mcast,
853                                                     ECORE_SPQ_MODE_CB, NULL);
854                 }
855                 if (rc != ECORE_SUCCESS) {
856                         DP_ERR(edev, "Failed to add multicast filter"
857                                " rc = %d, op = %d\n", rc, add);
858                 }
859         } else { /* Unicast */
860                 if (add) {
861                         if (qdev->num_uc_addr >=
862                             qdev->dev_info.num_mac_filters) {
863                                 DP_ERR(edev,
864                                        "Ucast filter table limit exceeded,"
865                                        " Please enable promisc mode\n");
866                                 return -ECORE_INVAL;
867                         }
868                 }
869                 rc = qede_ucast_filter(eth_dev, ucast, add);
870                 if (rc == 0)
871                         rc = ecore_filter_ucast_cmd(edev, ucast,
872                                                     ECORE_SPQ_MODE_CB, NULL);
873                 if (rc != ECORE_SUCCESS) {
874                         DP_ERR(edev, "MAC filter failed, rc = %d, op = %d\n",
875                                rc, add);
876                 }
877         }
878
879         return rc;
880 }
881
882 static int
883 qede_mac_addr_add(struct rte_eth_dev *eth_dev, struct ether_addr *mac_addr,
884                   __rte_unused uint32_t index, __rte_unused uint32_t pool)
885 {
886         struct ecore_filter_ucast ucast;
887         int re;
888
889         qede_set_ucast_cmn_params(&ucast);
890         ucast.type = ECORE_FILTER_MAC;
891         ether_addr_copy(mac_addr, (struct ether_addr *)&ucast.mac);
892         re = (int)qede_mac_int_ops(eth_dev, &ucast, 1);
893         return re;
894 }
895
896 static void
897 qede_mac_addr_remove(struct rte_eth_dev *eth_dev, uint32_t index)
898 {
899         struct qede_dev *qdev = eth_dev->data->dev_private;
900         struct ecore_dev *edev = &qdev->edev;
901         struct ecore_filter_ucast ucast;
902
903         PMD_INIT_FUNC_TRACE(edev);
904
905         if (index >= qdev->dev_info.num_mac_filters) {
906                 DP_ERR(edev, "Index %u is above MAC filter limit %u\n",
907                        index, qdev->dev_info.num_mac_filters);
908                 return;
909         }
910
911         qede_set_ucast_cmn_params(&ucast);
912         ucast.opcode = ECORE_FILTER_REMOVE;
913         ucast.type = ECORE_FILTER_MAC;
914
915         /* Use the index maintained by rte */
916         ether_addr_copy(&eth_dev->data->mac_addrs[index],
917                         (struct ether_addr *)&ucast.mac);
918
919         ecore_filter_ucast_cmd(edev, &ucast, ECORE_SPQ_MODE_CB, NULL);
920 }
921
922 static void
923 qede_mac_addr_set(struct rte_eth_dev *eth_dev, struct ether_addr *mac_addr)
924 {
925         struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
926         struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
927
928         if (IS_VF(edev) && !ecore_vf_check_mac(ECORE_LEADING_HWFN(edev),
929                                                mac_addr->addr_bytes)) {
930                 DP_ERR(edev, "Setting MAC address is not allowed\n");
931                 ether_addr_copy(&qdev->primary_mac,
932                                 &eth_dev->data->mac_addrs[0]);
933                 return;
934         }
935
936         qede_mac_addr_add(eth_dev, mac_addr, 0, 0);
937 }
938
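/* Toggle accept-any-VLAN on the vport via vport-update. */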
939 static void qede_config_accept_any_vlan(struct qede_dev *qdev, bool flg)
940 {
941         struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
942         struct ecore_sp_vport_update_params params;
943         struct ecore_hwfn *p_hwfn;
944         uint8_t i;
945         int rc;
946
947         memset(&params, 0, sizeof(struct ecore_sp_vport_update_params));
948         params.vport_id = 0;
949         params.update_accept_any_vlan_flg = 1;
950         params.accept_any_vlan = flg;
951         for_each_hwfn(edev, i) {
952                 p_hwfn = &edev->hwfns[i];
953                 params.opaque_fid = p_hwfn->hw_info.opaque_fid;
954                 rc = ecore_sp_vport_update(p_hwfn, &params,
955                                 ECORE_SPQ_MODE_EBLOCK, NULL);
956                 if (rc != ECORE_SUCCESS) {
957                         DP_ERR(edev, "Failed to configure accept-any-vlan\n");
958                         return;
959                 }
960         }
961
962         DP_INFO(edev, "%s accept-any-vlan\n", flg ? "enabled" : "disabled");
963 }
964
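/* Enable or disable inner VLAN stripping via vport-update. */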
965 static int qede_vlan_stripping(struct rte_eth_dev *eth_dev, bool flg)
966 {
967         struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
968         struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
969         struct ecore_sp_vport_update_params params;
970         struct ecore_hwfn *p_hwfn;
971         uint8_t i;
972         int rc;
973
974         memset(&params, 0, sizeof(struct ecore_sp_vport_update_params));
975         params.vport_id = 0;
976         params.update_inner_vlan_removal_flg = 1;
977         params.inner_vlan_removal_flg = flg;
978         for_each_hwfn(edev, i) {
979                 p_hwfn = &edev->hwfns[i];
980                 params.opaque_fid = p_hwfn->hw_info.opaque_fid;
981                 rc = ecore_sp_vport_update(p_hwfn, &params,
982                                 ECORE_SPQ_MODE_EBLOCK, NULL);
983                 if (rc != ECORE_SUCCESS) {
984                         DP_ERR(edev, "Failed to update vport\n");
985                         return -1;
986                 }
987         }
988
989         DP_INFO(edev, "VLAN stripping %s\n", flg ? "enabled" : "disabled");
990         return 0;
991 }
992
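/* Add or remove a VLAN filter. When the HW filter table is exhausted,
 * accept-any-VLAN is enabled instead of failing the request.
 */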
993 static int qede_vlan_filter_set(struct rte_eth_dev *eth_dev,
994                                 uint16_t vlan_id, int on)
995 {
996         struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
997         struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
998         struct qed_dev_eth_info *dev_info = &qdev->dev_info;
999         struct qede_vlan_entry *tmp = NULL;
1000         struct qede_vlan_entry *vlan;
1001         struct ecore_filter_ucast ucast;
1002         int rc;
1003
1004         if (on) {
1005                 if (qdev->configured_vlans == dev_info->num_vlan_filters) {
1006                         DP_ERR(edev, "Reached max VLAN filter limit,"
1007                                       " enabling accept_any_vlan\n");
1008                         qede_config_accept_any_vlan(qdev, true);
1009                         return 0;
1010                 }
1011
1012                 SLIST_FOREACH(tmp, &qdev->vlan_list_head, list) {
1013                         if (tmp->vid == vlan_id) {
1014                                 DP_ERR(edev, "VLAN %u already configured\n",
1015                                        vlan_id);
1016                                 return -EEXIST;
1017                         }
1018                 }
1019
1020                 vlan = rte_malloc(NULL, sizeof(struct qede_vlan_entry),
1021                                   RTE_CACHE_LINE_SIZE);
1022
1023                 if (!vlan) {
1024                         DP_ERR(edev, "Did not allocate memory for VLAN\n");
1025                         return -ENOMEM;
1026                 }
1027
1028                 qede_set_ucast_cmn_params(&ucast);
1029                 ucast.opcode = ECORE_FILTER_ADD;
1030                 ucast.type = ECORE_FILTER_VLAN;
1031                 ucast.vlan = vlan_id;
1032                 rc = ecore_filter_ucast_cmd(edev, &ucast, ECORE_SPQ_MODE_CB,
1033                                             NULL);
1034                 if (rc != 0) {
1035                         DP_ERR(edev, "Failed to add VLAN %u rc %d\n", vlan_id,
1036                                rc);
1037                         rte_free(vlan);
1038                 } else {
1039                         vlan->vid = vlan_id;
1040                         SLIST_INSERT_HEAD(&qdev->vlan_list_head, vlan, list);
1041                         qdev->configured_vlans++;
1042                         DP_INFO(edev, "VLAN %u added, configured_vlans %u\n",
1043                                 vlan_id, qdev->configured_vlans);
1044                 }
1045         } else {
1046                 SLIST_FOREACH(tmp, &qdev->vlan_list_head, list) {
1047                         if (tmp->vid == vlan_id)
1048                                 break;
1049                 }
1050
1051                 if (!tmp) {
1052                         if (qdev->configured_vlans == 0) {
1053                                 DP_INFO(edev,
1054                                         "No VLAN filters configured yet\n");
1055                                 return 0;
1056                         }
1057
1058                         DP_ERR(edev, "VLAN %u not configured\n", vlan_id);
1059                         return -EINVAL;
1060                 }
1061
1062                 SLIST_REMOVE(&qdev->vlan_list_head, tmp, qede_vlan_entry, list);
1063
1064                 qede_set_ucast_cmn_params(&ucast);
1065                 ucast.opcode = ECORE_FILTER_REMOVE;
1066                 ucast.type = ECORE_FILTER_VLAN;
1067                 ucast.vlan = vlan_id;
1068                 rc = ecore_filter_ucast_cmd(edev, &ucast, ECORE_SPQ_MODE_CB,
1069                                             NULL);
1070                 if (rc != 0) {
1071                         DP_ERR(edev, "Failed to delete VLAN %u rc %d\n",
1072                                vlan_id, rc);
1073                 } else {
1074                         qdev->configured_vlans--;
1075                         DP_INFO(edev, "VLAN %u removed configured_vlans %u\n",
1076                                 vlan_id, qdev->configured_vlans);
1077                 }
1078         }
1079
1080         return rc;
1081 }
1082
1083 static int qede_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask)
1084 {
1085         struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
1086         struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
1087         struct rte_eth_rxmode *rxmode = &eth_dev->data->dev_conf.rxmode;
1088
1089         if (mask & ETH_VLAN_STRIP_MASK) {
1090                 if (rxmode->hw_vlan_strip)
1091                         (void)qede_vlan_stripping(eth_dev, 1);
1092                 else
1093                         (void)qede_vlan_stripping(eth_dev, 0);
1094         }
1095
1096         if (mask & ETH_VLAN_FILTER_MASK) {
1097                 /* VLAN filtering kicks in when a VLAN is added */
1098                 if (rxmode->hw_vlan_filter) {
1099                         qede_vlan_filter_set(eth_dev, 0, 1);
1100                 } else {
1101                         if (qdev->configured_vlans > 1) { /* Excluding VLAN0 */
1102                                 DP_ERR(edev,
1103                                   " Please remove existing VLAN filters"
1104                                   " before disabling VLAN filtering\n");
1105                                 /* Signal app that VLAN filtering is still
1106                                  * enabled
1107                                  */
1108                                 rxmode->hw_vlan_filter = true;
1109                         } else {
1110                                 qede_vlan_filter_set(eth_dev, 0, 0);
1111                         }
1112                 }
1113         }
1114
1115         if (mask & ETH_VLAN_EXTEND_MASK)
1116                 DP_INFO(edev, "No offloads are supported with VLAN Q-in-Q"
1117                         " and classification is based on outer tag only\n");
1118
1119         DP_INFO(edev, "vlan offload mask %d vlan-strip %d vlan-filter %d\n",
1120                 mask, rxmode->hw_vlan_strip, rxmode->hw_vlan_filter);
1121
1122         return 0;
1123 }
1124
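/* Fill the default RSS hash key with pseudo-random words. */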
1125 static void qede_prandom_bytes(uint32_t *buff)
1126 {
1127         uint8_t i;
1128
1129         srand((unsigned int)time(NULL));
1130         for (i = 0; i < ECORE_RSS_KEY_SIZE; i++)
1131                 buff[i] = rand();
1132 }
1133
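/* Apply the RSS configuration from dev_conf: the hash key (a driver default
 * is generated if none was supplied) and a round-robin redirection table.
 */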
1134 int qede_config_rss(struct rte_eth_dev *eth_dev)
1135 {
1136         struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
1137 #ifdef RTE_LIBRTE_QEDE_DEBUG_INFO
1138         struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
1139 #endif
1140         uint32_t def_rss_key[ECORE_RSS_KEY_SIZE];
1141         struct rte_eth_rss_reta_entry64 reta_conf[2];
1142         struct rte_eth_rss_conf rss_conf;
1143         uint32_t i, id, pos, q;
1144
1145         rss_conf = eth_dev->data->dev_conf.rx_adv_conf.rss_conf;
1146         if (!rss_conf.rss_key) {
1147                 DP_INFO(edev, "Applying driver default key\n");
1148                 rss_conf.rss_key_len = ECORE_RSS_KEY_SIZE * sizeof(uint32_t);
1149                 qede_prandom_bytes(&def_rss_key[0]);
1150                 rss_conf.rss_key = (uint8_t *)&def_rss_key[0];
1151         }
1152
1153         /* Configure RSS hash */
1154         if (qede_rss_hash_update(eth_dev, &rss_conf))
1155                 return -EINVAL;
1156
1157         /* Configure default RETA */
1158         memset(reta_conf, 0, sizeof(reta_conf));
1159         for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++)
1160                 reta_conf[i / RTE_RETA_GROUP_SIZE].mask = UINT64_MAX;
1161
1162         for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++) {
1163                 id = i / RTE_RETA_GROUP_SIZE;
1164                 pos = i % RTE_RETA_GROUP_SIZE;
1165                 q = i % QEDE_RSS_COUNT(qdev);
1166                 reta_conf[id].reta[pos] = q;
1167         }
1168         if (qede_rss_reta_update(eth_dev, &reta_conf[0],
1169                                  ECORE_RSS_IND_TABLE_SIZE))
1170                 return -EINVAL;
1171
1172         return 0;
1173 }
1174
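/* Resume fastpath processing on every HW function. */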
1175 static void qede_fastpath_start(struct ecore_dev *edev)
1176 {
1177         struct ecore_hwfn *p_hwfn;
1178         int i;
1179
1180         for_each_hwfn(edev, i) {
1181                 p_hwfn = &edev->hwfns[i];
1182                 ecore_hw_start_fastpath(p_hwfn);
1183         }
1184 }
1185
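/* dev_start: apply a pending MTU change, enable LRO if requested, start the
 * Rx/Tx queues, configure RSS, activate the vport and bring the link up.
 */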
1186 static int qede_dev_start(struct rte_eth_dev *eth_dev)
1187 {
1188         struct rte_eth_rxmode *rxmode = &eth_dev->data->dev_conf.rxmode;
1189         struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
1190         struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
1191
1192         PMD_INIT_FUNC_TRACE(edev);
1193
1194         /* Update MTU only if it has changed */
1195         if (qdev->mtu != qdev->new_mtu) {
1196                 if (qede_update_mtu(eth_dev, qdev->new_mtu))
1197                         goto err;
1198                 qdev->mtu = qdev->new_mtu;
1199         }
1200
1201         /* Configure TPA parameters */
1202         if (rxmode->enable_lro) {
1203                 if (qede_enable_tpa(eth_dev, true))
1204                         return -EINVAL;
1205                 /* Enable scatter mode for LRO */
1206                 if (!rxmode->enable_scatter)
1207                         eth_dev->data->scattered_rx = 1;
1208         }
1209
1210         /* Start queues */
1211         if (qede_start_queues(eth_dev))
1212                 goto err;
1213
1214         /* Newer SR-IOV PF driver expects RX/TX queues to be started before
1215          * enabling RSS. Hence RSS configuration is deferred up to this point.
1216          * Also, we would like to retain similar behavior in the PF case, so we
1217          * don't do a PF/VF-specific check here.
1218          */
1219         if (rxmode->mq_mode == ETH_MQ_RX_RSS)
1220                 if (qede_config_rss(eth_dev))
1221                         goto err;
1222
1223         /* Enable vport */
1224         if (qede_activate_vport(eth_dev, true))
1225                 goto err;
1226
1227         /* Bring-up the link */
1228         qede_dev_set_link_state(eth_dev, true);
1229
1230         /* Update link status */
1231         qede_link_update(eth_dev, 0);
1232
1233         /* Start/resume traffic */
1234         qede_fastpath_start(edev);
1235
1236         DP_INFO(edev, "Device started\n");
1237
1238         return 0;
1239 err:
1240         DP_ERR(edev, "Device start failed\n");
1241         return -1; /* common error code is < 0 */
1242 }
1243
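/* dev_stop: deactivate the vport, disable LRO, stop the queues and bring
 * the link down.
 */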
1244 static void qede_dev_stop(struct rte_eth_dev *eth_dev)
1245 {
1246         struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
1247         struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
1248
1249         PMD_INIT_FUNC_TRACE(edev);
1250
1251         /* Disable vport */
1252         if (qede_activate_vport(eth_dev, false))
1253                 return;
1254
1255         if (qdev->enable_lro)
1256                 qede_enable_tpa(eth_dev, false);
1257
1258         /* Stop queues */
1259         qede_stop_queues(eth_dev);
1260
1261         /* Disable traffic */
1262         ecore_hw_stop_fastpath(edev); /* TBD - loop */
1263
1264         /* Bring the link down */
1265         qede_dev_set_link_state(eth_dev, false);
1266
1267         DP_INFO(edev, "Device is stopped\n");
1268 }
1269
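/* Device arguments. These are passed per port in the PCI whitelist string,
 * for example (the PCI address below is only illustrative):
 *   -w 05:00.0,vf_txswitch=0
 * Currently only VF tx-switching can be controlled this way.
 */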
1270 #define QEDE_TX_SWITCHING               "vf_txswitch"
1271
1272 const char *valid_args[] = {
1273         QEDE_TX_SWITCHING,
1274         NULL,
1275 };
1276
1277 static int qede_args_check(const char *key, const char *val, void *opaque)
1278 {
1279         unsigned long tmp;
1280         int ret = 0;
1281         struct rte_eth_dev *eth_dev = opaque;
1282         struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
1283 #ifdef RTE_LIBRTE_QEDE_DEBUG_INFO
1284         struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
1285 #endif
1286
1287         errno = 0;
1288         tmp = strtoul(val, NULL, 0);
1289         if (errno) {
1290                 DP_INFO(edev, "%s: \"%s\" is not a valid integer\n", key, val);
1291                 return errno;
1292         }
1293
1294         if (strcmp(QEDE_TX_SWITCHING, key) == 0)
1295                 qdev->enable_tx_switching = !!tmp;
1296
1297         return ret;
1298 }
1299
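/* Parse any devargs attached to the PCI device and apply the recognized
 * keys to the adapter.
 */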
1300 static int qede_args(struct rte_eth_dev *eth_dev)
1301 {
1302         struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
1303         struct rte_kvargs *kvlist;
1304         struct rte_devargs *devargs;
1305         int ret;
1306         int i;
1307
1308         devargs = pci_dev->device.devargs;
1309         if (!devargs)
1310                 return 0; /* return success */
1311
1312         kvlist = rte_kvargs_parse(devargs->args, valid_args);
1313         if (kvlist == NULL)
1314                 return -EINVAL;
1315
1316          /* Process parameters. */
1317         for (i = 0; (valid_args[i] != NULL); ++i) {
1318                 if (rte_kvargs_count(kvlist, valid_args[i])) {
1319                         ret = rte_kvargs_process(kvlist, valid_args[i],
1320                                                  qede_args_check, eth_dev);
1321                         if (ret != ECORE_SUCCESS) {
1322                                 rte_kvargs_free(kvlist);
1323                                 return ret;
1324                         }
1325                 }
1326         }
1327         rte_kvargs_free(kvlist);
1328
1329         return 0;
1330 }
1331
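/* dev_configure: validate the queue layout (100G/CMT needs an even number of
 * Rx/Tx queues), parse devargs, sanity-check rxmode flags, reallocate fastpath
 * resources when the queue count changes and program the initial MTU.
 */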
1332 static int qede_dev_configure(struct rte_eth_dev *eth_dev)
1333 {
1334         struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
1335         struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
1336         struct rte_eth_rxmode *rxmode = &eth_dev->data->dev_conf.rxmode;
1337         int ret;
1338
1339         PMD_INIT_FUNC_TRACE(edev);
1340
1341         /* Check requirements for 100G mode */
1342         if (ECORE_IS_CMT(edev)) {
1343                 if (eth_dev->data->nb_rx_queues < 2 ||
1344                                 eth_dev->data->nb_tx_queues < 2) {
1345                         DP_ERR(edev, "100G mode needs min. 2 RX/TX queues\n");
1346                         return -EINVAL;
1347                 }
1348
1349                 if ((eth_dev->data->nb_rx_queues % 2 != 0) ||
1350                                 (eth_dev->data->nb_tx_queues % 2 != 0)) {
1351                         DP_ERR(edev,
1352                                         "100G mode needs even no. of RX/TX queues\n");
1353                         return -EINVAL;
1354                 }
1355         }
1356
1357         /* We need to have min 1 RX queue. There is no min check in
1358          * rte_eth_dev_configure(), so we are checking it here.
1359          */
1360         if (eth_dev->data->nb_rx_queues == 0) {
1361                 DP_ERR(edev, "Minimum one RX queue is required\n");
1362                 return -EINVAL;
1363         }
1364
1365         /* Enable Tx switching by default */
1366         qdev->enable_tx_switching = 1;
1367
1368         /* Parse devargs and fix up rxmode */
1369         if (qede_args(eth_dev))
1370                 return -ENOTSUP;
1371
1372         /* Sanity checks and throw warnings */
1373         if (rxmode->enable_scatter)
1374                 eth_dev->data->scattered_rx = 1;
1375
1376         if (!rxmode->hw_strip_crc)
1377                 DP_INFO(edev, "L2 CRC stripping is always enabled in hw\n");
1378
1379         if (!rxmode->hw_ip_checksum)
1380                 DP_INFO(edev, "IP/UDP/TCP checksum offload is always enabled "
1381                                 "in hw\n");
1382         if (rxmode->header_split)
1383                 DP_INFO(edev, "Header split enable is not supported\n");
1384         if (!(rxmode->mq_mode == ETH_MQ_RX_NONE || rxmode->mq_mode ==
1385                                 ETH_MQ_RX_RSS)) {
1386                 DP_ERR(edev, "Unsupported multi-queue mode\n");
1387                 return -ENOTSUP;
1388         }
1389         /* Flow director mode check */
1390         if (qede_check_fdir_support(eth_dev))
1391                 return -ENOTSUP;
1392
1393         /* Deallocate resources if held previously. It is needed only if the
1394          * queue count has been changed from the previous configuration. If it
1395          * is going to change, then RX/TX queue setup will be called again and
1396          * the fastpath pointers will be reinitialized there.
1397          */
1398         if (qdev->num_tx_queues != eth_dev->data->nb_tx_queues ||
1399             qdev->num_rx_queues != eth_dev->data->nb_rx_queues) {
1400                 qede_dealloc_fp_resc(eth_dev);
1401                 /* Proceed with updated queue count */
1402                 qdev->num_tx_queues = eth_dev->data->nb_tx_queues;
1403                 qdev->num_rx_queues = eth_dev->data->nb_rx_queues;
1404                 if (qede_alloc_fp_resc(qdev))
1405                         return -ENOMEM;
1406         }
1407
1408         /* If jumbo enabled adjust MTU */
1409         if (eth_dev->data->dev_conf.rxmode.jumbo_frame)
1410                 eth_dev->data->mtu =
1411                                 eth_dev->data->dev_conf.rxmode.max_rx_pkt_len -
1412                                 ETHER_HDR_LEN - ETHER_CRC_LEN;
1413
1414         /* VF's MTU has to be set using vport-start, whereas
1415          * PF's MTU can be updated via vport-update.
1416          */
1417         if (IS_VF(edev)) {
1418                 if (qede_start_vport(qdev, eth_dev->data->mtu))
1419                         return -1;
1420         } else {
1421                 if (qede_update_mtu(eth_dev, eth_dev->data->mtu))
1422                         return -1;
1423         }
1424
1425         qdev->mtu = eth_dev->data->mtu;
1426         qdev->new_mtu = qdev->mtu;
1427
1428         /* Enable VLAN offloads by default */
1429         ret = qede_vlan_offload_set(eth_dev, ETH_VLAN_STRIP_MASK  |
1430                         ETH_VLAN_FILTER_MASK |
1431                         ETH_VLAN_EXTEND_MASK);
1432         if (ret)
1433                 return ret;
1434
1435         DP_INFO(edev, "Device configured with RSS=%d TSS=%d\n",
1436                         QEDE_RSS_COUNT(qdev), QEDE_TSS_COUNT(qdev));
1437
1438         return 0;
1439 }
1440
1441 /* Info about HW descriptor ring limitations */
1442 static const struct rte_eth_desc_lim qede_rx_desc_lim = {
1443         .nb_max = 0x8000, /* 32K */
1444         .nb_min = 128,
1445         .nb_align = 128 /* lowest common multiple */
1446 };
1447
1448 static const struct rte_eth_desc_lim qede_tx_desc_lim = {
1449         .nb_max = 0x8000, /* 32K */
1450         .nb_min = 256,
1451         .nb_align = 256,
1452         .nb_seg_max = ETH_TX_MAX_BDS_PER_LSO_PACKET,
1453         .nb_mtu_seg_max = ETH_TX_MAX_BDS_PER_NON_LSO_PACKET
1454 };
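/* Note: nb_align means the descriptor count passed to the queue setup
 * functions is expected to be a multiple of 128 (Rx) or 256 (Tx);
 * applications can use rte_eth_dev_adjust_nb_rx_tx_desc() to round a
 * requested count against these limits.
 */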
1455
1456 static void
1457 qede_dev_info_get(struct rte_eth_dev *eth_dev,
1458                   struct rte_eth_dev_info *dev_info)
1459 {
1460         struct qede_dev *qdev = eth_dev->data->dev_private;
1461         struct ecore_dev *edev = &qdev->edev;
1462         struct qed_link_output link;
1463         uint32_t speed_cap = 0;
1464
1465         PMD_INIT_FUNC_TRACE(edev);
1466
1467         dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
1468         dev_info->min_rx_bufsize = (uint32_t)QEDE_MIN_RX_BUFF_SIZE;
1469         dev_info->max_rx_pktlen = (uint32_t)ETH_TX_MAX_NON_LSO_PKT_LEN;
1470         dev_info->rx_desc_lim = qede_rx_desc_lim;
1471         dev_info->tx_desc_lim = qede_tx_desc_lim;
1472
1473         if (IS_PF(edev))
1474                 dev_info->max_rx_queues = (uint16_t)RTE_MIN(
1475                         QEDE_MAX_RSS_CNT(qdev), QEDE_PF_NUM_CONNS / 2);
1476         else
1477                 dev_info->max_rx_queues = (uint16_t)RTE_MIN(
1478                         QEDE_MAX_RSS_CNT(qdev), ECORE_MAX_VF_CHAINS_PER_PF);
1479         dev_info->max_tx_queues = dev_info->max_rx_queues;
1480
1481         dev_info->max_mac_addrs = qdev->dev_info.num_mac_filters;
1482         dev_info->max_vfs = 0;
1483         dev_info->reta_size = ECORE_RSS_IND_TABLE_SIZE;
1484         dev_info->hash_key_size = ECORE_RSS_KEY_SIZE * sizeof(uint32_t);
1485         dev_info->flow_type_rss_offloads = (uint64_t)QEDE_RSS_OFFLOAD_ALL;
1486
1487         dev_info->default_txconf = (struct rte_eth_txconf) {
1488                 .txq_flags = QEDE_TXQ_FLAGS,
1489         };
1490
1491         dev_info->rx_offload_capa = (DEV_RX_OFFLOAD_VLAN_STRIP  |
1492                                      DEV_RX_OFFLOAD_IPV4_CKSUM  |
1493                                      DEV_RX_OFFLOAD_UDP_CKSUM   |
1494                                      DEV_RX_OFFLOAD_TCP_CKSUM   |
1495                                      DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
1496                                      DEV_RX_OFFLOAD_TCP_LRO);
1497
1498         dev_info->tx_offload_capa = (DEV_TX_OFFLOAD_VLAN_INSERT |
1499                                      DEV_TX_OFFLOAD_IPV4_CKSUM  |
1500                                      DEV_TX_OFFLOAD_UDP_CKSUM   |
1501                                      DEV_TX_OFFLOAD_TCP_CKSUM   |
1502                                      DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
1503                                      DEV_TX_OFFLOAD_TCP_TSO |
1504                                      DEV_TX_OFFLOAD_VXLAN_TNL_TSO);
1505
1506         memset(&link, 0, sizeof(struct qed_link_output));
1507         qdev->ops->common->get_link(edev, &link);
1508         if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
1509                 speed_cap |= ETH_LINK_SPEED_1G;
1510         if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
1511                 speed_cap |= ETH_LINK_SPEED_10G;
1512         if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G)
1513                 speed_cap |= ETH_LINK_SPEED_25G;
1514         if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G)
1515                 speed_cap |= ETH_LINK_SPEED_40G;
1516         if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
1517                 speed_cap |= ETH_LINK_SPEED_50G;
1518         if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G)
1519                 speed_cap |= ETH_LINK_SPEED_100G;
1520         dev_info->speed_capa = speed_cap;
1521 }
1522
1523 /* return 0 means link status changed, -1 means not changed */
1524 int
1525 qede_link_update(struct rte_eth_dev *eth_dev, __rte_unused int wait_to_complete)
1526 {
1527         struct qede_dev *qdev = eth_dev->data->dev_private;
1528         struct ecore_dev *edev = &qdev->edev;
1529         uint16_t link_duplex;
1530         struct qed_link_output link;
1531         struct rte_eth_link *curr = &eth_dev->data->dev_link;
1532
1533         memset(&link, 0, sizeof(struct qed_link_output));
1534         qdev->ops->common->get_link(edev, &link);
1535
1536         /* Link Speed */
1537         curr->link_speed = link.speed;
1538
1539         /* Link Mode */
1540         switch (link.duplex) {
1541         case QEDE_DUPLEX_HALF:
1542                 link_duplex = ETH_LINK_HALF_DUPLEX;
1543                 break;
1544         case QEDE_DUPLEX_FULL:
1545                 link_duplex = ETH_LINK_FULL_DUPLEX;
1546                 break;
1547         case QEDE_DUPLEX_UNKNOWN:
1548         default:
1549                 link_duplex = -1;
1550         }
1551         curr->link_duplex = link_duplex;
1552
1553         /* Link Status */
1554         curr->link_status = (link.link_up) ? ETH_LINK_UP : ETH_LINK_DOWN;
1555
1556         /* AN */
1557         curr->link_autoneg = (link.supported_caps & QEDE_SUPPORTED_AUTONEG) ?
1558                              ETH_LINK_AUTONEG : ETH_LINK_FIXED;
1559
1560         DP_INFO(edev, "Link - Speed %u Mode %u AN %u Status %u\n",
1561                 curr->link_speed, curr->link_duplex,
1562                 curr->link_autoneg, curr->link_status);
1563
1564         /* return 0 means link status changed, -1 means not changed */
1565         return ((curr->link_status == link.link_up) ? -1 : 0);
1566 }
1567
1568 static void qede_promiscuous_enable(struct rte_eth_dev *eth_dev)
1569 {
1570 #ifdef RTE_LIBRTE_QEDE_DEBUG_INIT
1571         struct qede_dev *qdev = eth_dev->data->dev_private;
1572         struct ecore_dev *edev = &qdev->edev;
1573
1574         PMD_INIT_FUNC_TRACE(edev);
1575 #endif
1576
1577         enum qed_filter_rx_mode_type type = QED_FILTER_RX_MODE_TYPE_PROMISC;
1578
1579         if (rte_eth_allmulticast_get(eth_dev->data->port_id) == 1)
1580                 type |= QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC;
1581
1582         qed_configure_filter_rx_mode(eth_dev, type);
1583 }
1584
1585 static void qede_promiscuous_disable(struct rte_eth_dev *eth_dev)
1586 {
1587 #ifdef RTE_LIBRTE_QEDE_DEBUG_INIT
1588         struct qede_dev *qdev = eth_dev->data->dev_private;
1589         struct ecore_dev *edev = &qdev->edev;
1590
1591         PMD_INIT_FUNC_TRACE(edev);
1592 #endif
1593
1594         if (rte_eth_allmulticast_get(eth_dev->data->port_id) == 1)
1595                 qed_configure_filter_rx_mode(eth_dev,
1596                                 QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC);
1597         else
1598                 qed_configure_filter_rx_mode(eth_dev,
1599                                 QED_FILTER_RX_MODE_TYPE_REGULAR);
1600 }
1601
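/* Periodic slowpath status-block poll used on CMT (100G) devices: the
 * callback below services both hwfns and then re-arms itself every
 * timer_period seconds through an EAL alarm; qede_dev_close() cancels the
 * alarm only when ECORE_IS_CMT(edev) is true.
 */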
1602 static void qede_poll_sp_sb_cb(void *param)
1603 {
1604         struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param;
1605         struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
1606         struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
1607         int rc;
1608
1609         qede_interrupt_action(ECORE_LEADING_HWFN(edev));
1610         qede_interrupt_action(&edev->hwfns[1]);
1611
1612         rc = rte_eal_alarm_set(timer_period * US_PER_S,
1613                                qede_poll_sp_sb_cb,
1614                                (void *)eth_dev);
1615         if (rc != 0) {
1616                 DP_ERR(edev, "Unable to start periodic"
1617                              " timer rc %d\n", rc);
1618                 assert(false && "Unable to start periodic timer");
1619         }
1620 }
1621
1622 static void qede_dev_close(struct rte_eth_dev *eth_dev)
1623 {
1624         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
1625         struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
1626         struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
1627
1628         PMD_INIT_FUNC_TRACE(edev);
1629
1630         /* dev_stop() shall clean up fastpath resources in hw, but without
1631          * releasing dma memories and sw structures, so that dev_start() can
1632          * be called by the app without reconfiguration. However, dev_close()
1633          * releases all the resources and the device can be brought up freshly.
1634          */
1635         if (eth_dev->data->dev_started)
1636                 qede_dev_stop(eth_dev);
1637
1638         qede_stop_vport(edev);
1639         qede_fdir_dealloc_resc(eth_dev);
1640         qede_dealloc_fp_resc(eth_dev);
1641
1642         eth_dev->data->nb_rx_queues = 0;
1643         eth_dev->data->nb_tx_queues = 0;
1644
1645         qdev->ops->common->slowpath_stop(edev);
1646         qdev->ops->common->remove(edev);
1647         rte_intr_disable(&pci_dev->intr_handle);
1648         rte_intr_callback_unregister(&pci_dev->intr_handle,
1649                                      qede_interrupt_handler, (void *)eth_dev);
1650         if (ECORE_IS_CMT(edev))
1651                 rte_eal_alarm_cancel(qede_poll_sp_sb_cb, (void *)eth_dev);
1652 }
1653
1654 static int
1655 qede_get_stats(struct rte_eth_dev *eth_dev, struct rte_eth_stats *eth_stats)
1656 {
1657         struct qede_dev *qdev = eth_dev->data->dev_private;
1658         struct ecore_dev *edev = &qdev->edev;
1659         struct ecore_eth_stats stats;
1660         unsigned int i = 0, j = 0, qid;
1661         unsigned int rxq_stat_cntrs, txq_stat_cntrs;
1662         struct qede_tx_queue *txq;
1663
1664         ecore_get_vport_stats(edev, &stats);
1665
1666         /* RX Stats */
1667         eth_stats->ipackets = stats.common.rx_ucast_pkts +
1668             stats.common.rx_mcast_pkts + stats.common.rx_bcast_pkts;
1669
1670         eth_stats->ibytes = stats.common.rx_ucast_bytes +
1671             stats.common.rx_mcast_bytes + stats.common.rx_bcast_bytes;
1672
1673         eth_stats->ierrors = stats.common.rx_crc_errors +
1674             stats.common.rx_align_errors +
1675             stats.common.rx_carrier_errors +
1676             stats.common.rx_oversize_packets +
1677             stats.common.rx_jabbers + stats.common.rx_undersize_packets;
1678
1679         eth_stats->rx_nombuf = stats.common.no_buff_discards;
1680
1681         eth_stats->imissed = stats.common.mftag_filter_discards +
1682             stats.common.mac_filter_discards +
1683             stats.common.no_buff_discards +
1684             stats.common.brb_truncates + stats.common.brb_discards;
1685
1686         /* TX stats */
1687         eth_stats->opackets = stats.common.tx_ucast_pkts +
1688             stats.common.tx_mcast_pkts + stats.common.tx_bcast_pkts;
1689
1690         eth_stats->obytes = stats.common.tx_ucast_bytes +
1691             stats.common.tx_mcast_bytes + stats.common.tx_bcast_bytes;
1692
1693         eth_stats->oerrors = stats.common.tx_err_drop_pkts;
1694
1695         /* Queue stats */
1696         rxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(qdev),
1697                                RTE_ETHDEV_QUEUE_STAT_CNTRS);
1698         txq_stat_cntrs = RTE_MIN(QEDE_TSS_COUNT(qdev),
1699                                RTE_ETHDEV_QUEUE_STAT_CNTRS);
1700         if ((rxq_stat_cntrs != (unsigned int)QEDE_RSS_COUNT(qdev)) ||
1701             (txq_stat_cntrs != (unsigned int)QEDE_TSS_COUNT(qdev)))
1702                 DP_VERBOSE(edev, ECORE_MSG_DEBUG,
1703                        "Not all the queue stats will be displayed. Set"
1704                        " RTE_ETHDEV_QUEUE_STAT_CNTRS config param"
1705                        " appropriately and retry.\n");
1706
1707         for_each_rss(qid) {
1708                 eth_stats->q_ipackets[i] =
1709                         *(uint64_t *)(
1710                                 ((char *)(qdev->fp_array[qid].rxq)) +
1711                                 offsetof(struct qede_rx_queue,
1712                                 rcv_pkts));
1713                 eth_stats->q_errors[i] =
1714                         *(uint64_t *)(
1715                                 ((char *)(qdev->fp_array[qid].rxq)) +
1716                                 offsetof(struct qede_rx_queue,
1717                                 rx_hw_errors)) +
1718                         *(uint64_t *)(
1719                                 ((char *)(qdev->fp_array[qid].rxq)) +
1720                                 offsetof(struct qede_rx_queue,
1721                                 rx_alloc_errors));
1722                 i++;
1723                 if (i == rxq_stat_cntrs)
1724                         break;
1725         }
1726
1727         for_each_tss(qid) {
1728                 txq = qdev->fp_array[qid].txq;
1729                 eth_stats->q_opackets[j] =
1730                         *((uint64_t *)(uintptr_t)
1731                                 (((uint64_t)(uintptr_t)(txq)) +
1732                                  offsetof(struct qede_tx_queue,
1733                                           xmit_pkts)));
1734                 j++;
1735                 if (j == txq_stat_cntrs)
1736                         break;
1737         }
1738
1739         return 0;
1740 }
1741
1742 static unsigned
1743 qede_get_xstats_count(struct qede_dev *qdev) {
1744         if (ECORE_IS_BB(&qdev->edev))
1745                 return RTE_DIM(qede_xstats_strings) +
1746                        RTE_DIM(qede_bb_xstats_strings) +
1747                        (RTE_DIM(qede_rxq_xstats_strings) *
1748                         RTE_MIN(QEDE_RSS_COUNT(qdev),
1749                                 RTE_ETHDEV_QUEUE_STAT_CNTRS));
1750         else
1751                 return RTE_DIM(qede_xstats_strings) +
1752                        RTE_DIM(qede_ah_xstats_strings) +
1753                        (RTE_DIM(qede_rxq_xstats_strings) *
1754                         RTE_MIN(QEDE_RSS_COUNT(qdev),
1755                                 RTE_ETHDEV_QUEUE_STAT_CNTRS));
1756 }
1757
1758 static int
1759 qede_get_xstats_names(struct rte_eth_dev *dev,
1760                       struct rte_eth_xstat_name *xstats_names,
1761                       __rte_unused unsigned int limit)
1762 {
1763         struct qede_dev *qdev = dev->data->dev_private;
1764         struct ecore_dev *edev = &qdev->edev;
1765         const unsigned int stat_cnt = qede_get_xstats_count(qdev);
1766         unsigned int i, qid, stat_idx = 0;
1767         unsigned int rxq_stat_cntrs;
1768
1769         if (xstats_names != NULL) {
1770                 for (i = 0; i < RTE_DIM(qede_xstats_strings); i++) {
1771                         snprintf(xstats_names[stat_idx].name,
1772                                 sizeof(xstats_names[stat_idx].name),
1773                                 "%s",
1774                                 qede_xstats_strings[i].name);
1775                         stat_idx++;
1776                 }
1777
1778                 if (ECORE_IS_BB(edev)) {
1779                         for (i = 0; i < RTE_DIM(qede_bb_xstats_strings); i++) {
1780                                 snprintf(xstats_names[stat_idx].name,
1781                                         sizeof(xstats_names[stat_idx].name),
1782                                         "%s",
1783                                         qede_bb_xstats_strings[i].name);
1784                                 stat_idx++;
1785                         }
1786                 } else {
1787                         for (i = 0; i < RTE_DIM(qede_ah_xstats_strings); i++) {
1788                                 snprintf(xstats_names[stat_idx].name,
1789                                         sizeof(xstats_names[stat_idx].name),
1790                                         "%s",
1791                                         qede_ah_xstats_strings[i].name);
1792                                 stat_idx++;
1793                         }
1794                 }
1795
1796                 rxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(qdev),
1797                                          RTE_ETHDEV_QUEUE_STAT_CNTRS);
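                /* Per-queue names splice the queue id into the template:
                 * the first four characters, then qid, then the rest of the
                 * template string ("%.4s%d%s").
                 */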
1798                 for (qid = 0; qid < rxq_stat_cntrs; qid++) {
1799                         for (i = 0; i < RTE_DIM(qede_rxq_xstats_strings); i++) {
1800                                 snprintf(xstats_names[stat_idx].name,
1801                                         sizeof(xstats_names[stat_idx].name),
1802                                         "%.4s%d%s",
1803                                         qede_rxq_xstats_strings[i].name, qid,
1804                                         qede_rxq_xstats_strings[i].name + 4);
1805                                 stat_idx++;
1806                         }
1807                 }
1808         }
1809
1810         return stat_cnt;
1811 }
1812
1813 static int
1814 qede_get_xstats(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
1815                 unsigned int n)
1816 {
1817         struct qede_dev *qdev = dev->data->dev_private;
1818         struct ecore_dev *edev = &qdev->edev;
1819         struct ecore_eth_stats stats;
1820         const unsigned int num = qede_get_xstats_count(qdev);
1821         unsigned int i, qid, stat_idx = 0;
1822         unsigned int rxq_stat_cntrs;
1823
1824         if (n < num)
1825                 return num;
1826
1827         ecore_get_vport_stats(edev, &stats);
1828
1829         for (i = 0; i < RTE_DIM(qede_xstats_strings); i++) {
1830                 xstats[stat_idx].value = *(uint64_t *)(((char *)&stats) +
1831                                              qede_xstats_strings[i].offset);
1832                 xstats[stat_idx].id = stat_idx;
1833                 stat_idx++;
1834         }
1835
1836         if (ECORE_IS_BB(edev)) {
1837                 for (i = 0; i < RTE_DIM(qede_bb_xstats_strings); i++) {
1838                         xstats[stat_idx].value =
1839                                         *(uint64_t *)(((char *)&stats) +
1840                                         qede_bb_xstats_strings[i].offset);
1841                         xstats[stat_idx].id = stat_idx;
1842                         stat_idx++;
1843                 }
1844         } else {
1845                 for (i = 0; i < RTE_DIM(qede_ah_xstats_strings); i++) {
1846                         xstats[stat_idx].value =
1847                                         *(uint64_t *)(((char *)&stats) +
1848                                         qede_ah_xstats_strings[i].offset);
1849                         xstats[stat_idx].id = stat_idx;
1850                         stat_idx++;
1851                 }
1852         }
1853
1854         rxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(qdev),
1855                                  RTE_ETHDEV_QUEUE_STAT_CNTRS);
1856         for (qid = 0; qid < rxq_stat_cntrs; qid++) {
1857                 /* Limit per-queue xstats to rxq_stat_cntrs so the entry
1858                  * count matches qede_get_xstats_count(). */
1859                 for (i = 0; i < RTE_DIM(qede_rxq_xstats_strings); i++) {
1860                         xstats[stat_idx].value = *(uint64_t *)(
1861                                 ((char *)(qdev->fp_array[qid].rxq)) +
1862                                  qede_rxq_xstats_strings[i].offset);
1863                         xstats[stat_idx].id = stat_idx;
1864                         stat_idx++;
1865                 }
1866         }
1867
1868         return stat_idx;
1869 }
1870
1871 static void
1872 qede_reset_xstats(struct rte_eth_dev *dev)
1873 {
1874         struct qede_dev *qdev = dev->data->dev_private;
1875         struct ecore_dev *edev = &qdev->edev;
1876
1877         ecore_reset_vport_stats(edev);
1878         qede_reset_queue_stats(qdev, true);
1879 }
1880
1881 int qede_dev_set_link_state(struct rte_eth_dev *eth_dev, bool link_up)
1882 {
1883         struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
1884         struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
1885         struct qed_link_params link_params;
1886         int rc;
1887
1888         DP_INFO(edev, "setting link state %d\n", link_up);
1889         memset(&link_params, 0, sizeof(link_params));
1890         link_params.link_up = link_up;
1891         rc = qdev->ops->common->set_link(edev, &link_params);
1892         if (rc != ECORE_SUCCESS)
1893                 DP_ERR(edev, "Unable to set link state %d\n", link_up);
1894
1895         return rc;
1896 }
1897
1898 static int qede_dev_set_link_up(struct rte_eth_dev *eth_dev)
1899 {
1900         return qede_dev_set_link_state(eth_dev, true);
1901 }
1902
1903 static int qede_dev_set_link_down(struct rte_eth_dev *eth_dev)
1904 {
1905         return qede_dev_set_link_state(eth_dev, false);
1906 }
1907
1908 static void qede_reset_stats(struct rte_eth_dev *eth_dev)
1909 {
1910         struct qede_dev *qdev = eth_dev->data->dev_private;
1911         struct ecore_dev *edev = &qdev->edev;
1912
1913         ecore_reset_vport_stats(edev);
1914         qede_reset_queue_stats(qdev, false);
1915 }
1916
1917 static void qede_allmulticast_enable(struct rte_eth_dev *eth_dev)
1918 {
1919         enum qed_filter_rx_mode_type type =
1920             QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC;
1921
1922         if (rte_eth_promiscuous_get(eth_dev->data->port_id) == 1)
1923                 type |= QED_FILTER_RX_MODE_TYPE_PROMISC;
1924
1925         qed_configure_filter_rx_mode(eth_dev, type);
1926 }
1927
1928 static void qede_allmulticast_disable(struct rte_eth_dev *eth_dev)
1929 {
1930         if (rte_eth_promiscuous_get(eth_dev->data->port_id) == 1)
1931                 qed_configure_filter_rx_mode(eth_dev,
1932                                 QED_FILTER_RX_MODE_TYPE_PROMISC);
1933         else
1934                 qed_configure_filter_rx_mode(eth_dev,
1935                                 QED_FILTER_RX_MODE_TYPE_REGULAR);
1936 }
1937
1938 static int qede_flow_ctrl_set(struct rte_eth_dev *eth_dev,
1939                               struct rte_eth_fc_conf *fc_conf)
1940 {
1941         struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
1942         struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
1943         struct qed_link_output current_link;
1944         struct qed_link_params params;
1945
1946         memset(&current_link, 0, sizeof(current_link));
1947         qdev->ops->common->get_link(edev, &current_link);
1948
1949         memset(&params, 0, sizeof(params));
1950         params.override_flags |= QED_LINK_OVERRIDE_PAUSE_CONFIG;
1951         if (fc_conf->autoneg) {
1952                 if (!(current_link.supported_caps & QEDE_SUPPORTED_AUTONEG)) {
1953                         DP_ERR(edev, "Autoneg not supported\n");
1954                         return -EINVAL;
1955                 }
1956                 params.pause_config |= QED_LINK_PAUSE_AUTONEG_ENABLE;
1957         }
1958
1959         /* Pause is assumed to be supported (SUPPORTED_Pause) */
1960         if (fc_conf->mode == RTE_FC_FULL)
1961                 params.pause_config |= (QED_LINK_PAUSE_TX_ENABLE |
1962                                         QED_LINK_PAUSE_RX_ENABLE);
1963         if (fc_conf->mode == RTE_FC_TX_PAUSE)
1964                 params.pause_config |= QED_LINK_PAUSE_TX_ENABLE;
1965         if (fc_conf->mode == RTE_FC_RX_PAUSE)
1966                 params.pause_config |= QED_LINK_PAUSE_RX_ENABLE;
1967
1968         params.link_up = true;
1969         (void)qdev->ops->common->set_link(edev, &params);
1970
1971         return 0;
1972 }
1973
1974 static int qede_flow_ctrl_get(struct rte_eth_dev *eth_dev,
1975                               struct rte_eth_fc_conf *fc_conf)
1976 {
1977         struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
1978         struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
1979         struct qed_link_output current_link;
1980
1981         memset(&current_link, 0, sizeof(current_link));
1982         qdev->ops->common->get_link(edev, &current_link);
1983
1984         if (current_link.pause_config & QED_LINK_PAUSE_AUTONEG_ENABLE)
1985                 fc_conf->autoneg = true;
1986
1987         if (current_link.pause_config & (QED_LINK_PAUSE_RX_ENABLE |
1988                                          QED_LINK_PAUSE_TX_ENABLE))
1989                 fc_conf->mode = RTE_FC_FULL;
1990         else if (current_link.pause_config & QED_LINK_PAUSE_RX_ENABLE)
1991                 fc_conf->mode = RTE_FC_RX_PAUSE;
1992         else if (current_link.pause_config & QED_LINK_PAUSE_TX_ENABLE)
1993                 fc_conf->mode = RTE_FC_TX_PAUSE;
1994         else
1995                 fc_conf->mode = RTE_FC_NONE;
1996
1997         return 0;
1998 }
1999
2000 static const uint32_t *
2001 qede_dev_supported_ptypes_get(struct rte_eth_dev *eth_dev)
2002 {
2003         static const uint32_t ptypes[] = {
2004                 RTE_PTYPE_L2_ETHER,
2005                 RTE_PTYPE_L2_ETHER_VLAN,
2006                 RTE_PTYPE_L3_IPV4,
2007                 RTE_PTYPE_L3_IPV6,
2008                 RTE_PTYPE_L4_TCP,
2009                 RTE_PTYPE_L4_UDP,
2010                 RTE_PTYPE_TUNNEL_VXLAN,
2011                 RTE_PTYPE_L4_FRAG,
2012                 /* Inner */
2013                 RTE_PTYPE_INNER_L2_ETHER,
2014                 RTE_PTYPE_INNER_L2_ETHER_VLAN,
2015                 RTE_PTYPE_INNER_L3_IPV4,
2016                 RTE_PTYPE_INNER_L3_IPV6,
2017                 RTE_PTYPE_INNER_L4_TCP,
2018                 RTE_PTYPE_INNER_L4_UDP,
2019                 RTE_PTYPE_INNER_L4_FRAG,
2020                 RTE_PTYPE_UNKNOWN
2021         };
2022
2023         if (eth_dev->rx_pkt_burst == qede_recv_pkts)
2024                 return ptypes;
2025
2026         return NULL;
2027 }
2028
2029 static void qede_init_rss_caps(uint8_t *rss_caps, uint64_t hf)
2030 {
2031         *rss_caps = 0;
2032         *rss_caps |= (hf & ETH_RSS_IPV4)              ? ECORE_RSS_IPV4 : 0;
2033         *rss_caps |= (hf & ETH_RSS_IPV6)              ? ECORE_RSS_IPV6 : 0;
2034         *rss_caps |= (hf & ETH_RSS_IPV6_EX)           ? ECORE_RSS_IPV6 : 0;
2035         *rss_caps |= (hf & ETH_RSS_NONFRAG_IPV4_TCP)  ? ECORE_RSS_IPV4_TCP : 0;
2036         *rss_caps |= (hf & ETH_RSS_NONFRAG_IPV6_TCP)  ? ECORE_RSS_IPV6_TCP : 0;
2037         *rss_caps |= (hf & ETH_RSS_IPV6_TCP_EX)       ? ECORE_RSS_IPV6_TCP : 0;
2038         *rss_caps |= (hf & ETH_RSS_NONFRAG_IPV4_UDP)  ? ECORE_RSS_IPV4_UDP : 0;
2039         *rss_caps |= (hf & ETH_RSS_NONFRAG_IPV6_UDP)  ? ECORE_RSS_IPV6_UDP : 0;
2040 }
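/* Example mapping: an rss_hf of (ETH_RSS_IPV4 | ETH_RSS_NONFRAG_IPV4_TCP)
 * yields rss_caps = (ECORE_RSS_IPV4 | ECORE_RSS_IPV4_TCP), which
 * qede_rss_hash_update() below passes to ecore via rss_params.rss_caps.
 */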
2041
2042 int qede_rss_hash_update(struct rte_eth_dev *eth_dev,
2043                          struct rte_eth_rss_conf *rss_conf)
2044 {
2045         struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
2046         struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
2047         struct ecore_sp_vport_update_params vport_update_params;
2048         struct ecore_rss_params rss_params;
2049         struct ecore_hwfn *p_hwfn;
2050         uint32_t *key = (uint32_t *)rss_conf->rss_key;
2051         uint64_t hf = rss_conf->rss_hf;
2052         uint8_t len = rss_conf->rss_key_len;
2053         uint8_t idx;
2054         uint8_t i;
2055         int rc;
2056
2057         memset(&vport_update_params, 0, sizeof(vport_update_params));
2058         memset(&rss_params, 0, sizeof(rss_params));
2059
2060         DP_INFO(edev, "RSS hf = 0x%lx len = %u key = %p\n",
2061                 (unsigned long)hf, len, key);
2062
2063         if (hf != 0) {
2064                 /* Enabling RSS */
2065                 DP_INFO(edev, "Enabling rss\n");
2066
2067                 /* RSS caps */
2068                 qede_init_rss_caps(&rss_params.rss_caps, hf);
2069                 rss_params.update_rss_capabilities = 1;
2070
2071                 /* RSS hash key */
2072                 if (key) {
2073                         if (len > (ECORE_RSS_KEY_SIZE * sizeof(uint32_t))) {
2074                                 DP_ERR(edev, "RSS key length exceeds limit\n");
2075                                 return -EINVAL;
2076                         }
2077                         DP_INFO(edev, "Applying user supplied hash key\n");
2078                         rss_params.update_rss_key = 1;
2079                         memcpy(&rss_params.rss_key, key, len);
2080                 }
2081                 rss_params.rss_enable = 1;
2082         }
2083
2084         rss_params.update_rss_config = 1;
2085         /* tbl_size has to be set with capabilities */
2086         rss_params.rss_table_size_log = 7;
2087         vport_update_params.vport_id = 0;
2088         /* pass the L2 handles instead of qids */
2089         for (i = 0 ; i < ECORE_RSS_IND_TABLE_SIZE ; i++) {
2090                 idx = qdev->rss_ind_table[i];
2091                 rss_params.rss_ind_table[i] = qdev->fp_array[idx].rxq->handle;
2092         }
2093         vport_update_params.rss_params = &rss_params;
2094
2095         for_each_hwfn(edev, i) {
2096                 p_hwfn = &edev->hwfns[i];
2097                 vport_update_params.opaque_fid = p_hwfn->hw_info.opaque_fid;
2098                 rc = ecore_sp_vport_update(p_hwfn, &vport_update_params,
2099                                            ECORE_SPQ_MODE_EBLOCK, NULL);
2100                 if (rc) {
2101                         DP_ERR(edev, "vport-update for RSS failed\n");
2102                         return rc;
2103                 }
2104         }
2105         qdev->rss_enable = rss_params.rss_enable;
2106
2107         /* Update local structure for hash query */
2108         qdev->rss_conf.rss_hf = hf;
2109         qdev->rss_conf.rss_key_len = len;
2110         if (qdev->rss_enable) {
2111                 if (qdev->rss_conf.rss_key == NULL) {
2112                         qdev->rss_conf.rss_key = (uint8_t *)malloc(len);
2113                         if (qdev->rss_conf.rss_key == NULL) {
2114                                 DP_ERR(edev, "No memory to store RSS key\n");
2115                                 return -ENOMEM;
2116                         }
2117                 }
2118                 if (key && len) {
2119                         DP_INFO(edev, "Storing RSS key\n");
2120                         memcpy(qdev->rss_conf.rss_key, key, len);
2121                 }
2122         } else if (!qdev->rss_enable && len == 0) {
2123                 if (qdev->rss_conf.rss_key) {
2124                         free(qdev->rss_conf.rss_key);
2125                         qdev->rss_conf.rss_key = NULL;
2126                         DP_INFO(edev, "Free RSS key\n");
2127                 }
2128         }
2129
2130         return 0;
2131 }
2132
2133 static int qede_rss_hash_conf_get(struct rte_eth_dev *eth_dev,
2134                            struct rte_eth_rss_conf *rss_conf)
2135 {
2136         struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
2137
2138         rss_conf->rss_hf = qdev->rss_conf.rss_hf;
2139         rss_conf->rss_key_len = qdev->rss_conf.rss_key_len;
2140
2141         if (rss_conf->rss_key && qdev->rss_conf.rss_key)
2142                 memcpy(rss_conf->rss_key, qdev->rss_conf.rss_key,
2143                        rss_conf->rss_key_len);
2144         return 0;
2145 }
2146
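/* Sketch of the CMT split performed below (assuming a 2-engine device):
 * indirection entry i is moved to slot i / num_hwfns of the per-engine table
 * selected by the owning hwfn of that queue, and RSS is disabled altogether
 * when a per-engine table ends up pointing at a single queue only.
 */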
2147 static bool qede_update_rss_parm_cmt(struct ecore_dev *edev,
2148                                     struct ecore_rss_params *rss)
2149 {
2150         int i, fn;
2151         bool rss_mode = 1; /* enable */
2152         struct ecore_queue_cid *cid;
2153         struct ecore_rss_params *t_rss;
2154
2155         /* In a regular scenario, we'd simply need to take input handlers.
2156          * But in CMT, we'd have to split the handlers according to the
2157          * engine they were configured on. We'd then have to understand
2158          * whether RSS is really required, since 2-queues on CMT doesn't
2159          * require RSS.
2160          */
2161
2162         /* CMT should be round-robin */
2163         for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++) {
2164                 cid = rss->rss_ind_table[i];
2165
2166                 if (cid->p_owner == ECORE_LEADING_HWFN(edev))
2167                         t_rss = &rss[0];
2168                 else
2169                         t_rss = &rss[1];
2170
2171                 t_rss->rss_ind_table[i / edev->num_hwfns] = cid;
2172         }
2173
2174         t_rss = &rss[1];
2175         t_rss->update_rss_ind_table = 1;
2176         t_rss->rss_table_size_log = 7;
2177         t_rss->update_rss_config = 1;
2178
2179         /* Make sure RSS is actually required */
2180         for_each_hwfn(edev, fn) {
2181                 for (i = 1; i < ECORE_RSS_IND_TABLE_SIZE / edev->num_hwfns;
2182                      i++) {
2183                         if (rss[fn].rss_ind_table[i] !=
2184                             rss[fn].rss_ind_table[0])
2185                                 break;
2186                 }
2187
2188                 if (i == ECORE_RSS_IND_TABLE_SIZE / edev->num_hwfns) {
2189                         DP_INFO(edev,
2190                                 "CMT - 1 queue per-hwfn; Disabling RSS\n");
2191                         rss_mode = 0;
2192                         goto out;
2193                 }
2194         }
2195
2196 out:
2197         t_rss->rss_enable = rss_mode;
2198
2199         return rss_mode;
2200 }
2201
2202 int qede_rss_reta_update(struct rte_eth_dev *eth_dev,
2203                          struct rte_eth_rss_reta_entry64 *reta_conf,
2204                          uint16_t reta_size)
2205 {
2206         struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
2207         struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
2208         struct ecore_sp_vport_update_params vport_update_params;
2209         struct ecore_rss_params *params;
2210         struct ecore_hwfn *p_hwfn;
2211         uint16_t i, idx, shift;
2212         uint8_t entry;
2213         int rc = 0;
2214
2215         if (reta_size > ETH_RSS_RETA_SIZE_128) {
2216                 DP_ERR(edev, "reta_size %d is not supported by hardware\n",
2217                        reta_size);
2218                 return -EINVAL;
2219         }
2220
2221         memset(&vport_update_params, 0, sizeof(vport_update_params));
2222         params = rte_zmalloc("qede_rss", sizeof(*params) * edev->num_hwfns,
2223                              RTE_CACHE_LINE_SIZE);
2224         if (params == NULL) {
2225                 DP_ERR(edev, "failed to allocate memory\n");
2226                 return -ENOMEM;
2227         }
2228
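        /* Each 64-entry rte_eth_rss_reta_entry64 group carries a mask; e.g.
         * RETA entry 70 is reta_conf[1].reta[6] and is applied only when bit 6
         * of reta_conf[1].mask is set (RTE_RETA_GROUP_SIZE is 64).
         */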
2229         for (i = 0; i < reta_size; i++) {
2230                 idx = i / RTE_RETA_GROUP_SIZE;
2231                 shift = i % RTE_RETA_GROUP_SIZE;
2232                 if (reta_conf[idx].mask & (1ULL << shift)) {
2233                         entry = reta_conf[idx].reta[shift];
2234                         /* Pass rxq handles to ecore */
2235                         params->rss_ind_table[i] =
2236                                         qdev->fp_array[entry].rxq->handle;
2237                         /* Update the local copy for RETA query command */
2238                         qdev->rss_ind_table[i] = entry;
2239                 }
2240         }
2241
2242         params->update_rss_ind_table = 1;
2243         params->rss_table_size_log = 7;
2244         params->update_rss_config = 1;
2245
2246         /* Fix up RETA for CMT mode device */
2247         if (ECORE_IS_CMT(edev))
2248                 qdev->rss_enable = qede_update_rss_parm_cmt(edev,
2249                                                             params);
2250         vport_update_params.vport_id = 0;
2251         /* Use the current value of rss_enable */
2252         params->rss_enable = qdev->rss_enable;
2253         vport_update_params.rss_params = params;
2254
2255         for_each_hwfn(edev, i) {
2256                 p_hwfn = &edev->hwfns[i];
2257                 vport_update_params.opaque_fid = p_hwfn->hw_info.opaque_fid;
2258                 rc = ecore_sp_vport_update(p_hwfn, &vport_update_params,
2259                                            ECORE_SPQ_MODE_EBLOCK, NULL);
2260                 if (rc) {
2261                         DP_ERR(edev, "vport-update for RSS failed\n");
2262                         goto out;
2263                 }
2264         }
2265
2266 out:
2267         rte_free(params);
2268         return rc;
2269 }
2270
2271 static int qede_rss_reta_query(struct rte_eth_dev *eth_dev,
2272                                struct rte_eth_rss_reta_entry64 *reta_conf,
2273                                uint16_t reta_size)
2274 {
2275         struct qede_dev *qdev = eth_dev->data->dev_private;
2276         struct ecore_dev *edev = &qdev->edev;
2277         uint16_t i, idx, shift;
2278         uint8_t entry;
2279
2280         if (reta_size > ETH_RSS_RETA_SIZE_128) {
2281                 DP_ERR(edev, "reta_size %d is not supported\n",
2282                        reta_size);
2283                 return -EINVAL;
2284         }
2285
2286         for (i = 0; i < reta_size; i++) {
2287                 idx = i / RTE_RETA_GROUP_SIZE;
2288                 shift = i % RTE_RETA_GROUP_SIZE;
2289                 if (reta_conf[idx].mask & (1ULL << shift)) {
2290                         entry = qdev->rss_ind_table[i];
2291                         reta_conf[idx].reta[shift] = entry;
2292                 }
2293         }
2294
2295         return 0;
2296 }
2297
2298
2299
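/* Frame-size arithmetic used in qede_set_mtu() below: for example, an MTU of
 * 9000 gives max_rx_pkt_len = 9000 + ETHER_HDR_LEN (14) + ETHER_CRC_LEN (4) =
 * 9018 bytes, and frame_size additionally accounts for QEDE_ETH_OVERHEAD.
 */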
2300 static int qede_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
2301 {
2302         struct qede_dev *qdev = QEDE_INIT_QDEV(dev);
2303         struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
2304         struct rte_eth_dev_info dev_info = {0};
2305         struct qede_fastpath *fp;
2306         uint32_t max_rx_pkt_len;
2307         uint32_t frame_size;
2308         uint16_t rx_buf_size;
2309         uint16_t bufsz;
2310         bool restart = false;
2311         int i;
2312
2313         PMD_INIT_FUNC_TRACE(edev);
2314         if (IS_VF(edev))
2315                 return -ENOTSUP;
2316         qede_dev_info_get(dev, &dev_info);
2317         max_rx_pkt_len = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
2318         frame_size = max_rx_pkt_len + QEDE_ETH_OVERHEAD;
2319         if ((mtu < ETHER_MIN_MTU) || (frame_size > dev_info.max_rx_pktlen)) {
2320                 DP_ERR(edev, "MTU %u out of range, %u is maximum allowable\n",
2321                        mtu, dev_info.max_rx_pktlen - ETHER_HDR_LEN -
2322                         ETHER_CRC_LEN - QEDE_ETH_OVERHEAD);
2323                 return -EINVAL;
2324         }
2325         if (!dev->data->scattered_rx &&
2326             frame_size > dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM) {
2327                 DP_INFO(edev, "MTU greater than minimum RX buffer size of %u\n",
2328                         dev->data->min_rx_buf_size);
2329                 return -EINVAL;
2330         }
2331         /* Temporarily replace I/O functions with dummy ones. They cannot
2332          * be set to NULL because rte_eth_rx_burst() doesn't check for NULL.
2333          */
2334         dev->rx_pkt_burst = qede_rxtx_pkts_dummy;
2335         dev->tx_pkt_burst = qede_rxtx_pkts_dummy;
2336         if (dev->data->dev_started) {
2337                 dev->data->dev_started = 0;
2338                 qede_dev_stop(dev);
2339                 restart = true;
2340         }
2341         rte_delay_ms(1000);
2342         qdev->new_mtu = mtu;
2343         /* Fix up RX buf size for all queues of the port */
2344         for_each_rss(i) {
2345                 fp = &qdev->fp_array[i];
2346                 if (fp->rxq != NULL) {
2347                         bufsz = (uint16_t)rte_pktmbuf_data_room_size(
2348                                 fp->rxq->mb_pool) - RTE_PKTMBUF_HEADROOM;
2349                         if (dev->data->scattered_rx)
2350                                 rx_buf_size = bufsz + ETHER_HDR_LEN +
2351                                               ETHER_CRC_LEN + QEDE_ETH_OVERHEAD;
2352                         else
2353                                 rx_buf_size = frame_size;
2354                         rx_buf_size = QEDE_CEIL_TO_CACHE_LINE_SIZE(rx_buf_size);
2355                         fp->rxq->rx_buf_size = rx_buf_size;
2356                         DP_INFO(edev, "buf_size adjusted to %u\n", rx_buf_size);
2357                 }
2358         }
2359         if (max_rx_pkt_len > ETHER_MAX_LEN)
2360                 dev->data->dev_conf.rxmode.jumbo_frame = 1;
2361         else
2362                 dev->data->dev_conf.rxmode.jumbo_frame = 0;
2363         if (!dev->data->dev_started && restart) {
2364                 qede_dev_start(dev);
2365                 dev->data->dev_started = 1;
2366         }
2367         /* update max frame size */
2368         dev->data->dev_conf.rxmode.max_rx_pkt_len = max_rx_pkt_len;
2369         /* Reassign back */
2370         dev->rx_pkt_burst = qede_recv_pkts;
2371         dev->tx_pkt_burst = qede_xmit_pkts;
2372
2373         return 0;
2374 }
2375
2376 static int
2377 qede_conf_udp_dst_port(struct rte_eth_dev *eth_dev,
2378                        struct rte_eth_udp_tunnel *tunnel_udp,
2379                        bool add)
2380 {
2381         struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
2382         struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
2383         struct ecore_tunnel_info tunn; /* @DPDK */
2384         struct ecore_hwfn *p_hwfn;
2385         struct ecore_ptt *p_ptt;
2386         uint16_t udp_port;
2387         int rc, i;
2388
2389         PMD_INIT_FUNC_TRACE(edev);
2390
2391         memset(&tunn, 0, sizeof(tunn));
2392         if (tunnel_udp->prot_type == RTE_TUNNEL_TYPE_VXLAN) {
2393                 /* Enable VxLAN tunnel if needed before UDP port update using
2394                  * default MAC/VLAN classification.
2395                  */
2396                 if (add) {
2397                         if (qdev->vxlan.udp_port == tunnel_udp->udp_port) {
2398                                 DP_INFO(edev,
2399                                         "UDP port %u was already configured\n",
2400                                         tunnel_udp->udp_port);
2401                                 return ECORE_SUCCESS;
2402                         }
2403                         /* Enable VXLAN if it was not enabled while adding
2404                          * VXLAN filter.
2405                          */
2406                         if (!qdev->vxlan.enable) {
2407                                 rc = qede_vxlan_enable(eth_dev,
2408                                         ECORE_TUNN_CLSS_MAC_VLAN, true, true);
2409                                 if (rc != ECORE_SUCCESS) {
2410                                         DP_ERR(edev, "Failed to enable VXLAN "
2411                                                 "prior to updating UDP port\n");
2412                                         return rc;
2413                                 }
2414                         }
2415                         udp_port = tunnel_udp->udp_port;
2416                 } else {
2417                         if (qdev->vxlan.udp_port != tunnel_udp->udp_port) {
2418                                 DP_ERR(edev, "UDP port %u doesn't exist\n",
2419                                         tunnel_udp->udp_port);
2420                                 return ECORE_INVAL;
2421                         }
2422                         udp_port = 0;
2423                 }
2424
2425                 tunn.vxlan_port.b_update_port = true;
2426                 tunn.vxlan_port.port = udp_port;
2427                 for_each_hwfn(edev, i) {
2428                         p_hwfn = &edev->hwfns[i];
2429                         if (IS_PF(edev)) {
2430                                 p_ptt = ecore_ptt_acquire(p_hwfn);
2431                                 if (!p_ptt)
2432                                         return -EAGAIN;
2433                         } else {
2434                                 p_ptt = NULL;
2435                         }
2436                         rc = ecore_sp_pf_update_tunn_cfg(p_hwfn, p_ptt, &tunn,
2437                                                 ECORE_SPQ_MODE_CB, NULL);
2438                         if (rc != ECORE_SUCCESS) {
2439                                 DP_ERR(edev, "Unable to config UDP port %u\n",
2440                                        tunn.vxlan_port.port);
2441                                 if (IS_PF(edev))
2442                                         ecore_ptt_release(p_hwfn, p_ptt);
2443                                 return rc;
2444                         }
2445                 }
2446
2447                 qdev->vxlan.udp_port = udp_port;
2448                 /* If the request is to delete the UDP port and the number of
2449                  * VXLAN filters has reached 0, then VXLAN offload can be
2450                  * disabled.
2451                  */
2452                 if (!add && qdev->vxlan.enable && qdev->vxlan.num_filters == 0)
2453                         return qede_vxlan_enable(eth_dev,
2454                                         ECORE_TUNN_CLSS_MAC_VLAN, false, true);
2455         }
2456
2457         return 0;
2458 }
2459
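/* The two wrappers below back the rte_eth_dev_udp_tunnel_port_add()/_delete()
 * ethdev APIs; only RTE_TUNNEL_TYPE_VXLAN is acted upon in
 * qede_conf_udp_dst_port(), other tunnel types simply return 0.
 */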
2460 static int
2461 qede_udp_dst_port_del(struct rte_eth_dev *eth_dev,
2462                       struct rte_eth_udp_tunnel *tunnel_udp)
2463 {
2464         return qede_conf_udp_dst_port(eth_dev, tunnel_udp, false);
2465 }
2466
2467 static int
2468 qede_udp_dst_port_add(struct rte_eth_dev *eth_dev,
2469                       struct rte_eth_udp_tunnel *tunnel_udp)
2470 {
2471         return qede_conf_udp_dst_port(eth_dev, tunnel_udp, true);
2472 }
2473
2474 static void qede_get_ecore_tunn_params(uint32_t filter, uint32_t *type,
2475                                        uint32_t *clss, char *str)
2476 {
2477         uint16_t j;
2478         *clss = MAX_ECORE_TUNN_CLSS;
2479
2480         for (j = 0; j < RTE_DIM(qede_tunn_types); j++) {
2481                 if (filter == qede_tunn_types[j].rte_filter_type) {
2482                         *type = qede_tunn_types[j].qede_type;
2483                         *clss = qede_tunn_types[j].qede_tunn_clss;
2484                         strcpy(str, qede_tunn_types[j].string);
2485                         return;
2486                 }
2487         }
2488 }
2489
2490 static int
2491 qede_set_ucast_tunn_cmn_param(struct ecore_filter_ucast *ucast,
2492                               const struct rte_eth_tunnel_filter_conf *conf,
2493                               uint32_t type)
2494 {
2495         /* Init common ucast params first */
2496         qede_set_ucast_cmn_params(ucast);
2497
2498         /* Copy out the required fields based on classification type */
2499         ucast->type = type;
2500
2501         switch (type) {
2502         case ECORE_FILTER_VNI:
2503                 ucast->vni = conf->tenant_id;
2504         break;
2505         case ECORE_FILTER_INNER_VLAN:
2506                 ucast->vlan = conf->inner_vlan;
2507         break;
2508         case ECORE_FILTER_MAC:
2509                 memcpy(ucast->mac, conf->outer_mac.addr_bytes,
2510                        ETHER_ADDR_LEN);
2511         break;
2512         case ECORE_FILTER_INNER_MAC:
2513                 memcpy(ucast->mac, conf->inner_mac.addr_bytes,
2514                        ETHER_ADDR_LEN);
2515         break;
2516         case ECORE_FILTER_MAC_VNI_PAIR:
2517                 memcpy(ucast->mac, conf->outer_mac.addr_bytes,
2518                         ETHER_ADDR_LEN);
2519                 ucast->vni = conf->tenant_id;
2520         break;
2521         case ECORE_FILTER_INNER_MAC_VNI_PAIR:
2522                 memcpy(ucast->mac, conf->inner_mac.addr_bytes,
2523                         ETHER_ADDR_LEN);
2524                 ucast->vni = conf->tenant_id;
2525         break;
2526         case ECORE_FILTER_INNER_PAIR:
2527                 memcpy(ucast->mac, conf->inner_mac.addr_bytes,
2528                         ETHER_ADDR_LEN);
2529                 ucast->vlan = conf->inner_vlan;
2530         break;
2531         default:
2532                 return -EINVAL;
2533         }
2534
2535         return ECORE_SUCCESS;
2536 }
2537
2538 static int qede_vxlan_tunn_config(struct rte_eth_dev *eth_dev,
2539                                   enum rte_filter_op filter_op,
2540                                   const struct rte_eth_tunnel_filter_conf *conf)
2541 {
2542         struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
2543         struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
2544         enum ecore_filter_ucast_type type;
2545         enum ecore_tunn_clss clss = MAX_ECORE_TUNN_CLSS;
2546         struct ecore_filter_ucast ucast = {0};
2547         char str[80];
2548         uint16_t filter_type = 0;
2549         int rc;
2550
2551         PMD_INIT_FUNC_TRACE(edev);
2552
2553         switch (filter_op) {
2554         case RTE_ETH_FILTER_ADD:
2555                 if (IS_VF(edev))
2556                         return qede_vxlan_enable(eth_dev,
2557                                         ECORE_TUNN_CLSS_MAC_VLAN, true, true);
2558
2559                 filter_type = conf->filter_type;
2560                 /* Determine if the given filter classification is supported */
2561                 qede_get_ecore_tunn_params(filter_type, &type, &clss, str);
2562                 if (clss == MAX_ECORE_TUNN_CLSS) {
2563                         DP_ERR(edev, "Unsupported filter type\n");
2564                         return -EINVAL;
2565                 }
2566                 /* Init tunnel ucast params */
2567                 rc = qede_set_ucast_tunn_cmn_param(&ucast, conf, type);
2568                 if (rc != ECORE_SUCCESS) {
2569                         DP_ERR(edev, "Unsupported VxLAN filter type 0x%x\n",
2570                         conf->filter_type);
2571                         return rc;
2572                 }
2573                 DP_INFO(edev, "Rule: \"%s\", op %d, type 0x%x\n",
2574                         str, filter_op, ucast.type);
2575
2576                 ucast.opcode = ECORE_FILTER_ADD;
2577
2578                 /* Skip MAC/VLAN if filter is based on VNI */
2579                 if (!(filter_type & ETH_TUNNEL_FILTER_TENID)) {
2580                         rc = qede_mac_int_ops(eth_dev, &ucast, 1);
2581                         if (rc == 0) {
2582                                 /* Enable accept anyvlan */
2583                                 qede_config_accept_any_vlan(qdev, true);
2584                         }
2585                 } else {
2586                         rc = qede_ucast_filter(eth_dev, &ucast, 1);
2587                         if (rc == 0)
2588                                 rc = ecore_filter_ucast_cmd(edev, &ucast,
2589                                                     ECORE_SPQ_MODE_CB, NULL);
2590                 }
2591
2592                 if (rc != ECORE_SUCCESS)
2593                         return rc;
2594
2595                 qdev->vxlan.num_filters++;
2596                 qdev->vxlan.filter_type = filter_type;
2597                 if (!qdev->vxlan.enable)
2598                         return qede_vxlan_enable(eth_dev, clss, true, true);
2599
2600         break;
2601         case RTE_ETH_FILTER_DELETE:
2602                 if (IS_VF(edev))
2603                         return qede_vxlan_enable(eth_dev,
2604                                 ECORE_TUNN_CLSS_MAC_VLAN, false, true);
2605
2606                 filter_type = conf->filter_type;
2607                 /* Determine if the given filter classification is supported */
2608                 qede_get_ecore_tunn_params(filter_type, &type, &clss, str);
2609                 if (clss == MAX_ECORE_TUNN_CLSS) {
2610                         DP_ERR(edev, "Unsupported filter type\n");
2611                         return -EINVAL;
2612                 }
2613                 /* Init tunnel ucast params */
2614                 rc = qede_set_ucast_tunn_cmn_param(&ucast, conf, type);
2615                 if (rc != ECORE_SUCCESS) {
2616                         DP_ERR(edev, "Unsupported VxLAN filter type 0x%x\n",
2617                         conf->filter_type);
2618                         return rc;
2619                 }
2620                 DP_INFO(edev, "Rule: \"%s\", op %d, type 0x%x\n",
2621                         str, filter_op, ucast.type);
2622
2623                 ucast.opcode = ECORE_FILTER_REMOVE;
2624
2625                 if (!(filter_type & ETH_TUNNEL_FILTER_TENID)) {
2626                         rc = qede_mac_int_ops(eth_dev, &ucast, 0);
2627                 } else {
2628                         rc = qede_ucast_filter(eth_dev, &ucast, 0);
2629                         if (rc == 0)
2630                                 rc = ecore_filter_ucast_cmd(edev, &ucast,
2631                                                     ECORE_SPQ_MODE_CB, NULL);
2632                 }
2633                 if (rc != ECORE_SUCCESS)
2634                         return rc;
2635
2636                 qdev->vxlan.num_filters--;
2637
2638                 /* Disable VXLAN if VXLAN filters become 0 */
2639                 if (qdev->vxlan.num_filters == 0)
2640                         return qede_vxlan_enable(eth_dev, clss, false, true);
2641                 break;
2642         default:
2643                 DP_ERR(edev, "Unsupported operation %d\n", filter_op);
2644                 return -EINVAL;
2645         }
2646
2647         return 0;
2648 }
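/*
 * Illustrative sketch (not part of the driver): a typical way an application
 * reaches the ADD path of qede_vxlan_tunn_config() is through the generic
 * rte_eth_dev_filter_ctrl() API with a VXLAN tunnel filter configuration.
 * The port id, VNI and inner MAC below are placeholder values.
 *
 *   struct rte_eth_tunnel_filter_conf conf;
 *   struct ether_addr inner = {{ 0x52, 0x54, 0x00, 0x11, 0x22, 0x33 }};
 *
 *   memset(&conf, 0, sizeof(conf));
 *   conf.tunnel_type = RTE_TUNNEL_TYPE_VXLAN;
 *   conf.filter_type = ETH_TUNNEL_FILTER_TENID | ETH_TUNNEL_FILTER_IMAC;
 *   conf.tenant_id = 100;                          (VXLAN VNI)
 *   ether_addr_copy(&inner, &conf.inner_mac);
 *   conf.queue_id = 0;                             (no Rx steering is done)
 *
 *   rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_TUNNEL,
 *                           RTE_ETH_FILTER_ADD, &conf);
 */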
2649
2650 int qede_dev_filter_ctrl(struct rte_eth_dev *eth_dev,
2651                          enum rte_filter_type filter_type,
2652                          enum rte_filter_op filter_op,
2653                          void *arg)
2654 {
2655         struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
2656         struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
2657         struct rte_eth_tunnel_filter_conf *filter_conf =
2658                         (struct rte_eth_tunnel_filter_conf *)arg;
2659
2660         switch (filter_type) {
2661         case RTE_ETH_FILTER_TUNNEL:
2662                 switch (filter_conf->tunnel_type) {
2663                 case RTE_TUNNEL_TYPE_VXLAN:
2664                         DP_INFO(edev,
2665                                 "Packet steering to the specified Rx queue"
2666                                 " is not supported with VXLAN tunneling\n");
2667                         return qede_vxlan_tunn_config(eth_dev, filter_op,
2668                                                       filter_conf);
2669                 /* Placeholders for future tunneling support */
2670                 case RTE_TUNNEL_TYPE_GENEVE:
2671                 case RTE_TUNNEL_TYPE_TEREDO:
2672                 case RTE_TUNNEL_TYPE_NVGRE:
2673                 case RTE_TUNNEL_TYPE_IP_IN_GRE:
2674                 case RTE_L2_TUNNEL_TYPE_E_TAG:
2675                         DP_ERR(edev, "Unsupported tunnel type %d\n",
2676                                 filter_conf->tunnel_type);
2677                         return -EINVAL;
2678                 case RTE_TUNNEL_TYPE_NONE:
2679                 default:
2680                         return 0;
2681                 }
2682                 break;
2683         case RTE_ETH_FILTER_FDIR:
2684                 return qede_fdir_filter_conf(eth_dev, filter_op, arg);
2685         case RTE_ETH_FILTER_NTUPLE:
2686                 return qede_ntuple_filter_conf(eth_dev, filter_op, arg);
2687         case RTE_ETH_FILTER_MACVLAN:
2688         case RTE_ETH_FILTER_ETHERTYPE:
2689         case RTE_ETH_FILTER_FLEXIBLE:
2690         case RTE_ETH_FILTER_SYN:
2691         case RTE_ETH_FILTER_HASH:
2692         case RTE_ETH_FILTER_L2_TUNNEL:
2693         case RTE_ETH_FILTER_MAX:
2694         default:
2695                 DP_ERR(edev, "Unsupported filter type %d\n",
2696                         filter_type);
2697                 return -EINVAL;
2698         }
2699
2700         return 0;
2701 }
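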
2702
2703 static const struct eth_dev_ops qede_eth_dev_ops = {
2704         .dev_configure = qede_dev_configure,
2705         .dev_infos_get = qede_dev_info_get,
2706         .rx_queue_setup = qede_rx_queue_setup,
2707         .rx_queue_release = qede_rx_queue_release,
2708         .tx_queue_setup = qede_tx_queue_setup,
2709         .tx_queue_release = qede_tx_queue_release,
2710         .dev_start = qede_dev_start,
2711         .dev_set_link_up = qede_dev_set_link_up,
2712         .dev_set_link_down = qede_dev_set_link_down,
2713         .link_update = qede_link_update,
2714         .promiscuous_enable = qede_promiscuous_enable,
2715         .promiscuous_disable = qede_promiscuous_disable,
2716         .allmulticast_enable = qede_allmulticast_enable,
2717         .allmulticast_disable = qede_allmulticast_disable,
2718         .dev_stop = qede_dev_stop,
2719         .dev_close = qede_dev_close,
2720         .stats_get = qede_get_stats,
2721         .stats_reset = qede_reset_stats,
2722         .xstats_get = qede_get_xstats,
2723         .xstats_reset = qede_reset_xstats,
2724         .xstats_get_names = qede_get_xstats_names,
2725         .mac_addr_add = qede_mac_addr_add,
2726         .mac_addr_remove = qede_mac_addr_remove,
2727         .mac_addr_set = qede_mac_addr_set,
2728         .vlan_offload_set = qede_vlan_offload_set,
2729         .vlan_filter_set = qede_vlan_filter_set,
2730         .flow_ctrl_set = qede_flow_ctrl_set,
2731         .flow_ctrl_get = qede_flow_ctrl_get,
2732         .dev_supported_ptypes_get = qede_dev_supported_ptypes_get,
2733         .rss_hash_update = qede_rss_hash_update,
2734         .rss_hash_conf_get = qede_rss_hash_conf_get,
2735         .reta_update  = qede_rss_reta_update,
2736         .reta_query  = qede_rss_reta_query,
2737         .mtu_set = qede_set_mtu,
2738         .filter_ctrl = qede_dev_filter_ctrl,
2739         .udp_tunnel_port_add = qede_udp_dst_port_add,
2740         .udp_tunnel_port_del = qede_udp_dst_port_del,
2741 };
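/*
 * Illustrative sketch (not part of the driver): the udp_tunnel_port_add/del
 * ops above are reached via rte_eth_dev_udp_tunnel_port_add(), which an
 * application typically calls before installing VXLAN filters so that the
 * device recognizes the VXLAN UDP destination port. The port id and UDP
 * port below are placeholder values.
 *
 *   struct rte_eth_udp_tunnel tunnel_udp;
 *
 *   memset(&tunnel_udp, 0, sizeof(tunnel_udp));
 *   tunnel_udp.udp_port = 4789;                    (IANA-assigned VXLAN port)
 *   tunnel_udp.prot_type = RTE_TUNNEL_TYPE_VXLAN;
 *   rte_eth_dev_udp_tunnel_port_add(port_id, &tunnel_udp);
 */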
2742
2743 static const struct eth_dev_ops qede_eth_vf_dev_ops = {
2744         .dev_configure = qede_dev_configure,
2745         .dev_infos_get = qede_dev_info_get,
2746         .rx_queue_setup = qede_rx_queue_setup,
2747         .rx_queue_release = qede_rx_queue_release,
2748         .tx_queue_setup = qede_tx_queue_setup,
2749         .tx_queue_release = qede_tx_queue_release,
2750         .dev_start = qede_dev_start,
2751         .dev_set_link_up = qede_dev_set_link_up,
2752         .dev_set_link_down = qede_dev_set_link_down,
2753         .link_update = qede_link_update,
2754         .promiscuous_enable = qede_promiscuous_enable,
2755         .promiscuous_disable = qede_promiscuous_disable,
2756         .allmulticast_enable = qede_allmulticast_enable,
2757         .allmulticast_disable = qede_allmulticast_disable,
2758         .dev_stop = qede_dev_stop,
2759         .dev_close = qede_dev_close,
2760         .stats_get = qede_get_stats,
2761         .stats_reset = qede_reset_stats,
2762         .xstats_get = qede_get_xstats,
2763         .xstats_reset = qede_reset_xstats,
2764         .xstats_get_names = qede_get_xstats_names,
2765         .vlan_offload_set = qede_vlan_offload_set,
2766         .vlan_filter_set = qede_vlan_filter_set,
2767         .dev_supported_ptypes_get = qede_dev_supported_ptypes_get,
2768         .rss_hash_update = qede_rss_hash_update,
2769         .rss_hash_conf_get = qede_rss_hash_conf_get,
2770         .reta_update  = qede_rss_reta_update,
2771         .reta_query  = qede_rss_reta_query,
2772         .mtu_set = qede_set_mtu,
2773         .udp_tunnel_port_add = qede_udp_dst_port_add,
2774         .udp_tunnel_port_del = qede_udp_dst_port_del,
2775 };
2776
2777 static void qede_update_pf_params(struct ecore_dev *edev)
2778 {
2779         struct ecore_pf_params pf_params;
2780
2781         memset(&pf_params, 0, sizeof(struct ecore_pf_params));
2782         pf_params.eth_pf_params.num_cons = QEDE_PF_NUM_CONNS;
2783         pf_params.eth_pf_params.num_arfs_filters = QEDE_RFS_MAX_FLTR;
2784         qed_ops->common->update_pf_params(edev, &pf_params);
2785 }
2786
2787 static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
2788 {
2789         struct rte_pci_device *pci_dev;
2790         struct rte_pci_addr pci_addr;
2791         struct qede_dev *adapter;
2792         struct ecore_dev *edev;
2793         struct qed_dev_eth_info dev_info;
2794         struct qed_slowpath_params params;
2795         static bool do_once = true;
2796         uint8_t bulletin_change;
2797         uint8_t vf_mac[ETHER_ADDR_LEN];
2798         uint8_t is_mac_forced;
2799         bool is_mac_exist;
2800         /* Fix up ecore debug level */
2801         uint32_t dp_module = ~0 & ~ECORE_MSG_HW;
2802         uint8_t dp_level = ECORE_LEVEL_VERBOSE;
2803         int rc;
2804
2805         /* Extract key data structures */
2806         adapter = eth_dev->data->dev_private;
2807         adapter->ethdev = eth_dev;
2808         edev = &adapter->edev;
2809         pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
2810         pci_addr = pci_dev->addr;
2811
2812         PMD_INIT_FUNC_TRACE(edev);
2813
2814         snprintf(edev->name, NAME_SIZE, PCI_SHORT_PRI_FMT ":dpdk-port-%u",
2815                  pci_addr.bus, pci_addr.devid, pci_addr.function,
2816                  eth_dev->data->port_id);
2817
2818         eth_dev->rx_pkt_burst = qede_recv_pkts;
2819         eth_dev->tx_pkt_burst = qede_xmit_pkts;
2820         eth_dev->tx_pkt_prepare = qede_xmit_prep_pkts;
2821
2822         if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
2823                 DP_ERR(edev, "Skipping device init from secondary process\n");
2824                 return 0;
2825         }
2826
2827         rte_eth_copy_pci_info(eth_dev, pci_dev);
2828
2829         /* @DPDK */
2830         edev->vendor_id = pci_dev->id.vendor_id;
2831         edev->device_id = pci_dev->id.device_id;
2832
2833         qed_ops = qed_get_eth_ops();
2834         if (!qed_ops) {
2835                 DP_ERR(edev, "Failed to get qed_eth_ops_pass\n");
2836                 return -EINVAL;
2837         }
2838
2839         DP_INFO(edev, "Starting qede probe\n");
2840         rc = qed_ops->common->probe(edev, pci_dev, dp_module,
2841                                     dp_level, is_vf);
2842         if (rc != 0) {
2843                 DP_ERR(edev, "qede probe failed rc %d\n", rc);
2844                 return -ENODEV;
2845         }
2846         qede_update_pf_params(edev);
2847         rte_intr_callback_register(&pci_dev->intr_handle,
2848                                    qede_interrupt_handler, (void *)eth_dev);
2849         if (rte_intr_enable(&pci_dev->intr_handle)) {
2850                 DP_ERR(edev, "rte_intr_enable() failed\n");
2851                 return -ENODEV;
2852         }
2853
2854         /* Start the slowpath process */
2855         memset(&params, 0, sizeof(struct qed_slowpath_params));
2856         params.int_mode = ECORE_INT_MODE_MSIX;
2857         params.drv_major = QEDE_PMD_VERSION_MAJOR;
2858         params.drv_minor = QEDE_PMD_VERSION_MINOR;
2859         params.drv_rev = QEDE_PMD_VERSION_REVISION;
2860         params.drv_eng = QEDE_PMD_VERSION_PATCH;
2861         strncpy((char *)params.name, QEDE_PMD_VER_PREFIX,
2862                 QEDE_PMD_DRV_VER_STR_SIZE);
2863
2864         /* For a CMT-mode device, poll periodically for slowpath events.
2865          * This is required since the uio device uses only one MSI-X
2866          * interrupt vector but we need one for each engine.
2867          */
2868         if (ECORE_IS_CMT(edev) && IS_PF(edev)) {
2869                 rc = rte_eal_alarm_set(timer_period * US_PER_S,
2870                                        qede_poll_sp_sb_cb,
2871                                        (void *)eth_dev);
2872                 if (rc != 0) {
2873                         DP_ERR(edev, "Unable to start periodic"
2874                                      " timer rc %d\n", rc);
2875                         return -EINVAL;
2876                 }
2877         }
2878
2879         rc = qed_ops->common->slowpath_start(edev, &params);
2880         if (rc) {
2881                 DP_ERR(edev, "Cannot start slowpath rc = %d\n", rc);
2882                 rte_eal_alarm_cancel(qede_poll_sp_sb_cb,
2883                                      (void *)eth_dev);
2884                 return -ENODEV;
2885         }
2886
2887         rc = qed_ops->fill_dev_info(edev, &dev_info);
2888         if (rc) {
2889                 DP_ERR(edev, "Cannot get device_info rc %d\n", rc);
2890                 qed_ops->common->slowpath_stop(edev);
2891                 qed_ops->common->remove(edev);
2892                 rte_eal_alarm_cancel(qede_poll_sp_sb_cb,
2893                                      (void *)eth_dev);
2894                 return -ENODEV;
2895         }
2896
2897         qede_alloc_etherdev(adapter, &dev_info);
2898
2899         adapter->ops->common->set_name(edev, edev->name);
2900
2901         if (!is_vf)
2902                 adapter->dev_info.num_mac_filters =
2903                         (uint32_t)RESC_NUM(ECORE_LEADING_HWFN(edev),
2904                                             ECORE_MAC);
2905         else
2906                 ecore_vf_get_num_mac_filters(ECORE_LEADING_HWFN(edev),
2907                                 (uint32_t *)&adapter->dev_info.num_mac_filters);
2908
2909         /* Allocate memory for storing MAC addresses */
2910         eth_dev->data->mac_addrs = rte_zmalloc(edev->name,
2911                                         (ETHER_ADDR_LEN *
2912                                         adapter->dev_info.num_mac_filters),
2913                                         RTE_CACHE_LINE_SIZE);
2914
2915         if (eth_dev->data->mac_addrs == NULL) {
2916                 DP_ERR(edev, "Failed to allocate MAC address\n");
2917                 qed_ops->common->slowpath_stop(edev);
2918                 qed_ops->common->remove(edev);
2919                 rte_eal_alarm_cancel(qede_poll_sp_sb_cb,
2920                                      (void *)eth_dev);
2921                 return -ENOMEM;
2922         }
2923
2924         if (!is_vf) {
2925                 ether_addr_copy((struct ether_addr *)edev->hwfns[0].
2926                                 hw_info.hw_mac_addr,
2927                                 &eth_dev->data->mac_addrs[0]);
2928                 ether_addr_copy(&eth_dev->data->mac_addrs[0],
2929                                 &adapter->primary_mac);
2930         } else {
2931                 ecore_vf_read_bulletin(ECORE_LEADING_HWFN(edev),
2932                                        &bulletin_change);
2933                 if (bulletin_change) {
2934                         is_mac_exist =
2935                             ecore_vf_bulletin_get_forced_mac(
2936                                                 ECORE_LEADING_HWFN(edev),
2937                                                 vf_mac,
2938                                                 &is_mac_forced);
2939                         if (is_mac_exist && is_mac_forced) {
2940                                 DP_INFO(edev, "VF macaddr received from PF\n");
2941                                 ether_addr_copy((struct ether_addr *)&vf_mac,
2942                                                 &eth_dev->data->mac_addrs[0]);
2943                                 ether_addr_copy(&eth_dev->data->mac_addrs[0],
2944                                                 &adapter->primary_mac);
2945                         } else {
2946                                 DP_ERR(edev, "No VF macaddr assigned\n");
2947                         }
2948                 }
2949         }
2950
2951         eth_dev->dev_ops = (is_vf) ? &qede_eth_vf_dev_ops : &qede_eth_dev_ops;
2952
2953         if (do_once) {
2954 #ifdef RTE_LIBRTE_QEDE_DEBUG_INFO
2955                 qede_print_adapter_info(adapter);
2956 #endif
2957                 do_once = false;
2958         }
2959
2960         adapter->num_tx_queues = 0;
2961         adapter->num_rx_queues = 0;
2962         SLIST_INIT(&adapter->fdir_info.fdir_list_head);
2963         SLIST_INIT(&adapter->vlan_list_head);
2964         SLIST_INIT(&adapter->uc_list_head);
2965         adapter->mtu = ETHER_MTU;
2966         adapter->new_mtu = ETHER_MTU;
2967         if (!is_vf)
2968                 if (qede_start_vport(adapter, adapter->mtu))
2969                         return -1;
2970
2971         DP_INFO(edev, "MAC address : %02x:%02x:%02x:%02x:%02x:%02x\n",
2972                 adapter->primary_mac.addr_bytes[0],
2973                 adapter->primary_mac.addr_bytes[1],
2974                 adapter->primary_mac.addr_bytes[2],
2975                 adapter->primary_mac.addr_bytes[3],
2976                 adapter->primary_mac.addr_bytes[4],
2977                 adapter->primary_mac.addr_bytes[5]);
2978
2979         DP_INFO(edev, "Device initialized\n");
2980
2981         return 0;
2982 }
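/*
 * Illustrative sketch (not part of the driver): once qede_common_dev_init()
 * has installed the ops table and MAC address, a minimal application-side
 * bring-up of a qede port looks roughly as follows. Queue and descriptor
 * counts and the mbuf pool are placeholder values.
 *
 *   struct rte_eth_conf port_conf;
 *
 *   memset(&port_conf, 0, sizeof(port_conf));
 *   rte_eth_dev_configure(port_id, 1, 1, &port_conf);
 *   rte_eth_rx_queue_setup(port_id, 0, 512,
 *                          rte_eth_dev_socket_id(port_id), NULL, mbuf_pool);
 *   rte_eth_tx_queue_setup(port_id, 0, 512,
 *                          rte_eth_dev_socket_id(port_id), NULL);
 *   rte_eth_dev_start(port_id);
 */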
2983
2984 static int qedevf_eth_dev_init(struct rte_eth_dev *eth_dev)
2985 {
2986         return qede_common_dev_init(eth_dev, 1);
2987 }
2988
2989 static int qede_eth_dev_init(struct rte_eth_dev *eth_dev)
2990 {
2991         return qede_common_dev_init(eth_dev, 0);
2992 }
2993
2994 static int qede_dev_common_uninit(struct rte_eth_dev *eth_dev)
2995 {
2996 #ifdef RTE_LIBRTE_QEDE_DEBUG_INIT
2997         struct qede_dev *qdev = eth_dev->data->dev_private;
2998         struct ecore_dev *edev = &qdev->edev;
2999
3000         PMD_INIT_FUNC_TRACE(edev);
3001 #endif
3002
3003         /* Only uninitialize in the primary process */
3004         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
3005                 return 0;
3006
3007         /* Safe to close the device here */
3008         qede_dev_close(eth_dev);
3009
3010         eth_dev->dev_ops = NULL;
3011         eth_dev->rx_pkt_burst = NULL;
3012         eth_dev->tx_pkt_burst = NULL;
3013
3014         if (eth_dev->data->mac_addrs)
3015                 rte_free(eth_dev->data->mac_addrs);
3016
3017         eth_dev->data->mac_addrs = NULL;
3018
3019         return 0;
3020 }
3021
3022 static int qede_eth_dev_uninit(struct rte_eth_dev *eth_dev)
3023 {
3024         return qede_dev_common_uninit(eth_dev);
3025 }
3026
3027 static int qedevf_eth_dev_uninit(struct rte_eth_dev *eth_dev)
3028 {
3029         return qede_dev_common_uninit(eth_dev);
3030 }
3031
3032 static const struct rte_pci_id pci_id_qedevf_map[] = {
3033 #define QEDEVF_RTE_PCI_DEVICE(dev) RTE_PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, dev)
3034         {
3035                 QEDEVF_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_NX2_VF)
3036         },
3037         {
3038                 QEDEVF_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_57980S_IOV)
3039         },
3040         {
3041                 QEDEVF_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_AH_IOV)
3042         },
3043         {.vendor_id = 0,}
3044 };
3045
3046 static const struct rte_pci_id pci_id_qede_map[] = {
3047 #define QEDE_RTE_PCI_DEVICE(dev) RTE_PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, dev)
3048         {
3049                 QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_NX2_57980E)
3050         },
3051         {
3052                 QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_NX2_57980S)
3053         },
3054         {
3055                 QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_57980S_40)
3056         },
3057         {
3058                 QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_57980S_25)
3059         },
3060         {
3061                 QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_57980S_100)
3062         },
3063         {
3064                 QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_57980S_50)
3065         },
3066         {
3067                 QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_AH_50G)
3068         },
3069         {
3070                 QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_AH_10G)
3071         },
3072         {
3073                 QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_AH_40G)
3074         },
3075         {
3076                 QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_AH_25G)
3077         },
3078         {.vendor_id = 0,}
3079 };
3080
3081 static int qedevf_eth_dev_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
3082         struct rte_pci_device *pci_dev)
3083 {
3084         return rte_eth_dev_pci_generic_probe(pci_dev,
3085                 sizeof(struct qede_dev), qedevf_eth_dev_init);
3086 }
3087
3088 static int qedevf_eth_dev_pci_remove(struct rte_pci_device *pci_dev)
3089 {
3090         return rte_eth_dev_pci_generic_remove(pci_dev, qedevf_eth_dev_uninit);
3091 }
3092
3093 static struct rte_pci_driver rte_qedevf_pmd = {
3094         .id_table = pci_id_qedevf_map,
3095         .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
3096         .probe = qedevf_eth_dev_pci_probe,
3097         .remove = qedevf_eth_dev_pci_remove,
3098 };
3099
3100 static int qede_eth_dev_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
3101         struct rte_pci_device *pci_dev)
3102 {
3103         return rte_eth_dev_pci_generic_probe(pci_dev,
3104                 sizeof(struct qede_dev), qede_eth_dev_init);
3105 }
3106
3107 static int qede_eth_dev_pci_remove(struct rte_pci_device *pci_dev)
3108 {
3109         return rte_eth_dev_pci_generic_remove(pci_dev, qede_eth_dev_uninit);
3110 }
3111
3112 static struct rte_pci_driver rte_qede_pmd = {
3113         .id_table = pci_id_qede_map,
3114         .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
3115         .probe = qede_eth_dev_pci_probe,
3116         .remove = qede_eth_dev_pci_remove,
3117 };
3118
3119 RTE_PMD_REGISTER_PCI(net_qede, rte_qede_pmd);
3120 RTE_PMD_REGISTER_PCI_TABLE(net_qede, pci_id_qede_map);
3121 RTE_PMD_REGISTER_KMOD_DEP(net_qede, "* igb_uio | uio_pci_generic | vfio-pci");
3122 RTE_PMD_REGISTER_PCI(net_qede_vf, rte_qedevf_pmd);
3123 RTE_PMD_REGISTER_PCI_TABLE(net_qede_vf, pci_id_qedevf_map);
3124 RTE_PMD_REGISTER_KMOD_DEP(net_qede_vf, "* igb_uio | vfio-pci");
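/*
 * Illustrative usage note (not part of the driver): with the PMDs registered
 * above, a qede device bound to one of the listed kernel modules is probed
 * automatically by EAL. A whitelisted testpmd run might look like the line
 * below; the PCI address is a placeholder.
 *
 *   testpmd -l 0-3 -n 4 -w 0000:05:00.0 -- -i
 */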