/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2016 - 2018 Cavium Inc.
 * All rights reserved.
 * www.cavium.com
 */

#include "qede_ethdev.h"
#include <rte_alarm.h>
#include <rte_version.h>
#include <rte_kvargs.h>

/* Globals */
int qede_logtype_init;
int qede_logtype_driver;

static const struct qed_eth_ops *qed_ops;
static int qede_eth_dev_uninit(struct rte_eth_dev *eth_dev);
static int qede_eth_dev_init(struct rte_eth_dev *eth_dev);

#define QEDE_SP_TIMER_PERIOD    10000 /* 10ms (rte_eal_alarm_set() takes microseconds) */

struct rte_qede_xstats_name_off {
        char name[RTE_ETH_XSTATS_NAME_SIZE];
        uint64_t offset;
};

static const struct rte_qede_xstats_name_off qede_xstats_strings[] = {
        {"rx_unicast_bytes",
                offsetof(struct ecore_eth_stats_common, rx_ucast_bytes)},
        {"rx_multicast_bytes",
                offsetof(struct ecore_eth_stats_common, rx_mcast_bytes)},
        {"rx_broadcast_bytes",
                offsetof(struct ecore_eth_stats_common, rx_bcast_bytes)},
        {"rx_unicast_packets",
                offsetof(struct ecore_eth_stats_common, rx_ucast_pkts)},
        {"rx_multicast_packets",
                offsetof(struct ecore_eth_stats_common, rx_mcast_pkts)},
        {"rx_broadcast_packets",
                offsetof(struct ecore_eth_stats_common, rx_bcast_pkts)},

        {"tx_unicast_bytes",
                offsetof(struct ecore_eth_stats_common, tx_ucast_bytes)},
        {"tx_multicast_bytes",
                offsetof(struct ecore_eth_stats_common, tx_mcast_bytes)},
        {"tx_broadcast_bytes",
                offsetof(struct ecore_eth_stats_common, tx_bcast_bytes)},
        {"tx_unicast_packets",
                offsetof(struct ecore_eth_stats_common, tx_ucast_pkts)},
        {"tx_multicast_packets",
                offsetof(struct ecore_eth_stats_common, tx_mcast_pkts)},
        {"tx_broadcast_packets",
                offsetof(struct ecore_eth_stats_common, tx_bcast_pkts)},

        {"rx_64_byte_packets",
                offsetof(struct ecore_eth_stats_common, rx_64_byte_packets)},
        {"rx_65_to_127_byte_packets",
                offsetof(struct ecore_eth_stats_common,
                         rx_65_to_127_byte_packets)},
        {"rx_128_to_255_byte_packets",
                offsetof(struct ecore_eth_stats_common,
                         rx_128_to_255_byte_packets)},
        {"rx_256_to_511_byte_packets",
                offsetof(struct ecore_eth_stats_common,
                         rx_256_to_511_byte_packets)},
        {"rx_512_to_1023_byte_packets",
                offsetof(struct ecore_eth_stats_common,
                         rx_512_to_1023_byte_packets)},
        {"rx_1024_to_1518_byte_packets",
                offsetof(struct ecore_eth_stats_common,
                         rx_1024_to_1518_byte_packets)},
        {"tx_64_byte_packets",
                offsetof(struct ecore_eth_stats_common, tx_64_byte_packets)},
        {"tx_65_to_127_byte_packets",
                offsetof(struct ecore_eth_stats_common,
                         tx_65_to_127_byte_packets)},
        {"tx_128_to_255_byte_packets",
                offsetof(struct ecore_eth_stats_common,
                         tx_128_to_255_byte_packets)},
        {"tx_256_to_511_byte_packets",
                offsetof(struct ecore_eth_stats_common,
                         tx_256_to_511_byte_packets)},
        {"tx_512_to_1023_byte_packets",
                offsetof(struct ecore_eth_stats_common,
                         tx_512_to_1023_byte_packets)},
        {"tx_1024_to_1518_byte_packets",
                offsetof(struct ecore_eth_stats_common,
                         tx_1024_to_1518_byte_packets)},

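        /* Note: "crtl" below matches the (misspelled) field name exposed by
         * the base-driver header; the xstat name is kept in sync with it.
         */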
        {"rx_mac_crtl_frames",
                offsetof(struct ecore_eth_stats_common, rx_mac_crtl_frames)},
        {"tx_mac_control_frames",
                offsetof(struct ecore_eth_stats_common, tx_mac_ctrl_frames)},
        {"rx_pause_frames",
                offsetof(struct ecore_eth_stats_common, rx_pause_frames)},
        {"tx_pause_frames",
                offsetof(struct ecore_eth_stats_common, tx_pause_frames)},
        {"rx_priority_flow_control_frames",
                offsetof(struct ecore_eth_stats_common, rx_pfc_frames)},
        {"tx_priority_flow_control_frames",
                offsetof(struct ecore_eth_stats_common, tx_pfc_frames)},

        {"rx_crc_errors",
                offsetof(struct ecore_eth_stats_common, rx_crc_errors)},
        {"rx_align_errors",
                offsetof(struct ecore_eth_stats_common, rx_align_errors)},
        {"rx_carrier_errors",
                offsetof(struct ecore_eth_stats_common, rx_carrier_errors)},
        {"rx_oversize_packet_errors",
                offsetof(struct ecore_eth_stats_common, rx_oversize_packets)},
        {"rx_jabber_errors",
                offsetof(struct ecore_eth_stats_common, rx_jabbers)},
        {"rx_undersize_packet_errors",
                offsetof(struct ecore_eth_stats_common, rx_undersize_packets)},
        {"rx_fragments", offsetof(struct ecore_eth_stats_common, rx_fragments)},
        {"rx_host_buffer_not_available",
                offsetof(struct ecore_eth_stats_common, no_buff_discards)},
        /* Number of packets discarded because they are bigger than MTU */
        {"rx_packet_too_big_discards",
                offsetof(struct ecore_eth_stats_common,
                         packet_too_big_discard)},
        {"rx_ttl_zero_discards",
                offsetof(struct ecore_eth_stats_common, ttl0_discard)},
        {"rx_multi_function_tag_filter_discards",
                offsetof(struct ecore_eth_stats_common, mftag_filter_discards)},
        {"rx_mac_filter_discards",
                offsetof(struct ecore_eth_stats_common, mac_filter_discards)},
        {"rx_hw_buffer_truncates",
                offsetof(struct ecore_eth_stats_common, brb_truncates)},
        {"rx_hw_buffer_discards",
                offsetof(struct ecore_eth_stats_common, brb_discards)},
        {"tx_error_drop_packets",
                offsetof(struct ecore_eth_stats_common, tx_err_drop_pkts)},

        {"rx_mac_bytes", offsetof(struct ecore_eth_stats_common, rx_mac_bytes)},
        {"rx_mac_unicast_packets",
                offsetof(struct ecore_eth_stats_common, rx_mac_uc_packets)},
        {"rx_mac_multicast_packets",
                offsetof(struct ecore_eth_stats_common, rx_mac_mc_packets)},
        {"rx_mac_broadcast_packets",
                offsetof(struct ecore_eth_stats_common, rx_mac_bc_packets)},
        {"rx_mac_frames_ok",
                offsetof(struct ecore_eth_stats_common, rx_mac_frames_ok)},
        {"tx_mac_bytes", offsetof(struct ecore_eth_stats_common, tx_mac_bytes)},
        {"tx_mac_unicast_packets",
                offsetof(struct ecore_eth_stats_common, tx_mac_uc_packets)},
        {"tx_mac_multicast_packets",
                offsetof(struct ecore_eth_stats_common, tx_mac_mc_packets)},
        {"tx_mac_broadcast_packets",
                offsetof(struct ecore_eth_stats_common, tx_mac_bc_packets)},

        {"lro_coalesced_packets",
                offsetof(struct ecore_eth_stats_common, tpa_coalesced_pkts)},
        {"lro_coalesced_events",
                offsetof(struct ecore_eth_stats_common, tpa_coalesced_events)},
        {"lro_aborts_num",
                offsetof(struct ecore_eth_stats_common, tpa_aborts_num)},
        {"lro_not_coalesced_packets",
                offsetof(struct ecore_eth_stats_common,
                         tpa_not_coalesced_pkts)},
        {"lro_coalesced_bytes",
                offsetof(struct ecore_eth_stats_common,
                         tpa_coalesced_bytes)},
};

static const struct rte_qede_xstats_name_off qede_bb_xstats_strings[] = {
        {"rx_1519_to_1522_byte_packets",
                offsetof(struct ecore_eth_stats, bb) +
                offsetof(struct ecore_eth_stats_bb,
                         rx_1519_to_1522_byte_packets)},
        {"rx_1519_to_2047_byte_packets",
                offsetof(struct ecore_eth_stats, bb) +
                offsetof(struct ecore_eth_stats_bb,
                         rx_1519_to_2047_byte_packets)},
        {"rx_2048_to_4095_byte_packets",
                offsetof(struct ecore_eth_stats, bb) +
                offsetof(struct ecore_eth_stats_bb,
                         rx_2048_to_4095_byte_packets)},
        {"rx_4096_to_9216_byte_packets",
                offsetof(struct ecore_eth_stats, bb) +
                offsetof(struct ecore_eth_stats_bb,
                         rx_4096_to_9216_byte_packets)},
        {"rx_9217_to_16383_byte_packets",
                offsetof(struct ecore_eth_stats, bb) +
                offsetof(struct ecore_eth_stats_bb,
                         rx_9217_to_16383_byte_packets)},

        {"tx_1519_to_2047_byte_packets",
                offsetof(struct ecore_eth_stats, bb) +
                offsetof(struct ecore_eth_stats_bb,
                         tx_1519_to_2047_byte_packets)},
        {"tx_2048_to_4095_byte_packets",
                offsetof(struct ecore_eth_stats, bb) +
                offsetof(struct ecore_eth_stats_bb,
                         tx_2048_to_4095_byte_packets)},
        {"tx_4096_to_9216_byte_packets",
                offsetof(struct ecore_eth_stats, bb) +
                offsetof(struct ecore_eth_stats_bb,
                         tx_4096_to_9216_byte_packets)},
        {"tx_9217_to_16383_byte_packets",
                offsetof(struct ecore_eth_stats, bb) +
                offsetof(struct ecore_eth_stats_bb,
                         tx_9217_to_16383_byte_packets)},

        {"tx_lpi_entry_count",
                offsetof(struct ecore_eth_stats, bb) +
                offsetof(struct ecore_eth_stats_bb, tx_lpi_entry_count)},
        {"tx_total_collisions",
                offsetof(struct ecore_eth_stats, bb) +
                offsetof(struct ecore_eth_stats_bb, tx_total_collisions)},
};

static const struct rte_qede_xstats_name_off qede_ah_xstats_strings[] = {
        {"rx_1519_to_max_byte_packets",
                offsetof(struct ecore_eth_stats, ah) +
                offsetof(struct ecore_eth_stats_ah,
                         rx_1519_to_max_byte_packets)},
        {"tx_1519_to_max_byte_packets",
                offsetof(struct ecore_eth_stats, ah) +
                offsetof(struct ecore_eth_stats_ah,
                         tx_1519_to_max_byte_packets)},
};

static const struct rte_qede_xstats_name_off qede_rxq_xstats_strings[] = {
        {"rx_q_segments",
                offsetof(struct qede_rx_queue, rx_segs)},
        {"rx_q_hw_errors",
                offsetof(struct qede_rx_queue, rx_hw_errors)},
        {"rx_q_allocation_errors",
                offsetof(struct qede_rx_queue, rx_alloc_errors)}
};

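/* Illustrative sketch (not part of the upstream driver): each table above
 * pairs an xstat name with a byte offset into a stats structure, so a
 * generic reader can fetch any counter without per-field code.
 */
static inline uint64_t
qede_xstat_read_sketch(const void *stats_base, uint64_t offset)
{
        return *(const uint64_t *)((const char *)stats_base + offset);
}
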
static void qede_interrupt_action(struct ecore_hwfn *p_hwfn)
{
        ecore_int_sp_dpc((osal_int_ptr_t)(p_hwfn));
}

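/* Legacy INTx interrupt handler: the interrupt line may be shared, so read
 * the IGU status register and act only if this device asserted the interrupt.
 */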
static void
qede_interrupt_handler_intx(void *param)
{
        struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param;
        struct qede_dev *qdev = eth_dev->data->dev_private;
        struct ecore_dev *edev = &qdev->edev;
        u64 status;

        /* Check if our device actually raised an interrupt */
        status = ecore_int_igu_read_sisr_reg(ECORE_LEADING_HWFN(edev));
        if (status & 0x1) {
                qede_interrupt_action(ECORE_LEADING_HWFN(edev));

                if (rte_intr_enable(eth_dev->intr_handle))
                        DP_ERR(edev, "rte_intr_enable failed\n");
        }
}

static void
qede_interrupt_handler(void *param)
{
        struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param;
        struct qede_dev *qdev = eth_dev->data->dev_private;
        struct ecore_dev *edev = &qdev->edev;

        qede_interrupt_action(ECORE_LEADING_HWFN(edev));
        if (rte_intr_enable(eth_dev->intr_handle))
                DP_ERR(edev, "rte_intr_enable failed\n");
}

static void
qede_alloc_etherdev(struct qede_dev *qdev, struct qed_dev_eth_info *info)
{
        rte_memcpy(&qdev->dev_info, info, sizeof(*info));
        qdev->ops = qed_ops;
}

static void qede_print_adapter_info(struct qede_dev *qdev)
{
        struct ecore_dev *edev = &qdev->edev;
        struct qed_dev_info *info = &qdev->dev_info.common;
        static char drv_ver[QEDE_PMD_DRV_VER_STR_SIZE];
        static char ver_str[QEDE_PMD_DRV_VER_STR_SIZE];

        DP_INFO(edev, "*********************************\n");
        DP_INFO(edev, " DPDK version:%s\n", rte_version());
        DP_INFO(edev, " Chip details : %s %c%d\n",
                  ECORE_IS_BB(edev) ? "BB" : "AH",
                  'A' + edev->chip_rev,
                  (int)edev->chip_metal);
        snprintf(ver_str, QEDE_PMD_DRV_VER_STR_SIZE, "%d.%d.%d.%d",
                 info->fw_major, info->fw_minor, info->fw_rev, info->fw_eng);
        snprintf(drv_ver, QEDE_PMD_DRV_VER_STR_SIZE, "%s_%s",
                 ver_str, QEDE_PMD_VERSION);
        DP_INFO(edev, " Driver version : %s\n", drv_ver);
        DP_INFO(edev, " Firmware version : %s\n", ver_str);

        snprintf(ver_str, MCP_DRV_VER_STR_SIZE,
                 "%d.%d.%d.%d",
                (info->mfw_rev >> 24) & 0xff,
                (info->mfw_rev >> 16) & 0xff,
                (info->mfw_rev >> 8) & 0xff, (info->mfw_rev) & 0xff);
        DP_INFO(edev, " Management Firmware version : %s\n", ver_str);
        DP_INFO(edev, " Firmware file : %s\n", qede_fw_file);
        DP_INFO(edev, "*********************************\n");
}

static void qede_reset_queue_stats(struct qede_dev *qdev, bool xstats)
{
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
        unsigned int i = 0, j = 0, qid;
        unsigned int rxq_stat_cntrs, txq_stat_cntrs;
        struct qede_tx_queue *txq;

        DP_VERBOSE(edev, ECORE_MSG_DEBUG, "Clearing queue stats\n");

        rxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(qdev),
                               RTE_ETHDEV_QUEUE_STAT_CNTRS);
        txq_stat_cntrs = RTE_MIN(QEDE_TSS_COUNT(qdev),
                               RTE_ETHDEV_QUEUE_STAT_CNTRS);

        for_each_rss(qid) {
                OSAL_MEMSET(((char *)(qdev->fp_array[qid].rxq)) +
                             offsetof(struct qede_rx_queue, rcv_pkts), 0,
                            sizeof(uint64_t));
                OSAL_MEMSET(((char *)(qdev->fp_array[qid].rxq)) +
                             offsetof(struct qede_rx_queue, rx_hw_errors), 0,
                            sizeof(uint64_t));
                OSAL_MEMSET(((char *)(qdev->fp_array[qid].rxq)) +
                             offsetof(struct qede_rx_queue, rx_alloc_errors), 0,
                            sizeof(uint64_t));

                if (xstats)
                        for (j = 0; j < RTE_DIM(qede_rxq_xstats_strings); j++)
                                OSAL_MEMSET((((char *)
                                              (qdev->fp_array[qid].rxq)) +
                                             qede_rxq_xstats_strings[j].offset),
                                            0,
                                            sizeof(uint64_t));

                i++;
                if (i == rxq_stat_cntrs)
                        break;
        }

        i = 0;

        for_each_tss(qid) {
                txq = qdev->fp_array[qid].txq;

                OSAL_MEMSET((uint64_t *)(uintptr_t)
                                (((uint64_t)(uintptr_t)(txq)) +
                                 offsetof(struct qede_tx_queue, xmit_pkts)), 0,
                            sizeof(uint64_t));

                i++;
                if (i == txq_stat_cntrs)
                        break;
        }
}

static int
qede_stop_vport(struct ecore_dev *edev)
{
        struct ecore_hwfn *p_hwfn;
        uint8_t vport_id;
        int rc;
        int i;

        vport_id = 0;
        for_each_hwfn(edev, i) {
                p_hwfn = &edev->hwfns[i];
                rc = ecore_sp_vport_stop(p_hwfn, p_hwfn->hw_info.opaque_fid,
                                         vport_id);
                if (rc != ECORE_SUCCESS) {
                        DP_ERR(edev, "Stop V-PORT failed rc = %d\n", rc);
                        return rc;
                }
        }

        DP_INFO(edev, "vport stopped\n");

        return 0;
}

static int
qede_start_vport(struct qede_dev *qdev, uint16_t mtu)
{
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
        struct ecore_sp_vport_start_params params;
        struct ecore_hwfn *p_hwfn;
        int rc;
        int i;

        if (qdev->vport_started)
                qede_stop_vport(edev);

        memset(&params, 0, sizeof(params));
        params.vport_id = 0;
        params.mtu = mtu;
        /* @DPDK - Disable FW placement */
        params.zero_placement_offset = 1;
        for_each_hwfn(edev, i) {
                p_hwfn = &edev->hwfns[i];
                params.concrete_fid = p_hwfn->hw_info.concrete_fid;
                params.opaque_fid = p_hwfn->hw_info.opaque_fid;
                rc = ecore_sp_vport_start(p_hwfn, &params);
                if (rc != ECORE_SUCCESS) {
                        DP_ERR(edev, "Start V-PORT failed %d\n", rc);
                        return rc;
                }
        }
        ecore_reset_vport_stats(edev);
        qdev->vport_started = true;
        DP_INFO(edev, "VPORT started with MTU = %u\n", mtu);

        return 0;
}

#define QEDE_NPAR_TX_SWITCHING          "npar_tx_switching"
#define QEDE_VF_TX_SWITCHING            "vf_tx_switching"

/* Activate or deactivate vport via vport-update */
int qede_activate_vport(struct rte_eth_dev *eth_dev, bool flg)
{
        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
        struct ecore_sp_vport_update_params params;
        struct ecore_hwfn *p_hwfn;
        uint8_t i;
        int rc = -1;

        memset(&params, 0, sizeof(struct ecore_sp_vport_update_params));
        params.vport_id = 0;
        params.update_vport_active_rx_flg = 1;
        params.update_vport_active_tx_flg = 1;
        params.vport_active_rx_flg = flg;
        params.vport_active_tx_flg = flg;
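        /* For 0/1 values "~x & flg" behaves like "!x && flg": request a
         * Tx-switching update only when activating with Tx switching off.
         */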
        if (~qdev->enable_tx_switching & flg) {
                params.update_tx_switching_flg = 1;
                params.tx_switching_flg = !flg;
        }
        for_each_hwfn(edev, i) {
                p_hwfn = &edev->hwfns[i];
                params.opaque_fid = p_hwfn->hw_info.opaque_fid;
                rc = ecore_sp_vport_update(p_hwfn, &params,
                                ECORE_SPQ_MODE_EBLOCK, NULL);
                if (rc != ECORE_SUCCESS) {
                        DP_ERR(edev, "Failed to update vport\n");
                        break;
                }
        }
        DP_INFO(edev, "vport is %s\n", flg ? "activated" : "deactivated");

        return rc;
}

static void
qede_update_sge_tpa_params(struct ecore_sge_tpa_params *sge_tpa_params,
                           uint16_t mtu, bool enable)
{
        /* Enable LRO in split mode */
        sge_tpa_params->tpa_ipv4_en_flg = enable;
        sge_tpa_params->tpa_ipv6_en_flg = enable;
        sge_tpa_params->tpa_ipv4_tunn_en_flg = enable;
        sge_tpa_params->tpa_ipv6_tunn_en_flg = enable;
        /* set if tpa enable changes */
        sge_tpa_params->update_tpa_en_flg = 1;
        /* set if tpa parameters should be handled */
        sge_tpa_params->update_tpa_param_flg = enable;

        sge_tpa_params->max_buffers_per_cqe = 20;
        /* Enable TPA in split mode. In this mode each TPA segment
         * starts on a new BD, so there is one BD per segment.
         */
        sge_tpa_params->tpa_pkt_split_flg = 1;
        sge_tpa_params->tpa_hdr_data_split_flg = 0;
        sge_tpa_params->tpa_gro_consistent_flg = 0;
        sge_tpa_params->tpa_max_aggs_num = ETH_TPA_MAX_AGGS_NUM;
        sge_tpa_params->tpa_max_size = 0x7FFF;
        sge_tpa_params->tpa_min_size_to_start = mtu / 2;
        sge_tpa_params->tpa_min_size_to_cont = mtu / 2;
}

/* Enable/disable LRO via vport-update */
int qede_enable_tpa(struct rte_eth_dev *eth_dev, bool flg)
{
        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
        struct ecore_sp_vport_update_params params;
        struct ecore_sge_tpa_params tpa_params;
        struct ecore_hwfn *p_hwfn;
        int rc;
        int i;

        memset(&params, 0, sizeof(struct ecore_sp_vport_update_params));
        memset(&tpa_params, 0, sizeof(struct ecore_sge_tpa_params));
        qede_update_sge_tpa_params(&tpa_params, qdev->mtu, flg);
        params.vport_id = 0;
        params.sge_tpa_params = &tpa_params;
        for_each_hwfn(edev, i) {
                p_hwfn = &edev->hwfns[i];
                params.opaque_fid = p_hwfn->hw_info.opaque_fid;
                rc = ecore_sp_vport_update(p_hwfn, &params,
                                ECORE_SPQ_MODE_EBLOCK, NULL);
                if (rc != ECORE_SUCCESS) {
                        DP_ERR(edev, "Failed to update LRO\n");
                        return -1;
                }
        }
        qdev->enable_lro = flg;
        eth_dev->data->lro = flg;

        DP_INFO(edev, "LRO is %s\n", flg ? "enabled" : "disabled");

        return 0;
}

static int
qed_configure_filter_rx_mode(struct rte_eth_dev *eth_dev,
                             enum qed_filter_rx_mode_type type)
{
        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
        struct ecore_filter_accept_flags flags;

        memset(&flags, 0, sizeof(flags));

        flags.update_rx_mode_config = 1;
        flags.update_tx_mode_config = 1;
        flags.rx_accept_filter = ECORE_ACCEPT_UCAST_MATCHED |
                ECORE_ACCEPT_MCAST_MATCHED |
                ECORE_ACCEPT_BCAST;

        flags.tx_accept_filter = ECORE_ACCEPT_UCAST_MATCHED |
                ECORE_ACCEPT_MCAST_MATCHED |
                ECORE_ACCEPT_BCAST;

        if (type == QED_FILTER_RX_MODE_TYPE_PROMISC) {
                flags.rx_accept_filter |= ECORE_ACCEPT_UCAST_UNMATCHED;
                if (IS_VF(edev)) {
                        flags.tx_accept_filter |= ECORE_ACCEPT_UCAST_UNMATCHED;
                        DP_INFO(edev, "Enabling Tx unmatched flag for VF\n");
                }
        } else if (type == QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC) {
                flags.rx_accept_filter |= ECORE_ACCEPT_MCAST_UNMATCHED;
        } else if (type == (QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC |
                                QED_FILTER_RX_MODE_TYPE_PROMISC)) {
                flags.rx_accept_filter |= ECORE_ACCEPT_UCAST_UNMATCHED |
                        ECORE_ACCEPT_MCAST_UNMATCHED;
        }

        return ecore_filter_accept_cmd(edev, 0, flags, false, false,
                        ECORE_SPQ_MODE_CB, NULL);
}

int
qede_ucast_filter(struct rte_eth_dev *eth_dev, struct ecore_filter_ucast *ucast,
                  bool add)
{
        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
        struct qede_ucast_entry *tmp = NULL;
        struct qede_ucast_entry *u;
        struct ether_addr *mac_addr;

        mac_addr = (struct ether_addr *)ucast->mac;
        if (add) {
                SLIST_FOREACH(tmp, &qdev->uc_list_head, list) {
                        if ((memcmp(mac_addr, &tmp->mac,
                                    ETHER_ADDR_LEN) == 0) &&
                             ucast->vni == tmp->vni &&
                             ucast->vlan == tmp->vlan) {
                                DP_INFO(edev, "Unicast MAC is already added"
                                        " with vlan = %u, vni = %u\n",
                                        ucast->vlan, ucast->vni);
                                return 0;
                        }
                }
                u = rte_malloc(NULL, sizeof(struct qede_ucast_entry),
                               RTE_CACHE_LINE_SIZE);
                if (!u) {
                        DP_ERR(edev, "Did not allocate memory for ucast\n");
                        return -ENOMEM;
                }
                ether_addr_copy(mac_addr, &u->mac);
                u->vlan = ucast->vlan;
                u->vni = ucast->vni;
                SLIST_INSERT_HEAD(&qdev->uc_list_head, u, list);
                qdev->num_uc_addr++;
        } else {
                SLIST_FOREACH(tmp, &qdev->uc_list_head, list) {
                        if ((memcmp(mac_addr, &tmp->mac,
                                    ETHER_ADDR_LEN) == 0) &&
                            ucast->vlan == tmp->vlan &&
                            ucast->vni == tmp->vni)
                                break;
                }
                if (tmp == NULL) {
                        DP_INFO(edev, "Unicast MAC is not found\n");
                        return -EINVAL;
                }
                SLIST_REMOVE(&qdev->uc_list_head, tmp, qede_ucast_entry, list);
                qdev->num_uc_addr--;
        }

        return 0;
}

static int
qede_add_mcast_filters(struct rte_eth_dev *eth_dev, struct ether_addr *mc_addrs,
                       uint32_t mc_addrs_num)
{
        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
        struct ecore_filter_mcast mcast;
        struct qede_mcast_entry *m = NULL;
        uint8_t i;
        int rc;

        for (i = 0; i < mc_addrs_num; i++) {
                m = rte_malloc(NULL, sizeof(struct qede_mcast_entry),
                               RTE_CACHE_LINE_SIZE);
                if (!m) {
                        DP_ERR(edev, "Did not allocate memory for mcast\n");
                        return -ENOMEM;
                }
                ether_addr_copy(&mc_addrs[i], &m->mac);
                SLIST_INSERT_HEAD(&qdev->mc_list_head, m, list);
        }
        memset(&mcast, 0, sizeof(mcast));
        mcast.num_mc_addrs = mc_addrs_num;
        mcast.opcode = ECORE_FILTER_ADD;
        for (i = 0; i < mc_addrs_num; i++)
                ether_addr_copy(&mc_addrs[i], (struct ether_addr *)
                                                        &mcast.mac[i]);
        rc = ecore_filter_mcast_cmd(edev, &mcast, ECORE_SPQ_MODE_CB, NULL);
        if (rc != ECORE_SUCCESS) {
                DP_ERR(edev, "Failed to add multicast filter (rc = %d)\n", rc);
                return -1;
        }

        return 0;
}

static int qede_del_mcast_filters(struct rte_eth_dev *eth_dev)
{
        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
        struct qede_mcast_entry *tmp = NULL;
        struct ecore_filter_mcast mcast;
        int j;
        int rc;

        memset(&mcast, 0, sizeof(mcast));
        mcast.num_mc_addrs = qdev->num_mc_addr;
        mcast.opcode = ECORE_FILTER_REMOVE;
        j = 0;
        SLIST_FOREACH(tmp, &qdev->mc_list_head, list) {
                ether_addr_copy(&tmp->mac, (struct ether_addr *)&mcast.mac[j]);
                j++;
        }
        rc = ecore_filter_mcast_cmd(edev, &mcast, ECORE_SPQ_MODE_CB, NULL);
        if (rc != ECORE_SUCCESS) {
                DP_ERR(edev, "Failed to delete multicast filter\n");
                return -1;
        }
        /* Empty and re-initialize the list */
        while (!SLIST_EMPTY(&qdev->mc_list_head)) {
                tmp = SLIST_FIRST(&qdev->mc_list_head);
                SLIST_REMOVE_HEAD(&qdev->mc_list_head, list);
        }
        SLIST_INIT(&qdev->mc_list_head);

        return 0;
}

enum _ecore_status_t
qede_mac_int_ops(struct rte_eth_dev *eth_dev, struct ecore_filter_ucast *ucast,
                 bool add)
{
        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
        enum _ecore_status_t rc = ECORE_INVAL;

        if (add && (qdev->num_uc_addr >= qdev->dev_info.num_mac_filters)) {
                DP_ERR(edev, "Ucast filter table limit exceeded,"
                             " please enable promisc mode\n");
                return ECORE_INVAL;
        }

        rc = qede_ucast_filter(eth_dev, ucast, add);
        if (rc == 0)
                rc = ecore_filter_ucast_cmd(edev, ucast,
                                            ECORE_SPQ_MODE_CB, NULL);
        /* Indicate error only for the add-filter operation;
         * delete-filter failures are not severe.
         */
        if ((rc != ECORE_SUCCESS) && add)
                DP_ERR(edev, "MAC filter failed, rc = %d, op = %d\n",
                       rc, add);

        return rc;
}

static int
qede_mac_addr_add(struct rte_eth_dev *eth_dev, struct ether_addr *mac_addr,
                  __rte_unused uint32_t index, __rte_unused uint32_t pool)
{
        struct ecore_filter_ucast ucast;
        int rc;

        if (!is_valid_assigned_ether_addr(mac_addr))
                return -EINVAL;

        qede_set_ucast_cmn_params(&ucast);
        ucast.opcode = ECORE_FILTER_ADD;
        ucast.type = ECORE_FILTER_MAC;
        ether_addr_copy(mac_addr, (struct ether_addr *)&ucast.mac);
        rc = (int)qede_mac_int_ops(eth_dev, &ucast, true);
        return rc;
}

static void
qede_mac_addr_remove(struct rte_eth_dev *eth_dev, uint32_t index)
{
        struct qede_dev *qdev = eth_dev->data->dev_private;
        struct ecore_dev *edev = &qdev->edev;
        struct ecore_filter_ucast ucast;

        PMD_INIT_FUNC_TRACE(edev);

        if (index >= qdev->dev_info.num_mac_filters) {
                DP_ERR(edev, "Index %u is above MAC filter limit %u\n",
                       index, qdev->dev_info.num_mac_filters);
                return;
        }

        if (!is_valid_assigned_ether_addr(&eth_dev->data->mac_addrs[index]))
                return;

        qede_set_ucast_cmn_params(&ucast);
        ucast.opcode = ECORE_FILTER_REMOVE;
        ucast.type = ECORE_FILTER_MAC;

        /* Use the index maintained by rte */
        ether_addr_copy(&eth_dev->data->mac_addrs[index],
                        (struct ether_addr *)&ucast.mac);

        qede_mac_int_ops(eth_dev, &ucast, false);
}

static int
qede_mac_addr_set(struct rte_eth_dev *eth_dev, struct ether_addr *mac_addr)
{
        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);

        if (IS_VF(edev) && !ecore_vf_check_mac(ECORE_LEADING_HWFN(edev),
                                               mac_addr->addr_bytes)) {
                DP_ERR(edev, "Setting MAC address is not allowed\n");
                return -EPERM;
        }

        qede_mac_addr_remove(eth_dev, 0);

        return qede_mac_addr_add(eth_dev, mac_addr, 0, 0);
}

void qede_config_accept_any_vlan(struct qede_dev *qdev, bool flg)
{
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
        struct ecore_sp_vport_update_params params;
        struct ecore_hwfn *p_hwfn;
        uint8_t i;
        int rc;

        memset(&params, 0, sizeof(struct ecore_sp_vport_update_params));
        params.vport_id = 0;
        params.update_accept_any_vlan_flg = 1;
        params.accept_any_vlan = flg;
        for_each_hwfn(edev, i) {
                p_hwfn = &edev->hwfns[i];
                params.opaque_fid = p_hwfn->hw_info.opaque_fid;
                rc = ecore_sp_vport_update(p_hwfn, &params,
                                ECORE_SPQ_MODE_EBLOCK, NULL);
                if (rc != ECORE_SUCCESS) {
                        DP_ERR(edev, "Failed to configure accept-any-vlan\n");
                        return;
                }
        }

        DP_INFO(edev, "%s accept-any-vlan\n", flg ? "enabled" : "disabled");
}

static int qede_vlan_stripping(struct rte_eth_dev *eth_dev, bool flg)
{
        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
        struct ecore_sp_vport_update_params params;
        struct ecore_hwfn *p_hwfn;
        uint8_t i;
        int rc;

        memset(&params, 0, sizeof(struct ecore_sp_vport_update_params));
        params.vport_id = 0;
        params.update_inner_vlan_removal_flg = 1;
        params.inner_vlan_removal_flg = flg;
        for_each_hwfn(edev, i) {
                p_hwfn = &edev->hwfns[i];
                params.opaque_fid = p_hwfn->hw_info.opaque_fid;
                rc = ecore_sp_vport_update(p_hwfn, &params,
                                ECORE_SPQ_MODE_EBLOCK, NULL);
                if (rc != ECORE_SUCCESS) {
                        DP_ERR(edev, "Failed to update vport\n");
                        return -1;
                }
        }

        DP_INFO(edev, "VLAN stripping %s\n", flg ? "enabled" : "disabled");
        return 0;
}

static int qede_vlan_filter_set(struct rte_eth_dev *eth_dev,
                                uint16_t vlan_id, int on)
{
        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
        struct qed_dev_eth_info *dev_info = &qdev->dev_info;
        struct qede_vlan_entry *tmp = NULL;
        struct qede_vlan_entry *vlan;
        struct ecore_filter_ucast ucast;
        int rc;

        if (on) {
                if (qdev->configured_vlans == dev_info->num_vlan_filters) {
                        DP_ERR(edev, "Reached max VLAN filter limit,"
                                     " enabling accept_any_vlan\n");
                        qede_config_accept_any_vlan(qdev, true);
                        return 0;
                }

                SLIST_FOREACH(tmp, &qdev->vlan_list_head, list) {
                        if (tmp->vid == vlan_id) {
                                DP_INFO(edev, "VLAN %u already configured\n",
                                        vlan_id);
                                return 0;
                        }
                }

                vlan = rte_malloc(NULL, sizeof(struct qede_vlan_entry),
                                  RTE_CACHE_LINE_SIZE);

                if (!vlan) {
                        DP_ERR(edev, "Did not allocate memory for VLAN\n");
                        return -ENOMEM;
                }

                qede_set_ucast_cmn_params(&ucast);
                ucast.opcode = ECORE_FILTER_ADD;
                ucast.type = ECORE_FILTER_VLAN;
                ucast.vlan = vlan_id;
                rc = ecore_filter_ucast_cmd(edev, &ucast, ECORE_SPQ_MODE_CB,
                                            NULL);
                if (rc != 0) {
                        DP_ERR(edev, "Failed to add VLAN %u rc %d\n", vlan_id,
                               rc);
                        rte_free(vlan);
                } else {
                        vlan->vid = vlan_id;
                        SLIST_INSERT_HEAD(&qdev->vlan_list_head, vlan, list);
                        qdev->configured_vlans++;
                        DP_INFO(edev, "VLAN %u added, configured_vlans %u\n",
                                vlan_id, qdev->configured_vlans);
                }
        } else {
                SLIST_FOREACH(tmp, &qdev->vlan_list_head, list) {
                        if (tmp->vid == vlan_id)
                                break;
                }

                if (!tmp) {
                        if (qdev->configured_vlans == 0) {
                                DP_INFO(edev,
                                        "No VLAN filters configured yet\n");
                                return 0;
                        }

                        DP_ERR(edev, "VLAN %u not configured\n", vlan_id);
                        return -EINVAL;
                }

                SLIST_REMOVE(&qdev->vlan_list_head, tmp, qede_vlan_entry, list);

                qede_set_ucast_cmn_params(&ucast);
                ucast.opcode = ECORE_FILTER_REMOVE;
                ucast.type = ECORE_FILTER_VLAN;
                ucast.vlan = vlan_id;
                rc = ecore_filter_ucast_cmd(edev, &ucast, ECORE_SPQ_MODE_CB,
                                            NULL);
                if (rc != 0) {
                        DP_ERR(edev, "Failed to delete VLAN %u rc %d\n",
                               vlan_id, rc);
                } else {
                        qdev->configured_vlans--;
                        DP_INFO(edev, "VLAN %u removed, configured_vlans %u\n",
                                vlan_id, qdev->configured_vlans);
                }
        }

        return rc;
}

static int qede_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask)
{
        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
        uint64_t rx_offloads = eth_dev->data->dev_conf.rxmode.offloads;

        if (mask & ETH_VLAN_STRIP_MASK) {
                if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
                        (void)qede_vlan_stripping(eth_dev, 1);
                else
                        (void)qede_vlan_stripping(eth_dev, 0);
        }

        if (mask & ETH_VLAN_FILTER_MASK) {
                /* VLAN filtering kicks in when a VLAN is added */
                if (rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER) {
                        qede_vlan_filter_set(eth_dev, 0, 1);
                } else {
                        if (qdev->configured_vlans > 1) { /* Excluding VLAN0 */
                                DP_ERR(edev,
                                       "Please remove existing VLAN filters"
                                       " before disabling VLAN filtering\n");
                                /* Signal app that VLAN filtering is still
                                 * enabled
                                 */
                                eth_dev->data->dev_conf.rxmode.offloads |=
                                                DEV_RX_OFFLOAD_VLAN_FILTER;
                        } else {
                                qede_vlan_filter_set(eth_dev, 0, 0);
                        }
                }
        }

        if (mask & ETH_VLAN_EXTEND_MASK)
                DP_ERR(edev, "Extend VLAN not supported\n");

        qdev->vlan_offload_mask = mask;

        DP_INFO(edev, "VLAN offload mask %d\n", mask);

        return 0;
}

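/* Fill buff with ECORE_RSS_KEY_SIZE pseudo-random words for use as a default
 * RSS hash key. rand() is not cryptographically strong, which is acceptable
 * here since the key only needs to spread flows across queues.
 */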
static void qede_prandom_bytes(uint32_t *buff)
{
        uint8_t i;

        srand((unsigned int)time(NULL));
        for (i = 0; i < ECORE_RSS_KEY_SIZE; i++)
                buff[i] = rand();
}

int qede_config_rss(struct rte_eth_dev *eth_dev)
{
        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
        uint32_t def_rss_key[ECORE_RSS_KEY_SIZE];
        struct rte_eth_rss_reta_entry64 reta_conf[2];
        struct rte_eth_rss_conf rss_conf;
        uint32_t i, id, pos, q;

        rss_conf = eth_dev->data->dev_conf.rx_adv_conf.rss_conf;
        if (!rss_conf.rss_key) {
                DP_INFO(edev, "Applying driver default key\n");
                rss_conf.rss_key_len = ECORE_RSS_KEY_SIZE * sizeof(uint32_t);
                qede_prandom_bytes(&def_rss_key[0]);
                rss_conf.rss_key = (uint8_t *)&def_rss_key[0];
        }

        /* Configure RSS hash */
        if (qede_rss_hash_update(eth_dev, &rss_conf))
                return -EINVAL;

        /* Configure default RETA */
        memset(reta_conf, 0, sizeof(reta_conf));
        for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++)
                reta_conf[i / RTE_RETA_GROUP_SIZE].mask = UINT64_MAX;

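        /* Spread queues round-robin across the indirection table: e.g. with
         * 4 Rx queues the table entries become 0,1,2,3,0,1,2,3,...
         */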
        for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++) {
                id = i / RTE_RETA_GROUP_SIZE;
                pos = i % RTE_RETA_GROUP_SIZE;
                q = i % QEDE_RSS_COUNT(qdev);
                reta_conf[id].reta[pos] = q;
        }
        if (qede_rss_reta_update(eth_dev, &reta_conf[0],
                                 ECORE_RSS_IND_TABLE_SIZE))
                return -EINVAL;

        return 0;
}

static void qede_fastpath_start(struct ecore_dev *edev)
{
        struct ecore_hwfn *p_hwfn;
        int i;

        for_each_hwfn(edev, i) {
                p_hwfn = &edev->hwfns[i];
                ecore_hw_start_fastpath(p_hwfn);
        }
}

static int qede_dev_start(struct rte_eth_dev *eth_dev)
{
        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
        struct rte_eth_rxmode *rxmode = &eth_dev->data->dev_conf.rxmode;

        PMD_INIT_FUNC_TRACE(edev);

        /* Update MTU only if it has changed */
        if (eth_dev->data->mtu != qdev->mtu) {
                if (qede_update_mtu(eth_dev, qdev->mtu))
                        goto err;
        }

        /* Configure TPA parameters */
        if (rxmode->offloads & DEV_RX_OFFLOAD_TCP_LRO) {
                if (qede_enable_tpa(eth_dev, true))
                        return -EINVAL;
                /* Enable scatter mode for LRO */
                if (!eth_dev->data->scattered_rx)
                        rxmode->offloads |= DEV_RX_OFFLOAD_SCATTER;
        }

        /* Start queues */
        if (qede_start_queues(eth_dev))
                goto err;

        if (IS_PF(edev))
                qede_reset_queue_stats(qdev, true);

        /* The newer SR-IOV PF driver expects RX/TX queues to be started
         * before enabling RSS, so RSS configuration is deferred up to this
         * point. We retain the same behavior in the PF case and therefore
         * skip a PF/VF-specific check here.
         */
        if (eth_dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS)
                if (qede_config_rss(eth_dev))
                        goto err;

        /* Enable vport */
        if (qede_activate_vport(eth_dev, true))
                goto err;

        /* Update link status */
        qede_link_update(eth_dev, 0);

        /* Start/resume traffic */
        qede_fastpath_start(edev);

        DP_INFO(edev, "Device started\n");

        return 0;
err:
        DP_ERR(edev, "Device start failed\n");
        return -1; /* common error code is < 0 */
}

static void qede_dev_stop(struct rte_eth_dev *eth_dev)
{
        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);

        PMD_INIT_FUNC_TRACE(edev);

        /* Disable vport */
        if (qede_activate_vport(eth_dev, false))
                return;

        if (qdev->enable_lro)
                qede_enable_tpa(eth_dev, false);

        /* Stop queues */
        qede_stop_queues(eth_dev);

        /* Disable traffic */
        ecore_hw_stop_fastpath(edev); /* TBD - loop */

        DP_INFO(edev, "Device is stopped\n");
}

static const char * const valid_args[] = {
        QEDE_NPAR_TX_SWITCHING,
        QEDE_VF_TX_SWITCHING,
        NULL,
};

static int qede_args_check(const char *key, const char *val, void *opaque)
{
        unsigned long tmp;
        int ret = 0;
        struct rte_eth_dev *eth_dev = opaque;
        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);

        errno = 0;
        tmp = strtoul(val, NULL, 0);
        if (errno) {
                DP_INFO(edev, "%s: \"%s\" is not a valid integer\n", key, val);
                return errno;
        }

        if ((strcmp(QEDE_NPAR_TX_SWITCHING, key) == 0) ||
            ((strcmp(QEDE_VF_TX_SWITCHING, key) == 0) && IS_VF(edev))) {
                qdev->enable_tx_switching = !!tmp;
                DP_INFO(edev, "%s tx-switching %s\n",
                        strcmp(QEDE_NPAR_TX_SWITCHING, key) ?
                        "VF" : "NPAR",
                        qdev->enable_tx_switching ? "enabled" : "disabled");
        }

        return ret;
}

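/* Parse driver devargs, e.g. (hypothetical PCI address):
 *   -w 05:00.0,npar_tx_switching=0
 * rte_kvargs_parse() rejects keys that are not listed in valid_args[].
 */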
static int qede_args(struct rte_eth_dev *eth_dev)
{
        struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
        struct rte_kvargs *kvlist;
        struct rte_devargs *devargs;
        int ret;
        int i;

        devargs = pci_dev->device.devargs;
        if (!devargs)
                return 0; /* return success */

        kvlist = rte_kvargs_parse(devargs->args, valid_args);
        if (kvlist == NULL)
                return -EINVAL;

        /* Process parameters. */
        for (i = 0; (valid_args[i] != NULL); ++i) {
                if (rte_kvargs_count(kvlist, valid_args[i])) {
                        ret = rte_kvargs_process(kvlist, valid_args[i],
                                                 qede_args_check, eth_dev);
                        if (ret != ECORE_SUCCESS) {
                                rte_kvargs_free(kvlist);
                                return ret;
                        }
                }
        }
        rte_kvargs_free(kvlist);

        return 0;
}

static int qede_dev_configure(struct rte_eth_dev *eth_dev)
{
        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
        struct rte_eth_rxmode *rxmode = &eth_dev->data->dev_conf.rxmode;
        int ret;

        PMD_INIT_FUNC_TRACE(edev);

        /* Check requirements for 100G mode */
        if (ECORE_IS_CMT(edev)) {
                if (eth_dev->data->nb_rx_queues < 2 ||
                    eth_dev->data->nb_tx_queues < 2) {
                        DP_ERR(edev, "100G mode needs min. 2 RX/TX queues\n");
                        return -EINVAL;
                }

                if ((eth_dev->data->nb_rx_queues % 2 != 0) ||
                    (eth_dev->data->nb_tx_queues % 2 != 0)) {
                        DP_ERR(edev,
                               "100G mode needs even no. of RX/TX queues\n");
                        return -EINVAL;
                }
        }

        /* We need at least one RX queue. rte_eth_dev_configure() performs
         * no minimum check, so we check it here.
         */
        if (eth_dev->data->nb_rx_queues == 0) {
                DP_ERR(edev, "Minimum one RX queue is required\n");
                return -EINVAL;
        }

        /* Enable Tx switching by default */
        qdev->enable_tx_switching = 1;

        /* Parse devargs */
        if (qede_args(eth_dev))
                DP_NOTICE(edev, false,
                          "Invalid devargs supplied, requested change will not take effect\n");

        if (!(rxmode->mq_mode == ETH_MQ_RX_NONE ||
              rxmode->mq_mode == ETH_MQ_RX_RSS)) {
                DP_ERR(edev, "Unsupported multi-queue mode\n");
                return -ENOTSUP;
        }
        /* Flow director mode check */
        if (qede_check_fdir_support(eth_dev))
                return -ENOTSUP;

        qede_dealloc_fp_resc(eth_dev);
        qdev->num_tx_queues = eth_dev->data->nb_tx_queues;
        qdev->num_rx_queues = eth_dev->data->nb_rx_queues;
        if (qede_alloc_fp_resc(qdev))
                return -ENOMEM;

        /* If jumbo frames are enabled, derive the MTU from the
         * max Rx packet length.
         */
        if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)
                eth_dev->data->mtu =
                        eth_dev->data->dev_conf.rxmode.max_rx_pkt_len -
                        ETHER_HDR_LEN - QEDE_ETH_OVERHEAD;

        if (rxmode->offloads & DEV_RX_OFFLOAD_SCATTER)
                eth_dev->data->scattered_rx = 1;

        if (qede_start_vport(qdev, eth_dev->data->mtu))
                return -1;

        qdev->mtu = eth_dev->data->mtu;

        /* Enable VLAN offloads by default */
        ret = qede_vlan_offload_set(eth_dev, ETH_VLAN_STRIP_MASK |
                                             ETH_VLAN_FILTER_MASK);
        if (ret)
                return ret;

        DP_INFO(edev, "Device configured with RSS=%d TSS=%d\n",
                        QEDE_RSS_COUNT(qdev), QEDE_TSS_COUNT(qdev));

        return 0;
}

/* Info about HW descriptor ring limitations */
static const struct rte_eth_desc_lim qede_rx_desc_lim = {
        .nb_max = 0x8000, /* 32K */
        .nb_min = 128,
        .nb_align = 128 /* lowest common multiple */
};

static const struct rte_eth_desc_lim qede_tx_desc_lim = {
        .nb_max = 0x8000, /* 32K */
        .nb_min = 256,
        .nb_align = 256,
        .nb_seg_max = ETH_TX_MAX_BDS_PER_LSO_PACKET,
        .nb_mtu_seg_max = ETH_TX_MAX_BDS_PER_NON_LSO_PACKET
};

static void
qede_dev_info_get(struct rte_eth_dev *eth_dev,
                  struct rte_eth_dev_info *dev_info)
{
        struct qede_dev *qdev = eth_dev->data->dev_private;
        struct ecore_dev *edev = &qdev->edev;
        struct qed_link_output link;
        uint32_t speed_cap = 0;

        PMD_INIT_FUNC_TRACE(edev);

        dev_info->min_rx_bufsize = (uint32_t)QEDE_MIN_RX_BUFF_SIZE;
        dev_info->max_rx_pktlen = (uint32_t)ETH_TX_MAX_NON_LSO_PKT_LEN;
        dev_info->rx_desc_lim = qede_rx_desc_lim;
        dev_info->tx_desc_lim = qede_tx_desc_lim;

        if (IS_PF(edev))
                dev_info->max_rx_queues = (uint16_t)RTE_MIN(
                        QEDE_MAX_RSS_CNT(qdev), QEDE_PF_NUM_CONNS / 2);
        else
                dev_info->max_rx_queues = (uint16_t)RTE_MIN(
                        QEDE_MAX_RSS_CNT(qdev), ECORE_MAX_VF_CHAINS_PER_PF);
        dev_info->max_tx_queues = dev_info->max_rx_queues;

        dev_info->max_mac_addrs = qdev->dev_info.num_mac_filters;
        dev_info->max_vfs = 0;
        dev_info->reta_size = ECORE_RSS_IND_TABLE_SIZE;
        dev_info->hash_key_size = ECORE_RSS_KEY_SIZE * sizeof(uint32_t);
        dev_info->flow_type_rss_offloads = (uint64_t)QEDE_RSS_OFFLOAD_ALL;
        dev_info->rx_offload_capa = (DEV_RX_OFFLOAD_IPV4_CKSUM  |
                                     DEV_RX_OFFLOAD_UDP_CKSUM   |
                                     DEV_RX_OFFLOAD_TCP_CKSUM   |
                                     DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
                                     DEV_RX_OFFLOAD_TCP_LRO     |
                                     DEV_RX_OFFLOAD_KEEP_CRC    |
                                     DEV_RX_OFFLOAD_SCATTER     |
                                     DEV_RX_OFFLOAD_JUMBO_FRAME |
                                     DEV_RX_OFFLOAD_VLAN_FILTER |
                                     DEV_RX_OFFLOAD_VLAN_STRIP);
        dev_info->rx_queue_offload_capa = 0;

        /* TX offloads are applied per packet, so they are applicable at
         * both the port and queue levels.
         */
        dev_info->tx_offload_capa = (DEV_TX_OFFLOAD_VLAN_INSERT |
                                     DEV_TX_OFFLOAD_IPV4_CKSUM  |
                                     DEV_TX_OFFLOAD_UDP_CKSUM   |
                                     DEV_TX_OFFLOAD_TCP_CKSUM   |
                                     DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
                                     DEV_TX_OFFLOAD_MULTI_SEGS  |
                                     DEV_TX_OFFLOAD_TCP_TSO     |
                                     DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
                                     DEV_TX_OFFLOAD_GENEVE_TNL_TSO);
        dev_info->tx_queue_offload_capa = dev_info->tx_offload_capa;

        dev_info->default_txconf = (struct rte_eth_txconf) {
                .offloads = DEV_TX_OFFLOAD_MULTI_SEGS,
        };

        dev_info->default_rxconf = (struct rte_eth_rxconf) {
                /* Packets are always dropped if no descriptors are available */
                .rx_drop_en = 1,
                .offloads = 0,
        };

        memset(&link, 0, sizeof(struct qed_link_output));
        qdev->ops->common->get_link(edev, &link);
        if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
                speed_cap |= ETH_LINK_SPEED_1G;
        if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
                speed_cap |= ETH_LINK_SPEED_10G;
        if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G)
                speed_cap |= ETH_LINK_SPEED_25G;
        if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G)
                speed_cap |= ETH_LINK_SPEED_40G;
        if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
                speed_cap |= ETH_LINK_SPEED_50G;
        if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G)
                speed_cap |= ETH_LINK_SPEED_100G;
        dev_info->speed_capa = speed_cap;
}

1335 /* Return 0 if the link status changed, -1 if it is unchanged */
1336 int
1337 qede_link_update(struct rte_eth_dev *eth_dev, __rte_unused int wait_to_complete)
1338 {
1339         struct qede_dev *qdev = eth_dev->data->dev_private;
1340         struct ecore_dev *edev = &qdev->edev;
1341         struct qed_link_output q_link;
1342         struct rte_eth_link link;
1343         uint16_t link_duplex;
1344
1345         memset(&q_link, 0, sizeof(q_link));
1346         memset(&link, 0, sizeof(link));
1347
1348         qdev->ops->common->get_link(edev, &q_link);
1349
1350         /* Link Speed */
1351         link.link_speed = q_link.speed;
1352
1353         /* Link Mode */
1354         switch (q_link.duplex) {
1355         case QEDE_DUPLEX_HALF:
1356                 link_duplex = ETH_LINK_HALF_DUPLEX;
1357                 break;
1358         case QEDE_DUPLEX_FULL:
1359                 link_duplex = ETH_LINK_FULL_DUPLEX;
1360                 break;
1361         case QEDE_DUPLEX_UNKNOWN:
1362         default:
1363                 link_duplex = -1;
1364         }
1365         link.link_duplex = link_duplex;
1366
1367         /* Link Status */
1368         link.link_status = q_link.link_up ? ETH_LINK_UP : ETH_LINK_DOWN;
1369
1370         /* AN */
1371         link.link_autoneg = (q_link.supported_caps & QEDE_SUPPORTED_AUTONEG) ?
1372                              ETH_LINK_AUTONEG : ETH_LINK_FIXED;
1373
1374         DP_INFO(edev, "Link - Speed %u Mode %u AN %u Status %u\n",
1375                 link.link_speed, link.link_duplex,
1376                 link.link_autoneg, link.link_status);
1377
1378         return rte_eth_linkstatus_set(eth_dev, &link);
1379 }
1380
1381 static void qede_promiscuous_enable(struct rte_eth_dev *eth_dev)
1382 {
1383         struct qede_dev *qdev = eth_dev->data->dev_private;
1384         struct ecore_dev *edev = &qdev->edev;
1385         enum qed_filter_rx_mode_type type = QED_FILTER_RX_MODE_TYPE_PROMISC;
1386
1387         PMD_INIT_FUNC_TRACE(edev);
1388
1389         if (rte_eth_allmulticast_get(eth_dev->data->port_id) == 1)
1390                 type |= QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC;
1391
1392         qed_configure_filter_rx_mode(eth_dev, type);
1393 }
1394
1395 static void qede_promiscuous_disable(struct rte_eth_dev *eth_dev)
1396 {
1397         struct qede_dev *qdev = eth_dev->data->dev_private;
1398         struct ecore_dev *edev = &qdev->edev;
1399
1400         PMD_INIT_FUNC_TRACE(edev);
1401
1402         if (rte_eth_allmulticast_get(eth_dev->data->port_id) == 1)
1403                 qed_configure_filter_rx_mode(eth_dev,
1404                                 QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC);
1405         else
1406                 qed_configure_filter_rx_mode(eth_dev,
1407                                 QED_FILTER_RX_MODE_TYPE_REGULAR);
1408 }
1409
1410 static void qede_poll_sp_sb_cb(void *param)
1411 {
1412         struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param;
1413         struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
1414         struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
1415         int rc;
1416
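             /* Service the slowpath status block of both engines: CMT
              * (dual-hwfn) devices have only one interrupt vector, so
              * slowpath events are polled from this self-rearming alarm
              * (armed in qede_common_dev_init()).
              */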
1417         qede_interrupt_action(ECORE_LEADING_HWFN(edev));
1418         qede_interrupt_action(&edev->hwfns[1]);
1419
1420         rc = rte_eal_alarm_set(QEDE_SP_TIMER_PERIOD,
1421                                qede_poll_sp_sb_cb,
1422                                (void *)eth_dev);
1423         if (rc != 0) {
1424                 DP_ERR(edev, "Unable to start periodic"
1425                              " timer rc %d\n", rc);
1426                 assert(false && "Unable to start periodic timer");
1427         }
1428 }
1429
1430 static void qede_dev_close(struct rte_eth_dev *eth_dev)
1431 {
1432         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
1433         struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
1434         struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
1435
1436         PMD_INIT_FUNC_TRACE(edev);
1437
1438         /* dev_stop() shall clean up fastpath resources in HW without
1439          * releasing DMA memories and SW structures, so that dev_start()
1440          * can be called by the app without reconfiguration. dev_close(),
1441          * in contrast, releases all resources; the device must be
1442          * re-initialized before it can be brought up again. */
1443         if (eth_dev->data->dev_started)
1444                 qede_dev_stop(eth_dev);
1445
1446         qede_stop_vport(edev);
1447         qdev->vport_started = false;
1448         qede_fdir_dealloc_resc(eth_dev);
1449         qede_dealloc_fp_resc(eth_dev);
1450
1451         eth_dev->data->nb_rx_queues = 0;
1452         eth_dev->data->nb_tx_queues = 0;
1453
1454         /* Bring the link down */
1455         qede_dev_set_link_state(eth_dev, false);
1456         qdev->ops->common->slowpath_stop(edev);
1457         qdev->ops->common->remove(edev);
1458         rte_intr_disable(&pci_dev->intr_handle);
1459
1460         switch (pci_dev->intr_handle.type) {
1461         case RTE_INTR_HANDLE_UIO_INTX:
1462         case RTE_INTR_HANDLE_VFIO_LEGACY:
1463                 rte_intr_callback_unregister(&pci_dev->intr_handle,
1464                                              qede_interrupt_handler_intx,
1465                                              (void *)eth_dev);
1466                 break;
1467         default:
1468                 rte_intr_callback_unregister(&pci_dev->intr_handle,
1469                                              qede_interrupt_handler,
1470                                              (void *)eth_dev);
1471         }
1472
1473         if (ECORE_IS_CMT(edev))
1474                 rte_eal_alarm_cancel(qede_poll_sp_sb_cb, (void *)eth_dev);
1475 }
1476
1477 static int
1478 qede_get_stats(struct rte_eth_dev *eth_dev, struct rte_eth_stats *eth_stats)
1479 {
1480         struct qede_dev *qdev = eth_dev->data->dev_private;
1481         struct ecore_dev *edev = &qdev->edev;
1482         struct ecore_eth_stats stats;
1483         unsigned int i = 0, j = 0, qid;
1484         unsigned int rxq_stat_cntrs, txq_stat_cntrs;
1485         struct qede_tx_queue *txq;
1486
1487         ecore_get_vport_stats(edev, &stats);
1488
1489         /* RX Stats */
1490         eth_stats->ipackets = stats.common.rx_ucast_pkts +
1491             stats.common.rx_mcast_pkts + stats.common.rx_bcast_pkts;
1492
1493         eth_stats->ibytes = stats.common.rx_ucast_bytes +
1494             stats.common.rx_mcast_bytes + stats.common.rx_bcast_bytes;
1495
1496         eth_stats->ierrors = stats.common.rx_crc_errors +
1497             stats.common.rx_align_errors +
1498             stats.common.rx_carrier_errors +
1499             stats.common.rx_oversize_packets +
1500             stats.common.rx_jabbers + stats.common.rx_undersize_packets;
1501
1502         eth_stats->rx_nombuf = stats.common.no_buff_discards;
1503
1504         eth_stats->imissed = stats.common.mftag_filter_discards +
1505             stats.common.mac_filter_discards +
1506             stats.common.no_buff_discards +
1507             stats.common.brb_truncates + stats.common.brb_discards;
1508
1509         /* TX stats */
1510         eth_stats->opackets = stats.common.tx_ucast_pkts +
1511             stats.common.tx_mcast_pkts + stats.common.tx_bcast_pkts;
1512
1513         eth_stats->obytes = stats.common.tx_ucast_bytes +
1514             stats.common.tx_mcast_bytes + stats.common.tx_bcast_bytes;
1515
1516         eth_stats->oerrors = stats.common.tx_err_drop_pkts;
1517
1518         /* Queue stats */
1519         rxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(qdev),
1520                                RTE_ETHDEV_QUEUE_STAT_CNTRS);
1521         txq_stat_cntrs = RTE_MIN(QEDE_TSS_COUNT(qdev),
1522                                RTE_ETHDEV_QUEUE_STAT_CNTRS);
1523         if ((rxq_stat_cntrs != (unsigned int)QEDE_RSS_COUNT(qdev)) ||
1524             (txq_stat_cntrs != (unsigned int)QEDE_TSS_COUNT(qdev)))
1525                 DP_VERBOSE(edev, ECORE_MSG_DEBUG,
1526                        "Not all the queue stats will be displayed. Set"
1527                        " RTE_ETHDEV_QUEUE_STAT_CNTRS config param"
1528                        " appropriately and retry.\n");
1529
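             /* Per-queue counters live inside each RX/TX queue structure
              * and are read via offsetof() on the fp_array entries.
              */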
1530         for_each_rss(qid) {
1531                 eth_stats->q_ipackets[i] =
1532                         *(uint64_t *)(
1533                                 ((char *)(qdev->fp_array[qid].rxq)) +
1534                                 offsetof(struct qede_rx_queue,
1535                                 rcv_pkts));
1536                 eth_stats->q_errors[i] =
1537                         *(uint64_t *)(
1538                                 ((char *)(qdev->fp_array[qid].rxq)) +
1539                                 offsetof(struct qede_rx_queue,
1540                                 rx_hw_errors)) +
1541                         *(uint64_t *)(
1542                                 ((char *)(qdev->fp_array[qid].rxq)) +
1543                                 offsetof(struct qede_rx_queue,
1544                                 rx_alloc_errors));
1545                 i++;
1546                 if (i == rxq_stat_cntrs)
1547                         break;
1548         }
1549
1550         for_each_tss(qid) {
1551                 txq = qdev->fp_array[qid].txq;
1552                 eth_stats->q_opackets[j] =
1553                         *((uint64_t *)(uintptr_t)
1554                                 (((uint64_t)(uintptr_t)(txq)) +
1555                                  offsetof(struct qede_tx_queue,
1556                                           xmit_pkts)));
1557                 j++;
1558                 if (j == txq_stat_cntrs)
1559                         break;
1560         }
1561
1562         return 0;
1563 }
1564
1565 static unsigned
1566 qede_get_xstats_count(struct qede_dev *qdev)
{
1567         if (ECORE_IS_BB(&qdev->edev))
1568                 return RTE_DIM(qede_xstats_strings) +
1569                        RTE_DIM(qede_bb_xstats_strings) +
1570                        (RTE_DIM(qede_rxq_xstats_strings) *
1571                         RTE_MIN(QEDE_RSS_COUNT(qdev),
1572                                 RTE_ETHDEV_QUEUE_STAT_CNTRS));
1573         else
1574                 return RTE_DIM(qede_xstats_strings) +
1575                        RTE_DIM(qede_ah_xstats_strings) +
1576                        (RTE_DIM(qede_rxq_xstats_strings) *
1577                         RTE_MIN(QEDE_RSS_COUNT(qdev),
1578                                 RTE_ETHDEV_QUEUE_STAT_CNTRS));
1579 }
1580
1581 static int
1582 qede_get_xstats_names(struct rte_eth_dev *dev,
1583                       struct rte_eth_xstat_name *xstats_names,
1584                       __rte_unused unsigned int limit)
1585 {
1586         struct qede_dev *qdev = dev->data->dev_private;
1587         struct ecore_dev *edev = &qdev->edev;
1588         const unsigned int stat_cnt = qede_get_xstats_count(qdev);
1589         unsigned int i, qid, stat_idx = 0;
1590         unsigned int rxq_stat_cntrs;
1591
1592         if (xstats_names != NULL) {
1593                 for (i = 0; i < RTE_DIM(qede_xstats_strings); i++) {
1594                         snprintf(xstats_names[stat_idx].name,
1595                                 sizeof(xstats_names[stat_idx].name),
1596                                 "%s",
1597                                 qede_xstats_strings[i].name);
1598                         stat_idx++;
1599                 }
1600
1601                 if (ECORE_IS_BB(edev)) {
1602                         for (i = 0; i < RTE_DIM(qede_bb_xstats_strings); i++) {
1603                                 snprintf(xstats_names[stat_idx].name,
1604                                         sizeof(xstats_names[stat_idx].name),
1605                                         "%s",
1606                                         qede_bb_xstats_strings[i].name);
1607                                 stat_idx++;
1608                         }
1609                 } else {
1610                         for (i = 0; i < RTE_DIM(qede_ah_xstats_strings); i++) {
1611                                 snprintf(xstats_names[stat_idx].name,
1612                                         sizeof(xstats_names[stat_idx].name),
1613                                         "%s",
1614                                         qede_ah_xstats_strings[i].name);
1615                                 stat_idx++;
1616                         }
1617                 }
1618
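                     /* Splice the queue id into the per-queue name template:
                      * keep the first four characters (e.g. "rx_q"), insert
                      * the queue id, then append the rest, so a template like
                      * "rx_q_errors" yields "rx_q0_errors" for queue 0.
                      */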
1619                 rxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(qdev),
1620                                          RTE_ETHDEV_QUEUE_STAT_CNTRS);
1621                 for (qid = 0; qid < rxq_stat_cntrs; qid++) {
1622                         for (i = 0; i < RTE_DIM(qede_rxq_xstats_strings); i++) {
1623                                 snprintf(xstats_names[stat_idx].name,
1624                                         sizeof(xstats_names[stat_idx].name),
1625                                         "%.4s%d%s",
1626                                         qede_rxq_xstats_strings[i].name, qid,
1627                                         qede_rxq_xstats_strings[i].name + 4);
1628                                 stat_idx++;
1629                         }
1630                 }
1631         }
1632
1633         return stat_cnt;
1634 }
1635
1636 static int
1637 qede_get_xstats(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
1638                 unsigned int n)
1639 {
1640         struct qede_dev *qdev = dev->data->dev_private;
1641         struct ecore_dev *edev = &qdev->edev;
1642         struct ecore_eth_stats stats;
1643         const unsigned int num = qede_get_xstats_count(qdev);
1644         unsigned int i, qid, stat_idx = 0;
1645         unsigned int rxq_stat_cntrs;
1646
1647         if (n < num)
1648                 return num;
1649
1650         ecore_get_vport_stats(edev, &stats);
1651
1652         for (i = 0; i < RTE_DIM(qede_xstats_strings); i++) {
1653                 xstats[stat_idx].value = *(uint64_t *)(((char *)&stats) +
1654                                              qede_xstats_strings[i].offset);
1655                 xstats[stat_idx].id = stat_idx;
1656                 stat_idx++;
1657         }
1658
1659         if (ECORE_IS_BB(edev)) {
1660                 for (i = 0; i < RTE_DIM(qede_bb_xstats_strings); i++) {
1661                         xstats[stat_idx].value =
1662                                         *(uint64_t *)(((char *)&stats) +
1663                                         qede_bb_xstats_strings[i].offset);
1664                         xstats[stat_idx].id = stat_idx;
1665                         stat_idx++;
1666                 }
1667         } else {
1668                 for (i = 0; i < RTE_DIM(qede_ah_xstats_strings); i++) {
1669                         xstats[stat_idx].value =
1670                                         *(uint64_t *)(((char *)&stats) +
1671                                         qede_ah_xstats_strings[i].offset);
1672                         xstats[stat_idx].id = stat_idx;
1673                         stat_idx++;
1674                 }
1675         }
1676
1677         rxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(qdev),
1678                                  RTE_ETHDEV_QUEUE_STAT_CNTRS);
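             /* Each queue contributes RTE_DIM(qede_rxq_xstats_strings)
              * entries; iterate over exactly rxq_stat_cntrs queues so the
              * output never exceeds the count reported by
              * qede_get_xstats_count().
              */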
1679         for (qid = 0; qid < rxq_stat_cntrs; qid++) {
1680                 for (i = 0; i < RTE_DIM(qede_rxq_xstats_strings); i++) {
1681                         xstats[stat_idx].value = *(uint64_t *)(
1682                                 ((char *)(qdev->fp_array[qid].rxq)) +
1683                                  qede_rxq_xstats_strings[i].offset);
1684                         xstats[stat_idx].id = stat_idx;
1685                         stat_idx++;
1686                 }
1689         }
1690
1691         return stat_idx;
1692 }
1693
1694 static void
1695 qede_reset_xstats(struct rte_eth_dev *dev)
1696 {
1697         struct qede_dev *qdev = dev->data->dev_private;
1698         struct ecore_dev *edev = &qdev->edev;
1699
1700         ecore_reset_vport_stats(edev);
1701         qede_reset_queue_stats(qdev, true);
1702 }
1703
1704 int qede_dev_set_link_state(struct rte_eth_dev *eth_dev, bool link_up)
1705 {
1706         struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
1707         struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
1708         struct qed_link_params link_params;
1709         int rc;
1710
1711         DP_INFO(edev, "setting link state %d\n", link_up);
1712         memset(&link_params, 0, sizeof(link_params));
1713         link_params.link_up = link_up;
1714         rc = qdev->ops->common->set_link(edev, &link_params);
1715         if (rc != ECORE_SUCCESS)
1716                 DP_ERR(edev, "Unable to set link state %d\n", link_up);
1717
1718         return rc;
1719 }
1720
1721 static int qede_dev_set_link_up(struct rte_eth_dev *eth_dev)
1722 {
1723         return qede_dev_set_link_state(eth_dev, true);
1724 }
1725
1726 static int qede_dev_set_link_down(struct rte_eth_dev *eth_dev)
1727 {
1728         return qede_dev_set_link_state(eth_dev, false);
1729 }
1730
1731 static void qede_reset_stats(struct rte_eth_dev *eth_dev)
1732 {
1733         struct qede_dev *qdev = eth_dev->data->dev_private;
1734         struct ecore_dev *edev = &qdev->edev;
1735
1736         ecore_reset_vport_stats(edev);
1737         qede_reset_queue_stats(qdev, false);
1738 }
1739
1740 static void qede_allmulticast_enable(struct rte_eth_dev *eth_dev)
1741 {
1742         enum qed_filter_rx_mode_type type =
1743             QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC;
1744
1745         if (rte_eth_promiscuous_get(eth_dev->data->port_id) == 1)
1746                 type |= QED_FILTER_RX_MODE_TYPE_PROMISC;
1747
1748         qed_configure_filter_rx_mode(eth_dev, type);
1749 }
1750
1751 static void qede_allmulticast_disable(struct rte_eth_dev *eth_dev)
1752 {
1753         if (rte_eth_promiscuous_get(eth_dev->data->port_id) == 1)
1754                 qed_configure_filter_rx_mode(eth_dev,
1755                                 QED_FILTER_RX_MODE_TYPE_PROMISC);
1756         else
1757                 qed_configure_filter_rx_mode(eth_dev,
1758                                 QED_FILTER_RX_MODE_TYPE_REGULAR);
1759 }
1760
1761 static int
1762 qede_set_mc_addr_list(struct rte_eth_dev *eth_dev, struct ether_addr *mc_addrs,
1763                       uint32_t mc_addrs_num)
1764 {
1765         struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
1766         struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
1767         uint8_t i;
1768
1769         if (mc_addrs_num > ECORE_MAX_MC_ADDRS) {
1770                 DP_ERR(edev, "Reached max multicast filters limit;"
1771                              " please enable multicast promisc mode\n");
1772                 return -ENOSPC;
1773         }
1774
1775         for (i = 0; i < mc_addrs_num; i++) {
1776                 if (!is_multicast_ether_addr(&mc_addrs[i])) {
1777                         DP_ERR(edev, "Not a valid multicast MAC\n");
1778                         return -EINVAL;
1779                 }
1780         }
1781
1782         /* Flush all existing entries */
1783         if (qede_del_mcast_filters(eth_dev))
1784                 return -1;
1785
1786         /* Set new mcast list */
1787         return qede_add_mcast_filters(eth_dev, mc_addrs, mc_addrs_num);
1788 }
1789
1790 /* Update MTU via vport-update without doing port restart.
1791  * The vport must be deactivated before calling this API.
1792  */
1793 int qede_update_mtu(struct rte_eth_dev *eth_dev, uint16_t mtu)
1794 {
1795         struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
1796         struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
1797         struct ecore_hwfn *p_hwfn;
1798         int rc;
1799         int i;
1800
1801         if (IS_PF(edev)) {
1802                 struct ecore_sp_vport_update_params params;
1803
1804                 memset(&params, 0, sizeof(struct ecore_sp_vport_update_params));
1805                 params.vport_id = 0;
1806                 params.mtu = mtu;
1808                 for_each_hwfn(edev, i) {
1809                         p_hwfn = &edev->hwfns[i];
1810                         params.opaque_fid = p_hwfn->hw_info.opaque_fid;
1811                         rc = ecore_sp_vport_update(p_hwfn, &params,
1812                                         ECORE_SPQ_MODE_EBLOCK, NULL);
1813                         if (rc != ECORE_SUCCESS)
1814                                 goto err;
1815                 }
1816         } else {
1817                 for_each_hwfn(edev, i) {
1818                         p_hwfn = &edev->hwfns[i];
1819                         rc = ecore_vf_pf_update_mtu(p_hwfn, mtu);
1820                         if (rc == ECORE_INVAL) {
1821                                 DP_INFO(edev, "VF MTU Update TLV not supported\n");
1822                                 /* Recreate vport */
1823                                 rc = qede_start_vport(qdev, mtu);
1824                                 if (rc != ECORE_SUCCESS)
1825                                         goto err;
1826
1827                                 /* Restore config lost due to vport stop */
1828                                 if (eth_dev->data->promiscuous)
1829                                         qede_promiscuous_enable(eth_dev);
1830                                 else
1831                                         qede_promiscuous_disable(eth_dev);
1832
1833                                 if (eth_dev->data->all_multicast)
1834                                         qede_allmulticast_enable(eth_dev);
1835                                 else
1836                                         qede_allmulticast_disable(eth_dev);
1837
1838                                 qede_vlan_offload_set(eth_dev,
1839                                                       qdev->vlan_offload_mask);
1840                         } else if (rc != ECORE_SUCCESS) {
1841                                 goto err;
1842                         }
1843                 }
1844         }
1845         DP_INFO(edev, "%s MTU updated to %u\n", IS_PF(edev) ? "PF" : "VF", mtu);
1846
1847         return 0;
1848
1849 err:
1850         DP_ERR(edev, "Failed to update MTU\n");
1851         return -1;
1852 }
1853
1854 static int qede_flow_ctrl_set(struct rte_eth_dev *eth_dev,
1855                               struct rte_eth_fc_conf *fc_conf)
1856 {
1857         struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
1858         struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
1859         struct qed_link_output current_link;
1860         struct qed_link_params params;
1861
1862         memset(&current_link, 0, sizeof(current_link));
1863         qdev->ops->common->get_link(edev, &current_link);
1864
1865         memset(&params, 0, sizeof(params));
1866         params.override_flags |= QED_LINK_OVERRIDE_PAUSE_CONFIG;
1867         if (fc_conf->autoneg) {
1868                 if (!(current_link.supported_caps & QEDE_SUPPORTED_AUTONEG)) {
1869                         DP_ERR(edev, "Autoneg not supported\n");
1870                         return -EINVAL;
1871                 }
1872                 params.pause_config |= QED_LINK_PAUSE_AUTONEG_ENABLE;
1873         }
1874
1875         /* Pause is assumed to be supported (SUPPORTED_Pause) */
1876         if (fc_conf->mode == RTE_FC_FULL)
1877                 params.pause_config |= (QED_LINK_PAUSE_TX_ENABLE |
1878                                         QED_LINK_PAUSE_RX_ENABLE);
1879         if (fc_conf->mode == RTE_FC_TX_PAUSE)
1880                 params.pause_config |= QED_LINK_PAUSE_TX_ENABLE;
1881         if (fc_conf->mode == RTE_FC_RX_PAUSE)
1882                 params.pause_config |= QED_LINK_PAUSE_RX_ENABLE;
1883
1884         params.link_up = true;
1885         (void)qdev->ops->common->set_link(edev, &params);
1886
1887         return 0;
1888 }
1889
1890 static int qede_flow_ctrl_get(struct rte_eth_dev *eth_dev,
1891                               struct rte_eth_fc_conf *fc_conf)
1892 {
1893         struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
1894         struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
1895         struct qed_link_output current_link;
1896
1897         memset(&current_link, 0, sizeof(current_link));
1898         qdev->ops->common->get_link(edev, &current_link);
1899
1900         if (current_link.pause_config & QED_LINK_PAUSE_AUTONEG_ENABLE)
1901                 fc_conf->autoneg = true;
1902
1903         if (current_link.pause_config & (QED_LINK_PAUSE_RX_ENABLE |
1904                                          QED_LINK_PAUSE_TX_ENABLE))
1905                 fc_conf->mode = RTE_FC_FULL;
1906         else if (current_link.pause_config & QED_LINK_PAUSE_RX_ENABLE)
1907                 fc_conf->mode = RTE_FC_RX_PAUSE;
1908         else if (current_link.pause_config & QED_LINK_PAUSE_TX_ENABLE)
1909                 fc_conf->mode = RTE_FC_TX_PAUSE;
1910         else
1911                 fc_conf->mode = RTE_FC_NONE;
1912
1913         return 0;
1914 }
1915
1916 static const uint32_t *
1917 qede_dev_supported_ptypes_get(struct rte_eth_dev *eth_dev)
1918 {
1919         static const uint32_t ptypes[] = {
1920                 RTE_PTYPE_L2_ETHER,
1921                 RTE_PTYPE_L2_ETHER_VLAN,
1922                 RTE_PTYPE_L3_IPV4,
1923                 RTE_PTYPE_L3_IPV6,
1924                 RTE_PTYPE_L4_TCP,
1925                 RTE_PTYPE_L4_UDP,
1926                 RTE_PTYPE_TUNNEL_VXLAN,
1927                 RTE_PTYPE_L4_FRAG,
1928                 RTE_PTYPE_TUNNEL_GENEVE,
1929                 RTE_PTYPE_TUNNEL_GRE,
1930                 /* Inner */
1931                 RTE_PTYPE_INNER_L2_ETHER,
1932                 RTE_PTYPE_INNER_L2_ETHER_VLAN,
1933                 RTE_PTYPE_INNER_L3_IPV4,
1934                 RTE_PTYPE_INNER_L3_IPV6,
1935                 RTE_PTYPE_INNER_L4_TCP,
1936                 RTE_PTYPE_INNER_L4_UDP,
1937                 RTE_PTYPE_INNER_L4_FRAG,
1938                 RTE_PTYPE_UNKNOWN
1939         };
1940
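             /* Report ptypes only while the real RX handler is installed;
              * e.g. qede_set_mtu() temporarily swaps in a dummy burst
              * function.
              */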
1941         if (eth_dev->rx_pkt_burst == qede_recv_pkts)
1942                 return ptypes;
1943
1944         return NULL;
1945 }
1946
1947 static void qede_init_rss_caps(uint8_t *rss_caps, uint64_t hf)
1948 {
1949         *rss_caps = 0;
1950         *rss_caps |= (hf & ETH_RSS_IPV4)              ? ECORE_RSS_IPV4 : 0;
1951         *rss_caps |= (hf & ETH_RSS_IPV6)              ? ECORE_RSS_IPV6 : 0;
1952         *rss_caps |= (hf & ETH_RSS_IPV6_EX)           ? ECORE_RSS_IPV6 : 0;
1953         *rss_caps |= (hf & ETH_RSS_NONFRAG_IPV4_TCP)  ? ECORE_RSS_IPV4_TCP : 0;
1954         *rss_caps |= (hf & ETH_RSS_NONFRAG_IPV6_TCP)  ? ECORE_RSS_IPV6_TCP : 0;
1955         *rss_caps |= (hf & ETH_RSS_IPV6_TCP_EX)       ? ECORE_RSS_IPV6_TCP : 0;
1956         *rss_caps |= (hf & ETH_RSS_NONFRAG_IPV4_UDP)  ? ECORE_RSS_IPV4_UDP : 0;
1957         *rss_caps |= (hf & ETH_RSS_NONFRAG_IPV6_UDP)  ? ECORE_RSS_IPV6_UDP : 0;
1958 }
1959
1960 int qede_rss_hash_update(struct rte_eth_dev *eth_dev,
1961                          struct rte_eth_rss_conf *rss_conf)
1962 {
1963         struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
1964         struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
1965         struct ecore_sp_vport_update_params vport_update_params;
1966         struct ecore_rss_params rss_params;
1967         struct ecore_hwfn *p_hwfn;
1968         uint32_t *key = (uint32_t *)rss_conf->rss_key;
1969         uint64_t hf = rss_conf->rss_hf;
1970         uint8_t len = rss_conf->rss_key_len;
1971         uint8_t idx;
1972         uint8_t i;
1973         int rc;
1974
1975         memset(&vport_update_params, 0, sizeof(vport_update_params));
1976         memset(&rss_params, 0, sizeof(rss_params));
1977
1978         DP_INFO(edev, "RSS hf = 0x%lx len = %u key = %p\n",
1979                 (unsigned long)hf, len, key);
1980
1981         if (hf != 0) {
1982                 /* Enabling RSS */
1983                 DP_INFO(edev, "Enabling rss\n");
1984
1985                 /* RSS caps */
1986                 qede_init_rss_caps(&rss_params.rss_caps, hf);
1987                 rss_params.update_rss_capabilities = 1;
1988
1989                 /* RSS hash key */
1990                 if (key) {
1991                         if (len > (ECORE_RSS_KEY_SIZE * sizeof(uint32_t))) {
1992                                 DP_ERR(edev, "RSS key length exceeds limit\n");
1993                                 return -EINVAL;
1994                         }
1995                         DP_INFO(edev, "Applying user supplied hash key\n");
1996                         rss_params.update_rss_key = 1;
1997                         memcpy(&rss_params.rss_key, key, len);
1998                 }
1999                 rss_params.rss_enable = 1;
2000         }
2001
2002         rss_params.update_rss_config = 1;
2003         /* Indirection table has 2^7 = 128 entries (ECORE_RSS_IND_TABLE_SIZE) */
2004         rss_params.rss_table_size_log = 7;
2005         vport_update_params.vport_id = 0;
2006         /* pass the L2 handles instead of qids */
2007         for (i = 0 ; i < ECORE_RSS_IND_TABLE_SIZE ; i++) {
2008                 idx = i % QEDE_RSS_COUNT(qdev);
2009                 rss_params.rss_ind_table[i] = qdev->fp_array[idx].rxq->handle;
2010         }
2011         vport_update_params.rss_params = &rss_params;
2012
2013         for_each_hwfn(edev, i) {
2014                 p_hwfn = &edev->hwfns[i];
2015                 vport_update_params.opaque_fid = p_hwfn->hw_info.opaque_fid;
2016                 rc = ecore_sp_vport_update(p_hwfn, &vport_update_params,
2017                                            ECORE_SPQ_MODE_EBLOCK, NULL);
2018                 if (rc) {
2019                         DP_ERR(edev, "vport-update for RSS failed\n");
2020                         return rc;
2021                 }
2022         }
2023         qdev->rss_enable = rss_params.rss_enable;
2024
2025         /* Update local structure for hash query */
2026         qdev->rss_conf.rss_hf = hf;
2027         qdev->rss_conf.rss_key_len = len;
2028         if (qdev->rss_enable) {
2029                 if  (qdev->rss_conf.rss_key == NULL) {
2030                         qdev->rss_conf.rss_key = (uint8_t *)malloc(len);
2031                         if (qdev->rss_conf.rss_key == NULL) {
2032                                 DP_ERR(edev, "No memory to store RSS key\n");
2033                                 return -ENOMEM;
2034                         }
2035                 }
2036                 if (key && len) {
2037                         DP_INFO(edev, "Storing RSS key\n");
2038                         memcpy(qdev->rss_conf.rss_key, key, len);
2039                 }
2040         } else if (!qdev->rss_enable && len == 0) {
2041                 if (qdev->rss_conf.rss_key) {
2042                         free(qdev->rss_conf.rss_key);
2043                         qdev->rss_conf.rss_key = NULL;
2044                         DP_INFO(edev, "Free RSS key\n");
2045                 }
2046         }
2047
2048         return 0;
2049 }
2050
2051 static int qede_rss_hash_conf_get(struct rte_eth_dev *eth_dev,
2052                            struct rte_eth_rss_conf *rss_conf)
2053 {
2054         struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
2055
2056         rss_conf->rss_hf = qdev->rss_conf.rss_hf;
2057         rss_conf->rss_key_len = qdev->rss_conf.rss_key_len;
2058
2059         if (rss_conf->rss_key && qdev->rss_conf.rss_key)
2060                 memcpy(rss_conf->rss_key, qdev->rss_conf.rss_key,
2061                        rss_conf->rss_key_len);
2062         return 0;
2063 }
2064
2065 static bool qede_update_rss_parm_cmt(struct ecore_dev *edev,
2066                                     struct ecore_rss_params *rss)
2067 {
2068         int i, fn;
2069         bool rss_mode = 1; /* enable */
2070         struct ecore_queue_cid *cid;
2071         struct ecore_rss_params *t_rss;
2072
2073         /* In the regular scenario we'd simply take the input handlers.
2074          * In CMT, however, the handlers must be split according to the
2075          * engine they were configured on, and we then need to work out
2076          * whether RSS is really required, since two queues on CMT (one
2077          * per hwfn) don't need it.
2078          */
2079
2080         /* CMT should be round-robin */
2081         for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++) {
2082                 cid = rss->rss_ind_table[i];
2083
2084                 if (cid->p_owner == ECORE_LEADING_HWFN(edev))
2085                         t_rss = &rss[0];
2086                 else
2087                         t_rss = &rss[1];
2088
2089                 t_rss->rss_ind_table[i / edev->num_hwfns] = cid;
2090         }
2091
2092         t_rss = &rss[1];
2093         t_rss->update_rss_ind_table = 1;
2094         t_rss->rss_table_size_log = 7;
2095         t_rss->update_rss_config = 1;
2096
2097         /* Make sure RSS is actually required */
2098         for_each_hwfn(edev, fn) {
2099                 for (i = 1; i < ECORE_RSS_IND_TABLE_SIZE / edev->num_hwfns;
2100                      i++) {
2101                         if (rss[fn].rss_ind_table[i] !=
2102                             rss[fn].rss_ind_table[0])
2103                                 break;
2104                 }
2105
2106                 if (i == ECORE_RSS_IND_TABLE_SIZE / edev->num_hwfns) {
2107                         DP_INFO(edev,
2108                                 "CMT - 1 queue per-hwfn; Disabling RSS\n");
2109                         rss_mode = 0;
2110                         goto out;
2111                 }
2112         }
2113
2114 out:
2115         t_rss->rss_enable = rss_mode;
2116
2117         return rss_mode;
2118 }
2119
2120 int qede_rss_reta_update(struct rte_eth_dev *eth_dev,
2121                          struct rte_eth_rss_reta_entry64 *reta_conf,
2122                          uint16_t reta_size)
2123 {
2124         struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
2125         struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
2126         struct ecore_sp_vport_update_params vport_update_params;
2127         struct ecore_rss_params *params;
2128         struct ecore_hwfn *p_hwfn;
2129         uint16_t i, idx, shift;
2130         uint8_t entry;
2131         int rc = 0;
2132
2133         if (reta_size > ETH_RSS_RETA_SIZE_128) {
2134                 DP_ERR(edev, "reta_size %d is not supported by hardware\n",
2135                        reta_size);
2136                 return -EINVAL;
2137         }
2138
2139         memset(&vport_update_params, 0, sizeof(vport_update_params));
2140         params = rte_zmalloc("qede_rss", sizeof(*params) * edev->num_hwfns,
2141                              RTE_CACHE_LINE_SIZE);
2142         if (params == NULL) {
2143                 DP_ERR(edev, "failed to allocate memory\n");
2144                 return -ENOMEM;
2145         }
2146
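             /* reta_conf is organized as 64-entry groups: idx selects the
              * group, shift the entry within it; only entries whose mask
              * bit is set are applied.
              */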
2147         for (i = 0; i < reta_size; i++) {
2148                 idx = i / RTE_RETA_GROUP_SIZE;
2149                 shift = i % RTE_RETA_GROUP_SIZE;
2150                 if (reta_conf[idx].mask & (1ULL << shift)) {
2151                         entry = reta_conf[idx].reta[shift];
2152                         /* Pass rxq handles to ecore */
2153                         params->rss_ind_table[i] =
2154                                         qdev->fp_array[entry].rxq->handle;
2155                         /* Update the local copy for RETA query command */
2156                         qdev->rss_ind_table[i] = entry;
2157                 }
2158         }
2159
2160         params->update_rss_ind_table = 1;
2161         params->rss_table_size_log = 7;
2162         params->update_rss_config = 1;
2163
2164         /* Fix up RETA for CMT mode device */
2165         if (ECORE_IS_CMT(edev))
2166                 qdev->rss_enable = qede_update_rss_parm_cmt(edev,
2167                                                             params);
2168         vport_update_params.vport_id = 0;
2169         /* Use the current value of rss_enable */
2170         params->rss_enable = qdev->rss_enable;
2171         vport_update_params.rss_params = params;
2172
2173         for_each_hwfn(edev, i) {
2174                 p_hwfn = &edev->hwfns[i];
2175                 vport_update_params.opaque_fid = p_hwfn->hw_info.opaque_fid;
2176                 rc = ecore_sp_vport_update(p_hwfn, &vport_update_params,
2177                                            ECORE_SPQ_MODE_EBLOCK, NULL);
2178                 if (rc) {
2179                         DP_ERR(edev, "vport-update for RSS failed\n");
2180                         goto out;
2181                 }
2182         }
2183
2184 out:
2185         rte_free(params);
2186         return rc;
2187 }
2188
2189 static int qede_rss_reta_query(struct rte_eth_dev *eth_dev,
2190                                struct rte_eth_rss_reta_entry64 *reta_conf,
2191                                uint16_t reta_size)
2192 {
2193         struct qede_dev *qdev = eth_dev->data->dev_private;
2194         struct ecore_dev *edev = &qdev->edev;
2195         uint16_t i, idx, shift;
2196         uint8_t entry;
2197
2198         if (reta_size > ETH_RSS_RETA_SIZE_128) {
2199                 DP_ERR(edev, "reta_size %d is not supported\n",
2200                        reta_size);
2201                 return -EINVAL;
2202         }
2203
2204         for (i = 0; i < reta_size; i++) {
2205                 idx = i / RTE_RETA_GROUP_SIZE;
2206                 shift = i % RTE_RETA_GROUP_SIZE;
2207                 if (reta_conf[idx].mask & (1ULL << shift)) {
2208                         entry = qdev->rss_ind_table[i];
2209                         reta_conf[idx].reta[shift] = entry;
2210                 }
2211         }
2212
2213         return 0;
2214 }
2215
2218 static int qede_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
2219 {
2220         struct qede_dev *qdev = QEDE_INIT_QDEV(dev);
2221         struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
2222         struct rte_eth_dev_info dev_info = {0};
2223         struct qede_fastpath *fp;
2224         uint32_t max_rx_pkt_len;
2225         uint32_t frame_size;
2226         uint16_t bufsz;
2227         bool restart = false;
2228         int i, rc;
2229
2230         PMD_INIT_FUNC_TRACE(edev);
2231         qede_dev_info_get(dev, &dev_info);
2232         max_rx_pkt_len = mtu + QEDE_MAX_ETHER_HDR_LEN;
2233         frame_size = max_rx_pkt_len;
2234         if ((mtu < ETHER_MIN_MTU) || (frame_size > dev_info.max_rx_pktlen)) {
2235                 DP_ERR(edev, "MTU %u out of range; maximum allowable is %u\n",
2236                        mtu, dev_info.max_rx_pktlen - ETHER_HDR_LEN -
2237                        QEDE_ETH_OVERHEAD);
2238                 return -EINVAL;
2239         }
2240         if (!dev->data->scattered_rx &&
2241             frame_size > dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM) {
2242                 DP_INFO(edev, "MTU greater than minimum RX buffer size of %u\n",
2243                         dev->data->min_rx_buf_size);
2244                 return -EINVAL;
2245         }
2246         /* Temporarily replace I/O functions with dummy ones. It cannot
2247          * be set to NULL because rte_eth_rx_burst() doesn't check for NULL.
2248          */
2249         dev->rx_pkt_burst = qede_rxtx_pkts_dummy;
2250         dev->tx_pkt_burst = qede_rxtx_pkts_dummy;
2251         if (dev->data->dev_started) {
2252                 dev->data->dev_started = 0;
2253                 qede_dev_stop(dev);
2254                 restart = true;
2255         }
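             /* Heuristic settle delay: give in-flight fastpath and slowpath
              * activity time to drain before the RX buffers are resized.
              */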
2256         rte_delay_ms(1000);
2257         qdev->mtu = mtu;
2258
2259         /* Fix up RX buf size for all queues of the port */
2260         for_each_rss(i) {
2261                 fp = &qdev->fp_array[i];
2262                 if (fp->rxq != NULL) {
2263                         bufsz = (uint16_t)rte_pktmbuf_data_room_size(
2264                                 fp->rxq->mb_pool) - RTE_PKTMBUF_HEADROOM;
2265                         /* Cache-align the mbuf size to simplify the
2266                          * rx_buf_size calculation.
2267                          */
2268                         bufsz = QEDE_FLOOR_TO_CACHE_LINE_SIZE(bufsz);
2269                         rc = qede_calc_rx_buf_size(dev, bufsz, frame_size);
2270                         if (rc < 0)
2271                                 return rc;
2272
2273                         fp->rxq->rx_buf_size = rc;
2274                 }
2275         }
2276         if (max_rx_pkt_len > ETHER_MAX_LEN)
2277                 dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
2278         else
2279                 dev->data->dev_conf.rxmode.offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME;
2280
2281         if (!dev->data->dev_started && restart) {
2282                 qede_dev_start(dev);
2283                 dev->data->dev_started = 1;
2284         }
2285
2286         /* update max frame size */
2287         dev->data->dev_conf.rxmode.max_rx_pkt_len = max_rx_pkt_len;
2288         /* Reassign back */
2289         dev->rx_pkt_burst = qede_recv_pkts;
2290         dev->tx_pkt_burst = qede_xmit_pkts;
2291
2292         return 0;
2293 }
2294
2295 static int
2296 qede_dev_reset(struct rte_eth_dev *dev)
2297 {
2298         int ret;
2299
2300         ret = qede_eth_dev_uninit(dev);
2301         if (ret)
2302                 return ret;
2303
2304         return qede_eth_dev_init(dev);
2305 }
2306
2307 static const struct eth_dev_ops qede_eth_dev_ops = {
2308         .dev_configure = qede_dev_configure,
2309         .dev_infos_get = qede_dev_info_get,
2310         .rx_queue_setup = qede_rx_queue_setup,
2311         .rx_queue_release = qede_rx_queue_release,
2312         .rx_descriptor_status = qede_rx_descriptor_status,
2313         .tx_queue_setup = qede_tx_queue_setup,
2314         .tx_queue_release = qede_tx_queue_release,
2315         .dev_start = qede_dev_start,
2316         .dev_reset = qede_dev_reset,
2317         .dev_set_link_up = qede_dev_set_link_up,
2318         .dev_set_link_down = qede_dev_set_link_down,
2319         .link_update = qede_link_update,
2320         .promiscuous_enable = qede_promiscuous_enable,
2321         .promiscuous_disable = qede_promiscuous_disable,
2322         .allmulticast_enable = qede_allmulticast_enable,
2323         .allmulticast_disable = qede_allmulticast_disable,
2324         .set_mc_addr_list = qede_set_mc_addr_list,
2325         .dev_stop = qede_dev_stop,
2326         .dev_close = qede_dev_close,
2327         .stats_get = qede_get_stats,
2328         .stats_reset = qede_reset_stats,
2329         .xstats_get = qede_get_xstats,
2330         .xstats_reset = qede_reset_xstats,
2331         .xstats_get_names = qede_get_xstats_names,
2332         .mac_addr_add = qede_mac_addr_add,
2333         .mac_addr_remove = qede_mac_addr_remove,
2334         .mac_addr_set = qede_mac_addr_set,
2335         .vlan_offload_set = qede_vlan_offload_set,
2336         .vlan_filter_set = qede_vlan_filter_set,
2337         .flow_ctrl_set = qede_flow_ctrl_set,
2338         .flow_ctrl_get = qede_flow_ctrl_get,
2339         .dev_supported_ptypes_get = qede_dev_supported_ptypes_get,
2340         .rss_hash_update = qede_rss_hash_update,
2341         .rss_hash_conf_get = qede_rss_hash_conf_get,
2342         .reta_update  = qede_rss_reta_update,
2343         .reta_query  = qede_rss_reta_query,
2344         .mtu_set = qede_set_mtu,
2345         .filter_ctrl = qede_dev_filter_ctrl,
2346         .udp_tunnel_port_add = qede_udp_dst_port_add,
2347         .udp_tunnel_port_del = qede_udp_dst_port_del,
2348 };
2349
2350 static const struct eth_dev_ops qede_eth_vf_dev_ops = {
2351         .dev_configure = qede_dev_configure,
2352         .dev_infos_get = qede_dev_info_get,
2353         .rx_queue_setup = qede_rx_queue_setup,
2354         .rx_queue_release = qede_rx_queue_release,
2355         .rx_descriptor_status = qede_rx_descriptor_status,
2356         .tx_queue_setup = qede_tx_queue_setup,
2357         .tx_queue_release = qede_tx_queue_release,
2358         .dev_start = qede_dev_start,
2359         .dev_reset = qede_dev_reset,
2360         .dev_set_link_up = qede_dev_set_link_up,
2361         .dev_set_link_down = qede_dev_set_link_down,
2362         .link_update = qede_link_update,
2363         .promiscuous_enable = qede_promiscuous_enable,
2364         .promiscuous_disable = qede_promiscuous_disable,
2365         .allmulticast_enable = qede_allmulticast_enable,
2366         .allmulticast_disable = qede_allmulticast_disable,
2367         .set_mc_addr_list = qede_set_mc_addr_list,
2368         .dev_stop = qede_dev_stop,
2369         .dev_close = qede_dev_close,
2370         .stats_get = qede_get_stats,
2371         .stats_reset = qede_reset_stats,
2372         .xstats_get = qede_get_xstats,
2373         .xstats_reset = qede_reset_xstats,
2374         .xstats_get_names = qede_get_xstats_names,
2375         .vlan_offload_set = qede_vlan_offload_set,
2376         .vlan_filter_set = qede_vlan_filter_set,
2377         .dev_supported_ptypes_get = qede_dev_supported_ptypes_get,
2378         .rss_hash_update = qede_rss_hash_update,
2379         .rss_hash_conf_get = qede_rss_hash_conf_get,
2380         .reta_update  = qede_rss_reta_update,
2381         .reta_query  = qede_rss_reta_query,
2382         .mtu_set = qede_set_mtu,
2383         .udp_tunnel_port_add = qede_udp_dst_port_add,
2384         .udp_tunnel_port_del = qede_udp_dst_port_del,
2385         .mac_addr_add = qede_mac_addr_add,
2386         .mac_addr_remove = qede_mac_addr_remove,
2387         .mac_addr_set = qede_mac_addr_set,
2388 };
2389
2390 static void qede_update_pf_params(struct ecore_dev *edev)
2391 {
2392         struct ecore_pf_params pf_params;
2393
2394         memset(&pf_params, 0, sizeof(struct ecore_pf_params));
2395         pf_params.eth_pf_params.num_cons = QEDE_PF_NUM_CONNS;
2396         pf_params.eth_pf_params.num_arfs_filters = QEDE_RFS_MAX_FLTR;
2397         qed_ops->common->update_pf_params(edev, &pf_params);
2398 }
2399
2400 static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
2401 {
2402         struct rte_pci_device *pci_dev;
2403         struct rte_pci_addr pci_addr;
2404         struct qede_dev *adapter;
2405         struct ecore_dev *edev;
2406         struct qed_dev_eth_info dev_info;
2407         struct qed_slowpath_params params;
2408         static bool do_once = true;
2409         uint8_t bulletin_change;
2410         uint8_t vf_mac[ETHER_ADDR_LEN];
2411         uint8_t is_mac_forced;
2412         bool is_mac_exist;
2413         /* Fix up ecore debug level */
2414         uint32_t dp_module = ~0 & ~ECORE_MSG_HW;
2415         uint8_t dp_level = ECORE_LEVEL_VERBOSE;
2416         uint32_t int_mode;
2417         int rc;
2418
2419         /* Extract key data structures */
2420         adapter = eth_dev->data->dev_private;
2421         adapter->ethdev = eth_dev;
2422         edev = &adapter->edev;
2423         pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
2424         pci_addr = pci_dev->addr;
2425
2426         PMD_INIT_FUNC_TRACE(edev);
2427
2428         snprintf(edev->name, NAME_SIZE, PCI_SHORT_PRI_FMT ":dpdk-port-%u",
2429                  pci_addr.bus, pci_addr.devid, pci_addr.function,
2430                  eth_dev->data->port_id);
2431
2432         eth_dev->rx_pkt_burst = qede_recv_pkts;
2433         eth_dev->tx_pkt_burst = qede_xmit_pkts;
2434         eth_dev->tx_pkt_prepare = qede_xmit_prep_pkts;
2435
2436         if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
2437                 DP_ERR(edev, "Skipping device init from secondary process\n");
2438                 return 0;
2439         }
2440
2441         rte_eth_copy_pci_info(eth_dev, pci_dev);
2442
2443         /* @DPDK */
2444         edev->vendor_id = pci_dev->id.vendor_id;
2445         edev->device_id = pci_dev->id.device_id;
2446
2447         qed_ops = qed_get_eth_ops();
2448         if (!qed_ops) {
2449                 DP_ERR(edev, "Failed to get qed_eth_ops\n");
2450                 return -EINVAL;
2451         }
2452
2453         DP_INFO(edev, "Starting qede probe\n");
2454         rc = qed_ops->common->probe(edev, pci_dev, dp_module,
2455                                     dp_level, is_vf);
2456         if (rc != 0) {
2457                 DP_ERR(edev, "qede probe failed rc %d\n", rc);
2458                 return -ENODEV;
2459         }
2460         qede_update_pf_params(edev);
2461
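             /* Use legacy INTa mode with UIO-INTx/VFIO-legacy interrupt
              * handles; all other handle types use MSI-X.
              */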
2462         switch (pci_dev->intr_handle.type) {
2463         case RTE_INTR_HANDLE_UIO_INTX:
2464         case RTE_INTR_HANDLE_VFIO_LEGACY:
2465                 int_mode = ECORE_INT_MODE_INTA;
2466                 rte_intr_callback_register(&pci_dev->intr_handle,
2467                                            qede_interrupt_handler_intx,
2468                                            (void *)eth_dev);
2469                 break;
2470         default:
2471                 int_mode = ECORE_INT_MODE_MSIX;
2472                 rte_intr_callback_register(&pci_dev->intr_handle,
2473                                            qede_interrupt_handler,
2474                                            (void *)eth_dev);
2475         }
2476
2477         if (rte_intr_enable(&pci_dev->intr_handle)) {
2478                 DP_ERR(edev, "rte_intr_enable() failed\n");
2479                 return -ENODEV;
2480         }
2481
2482         /* Start the Slowpath-process */
2483         memset(&params, 0, sizeof(struct qed_slowpath_params));
2484
2485         params.int_mode = int_mode;
2486         params.drv_major = QEDE_PMD_VERSION_MAJOR;
2487         params.drv_minor = QEDE_PMD_VERSION_MINOR;
2488         params.drv_rev = QEDE_PMD_VERSION_REVISION;
2489         params.drv_eng = QEDE_PMD_VERSION_PATCH;
2490         strncpy((char *)params.name, QEDE_PMD_VER_PREFIX,
2491                 QEDE_PMD_DRV_VER_STR_SIZE);
2492
2493         /* For CMT mode device do periodic polling for slowpath events.
2494          * This is required since uio device uses only one MSI-x
2495          * interrupt vector but we need one for each engine.
2496          */
2497         if (ECORE_IS_CMT(edev) && IS_PF(edev)) {
2498                 rc = rte_eal_alarm_set(QEDE_SP_TIMER_PERIOD,
2499                                        qede_poll_sp_sb_cb,
2500                                        (void *)eth_dev);
2501                 if (rc != 0) {
2502                         DP_ERR(edev, "Unable to start periodic"
2503                                      " timer rc %d\n", rc);
2504                         return -EINVAL;
2505                 }
2506         }
2507
2508         rc = qed_ops->common->slowpath_start(edev, &params);
2509         if (rc) {
2510                 DP_ERR(edev, "Cannot start slowpath rc = %d\n", rc);
2511                 rte_eal_alarm_cancel(qede_poll_sp_sb_cb,
2512                                      (void *)eth_dev);
2513                 return -ENODEV;
2514         }
2515
2516         rc = qed_ops->fill_dev_info(edev, &dev_info);
2517         if (rc) {
2518                 DP_ERR(edev, "Cannot get device_info rc %d\n", rc);
2519                 qed_ops->common->slowpath_stop(edev);
2520                 qed_ops->common->remove(edev);
2521                 rte_eal_alarm_cancel(qede_poll_sp_sb_cb,
2522                                      (void *)eth_dev);
2523                 return -ENODEV;
2524         }
2525
2526         qede_alloc_etherdev(adapter, &dev_info);
2527
2528         adapter->ops->common->set_name(edev, edev->name);
2529
2530         if (!is_vf)
2531                 adapter->dev_info.num_mac_filters =
2532                         (uint32_t)RESC_NUM(ECORE_LEADING_HWFN(edev),
2533                                             ECORE_MAC);
2534         else
2535                 ecore_vf_get_num_mac_filters(ECORE_LEADING_HWFN(edev),
2536                                 (uint32_t *)&adapter->dev_info.num_mac_filters);
2537
2538         /* Allocate memory for storing MAC addr */
2539         eth_dev->data->mac_addrs = rte_zmalloc(edev->name,
2540                                         (ETHER_ADDR_LEN *
2541                                         adapter->dev_info.num_mac_filters),
2542                                         RTE_CACHE_LINE_SIZE);
2543
2544         if (eth_dev->data->mac_addrs == NULL) {
2545                 DP_ERR(edev, "Failed to allocate MAC address\n");
2546                 qed_ops->common->slowpath_stop(edev);
2547                 qed_ops->common->remove(edev);
2548                 rte_eal_alarm_cancel(qede_poll_sp_sb_cb,
2549                                      (void *)eth_dev);
2550                 return -ENOMEM;
2551         }
2552
2553         if (!is_vf) {
2554                 ether_addr_copy((struct ether_addr *)edev->hwfns[0].
2555                                 hw_info.hw_mac_addr,
2556                                 &eth_dev->data->mac_addrs[0]);
2557                 ether_addr_copy(&eth_dev->data->mac_addrs[0],
2558                                 &adapter->primary_mac);
2559         } else {
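                     /* A VF learns its MAC address from the PF via the
                      * bulletin board; adopt it only if the PF actually
                      * assigned one.
                      */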
2560                 ecore_vf_read_bulletin(ECORE_LEADING_HWFN(edev),
2561                                        &bulletin_change);
2562                 if (bulletin_change) {
2563                         is_mac_exist =
2564                             ecore_vf_bulletin_get_forced_mac(
2565                                                 ECORE_LEADING_HWFN(edev),
2566                                                 vf_mac,
2567                                                 &is_mac_forced);
2568                         if (is_mac_exist) {
2569                                 DP_INFO(edev, "VF macaddr received from PF\n");
2570                                 ether_addr_copy((struct ether_addr *)&vf_mac,
2571                                                 &eth_dev->data->mac_addrs[0]);
2572                                 ether_addr_copy(&eth_dev->data->mac_addrs[0],
2573                                                 &adapter->primary_mac);
2574                         } else {
2575                                 DP_ERR(edev, "No VF macaddr assigned\n");
2576                         }
2577                 }
2578         }
2579
2580         eth_dev->dev_ops = (is_vf) ? &qede_eth_vf_dev_ops : &qede_eth_dev_ops;
2581
2582         if (do_once) {
2583                 qede_print_adapter_info(adapter);
2584                 do_once = false;
2585         }
2586
2587         /* Bring-up the link */
2588         qede_dev_set_link_state(eth_dev, true);
2589
2590         adapter->num_tx_queues = 0;
2591         adapter->num_rx_queues = 0;
2592         SLIST_INIT(&adapter->arfs_info.arfs_list_head);
2593         SLIST_INIT(&adapter->vlan_list_head);
2594         SLIST_INIT(&adapter->uc_list_head);
2595         SLIST_INIT(&adapter->mc_list_head);
2596         adapter->mtu = ETHER_MTU;
2597         adapter->vport_started = false;
2598
2599         /* VF tunnel offloads are enabled by default by the PF driver */
2600         adapter->vxlan.num_filters = 0;
2601         adapter->geneve.num_filters = 0;
2602         adapter->ipgre.num_filters = 0;
2603         if (is_vf) {
2604                 adapter->vxlan.enable = true;
2605                 adapter->vxlan.filter_type = ETH_TUNNEL_FILTER_IMAC |
2606                                              ETH_TUNNEL_FILTER_IVLAN;
2607                 adapter->vxlan.udp_port = QEDE_VXLAN_DEF_PORT;
2608                 adapter->geneve.enable = true;
2609                 adapter->geneve.filter_type = ETH_TUNNEL_FILTER_IMAC |
2610                                               ETH_TUNNEL_FILTER_IVLAN;
2611                 adapter->geneve.udp_port = QEDE_GENEVE_DEF_PORT;
2612                 adapter->ipgre.enable = true;
2613                 adapter->ipgre.filter_type = ETH_TUNNEL_FILTER_IMAC |
2614                                              ETH_TUNNEL_FILTER_IVLAN;
2615         } else {
2616                 adapter->vxlan.enable = false;
2617                 adapter->geneve.enable = false;
2618                 adapter->ipgre.enable = false;
2619         }
2620
2621         DP_INFO(edev, "MAC address : %02x:%02x:%02x:%02x:%02x:%02x\n",
2622                 adapter->primary_mac.addr_bytes[0],
2623                 adapter->primary_mac.addr_bytes[1],
2624                 adapter->primary_mac.addr_bytes[2],
2625                 adapter->primary_mac.addr_bytes[3],
2626                 adapter->primary_mac.addr_bytes[4],
2627                 adapter->primary_mac.addr_bytes[5]);
2628
2629         DP_INFO(edev, "Device initialized\n");
2630
2631         return 0;
2632 }
2633
static int qedevf_eth_dev_init(struct rte_eth_dev *eth_dev)
{
	return qede_common_dev_init(eth_dev, 1);
}

static int qede_eth_dev_init(struct rte_eth_dev *eth_dev)
{
	return qede_common_dev_init(eth_dev, 0);
}

static int qede_dev_common_uninit(struct rte_eth_dev *eth_dev)
{
	struct qede_dev *qdev = eth_dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;

	PMD_INIT_FUNC_TRACE(edev);

	/* Only uninitialize in the primary process */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	/* Safe to close the device here */
	qede_dev_close(eth_dev);

	eth_dev->dev_ops = NULL;
	eth_dev->rx_pkt_burst = NULL;
	eth_dev->tx_pkt_burst = NULL;

	return 0;
}

static int qede_eth_dev_uninit(struct rte_eth_dev *eth_dev)
{
	return qede_dev_common_uninit(eth_dev);
}

static int qedevf_eth_dev_uninit(struct rte_eth_dev *eth_dev)
{
	return qede_dev_common_uninit(eth_dev);
}

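/*
 * PCI device ID tables used by the PCI bus driver to match probed devices to
 * this PMD. RTE_PCI_DEVICE() fills in the vendor/device IDs and (as far as we
 * can tell from rte_bus_pci.h) wildcards the class and subsystem IDs; each
 * table is terminated by a zeroed sentinel entry.
 */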
static const struct rte_pci_id pci_id_qedevf_map[] = {
#define QEDEVF_RTE_PCI_DEVICE(dev) RTE_PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, dev)
	{
		QEDEVF_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_NX2_VF)
	},
	{
		QEDEVF_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_57980S_IOV)
	},
	{
		QEDEVF_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_AH_IOV)
	},
	{.vendor_id = 0,}
};

static const struct rte_pci_id pci_id_qede_map[] = {
#define QEDE_RTE_PCI_DEVICE(dev) RTE_PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, dev)
	{
		QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_NX2_57980E)
	},
	{
		QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_NX2_57980S)
	},
	{
		QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_57980S_40)
	},
	{
		QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_57980S_25)
	},
	{
		QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_57980S_100)
	},
	{
		QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_57980S_50)
	},
	{
		QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_AH_50G)
	},
	{
		QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_AH_10G)
	},
	{
		QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_AH_40G)
	},
	{
		QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_AH_25G)
	},
	{.vendor_id = 0,}
};

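/*
 * Probe/remove callbacks invoked by the PCI bus driver.
 * rte_eth_dev_pci_generic_probe() allocates an rte_eth_dev whose private data
 * area is sizeof(struct qede_dev) and then runs the given init callback on
 * it; rte_eth_dev_pci_generic_remove() runs the uninit callback and releases
 * the device.
 */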
static int qedevf_eth_dev_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
	struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev,
		sizeof(struct qede_dev), qedevf_eth_dev_init);
}

static int qedevf_eth_dev_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, qedevf_eth_dev_uninit);
}

static struct rte_pci_driver rte_qedevf_pmd = {
	.id_table = pci_id_qedevf_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
	.probe = qedevf_eth_dev_pci_probe,
	.remove = qedevf_eth_dev_pci_remove,
};

static int qede_eth_dev_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
	struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev,
		sizeof(struct qede_dev), qede_eth_dev_init);
}

static int qede_eth_dev_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, qede_eth_dev_uninit);
}

static struct rte_pci_driver rte_qede_pmd = {
	.id_table = pci_id_qede_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
	.probe = qede_eth_dev_pci_probe,
	.remove = qede_eth_dev_pci_remove,
};

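/*
 * Register the PF and VF drivers with the PCI bus as "net_qede" and
 * "net_qede_vf", export their device ID tables (e.g. for dpdk-pmdinfo), and
 * declare which kernel modules a matching device may be bound to. An
 * illustrative invocation (the device address is hypothetical):
 *
 *   dpdk-devbind.py --bind=vfio-pci 0000:05:00.0
 *   testpmd -w 0000:05:00.0 -- -i
 */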
RTE_PMD_REGISTER_PCI(net_qede, rte_qede_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_qede, pci_id_qede_map);
RTE_PMD_REGISTER_KMOD_DEP(net_qede, "* igb_uio | uio_pci_generic | vfio-pci");
RTE_PMD_REGISTER_PCI(net_qede_vf, rte_qedevf_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_qede_vf, pci_id_qedevf_map);
RTE_PMD_REGISTER_KMOD_DEP(net_qede_vf, "* igb_uio | vfio-pci");

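/*
 * Constructor that registers the two qede log types and defaults them to
 * NOTICE. Verbosity can be raised from the EAL command line, e.g. (syntax as
 * we understand it for this DPDK release; 8 == RTE_LOG_DEBUG):
 *
 *   testpmd --log-level=pmd.net.qede.driver:8 ...
 */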
RTE_INIT(qede_init_log)
{
	qede_logtype_init = rte_log_register("pmd.net.qede.init");
	if (qede_logtype_init >= 0)
		rte_log_set_level(qede_logtype_init, RTE_LOG_NOTICE);
	qede_logtype_driver = rte_log_register("pmd.net.qede.driver");
	if (qede_logtype_driver >= 0)
		rte_log_set_level(qede_logtype_driver, RTE_LOG_NOTICE);
}