/*
 * Copyright (c) 2016 QLogic Corporation.
 * All rights reserved.
 * www.qlogic.com
 *
 * See LICENSE.qede_pmd for copyright and licensing details.
 */

#include "qede_ethdev.h"
#include <rte_alarm.h>
#include <rte_version.h>
#include <rte_kvargs.h>

static const struct qed_eth_ops *qed_ops;
static int64_t timer_period = 1;

/* VXLAN tunnel classification mapping */
const struct _qede_vxlan_tunn_types {
	uint16_t rte_filter_type;
	enum ecore_filter_ucast_type qede_type;
	enum ecore_tunn_clss qede_tunn_clss;
	const char *string;
} qede_tunn_types[] = {
	{ ETH_TUNNEL_FILTER_OMAC,
	  ECORE_FILTER_MAC,
	  ECORE_TUNN_CLSS_MAC_VLAN,
	  "outer-mac" },
	{ ETH_TUNNEL_FILTER_TENID,
	  ECORE_FILTER_VNI,
	  ECORE_TUNN_CLSS_MAC_VNI,
	  "vni" },
	{ ETH_TUNNEL_FILTER_IMAC,
	  ECORE_FILTER_INNER_MAC,
	  ECORE_TUNN_CLSS_INNER_MAC_VLAN,
	  "inner-mac" },
	{ ETH_TUNNEL_FILTER_IVLAN,
	  ECORE_FILTER_INNER_VLAN,
	  ECORE_TUNN_CLSS_INNER_MAC_VLAN,
	  "inner-vlan" },
	{ ETH_TUNNEL_FILTER_OMAC | ETH_TUNNEL_FILTER_TENID,
	  ECORE_FILTER_MAC_VNI_PAIR,
	  ECORE_TUNN_CLSS_MAC_VNI,
	  "outer-mac and vni" },
	{ ETH_TUNNEL_FILTER_OMAC | ETH_TUNNEL_FILTER_IMAC,
	  ECORE_FILTER_UNUSED,
	  MAX_ECORE_TUNN_CLSS,
	  "outer-mac and inner-mac" },
	{ ETH_TUNNEL_FILTER_OMAC | ETH_TUNNEL_FILTER_IVLAN,
	  ECORE_FILTER_UNUSED,
	  MAX_ECORE_TUNN_CLSS,
	  "outer-mac and inner-vlan" },
	{ ETH_TUNNEL_FILTER_TENID | ETH_TUNNEL_FILTER_IMAC,
	  ECORE_FILTER_INNER_MAC_VNI_PAIR,
	  ECORE_TUNN_CLSS_INNER_MAC_VNI,
	  "vni and inner-mac" },
	{ ETH_TUNNEL_FILTER_TENID | ETH_TUNNEL_FILTER_IVLAN,
	  ECORE_FILTER_UNUSED,
	  MAX_ECORE_TUNN_CLSS,
	  "vni and inner-vlan" },
	{ ETH_TUNNEL_FILTER_IMAC | ETH_TUNNEL_FILTER_IVLAN,
	  ECORE_FILTER_INNER_PAIR,
	  ECORE_TUNN_CLSS_INNER_MAC_VLAN,
	  "inner-mac and inner-vlan" },
	{ ETH_TUNNEL_FILTER_OIP,
	  ECORE_FILTER_UNUSED,
	  MAX_ECORE_TUNN_CLSS,
	  "outer-IP" },
	{ ETH_TUNNEL_FILTER_IIP,
	  ECORE_FILTER_UNUSED,
	  MAX_ECORE_TUNN_CLSS,
	  "inner-IP" },
	{ RTE_TUNNEL_FILTER_IMAC_IVLAN,
	  ECORE_FILTER_UNUSED,
	  MAX_ECORE_TUNN_CLSS,
	  "IMAC_IVLAN" },
	{ RTE_TUNNEL_FILTER_IMAC_IVLAN_TENID,
	  ECORE_FILTER_UNUSED,
	  MAX_ECORE_TUNN_CLSS,
	  "IMAC_IVLAN_TENID" },
	{ RTE_TUNNEL_FILTER_IMAC_TENID,
	  ECORE_FILTER_UNUSED,
	  MAX_ECORE_TUNN_CLSS,
	  "IMAC_TENID" },
	{ RTE_TUNNEL_FILTER_OMAC_TENID_IMAC,
	  ECORE_FILTER_UNUSED,
	  MAX_ECORE_TUNN_CLSS,
	  "OMAC_TENID_IMAC" },
};

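/* A minimal lookup sketch (illustration only, not part of the driver): the
 * table above is intended to be scanned linearly for an exact
 * rte_filter_type match, along the lines of:
 *
 *	for (i = 0; i < RTE_DIM(qede_tunn_types); i++)
 *		if (qede_tunn_types[i].rte_filter_type == filter_type)
 *			return &qede_tunn_types[i];
 *
 * Combinations mapped to ECORE_FILTER_UNUSED / MAX_ECORE_TUNN_CLSS cannot be
 * classified by the firmware; their string field is used only in logs.
 */
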
struct rte_qede_xstats_name_off {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	uint64_t offset;
};

static const struct rte_qede_xstats_name_off qede_xstats_strings[] = {
	{"rx_unicast_bytes",
		offsetof(struct ecore_eth_stats_common, rx_ucast_bytes)},
	{"rx_multicast_bytes",
		offsetof(struct ecore_eth_stats_common, rx_mcast_bytes)},
	{"rx_broadcast_bytes",
		offsetof(struct ecore_eth_stats_common, rx_bcast_bytes)},
	{"rx_unicast_packets",
		offsetof(struct ecore_eth_stats_common, rx_ucast_pkts)},
	{"rx_multicast_packets",
		offsetof(struct ecore_eth_stats_common, rx_mcast_pkts)},
	{"rx_broadcast_packets",
		offsetof(struct ecore_eth_stats_common, rx_bcast_pkts)},

	{"tx_unicast_bytes",
		offsetof(struct ecore_eth_stats_common, tx_ucast_bytes)},
	{"tx_multicast_bytes",
		offsetof(struct ecore_eth_stats_common, tx_mcast_bytes)},
	{"tx_broadcast_bytes",
		offsetof(struct ecore_eth_stats_common, tx_bcast_bytes)},
	{"tx_unicast_packets",
		offsetof(struct ecore_eth_stats_common, tx_ucast_pkts)},
	{"tx_multicast_packets",
		offsetof(struct ecore_eth_stats_common, tx_mcast_pkts)},
	{"tx_broadcast_packets",
		offsetof(struct ecore_eth_stats_common, tx_bcast_pkts)},

	{"rx_64_byte_packets",
		offsetof(struct ecore_eth_stats_common, rx_64_byte_packets)},
	{"rx_65_to_127_byte_packets",
		offsetof(struct ecore_eth_stats_common,
			 rx_65_to_127_byte_packets)},
	{"rx_128_to_255_byte_packets",
		offsetof(struct ecore_eth_stats_common,
			 rx_128_to_255_byte_packets)},
	{"rx_256_to_511_byte_packets",
		offsetof(struct ecore_eth_stats_common,
			 rx_256_to_511_byte_packets)},
	{"rx_512_to_1023_byte_packets",
		offsetof(struct ecore_eth_stats_common,
			 rx_512_to_1023_byte_packets)},
	{"rx_1024_to_1518_byte_packets",
		offsetof(struct ecore_eth_stats_common,
			 rx_1024_to_1518_byte_packets)},
	{"tx_64_byte_packets",
		offsetof(struct ecore_eth_stats_common, tx_64_byte_packets)},
	{"tx_65_to_127_byte_packets",
		offsetof(struct ecore_eth_stats_common,
			 tx_65_to_127_byte_packets)},
	{"tx_128_to_255_byte_packets",
		offsetof(struct ecore_eth_stats_common,
			 tx_128_to_255_byte_packets)},
	{"tx_256_to_511_byte_packets",
		offsetof(struct ecore_eth_stats_common,
			 tx_256_to_511_byte_packets)},
	{"tx_512_to_1023_byte_packets",
		offsetof(struct ecore_eth_stats_common,
			 tx_512_to_1023_byte_packets)},
	{"tx_1024_to_1518_byte_packets",
		offsetof(struct ecore_eth_stats_common,
			 tx_1024_to_1518_byte_packets)},

	{"rx_mac_crtl_frames",
		offsetof(struct ecore_eth_stats_common, rx_mac_crtl_frames)},
	{"tx_mac_control_frames",
		offsetof(struct ecore_eth_stats_common, tx_mac_ctrl_frames)},
	{"rx_pause_frames",
		offsetof(struct ecore_eth_stats_common, rx_pause_frames)},
	{"tx_pause_frames",
		offsetof(struct ecore_eth_stats_common, tx_pause_frames)},
	{"rx_priority_flow_control_frames",
		offsetof(struct ecore_eth_stats_common, rx_pfc_frames)},
	{"tx_priority_flow_control_frames",
		offsetof(struct ecore_eth_stats_common, tx_pfc_frames)},

	{"rx_crc_errors",
		offsetof(struct ecore_eth_stats_common, rx_crc_errors)},
	{"rx_align_errors",
		offsetof(struct ecore_eth_stats_common, rx_align_errors)},
	{"rx_carrier_errors",
		offsetof(struct ecore_eth_stats_common, rx_carrier_errors)},
	{"rx_oversize_packet_errors",
		offsetof(struct ecore_eth_stats_common, rx_oversize_packets)},
	{"rx_jabbers",
		offsetof(struct ecore_eth_stats_common, rx_jabbers)},
	{"rx_undersize_packet_errors",
		offsetof(struct ecore_eth_stats_common, rx_undersize_packets)},
	{"rx_fragments", offsetof(struct ecore_eth_stats_common, rx_fragments)},
	{"rx_host_buffer_not_available",
		offsetof(struct ecore_eth_stats_common, no_buff_discards)},
	/* Number of packets discarded because they are bigger than MTU */
	{"rx_packet_too_big_discards",
		offsetof(struct ecore_eth_stats_common,
			 packet_too_big_discard)},
	{"rx_ttl_zero_discards",
		offsetof(struct ecore_eth_stats_common, ttl0_discard)},
	{"rx_multi_function_tag_filter_discards",
		offsetof(struct ecore_eth_stats_common, mftag_filter_discards)},
	{"rx_mac_filter_discards",
		offsetof(struct ecore_eth_stats_common, mac_filter_discards)},
	{"rx_hw_buffer_truncates",
		offsetof(struct ecore_eth_stats_common, brb_truncates)},
	{"rx_hw_buffer_discards",
		offsetof(struct ecore_eth_stats_common, brb_discards)},
	{"tx_error_drop_packets",
		offsetof(struct ecore_eth_stats_common, tx_err_drop_pkts)},

	{"rx_mac_bytes", offsetof(struct ecore_eth_stats_common, rx_mac_bytes)},
	{"rx_mac_unicast_packets",
		offsetof(struct ecore_eth_stats_common, rx_mac_uc_packets)},
	{"rx_mac_multicast_packets",
		offsetof(struct ecore_eth_stats_common, rx_mac_mc_packets)},
	{"rx_mac_broadcast_packets",
		offsetof(struct ecore_eth_stats_common, rx_mac_bc_packets)},
	{"rx_mac_frames_ok",
		offsetof(struct ecore_eth_stats_common, rx_mac_frames_ok)},
	{"tx_mac_bytes", offsetof(struct ecore_eth_stats_common, tx_mac_bytes)},
	{"tx_mac_unicast_packets",
		offsetof(struct ecore_eth_stats_common, tx_mac_uc_packets)},
	{"tx_mac_multicast_packets",
		offsetof(struct ecore_eth_stats_common, tx_mac_mc_packets)},
	{"tx_mac_broadcast_packets",
		offsetof(struct ecore_eth_stats_common, tx_mac_bc_packets)},

	{"lro_coalesced_packets",
		offsetof(struct ecore_eth_stats_common, tpa_coalesced_pkts)},
	{"lro_coalesced_events",
		offsetof(struct ecore_eth_stats_common, tpa_coalesced_events)},
	{"lro_aborts_num",
		offsetof(struct ecore_eth_stats_common, tpa_aborts_num)},
	{"lro_not_coalesced_packets",
		offsetof(struct ecore_eth_stats_common,
			 tpa_not_coalesced_pkts)},
	{"lro_coalesced_bytes",
		offsetof(struct ecore_eth_stats_common,
			 tpa_coalesced_bytes)},
};

static const struct rte_qede_xstats_name_off qede_bb_xstats_strings[] = {
	{"rx_1519_to_1522_byte_packets",
		offsetof(struct ecore_eth_stats, bb) +
		offsetof(struct ecore_eth_stats_bb,
			 rx_1519_to_1522_byte_packets)},
	{"rx_1519_to_2047_byte_packets",
		offsetof(struct ecore_eth_stats, bb) +
		offsetof(struct ecore_eth_stats_bb,
			 rx_1519_to_2047_byte_packets)},
	{"rx_2048_to_4095_byte_packets",
		offsetof(struct ecore_eth_stats, bb) +
		offsetof(struct ecore_eth_stats_bb,
			 rx_2048_to_4095_byte_packets)},
	{"rx_4096_to_9216_byte_packets",
		offsetof(struct ecore_eth_stats, bb) +
		offsetof(struct ecore_eth_stats_bb,
			 rx_4096_to_9216_byte_packets)},
	{"rx_9217_to_16383_byte_packets",
		offsetof(struct ecore_eth_stats, bb) +
		offsetof(struct ecore_eth_stats_bb,
			 rx_9217_to_16383_byte_packets)},

	{"tx_1519_to_2047_byte_packets",
		offsetof(struct ecore_eth_stats, bb) +
		offsetof(struct ecore_eth_stats_bb,
			 tx_1519_to_2047_byte_packets)},
	{"tx_2048_to_4095_byte_packets",
		offsetof(struct ecore_eth_stats, bb) +
		offsetof(struct ecore_eth_stats_bb,
			 tx_2048_to_4095_byte_packets)},
	{"tx_4096_to_9216_byte_packets",
		offsetof(struct ecore_eth_stats, bb) +
		offsetof(struct ecore_eth_stats_bb,
			 tx_4096_to_9216_byte_packets)},
	{"tx_9217_to_16383_byte_packets",
		offsetof(struct ecore_eth_stats, bb) +
		offsetof(struct ecore_eth_stats_bb,
			 tx_9217_to_16383_byte_packets)},

	{"tx_lpi_entry_count",
		offsetof(struct ecore_eth_stats, bb) +
		offsetof(struct ecore_eth_stats_bb, tx_lpi_entry_count)},
	{"tx_total_collisions",
		offsetof(struct ecore_eth_stats, bb) +
		offsetof(struct ecore_eth_stats_bb, tx_total_collisions)},
};

static const struct rte_qede_xstats_name_off qede_ah_xstats_strings[] = {
	{"rx_1519_to_max_byte_packets",
		offsetof(struct ecore_eth_stats, ah) +
		offsetof(struct ecore_eth_stats_ah,
			 rx_1519_to_max_byte_packets)},
	{"tx_1519_to_max_byte_packets",
		offsetof(struct ecore_eth_stats, ah) +
		offsetof(struct ecore_eth_stats_ah,
			 tx_1519_to_max_byte_packets)},
};

static const struct rte_qede_xstats_name_off qede_rxq_xstats_strings[] = {
	{"rx_q_segments",
		offsetof(struct qede_rx_queue, rx_segs)},
	{"rx_q_hw_errors",
		offsetof(struct qede_rx_queue, rx_hw_errors)},
	{"rx_q_allocation_errors",
		offsetof(struct qede_rx_queue, rx_alloc_errors)}
};

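/* How the name/offset tables above are consumed (see qede_get_xstats()
 * below): each counter lives at a fixed byte offset inside the stats
 * structure, so reading one is plain pointer arithmetic, roughly:
 *
 *	value = *(uint64_t *)((char *)&stats + qede_xstats_strings[i].offset);
 *
 * The same trick, based at the qede_rx_queue struct, serves the per-queue
 * strings.
 */
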
static void qede_interrupt_action(struct ecore_hwfn *p_hwfn)
{
	ecore_int_sp_dpc((osal_int_ptr_t)(p_hwfn));
}

static void
qede_interrupt_handler_intx(void *param)
{
	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param;
	struct qede_dev *qdev = eth_dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;
	u64 status;

	/* Check if our device actually raised an interrupt */
	status = ecore_int_igu_read_sisr_reg(ECORE_LEADING_HWFN(edev));
	if (status & 0x1) {
		qede_interrupt_action(ECORE_LEADING_HWFN(edev));

		if (rte_intr_enable(eth_dev->intr_handle))
			DP_ERR(edev, "rte_intr_enable failed\n");
	}
}

static void
qede_interrupt_handler(void *param)
{
	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param;
	struct qede_dev *qdev = eth_dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;

	qede_interrupt_action(ECORE_LEADING_HWFN(edev));
	if (rte_intr_enable(eth_dev->intr_handle))
		DP_ERR(edev, "rte_intr_enable failed\n");
}

static void
qede_alloc_etherdev(struct qede_dev *qdev, struct qed_dev_eth_info *info)
{
	rte_memcpy(&qdev->dev_info, info, sizeof(*info));
	qdev->ops = qed_ops;
}

#ifdef RTE_LIBRTE_QEDE_DEBUG_INFO
static void qede_print_adapter_info(struct qede_dev *qdev)
{
	struct ecore_dev *edev = &qdev->edev;
	struct qed_dev_info *info = &qdev->dev_info.common;
	static char drv_ver[QEDE_PMD_DRV_VER_STR_SIZE];
	static char ver_str[QEDE_PMD_DRV_VER_STR_SIZE];

	DP_INFO(edev, "*********************************\n");
	DP_INFO(edev, " DPDK version:%s\n", rte_version());
	DP_INFO(edev, " Chip details : %s %c%d\n",
		ECORE_IS_BB(edev) ? "BB" : "AH",
		'A' + edev->chip_rev,
		(int)edev->chip_metal);
	snprintf(ver_str, QEDE_PMD_DRV_VER_STR_SIZE, "%d.%d.%d.%d",
		 info->fw_major, info->fw_minor, info->fw_rev, info->fw_eng);
	snprintf(drv_ver, QEDE_PMD_DRV_VER_STR_SIZE, "%s_%s",
		 ver_str, QEDE_PMD_VERSION);
	DP_INFO(edev, " Driver version : %s\n", drv_ver);
	DP_INFO(edev, " Firmware version : %s\n", ver_str);

	snprintf(ver_str, MCP_DRV_VER_STR_SIZE,
		 "%d.%d.%d.%d",
		 (info->mfw_rev >> 24) & 0xff,
		 (info->mfw_rev >> 16) & 0xff,
		 (info->mfw_rev >> 8) & 0xff, (info->mfw_rev) & 0xff);
	DP_INFO(edev, " Management Firmware version : %s\n", ver_str);
	DP_INFO(edev, " Firmware file : %s\n", fw_file);
	DP_INFO(edev, "*********************************\n");
}
#endif

static void qede_reset_queue_stats(struct qede_dev *qdev, bool xstats)
{
#ifdef RTE_LIBRTE_QEDE_DEBUG_DRIVER
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
#endif
	unsigned int i = 0, j = 0, qid;
	unsigned int rxq_stat_cntrs, txq_stat_cntrs;
	struct qede_tx_queue *txq;

	DP_VERBOSE(edev, ECORE_MSG_DEBUG, "Clearing queue stats\n");

	rxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(qdev),
				 RTE_ETHDEV_QUEUE_STAT_CNTRS);
	txq_stat_cntrs = RTE_MIN(QEDE_TSS_COUNT(qdev),
				 RTE_ETHDEV_QUEUE_STAT_CNTRS);

	for_each_rss(qid) {
		OSAL_MEMSET(((char *)(qdev->fp_array[qid].rxq)) +
			     offsetof(struct qede_rx_queue, rcv_pkts), 0,
			    sizeof(uint64_t));
		OSAL_MEMSET(((char *)(qdev->fp_array[qid].rxq)) +
			     offsetof(struct qede_rx_queue, rx_hw_errors), 0,
			    sizeof(uint64_t));
		OSAL_MEMSET(((char *)(qdev->fp_array[qid].rxq)) +
			     offsetof(struct qede_rx_queue, rx_alloc_errors), 0,
			    sizeof(uint64_t));

		if (xstats)
			for (j = 0; j < RTE_DIM(qede_rxq_xstats_strings); j++)
				OSAL_MEMSET((((char *)
					      (qdev->fp_array[qid].rxq)) +
					     qede_rxq_xstats_strings[j].offset),
					    0,
					    sizeof(uint64_t));

		i++;
		if (i == rxq_stat_cntrs)
			break;
	}

	i = 0;

	for_each_tss(qid) {
		txq = qdev->fp_array[qid].txq;

		OSAL_MEMSET((uint64_t *)(uintptr_t)
				(((uint64_t)(uintptr_t)(txq)) +
				 offsetof(struct qede_tx_queue, xmit_pkts)), 0,
			    sizeof(uint64_t));

		i++;
		if (i == txq_stat_cntrs)
			break;
	}
}

static int
qede_stop_vport(struct ecore_dev *edev)
{
	struct ecore_hwfn *p_hwfn;
	uint8_t vport_id;
	int rc;
	int i;

	vport_id = 0;
	for_each_hwfn(edev, i) {
		p_hwfn = &edev->hwfns[i];
		rc = ecore_sp_vport_stop(p_hwfn, p_hwfn->hw_info.opaque_fid,
					 vport_id);
		if (rc != ECORE_SUCCESS) {
			DP_ERR(edev, "Stop V-PORT failed rc = %d\n", rc);
			return rc;
		}
	}

	DP_INFO(edev, "vport stopped\n");

	return 0;
}

static int
qede_start_vport(struct qede_dev *qdev, uint16_t mtu)
{
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct ecore_sp_vport_start_params params;
	struct ecore_hwfn *p_hwfn;
	int rc;
	int i;

	if (qdev->vport_started)
		qede_stop_vport(edev);

	memset(&params, 0, sizeof(params));
	params.vport_id = 0;
	params.mtu = mtu;
	/* @DPDK - Disable FW placement */
	params.zero_placement_offset = 1;
	for_each_hwfn(edev, i) {
		p_hwfn = &edev->hwfns[i];
		params.concrete_fid = p_hwfn->hw_info.concrete_fid;
		params.opaque_fid = p_hwfn->hw_info.opaque_fid;
		rc = ecore_sp_vport_start(p_hwfn, &params);
		if (rc != ECORE_SUCCESS) {
			DP_ERR(edev, "Start V-PORT failed %d\n", rc);
			return rc;
		}
	}
	ecore_reset_vport_stats(edev);
	qdev->vport_started = true;
	DP_INFO(edev, "VPORT started with MTU = %u\n", mtu);

	return 0;
}

/* Activate or deactivate vport via vport-update */
int qede_activate_vport(struct rte_eth_dev *eth_dev, bool flg)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct ecore_sp_vport_update_params params;
	struct ecore_hwfn *p_hwfn;
	uint8_t i;
	int rc = -1;

	memset(&params, 0, sizeof(struct ecore_sp_vport_update_params));
	params.vport_id = 0;
	params.update_vport_active_rx_flg = 1;
	params.update_vport_active_tx_flg = 1;
	params.vport_active_rx_flg = flg;
	params.vport_active_tx_flg = flg;
	if (~qdev->enable_tx_switching & flg) {
		params.update_tx_switching_flg = 1;
		params.tx_switching_flg = !flg;
	}
	for_each_hwfn(edev, i) {
		p_hwfn = &edev->hwfns[i];
		params.opaque_fid = p_hwfn->hw_info.opaque_fid;
		rc = ecore_sp_vport_update(p_hwfn, &params,
					   ECORE_SPQ_MODE_EBLOCK, NULL);
		if (rc != ECORE_SUCCESS) {
			DP_ERR(edev, "Failed to update vport\n");
			break;
		}
	}
	DP_INFO(edev, "vport is %s\n", flg ? "activated" : "deactivated");

	return rc;
}

static void
qede_update_sge_tpa_params(struct ecore_sge_tpa_params *sge_tpa_params,
			   uint16_t mtu, bool enable)
{
	/* Enable LRO in split mode */
	sge_tpa_params->tpa_ipv4_en_flg = enable;
	sge_tpa_params->tpa_ipv6_en_flg = enable;
	sge_tpa_params->tpa_ipv4_tunn_en_flg = enable;
	sge_tpa_params->tpa_ipv6_tunn_en_flg = enable;
	/* set if tpa enable changes */
	sge_tpa_params->update_tpa_en_flg = 1;
	/* set if tpa parameters should be handled */
	sge_tpa_params->update_tpa_param_flg = enable;

	sge_tpa_params->max_buffers_per_cqe = 20;
	/* Enable TPA in split mode. In this mode each TPA segment
	 * starts on the new BD, so there is one BD per segment.
	 */
	sge_tpa_params->tpa_pkt_split_flg = 1;
	sge_tpa_params->tpa_hdr_data_split_flg = 0;
	sge_tpa_params->tpa_gro_consistent_flg = 0;
	sge_tpa_params->tpa_max_aggs_num = ETH_TPA_MAX_AGGS_NUM;
	sge_tpa_params->tpa_max_size = 0x7FFF;
	sge_tpa_params->tpa_min_size_to_start = mtu / 2;
	sge_tpa_params->tpa_min_size_to_cont = mtu / 2;
}

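/* For example, with the default MTU of 1500 both thresholds above evaluate
 * to 750 bytes, i.e. an aggregation is started (and continued) only once
 * half an MTU worth of payload is available.
 */
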
/* Enable/disable LRO via vport-update */
int qede_enable_tpa(struct rte_eth_dev *eth_dev, bool flg)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct ecore_sp_vport_update_params params;
	struct ecore_sge_tpa_params tpa_params;
	struct ecore_hwfn *p_hwfn;
	int rc;
	int i;

	memset(&params, 0, sizeof(struct ecore_sp_vport_update_params));
	memset(&tpa_params, 0, sizeof(struct ecore_sge_tpa_params));
	qede_update_sge_tpa_params(&tpa_params, qdev->mtu, flg);
	params.vport_id = 0;
	params.sge_tpa_params = &tpa_params;
	for_each_hwfn(edev, i) {
		p_hwfn = &edev->hwfns[i];
		params.opaque_fid = p_hwfn->hw_info.opaque_fid;
		rc = ecore_sp_vport_update(p_hwfn, &params,
					   ECORE_SPQ_MODE_EBLOCK, NULL);
		if (rc != ECORE_SUCCESS) {
			DP_ERR(edev, "Failed to update LRO\n");
			return -1;
		}
	}
	qdev->enable_lro = flg;
	DP_INFO(edev, "LRO is %s\n", flg ? "enabled" : "disabled");

	return 0;
}

static void qede_set_ucast_cmn_params(struct ecore_filter_ucast *ucast)
{
	memset(ucast, 0, sizeof(struct ecore_filter_ucast));
	ucast->is_rx_filter = true;
	ucast->is_tx_filter = true;
	/* ucast->assert_on_error = true; - For debug */
}

static int
qed_configure_filter_rx_mode(struct rte_eth_dev *eth_dev,
			     enum qed_filter_rx_mode_type type)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct ecore_filter_accept_flags flags;

	memset(&flags, 0, sizeof(flags));

	flags.update_rx_mode_config = 1;
	flags.update_tx_mode_config = 1;
	flags.rx_accept_filter = ECORE_ACCEPT_UCAST_MATCHED |
		ECORE_ACCEPT_MCAST_MATCHED |
		ECORE_ACCEPT_BCAST;

	flags.tx_accept_filter = ECORE_ACCEPT_UCAST_MATCHED |
		ECORE_ACCEPT_MCAST_MATCHED |
		ECORE_ACCEPT_BCAST;

	if (type == QED_FILTER_RX_MODE_TYPE_PROMISC) {
		flags.rx_accept_filter |= ECORE_ACCEPT_UCAST_UNMATCHED;
		if (IS_VF(edev)) {
			flags.tx_accept_filter |= ECORE_ACCEPT_UCAST_UNMATCHED;
			DP_INFO(edev, "Enabling Tx unmatched flag for VF\n");
		}
	} else if (type == QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC) {
		flags.rx_accept_filter |= ECORE_ACCEPT_MCAST_UNMATCHED;
	} else if (type == (QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC |
			    QED_FILTER_RX_MODE_TYPE_PROMISC)) {
		flags.rx_accept_filter |= ECORE_ACCEPT_UCAST_UNMATCHED |
			ECORE_ACCEPT_MCAST_UNMATCHED;
	}

	return ecore_filter_accept_cmd(edev, 0, flags, false, false,
				       ECORE_SPQ_MODE_CB, NULL);
}

static int
qede_vxlan_enable(struct rte_eth_dev *eth_dev, uint8_t clss,
		  bool enable, bool mask)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	enum _ecore_status_t rc = ECORE_INVAL;
	struct ecore_ptt *p_ptt;
	struct ecore_tunnel_info tunn;
	struct ecore_hwfn *p_hwfn;
	int i;

	memset(&tunn, 0, sizeof(struct ecore_tunnel_info));
	tunn.vxlan.b_update_mode = enable;
	tunn.vxlan.b_mode_enabled = mask;
	tunn.b_update_rx_cls = true;
	tunn.b_update_tx_cls = true;
	tunn.vxlan.tun_cls = clss;

	for_each_hwfn(edev, i) {
		p_hwfn = &edev->hwfns[i];
		if (IS_PF(edev)) {
			p_ptt = ecore_ptt_acquire(p_hwfn);
			if (!p_ptt)
				return -EAGAIN;
		} else {
			p_ptt = NULL;
		}
		rc = ecore_sp_pf_update_tunn_cfg(p_hwfn, p_ptt,
				&tunn, ECORE_SPQ_MODE_CB, NULL);
		/* Release the PTT window on both success and failure */
		if (IS_PF(edev))
			ecore_ptt_release(p_hwfn, p_ptt);
		if (rc != ECORE_SUCCESS) {
			DP_ERR(edev, "Failed to update tunn_clss %u\n",
			       tunn.vxlan.tun_cls);
			break;
		}
	}

	if (rc == ECORE_SUCCESS) {
		qdev->vxlan.enable = enable;
		qdev->vxlan.udp_port = (enable) ? QEDE_VXLAN_DEF_PORT : 0;
		DP_INFO(edev, "vxlan is %s\n", enable ? "enabled" : "disabled");
	}

	return rc;
}

static int
qede_ucast_filter(struct rte_eth_dev *eth_dev, struct ecore_filter_ucast *ucast,
		  bool add)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct qede_ucast_entry *tmp = NULL;
	struct qede_ucast_entry *u;
	struct ether_addr *mac_addr;

	mac_addr = (struct ether_addr *)ucast->mac;
	if (add) {
		SLIST_FOREACH(tmp, &qdev->uc_list_head, list) {
			if ((memcmp(mac_addr, &tmp->mac,
				    ETHER_ADDR_LEN) == 0) &&
			    ucast->vni == tmp->vni &&
			    ucast->vlan == tmp->vlan) {
				DP_INFO(edev, "Unicast MAC is already added"
					" with vlan = %u, vni = %u\n",
					ucast->vlan, ucast->vni);
				return 0;
			}
		}
		u = rte_malloc(NULL, sizeof(struct qede_ucast_entry),
			       RTE_CACHE_LINE_SIZE);
		if (!u) {
			DP_ERR(edev, "Did not allocate memory for ucast\n");
			return -ENOMEM;
		}
		ether_addr_copy(mac_addr, &u->mac);
		u->vlan = ucast->vlan;
		u->vni = ucast->vni;
		SLIST_INSERT_HEAD(&qdev->uc_list_head, u, list);
		qdev->num_uc_addr++;
	} else {
		SLIST_FOREACH(tmp, &qdev->uc_list_head, list) {
			if ((memcmp(mac_addr, &tmp->mac,
				    ETHER_ADDR_LEN) == 0) &&
			    ucast->vlan == tmp->vlan &&
			    ucast->vni == tmp->vni)
				break;
		}
		if (tmp == NULL) {
			DP_INFO(edev, "Unicast MAC is not found\n");
			return -EINVAL;
		}
		SLIST_REMOVE(&qdev->uc_list_head, tmp, qede_ucast_entry, list);
		qdev->num_uc_addr--;
	}

	return 0;
}

static int
qede_add_mcast_filters(struct rte_eth_dev *eth_dev, struct ether_addr *mc_addrs,
		       uint32_t mc_addrs_num)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct ecore_filter_mcast mcast;
	struct qede_mcast_entry *m = NULL;
	uint8_t i;
	int rc;

	for (i = 0; i < mc_addrs_num; i++) {
		m = rte_malloc(NULL, sizeof(struct qede_mcast_entry),
			       RTE_CACHE_LINE_SIZE);
		if (!m) {
			DP_ERR(edev, "Did not allocate memory for mcast\n");
			return -ENOMEM;
		}
		ether_addr_copy(&mc_addrs[i], &m->mac);
		SLIST_INSERT_HEAD(&qdev->mc_list_head, m, list);
	}
	memset(&mcast, 0, sizeof(mcast));
	mcast.num_mc_addrs = mc_addrs_num;
	mcast.opcode = ECORE_FILTER_ADD;
	for (i = 0; i < mc_addrs_num; i++)
		ether_addr_copy(&mc_addrs[i], (struct ether_addr *)
				&mcast.mac[i]);
	rc = ecore_filter_mcast_cmd(edev, &mcast, ECORE_SPQ_MODE_CB, NULL);
	if (rc != ECORE_SUCCESS) {
		DP_ERR(edev, "Failed to add multicast filter (rc = %d)\n", rc);
		return -1;
	}

	return 0;
}

static int qede_del_mcast_filters(struct rte_eth_dev *eth_dev)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct qede_mcast_entry *tmp = NULL;
	struct ecore_filter_mcast mcast;
	int j = 0;
	int rc;

	memset(&mcast, 0, sizeof(mcast));
	mcast.num_mc_addrs = qdev->num_mc_addr;
	mcast.opcode = ECORE_FILTER_REMOVE;
	SLIST_FOREACH(tmp, &qdev->mc_list_head, list) {
		ether_addr_copy(&tmp->mac, (struct ether_addr *)&mcast.mac[j]);
		j++;
	}
	rc = ecore_filter_mcast_cmd(edev, &mcast, ECORE_SPQ_MODE_CB, NULL);
	if (rc != ECORE_SUCCESS) {
		DP_ERR(edev, "Failed to delete multicast filter\n");
		return -1;
	}
	/* Init the list */
	while (!SLIST_EMPTY(&qdev->mc_list_head)) {
		tmp = SLIST_FIRST(&qdev->mc_list_head);
		SLIST_REMOVE_HEAD(&qdev->mc_list_head, list);
	}
	SLIST_INIT(&qdev->mc_list_head);

	return 0;
}

static enum _ecore_status_t
qede_mac_int_ops(struct rte_eth_dev *eth_dev, struct ecore_filter_ucast *ucast,
		 bool add)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	enum _ecore_status_t rc = ECORE_INVAL;

	if (add && (qdev->num_uc_addr >= qdev->dev_info.num_mac_filters)) {
		DP_ERR(edev, "Ucast filter table limit exceeded,"
			     " Please enable promisc mode\n");
		return ECORE_INVAL;
	}

	rc = qede_ucast_filter(eth_dev, ucast, add);
	if (rc == 0)
		rc = ecore_filter_ucast_cmd(edev, ucast,
					    ECORE_SPQ_MODE_CB, NULL);
	/* Indicate error only for add filter operation.
	 * Delete filter operations are not severe.
	 */
	if ((rc != ECORE_SUCCESS) && add)
		DP_ERR(edev, "MAC filter failed, rc = %d, op = %d\n",
		       rc, add);

	return rc;
}

static int
qede_mac_addr_add(struct rte_eth_dev *eth_dev, struct ether_addr *mac_addr,
		  __rte_unused uint32_t index, __rte_unused uint32_t pool)
{
	struct ecore_filter_ucast ucast;
	int re;

	if (!is_valid_assigned_ether_addr(mac_addr))
		return -EINVAL;

	qede_set_ucast_cmn_params(&ucast);
	ucast.opcode = ECORE_FILTER_ADD;
	ucast.type = ECORE_FILTER_MAC;
	ether_addr_copy(mac_addr, (struct ether_addr *)&ucast.mac);
	re = (int)qede_mac_int_ops(eth_dev, &ucast, 1);
	return re;
}

static void
qede_mac_addr_remove(struct rte_eth_dev *eth_dev, uint32_t index)
{
	struct qede_dev *qdev = eth_dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;
	struct ecore_filter_ucast ucast;

	PMD_INIT_FUNC_TRACE(edev);

	if (index >= qdev->dev_info.num_mac_filters) {
		DP_ERR(edev, "Index %u is above MAC filter limit %u\n",
		       index, qdev->dev_info.num_mac_filters);
		return;
	}

	if (!is_valid_assigned_ether_addr(&eth_dev->data->mac_addrs[index]))
		return;

	qede_set_ucast_cmn_params(&ucast);
	ucast.opcode = ECORE_FILTER_REMOVE;
	ucast.type = ECORE_FILTER_MAC;

	/* Use the index maintained by rte */
	ether_addr_copy(&eth_dev->data->mac_addrs[index],
			(struct ether_addr *)&ucast.mac);

	qede_mac_int_ops(eth_dev, &ucast, false);
}

static void
qede_mac_addr_set(struct rte_eth_dev *eth_dev, struct ether_addr *mac_addr)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);

	if (IS_VF(edev) && !ecore_vf_check_mac(ECORE_LEADING_HWFN(edev),
					       mac_addr->addr_bytes)) {
		DP_ERR(edev, "Setting MAC address is not allowed\n");
		ether_addr_copy(&qdev->primary_mac,
				&eth_dev->data->mac_addrs[0]);
		return;
	}

	qede_mac_addr_remove(eth_dev, 0);
	qede_mac_addr_add(eth_dev, mac_addr, 0, 0);
}

static void qede_config_accept_any_vlan(struct qede_dev *qdev, bool flg)
{
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct ecore_sp_vport_update_params params;
	struct ecore_hwfn *p_hwfn;
	uint8_t i;
	int rc;

	memset(&params, 0, sizeof(struct ecore_sp_vport_update_params));
	params.vport_id = 0;
	params.update_accept_any_vlan_flg = 1;
	params.accept_any_vlan = flg;
	for_each_hwfn(edev, i) {
		p_hwfn = &edev->hwfns[i];
		params.opaque_fid = p_hwfn->hw_info.opaque_fid;
		rc = ecore_sp_vport_update(p_hwfn, &params,
					   ECORE_SPQ_MODE_EBLOCK, NULL);
		if (rc != ECORE_SUCCESS) {
			DP_ERR(edev, "Failed to configure accept-any-vlan\n");
			return;
		}
	}

	DP_INFO(edev, "%s accept-any-vlan\n", flg ? "enabled" : "disabled");
}

static int qede_vlan_stripping(struct rte_eth_dev *eth_dev, bool flg)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct ecore_sp_vport_update_params params;
	struct ecore_hwfn *p_hwfn;
	uint8_t i;
	int rc;

	memset(&params, 0, sizeof(struct ecore_sp_vport_update_params));
	params.vport_id = 0;
	params.update_inner_vlan_removal_flg = 1;
	params.inner_vlan_removal_flg = flg;
	for_each_hwfn(edev, i) {
		p_hwfn = &edev->hwfns[i];
		params.opaque_fid = p_hwfn->hw_info.opaque_fid;
		rc = ecore_sp_vport_update(p_hwfn, &params,
					   ECORE_SPQ_MODE_EBLOCK, NULL);
		if (rc != ECORE_SUCCESS) {
			DP_ERR(edev, "Failed to update vport\n");
			return -1;
		}
	}

	DP_INFO(edev, "VLAN stripping %s\n", flg ? "enabled" : "disabled");
	return 0;
}

static int qede_vlan_filter_set(struct rte_eth_dev *eth_dev,
				uint16_t vlan_id, int on)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct qed_dev_eth_info *dev_info = &qdev->dev_info;
	struct qede_vlan_entry *tmp = NULL;
	struct qede_vlan_entry *vlan;
	struct ecore_filter_ucast ucast;
	int rc;

	if (on) {
		if (qdev->configured_vlans == dev_info->num_vlan_filters) {
			DP_ERR(edev, "Reached max VLAN filter limit"
				     " enabling accept_any_vlan\n");
			qede_config_accept_any_vlan(qdev, true);
			return 0;
		}

		SLIST_FOREACH(tmp, &qdev->vlan_list_head, list) {
			if (tmp->vid == vlan_id) {
				DP_INFO(edev, "VLAN %u already configured\n",
					vlan_id);
				return 0;
			}
		}

		vlan = rte_malloc(NULL, sizeof(struct qede_vlan_entry),
				  RTE_CACHE_LINE_SIZE);

		if (!vlan) {
			DP_ERR(edev, "Did not allocate memory for VLAN\n");
			return -ENOMEM;
		}

		qede_set_ucast_cmn_params(&ucast);
		ucast.opcode = ECORE_FILTER_ADD;
		ucast.type = ECORE_FILTER_VLAN;
		ucast.vlan = vlan_id;
		rc = ecore_filter_ucast_cmd(edev, &ucast, ECORE_SPQ_MODE_CB,
					    NULL);
		if (rc != 0) {
			DP_ERR(edev, "Failed to add VLAN %u rc %d\n", vlan_id,
			       rc);
			rte_free(vlan);
		} else {
			vlan->vid = vlan_id;
			SLIST_INSERT_HEAD(&qdev->vlan_list_head, vlan, list);
			qdev->configured_vlans++;
			DP_INFO(edev, "VLAN %u added, configured_vlans %u\n",
				vlan_id, qdev->configured_vlans);
		}
	} else {
		SLIST_FOREACH(tmp, &qdev->vlan_list_head, list) {
			if (tmp->vid == vlan_id)
				break;
		}

		if (!tmp) {
			if (qdev->configured_vlans == 0) {
				DP_INFO(edev,
					"No VLAN filters configured yet\n");
				return 0;
			}

			DP_ERR(edev, "VLAN %u not configured\n", vlan_id);
			return -EINVAL;
		}

		SLIST_REMOVE(&qdev->vlan_list_head, tmp, qede_vlan_entry, list);

		qede_set_ucast_cmn_params(&ucast);
		ucast.opcode = ECORE_FILTER_REMOVE;
		ucast.type = ECORE_FILTER_VLAN;
		ucast.vlan = vlan_id;
		rc = ecore_filter_ucast_cmd(edev, &ucast, ECORE_SPQ_MODE_CB,
					    NULL);
		if (rc != 0) {
			DP_ERR(edev, "Failed to delete VLAN %u rc %d\n",
			       vlan_id, rc);
		} else {
			qdev->configured_vlans--;
			DP_INFO(edev, "VLAN %u removed configured_vlans %u\n",
				vlan_id, qdev->configured_vlans);
		}
	}

	return rc;
}

static int qede_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct rte_eth_rxmode *rxmode = &eth_dev->data->dev_conf.rxmode;

	if (mask & ETH_VLAN_STRIP_MASK) {
		if (rxmode->hw_vlan_strip)
			(void)qede_vlan_stripping(eth_dev, 1);
		else
			(void)qede_vlan_stripping(eth_dev, 0);
	}

	if (mask & ETH_VLAN_FILTER_MASK) {
		/* VLAN filtering kicks in when a VLAN is added */
		if (rxmode->hw_vlan_filter) {
			qede_vlan_filter_set(eth_dev, 0, 1);
		} else {
			if (qdev->configured_vlans > 1) { /* Excluding VLAN0 */
				DP_ERR(edev,
				       " Please remove existing VLAN filters"
				       " before disabling VLAN filtering\n");
				/* Signal app that VLAN filtering is still
				 * enabled
				 */
				rxmode->hw_vlan_filter = true;
			} else {
				qede_vlan_filter_set(eth_dev, 0, 0);
			}
		}
	}

	if (mask & ETH_VLAN_EXTEND_MASK)
		DP_INFO(edev, "No offloads are supported with VLAN Q-in-Q"
			" and classification is based on outer tag only\n");

	qdev->vlan_offload_mask = mask;

	DP_INFO(edev, "vlan offload mask %d vlan-strip %d vlan-filter %d\n",
		mask, rxmode->hw_vlan_strip, rxmode->hw_vlan_filter);

	return 0;
}

static void qede_prandom_bytes(uint32_t *buff)
{
	uint8_t i;

	srand((unsigned int)time(NULL));
	for (i = 0; i < ECORE_RSS_KEY_SIZE; i++)
		buff[i] = rand();
}

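/* Note: a time()-seeded rand() is sufficient for a default RSS key, which
 * only needs to spread flows across queues; it is not a cryptographic
 * source. Applications needing a specific key should pass one in
 * rx_adv_conf.rss_conf instead.
 */
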
int qede_config_rss(struct rte_eth_dev *eth_dev)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
#ifdef RTE_LIBRTE_QEDE_DEBUG_INFO
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
#endif
	uint32_t def_rss_key[ECORE_RSS_KEY_SIZE];
	struct rte_eth_rss_reta_entry64 reta_conf[2];
	struct rte_eth_rss_conf rss_conf;
	uint32_t i, id, pos, q;

	rss_conf = eth_dev->data->dev_conf.rx_adv_conf.rss_conf;
	if (!rss_conf.rss_key) {
		DP_INFO(edev, "Applying driver default key\n");
		rss_conf.rss_key_len = ECORE_RSS_KEY_SIZE * sizeof(uint32_t);
		qede_prandom_bytes(&def_rss_key[0]);
		rss_conf.rss_key = (uint8_t *)&def_rss_key[0];
	}

	/* Configure RSS hash */
	if (qede_rss_hash_update(eth_dev, &rss_conf))
		return -EINVAL;

	/* Configure default RETA */
	memset(reta_conf, 0, sizeof(reta_conf));
	for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++)
		reta_conf[i / RTE_RETA_GROUP_SIZE].mask = UINT64_MAX;

	for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++) {
		id = i / RTE_RETA_GROUP_SIZE;
		pos = i % RTE_RETA_GROUP_SIZE;
		q = i % QEDE_RSS_COUNT(qdev);
		reta_conf[id].reta[pos] = q;
	}
	if (qede_rss_reta_update(eth_dev, &reta_conf[0],
				 ECORE_RSS_IND_TABLE_SIZE))
		return -EINVAL;

	return 0;
}

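/* Worked example (hypothetical queue count): with 4 RX queues the loop above
 * fills the 128-entry indirection table round-robin,
 *
 *	reta[i] = i % 4  ->  0, 1, 2, 3, 0, 1, 2, 3, ...
 *
 * split across two 64-entry RETA groups, so hashed flows land evenly on all
 * queues.
 */
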
static void qede_fastpath_start(struct ecore_dev *edev)
{
	struct ecore_hwfn *p_hwfn;
	int i;

	for_each_hwfn(edev, i) {
		p_hwfn = &edev->hwfns[i];
		ecore_hw_start_fastpath(p_hwfn);
	}
}

static int qede_dev_start(struct rte_eth_dev *eth_dev)
{
	struct rte_eth_rxmode *rxmode = &eth_dev->data->dev_conf.rxmode;
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);

	PMD_INIT_FUNC_TRACE(edev);

	/* Update MTU only if it has changed */
	if (eth_dev->data->mtu != qdev->mtu) {
		if (qede_update_mtu(eth_dev, qdev->mtu))
			goto err;
	}

	/* Configure TPA parameters */
	if (rxmode->enable_lro) {
		if (qede_enable_tpa(eth_dev, true))
			return -EINVAL;
		/* Enable scatter mode for LRO */
		if (!rxmode->enable_scatter)
			eth_dev->data->scattered_rx = 1;
	}

	/* Start queues */
	if (qede_start_queues(eth_dev))
		goto err;

	if (IS_PF(edev))
		qede_reset_queue_stats(qdev, true);

	/* Newer SR-IOV PF driver expects RX/TX queues to be started before
	 * enabling RSS. Hence RSS configuration is deferred upto this point.
	 * Also, we would like to retain similar behavior in PF case, so we
	 * don't do PF/VF specific check here.
	 */
	if (rxmode->mq_mode == ETH_MQ_RX_RSS)
		if (qede_config_rss(eth_dev))
			goto err;

	/* Enable vport */
	if (qede_activate_vport(eth_dev, true))
		goto err;

	/* Update link status */
	qede_link_update(eth_dev, 0);

	/* Start/resume traffic */
	qede_fastpath_start(edev);

	DP_INFO(edev, "Device started\n");

	return 0;
err:
	DP_ERR(edev, "Device start fails\n");
	return -1; /* common error code is < 0 */
}

static void qede_dev_stop(struct rte_eth_dev *eth_dev)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);

	PMD_INIT_FUNC_TRACE(edev);

	/* Disable vport */
	if (qede_activate_vport(eth_dev, false))
		return;

	if (qdev->enable_lro)
		qede_enable_tpa(eth_dev, false);

	/* Stop queues */
	qede_stop_queues(eth_dev);

	/* Disable traffic */
	ecore_hw_stop_fastpath(edev); /* TBD - loop */

	DP_INFO(edev, "Device is stopped\n");
}

#define QEDE_VF_TX_SWITCHING		"vf_tx_switching"

const char *valid_args[] = {
	QEDE_VF_TX_SWITCHING,
	NULL,
};

static int qede_args_check(const char *key, const char *val, void *opaque)
{
	unsigned long tmp;
	int ret = 0;
	struct rte_eth_dev *eth_dev = opaque;
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);

	errno = 0;
	tmp = strtoul(val, NULL, 0);
	if (errno) {
		DP_INFO(edev, "%s: \"%s\" is not a valid integer", key, val);
		return errno;
	}

	if (strcmp(QEDE_VF_TX_SWITCHING, key) == 0 && IS_VF(edev)) {
		qdev->enable_tx_switching = !!tmp;
		DP_INFO(edev, "VF tx-switching is %s\n",
			qdev->enable_tx_switching ? "enabled" : "disabled");
	}

	return ret;
}

static int qede_args(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
	struct rte_kvargs *kvlist;
	struct rte_devargs *devargs;
	int ret;
	int i;

	devargs = pci_dev->device.devargs;
	if (!devargs)
		return 0; /* return success */

	kvlist = rte_kvargs_parse(devargs->args, valid_args);
	if (kvlist == NULL)
		return -EINVAL;

	/* Process parameters. */
	for (i = 0; (valid_args[i] != NULL); ++i) {
		if (rte_kvargs_count(kvlist, valid_args[i])) {
			ret = rte_kvargs_process(kvlist, valid_args[i],
						 qede_args_check, eth_dev);
			if (ret != ECORE_SUCCESS) {
				rte_kvargs_free(kvlist);
				return ret;
			}
		}
	}
	rte_kvargs_free(kvlist);

	return 0;
}

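/* Usage sketch (hypothetical PCI address): the only devarg recognized above
 * is vf_tx_switching, passed per device on the EAL command line, e.g.
 *
 *	testpmd -w 05:00.0,vf_tx_switching=0 -- -i
 *
 * qede_args() extracts the key=value pairs and qede_args_check() applies
 * each one.
 */
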
static int qede_dev_configure(struct rte_eth_dev *eth_dev)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct rte_eth_rxmode *rxmode = &eth_dev->data->dev_conf.rxmode;
	int ret;

	PMD_INIT_FUNC_TRACE(edev);

	/* Check requirements for 100G mode */
	if (ECORE_IS_CMT(edev)) {
		if (eth_dev->data->nb_rx_queues < 2 ||
		    eth_dev->data->nb_tx_queues < 2) {
			DP_ERR(edev, "100G mode needs min. 2 RX/TX queues\n");
			return -EINVAL;
		}

		if ((eth_dev->data->nb_rx_queues % 2 != 0) ||
		    (eth_dev->data->nb_tx_queues % 2 != 0)) {
			DP_ERR(edev,
			       "100G mode needs even no. of RX/TX queues\n");
			return -EINVAL;
		}
	}

	/* We need to have min 1 RX queue. There is no min check in
	 * rte_eth_dev_configure(), so we are checking it here.
	 */
	if (eth_dev->data->nb_rx_queues == 0) {
		DP_ERR(edev, "Minimum one RX queue is required\n");
		return -EINVAL;
	}

	/* Enable Tx switching by default */
	qdev->enable_tx_switching = 1;

	/* Parse devargs and fix up rxmode */
	if (qede_args(eth_dev))
		DP_NOTICE(edev, false,
			  "Invalid devargs supplied, requested change will not take effect\n");

	/* Sanity checks and throw warnings */
	if (rxmode->enable_scatter)
		eth_dev->data->scattered_rx = 1;

	if (!rxmode->hw_strip_crc)
		DP_INFO(edev, "L2 CRC stripping is always enabled in hw\n");

	if (!rxmode->hw_ip_checksum)
		DP_INFO(edev, "IP/UDP/TCP checksum offload is always enabled "
			      "in hw\n");
	if (rxmode->header_split)
		DP_INFO(edev, "Header split enable is not supported\n");
	if (!(rxmode->mq_mode == ETH_MQ_RX_NONE || rxmode->mq_mode ==
	      ETH_MQ_RX_RSS)) {
		DP_ERR(edev, "Unsupported multi-queue mode\n");
		return -ENOTSUP;
	}
	/* Flow director mode check */
	if (qede_check_fdir_support(eth_dev))
		return -ENOTSUP;

	qede_dealloc_fp_resc(eth_dev);
	qdev->num_tx_queues = eth_dev->data->nb_tx_queues;
	qdev->num_rx_queues = eth_dev->data->nb_rx_queues;
	if (qede_alloc_fp_resc(qdev))
		return -ENOMEM;

	/* If jumbo enabled adjust MTU */
	if (eth_dev->data->dev_conf.rxmode.jumbo_frame)
		eth_dev->data->mtu =
			eth_dev->data->dev_conf.rxmode.max_rx_pkt_len -
			ETHER_HDR_LEN - QEDE_ETH_OVERHEAD;

	if (qede_start_vport(qdev, eth_dev->data->mtu))
		return -1;
	qdev->mtu = eth_dev->data->mtu;

	/* Enable VLAN offloads by default */
	ret = qede_vlan_offload_set(eth_dev, ETH_VLAN_STRIP_MASK |
				    ETH_VLAN_FILTER_MASK);
	if (ret)
		return ret;

	DP_INFO(edev, "Device configured with RSS=%d TSS=%d\n",
		QEDE_RSS_COUNT(qdev), QEDE_TSS_COUNT(qdev));

	return 0;
}

/* Info about HW descriptor ring limitations */
static const struct rte_eth_desc_lim qede_rx_desc_lim = {
	.nb_max = 0x8000, /* 32K */
	.nb_min = 128,
	.nb_align = 128 /* lowest common multiple */
};

static const struct rte_eth_desc_lim qede_tx_desc_lim = {
	.nb_max = 0x8000, /* 32K */
	.nb_min = 256,
	.nb_align = 256,
	.nb_seg_max = ETH_TX_MAX_BDS_PER_LSO_PACKET,
	.nb_mtu_seg_max = ETH_TX_MAX_BDS_PER_NON_LSO_PACKET
};

static void
qede_dev_info_get(struct rte_eth_dev *eth_dev,
		  struct rte_eth_dev_info *dev_info)
{
	struct qede_dev *qdev = eth_dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;
	struct qed_link_output link;
	uint32_t speed_cap = 0;

	PMD_INIT_FUNC_TRACE(edev);

	dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	dev_info->min_rx_bufsize = (uint32_t)QEDE_MIN_RX_BUFF_SIZE;
	dev_info->max_rx_pktlen = (uint32_t)ETH_TX_MAX_NON_LSO_PKT_LEN;
	dev_info->rx_desc_lim = qede_rx_desc_lim;
	dev_info->tx_desc_lim = qede_tx_desc_lim;

	if (IS_PF(edev))
		dev_info->max_rx_queues = (uint16_t)RTE_MIN(
			QEDE_MAX_RSS_CNT(qdev), QEDE_PF_NUM_CONNS / 2);
	else
		dev_info->max_rx_queues = (uint16_t)RTE_MIN(
			QEDE_MAX_RSS_CNT(qdev), ECORE_MAX_VF_CHAINS_PER_PF);
	dev_info->max_tx_queues = dev_info->max_rx_queues;

	dev_info->max_mac_addrs = qdev->dev_info.num_mac_filters;
	dev_info->max_vfs = 0;
	dev_info->reta_size = ECORE_RSS_IND_TABLE_SIZE;
	dev_info->hash_key_size = ECORE_RSS_KEY_SIZE * sizeof(uint32_t);
	dev_info->flow_type_rss_offloads = (uint64_t)QEDE_RSS_OFFLOAD_ALL;

	dev_info->default_txconf = (struct rte_eth_txconf) {
		.txq_flags = QEDE_TXQ_FLAGS,
	};

	dev_info->rx_offload_capa = (DEV_RX_OFFLOAD_VLAN_STRIP |
				     DEV_RX_OFFLOAD_IPV4_CKSUM |
				     DEV_RX_OFFLOAD_UDP_CKSUM |
				     DEV_RX_OFFLOAD_TCP_CKSUM |
				     DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
				     DEV_RX_OFFLOAD_TCP_LRO);

	dev_info->tx_offload_capa = (DEV_TX_OFFLOAD_VLAN_INSERT |
				     DEV_TX_OFFLOAD_IPV4_CKSUM |
				     DEV_TX_OFFLOAD_UDP_CKSUM |
				     DEV_TX_OFFLOAD_TCP_CKSUM |
				     DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
				     DEV_TX_OFFLOAD_TCP_TSO |
				     DEV_TX_OFFLOAD_VXLAN_TNL_TSO);

	memset(&link, 0, sizeof(struct qed_link_output));
	qdev->ops->common->get_link(edev, &link);
	if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
		speed_cap |= ETH_LINK_SPEED_1G;
	if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
		speed_cap |= ETH_LINK_SPEED_10G;
	if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G)
		speed_cap |= ETH_LINK_SPEED_25G;
	if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G)
		speed_cap |= ETH_LINK_SPEED_40G;
	if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
		speed_cap |= ETH_LINK_SPEED_50G;
	if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G)
		speed_cap |= ETH_LINK_SPEED_100G;
	dev_info->speed_capa = speed_cap;
}

/* return 0 means link status changed, -1 means not changed */
int
qede_link_update(struct rte_eth_dev *eth_dev, __rte_unused int wait_to_complete)
{
	struct qede_dev *qdev = eth_dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;
	uint16_t link_duplex, old_link_status;
	struct qed_link_output link;
	struct rte_eth_link *curr = &eth_dev->data->dev_link;

	memset(&link, 0, sizeof(struct qed_link_output));
	qdev->ops->common->get_link(edev, &link);

	/* Link Speed */
	curr->link_speed = link.speed;

	/* Link Mode */
	switch (link.duplex) {
	case QEDE_DUPLEX_HALF:
		link_duplex = ETH_LINK_HALF_DUPLEX;
		break;
	case QEDE_DUPLEX_FULL:
		link_duplex = ETH_LINK_FULL_DUPLEX;
		break;
	case QEDE_DUPLEX_UNKNOWN:
	default:
		link_duplex = -1;
	}
	curr->link_duplex = link_duplex;

	/* Link Status */
	old_link_status = curr->link_status;
	curr->link_status = (link.link_up) ? ETH_LINK_UP : ETH_LINK_DOWN;

	/* AN */
	curr->link_autoneg = (link.supported_caps & QEDE_SUPPORTED_AUTONEG) ?
			     ETH_LINK_AUTONEG : ETH_LINK_FIXED;

	DP_INFO(edev, "Link - Speed %u Mode %u AN %u Status %u\n",
		curr->link_speed, curr->link_duplex,
		curr->link_autoneg, curr->link_status);

	/* return 0 means link status changed, -1 means not changed */
	return ((curr->link_status == old_link_status) ? -1 : 0);
}

static void qede_promiscuous_enable(struct rte_eth_dev *eth_dev)
{
#ifdef RTE_LIBRTE_QEDE_DEBUG_INIT
	struct qede_dev *qdev = eth_dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;

	PMD_INIT_FUNC_TRACE(edev);
#endif

	enum qed_filter_rx_mode_type type = QED_FILTER_RX_MODE_TYPE_PROMISC;

	if (rte_eth_allmulticast_get(eth_dev->data->port_id) == 1)
		type |= QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC;

	qed_configure_filter_rx_mode(eth_dev, type);
}

static void qede_promiscuous_disable(struct rte_eth_dev *eth_dev)
{
#ifdef RTE_LIBRTE_QEDE_DEBUG_INIT
	struct qede_dev *qdev = eth_dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;

	PMD_INIT_FUNC_TRACE(edev);
#endif

	if (rte_eth_allmulticast_get(eth_dev->data->port_id) == 1)
		qed_configure_filter_rx_mode(eth_dev,
				QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC);
	else
		qed_configure_filter_rx_mode(eth_dev,
				QED_FILTER_RX_MODE_TYPE_REGULAR);
}

static void qede_poll_sp_sb_cb(void *param)
{
	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param;
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	int rc;

	qede_interrupt_action(ECORE_LEADING_HWFN(edev));
	qede_interrupt_action(&edev->hwfns[1]);

	rc = rte_eal_alarm_set(timer_period * US_PER_S,
			       qede_poll_sp_sb_cb,
			       (void *)eth_dev);
	if (rc != 0) {
		DP_ERR(edev, "Unable to start periodic"
			     " timer rc %d\n", rc);
		assert(false && "Unable to start periodic timer");
	}
}

static void qede_dev_close(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);

	PMD_INIT_FUNC_TRACE(edev);

	/* dev_stop() shall cleanup fp resources in hw but without releasing
	 * dma memories and sw structures so that dev_start() can be called
	 * by the app without reconfiguration. However, in dev_close() we
	 * can release all the resources and device can be brought up newly
	 */
	if (eth_dev->data->dev_started)
		qede_dev_stop(eth_dev);

	qede_stop_vport(edev);
	qdev->vport_started = false;
	qede_fdir_dealloc_resc(eth_dev);
	qede_dealloc_fp_resc(eth_dev);

	eth_dev->data->nb_rx_queues = 0;
	eth_dev->data->nb_tx_queues = 0;

	/* Bring the link down */
	qede_dev_set_link_state(eth_dev, false);
	qdev->ops->common->slowpath_stop(edev);
	qdev->ops->common->remove(edev);
	rte_intr_disable(&pci_dev->intr_handle);

	switch (pci_dev->intr_handle.type) {
	case RTE_INTR_HANDLE_UIO_INTX:
	case RTE_INTR_HANDLE_VFIO_LEGACY:
		rte_intr_callback_unregister(&pci_dev->intr_handle,
					     qede_interrupt_handler_intx,
					     (void *)eth_dev);
		break;
	default:
		rte_intr_callback_unregister(&pci_dev->intr_handle,
					     qede_interrupt_handler,
					     (void *)eth_dev);
	}

	if (ECORE_IS_CMT(edev))
		rte_eal_alarm_cancel(qede_poll_sp_sb_cb, (void *)eth_dev);
}

static int
qede_get_stats(struct rte_eth_dev *eth_dev, struct rte_eth_stats *eth_stats)
{
	struct qede_dev *qdev = eth_dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;
	struct ecore_eth_stats stats;
	unsigned int i = 0, j = 0, qid;
	unsigned int rxq_stat_cntrs, txq_stat_cntrs;
	struct qede_tx_queue *txq;

	ecore_get_vport_stats(edev, &stats);

	/* RX Stats */
	eth_stats->ipackets = stats.common.rx_ucast_pkts +
	    stats.common.rx_mcast_pkts + stats.common.rx_bcast_pkts;

	eth_stats->ibytes = stats.common.rx_ucast_bytes +
	    stats.common.rx_mcast_bytes + stats.common.rx_bcast_bytes;

	eth_stats->ierrors = stats.common.rx_crc_errors +
	    stats.common.rx_align_errors +
	    stats.common.rx_carrier_errors +
	    stats.common.rx_oversize_packets +
	    stats.common.rx_jabbers + stats.common.rx_undersize_packets;

	eth_stats->rx_nombuf = stats.common.no_buff_discards;

	eth_stats->imissed = stats.common.mftag_filter_discards +
	    stats.common.mac_filter_discards +
	    stats.common.no_buff_discards +
	    stats.common.brb_truncates + stats.common.brb_discards;

	/* TX stats */
	eth_stats->opackets = stats.common.tx_ucast_pkts +
	    stats.common.tx_mcast_pkts + stats.common.tx_bcast_pkts;

	eth_stats->obytes = stats.common.tx_ucast_bytes +
	    stats.common.tx_mcast_bytes + stats.common.tx_bcast_bytes;

	eth_stats->oerrors = stats.common.tx_err_drop_pkts;

	/* Queue stats */
	rxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(qdev),
				 RTE_ETHDEV_QUEUE_STAT_CNTRS);
	txq_stat_cntrs = RTE_MIN(QEDE_TSS_COUNT(qdev),
				 RTE_ETHDEV_QUEUE_STAT_CNTRS);
	if ((rxq_stat_cntrs != (unsigned int)QEDE_RSS_COUNT(qdev)) ||
	    (txq_stat_cntrs != (unsigned int)QEDE_TSS_COUNT(qdev)))
		DP_VERBOSE(edev, ECORE_MSG_DEBUG,
			   "Not all the queue stats will be displayed. Set"
			   " RTE_ETHDEV_QUEUE_STAT_CNTRS config param"
			   " appropriately and retry.\n");

	for_each_rss(qid) {
		eth_stats->q_ipackets[i] =
			*(uint64_t *)(
				((char *)(qdev->fp_array[qid].rxq)) +
				offsetof(struct qede_rx_queue,
					 rcv_pkts));
		eth_stats->q_errors[i] =
			*(uint64_t *)(
				((char *)(qdev->fp_array[qid].rxq)) +
				offsetof(struct qede_rx_queue,
					 rx_hw_errors)) +
			*(uint64_t *)(
				((char *)(qdev->fp_array[qid].rxq)) +
				offsetof(struct qede_rx_queue,
					 rx_alloc_errors));
		i++;
		if (i == rxq_stat_cntrs)
			break;
	}

	for_each_tss(qid) {
		txq = qdev->fp_array[qid].txq;
		eth_stats->q_opackets[j] =
			*((uint64_t *)(uintptr_t)
				(((uint64_t)(uintptr_t)(txq)) +
				 offsetof(struct qede_tx_queue,
					  xmit_pkts)));
		j++;
		if (j == txq_stat_cntrs)
			break;
	}

	return 0;
}

static unsigned
qede_get_xstats_count(struct qede_dev *qdev) {
	if (ECORE_IS_BB(&qdev->edev))
		return RTE_DIM(qede_xstats_strings) +
		       RTE_DIM(qede_bb_xstats_strings) +
		       (RTE_DIM(qede_rxq_xstats_strings) *
			RTE_MIN(QEDE_RSS_COUNT(qdev),
				RTE_ETHDEV_QUEUE_STAT_CNTRS));
	else
		return RTE_DIM(qede_xstats_strings) +
		       RTE_DIM(qede_ah_xstats_strings) +
		       (RTE_DIM(qede_rxq_xstats_strings) *
			RTE_MIN(QEDE_RSS_COUNT(qdev),
				RTE_ETHDEV_QUEUE_STAT_CNTRS));
}

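/* For instance (hypothetical counts): a BB adapter exposing 4 RX queues with
 * the default RTE_ETHDEV_QUEUE_STAT_CNTRS of 16 reports
 * RTE_DIM(qede_xstats_strings) + RTE_DIM(qede_bb_xstats_strings) + 3 * 4
 * entries, each queue contributing the three rx_q_* strings above.
 */
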
static int
qede_get_xstats_names(struct rte_eth_dev *dev,
		      struct rte_eth_xstat_name *xstats_names,
		      __rte_unused unsigned int limit)
{
	struct qede_dev *qdev = dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;
	const unsigned int stat_cnt = qede_get_xstats_count(qdev);
	unsigned int i, qid, stat_idx = 0;
	unsigned int rxq_stat_cntrs;

	if (xstats_names != NULL) {
		for (i = 0; i < RTE_DIM(qede_xstats_strings); i++) {
			snprintf(xstats_names[stat_idx].name,
				 sizeof(xstats_names[stat_idx].name),
				 "%s",
				 qede_xstats_strings[i].name);
			stat_idx++;
		}

		if (ECORE_IS_BB(edev)) {
			for (i = 0; i < RTE_DIM(qede_bb_xstats_strings); i++) {
				snprintf(xstats_names[stat_idx].name,
					 sizeof(xstats_names[stat_idx].name),
					 "%s",
					 qede_bb_xstats_strings[i].name);
				stat_idx++;
			}
		} else {
			for (i = 0; i < RTE_DIM(qede_ah_xstats_strings); i++) {
				snprintf(xstats_names[stat_idx].name,
					 sizeof(xstats_names[stat_idx].name),
					 "%s",
					 qede_ah_xstats_strings[i].name);
				stat_idx++;
			}
		}

		rxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(qdev),
					 RTE_ETHDEV_QUEUE_STAT_CNTRS);
		for (qid = 0; qid < rxq_stat_cntrs; qid++) {
			for (i = 0; i < RTE_DIM(qede_rxq_xstats_strings); i++) {
				snprintf(xstats_names[stat_idx].name,
					 sizeof(xstats_names[stat_idx].name),
					 "%.4s%d%s",
					 qede_rxq_xstats_strings[i].name, qid,
					 qede_rxq_xstats_strings[i].name + 4);
				stat_idx++;
			}
		}
	}

	return stat_cnt;
}

static int
qede_get_xstats(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
		unsigned int n)
{
	struct qede_dev *qdev = dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;
	struct ecore_eth_stats stats;
	const unsigned int num = qede_get_xstats_count(qdev);
	unsigned int i, qid, stat_idx = 0;
	unsigned int rxq_stat_cntrs;

	if (n < num)
		return num;

	ecore_get_vport_stats(edev, &stats);

	for (i = 0; i < RTE_DIM(qede_xstats_strings); i++) {
		xstats[stat_idx].value = *(uint64_t *)(((char *)&stats) +
					 qede_xstats_strings[i].offset);
		xstats[stat_idx].id = stat_idx;
		stat_idx++;
	}

	if (ECORE_IS_BB(edev)) {
		for (i = 0; i < RTE_DIM(qede_bb_xstats_strings); i++) {
			xstats[stat_idx].value =
					*(uint64_t *)(((char *)&stats) +
					qede_bb_xstats_strings[i].offset);
			xstats[stat_idx].id = stat_idx;
			stat_idx++;
		}
	} else {
		for (i = 0; i < RTE_DIM(qede_ah_xstats_strings); i++) {
			xstats[stat_idx].value =
					*(uint64_t *)(((char *)&stats) +
					qede_ah_xstats_strings[i].offset);
			xstats[stat_idx].id = stat_idx;
			stat_idx++;
		}
	}

	rxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(qdev),
				 RTE_ETHDEV_QUEUE_STAT_CNTRS);
	for (qid = 0; qid < rxq_stat_cntrs; qid++) {
		for (i = 0; i < RTE_DIM(qede_rxq_xstats_strings); i++) {
			xstats[stat_idx].value = *(uint64_t *)(
				((char *)(qdev->fp_array[qid].rxq)) +
				qede_rxq_xstats_strings[i].offset);
			xstats[stat_idx].id = stat_idx;
			stat_idx++;
		}
	}

	return stat_idx;
}

static void
qede_reset_xstats(struct rte_eth_dev *dev)
{
	struct qede_dev *qdev = dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;

	ecore_reset_vport_stats(edev);
	qede_reset_queue_stats(qdev, true);
}

int qede_dev_set_link_state(struct rte_eth_dev *eth_dev, bool link_up)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct qed_link_params link_params;
	int rc;

	DP_INFO(edev, "setting link state %d\n", link_up);
	memset(&link_params, 0, sizeof(link_params));
	link_params.link_up = link_up;
	rc = qdev->ops->common->set_link(edev, &link_params);
	if (rc != ECORE_SUCCESS)
		DP_ERR(edev, "Unable to set link state %d\n", link_up);

	return rc;
}

static int qede_dev_set_link_up(struct rte_eth_dev *eth_dev)
{
	return qede_dev_set_link_state(eth_dev, true);
}

static int qede_dev_set_link_down(struct rte_eth_dev *eth_dev)
{
	return qede_dev_set_link_state(eth_dev, false);
}

static void qede_reset_stats(struct rte_eth_dev *eth_dev)
{
	struct qede_dev *qdev = eth_dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;

	ecore_reset_vport_stats(edev);
	qede_reset_queue_stats(qdev, false);
}

static void qede_allmulticast_enable(struct rte_eth_dev *eth_dev)
{
	enum qed_filter_rx_mode_type type =
	    QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC;

	if (rte_eth_promiscuous_get(eth_dev->data->port_id) == 1)
		type |= QED_FILTER_RX_MODE_TYPE_PROMISC;

	qed_configure_filter_rx_mode(eth_dev, type);
}

static void qede_allmulticast_disable(struct rte_eth_dev *eth_dev)
{
	if (rte_eth_promiscuous_get(eth_dev->data->port_id) == 1)
		qed_configure_filter_rx_mode(eth_dev,
				QED_FILTER_RX_MODE_TYPE_PROMISC);
	else
		qed_configure_filter_rx_mode(eth_dev,
				QED_FILTER_RX_MODE_TYPE_REGULAR);
}

static int
qede_set_mc_addr_list(struct rte_eth_dev *eth_dev, struct ether_addr *mc_addrs,
		      uint32_t mc_addrs_num)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	uint8_t i;

	if (mc_addrs_num > ECORE_MAX_MC_ADDRS) {
		DP_ERR(edev, "Reached max multicast filters limit,"
			     " Please enable multicast promisc mode\n");
		return -ENOSPC;
	}

	for (i = 0; i < mc_addrs_num; i++) {
		if (!is_multicast_ether_addr(&mc_addrs[i])) {
			DP_ERR(edev, "Not a valid multicast MAC\n");
			return -EINVAL;
		}
	}

	/* Flush all existing entries */
	if (qede_del_mcast_filters(eth_dev))
		return -1;

	/* Set new mcast list */
	return qede_add_mcast_filters(eth_dev, mc_addrs, mc_addrs_num);
}

/* Update MTU via vport-update without doing port restart.
 * The vport must be deactivated before calling this API.
 */
int qede_update_mtu(struct rte_eth_dev *eth_dev, uint16_t mtu)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct ecore_hwfn *p_hwfn;
	int rc;
	int i;

	if (IS_PF(edev)) {
		struct ecore_sp_vport_update_params params;

		memset(&params, 0,
		       sizeof(struct ecore_sp_vport_update_params));
		params.vport_id = 0;
		params.mtu = mtu;
		for_each_hwfn(edev, i) {
			p_hwfn = &edev->hwfns[i];
			params.opaque_fid = p_hwfn->hw_info.opaque_fid;
			rc = ecore_sp_vport_update(p_hwfn, &params,
					ECORE_SPQ_MODE_EBLOCK, NULL);
			if (rc != ECORE_SUCCESS)
				goto err;
		}
	} else {
		for_each_hwfn(edev, i) {
			p_hwfn = &edev->hwfns[i];
			rc = ecore_vf_pf_update_mtu(p_hwfn, mtu);
			if (rc == ECORE_INVAL) {
				DP_INFO(edev, "VF MTU Update TLV not supported\n");
				/* Recreate vport */
				rc = qede_start_vport(qdev, mtu);
				if (rc != ECORE_SUCCESS)
					goto err;

				/* Restore config lost due to vport stop */
				if (eth_dev->data->promiscuous)
					qede_promiscuous_enable(eth_dev);
				else
					qede_promiscuous_disable(eth_dev);

				if (eth_dev->data->all_multicast)
					qede_allmulticast_enable(eth_dev);
				else
					qede_allmulticast_disable(eth_dev);

				qede_vlan_offload_set(eth_dev,
						      qdev->vlan_offload_mask);
			} else if (rc != ECORE_SUCCESS) {
				goto err;
			}
		}
	}
	DP_INFO(edev, "%s MTU updated to %u\n", IS_PF(edev) ? "PF" : "VF", mtu);

	return 0;

err:
	DP_ERR(edev, "Failed to update MTU\n");
	return -1;
}

static int qede_flow_ctrl_set(struct rte_eth_dev *eth_dev,
			      struct rte_eth_fc_conf *fc_conf)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct qed_link_output current_link;
	struct qed_link_params params;

	memset(&current_link, 0, sizeof(current_link));
	qdev->ops->common->get_link(edev, &current_link);

	memset(&params, 0, sizeof(params));
	params.override_flags |= QED_LINK_OVERRIDE_PAUSE_CONFIG;
	if (fc_conf->autoneg) {
		if (!(current_link.supported_caps & QEDE_SUPPORTED_AUTONEG)) {
			DP_ERR(edev, "Autoneg not supported\n");
			return -EINVAL;
		}
		params.pause_config |= QED_LINK_PAUSE_AUTONEG_ENABLE;
	}

	/* Pause is assumed to be supported (SUPPORTED_Pause) */
	if (fc_conf->mode == RTE_FC_FULL)
		params.pause_config |= (QED_LINK_PAUSE_TX_ENABLE |
					QED_LINK_PAUSE_RX_ENABLE);
	if (fc_conf->mode == RTE_FC_TX_PAUSE)
		params.pause_config |= QED_LINK_PAUSE_TX_ENABLE;
	if (fc_conf->mode == RTE_FC_RX_PAUSE)
		params.pause_config |= QED_LINK_PAUSE_RX_ENABLE;

	params.link_up = true;
	(void)qdev->ops->common->set_link(edev, &params);

	return 0;
}

static int qede_flow_ctrl_get(struct rte_eth_dev *eth_dev,
			      struct rte_eth_fc_conf *fc_conf)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct qed_link_output current_link;

	memset(&current_link, 0, sizeof(current_link));
	qdev->ops->common->get_link(edev, &current_link);

	if (current_link.pause_config & QED_LINK_PAUSE_AUTONEG_ENABLE)
		fc_conf->autoneg = true;

	if (current_link.pause_config & (QED_LINK_PAUSE_RX_ENABLE |
					 QED_LINK_PAUSE_TX_ENABLE))
		fc_conf->mode = RTE_FC_FULL;
	else if (current_link.pause_config & QED_LINK_PAUSE_RX_ENABLE)
		fc_conf->mode = RTE_FC_RX_PAUSE;
	else if (current_link.pause_config & QED_LINK_PAUSE_TX_ENABLE)
		fc_conf->mode = RTE_FC_TX_PAUSE;
	else
		fc_conf->mode = RTE_FC_NONE;

	return 0;
}

static const uint32_t *
qede_dev_supported_ptypes_get(struct rte_eth_dev *eth_dev)
{
	static const uint32_t ptypes[] = {
		RTE_PTYPE_L2_ETHER,
		RTE_PTYPE_L2_ETHER_VLAN,
		RTE_PTYPE_L3_IPV4,
		RTE_PTYPE_L3_IPV6,
		RTE_PTYPE_L4_TCP,
		RTE_PTYPE_L4_UDP,
		RTE_PTYPE_TUNNEL_VXLAN,
		RTE_PTYPE_L4_FRAG,
		/* Inner */
		RTE_PTYPE_INNER_L2_ETHER,
		RTE_PTYPE_INNER_L2_ETHER_VLAN,
		RTE_PTYPE_INNER_L3_IPV4,
		RTE_PTYPE_INNER_L3_IPV6,
		RTE_PTYPE_INNER_L4_TCP,
		RTE_PTYPE_INNER_L4_UDP,
		RTE_PTYPE_INNER_L4_FRAG,
		RTE_PTYPE_UNKNOWN
	};

	if (eth_dev->rx_pkt_burst == qede_recv_pkts)
		return ptypes;

	return NULL;
}

static void qede_init_rss_caps(uint8_t *rss_caps, uint64_t hf)
{
	*rss_caps = 0;
	*rss_caps |= (hf & ETH_RSS_IPV4) ? ECORE_RSS_IPV4 : 0;
	*rss_caps |= (hf & ETH_RSS_IPV6) ? ECORE_RSS_IPV6 : 0;
	*rss_caps |= (hf & ETH_RSS_IPV6_EX) ? ECORE_RSS_IPV6 : 0;
	*rss_caps |= (hf & ETH_RSS_NONFRAG_IPV4_TCP) ? ECORE_RSS_IPV4_TCP : 0;
	*rss_caps |= (hf & ETH_RSS_NONFRAG_IPV6_TCP) ? ECORE_RSS_IPV6_TCP : 0;
	*rss_caps |= (hf & ETH_RSS_IPV6_TCP_EX) ? ECORE_RSS_IPV6_TCP : 0;
	*rss_caps |= (hf & ETH_RSS_NONFRAG_IPV4_UDP) ? ECORE_RSS_IPV4_UDP : 0;
	*rss_caps |= (hf & ETH_RSS_NONFRAG_IPV6_UDP) ? ECORE_RSS_IPV6_UDP : 0;
}

int qede_rss_hash_update(struct rte_eth_dev *eth_dev,
			 struct rte_eth_rss_conf *rss_conf)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct ecore_sp_vport_update_params vport_update_params;
	struct ecore_rss_params rss_params;
	struct ecore_hwfn *p_hwfn;
	uint32_t *key = (uint32_t *)rss_conf->rss_key;
	uint64_t hf = rss_conf->rss_hf;
	uint8_t len = rss_conf->rss_key_len;
	uint8_t idx;
	uint8_t i;
	int rc;

	memset(&vport_update_params, 0, sizeof(vport_update_params));
	memset(&rss_params, 0, sizeof(rss_params));

	DP_INFO(edev, "RSS hf = 0x%lx len = %u key = %p\n",
		(unsigned long)hf, len, key);

	if (hf != 0) {
		/* Enabling RSS */
		DP_INFO(edev, "Enabling rss\n");

		/* RSS caps */
		qede_init_rss_caps(&rss_params.rss_caps, hf);
		rss_params.update_rss_capabilities = 1;

		/* RSS hash key */
		if (key) {
			if (len > (ECORE_RSS_KEY_SIZE * sizeof(uint32_t))) {
				DP_ERR(edev, "RSS key length exceeds limit\n");
				return -EINVAL;
			}
			DP_INFO(edev, "Applying user supplied hash key\n");
			rss_params.update_rss_key = 1;
			memcpy(&rss_params.rss_key, key, len);
		}
		rss_params.rss_enable = 1;
	}

	rss_params.update_rss_config = 1;
	/* tbl_size has to be set with capabilities */
	rss_params.rss_table_size_log = 7;
	vport_update_params.vport_id = 0;
	/* pass the L2 handles instead of qids */
	for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++) {
		idx = i % QEDE_RSS_COUNT(qdev);
		rss_params.rss_ind_table[i] = qdev->fp_array[idx].rxq->handle;
	}
	vport_update_params.rss_params = &rss_params;

	for_each_hwfn(edev, i) {
		p_hwfn = &edev->hwfns[i];
		vport_update_params.opaque_fid = p_hwfn->hw_info.opaque_fid;
		rc = ecore_sp_vport_update(p_hwfn, &vport_update_params,
					   ECORE_SPQ_MODE_EBLOCK, NULL);
		if (rc) {
			DP_ERR(edev, "vport-update for RSS failed\n");
			return rc;
		}
	}
	qdev->rss_enable = rss_params.rss_enable;

	/* Update local structure for hash query */
	qdev->rss_conf.rss_hf = hf;
	qdev->rss_conf.rss_key_len = len;
	if (qdev->rss_enable) {
		if (qdev->rss_conf.rss_key == NULL) {
			qdev->rss_conf.rss_key = (uint8_t *)malloc(len);
			if (qdev->rss_conf.rss_key == NULL) {
				DP_ERR(edev, "No memory to store RSS key\n");
				return -ENOMEM;
			}
		}
		if (key && len) {
			DP_INFO(edev, "Storing RSS key\n");
			memcpy(qdev->rss_conf.rss_key, key, len);
		}
	} else if (!qdev->rss_enable && len == 0) {
		if (qdev->rss_conf.rss_key) {
			free(qdev->rss_conf.rss_key);
			qdev->rss_conf.rss_key = NULL;
			DP_INFO(edev, "Free RSS key\n");
		}
	}

	return 0;
}

static int qede_rss_hash_conf_get(struct rte_eth_dev *eth_dev,
				  struct rte_eth_rss_conf *rss_conf)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);

	rss_conf->rss_hf = qdev->rss_conf.rss_hf;
	rss_conf->rss_key_len = qdev->rss_conf.rss_key_len;

	if (rss_conf->rss_key && qdev->rss_conf.rss_key)
		memcpy(rss_conf->rss_key, qdev->rss_conf.rss_key,
		       rss_conf->rss_key_len);

	return 0;
}
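/* On 100G (CMT) devices the single logical indirection table has to be
 * split per engine, since each hwfn owns its own queues. Once split,
 * RSS may turn out to be unnecessary: if every slot of a per-hwfn
 * table points at the same queue, the feature is simply disabled.
 */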
static bool qede_update_rss_parm_cmt(struct ecore_dev *edev,
				     struct ecore_rss_params *rss)
{
	int i, fn;
	bool rss_mode = 1; /* enable */
	struct ecore_queue_cid *cid;
	struct ecore_rss_params *t_rss;

	/* In regular scenario, we'd simply need to take input handlers.
	 * But in CMT, we'd have to split the handlers according to the
	 * engine they were configured on. We'd then have to understand
	 * whether RSS is really required, since 2-queues on CMT doesn't
	 * require RSS.
	 */

	/* CMT should be round-robin */
	for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++) {
		cid = rss->rss_ind_table[i];

		if (cid->p_owner == ECORE_LEADING_HWFN(edev))
			t_rss = &rss[0];
		else
			t_rss = &rss[1];

		t_rss->rss_ind_table[i / edev->num_hwfns] = cid;
	}

	t_rss = &rss[1];
	t_rss->update_rss_ind_table = 1;
	t_rss->rss_table_size_log = 7;
	t_rss->update_rss_config = 1;

	/* Make sure RSS is actually required */
	for_each_hwfn(edev, fn) {
		for (i = 1; i < ECORE_RSS_IND_TABLE_SIZE / edev->num_hwfns;
		     i++) {
			if (rss[fn].rss_ind_table[i] !=
			    rss[fn].rss_ind_table[0])
				break;
		}

		if (i == ECORE_RSS_IND_TABLE_SIZE / edev->num_hwfns) {
			DP_INFO(edev,
				"CMT - 1 queue per-hwfn; Disabling RSS\n");
			rss_mode = 0;
			goto out;
		}
	}

out:
	t_rss->rss_enable = rss_mode;

	return rss_mode;
}
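/* Apply a user-supplied redirection table: entries are translated from
 * queue ids into ecore rxq handles, pushed per-hwfn via vport-update,
 * and cached in qdev->rss_ind_table so reta_query can serve them back.
 */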
int qede_rss_reta_update(struct rte_eth_dev *eth_dev,
			 struct rte_eth_rss_reta_entry64 *reta_conf,
			 uint16_t reta_size)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct ecore_sp_vport_update_params vport_update_params;
	struct ecore_rss_params *params;
	struct ecore_hwfn *p_hwfn;
	uint16_t i, idx, shift;
	uint8_t entry;
	int rc = 0;

	if (reta_size > ETH_RSS_RETA_SIZE_128) {
		DP_ERR(edev, "reta_size %d is not supported by hardware\n",
		       reta_size);
		return -EINVAL;
	}

	memset(&vport_update_params, 0, sizeof(vport_update_params));
	params = rte_zmalloc("qede_rss", sizeof(*params) * edev->num_hwfns,
			     RTE_CACHE_LINE_SIZE);
	if (params == NULL) {
		DP_ERR(edev, "failed to allocate memory\n");
		return -ENOMEM;
	}

	for (i = 0; i < reta_size; i++) {
		idx = i / RTE_RETA_GROUP_SIZE;
		shift = i % RTE_RETA_GROUP_SIZE;
		if (reta_conf[idx].mask & (1ULL << shift)) {
			entry = reta_conf[idx].reta[shift];
			/* Pass rxq handles to ecore */
			params->rss_ind_table[i] =
				qdev->fp_array[entry].rxq->handle;
			/* Update the local copy for RETA query command */
			qdev->rss_ind_table[i] = entry;
		}
	}

	params->update_rss_ind_table = 1;
	params->rss_table_size_log = 7;
	params->update_rss_config = 1;

	/* Fix up RETA for CMT mode device */
	if (ECORE_IS_CMT(edev))
		qdev->rss_enable = qede_update_rss_parm_cmt(edev, params);

	vport_update_params.vport_id = 0;
	/* Use the current value of rss_enable */
	params->rss_enable = qdev->rss_enable;
	vport_update_params.rss_params = params;

	for_each_hwfn(edev, i) {
		p_hwfn = &edev->hwfns[i];
		vport_update_params.opaque_fid = p_hwfn->hw_info.opaque_fid;
		rc = ecore_sp_vport_update(p_hwfn, &vport_update_params,
					   ECORE_SPQ_MODE_EBLOCK, NULL);
		if (rc) {
			DP_ERR(edev, "vport-update for RSS failed\n");
			goto out;
		}
	}

out:
	rte_free(params);
	return rc;
}
static int qede_rss_reta_query(struct rte_eth_dev *eth_dev,
			       struct rte_eth_rss_reta_entry64 *reta_conf,
			       uint16_t reta_size)
{
	struct qede_dev *qdev = eth_dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;
	uint16_t i, idx, shift;
	uint8_t entry;

	if (reta_size > ETH_RSS_RETA_SIZE_128) {
		DP_ERR(edev, "reta_size %d is not supported\n",
		       reta_size);
		return -EINVAL;
	}

	for (i = 0; i < reta_size; i++) {
		idx = i / RTE_RETA_GROUP_SIZE;
		shift = i % RTE_RETA_GROUP_SIZE;
		if (reta_conf[idx].mask & (1ULL << shift)) {
			entry = qdev->rss_ind_table[i];
			reta_conf[idx].reta[shift] = entry;
		}
	}

	return 0;
}
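/* Changing the MTU requires quiescing the port: the Rx/Tx burst
 * handlers are parked on a dummy routine, the queues are stopped,
 * per-queue Rx buffer sizes are recomputed for the new frame size
 * and the port is restarted if it was running.
 */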
static int qede_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct rte_eth_dev_info dev_info = {0};
	struct qede_fastpath *fp;
	uint32_t max_rx_pkt_len;
	uint32_t frame_size;
	uint16_t bufsz;
	bool restart = false;
	int i, rc;

	PMD_INIT_FUNC_TRACE(edev);
	qede_dev_info_get(dev, &dev_info);
	max_rx_pkt_len = mtu + QEDE_MAX_ETHER_HDR_LEN;
	frame_size = max_rx_pkt_len;
	if ((mtu < ETHER_MIN_MTU) || (frame_size > dev_info.max_rx_pktlen)) {
		DP_ERR(edev, "MTU %u out of range, %u is maximum allowable\n",
		       mtu, dev_info.max_rx_pktlen - ETHER_HDR_LEN -
		       QEDE_ETH_OVERHEAD);
		return -EINVAL;
	}
	if (!dev->data->scattered_rx &&
	    frame_size > dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM) {
		DP_INFO(edev, "MTU greater than minimum RX buffer size of %u\n",
			dev->data->min_rx_buf_size);
		return -EINVAL;
	}

	/* Temporarily replace I/O functions with dummy ones. It cannot
	 * be set to NULL because rte_eth_rx_burst() doesn't check for NULL.
	 */
	dev->rx_pkt_burst = qede_rxtx_pkts_dummy;
	dev->tx_pkt_burst = qede_rxtx_pkts_dummy;
	if (dev->data->dev_started) {
		dev->data->dev_started = 0;
		qede_dev_stop(dev);
		restart = true;
	}

	qdev->mtu = mtu;

	/* Fix up RX buf size for all queues of the port */
	for_each_rss(i) {
		fp = &qdev->fp_array[i];
		if (fp->rxq != NULL) {
			bufsz = (uint16_t)rte_pktmbuf_data_room_size(
				fp->rxq->mb_pool) - RTE_PKTMBUF_HEADROOM;
			/* cache align the mbuf size to simplify rx_buf_size
			 * calculation
			 */
			bufsz = QEDE_FLOOR_TO_CACHE_LINE_SIZE(bufsz);
			rc = qede_calc_rx_buf_size(dev, bufsz, frame_size);
			if (rc < 0)
				return rc;

			fp->rxq->rx_buf_size = rc;
		}
	}
	if (max_rx_pkt_len > ETHER_MAX_LEN)
		dev->data->dev_conf.rxmode.jumbo_frame = 1;
	else
		dev->data->dev_conf.rxmode.jumbo_frame = 0;

	if (!dev->data->dev_started && restart) {
		qede_dev_start(dev);
		dev->data->dev_started = 1;
	}

	/* update max frame size */
	dev->data->dev_conf.rxmode.max_rx_pkt_len = max_rx_pkt_len;
	/* Reassign back the real I/O functions */
	dev->rx_pkt_burst = qede_recv_pkts;
	dev->tx_pkt_burst = qede_xmit_pkts;

	return 0;
}
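/* Add or remove the VXLAN UDP destination port. The tunnel class is
 * enabled on the fly if a port is added before any tunnel filter
 * exists, and torn down again when the last port is removed while no
 * filters remain.
 */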
static int
qede_conf_udp_dst_port(struct rte_eth_dev *eth_dev,
		       struct rte_eth_udp_tunnel *tunnel_udp,
		       bool add)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct ecore_tunnel_info tunn; /* @DPDK */
	struct ecore_hwfn *p_hwfn;
	struct ecore_ptt *p_ptt;
	uint16_t udp_port;
	int rc, i;

	PMD_INIT_FUNC_TRACE(edev);

	memset(&tunn, 0, sizeof(tunn));
	if (tunnel_udp->prot_type == RTE_TUNNEL_TYPE_VXLAN) {
		/* Enable VxLAN tunnel if needed before UDP port update using
		 * default MAC/VLAN classification.
		 */
		if (add) {
			if (qdev->vxlan.udp_port == tunnel_udp->udp_port) {
				DP_INFO(edev,
					"UDP port %u was already configured\n",
					tunnel_udp->udp_port);
				return ECORE_SUCCESS;
			}
			/* Enable VXLAN if it was not enabled while adding
			 * VXLAN filter before UDP port update.
			 */
			if (!qdev->vxlan.enable) {
				rc = qede_vxlan_enable(eth_dev,
					ECORE_TUNN_CLSS_MAC_VLAN, true, true);
				if (rc != ECORE_SUCCESS) {
					DP_ERR(edev, "Failed to enable VXLAN "
					       "prior to updating UDP port\n");
					return rc;
				}
			}
			udp_port = tunnel_udp->udp_port;
		} else {
			if (qdev->vxlan.udp_port != tunnel_udp->udp_port) {
				DP_ERR(edev, "UDP port %u doesn't exist\n",
				       tunnel_udp->udp_port);
				return ECORE_INVAL;
			}
			udp_port = 0;
		}

		tunn.vxlan_port.b_update_port = true;
		tunn.vxlan_port.port = udp_port;
		for_each_hwfn(edev, i) {
			p_hwfn = &edev->hwfns[i];
			if (IS_PF(edev)) {
				p_ptt = ecore_ptt_acquire(p_hwfn);
				if (p_ptt == NULL)
					return -EAGAIN;
			} else {
				p_ptt = NULL;
			}
			rc = ecore_sp_pf_update_tunn_cfg(p_hwfn, p_ptt, &tunn,
							 ECORE_SPQ_MODE_CB,
							 NULL);
			if (rc != ECORE_SUCCESS) {
				DP_ERR(edev, "Unable to config UDP port %u\n",
				       tunn.vxlan_port.port);
				if (IS_PF(edev))
					ecore_ptt_release(p_hwfn, p_ptt);
				return rc;
			}
			if (IS_PF(edev))
				ecore_ptt_release(p_hwfn, p_ptt);
		}

		qdev->vxlan.udp_port = udp_port;
		/* If the request is to delete UDP port and if the number of
		 * VXLAN filters has reached 0 then VxLAN offload can be
		 * disabled.
		 */
		if (!add && qdev->vxlan.enable && qdev->vxlan.num_filters == 0)
			return qede_vxlan_enable(eth_dev,
					ECORE_TUNN_CLSS_MAC_VLAN, false, true);
	}

	return 0;
}
static int
qede_udp_dst_port_del(struct rte_eth_dev *eth_dev,
		      struct rte_eth_udp_tunnel *tunnel_udp)
{
	return qede_conf_udp_dst_port(eth_dev, tunnel_udp, false);
}

static int
qede_udp_dst_port_add(struct rte_eth_dev *eth_dev,
		      struct rte_eth_udp_tunnel *tunnel_udp)
{
	return qede_conf_udp_dst_port(eth_dev, tunnel_udp, true);
}
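/* Map an rte tunnel filter type onto the matching ecore ucast filter
 * type and tunnel classification via the qede_tunn_types table. *clss
 * is left at MAX_ECORE_TUNN_CLSS when the combination is unsupported.
 */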
static void qede_get_ecore_tunn_params(uint32_t filter, uint32_t *type,
				       uint32_t *clss, char *str)
{
	uint16_t j;

	*clss = MAX_ECORE_TUNN_CLSS;

	for (j = 0; j < RTE_DIM(qede_tunn_types); j++) {
		if (filter == qede_tunn_types[j].rte_filter_type) {
			*type = qede_tunn_types[j].qede_type;
			*clss = qede_tunn_types[j].qede_tunn_clss;
			strcpy(str, qede_tunn_types[j].string);
			break;
		}
	}
}
static int
qede_set_ucast_tunn_cmn_param(struct ecore_filter_ucast *ucast,
			      const struct rte_eth_tunnel_filter_conf *conf,
			      enum ecore_filter_ucast_type type)
{
	/* Init common ucast params first */
	qede_set_ucast_cmn_params(ucast);

	/* Copy out the required fields based on classification type */
	ucast->type = type;

	switch (type) {
	case ECORE_FILTER_VNI:
		ucast->vni = conf->tenant_id;
		break;
	case ECORE_FILTER_INNER_VLAN:
		ucast->vlan = conf->inner_vlan;
		break;
	case ECORE_FILTER_MAC:
		memcpy(ucast->mac, conf->outer_mac.addr_bytes,
		       ETHER_ADDR_LEN);
		break;
	case ECORE_FILTER_INNER_MAC:
		memcpy(ucast->mac, conf->inner_mac.addr_bytes,
		       ETHER_ADDR_LEN);
		break;
	case ECORE_FILTER_MAC_VNI_PAIR:
		memcpy(ucast->mac, conf->outer_mac.addr_bytes,
		       ETHER_ADDR_LEN);
		ucast->vni = conf->tenant_id;
		break;
	case ECORE_FILTER_INNER_MAC_VNI_PAIR:
		memcpy(ucast->mac, conf->inner_mac.addr_bytes,
		       ETHER_ADDR_LEN);
		ucast->vni = conf->tenant_id;
		break;
	case ECORE_FILTER_INNER_PAIR:
		memcpy(ucast->mac, conf->inner_mac.addr_bytes,
		       ETHER_ADDR_LEN);
		ucast->vlan = conf->inner_vlan;
		break;
	default:
		return ECORE_INVAL;
	}

	return ECORE_SUCCESS;
}
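/* Service VXLAN tunnel filter add/delete requests. The first filter
 * added enables the VXLAN tunnel class, the per-device filter count is
 * tracked, and the offload is torn down once the count drops to zero.
 */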
static int qede_vxlan_tunn_config(struct rte_eth_dev *eth_dev,
				  enum rte_filter_op filter_op,
				  const struct rte_eth_tunnel_filter_conf *conf)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	enum ecore_filter_ucast_type type;
	enum ecore_tunn_clss clss = MAX_ECORE_TUNN_CLSS;
	struct ecore_filter_ucast ucast = {0};
	char str[80];
	uint16_t filter_type = 0;
	int rc = 0;

	PMD_INIT_FUNC_TRACE(edev);

	switch (filter_op) {
	case RTE_ETH_FILTER_ADD:
		if (IS_VF(edev))
			return qede_vxlan_enable(eth_dev,
					ECORE_TUNN_CLSS_MAC_VLAN, true, true);

		filter_type = conf->filter_type;
		/* Determine if the given filter classification is supported */
		qede_get_ecore_tunn_params(filter_type, &type, &clss, str);
		if (clss == MAX_ECORE_TUNN_CLSS) {
			DP_ERR(edev, "Unsupported filter type\n");
			return -EINVAL;
		}
		/* Init tunnel ucast params */
		rc = qede_set_ucast_tunn_cmn_param(&ucast, conf, type);
		if (rc != ECORE_SUCCESS) {
			DP_ERR(edev, "Unsupported VxLAN filter type 0x%x\n",
			       filter_type);
			return rc;
		}
		DP_INFO(edev, "Rule: \"%s\", op %d, type 0x%x\n",
			str, filter_op, ucast.type);

		ucast.opcode = ECORE_FILTER_ADD;

		/* Skip MAC/VLAN if filter is based on VNI */
		if (!(filter_type & ETH_TUNNEL_FILTER_TENID)) {
			rc = qede_mac_int_ops(eth_dev, &ucast, 1);
			if (rc == 0) {
				/* Enable accept anyvlan */
				qede_config_accept_any_vlan(qdev, true);
			}
		} else {
			rc = qede_ucast_filter(eth_dev, &ucast, 1);
			if (rc == 0)
				rc = ecore_filter_ucast_cmd(edev, &ucast,
						ECORE_SPQ_MODE_CB, NULL);
		}

		if (rc != ECORE_SUCCESS)
			return rc;

		qdev->vxlan.num_filters++;
		qdev->vxlan.filter_type = filter_type;
		if (!qdev->vxlan.enable)
			return qede_vxlan_enable(eth_dev, clss, true, true);

		break;
	case RTE_ETH_FILTER_DELETE:
		if (IS_VF(edev))
			return qede_vxlan_enable(eth_dev,
					ECORE_TUNN_CLSS_MAC_VLAN, false, true);

		filter_type = conf->filter_type;
		/* Determine if the given filter classification is supported */
		qede_get_ecore_tunn_params(filter_type, &type, &clss, str);
		if (clss == MAX_ECORE_TUNN_CLSS) {
			DP_ERR(edev, "Unsupported filter type\n");
			return -EINVAL;
		}
		/* Init tunnel ucast params */
		rc = qede_set_ucast_tunn_cmn_param(&ucast, conf, type);
		if (rc != ECORE_SUCCESS) {
			DP_ERR(edev, "Unsupported VxLAN filter type 0x%x\n",
			       filter_type);
			return rc;
		}
		DP_INFO(edev, "Rule: \"%s\", op %d, type 0x%x\n",
			str, filter_op, ucast.type);

		ucast.opcode = ECORE_FILTER_REMOVE;

		if (!(filter_type & ETH_TUNNEL_FILTER_TENID)) {
			rc = qede_mac_int_ops(eth_dev, &ucast, 0);
		} else {
			rc = qede_ucast_filter(eth_dev, &ucast, 0);
			if (rc == 0)
				rc = ecore_filter_ucast_cmd(edev, &ucast,
						ECORE_SPQ_MODE_CB, NULL);
		}
		if (rc != ECORE_SUCCESS)
			return rc;

		qdev->vxlan.num_filters--;

		/* Disable VXLAN if VXLAN filters become 0 */
		if (qdev->vxlan.num_filters == 0)
			return qede_vxlan_enable(eth_dev, clss, false, true);
		break;
	default:
		DP_ERR(edev, "Unsupported operation %d\n", filter_op);
		return -EINVAL;
	}

	return 0;
}
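/* Top-level filter_ctrl entry point: dispatch on the filter type. Only
 * VXLAN tunnel, flow director (FDIR) and ntuple filters are serviced;
 * every other type is rejected with -EINVAL.
 */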
int qede_dev_filter_ctrl(struct rte_eth_dev *eth_dev,
			 enum rte_filter_type filter_type,
			 enum rte_filter_op filter_op,
			 void *arg)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct rte_eth_tunnel_filter_conf *filter_conf =
			(struct rte_eth_tunnel_filter_conf *)arg;

	switch (filter_type) {
	case RTE_ETH_FILTER_TUNNEL:
		switch (filter_conf->tunnel_type) {
		case RTE_TUNNEL_TYPE_VXLAN:
			DP_INFO(edev,
				"Packet steering to the specified Rx queue"
				" is not supported with VXLAN tunneling\n");
			return qede_vxlan_tunn_config(eth_dev, filter_op,
						      filter_conf);
		/* Placeholders for future tunneling support */
		case RTE_TUNNEL_TYPE_GENEVE:
		case RTE_TUNNEL_TYPE_TEREDO:
		case RTE_TUNNEL_TYPE_NVGRE:
		case RTE_TUNNEL_TYPE_IP_IN_GRE:
		case RTE_L2_TUNNEL_TYPE_E_TAG:
			DP_ERR(edev, "Unsupported tunnel type %d\n",
			       filter_conf->tunnel_type);
			return -EINVAL;
		case RTE_TUNNEL_TYPE_NONE:
		default:
			return 0;
		}
		break;
	case RTE_ETH_FILTER_FDIR:
		return qede_fdir_filter_conf(eth_dev, filter_op, arg);
	case RTE_ETH_FILTER_NTUPLE:
		return qede_ntuple_filter_conf(eth_dev, filter_op, arg);
	case RTE_ETH_FILTER_MACVLAN:
	case RTE_ETH_FILTER_ETHERTYPE:
	case RTE_ETH_FILTER_FLEXIBLE:
	case RTE_ETH_FILTER_SYN:
	case RTE_ETH_FILTER_HASH:
	case RTE_ETH_FILTER_L2_TUNNEL:
	case RTE_ETH_FILTER_MAX:
	default:
		DP_ERR(edev, "Unsupported filter type %d\n",
		       filter_type);
		return -EINVAL;
	}

	return 0;
}
static const struct eth_dev_ops qede_eth_dev_ops = {
	.dev_configure = qede_dev_configure,
	.dev_infos_get = qede_dev_info_get,
	.rx_queue_setup = qede_rx_queue_setup,
	.rx_queue_release = qede_rx_queue_release,
	.tx_queue_setup = qede_tx_queue_setup,
	.tx_queue_release = qede_tx_queue_release,
	.dev_start = qede_dev_start,
	.dev_set_link_up = qede_dev_set_link_up,
	.dev_set_link_down = qede_dev_set_link_down,
	.link_update = qede_link_update,
	.promiscuous_enable = qede_promiscuous_enable,
	.promiscuous_disable = qede_promiscuous_disable,
	.allmulticast_enable = qede_allmulticast_enable,
	.allmulticast_disable = qede_allmulticast_disable,
	.set_mc_addr_list = qede_set_mc_addr_list,
	.dev_stop = qede_dev_stop,
	.dev_close = qede_dev_close,
	.stats_get = qede_get_stats,
	.stats_reset = qede_reset_stats,
	.xstats_get = qede_get_xstats,
	.xstats_reset = qede_reset_xstats,
	.xstats_get_names = qede_get_xstats_names,
	.mac_addr_add = qede_mac_addr_add,
	.mac_addr_remove = qede_mac_addr_remove,
	.mac_addr_set = qede_mac_addr_set,
	.vlan_offload_set = qede_vlan_offload_set,
	.vlan_filter_set = qede_vlan_filter_set,
	.flow_ctrl_set = qede_flow_ctrl_set,
	.flow_ctrl_get = qede_flow_ctrl_get,
	.dev_supported_ptypes_get = qede_dev_supported_ptypes_get,
	.rss_hash_update = qede_rss_hash_update,
	.rss_hash_conf_get = qede_rss_hash_conf_get,
	.reta_update = qede_rss_reta_update,
	.reta_query = qede_rss_reta_query,
	.mtu_set = qede_set_mtu,
	.filter_ctrl = qede_dev_filter_ctrl,
	.udp_tunnel_port_add = qede_udp_dst_port_add,
	.udp_tunnel_port_del = qede_udp_dst_port_del,
};
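/* The VF ops table differs from the PF one mainly in what is absent:
 * flow control and filter_ctrl are PF-only operations and are not
 * exposed to VFs.
 */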
static const struct eth_dev_ops qede_eth_vf_dev_ops = {
	.dev_configure = qede_dev_configure,
	.dev_infos_get = qede_dev_info_get,
	.rx_queue_setup = qede_rx_queue_setup,
	.rx_queue_release = qede_rx_queue_release,
	.tx_queue_setup = qede_tx_queue_setup,
	.tx_queue_release = qede_tx_queue_release,
	.dev_start = qede_dev_start,
	.dev_set_link_up = qede_dev_set_link_up,
	.dev_set_link_down = qede_dev_set_link_down,
	.link_update = qede_link_update,
	.promiscuous_enable = qede_promiscuous_enable,
	.promiscuous_disable = qede_promiscuous_disable,
	.allmulticast_enable = qede_allmulticast_enable,
	.allmulticast_disable = qede_allmulticast_disable,
	.set_mc_addr_list = qede_set_mc_addr_list,
	.dev_stop = qede_dev_stop,
	.dev_close = qede_dev_close,
	.stats_get = qede_get_stats,
	.stats_reset = qede_reset_stats,
	.xstats_get = qede_get_xstats,
	.xstats_reset = qede_reset_xstats,
	.xstats_get_names = qede_get_xstats_names,
	.vlan_offload_set = qede_vlan_offload_set,
	.vlan_filter_set = qede_vlan_filter_set,
	.dev_supported_ptypes_get = qede_dev_supported_ptypes_get,
	.rss_hash_update = qede_rss_hash_update,
	.rss_hash_conf_get = qede_rss_hash_conf_get,
	.reta_update = qede_rss_reta_update,
	.reta_query = qede_rss_reta_query,
	.mtu_set = qede_set_mtu,
	.udp_tunnel_port_add = qede_udp_dst_port_add,
	.udp_tunnel_port_del = qede_udp_dst_port_del,
	.mac_addr_add = qede_mac_addr_add,
	.mac_addr_remove = qede_mac_addr_remove,
	.mac_addr_set = qede_mac_addr_set,
};
static void qede_update_pf_params(struct ecore_dev *edev)
{
	struct ecore_pf_params pf_params;

	memset(&pf_params, 0, sizeof(struct ecore_pf_params));
	pf_params.eth_pf_params.num_cons = QEDE_PF_NUM_CONNS;
	pf_params.eth_pf_params.num_arfs_filters = QEDE_RFS_MAX_FLTR;
	qed_ops->common->update_pf_params(edev, &pf_params);
}
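/* Common probe-time initialization shared by PF and VF: attach to the
 * ecore layer, start the slowpath, query device capabilities, set up
 * MAC address storage and install the matching dev_ops table.
 */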
static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
{
	struct rte_pci_device *pci_dev;
	struct rte_pci_addr pci_addr;
	struct qede_dev *adapter;
	struct ecore_dev *edev;
	struct qed_dev_eth_info dev_info;
	struct qed_slowpath_params params;
	static bool do_once = true;
	uint8_t bulletin_change;
	uint8_t vf_mac[ETHER_ADDR_LEN];
	uint8_t is_mac_forced;
	bool is_mac_exist;
	/* Fix up ecore debug level */
	uint32_t dp_module = ~0 & ~ECORE_MSG_HW;
	uint8_t dp_level = ECORE_LEVEL_VERBOSE;
	uint32_t int_mode;
	int rc;

	/* Extract key data structures */
	adapter = eth_dev->data->dev_private;
	adapter->ethdev = eth_dev;
	edev = &adapter->edev;
	pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	pci_addr = pci_dev->addr;

	PMD_INIT_FUNC_TRACE(edev);
	snprintf(edev->name, NAME_SIZE, PCI_SHORT_PRI_FMT ":dpdk-port-%u",
		 pci_addr.bus, pci_addr.devid, pci_addr.function,
		 eth_dev->data->port_id);

	eth_dev->rx_pkt_burst = qede_recv_pkts;
	eth_dev->tx_pkt_burst = qede_xmit_pkts;
	eth_dev->tx_pkt_prepare = qede_xmit_prep_pkts;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		DP_ERR(edev, "Skipping device init from secondary process\n");
		return 0;
	}

	rte_eth_copy_pci_info(eth_dev, pci_dev);

	/* @DPDK */
	edev->vendor_id = pci_dev->id.vendor_id;
	edev->device_id = pci_dev->id.device_id;

	qed_ops = qed_get_eth_ops();
	if (!qed_ops) {
		DP_ERR(edev, "Failed to get qed_eth_ops_pass\n");
		return -EINVAL;
	}

	DP_INFO(edev, "Starting qede probe\n");
	rc = qed_ops->common->probe(edev, pci_dev, dp_module,
				    dp_level, is_vf);
	if (rc != 0) {
		DP_ERR(edev, "qede probe failed rc %d\n", rc);
		return -ENODEV;
	}
	qede_update_pf_params(edev);
	switch (pci_dev->intr_handle.type) {
	case RTE_INTR_HANDLE_UIO_INTX:
	case RTE_INTR_HANDLE_VFIO_LEGACY:
		int_mode = ECORE_INT_MODE_INTA;
		rte_intr_callback_register(&pci_dev->intr_handle,
					   qede_interrupt_handler_intx,
					   (void *)eth_dev);
		break;
	default:
		int_mode = ECORE_INT_MODE_MSIX;
		rte_intr_callback_register(&pci_dev->intr_handle,
					   qede_interrupt_handler,
					   (void *)eth_dev);
	}

	if (rte_intr_enable(&pci_dev->intr_handle)) {
		DP_ERR(edev, "rte_intr_enable() failed\n");
		return -ENODEV;
	}

	/* Start the Slowpath-process */
	memset(&params, 0, sizeof(struct qed_slowpath_params));

	params.int_mode = int_mode;
	params.drv_major = QEDE_PMD_VERSION_MAJOR;
	params.drv_minor = QEDE_PMD_VERSION_MINOR;
	params.drv_rev = QEDE_PMD_VERSION_REVISION;
	params.drv_eng = QEDE_PMD_VERSION_PATCH;
	strncpy((char *)params.name, QEDE_PMD_VER_PREFIX,
		QEDE_PMD_DRV_VER_STR_SIZE);
	/* For CMT mode device do periodic polling for slowpath events.
	 * This is required since uio device uses only one MSI-x
	 * interrupt vector but we need one for each engine.
	 */
	if (ECORE_IS_CMT(edev) && IS_PF(edev)) {
		rc = rte_eal_alarm_set(timer_period * US_PER_S,
				       qede_poll_sp_sb_cb,
				       (void *)eth_dev);
		if (rc != 0) {
			DP_ERR(edev, "Unable to start periodic"
				     " timer rc %d\n", rc);
			return -EINVAL;
		}
	}

	rc = qed_ops->common->slowpath_start(edev, &params);
	if (rc) {
		DP_ERR(edev, "Cannot start slowpath rc = %d\n", rc);
		rte_eal_alarm_cancel(qede_poll_sp_sb_cb,
				     (void *)eth_dev);
		return -ENODEV;
	}

	rc = qed_ops->fill_dev_info(edev, &dev_info);
	if (rc) {
		DP_ERR(edev, "Cannot get device_info rc %d\n", rc);
		qed_ops->common->slowpath_stop(edev);
		qed_ops->common->remove(edev);
		rte_eal_alarm_cancel(qede_poll_sp_sb_cb,
				     (void *)eth_dev);
		return -ENODEV;
	}
	qede_alloc_etherdev(adapter, &dev_info);

	adapter->ops->common->set_name(edev, edev->name);

	if (!is_vf)
		adapter->dev_info.num_mac_filters =
			(uint32_t)RESC_NUM(ECORE_LEADING_HWFN(edev),
					   ECORE_MAC);
	else
		ecore_vf_get_num_mac_filters(ECORE_LEADING_HWFN(edev),
			(uint32_t *)&adapter->dev_info.num_mac_filters);

	/* Allocate memory for storing MAC addr */
	eth_dev->data->mac_addrs = rte_zmalloc(edev->name,
					(ETHER_ADDR_LEN *
					adapter->dev_info.num_mac_filters),
					RTE_CACHE_LINE_SIZE);

	if (eth_dev->data->mac_addrs == NULL) {
		DP_ERR(edev, "Failed to allocate MAC address\n");
		qed_ops->common->slowpath_stop(edev);
		qed_ops->common->remove(edev);
		rte_eal_alarm_cancel(qede_poll_sp_sb_cb,
				     (void *)eth_dev);
		return -ENOMEM;
	}

	if (!is_vf) {
		ether_addr_copy((struct ether_addr *)edev->hwfns[0].
				hw_info.hw_mac_addr,
				&eth_dev->data->mac_addrs[0]);
		ether_addr_copy(&eth_dev->data->mac_addrs[0],
				&adapter->primary_mac);
	} else {
		ecore_vf_read_bulletin(ECORE_LEADING_HWFN(edev),
				       &bulletin_change);
		if (bulletin_change) {
			is_mac_exist =
			    ecore_vf_bulletin_get_forced_mac(
						ECORE_LEADING_HWFN(edev),
						vf_mac,
						&is_mac_forced);
			if (is_mac_exist && is_mac_forced) {
				DP_INFO(edev, "VF macaddr received from PF\n");
				ether_addr_copy((struct ether_addr *)&vf_mac,
						&eth_dev->data->mac_addrs[0]);
				ether_addr_copy(&eth_dev->data->mac_addrs[0],
						&adapter->primary_mac);
			} else {
				DP_ERR(edev, "No VF macaddr assigned\n");
			}
		}
	}
	eth_dev->dev_ops = (is_vf) ? &qede_eth_vf_dev_ops : &qede_eth_dev_ops;

	if (do_once) {
#ifdef RTE_LIBRTE_QEDE_DEBUG_INFO
		qede_print_adapter_info(adapter);
#endif
		do_once = false;
	}

	/* Bring-up the link */
	qede_dev_set_link_state(eth_dev, true);

	adapter->num_tx_queues = 0;
	adapter->num_rx_queues = 0;
	SLIST_INIT(&adapter->fdir_info.fdir_list_head);
	SLIST_INIT(&adapter->vlan_list_head);
	SLIST_INIT(&adapter->uc_list_head);
	SLIST_INIT(&adapter->mc_list_head);
	adapter->mtu = ETHER_MTU;
	adapter->vport_started = false;

	/* VF tunnel offloads is enabled by default in PF driver */
	adapter->vxlan.enable = true;
	adapter->vxlan.num_filters = 0;
	adapter->vxlan.filter_type = ETH_TUNNEL_FILTER_IMAC |
				     ETH_TUNNEL_FILTER_IVLAN;
	adapter->vxlan.udp_port = QEDE_VXLAN_DEF_PORT;

	DP_INFO(edev, "MAC address : %02x:%02x:%02x:%02x:%02x:%02x\n",
		adapter->primary_mac.addr_bytes[0],
		adapter->primary_mac.addr_bytes[1],
		adapter->primary_mac.addr_bytes[2],
		adapter->primary_mac.addr_bytes[3],
		adapter->primary_mac.addr_bytes[4],
		adapter->primary_mac.addr_bytes[5]);

	DP_INFO(edev, "Device initialized\n");

	return 0;
}
static int qedevf_eth_dev_init(struct rte_eth_dev *eth_dev)
{
	return qede_common_dev_init(eth_dev, 1);
}

static int qede_eth_dev_init(struct rte_eth_dev *eth_dev)
{
	return qede_common_dev_init(eth_dev, 0);
}
static int qede_dev_common_uninit(struct rte_eth_dev *eth_dev)
{
#ifdef RTE_LIBRTE_QEDE_DEBUG_INIT
	struct qede_dev *qdev = eth_dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;

	PMD_INIT_FUNC_TRACE(edev);
#endif

	/* only uninitialize in the primary process */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	/* safe to close dev here */
	qede_dev_close(eth_dev);

	eth_dev->dev_ops = NULL;
	eth_dev->rx_pkt_burst = NULL;
	eth_dev->tx_pkt_burst = NULL;

	if (eth_dev->data->mac_addrs)
		rte_free(eth_dev->data->mac_addrs);

	eth_dev->data->mac_addrs = NULL;

	return 0;
}

static int qede_eth_dev_uninit(struct rte_eth_dev *eth_dev)
{
	return qede_dev_common_uninit(eth_dev);
}

static int qedevf_eth_dev_uninit(struct rte_eth_dev *eth_dev)
{
	return qede_dev_common_uninit(eth_dev);
}
static const struct rte_pci_id pci_id_qedevf_map[] = {
#define QEDEVF_RTE_PCI_DEVICE(dev) RTE_PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, dev)
	{
		QEDEVF_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_NX2_VF)
	},
	{
		QEDEVF_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_57980S_IOV)
	},
	{
		QEDEVF_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_AH_IOV)
	},
	{.vendor_id = 0,}
};

static const struct rte_pci_id pci_id_qede_map[] = {
#define QEDE_RTE_PCI_DEVICE(dev) RTE_PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, dev)
	{
		QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_NX2_57980E)
	},
	{
		QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_NX2_57980S)
	},
	{
		QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_57980S_40)
	},
	{
		QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_57980S_25)
	},
	{
		QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_57980S_100)
	},
	{
		QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_57980S_50)
	},
	{
		QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_AH_50G)
	},
	{
		QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_AH_10G)
	},
	{
		QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_AH_40G)
	},
	{
		QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_AH_25G)
	},
	{.vendor_id = 0,}
};
static int qedevf_eth_dev_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
				    struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev,
		sizeof(struct qede_dev), qedevf_eth_dev_init);
}

static int qedevf_eth_dev_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, qedevf_eth_dev_uninit);
}

static struct rte_pci_driver rte_qedevf_pmd = {
	.id_table = pci_id_qedevf_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
	.probe = qedevf_eth_dev_pci_probe,
	.remove = qedevf_eth_dev_pci_remove,
};

static int qede_eth_dev_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
				  struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev,
		sizeof(struct qede_dev), qede_eth_dev_init);
}

static int qede_eth_dev_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, qede_eth_dev_uninit);
}

static struct rte_pci_driver rte_qede_pmd = {
	.id_table = pci_id_qede_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
	.probe = qede_eth_dev_pci_probe,
	.remove = qede_eth_dev_pci_remove,
};

RTE_PMD_REGISTER_PCI(net_qede, rte_qede_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_qede, pci_id_qede_map);
RTE_PMD_REGISTER_KMOD_DEP(net_qede, "* igb_uio | uio_pci_generic | vfio-pci");
RTE_PMD_REGISTER_PCI(net_qede_vf, rte_qedevf_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_qede_vf, pci_id_qedevf_map);
RTE_PMD_REGISTER_KMOD_DEP(net_qede_vf, "* igb_uio | vfio-pci");