2 * Copyright (c) 2016 QLogic Corporation.
6 * See LICENSE.qede_pmd for copyright and licensing details.
9 #include "qede_ethdev.h"
10 #include <rte_alarm.h>
11 #include <rte_version.h>
14 static const struct qed_eth_ops *qed_ops;
15 static int64_t timer_period = 1;
17 /* VXLAN tunnel classification mapping */
18 const struct _qede_vxlan_tunn_types {
19 uint16_t rte_filter_type;
20 enum ecore_filter_ucast_type qede_type;
21 enum ecore_tunn_clss qede_tunn_clss;
23 } qede_tunn_types[] = {
25 ETH_TUNNEL_FILTER_OMAC,
27 ECORE_TUNN_CLSS_MAC_VLAN,
31 ETH_TUNNEL_FILTER_TENID,
33 ECORE_TUNN_CLSS_MAC_VNI,
37 ETH_TUNNEL_FILTER_IMAC,
38 ECORE_FILTER_INNER_MAC,
39 ECORE_TUNN_CLSS_INNER_MAC_VLAN,
43 ETH_TUNNEL_FILTER_IVLAN,
44 ECORE_FILTER_INNER_VLAN,
45 ECORE_TUNN_CLSS_INNER_MAC_VLAN,
49 ETH_TUNNEL_FILTER_OMAC | ETH_TUNNEL_FILTER_TENID,
50 ECORE_FILTER_MAC_VNI_PAIR,
51 ECORE_TUNN_CLSS_MAC_VNI,
55 ETH_TUNNEL_FILTER_OMAC | ETH_TUNNEL_FILTER_IMAC,
58 "outer-mac and inner-mac"
61 ETH_TUNNEL_FILTER_OMAC | ETH_TUNNEL_FILTER_IVLAN,
64 "outer-mac and inner-vlan"
67 ETH_TUNNEL_FILTER_TENID | ETH_TUNNEL_FILTER_IMAC,
68 ECORE_FILTER_INNER_MAC_VNI_PAIR,
69 ECORE_TUNN_CLSS_INNER_MAC_VNI,
73 ETH_TUNNEL_FILTER_TENID | ETH_TUNNEL_FILTER_IVLAN,
79 ETH_TUNNEL_FILTER_IMAC | ETH_TUNNEL_FILTER_IVLAN,
80 ECORE_FILTER_INNER_PAIR,
81 ECORE_TUNN_CLSS_INNER_MAC_VLAN,
82 "inner-mac and inner-vlan",
85 ETH_TUNNEL_FILTER_OIP,
91 ETH_TUNNEL_FILTER_IIP,
97 RTE_TUNNEL_FILTER_IMAC_IVLAN,
103 RTE_TUNNEL_FILTER_IMAC_IVLAN_TENID,
109 RTE_TUNNEL_FILTER_IMAC_TENID,
115 RTE_TUNNEL_FILTER_OMAC_TENID_IMAC,
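/*
 * Illustrative sketch (not part of the original sources): the table above
 * is scanned linearly by qede_get_ecore_tunn_params() further below to map
 * an rte ETH_TUNNEL_FILTER_* combination onto ecore filter/classification
 * types, e.g. for an outer-mac + VNI filter:
 *
 *   uint32_t type, clss;
 *   char str[80]; // hypothetical buffer size
 *   qede_get_ecore_tunn_params(ETH_TUNNEL_FILTER_OMAC |
 *                              ETH_TUNNEL_FILTER_TENID, &type, &clss, str);
 *   // type == ECORE_FILTER_MAC_VNI_PAIR, clss == ECORE_TUNN_CLSS_MAC_VNI
 */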
122 struct rte_qede_xstats_name_off {
123 char name[RTE_ETH_XSTATS_NAME_SIZE];
127 static const struct rte_qede_xstats_name_off qede_xstats_strings[] = {
128 {"rx_unicast_bytes", offsetof(struct ecore_eth_stats, rx_ucast_bytes)},
129 {"rx_multicast_bytes",
130 offsetof(struct ecore_eth_stats, rx_mcast_bytes)},
131 {"rx_broadcast_bytes",
132 offsetof(struct ecore_eth_stats, rx_bcast_bytes)},
133 {"rx_unicast_packets", offsetof(struct ecore_eth_stats, rx_ucast_pkts)},
134 {"rx_multicast_packets",
135 offsetof(struct ecore_eth_stats, rx_mcast_pkts)},
136 {"rx_broadcast_packets",
137 offsetof(struct ecore_eth_stats, rx_bcast_pkts)},
139 {"tx_unicast_bytes", offsetof(struct ecore_eth_stats, tx_ucast_bytes)},
140 {"tx_multicast_bytes",
141 offsetof(struct ecore_eth_stats, tx_mcast_bytes)},
142 {"tx_broadcast_bytes",
143 offsetof(struct ecore_eth_stats, tx_bcast_bytes)},
144 {"tx_unicast_packets", offsetof(struct ecore_eth_stats, tx_ucast_pkts)},
145 {"tx_multicast_packets",
146 offsetof(struct ecore_eth_stats, tx_mcast_pkts)},
147 {"tx_broadcast_packets",
148 offsetof(struct ecore_eth_stats, tx_bcast_pkts)},
150 {"rx_64_byte_packets",
151 offsetof(struct ecore_eth_stats, rx_64_byte_packets)},
152 {"rx_65_to_127_byte_packets",
153 offsetof(struct ecore_eth_stats, rx_65_to_127_byte_packets)},
154 {"rx_128_to_255_byte_packets",
155 offsetof(struct ecore_eth_stats, rx_128_to_255_byte_packets)},
156 {"rx_256_to_511_byte_packets",
157 offsetof(struct ecore_eth_stats, rx_256_to_511_byte_packets)},
158 {"rx_512_to_1023_byte_packets",
159 offsetof(struct ecore_eth_stats, rx_512_to_1023_byte_packets)},
160 {"rx_1024_to_1518_byte_packets",
161 offsetof(struct ecore_eth_stats, rx_1024_to_1518_byte_packets)},
162 {"rx_1519_to_1522_byte_packets",
163 offsetof(struct ecore_eth_stats, rx_1519_to_1522_byte_packets)},
164 {"rx_1519_to_2047_byte_packets",
165 offsetof(struct ecore_eth_stats, rx_1519_to_2047_byte_packets)},
166 {"rx_2048_to_4095_byte_packets",
167 offsetof(struct ecore_eth_stats, rx_2048_to_4095_byte_packets)},
168 {"rx_4096_to_9216_byte_packets",
169 offsetof(struct ecore_eth_stats, rx_4096_to_9216_byte_packets)},
170 {"rx_9217_to_16383_byte_packets",
171 offsetof(struct ecore_eth_stats,
172 rx_9217_to_16383_byte_packets)},
173 {"tx_64_byte_packets",
174 offsetof(struct ecore_eth_stats, tx_64_byte_packets)},
175 {"tx_65_to_127_byte_packets",
176 offsetof(struct ecore_eth_stats, tx_65_to_127_byte_packets)},
177 {"tx_128_to_255_byte_packets",
178 offsetof(struct ecore_eth_stats, tx_128_to_255_byte_packets)},
179 {"tx_256_to_511_byte_packets",
180 offsetof(struct ecore_eth_stats, tx_256_to_511_byte_packets)},
181 {"tx_512_to_1023_byte_packets",
182 offsetof(struct ecore_eth_stats, tx_512_to_1023_byte_packets)},
183 {"tx_1024_to_1518_byte_packets",
184 offsetof(struct ecore_eth_stats, tx_1024_to_1518_byte_packets)},
185 {"trx_1519_to_1522_byte_packets",
186 offsetof(struct ecore_eth_stats, tx_1519_to_2047_byte_packets)},
187 {"tx_2048_to_4095_byte_packets",
188 offsetof(struct ecore_eth_stats, tx_2048_to_4095_byte_packets)},
189 {"tx_4096_to_9216_byte_packets",
190 offsetof(struct ecore_eth_stats, tx_4096_to_9216_byte_packets)},
191 {"tx_9217_to_16383_byte_packets",
192 offsetof(struct ecore_eth_stats,
193 tx_9217_to_16383_byte_packets)},
195 {"rx_mac_crtl_frames",
196 offsetof(struct ecore_eth_stats, rx_mac_crtl_frames)},
197 {"tx_mac_control_frames",
198 offsetof(struct ecore_eth_stats, tx_mac_ctrl_frames)},
199 {"rx_pause_frames", offsetof(struct ecore_eth_stats, rx_pause_frames)},
200 {"tx_pause_frames", offsetof(struct ecore_eth_stats, tx_pause_frames)},
201 {"rx_priority_flow_control_frames",
202 offsetof(struct ecore_eth_stats, rx_pfc_frames)},
203 {"tx_priority_flow_control_frames",
204 offsetof(struct ecore_eth_stats, tx_pfc_frames)},
206 {"rx_crc_errors", offsetof(struct ecore_eth_stats, rx_crc_errors)},
207 {"rx_align_errors", offsetof(struct ecore_eth_stats, rx_align_errors)},
208 {"rx_carrier_errors",
209 offsetof(struct ecore_eth_stats, rx_carrier_errors)},
210 {"rx_oversize_packet_errors",
211 offsetof(struct ecore_eth_stats, rx_oversize_packets)},
212 {"rx_jabber_errors", offsetof(struct ecore_eth_stats, rx_jabbers)},
213 {"rx_undersize_packet_errors",
214 offsetof(struct ecore_eth_stats, rx_undersize_packets)},
215 {"rx_fragments", offsetof(struct ecore_eth_stats, rx_fragments)},
216 {"rx_host_buffer_not_available",
217 offsetof(struct ecore_eth_stats, no_buff_discards)},
218 /* Number of packets discarded because they are bigger than MTU */
219 {"rx_packet_too_big_discards",
220 offsetof(struct ecore_eth_stats, packet_too_big_discard)},
221 {"rx_ttl_zero_discards",
222 offsetof(struct ecore_eth_stats, ttl0_discard)},
223 {"rx_multi_function_tag_filter_discards",
224 offsetof(struct ecore_eth_stats, mftag_filter_discards)},
225 {"rx_mac_filter_discards",
226 offsetof(struct ecore_eth_stats, mac_filter_discards)},
227 {"rx_hw_buffer_truncates",
228 offsetof(struct ecore_eth_stats, brb_truncates)},
229 {"rx_hw_buffer_discards",
230 offsetof(struct ecore_eth_stats, brb_discards)},
231 {"tx_lpi_entry_count",
232 offsetof(struct ecore_eth_stats, tx_lpi_entry_count)},
233 {"tx_total_collisions",
234 offsetof(struct ecore_eth_stats, tx_total_collisions)},
235 {"tx_error_drop_packets",
236 offsetof(struct ecore_eth_stats, tx_err_drop_pkts)},
238 {"rx_mac_bytes", offsetof(struct ecore_eth_stats, rx_mac_bytes)},
239 {"rx_mac_unicast_packets",
240 offsetof(struct ecore_eth_stats, rx_mac_uc_packets)},
241 {"rx_mac_multicast_packets",
242 offsetof(struct ecore_eth_stats, rx_mac_mc_packets)},
243 {"rx_mac_broadcast_packets",
244 offsetof(struct ecore_eth_stats, rx_mac_bc_packets)},
246 offsetof(struct ecore_eth_stats, rx_mac_frames_ok)},
247 {"tx_mac_bytes", offsetof(struct ecore_eth_stats, tx_mac_bytes)},
248 {"tx_mac_unicast_packets",
249 offsetof(struct ecore_eth_stats, tx_mac_uc_packets)},
250 {"tx_mac_multicast_packets",
251 offsetof(struct ecore_eth_stats, tx_mac_mc_packets)},
252 {"tx_mac_broadcast_packets",
253 offsetof(struct ecore_eth_stats, tx_mac_bc_packets)},
255 {"lro_coalesced_packets",
256 offsetof(struct ecore_eth_stats, tpa_coalesced_pkts)},
257 {"lro_coalesced_events",
258 offsetof(struct ecore_eth_stats, tpa_coalesced_events)},
260 offsetof(struct ecore_eth_stats, tpa_aborts_num)},
261 {"lro_not_coalesced_packets",
262 offsetof(struct ecore_eth_stats, tpa_not_coalesced_pkts)},
263 {"lro_coalesced_bytes",
264 offsetof(struct ecore_eth_stats, tpa_coalesced_bytes)},
267 static const struct rte_qede_xstats_name_off qede_rxq_xstats_strings[] = {
269 offsetof(struct qede_rx_queue, rx_segs)},
271 offsetof(struct qede_rx_queue, rx_hw_errors)},
272 {"rx_q_allocation_errors",
273 offsetof(struct qede_rx_queue, rx_alloc_errors)}
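/*
 * Illustrative sketch (not from the original sources): both name/offset
 * tables above allow counters to be read generically by adding the stored
 * byte offset to the base of the corresponding stats structure, which is
 * what qede_get_xstats() below does:
 *
 *   struct ecore_eth_stats stats;
 *   qdev->ops->get_vport_stats(edev, &stats);
 *   uint64_t val = *(uint64_t *)((char *)&stats +
 *                                qede_xstats_strings[i].offset);
 */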
276 static void qede_interrupt_action(struct ecore_hwfn *p_hwfn)
278 ecore_int_sp_dpc((osal_int_ptr_t)(p_hwfn));
282 qede_interrupt_handler(void *param)
284 struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param;
285 struct qede_dev *qdev = eth_dev->data->dev_private;
286 struct ecore_dev *edev = &qdev->edev;
288 qede_interrupt_action(ECORE_LEADING_HWFN(edev));
289 if (rte_intr_enable(eth_dev->intr_handle))
290 DP_ERR(edev, "rte_intr_enable failed\n");
294 qede_alloc_etherdev(struct qede_dev *qdev, struct qed_dev_eth_info *info)
296 rte_memcpy(&qdev->dev_info, info, sizeof(*info));
297 qdev->num_tc = qdev->dev_info.num_tc;
301 #ifdef RTE_LIBRTE_QEDE_DEBUG_INFO
302 static void qede_print_adapter_info(struct qede_dev *qdev)
304 struct ecore_dev *edev = &qdev->edev;
305 struct qed_dev_info *info = &qdev->dev_info.common;
306 static char drv_ver[QEDE_PMD_DRV_VER_STR_SIZE];
307 static char ver_str[QEDE_PMD_DRV_VER_STR_SIZE];
309 DP_INFO(edev, "*********************************\n");
310 DP_INFO(edev, " DPDK version:%s\n", rte_version());
311 DP_INFO(edev, " Chip details : %s %c%d\n",
312 ECORE_IS_BB(edev) ? "BB" : "AH",
313 'A' + edev->chip_rev,
314 (int)edev->chip_metal);
315 snprintf(ver_str, QEDE_PMD_DRV_VER_STR_SIZE, "%d.%d.%d.%d",
316 info->fw_major, info->fw_minor, info->fw_rev, info->fw_eng);
317 snprintf(drv_ver, QEDE_PMD_DRV_VER_STR_SIZE, "%s_%s",
318 ver_str, QEDE_PMD_VERSION);
319 DP_INFO(edev, " Driver version : %s\n", drv_ver);
320 DP_INFO(edev, " Firmware version : %s\n", ver_str);
322 snprintf(ver_str, MCP_DRV_VER_STR_SIZE,
324 (info->mfw_rev >> 24) & 0xff,
325 (info->mfw_rev >> 16) & 0xff,
326 (info->mfw_rev >> 8) & 0xff, (info->mfw_rev) & 0xff);
327 DP_INFO(edev, " Management Firmware version : %s\n", ver_str);
328 DP_INFO(edev, " Firmware file : %s\n", fw_file);
329 DP_INFO(edev, "*********************************\n");
333 static void qede_set_ucast_cmn_params(struct ecore_filter_ucast *ucast)
335 memset(ucast, 0, sizeof(struct ecore_filter_ucast));
336 ucast->is_rx_filter = true;
337 ucast->is_tx_filter = true;
338 /* ucast->assert_on_error = true; - For debug */
341 static void qede_set_cmn_tunn_param(struct ecore_tunnel_info *p_tunn,
342 uint8_t clss, bool mode, bool mask)
344 memset(p_tunn, 0, sizeof(struct ecore_tunnel_info));
345 p_tunn->vxlan.b_update_mode = mode;
346 p_tunn->vxlan.b_mode_enabled = mask;
347 p_tunn->b_update_rx_cls = true;
348 p_tunn->b_update_tx_cls = true;
349 p_tunn->vxlan.tun_cls = clss;
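/*
 * Usage sketch (assumption, mirroring qede_vxlan_tunn_config() below):
 * enabling VXLAN classification fills the tunnel params and then sends a
 * PF-update ramrod on each hwfn:
 *
 *   struct ecore_tunnel_info tunn;
 *   qede_set_cmn_tunn_param(&tunn, ECORE_TUNN_CLSS_MAC_VLAN, true, true);
 *   rc = ecore_sp_pf_update_tunn_cfg(p_hwfn, &tunn,
 *                                    ECORE_SPQ_MODE_CB, NULL);
 */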
353 qede_ucast_filter(struct rte_eth_dev *eth_dev, struct ecore_filter_ucast *ucast,
356 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
357 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
358 struct qede_ucast_entry *tmp = NULL;
359 struct qede_ucast_entry *u;
360 struct ether_addr *mac_addr;
362 mac_addr = (struct ether_addr *)ucast->mac;
364 SLIST_FOREACH(tmp, &qdev->uc_list_head, list) {
365 if ((memcmp(mac_addr, &tmp->mac,
366 ETHER_ADDR_LEN) == 0) &&
367 ucast->vlan == tmp->vlan) {
368 DP_ERR(edev, "Unicast MAC is already added"
369 " with vlan = %u, vni = %u\n",
370 ucast->vlan, ucast->vni);
374 u = rte_malloc(NULL, sizeof(struct qede_ucast_entry),
375 RTE_CACHE_LINE_SIZE);
377 DP_ERR(edev, "Failed to allocate memory for ucast entry\n");
380 ether_addr_copy(mac_addr, &u->mac);
381 u->vlan = ucast->vlan;
383 SLIST_INSERT_HEAD(&qdev->uc_list_head, u, list);
386 SLIST_FOREACH(tmp, &qdev->uc_list_head, list) {
387 if ((memcmp(mac_addr, &tmp->mac,
388 ETHER_ADDR_LEN) == 0) &&
389 ucast->vlan == tmp->vlan &&
390 ucast->vni == tmp->vni)
394 DP_INFO(edev, "Unicast MAC not found\n");
397 SLIST_REMOVE(&qdev->uc_list_head, tmp, qede_ucast_entry, list);
405 qede_mcast_filter(struct rte_eth_dev *eth_dev, struct ecore_filter_ucast *mcast,
408 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
409 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
410 struct ether_addr *mac_addr;
411 struct qede_mcast_entry *tmp = NULL;
412 struct qede_mcast_entry *m;
414 mac_addr = (struct ether_addr *)mcast->mac;
416 SLIST_FOREACH(tmp, &qdev->mc_list_head, list) {
417 if (memcmp(mac_addr, &tmp->mac, ETHER_ADDR_LEN) == 0) {
419 "Multicast MAC is already added\n");
423 m = rte_malloc(NULL, sizeof(struct qede_mcast_entry),
424 RTE_CACHE_LINE_SIZE);
427 "Did not allocate memory for mcast\n");
430 ether_addr_copy(mac_addr, &m->mac);
431 SLIST_INSERT_HEAD(&qdev->mc_list_head, m, list);
434 SLIST_FOREACH(tmp, &qdev->mc_list_head, list) {
435 if (memcmp(mac_addr, &tmp->mac, ETHER_ADDR_LEN) == 0)
439 DP_INFO(edev, "Multicast MAC not found\n");
442 SLIST_REMOVE(&qdev->mc_list_head, tmp,
443 qede_mcast_entry, list);
450 static enum _ecore_status_t
451 qede_mac_int_ops(struct rte_eth_dev *eth_dev, struct ecore_filter_ucast *ucast,
454 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
455 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
456 enum _ecore_status_t rc;
457 struct ecore_filter_mcast mcast;
458 struct qede_mcast_entry *tmp;
462 if (is_multicast_ether_addr((struct ether_addr *)ucast->mac)) {
464 if (qdev->num_mc_addr >= ECORE_MAX_MC_ADDRS) {
466 "Mcast filter table limit exceeded, "
467 "Please enable mcast promisc mode\n");
471 rc = qede_mcast_filter(eth_dev, ucast, add);
473 DP_INFO(edev, "num_mc_addrs = %u\n", qdev->num_mc_addr);
474 memset(&mcast, 0, sizeof(mcast));
475 mcast.num_mc_addrs = qdev->num_mc_addr;
476 mcast.opcode = ECORE_FILTER_ADD;
477 SLIST_FOREACH(tmp, &qdev->mc_list_head, list) {
478 ether_addr_copy(&tmp->mac,
479 (struct ether_addr *)&mcast.mac[j]);
482 rc = ecore_filter_mcast_cmd(edev, &mcast,
483 ECORE_SPQ_MODE_CB, NULL);
485 if (rc != ECORE_SUCCESS) {
486 DP_ERR(edev, "Failed to add multicast filter"
487 " rc = %d, op = %d\n", rc, add);
489 } else { /* Unicast */
491 if (qdev->num_uc_addr >=
492 qdev->dev_info.num_mac_filters) {
494 "Ucast filter table limit exceeded,"
495 " Please enable promisc mode\n");
499 rc = qede_ucast_filter(eth_dev, ucast, add);
501 rc = ecore_filter_ucast_cmd(edev, ucast,
502 ECORE_SPQ_MODE_CB, NULL);
503 if (rc != ECORE_SUCCESS) {
504 DP_ERR(edev, "MAC filter failed, rc = %d, op = %d\n",
513 qede_mac_addr_add(struct rte_eth_dev *eth_dev, struct ether_addr *mac_addr,
514 __rte_unused uint32_t index, __rte_unused uint32_t pool)
516 struct ecore_filter_ucast ucast;
519 qede_set_ucast_cmn_params(&ucast);
520 ucast.type = ECORE_FILTER_MAC;
521 ether_addr_copy(mac_addr, (struct ether_addr *)&ucast.mac);
522 return (int)qede_mac_int_ops(eth_dev, &ucast, 1);
527 qede_mac_addr_remove(struct rte_eth_dev *eth_dev, uint32_t index)
529 struct qede_dev *qdev = eth_dev->data->dev_private;
530 struct ecore_dev *edev = &qdev->edev;
531 struct ecore_filter_ucast ucast;
533 PMD_INIT_FUNC_TRACE(edev);
535 if (index >= qdev->dev_info.num_mac_filters) {
536 DP_ERR(edev, "Index %u is above MAC filter limit %u\n",
537 index, qdev->dev_info.num_mac_filters);
541 qede_set_ucast_cmn_params(&ucast);
542 ucast.opcode = ECORE_FILTER_REMOVE;
543 ucast.type = ECORE_FILTER_MAC;
545 /* Use the index maintained by rte */
546 ether_addr_copy(&eth_dev->data->mac_addrs[index],
547 (struct ether_addr *)&ucast.mac);
549 ecore_filter_ucast_cmd(edev, &ucast, ECORE_SPQ_MODE_CB, NULL);
553 qede_mac_addr_set(struct rte_eth_dev *eth_dev, struct ether_addr *mac_addr)
555 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
556 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
558 if (IS_VF(edev) && !ecore_vf_check_mac(ECORE_LEADING_HWFN(edev),
559 mac_addr->addr_bytes)) {
560 DP_ERR(edev, "Setting MAC address is not allowed\n");
561 ether_addr_copy(&qdev->primary_mac,
562 &eth_dev->data->mac_addrs[0]);
566 qede_mac_addr_add(eth_dev, mac_addr, 0, 0);
569 static void qede_config_accept_any_vlan(struct qede_dev *qdev, bool action)
571 struct ecore_dev *edev = &qdev->edev;
572 struct qed_update_vport_params params = {
574 .accept_any_vlan = action,
575 .update_accept_any_vlan_flg = 1,
579 /* Proceed only if action actually needs to be performed */
580 if (qdev->accept_any_vlan == action)
583 rc = qdev->ops->vport_update(edev, &params);
585 DP_ERR(edev, "Failed to %s accept-any-vlan\n",
586 action ? "enable" : "disable");
588 DP_INFO(edev, "%s accept-any-vlan\n",
589 action ? "enabled" : "disabled");
590 qdev->accept_any_vlan = action;
594 static int qede_vlan_stripping(struct rte_eth_dev *eth_dev, bool set_stripping)
596 struct qed_update_vport_params vport_update_params;
597 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
598 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
601 memset(&vport_update_params, 0, sizeof(vport_update_params));
602 vport_update_params.vport_id = 0;
603 vport_update_params.update_inner_vlan_removal_flg = 1;
604 vport_update_params.inner_vlan_removal_flg = set_stripping;
605 rc = qdev->ops->vport_update(edev, &vport_update_params);
607 DP_ERR(edev, "Update V-PORT failed %d\n", rc);
610 qdev->vlan_strip_flg = set_stripping;
615 static int qede_vlan_filter_set(struct rte_eth_dev *eth_dev,
616 uint16_t vlan_id, int on)
618 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
619 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
620 struct qed_dev_eth_info *dev_info = &qdev->dev_info;
621 struct qede_vlan_entry *tmp = NULL;
622 struct qede_vlan_entry *vlan;
623 struct ecore_filter_ucast ucast;
627 if (qdev->configured_vlans == dev_info->num_vlan_filters) {
628 DP_ERR(edev, "Reached max VLAN filter limit;"
629 " enabling accept_any_vlan\n");
630 qede_config_accept_any_vlan(qdev, true);
634 SLIST_FOREACH(tmp, &qdev->vlan_list_head, list) {
635 if (tmp->vid == vlan_id) {
636 DP_ERR(edev, "VLAN %u already configured\n",
642 vlan = rte_malloc(NULL, sizeof(struct qede_vlan_entry),
643 RTE_CACHE_LINE_SIZE);
646 DP_ERR(edev, "Failed to allocate memory for VLAN entry\n");
650 qede_set_ucast_cmn_params(&ucast);
651 ucast.opcode = ECORE_FILTER_ADD;
652 ucast.type = ECORE_FILTER_VLAN;
653 ucast.vlan = vlan_id;
654 rc = ecore_filter_ucast_cmd(edev, &ucast, ECORE_SPQ_MODE_CB,
657 DP_ERR(edev, "Failed to add VLAN %u rc %d\n", vlan_id,
662 SLIST_INSERT_HEAD(&qdev->vlan_list_head, vlan, list);
663 qdev->configured_vlans++;
664 DP_INFO(edev, "VLAN %u added, configured_vlans %u\n",
665 vlan_id, qdev->configured_vlans);
668 SLIST_FOREACH(tmp, &qdev->vlan_list_head, list) {
669 if (tmp->vid == vlan_id)
674 if (qdev->configured_vlans == 0) {
676 "No VLAN filters configured yet\n");
680 DP_ERR(edev, "VLAN %u not configured\n", vlan_id);
684 SLIST_REMOVE(&qdev->vlan_list_head, tmp, qede_vlan_entry, list);
686 qede_set_ucast_cmn_params(&ucast);
687 ucast.opcode = ECORE_FILTER_REMOVE;
688 ucast.type = ECORE_FILTER_VLAN;
689 ucast.vlan = vlan_id;
690 rc = ecore_filter_ucast_cmd(edev, &ucast, ECORE_SPQ_MODE_CB,
693 DP_ERR(edev, "Failed to delete VLAN %u rc %d\n",
696 qdev->configured_vlans--;
697 DP_INFO(edev, "VLAN %u removed, configured_vlans %u\n",
698 vlan_id, qdev->configured_vlans);
705 static void qede_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask)
707 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
708 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
709 struct rte_eth_rxmode *rxmode = &eth_dev->data->dev_conf.rxmode;
711 if (mask & ETH_VLAN_STRIP_MASK) {
712 if (rxmode->hw_vlan_strip)
713 (void)qede_vlan_stripping(eth_dev, 1);
715 (void)qede_vlan_stripping(eth_dev, 0);
718 if (mask & ETH_VLAN_FILTER_MASK) {
719 /* VLAN filtering kicks in when a VLAN is added */
720 if (rxmode->hw_vlan_filter) {
721 qede_vlan_filter_set(eth_dev, 0, 1);
723 if (qdev->configured_vlans > 1) { /* Excluding VLAN0 */
725 " Please remove existing VLAN filters"
726 " before disabling VLAN filtering\n");
727 /* Signal app that VLAN filtering is still
730 rxmode->hw_vlan_filter = true;
732 qede_vlan_filter_set(eth_dev, 0, 0);
737 if (mask & ETH_VLAN_EXTEND_MASK)
738 DP_INFO(edev, "No offloads are supported with VLAN Q-in-Q"
739 " and classification is based on outer tag only\n");
741 DP_INFO(edev, "vlan offload mask %d vlan-strip %d vlan-filter %d\n",
742 mask, rxmode->hw_vlan_strip, rxmode->hw_vlan_filter);
745 static int qede_init_vport(struct qede_dev *qdev)
747 struct ecore_dev *edev = &qdev->edev;
748 struct qed_start_vport_params start = {0};
751 start.remove_inner_vlan = 1;
752 start.enable_lro = qdev->enable_lro;
753 start.mtu = ETHER_MTU + QEDE_ETH_OVERHEAD;
755 start.drop_ttl0 = false;
756 start.clear_stats = 1;
757 start.handle_ptp_pkts = 0;
759 rc = qdev->ops->vport_start(edev, &start);
761 DP_ERR(edev, "Start V-PORT failed %d\n", rc);
766 "Start vport ramrod passed, vport_id = %d, MTU = %u\n",
767 start.vport_id, ETHER_MTU);
772 static void qede_prandom_bytes(uint32_t *buff)
776 srand((unsigned int)time(NULL));
777 for (i = 0; i < ECORE_RSS_KEY_SIZE; i++)
781 int qede_config_rss(struct rte_eth_dev *eth_dev)
783 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
784 #ifdef RTE_LIBRTE_QEDE_DEBUG_INFO
785 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
787 uint32_t def_rss_key[ECORE_RSS_KEY_SIZE];
788 struct rte_eth_rss_reta_entry64 reta_conf[2];
789 struct rte_eth_rss_conf rss_conf;
790 uint32_t i, id, pos, q;
792 rss_conf = eth_dev->data->dev_conf.rx_adv_conf.rss_conf;
793 if (!rss_conf.rss_key) {
794 DP_INFO(edev, "Applying driver default key\n");
795 rss_conf.rss_key_len = ECORE_RSS_KEY_SIZE * sizeof(uint32_t);
796 qede_prandom_bytes(&def_rss_key[0]);
797 rss_conf.rss_key = (uint8_t *)&def_rss_key[0];
800 /* Configure RSS hash */
801 if (qede_rss_hash_update(eth_dev, &rss_conf))
804 /* Configure default RETA */
805 memset(reta_conf, 0, sizeof(reta_conf));
806 for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++)
807 reta_conf[i / RTE_RETA_GROUP_SIZE].mask = UINT64_MAX;
809 for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++) {
810 id = i / RTE_RETA_GROUP_SIZE;
811 pos = i % RTE_RETA_GROUP_SIZE;
812 q = i % QEDE_RSS_COUNT(qdev);
813 reta_conf[id].reta[pos] = q;
815 if (qede_rss_reta_update(eth_dev, &reta_conf[0],
816 ECORE_RSS_IND_TABLE_SIZE))
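/*
 * Worked example (illustrative): with QEDE_RSS_COUNT(qdev) == 4 RX queues,
 * the default 128-entry RETA built above spreads buckets round-robin,
 *   reta[0] = 0, reta[1] = 1, reta[2] = 2, reta[3] = 3, reta[4] = 0, ...
 * i.e. q = i % QEDE_RSS_COUNT(qdev) for every indirection-table index i.
 */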
822 static int qede_dev_configure(struct rte_eth_dev *eth_dev)
824 struct qede_dev *qdev = eth_dev->data->dev_private;
825 struct ecore_dev *edev = &qdev->edev;
826 struct rte_eth_rxmode *rxmode = &eth_dev->data->dev_conf.rxmode;
829 PMD_INIT_FUNC_TRACE(edev);
831 /* Check requirements for 100G mode */
832 if (edev->num_hwfns > 1) {
833 if (eth_dev->data->nb_rx_queues < 2 ||
834 eth_dev->data->nb_tx_queues < 2) {
835 DP_ERR(edev, "100G mode needs min. 2 RX/TX queues\n");
839 if ((eth_dev->data->nb_rx_queues % 2 != 0) ||
840 (eth_dev->data->nb_tx_queues % 2 != 0)) {
842 "100G mode needs even no. of RX/TX queues\n");
847 /* Sanity checks and throw warnings */
848 if (rxmode->enable_scatter == 1)
849 eth_dev->data->scattered_rx = 1;
851 if (!rxmode->hw_strip_crc)
852 DP_INFO(edev, "L2 CRC stripping is always enabled in hw\n");
854 if (!rxmode->hw_ip_checksum)
855 DP_INFO(edev, "IP/UDP/TCP checksum offload is always enabled "
858 if (rxmode->enable_lro) {
859 qdev->enable_lro = true;
860 /* Enable scatter mode for LRO */
861 if (!rxmode->enable_scatter)
862 eth_dev->data->scattered_rx = 1;
865 /* Check for the port restart case */
866 if (qdev->state != QEDE_DEV_INIT) {
867 rc = qdev->ops->vport_stop(edev, 0);
870 qede_dealloc_fp_resc(eth_dev);
873 qdev->fp_num_tx = eth_dev->data->nb_tx_queues;
874 qdev->fp_num_rx = eth_dev->data->nb_rx_queues;
875 qdev->num_queues = qdev->fp_num_tx + qdev->fp_num_rx;
877 /* Fastpath status block should be initialized before sending
878 * VPORT-START in the case of VF. Anyway, do it for both VF/PF.
880 rc = qede_alloc_fp_resc(qdev);
884 /* Issue VPORT-START with default config values to allow
885 * other port configurations early on.
887 rc = qede_init_vport(qdev);
891 if (!(rxmode->mq_mode == ETH_MQ_RX_RSS ||
892 rxmode->mq_mode == ETH_MQ_RX_NONE)) {
893 DP_ERR(edev, "Unsupported RSS mode\n");
894 qdev->ops->vport_stop(edev, 0);
895 qede_dealloc_fp_resc(eth_dev);
899 /* Flow director mode check */
900 rc = qede_check_fdir_support(eth_dev);
902 qdev->ops->vport_stop(edev, 0);
903 qede_dealloc_fp_resc(eth_dev);
906 SLIST_INIT(&qdev->fdir_info.fdir_list_head);
908 SLIST_INIT(&qdev->vlan_list_head);
910 /* Enable VLAN offloads by default */
911 qede_vlan_offload_set(eth_dev, ETH_VLAN_STRIP_MASK |
912 ETH_VLAN_FILTER_MASK |
913 ETH_VLAN_EXTEND_MASK);
915 qdev->state = QEDE_DEV_CONFIG;
917 DP_INFO(edev, "Allocated RSS=%d TSS=%d (with CoS=%d)\n",
918 (int)QEDE_RSS_COUNT(qdev), (int)QEDE_TSS_COUNT(qdev),
924 /* Info about HW descriptor ring limitations */
925 static const struct rte_eth_desc_lim qede_rx_desc_lim = {
926 .nb_max = NUM_RX_BDS_MAX,
928 .nb_align = 128 /* lowest common multiple */
931 static const struct rte_eth_desc_lim qede_tx_desc_lim = {
932 .nb_max = NUM_TX_BDS_MAX,
935 .nb_seg_max = ETH_TX_MAX_BDS_PER_LSO_PACKET,
936 .nb_mtu_seg_max = ETH_TX_MAX_BDS_PER_NON_LSO_PACKET
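/*
 * Example (illustrative): since .nb_align is 128, ring sizes are valid only
 * in multiples of 128 descriptors; a request for, say, 1000 RX descriptors
 * would typically be rejected by the ethdev descriptor-limit checks, while
 * 1024 (8 * 128) is accepted up to NUM_RX_BDS_MAX.
 */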
940 qede_dev_info_get(struct rte_eth_dev *eth_dev,
941 struct rte_eth_dev_info *dev_info)
943 struct qede_dev *qdev = eth_dev->data->dev_private;
944 struct ecore_dev *edev = &qdev->edev;
945 struct qed_link_output link;
946 uint32_t speed_cap = 0;
948 PMD_INIT_FUNC_TRACE(edev);
950 dev_info->pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
951 dev_info->min_rx_bufsize = (uint32_t)QEDE_MIN_RX_BUFF_SIZE;
952 dev_info->max_rx_pktlen = (uint32_t)ETH_TX_MAX_NON_LSO_PKT_LEN;
953 dev_info->rx_desc_lim = qede_rx_desc_lim;
954 dev_info->tx_desc_lim = qede_tx_desc_lim;
957 dev_info->max_rx_queues = (uint16_t)RTE_MIN(
958 QEDE_MAX_RSS_CNT(qdev), QEDE_PF_NUM_CONNS / 2);
960 dev_info->max_rx_queues = (uint16_t)RTE_MIN(
961 QEDE_MAX_RSS_CNT(qdev), ECORE_MAX_VF_CHAINS_PER_PF);
962 dev_info->max_tx_queues = dev_info->max_rx_queues;
964 dev_info->max_mac_addrs = qdev->dev_info.num_mac_filters;
965 dev_info->max_vfs = 0;
966 dev_info->reta_size = ECORE_RSS_IND_TABLE_SIZE;
967 dev_info->hash_key_size = ECORE_RSS_KEY_SIZE * sizeof(uint32_t);
968 dev_info->flow_type_rss_offloads = (uint64_t)QEDE_RSS_OFFLOAD_ALL;
970 dev_info->default_txconf = (struct rte_eth_txconf) {
971 .txq_flags = QEDE_TXQ_FLAGS,
974 dev_info->rx_offload_capa = (DEV_RX_OFFLOAD_VLAN_STRIP |
975 DEV_RX_OFFLOAD_IPV4_CKSUM |
976 DEV_RX_OFFLOAD_UDP_CKSUM |
977 DEV_RX_OFFLOAD_TCP_CKSUM |
978 DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
979 DEV_RX_OFFLOAD_TCP_LRO);
981 dev_info->tx_offload_capa = (DEV_TX_OFFLOAD_VLAN_INSERT |
982 DEV_TX_OFFLOAD_IPV4_CKSUM |
983 DEV_TX_OFFLOAD_UDP_CKSUM |
984 DEV_TX_OFFLOAD_TCP_CKSUM |
985 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
986 DEV_TX_OFFLOAD_TCP_TSO |
987 DEV_TX_OFFLOAD_VXLAN_TNL_TSO);
989 memset(&link, 0, sizeof(struct qed_link_output));
990 qdev->ops->common->get_link(edev, &link);
991 if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
992 speed_cap |= ETH_LINK_SPEED_1G;
993 if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
994 speed_cap |= ETH_LINK_SPEED_10G;
995 if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G)
996 speed_cap |= ETH_LINK_SPEED_25G;
997 if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G)
998 speed_cap |= ETH_LINK_SPEED_40G;
999 if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
1000 speed_cap |= ETH_LINK_SPEED_50G;
1001 if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G)
1002 speed_cap |= ETH_LINK_SPEED_100G;
1003 dev_info->speed_capa = speed_cap;
1006 /* return 0 means link status changed, -1 means not changed */
1008 qede_link_update(struct rte_eth_dev *eth_dev, __rte_unused int wait_to_complete)
1010 struct qede_dev *qdev = eth_dev->data->dev_private;
1011 struct ecore_dev *edev = &qdev->edev;
1012 uint16_t link_duplex;
1013 struct qed_link_output link;
1014 struct rte_eth_link *curr = &eth_dev->data->dev_link;
1016 memset(&link, 0, sizeof(struct qed_link_output));
1017 qdev->ops->common->get_link(edev, &link);
1020 curr->link_speed = link.speed;
1023 switch (link.duplex) {
1024 case QEDE_DUPLEX_HALF:
1025 link_duplex = ETH_LINK_HALF_DUPLEX;
1027 case QEDE_DUPLEX_FULL:
1028 link_duplex = ETH_LINK_FULL_DUPLEX;
1030 case QEDE_DUPLEX_UNKNOWN:
1034 curr->link_duplex = link_duplex;
1037 curr->link_status = (link.link_up) ? ETH_LINK_UP : ETH_LINK_DOWN;
1040 curr->link_autoneg = (link.supported_caps & QEDE_SUPPORTED_AUTONEG) ?
1041 ETH_LINK_AUTONEG : ETH_LINK_FIXED;
1043 DP_INFO(edev, "Link - Speed %u Mode %u AN %u Status %u\n",
1044 curr->link_speed, curr->link_duplex,
1045 curr->link_autoneg, curr->link_status);
1047 /* return 0 means link status changed, -1 means not changed */
1048 return ((curr->link_status == link.link_up) ? -1 : 0);
1051 static void qede_promiscuous_enable(struct rte_eth_dev *eth_dev)
1053 #ifdef RTE_LIBRTE_QEDE_DEBUG_INIT
1054 struct qede_dev *qdev = eth_dev->data->dev_private;
1055 struct ecore_dev *edev = &qdev->edev;
1057 PMD_INIT_FUNC_TRACE(edev);
1060 enum qed_filter_rx_mode_type type = QED_FILTER_RX_MODE_TYPE_PROMISC;
1062 if (rte_eth_allmulticast_get(eth_dev->data->port_id) == 1)
1063 type |= QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC;
1065 qed_configure_filter_rx_mode(eth_dev, type);
1068 static void qede_promiscuous_disable(struct rte_eth_dev *eth_dev)
1070 #ifdef RTE_LIBRTE_QEDE_DEBUG_INIT
1071 struct qede_dev *qdev = eth_dev->data->dev_private;
1072 struct ecore_dev *edev = &qdev->edev;
1074 PMD_INIT_FUNC_TRACE(edev);
1077 if (rte_eth_allmulticast_get(eth_dev->data->port_id) == 1)
1078 qed_configure_filter_rx_mode(eth_dev,
1079 QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC);
1081 qed_configure_filter_rx_mode(eth_dev,
1082 QED_FILTER_RX_MODE_TYPE_REGULAR);
1085 static void qede_poll_sp_sb_cb(void *param)
1087 struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param;
1088 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
1089 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
1092 qede_interrupt_action(ECORE_LEADING_HWFN(edev));
1093 qede_interrupt_action(&edev->hwfns[1]);
1095 rc = rte_eal_alarm_set(timer_period * US_PER_S,
1099 DP_ERR(edev, "Unable to start periodic"
1100 " timer rc %d\n", rc);
1101 assert(false && "Unable to start periodic timer");
1105 static void qede_dev_close(struct rte_eth_dev *eth_dev)
1107 struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
1108 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
1109 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
1112 PMD_INIT_FUNC_TRACE(edev);
1114 qede_fdir_dealloc_resc(eth_dev);
1116 /* dev_stop() shall cleanup fp resources in hw but without releasing
1117 * dma memories and sw structures so that dev_start() can be called
1118 * by the app without reconfiguration. However, in dev_close() we
1119 * can release all the resources so that the device can be brought up anew
1121 if (qdev->state != QEDE_DEV_STOP)
1122 qede_dev_stop(eth_dev);
1124 DP_INFO(edev, "Device is already stopped\n");
1126 rc = qdev->ops->vport_stop(edev, 0);
1128 DP_ERR(edev, "Failed to stop VPORT\n");
1130 qede_dealloc_fp_resc(eth_dev);
1132 qdev->ops->common->slowpath_stop(edev);
1134 qdev->ops->common->remove(edev);
1136 rte_intr_disable(&pci_dev->intr_handle);
1138 rte_intr_callback_unregister(&pci_dev->intr_handle,
1139 qede_interrupt_handler, (void *)eth_dev);
1141 if (edev->num_hwfns > 1)
1142 rte_eal_alarm_cancel(qede_poll_sp_sb_cb, (void *)eth_dev);
1144 qdev->state = QEDE_DEV_INIT; /* Go back to init state */
1148 qede_get_stats(struct rte_eth_dev *eth_dev, struct rte_eth_stats *eth_stats)
1150 struct qede_dev *qdev = eth_dev->data->dev_private;
1151 struct ecore_dev *edev = &qdev->edev;
1152 struct ecore_eth_stats stats;
1153 unsigned int i = 0, j = 0, qid;
1154 unsigned int rxq_stat_cntrs, txq_stat_cntrs;
1155 struct qede_tx_queue *txq;
1157 qdev->ops->get_vport_stats(edev, &stats);
1160 eth_stats->ipackets = stats.rx_ucast_pkts +
1161 stats.rx_mcast_pkts + stats.rx_bcast_pkts;
1163 eth_stats->ibytes = stats.rx_ucast_bytes +
1164 stats.rx_mcast_bytes + stats.rx_bcast_bytes;
1166 eth_stats->ierrors = stats.rx_crc_errors +
1167 stats.rx_align_errors +
1168 stats.rx_carrier_errors +
1169 stats.rx_oversize_packets +
1170 stats.rx_jabbers + stats.rx_undersize_packets;
1172 eth_stats->rx_nombuf = stats.no_buff_discards;
1174 eth_stats->imissed = stats.mftag_filter_discards +
1175 stats.mac_filter_discards +
1176 stats.no_buff_discards + stats.brb_truncates + stats.brb_discards;
1179 eth_stats->opackets = stats.tx_ucast_pkts +
1180 stats.tx_mcast_pkts + stats.tx_bcast_pkts;
1182 eth_stats->obytes = stats.tx_ucast_bytes +
1183 stats.tx_mcast_bytes + stats.tx_bcast_bytes;
1185 eth_stats->oerrors = stats.tx_err_drop_pkts;
1188 rxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(qdev),
1189 RTE_ETHDEV_QUEUE_STAT_CNTRS);
1190 txq_stat_cntrs = RTE_MIN(QEDE_TSS_COUNT(qdev),
1191 RTE_ETHDEV_QUEUE_STAT_CNTRS);
1192 if ((rxq_stat_cntrs != (unsigned int)QEDE_RSS_COUNT(qdev)) ||
1193 (txq_stat_cntrs != (unsigned int)QEDE_TSS_COUNT(qdev)))
1194 DP_VERBOSE(edev, ECORE_MSG_DEBUG,
1195 "Not all the queue stats will be displayed. Set"
1196 " RTE_ETHDEV_QUEUE_STAT_CNTRS config param"
1197 " appropriately and retry.\n");
1199 for (qid = 0; qid < QEDE_QUEUE_CNT(qdev); qid++) {
1200 if (qdev->fp_array[qid].type & QEDE_FASTPATH_RX) {
1201 eth_stats->q_ipackets[i] =
1203 ((char *)(qdev->fp_array[(qid)].rxq)) +
1204 offsetof(struct qede_rx_queue,
1206 eth_stats->q_errors[i] =
1208 ((char *)(qdev->fp_array[(qid)].rxq)) +
1209 offsetof(struct qede_rx_queue,
1212 ((char *)(qdev->fp_array[(qid)].rxq)) +
1213 offsetof(struct qede_rx_queue,
1217 if (i == rxq_stat_cntrs)
1221 for (qid = 0; qid < QEDE_QUEUE_CNT(qdev); qid++) {
1222 if (qdev->fp_array[qid].type & QEDE_FASTPATH_TX) {
1223 txq = qdev->fp_array[(qid)].txqs[0];
1224 eth_stats->q_opackets[j] =
1225 *((uint64_t *)(uintptr_t)
1226 (((uint64_t)(uintptr_t)(txq)) +
1227 offsetof(struct qede_tx_queue,
1231 if (j == txq_stat_cntrs)
1237 qede_get_xstats_count(struct qede_dev *qdev) {
1238 return RTE_DIM(qede_xstats_strings) +
1239 (RTE_DIM(qede_rxq_xstats_strings) *
1240 RTE_MIN(QEDE_RSS_COUNT(qdev),
1241 RTE_ETHDEV_QUEUE_STAT_CNTRS));
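/*
 * Worked example (hypothetical queue count): with 4 RX queues (below the
 * RTE_ETHDEV_QUEUE_STAT_CNTRS cap) and the 3 per-queue strings in
 * qede_rxq_xstats_strings, the count is
 *   RTE_DIM(qede_xstats_strings) + 3 * 4
 * which is exactly how many entries qede_get_xstats_names() and
 * qede_get_xstats() fill.
 */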
1245 qede_get_xstats_names(struct rte_eth_dev *dev,
1246 struct rte_eth_xstat_name *xstats_names,
1247 __rte_unused unsigned int limit)
1249 struct qede_dev *qdev = dev->data->dev_private;
1250 const unsigned int stat_cnt = qede_get_xstats_count(qdev);
1251 unsigned int i, qid, stat_idx = 0;
1252 unsigned int rxq_stat_cntrs;
1254 if (xstats_names != NULL) {
1255 for (i = 0; i < RTE_DIM(qede_xstats_strings); i++) {
1256 snprintf(xstats_names[stat_idx].name,
1257 sizeof(xstats_names[stat_idx].name),
1259 qede_xstats_strings[i].name);
1263 rxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(qdev),
1264 RTE_ETHDEV_QUEUE_STAT_CNTRS);
1265 for (qid = 0; qid < rxq_stat_cntrs; qid++) {
1266 for (i = 0; i < RTE_DIM(qede_rxq_xstats_strings); i++) {
1267 snprintf(xstats_names[stat_idx].name,
1268 sizeof(xstats_names[stat_idx].name),
1270 qede_rxq_xstats_strings[i].name, qid,
1271 qede_rxq_xstats_strings[i].name + 4);
1281 qede_get_xstats(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
1284 struct qede_dev *qdev = dev->data->dev_private;
1285 struct ecore_dev *edev = &qdev->edev;
1286 struct ecore_eth_stats stats;
1287 const unsigned int num = qede_get_xstats_count(qdev);
1288 unsigned int i, qid, stat_idx = 0;
1289 unsigned int rxq_stat_cntrs;
1294 qdev->ops->get_vport_stats(edev, &stats);
1296 for (i = 0; i < RTE_DIM(qede_xstats_strings); i++) {
1297 xstats[stat_idx].value = *(uint64_t *)(((char *)&stats) +
1298 qede_xstats_strings[i].offset);
1299 xstats[stat_idx].id = stat_idx;
1303 rxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(qdev),
1304 RTE_ETHDEV_QUEUE_STAT_CNTRS);
1305 for (qid = 0; qid < rxq_stat_cntrs; qid++) {
1306 if (qdev->fp_array[qid].type & QEDE_FASTPATH_RX) {
1307 for (i = 0; i < RTE_DIM(qede_rxq_xstats_strings); i++) {
1308 xstats[stat_idx].value = *(uint64_t *)(
1309 ((char *)(qdev->fp_array[(qid)].rxq)) +
1310 qede_rxq_xstats_strings[i].offset);
1311 xstats[stat_idx].id = stat_idx;
1321 qede_reset_xstats(struct rte_eth_dev *dev)
1323 struct qede_dev *qdev = dev->data->dev_private;
1324 struct ecore_dev *edev = &qdev->edev;
1326 ecore_reset_vport_stats(edev);
1329 int qede_dev_set_link_state(struct rte_eth_dev *eth_dev, bool link_up)
1331 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
1332 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
1333 struct qed_link_params link_params;
1336 DP_INFO(edev, "setting link state %d\n", link_up);
1337 memset(&link_params, 0, sizeof(link_params));
1338 link_params.link_up = link_up;
1339 rc = qdev->ops->common->set_link(edev, &link_params);
1340 if (rc != ECORE_SUCCESS)
1341 DP_ERR(edev, "Unable to set link state %d\n", link_up);
1346 static int qede_dev_set_link_up(struct rte_eth_dev *eth_dev)
1348 return qede_dev_set_link_state(eth_dev, true);
1351 static int qede_dev_set_link_down(struct rte_eth_dev *eth_dev)
1353 return qede_dev_set_link_state(eth_dev, false);
1356 static void qede_reset_stats(struct rte_eth_dev *eth_dev)
1358 struct qede_dev *qdev = eth_dev->data->dev_private;
1359 struct ecore_dev *edev = &qdev->edev;
1361 ecore_reset_vport_stats(edev);
1364 static void qede_allmulticast_enable(struct rte_eth_dev *eth_dev)
1366 enum qed_filter_rx_mode_type type =
1367 QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC;
1369 if (rte_eth_promiscuous_get(eth_dev->data->port_id) == 1)
1370 type |= QED_FILTER_RX_MODE_TYPE_PROMISC;
1372 qed_configure_filter_rx_mode(eth_dev, type);
1375 static void qede_allmulticast_disable(struct rte_eth_dev *eth_dev)
1377 if (rte_eth_promiscuous_get(eth_dev->data->port_id) == 1)
1378 qed_configure_filter_rx_mode(eth_dev,
1379 QED_FILTER_RX_MODE_TYPE_PROMISC);
1381 qed_configure_filter_rx_mode(eth_dev,
1382 QED_FILTER_RX_MODE_TYPE_REGULAR);
1385 static int qede_flow_ctrl_set(struct rte_eth_dev *eth_dev,
1386 struct rte_eth_fc_conf *fc_conf)
1388 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
1389 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
1390 struct qed_link_output current_link;
1391 struct qed_link_params params;
1393 memset(&current_link, 0, sizeof(current_link));
1394 qdev->ops->common->get_link(edev, &current_link);
1396 memset(&params, 0, sizeof(params));
1397 params.override_flags |= QED_LINK_OVERRIDE_PAUSE_CONFIG;
1398 if (fc_conf->autoneg) {
1399 if (!(current_link.supported_caps & QEDE_SUPPORTED_AUTONEG)) {
1400 DP_ERR(edev, "Autoneg not supported\n");
1403 params.pause_config |= QED_LINK_PAUSE_AUTONEG_ENABLE;
1406 /* Pause is assumed to be supported (SUPPORTED_Pause) */
1407 if (fc_conf->mode == RTE_FC_FULL)
1408 params.pause_config |= (QED_LINK_PAUSE_TX_ENABLE |
1409 QED_LINK_PAUSE_RX_ENABLE);
1410 if (fc_conf->mode == RTE_FC_TX_PAUSE)
1411 params.pause_config |= QED_LINK_PAUSE_TX_ENABLE;
1412 if (fc_conf->mode == RTE_FC_RX_PAUSE)
1413 params.pause_config |= QED_LINK_PAUSE_RX_ENABLE;
1415 params.link_up = true;
1416 (void)qdev->ops->common->set_link(edev, &params);
1421 static int qede_flow_ctrl_get(struct rte_eth_dev *eth_dev,
1422 struct rte_eth_fc_conf *fc_conf)
1424 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
1425 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
1426 struct qed_link_output current_link;
1428 memset(&current_link, 0, sizeof(current_link));
1429 qdev->ops->common->get_link(edev, &current_link);
1431 if (current_link.pause_config & QED_LINK_PAUSE_AUTONEG_ENABLE)
1432 fc_conf->autoneg = true;
1434 if (current_link.pause_config & (QED_LINK_PAUSE_RX_ENABLE |
1435 QED_LINK_PAUSE_TX_ENABLE))
1436 fc_conf->mode = RTE_FC_FULL;
1437 else if (current_link.pause_config & QED_LINK_PAUSE_RX_ENABLE)
1438 fc_conf->mode = RTE_FC_RX_PAUSE;
1439 else if (current_link.pause_config & QED_LINK_PAUSE_TX_ENABLE)
1440 fc_conf->mode = RTE_FC_TX_PAUSE;
1442 fc_conf->mode = RTE_FC_NONE;
1447 static const uint32_t *
1448 qede_dev_supported_ptypes_get(struct rte_eth_dev *eth_dev)
1450 static const uint32_t ptypes[] = {
1456 if (eth_dev->rx_pkt_burst == qede_recv_pkts)
1462 static void qede_init_rss_caps(uint8_t *rss_caps, uint64_t hf)
1465 *rss_caps |= (hf & ETH_RSS_IPV4) ? ECORE_RSS_IPV4 : 0;
1466 *rss_caps |= (hf & ETH_RSS_IPV6) ? ECORE_RSS_IPV6 : 0;
1467 *rss_caps |= (hf & ETH_RSS_IPV6_EX) ? ECORE_RSS_IPV6 : 0;
1468 *rss_caps |= (hf & ETH_RSS_NONFRAG_IPV4_TCP) ? ECORE_RSS_IPV4_TCP : 0;
1469 *rss_caps |= (hf & ETH_RSS_NONFRAG_IPV6_TCP) ? ECORE_RSS_IPV6_TCP : 0;
1470 *rss_caps |= (hf & ETH_RSS_IPV6_TCP_EX) ? ECORE_RSS_IPV6_TCP : 0;
1471 *rss_caps |= (hf & ETH_RSS_NONFRAG_IPV4_UDP) ? ECORE_RSS_IPV4_UDP : 0;
1472 *rss_caps |= (hf & ETH_RSS_NONFRAG_IPV6_UDP) ? ECORE_RSS_IPV6_UDP : 0;
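/*
 * Example (illustrative): an rte_eth_rss_conf.rss_hf of
 * ETH_RSS_IPV4 | ETH_RSS_NONFRAG_IPV4_TCP maps here to
 * ECORE_RSS_IPV4 | ECORE_RSS_IPV4_TCP; the *_EX variants share the base
 * IPv6 capability bits, so they add no extra hardware caps.
 */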
1475 int qede_rss_hash_update(struct rte_eth_dev *eth_dev,
1476 struct rte_eth_rss_conf *rss_conf)
1478 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
1479 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
1480 struct ecore_sp_vport_update_params vport_update_params;
1481 struct ecore_rss_params rss_params;
1482 struct ecore_hwfn *p_hwfn;
1483 uint32_t *key = (uint32_t *)rss_conf->rss_key;
1484 uint64_t hf = rss_conf->rss_hf;
1485 uint8_t len = rss_conf->rss_key_len;
1490 memset(&vport_update_params, 0, sizeof(vport_update_params));
1491 memset(&rss_params, 0, sizeof(rss_params));
1493 DP_INFO(edev, "RSS hf = 0x%lx len = %u key = %p\n",
1494 (unsigned long)hf, len, key);
1498 DP_INFO(edev, "Enabling rss\n");
1501 qede_init_rss_caps(&rss_params.rss_caps, hf);
1502 rss_params.update_rss_capabilities = 1;
1506 if (len > (ECORE_RSS_KEY_SIZE * sizeof(uint32_t))) {
1507 DP_ERR(edev, "RSS key length exceeds limit\n");
1510 DP_INFO(edev, "Applying user supplied hash key\n");
1511 rss_params.update_rss_key = 1;
1512 memcpy(&rss_params.rss_key, key, len);
1514 rss_params.rss_enable = 1;
1517 rss_params.update_rss_config = 1;
1518 /* tbl_size has to be set with capabilities */
1519 rss_params.rss_table_size_log = 7;
1520 vport_update_params.vport_id = 0;
1521 /* pass the L2 handles instead of qids */
1522 for (i = 0 ; i < ECORE_RSS_IND_TABLE_SIZE ; i++) {
1523 idx = qdev->rss_ind_table[i];
1524 rss_params.rss_ind_table[i] = qdev->fp_array[idx].rxq->handle;
1526 vport_update_params.rss_params = &rss_params;
1528 for_each_hwfn(edev, i) {
1529 p_hwfn = &edev->hwfns[i];
1530 vport_update_params.opaque_fid = p_hwfn->hw_info.opaque_fid;
1531 rc = ecore_sp_vport_update(p_hwfn, &vport_update_params,
1532 ECORE_SPQ_MODE_EBLOCK, NULL);
1534 DP_ERR(edev, "vport-update for RSS failed\n");
1538 qdev->rss_enable = rss_params.rss_enable;
1540 /* Update local structure for hash query */
1541 qdev->rss_conf.rss_hf = hf;
1542 qdev->rss_conf.rss_key_len = len;
1543 if (qdev->rss_enable) {
1544 if (qdev->rss_conf.rss_key == NULL) {
1545 qdev->rss_conf.rss_key = (uint8_t *)malloc(len);
1546 if (qdev->rss_conf.rss_key == NULL) {
1547 DP_ERR(edev, "No memory to store RSS key\n");
1552 DP_INFO(edev, "Storing RSS key\n");
1553 memcpy(qdev->rss_conf.rss_key, key, len);
1555 } else if (!qdev->rss_enable && len == 0) {
1556 if (qdev->rss_conf.rss_key) {
1557 free(qdev->rss_conf.rss_key);
1558 qdev->rss_conf.rss_key = NULL;
1559 DP_INFO(edev, "Free RSS key\n");
1566 static int qede_rss_hash_conf_get(struct rte_eth_dev *eth_dev,
1567 struct rte_eth_rss_conf *rss_conf)
1569 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
1571 rss_conf->rss_hf = qdev->rss_conf.rss_hf;
1572 rss_conf->rss_key_len = qdev->rss_conf.rss_key_len;
1574 if (rss_conf->rss_key && qdev->rss_conf.rss_key)
1575 memcpy(rss_conf->rss_key, qdev->rss_conf.rss_key,
1576 rss_conf->rss_key_len);
1580 static bool qede_update_rss_parm_cmt(struct ecore_dev *edev,
1581 struct ecore_rss_params *rss)
1584 bool rss_mode = 1; /* enable */
1585 struct ecore_queue_cid *cid;
1586 struct ecore_rss_params *t_rss;
1588 /* In regular scenario, we'd simply need to take input handlers.
1589 * But in CMT, we'd have to split the handlers according to the
1590 * engine they were configured on. We'd then have to understand
1591 * whether RSS is really required, since 2-queues on CMT doesn't
1595 /* CMT should be round-robin */
1596 for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++) {
1597 cid = rss->rss_ind_table[i];
1599 if (cid->p_owner == ECORE_LEADING_HWFN(edev))
1604 t_rss->rss_ind_table[i / edev->num_hwfns] = cid;
1608 t_rss->update_rss_ind_table = 1;
1609 t_rss->rss_table_size_log = 7;
1610 t_rss->update_rss_config = 1;
1612 /* Make sure RSS is actually required */
1613 for_each_hwfn(edev, fn) {
1614 for (i = 1; i < ECORE_RSS_IND_TABLE_SIZE / edev->num_hwfns;
1616 if (rss[fn].rss_ind_table[i] !=
1617 rss[fn].rss_ind_table[0])
1621 if (i == ECORE_RSS_IND_TABLE_SIZE / edev->num_hwfns) {
1623 "CMT - 1 queue per-hwfn; Disabling RSS\n");
1630 t_rss->rss_enable = rss_mode;
1635 int qede_rss_reta_update(struct rte_eth_dev *eth_dev,
1636 struct rte_eth_rss_reta_entry64 *reta_conf,
1639 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
1640 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
1641 struct ecore_sp_vport_update_params vport_update_params;
1642 struct ecore_rss_params *params;
1643 struct ecore_hwfn *p_hwfn;
1644 uint16_t i, idx, shift;
1648 if (reta_size > ETH_RSS_RETA_SIZE_128) {
1649 DP_ERR(edev, "reta_size %d is not supported by hardware\n",
1654 memset(&vport_update_params, 0, sizeof(vport_update_params));
1655 params = rte_zmalloc("qede_rss", sizeof(*params) * edev->num_hwfns,
1656 RTE_CACHE_LINE_SIZE);
1658 for (i = 0; i < reta_size; i++) {
1659 idx = i / RTE_RETA_GROUP_SIZE;
1660 shift = i % RTE_RETA_GROUP_SIZE;
1661 if (reta_conf[idx].mask & (1ULL << shift)) {
1662 entry = reta_conf[idx].reta[shift];
1663 /* Pass rxq handles to ecore */
1664 params->rss_ind_table[i] =
1665 qdev->fp_array[entry].rxq->handle;
1666 /* Update the local copy for RETA query command */
1667 qdev->rss_ind_table[i] = entry;
1671 params->update_rss_ind_table = 1;
1672 params->rss_table_size_log = 7;
1673 params->update_rss_config = 1;
1675 /* Fix up RETA for CMT mode device */
1676 if (edev->num_hwfns > 1)
1677 qdev->rss_enable = qede_update_rss_parm_cmt(edev,
1679 vport_update_params.vport_id = 0;
1680 /* Use the current value of rss_enable */
1681 params->rss_enable = qdev->rss_enable;
1682 vport_update_params.rss_params = params;
1684 for_each_hwfn(edev, i) {
1685 p_hwfn = &edev->hwfns[i];
1686 vport_update_params.opaque_fid = p_hwfn->hw_info.opaque_fid;
1687 rc = ecore_sp_vport_update(p_hwfn, &vport_update_params,
1688 ECORE_SPQ_MODE_EBLOCK, NULL);
1690 DP_ERR(edev, "vport-update for RSS failed\n");
1700 static int qede_rss_reta_query(struct rte_eth_dev *eth_dev,
1701 struct rte_eth_rss_reta_entry64 *reta_conf,
1704 struct qede_dev *qdev = eth_dev->data->dev_private;
1705 struct ecore_dev *edev = &qdev->edev;
1706 uint16_t i, idx, shift;
1709 if (reta_size > ETH_RSS_RETA_SIZE_128) {
1710 DP_ERR(edev, "reta_size %d is not supported\n",
1715 for (i = 0; i < reta_size; i++) {
1716 idx = i / RTE_RETA_GROUP_SIZE;
1717 shift = i % RTE_RETA_GROUP_SIZE;
1718 if (reta_conf[idx].mask & (1ULL << shift)) {
1719 entry = qdev->rss_ind_table[i];
1720 reta_conf[idx].reta[shift] = entry;
1727 static int qede_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
1729 struct qede_dev *qdev = QEDE_INIT_QDEV(dev);
1730 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
1731 struct rte_eth_dev_info dev_info = {0};
1732 struct qede_fastpath *fp;
1733 uint32_t frame_size;
1734 uint16_t rx_buf_size;
1738 PMD_INIT_FUNC_TRACE(edev);
1739 qede_dev_info_get(dev, &dev_info);
1740 frame_size = mtu + QEDE_ETH_OVERHEAD;
1741 if ((mtu < ETHER_MIN_MTU) || (frame_size > dev_info.max_rx_pktlen)) {
1742 DP_ERR(edev, "MTU %u out of range\n", mtu);
1745 if (!dev->data->scattered_rx &&
1746 frame_size > dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM) {
1747 DP_INFO(edev, "MTU greater than minimum RX buffer size of %u\n",
1748 dev->data->min_rx_buf_size);
1751 /* Temporarily replace I/O functions with dummy ones. It cannot
1752 * be set to NULL because rte_eth_rx_burst() doesn't check for NULL.
1754 dev->rx_pkt_burst = qede_rxtx_pkts_dummy;
1755 dev->tx_pkt_burst = qede_rxtx_pkts_dummy;
1759 /* Fix up RX buf size for all queues of the port */
1761 fp = &qdev->fp_array[i];
1762 if (fp->type & QEDE_FASTPATH_RX) {
1763 bufsz = (uint16_t)rte_pktmbuf_data_room_size(
1764 fp->rxq->mb_pool) - RTE_PKTMBUF_HEADROOM;
1765 if (dev->data->scattered_rx)
1766 rx_buf_size = bufsz + QEDE_ETH_OVERHEAD;
1768 rx_buf_size = mtu + QEDE_ETH_OVERHEAD;
1769 rx_buf_size = QEDE_CEIL_TO_CACHE_LINE_SIZE(rx_buf_size);
1770 fp->rxq->rx_buf_size = rx_buf_size;
1771 DP_INFO(edev, "buf_size adjusted to %u\n", rx_buf_size);
1774 qede_dev_start(dev);
1775 if (frame_size > ETHER_MAX_LEN)
1776 dev->data->dev_conf.rxmode.jumbo_frame = 1;
1778 dev->data->dev_conf.rxmode.jumbo_frame = 0;
1779 /* update max frame size */
1780 dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
1782 dev->rx_pkt_burst = qede_recv_pkts;
1783 dev->tx_pkt_burst = qede_xmit_pkts;
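/*
 * Worked example (illustrative, exact QEDE_ETH_OVERHEAD value assumed):
 * for mtu = 9000, frame_size = 9000 + QEDE_ETH_OVERHEAD exceeds
 * ETHER_MAX_LEN, so jumbo_frame is set and max_rx_pkt_len is updated to
 * the new frame size before the real burst handlers are restored above.
 */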
1789 qede_conf_udp_dst_port(struct rte_eth_dev *eth_dev,
1790 struct rte_eth_udp_tunnel *tunnel_udp,
1793 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
1794 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
1795 struct ecore_tunnel_info tunn; /* @DPDK */
1796 struct ecore_hwfn *p_hwfn;
1799 PMD_INIT_FUNC_TRACE(edev);
1801 memset(&tunn, 0, sizeof(tunn));
1802 if (tunnel_udp->prot_type == RTE_TUNNEL_TYPE_VXLAN) {
1803 tunn.vxlan_port.b_update_port = true;
1804 tunn.vxlan_port.port = (add) ? tunnel_udp->udp_port :
1805 QEDE_VXLAN_DEF_PORT;
1806 for_each_hwfn(edev, i) {
1807 p_hwfn = &edev->hwfns[i];
1808 rc = ecore_sp_pf_update_tunn_cfg(p_hwfn, &tunn,
1809 ECORE_SPQ_MODE_CB, NULL);
1810 if (rc != ECORE_SUCCESS) {
1811 DP_ERR(edev, "Unable to config UDP port %u\n",
1812 tunn.vxlan_port.port);
1822 qede_udp_dst_port_del(struct rte_eth_dev *eth_dev,
1823 struct rte_eth_udp_tunnel *tunnel_udp)
1825 return qede_conf_udp_dst_port(eth_dev, tunnel_udp, false);
1829 qede_udp_dst_port_add(struct rte_eth_dev *eth_dev,
1830 struct rte_eth_udp_tunnel *tunnel_udp)
1832 return qede_conf_udp_dst_port(eth_dev, tunnel_udp, true);
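/*
 * Usage sketch (assumption, standard ethdev API of this era): applications
 * reach the two wrappers above through the generic tunnel-port API:
 *
 *   struct rte_eth_udp_tunnel udp = {
 *           .udp_port = 4789,
 *           .prot_type = RTE_TUNNEL_TYPE_VXLAN,
 *   };
 *   rte_eth_dev_udp_tunnel_port_add(port_id, &udp);
 */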
1835 static void qede_get_ecore_tunn_params(uint32_t filter, uint32_t *type,
1836 uint32_t *clss, char *str)
1839 *clss = MAX_ECORE_TUNN_CLSS;
1841 for (j = 0; j < RTE_DIM(qede_tunn_types); j++) {
1842 if (filter == qede_tunn_types[j].rte_filter_type) {
1843 *type = qede_tunn_types[j].qede_type;
1844 *clss = qede_tunn_types[j].qede_tunn_clss;
1845 strcpy(str, qede_tunn_types[j].string);
1852 qede_set_ucast_tunn_cmn_param(struct ecore_filter_ucast *ucast,
1853 const struct rte_eth_tunnel_filter_conf *conf,
1856 /* Init common ucast params first */
1857 qede_set_ucast_cmn_params(ucast);
1859 /* Copy out the required fields based on classification type */
1863 case ECORE_FILTER_VNI:
1864 ucast->vni = conf->tenant_id;
1866 case ECORE_FILTER_INNER_VLAN:
1867 ucast->vlan = conf->inner_vlan;
1869 case ECORE_FILTER_MAC:
1870 memcpy(ucast->mac, conf->outer_mac.addr_bytes,
1873 case ECORE_FILTER_INNER_MAC:
1874 memcpy(ucast->mac, conf->inner_mac.addr_bytes,
1877 case ECORE_FILTER_MAC_VNI_PAIR:
1878 memcpy(ucast->mac, conf->outer_mac.addr_bytes,
1880 ucast->vni = conf->tenant_id;
1882 case ECORE_FILTER_INNER_MAC_VNI_PAIR:
1883 memcpy(ucast->mac, conf->inner_mac.addr_bytes,
1885 ucast->vni = conf->tenant_id;
1887 case ECORE_FILTER_INNER_PAIR:
1888 memcpy(ucast->mac, conf->inner_mac.addr_bytes,
1890 ucast->vlan = conf->inner_vlan;
1896 return ECORE_SUCCESS;
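/*
 * Example (illustrative): an ETH_TUNNEL_FILTER_IMAC | ETH_TUNNEL_FILTER_IVLAN
 * filter resolves to ECORE_FILTER_INNER_PAIR, so the switch above copies
 * both conf->inner_mac and conf->inner_vlan into the ucast entry before the
 * caller issues the filter command.
 */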
1899 static int qede_vxlan_tunn_config(struct rte_eth_dev *eth_dev,
1900 enum rte_filter_op filter_op,
1901 const struct rte_eth_tunnel_filter_conf *conf)
1903 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
1904 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
1905 struct ecore_tunnel_info tunn;
1906 struct ecore_hwfn *p_hwfn;
1907 enum ecore_filter_ucast_type type;
1908 enum ecore_tunn_clss clss;
1909 struct ecore_filter_ucast ucast;
1911 uint16_t filter_type;
1914 filter_type = conf->filter_type | qdev->vxlan_filter_type;
1915 /* First determine if the given filter classification is supported */
1916 qede_get_ecore_tunn_params(filter_type, &type, &clss, str);
1917 if (clss == MAX_ECORE_TUNN_CLSS) {
1918 DP_ERR(edev, "Wrong filter type\n");
1921 /* Init tunnel ucast params */
1922 rc = qede_set_ucast_tunn_cmn_param(&ucast, conf, type);
1923 if (rc != ECORE_SUCCESS) {
1924 DP_ERR(edev, "Unsupported VxLAN filter type 0x%x\n",
1928 DP_INFO(edev, "Rule: \"%s\", op %d, type 0x%x\n",
1929 str, filter_op, ucast.type);
1930 switch (filter_op) {
1931 case RTE_ETH_FILTER_ADD:
1932 ucast.opcode = ECORE_FILTER_ADD;
1934 /* Skip MAC/VLAN if filter is based on VNI */
1935 if (!(filter_type & ETH_TUNNEL_FILTER_TENID)) {
1936 rc = qede_mac_int_ops(eth_dev, &ucast, 1);
1938 /* Enable accept anyvlan */
1939 qede_config_accept_any_vlan(qdev, true);
1942 rc = qede_ucast_filter(eth_dev, &ucast, 1);
1944 rc = ecore_filter_ucast_cmd(edev, &ucast,
1945 ECORE_SPQ_MODE_CB, NULL);
1948 if (rc != ECORE_SUCCESS)
1951 qdev->vxlan_filter_type = filter_type;
1953 DP_INFO(edev, "Enabling VXLAN tunneling\n");
1954 qede_set_cmn_tunn_param(&tunn, clss, true, true);
1955 for_each_hwfn(edev, i) {
1956 p_hwfn = &edev->hwfns[i];
1957 rc = ecore_sp_pf_update_tunn_cfg(p_hwfn,
1958 &tunn, ECORE_SPQ_MODE_CB, NULL);
1959 if (rc != ECORE_SUCCESS) {
1960 DP_ERR(edev, "Failed to update tunn_clss %u\n",
1961 tunn.vxlan.tun_cls);
1964 qdev->num_tunn_filters++; /* Filter added successfully */
1966 case RTE_ETH_FILTER_DELETE:
1967 ucast.opcode = ECORE_FILTER_REMOVE;
1969 if (!(filter_type & ETH_TUNNEL_FILTER_TENID)) {
1970 rc = qede_mac_int_ops(eth_dev, &ucast, 0);
1972 rc = qede_ucast_filter(eth_dev, &ucast, 0);
1974 rc = ecore_filter_ucast_cmd(edev, &ucast,
1975 ECORE_SPQ_MODE_CB, NULL);
1977 if (rc != ECORE_SUCCESS)
1980 qdev->vxlan_filter_type = filter_type;
1981 qdev->num_tunn_filters--;
1983 /* Disable VXLAN if VXLAN filters become 0 */
1984 if (qdev->num_tunn_filters == 0) {
1985 DP_INFO(edev, "Disabling VXLAN tunneling\n");
1987 /* Use 0 as tunnel mode */
1988 qede_set_cmn_tunn_param(&tunn, clss, false, true);
1989 for_each_hwfn(edev, i) {
1990 p_hwfn = &edev->hwfns[i];
1991 rc = ecore_sp_pf_update_tunn_cfg(p_hwfn, &tunn,
1992 ECORE_SPQ_MODE_CB, NULL);
1993 if (rc != ECORE_SUCCESS) {
1995 "Failed to update tunn_clss %u\n",
1996 tunn.vxlan.tun_cls);
2003 DP_ERR(edev, "Unsupported operation %d\n", filter_op);
2006 DP_INFO(edev, "Current VXLAN filters %d\n", qdev->num_tunn_filters);
2011 int qede_dev_filter_ctrl(struct rte_eth_dev *eth_dev,
2012 enum rte_filter_type filter_type,
2013 enum rte_filter_op filter_op,
2016 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
2017 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
2018 struct rte_eth_tunnel_filter_conf *filter_conf =
2019 (struct rte_eth_tunnel_filter_conf *)arg;
2021 switch (filter_type) {
2022 case RTE_ETH_FILTER_TUNNEL:
2023 switch (filter_conf->tunnel_type) {
2024 case RTE_TUNNEL_TYPE_VXLAN:
2026 "Packet steering to the specified Rx queue"
2027 " is not supported with VXLAN tunneling");
2028 return (qede_vxlan_tunn_config(eth_dev, filter_op,
2030 /* Place holders for future tunneling support */
2031 case RTE_TUNNEL_TYPE_GENEVE:
2032 case RTE_TUNNEL_TYPE_TEREDO:
2033 case RTE_TUNNEL_TYPE_NVGRE:
2034 case RTE_TUNNEL_TYPE_IP_IN_GRE:
2035 case RTE_L2_TUNNEL_TYPE_E_TAG:
2036 DP_ERR(edev, "Unsupported tunnel type %d\n",
2037 filter_conf->tunnel_type);
2039 case RTE_TUNNEL_TYPE_NONE:
2044 case RTE_ETH_FILTER_FDIR:
2045 return qede_fdir_filter_conf(eth_dev, filter_op, arg);
2046 case RTE_ETH_FILTER_NTUPLE:
2047 return qede_ntuple_filter_conf(eth_dev, filter_op, arg);
2048 case RTE_ETH_FILTER_MACVLAN:
2049 case RTE_ETH_FILTER_ETHERTYPE:
2050 case RTE_ETH_FILTER_FLEXIBLE:
2051 case RTE_ETH_FILTER_SYN:
2052 case RTE_ETH_FILTER_HASH:
2053 case RTE_ETH_FILTER_L2_TUNNEL:
2054 case RTE_ETH_FILTER_MAX:
2056 DP_ERR(edev, "Unsupported filter type %d\n",
static const struct eth_dev_ops qede_eth_dev_ops = {
	.dev_configure = qede_dev_configure,
	.dev_infos_get = qede_dev_info_get,
	.rx_queue_setup = qede_rx_queue_setup,
	.rx_queue_release = qede_rx_queue_release,
	.tx_queue_setup = qede_tx_queue_setup,
	.tx_queue_release = qede_tx_queue_release,
	.dev_start = qede_dev_start,
	.dev_set_link_up = qede_dev_set_link_up,
	.dev_set_link_down = qede_dev_set_link_down,
	.link_update = qede_link_update,
	.promiscuous_enable = qede_promiscuous_enable,
	.promiscuous_disable = qede_promiscuous_disable,
	.allmulticast_enable = qede_allmulticast_enable,
	.allmulticast_disable = qede_allmulticast_disable,
	.dev_stop = qede_dev_stop,
	.dev_close = qede_dev_close,
	.stats_get = qede_get_stats,
	.stats_reset = qede_reset_stats,
	.xstats_get = qede_get_xstats,
	.xstats_reset = qede_reset_xstats,
	.xstats_get_names = qede_get_xstats_names,
	.mac_addr_add = qede_mac_addr_add,
	.mac_addr_remove = qede_mac_addr_remove,
	.mac_addr_set = qede_mac_addr_set,
	.vlan_offload_set = qede_vlan_offload_set,
	.vlan_filter_set = qede_vlan_filter_set,
	.flow_ctrl_set = qede_flow_ctrl_set,
	.flow_ctrl_get = qede_flow_ctrl_get,
	.dev_supported_ptypes_get = qede_dev_supported_ptypes_get,
	.rss_hash_update = qede_rss_hash_update,
	.rss_hash_conf_get = qede_rss_hash_conf_get,
	.reta_update = qede_rss_reta_update,
	.reta_query = qede_rss_reta_query,
	.mtu_set = qede_set_mtu,
	.filter_ctrl = qede_dev_filter_ctrl,
	.udp_tunnel_port_add = qede_udp_dst_port_add,
	.udp_tunnel_port_del = qede_udp_dst_port_del,
};
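/*
 * Illustrative sketch (not part of the driver): applications invoke the ops
 * above through the generic ethdev API. For example, registering the
 * IANA-assigned VXLAN UDP port (4789) dispatches to qede_udp_dst_port_add():
 *
 *	struct rte_eth_udp_tunnel tunnel_udp = {
 *		.udp_port = 4789,
 *		.prot_type = RTE_TUNNEL_TYPE_VXLAN,
 *	};
 *	rte_eth_dev_udp_tunnel_port_add(port_id, &tunnel_udp);
 */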
static const struct eth_dev_ops qede_eth_vf_dev_ops = {
	.dev_configure = qede_dev_configure,
	.dev_infos_get = qede_dev_info_get,
	.rx_queue_setup = qede_rx_queue_setup,
	.rx_queue_release = qede_rx_queue_release,
	.tx_queue_setup = qede_tx_queue_setup,
	.tx_queue_release = qede_tx_queue_release,
	.dev_start = qede_dev_start,
	.dev_set_link_up = qede_dev_set_link_up,
	.dev_set_link_down = qede_dev_set_link_down,
	.link_update = qede_link_update,
	.promiscuous_enable = qede_promiscuous_enable,
	.promiscuous_disable = qede_promiscuous_disable,
	.allmulticast_enable = qede_allmulticast_enable,
	.allmulticast_disable = qede_allmulticast_disable,
	.dev_stop = qede_dev_stop,
	.dev_close = qede_dev_close,
	.stats_get = qede_get_stats,
	.stats_reset = qede_reset_stats,
	.xstats_get = qede_get_xstats,
	.xstats_reset = qede_reset_xstats,
	.xstats_get_names = qede_get_xstats_names,
	.vlan_offload_set = qede_vlan_offload_set,
	.vlan_filter_set = qede_vlan_filter_set,
	.dev_supported_ptypes_get = qede_dev_supported_ptypes_get,
	.rss_hash_update = qede_rss_hash_update,
	.rss_hash_conf_get = qede_rss_hash_conf_get,
	.reta_update = qede_rss_reta_update,
	.reta_query = qede_rss_reta_query,
	.mtu_set = qede_set_mtu,
};
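/*
 * Note: the VF ops table intentionally omits the PF-only callbacks present in
 * qede_eth_dev_ops above (MAC address management, flow control, filter_ctrl
 * and UDP tunnel port add/del); those operations are not exposed to VFs.
 */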
static void qede_update_pf_params(struct ecore_dev *edev)
{
	struct ecore_pf_params pf_params;

	memset(&pf_params, 0, sizeof(struct ecore_pf_params));
	pf_params.eth_pf_params.num_cons = QEDE_PF_NUM_CONNS;
	pf_params.eth_pf_params.num_arfs_filters = QEDE_RFS_MAX_FLTR;
	qed_ops->common->update_pf_params(edev, &pf_params);
}
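/*
 * Note: the connection count and aRFS filter budget must be programmed before
 * the slowpath is started; qede_common_dev_init() below therefore calls this
 * immediately after a successful probe.
 */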
static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
{
	struct rte_pci_device *pci_dev;
	struct rte_pci_addr pci_addr;
	struct qede_dev *adapter;
	struct ecore_dev *edev;
	struct qed_dev_eth_info dev_info;
	struct qed_slowpath_params params;
	static bool do_once = true;
	uint8_t bulletin_change;
	uint8_t vf_mac[ETHER_ADDR_LEN];
	uint8_t is_mac_forced;
	bool is_mac_exist;
	/* Fix up ecore debug level */
	uint32_t dp_module = ~0 & ~ECORE_MSG_HW;
	uint8_t dp_level = ECORE_LEVEL_VERBOSE;
	int rc;

	/* Extract key data structures */
	adapter = eth_dev->data->dev_private;
	edev = &adapter->edev;
	pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
	pci_addr = pci_dev->addr;

	PMD_INIT_FUNC_TRACE(edev);

	snprintf(edev->name, NAME_SIZE, PCI_SHORT_PRI_FMT ":dpdk-port-%u",
		 pci_addr.bus, pci_addr.devid, pci_addr.function,
		 eth_dev->data->port_id);

	eth_dev->rx_pkt_burst = qede_recv_pkts;
	eth_dev->tx_pkt_burst = qede_xmit_pkts;
	eth_dev->tx_pkt_prepare = qede_xmit_prep_pkts;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		DP_NOTICE(edev, false,
			  "Skipping device init from secondary process\n");
		return 0;
	}

	rte_eth_copy_pci_info(eth_dev, pci_dev);

	/* @DPDK */
	edev->vendor_id = pci_dev->id.vendor_id;
	edev->device_id = pci_dev->id.device_id;

	qed_ops = qed_get_eth_ops();
	if (!qed_ops) {
		DP_ERR(edev, "Failed to get qed_eth_ops_pass\n");
		return -EINVAL;
	}

	DP_INFO(edev, "Starting qede probe\n");

	rc = qed_ops->common->probe(edev, pci_dev, QED_PROTOCOL_ETH,
				    dp_module, dp_level, is_vf);
	if (rc != 0) {
		DP_ERR(edev, "qede probe failed rc %d\n", rc);
		return -ENODEV;
	}

	qede_update_pf_params(edev);

	rte_intr_callback_register(&pci_dev->intr_handle,
				   qede_interrupt_handler, (void *)eth_dev);

	if (rte_intr_enable(&pci_dev->intr_handle)) {
		DP_ERR(edev, "rte_intr_enable() failed\n");
		return -ENODEV;
	}

	/* Start the Slowpath-process */
	memset(&params, 0, sizeof(struct qed_slowpath_params));
	params.int_mode = ECORE_INT_MODE_MSIX;
	params.drv_major = QEDE_PMD_VERSION_MAJOR;
	params.drv_minor = QEDE_PMD_VERSION_MINOR;
	params.drv_rev = QEDE_PMD_VERSION_REVISION;
	params.drv_eng = QEDE_PMD_VERSION_PATCH;
	strncpy((char *)params.name, QEDE_PMD_VER_PREFIX,
		QEDE_PMD_DRV_VER_STR_SIZE);

	/* For CMT mode device do periodic polling for slowpath events.
	 * This is required since uio device uses only one MSI-x
	 * interrupt vector but we need one for each engine.
	 */
	if (edev->num_hwfns > 1 && IS_PF(edev)) {
		rc = rte_eal_alarm_set(timer_period * US_PER_S,
				       qede_poll_sp_sb_cb,
				       (void *)eth_dev);
		if (rc != 0) {
			DP_ERR(edev, "Unable to start periodic"
				     " timer rc %d\n", rc);
			return -EINVAL;
		}
	}

	rc = qed_ops->common->slowpath_start(edev, &params);
	if (rc) {
		DP_ERR(edev, "Cannot start slowpath rc = %d\n", rc);
		rte_eal_alarm_cancel(qede_poll_sp_sb_cb,
				     (void *)eth_dev);
		return -ENODEV;
	}

	rc = qed_ops->fill_dev_info(edev, &dev_info);
	if (rc) {
		DP_ERR(edev, "Cannot get device_info rc %d\n", rc);
		qed_ops->common->slowpath_stop(edev);
		qed_ops->common->remove(edev);
		rte_eal_alarm_cancel(qede_poll_sp_sb_cb,
				     (void *)eth_dev);
		return -ENODEV;
	}

	qede_alloc_etherdev(adapter, &dev_info);

	adapter->ops->common->set_name(edev, edev->name);

	if (!is_vf)
		adapter->dev_info.num_mac_filters =
			(uint32_t)RESC_NUM(ECORE_LEADING_HWFN(edev),
					   ECORE_MAC);
	else
		ecore_vf_get_num_mac_filters(ECORE_LEADING_HWFN(edev),
			     (uint32_t *)&adapter->dev_info.num_mac_filters);

	/* Allocate memory for storing MAC addr */
	eth_dev->data->mac_addrs = rte_zmalloc(edev->name,
					(ETHER_ADDR_LEN *
					adapter->dev_info.num_mac_filters),
					RTE_CACHE_LINE_SIZE);
	if (eth_dev->data->mac_addrs == NULL) {
		DP_ERR(edev, "Failed to allocate MAC address\n");
		qed_ops->common->slowpath_stop(edev);
		qed_ops->common->remove(edev);
		rte_eal_alarm_cancel(qede_poll_sp_sb_cb,
				     (void *)eth_dev);
		return -ENOMEM;
	}

	if (!is_vf) {
		ether_addr_copy((struct ether_addr *)edev->hwfns[0].
				hw_info.hw_mac_addr,
				&eth_dev->data->mac_addrs[0]);
		ether_addr_copy(&eth_dev->data->mac_addrs[0],
				&adapter->primary_mac);
	} else {
		ecore_vf_read_bulletin(ECORE_LEADING_HWFN(edev),
				       &bulletin_change);
		if (bulletin_change) {
			is_mac_exist =
			    ecore_vf_bulletin_get_forced_mac(
						ECORE_LEADING_HWFN(edev),
						vf_mac,
						&is_mac_forced);
			if (is_mac_exist && is_mac_forced) {
				DP_INFO(edev, "VF macaddr received from PF\n");
				ether_addr_copy((struct ether_addr *)&vf_mac,
						&eth_dev->data->mac_addrs[0]);
				ether_addr_copy(&eth_dev->data->mac_addrs[0],
						&adapter->primary_mac);
			} else {
				DP_NOTICE(edev, false,
					  "No VF macaddr assigned\n");
			}
		}
	}

	eth_dev->dev_ops = (is_vf) ? &qede_eth_vf_dev_ops : &qede_eth_dev_ops;

	if (do_once) {
#ifdef RTE_LIBRTE_QEDE_DEBUG_INFO
		qede_print_adapter_info(adapter);
#endif
		do_once = false;
	}

	adapter->state = QEDE_DEV_INIT;

	DP_NOTICE(edev, false, "MAC address : %02x:%02x:%02x:%02x:%02x:%02x\n",
		  adapter->primary_mac.addr_bytes[0],
		  adapter->primary_mac.addr_bytes[1],
		  adapter->primary_mac.addr_bytes[2],
		  adapter->primary_mac.addr_bytes[3],
		  adapter->primary_mac.addr_bytes[4],
		  adapter->primary_mac.addr_bytes[5]);

	return rc;
}
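/*
 * Note: qede_common_dev_init() is the single bring-up path shared by PF and
 * VF: probe -> PF param update -> interrupt setup -> slowpath start -> device
 * info query -> MAC setup. Failure paths after slowpath start cancel the
 * periodic polling alarm and, where needed, stop the slowpath and remove the
 * device before returning.
 */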
static int qedevf_eth_dev_init(struct rte_eth_dev *eth_dev)
{
	return qede_common_dev_init(eth_dev, 1);
}

static int qede_eth_dev_init(struct rte_eth_dev *eth_dev)
{
	return qede_common_dev_init(eth_dev, 0);
}

static int qede_dev_common_uninit(struct rte_eth_dev *eth_dev)
{
	/* only uninitialize in the primary process */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	/* safe to close dev here */
	qede_dev_close(eth_dev);

	eth_dev->dev_ops = NULL;
	eth_dev->rx_pkt_burst = NULL;
	eth_dev->tx_pkt_burst = NULL;

	if (eth_dev->data->mac_addrs)
		rte_free(eth_dev->data->mac_addrs);
	eth_dev->data->mac_addrs = NULL;

	return 0;
}

static int qede_eth_dev_uninit(struct rte_eth_dev *eth_dev)
{
	return qede_dev_common_uninit(eth_dev);
}

static int qedevf_eth_dev_uninit(struct rte_eth_dev *eth_dev)
{
	return qede_dev_common_uninit(eth_dev);
}
static const struct rte_pci_id pci_id_qedevf_map[] = {
#define QEDEVF_RTE_PCI_DEVICE(dev) RTE_PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, dev)
	{ QEDEVF_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_NX2_VF) },
	{ QEDEVF_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_57980S_IOV) },
	{ QEDEVF_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_AH_IOV) },
	{ .vendor_id = 0, }
};

static const struct rte_pci_id pci_id_qede_map[] = {
#define QEDE_RTE_PCI_DEVICE(dev) RTE_PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, dev)
	{ QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_NX2_57980E) },
	{ QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_NX2_57980S) },
	{ QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_57980S_40) },
	{ QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_57980S_25) },
	{ QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_57980S_100) },
	{ QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_57980S_50) },
	{ QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_AH_50G) },
	{ QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_AH_10G) },
	{ QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_AH_40G) },
	{ QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_AH_25G) },
	{ .vendor_id = 0, }
};
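/*
 * Note: each ID table is terminated by a zeroed sentinel entry
 * ({ .vendor_id = 0, }); the PCI bus scan stops at it when matching probed
 * devices against the table.
 */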
static int qedevf_eth_dev_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
	struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev,
		sizeof(struct qede_dev), qedevf_eth_dev_init);
}

static int qedevf_eth_dev_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, qedevf_eth_dev_uninit);
}

static struct rte_pci_driver rte_qedevf_pmd = {
	.id_table = pci_id_qedevf_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
	.probe = qedevf_eth_dev_pci_probe,
	.remove = qedevf_eth_dev_pci_remove,
};

static int qede_eth_dev_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
	struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev,
		sizeof(struct qede_dev), qede_eth_dev_init);
}

static int qede_eth_dev_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, qede_eth_dev_uninit);
}

static struct rte_pci_driver rte_qede_pmd = {
	.id_table = pci_id_qede_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
	.probe = qede_eth_dev_pci_probe,
	.remove = qede_eth_dev_pci_remove,
};

RTE_PMD_REGISTER_PCI(net_qede, rte_qede_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_qede, pci_id_qede_map);
RTE_PMD_REGISTER_KMOD_DEP(net_qede, "* igb_uio | uio_pci_generic | vfio-pci");
RTE_PMD_REGISTER_PCI(net_qede_vf, rte_qedevf_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_qede_vf, pci_id_qedevf_map);
RTE_PMD_REGISTER_KMOD_DEP(net_qede_vf, "* igb_uio | vfio-pci");
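/*
 * Note: the RTE_PMD_REGISTER_* macros record the driver names, PCI ID tables
 * and kernel-module requirements in the PMD information strings embedded in
 * the compiled object, which tools such as usertools/dpdk-pmdinfo.py can
 * extract.
 */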