2 * Copyright (c) 2016 QLogic Corporation.
6 * See LICENSE.qede_pmd for copyright and licensing details.
9 #include "qede_ethdev.h"
10 #include <rte_alarm.h>
11 #include <rte_version.h>
14 static const struct qed_eth_ops *qed_ops;
15 static const char *drivername = "qede pmd";
16 static int64_t timer_period = 1;
18 struct rte_qede_xstats_name_off {
19 char name[RTE_ETH_XSTATS_NAME_SIZE];
23 static const struct rte_qede_xstats_name_off qede_xstats_strings[] = {
24 {"rx_unicast_bytes", offsetof(struct ecore_eth_stats, rx_ucast_bytes)},
25 {"rx_multicast_bytes",
26 offsetof(struct ecore_eth_stats, rx_mcast_bytes)},
27 {"rx_broadcast_bytes",
28 offsetof(struct ecore_eth_stats, rx_bcast_bytes)},
29 {"rx_unicast_packets", offsetof(struct ecore_eth_stats, rx_ucast_pkts)},
30 {"rx_multicast_packets",
31 offsetof(struct ecore_eth_stats, rx_mcast_pkts)},
32 {"rx_broadcast_packets",
33 offsetof(struct ecore_eth_stats, rx_bcast_pkts)},
35 {"tx_unicast_bytes", offsetof(struct ecore_eth_stats, tx_ucast_bytes)},
36 {"tx_multicast_bytes",
37 offsetof(struct ecore_eth_stats, tx_mcast_bytes)},
38 {"tx_broadcast_bytes",
39 offsetof(struct ecore_eth_stats, tx_bcast_bytes)},
40 {"tx_unicast_packets", offsetof(struct ecore_eth_stats, tx_ucast_pkts)},
41 {"tx_multicast_packets",
42 offsetof(struct ecore_eth_stats, tx_mcast_pkts)},
43 {"tx_broadcast_packets",
44 offsetof(struct ecore_eth_stats, tx_bcast_pkts)},
46 {"rx_64_byte_packets",
47 offsetof(struct ecore_eth_stats, rx_64_byte_packets)},
48 {"rx_65_to_127_byte_packets",
49 offsetof(struct ecore_eth_stats, rx_65_to_127_byte_packets)},
50 {"rx_128_to_255_byte_packets",
51 offsetof(struct ecore_eth_stats, rx_128_to_255_byte_packets)},
52 {"rx_256_to_511_byte_packets",
53 offsetof(struct ecore_eth_stats, rx_256_to_511_byte_packets)},
54 {"rx_512_to_1023_byte_packets",
55 offsetof(struct ecore_eth_stats, rx_512_to_1023_byte_packets)},
56 {"rx_1024_to_1518_byte_packets",
57 offsetof(struct ecore_eth_stats, rx_1024_to_1518_byte_packets)},
58 {"rx_1519_to_1522_byte_packets",
59 offsetof(struct ecore_eth_stats, rx_1519_to_1522_byte_packets)},
60 {"rx_1519_to_2047_byte_packets",
61 offsetof(struct ecore_eth_stats, rx_1519_to_2047_byte_packets)},
62 {"rx_2048_to_4095_byte_packets",
63 offsetof(struct ecore_eth_stats, rx_2048_to_4095_byte_packets)},
64 {"rx_4096_to_9216_byte_packets",
65 offsetof(struct ecore_eth_stats, rx_4096_to_9216_byte_packets)},
66 {"rx_9217_to_16383_byte_packets",
67 offsetof(struct ecore_eth_stats,
68 rx_9217_to_16383_byte_packets)},
69 {"tx_64_byte_packets",
70 offsetof(struct ecore_eth_stats, tx_64_byte_packets)},
71 {"tx_65_to_127_byte_packets",
72 offsetof(struct ecore_eth_stats, tx_65_to_127_byte_packets)},
73 {"tx_128_to_255_byte_packets",
74 offsetof(struct ecore_eth_stats, tx_128_to_255_byte_packets)},
75 {"tx_256_to_511_byte_packets",
76 offsetof(struct ecore_eth_stats, tx_256_to_511_byte_packets)},
77 {"tx_512_to_1023_byte_packets",
78 offsetof(struct ecore_eth_stats, tx_512_to_1023_byte_packets)},
79 {"tx_1024_to_1518_byte_packets",
80 offsetof(struct ecore_eth_stats, tx_1024_to_1518_byte_packets)},
81 {"trx_1519_to_1522_byte_packets",
82 offsetof(struct ecore_eth_stats, tx_1519_to_2047_byte_packets)},
83 {"tx_2048_to_4095_byte_packets",
84 offsetof(struct ecore_eth_stats, tx_2048_to_4095_byte_packets)},
85 {"tx_4096_to_9216_byte_packets",
86 offsetof(struct ecore_eth_stats, tx_4096_to_9216_byte_packets)},
87 {"tx_9217_to_16383_byte_packets",
88 offsetof(struct ecore_eth_stats,
89 tx_9217_to_16383_byte_packets)},
91 {"rx_mac_crtl_frames",
92 offsetof(struct ecore_eth_stats, rx_mac_crtl_frames)},
93 {"tx_mac_control_frames",
94 offsetof(struct ecore_eth_stats, tx_mac_ctrl_frames)},
95 {"rx_pause_frames", offsetof(struct ecore_eth_stats, rx_pause_frames)},
96 {"tx_pause_frames", offsetof(struct ecore_eth_stats, tx_pause_frames)},
97 {"rx_priority_flow_control_frames",
98 offsetof(struct ecore_eth_stats, rx_pfc_frames)},
99 {"tx_priority_flow_control_frames",
100 offsetof(struct ecore_eth_stats, tx_pfc_frames)},
102 {"rx_crc_errors", offsetof(struct ecore_eth_stats, rx_crc_errors)},
103 {"rx_align_errors", offsetof(struct ecore_eth_stats, rx_align_errors)},
104 {"rx_carrier_errors",
105 offsetof(struct ecore_eth_stats, rx_carrier_errors)},
106 {"rx_oversize_packet_errors",
107 offsetof(struct ecore_eth_stats, rx_oversize_packets)},
108 {"rx_jabber_errors", offsetof(struct ecore_eth_stats, rx_jabbers)},
109 {"rx_undersize_packet_errors",
110 offsetof(struct ecore_eth_stats, rx_undersize_packets)},
111 {"rx_fragments", offsetof(struct ecore_eth_stats, rx_fragments)},
112 {"rx_host_buffer_not_available",
113 offsetof(struct ecore_eth_stats, no_buff_discards)},
114 /* Number of packets discarded because they are bigger than MTU */
115 {"rx_packet_too_big_discards",
116 offsetof(struct ecore_eth_stats, packet_too_big_discard)},
117 {"rx_ttl_zero_discards",
118 offsetof(struct ecore_eth_stats, ttl0_discard)},
119 {"rx_multi_function_tag_filter_discards",
120 offsetof(struct ecore_eth_stats, mftag_filter_discards)},
121 {"rx_mac_filter_discards",
122 offsetof(struct ecore_eth_stats, mac_filter_discards)},
123 {"rx_hw_buffer_truncates",
124 offsetof(struct ecore_eth_stats, brb_truncates)},
125 {"rx_hw_buffer_discards",
126 offsetof(struct ecore_eth_stats, brb_discards)},
127 {"tx_lpi_entry_count",
128 offsetof(struct ecore_eth_stats, tx_lpi_entry_count)},
129 {"tx_total_collisions",
130 offsetof(struct ecore_eth_stats, tx_total_collisions)},
131 {"tx_error_drop_packets",
132 offsetof(struct ecore_eth_stats, tx_err_drop_pkts)},
134 {"rx_mac_bytes", offsetof(struct ecore_eth_stats, rx_mac_bytes)},
135 {"rx_mac_unicast_packets",
136 offsetof(struct ecore_eth_stats, rx_mac_uc_packets)},
137 {"rx_mac_multicast_packets",
138 offsetof(struct ecore_eth_stats, rx_mac_mc_packets)},
139 {"rx_mac_broadcast_packets",
140 offsetof(struct ecore_eth_stats, rx_mac_bc_packets)},
	{"rx_mac_frames_ok",
		offsetof(struct ecore_eth_stats, rx_mac_frames_ok)},
143 {"tx_mac_bytes", offsetof(struct ecore_eth_stats, tx_mac_bytes)},
144 {"tx_mac_unicast_packets",
145 offsetof(struct ecore_eth_stats, tx_mac_uc_packets)},
146 {"tx_mac_multicast_packets",
147 offsetof(struct ecore_eth_stats, tx_mac_mc_packets)},
148 {"tx_mac_broadcast_packets",
149 offsetof(struct ecore_eth_stats, tx_mac_bc_packets)},
151 {"lro_coalesced_packets",
152 offsetof(struct ecore_eth_stats, tpa_coalesced_pkts)},
153 {"lro_coalesced_events",
154 offsetof(struct ecore_eth_stats, tpa_coalesced_events)},
	{"lro_aborts_num",
		offsetof(struct ecore_eth_stats, tpa_aborts_num)},
157 {"lro_not_coalesced_packets",
158 offsetof(struct ecore_eth_stats, tpa_not_coalesced_pkts)},
159 {"lro_coalesced_bytes",
160 offsetof(struct ecore_eth_stats, tpa_coalesced_bytes)},
163 static const struct rte_qede_xstats_name_off qede_rxq_xstats_strings[] = {
	{"rx_q_segments",
		offsetof(struct qede_rx_queue, rx_segs)},
	{"rx_q_hw_errors",
		offsetof(struct qede_rx_queue, rx_hw_errors)},
168 {"rx_q_allocation_errors",
169 offsetof(struct qede_rx_queue, rx_alloc_errors)}
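/*
 * The name/offset tables above drive the generic xstats path further down:
 * each entry pairs a user-visible counter name with the byte offset of the
 * matching uint64_t field in struct ecore_eth_stats (device-wide counters)
 * or struct qede_rx_queue (per-queue counters).  qede_get_xstats() reads a
 * value essentially as:
 *
 *	val = *(uint64_t *)((char *)&stats + qede_xstats_strings[i].offset);
 *
 * so exposing a new counter only needs a new table entry.
 */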
172 static void qede_interrupt_action(struct ecore_hwfn *p_hwfn)
174 ecore_int_sp_dpc((osal_int_ptr_t)(p_hwfn));
178 qede_interrupt_handler(__rte_unused struct rte_intr_handle *handle, void *param)
180 struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param;
181 struct qede_dev *qdev = eth_dev->data->dev_private;
182 struct ecore_dev *edev = &qdev->edev;
184 qede_interrupt_action(ECORE_LEADING_HWFN(edev));
185 if (rte_intr_enable(ð_dev->pci_dev->intr_handle))
186 DP_ERR(edev, "rte_intr_enable failed\n");
190 qede_alloc_etherdev(struct qede_dev *qdev, struct qed_dev_eth_info *info)
192 rte_memcpy(&qdev->dev_info, info, sizeof(*info));
193 qdev->num_tc = qdev->dev_info.num_tc;
197 static void qede_print_adapter_info(struct qede_dev *qdev)
199 struct ecore_dev *edev = &qdev->edev;
200 struct qed_dev_info *info = &qdev->dev_info.common;
201 static char drv_ver[QEDE_PMD_DRV_VER_STR_SIZE];
202 static char ver_str[QEDE_PMD_DRV_VER_STR_SIZE];
204 DP_INFO(edev, "*********************************\n");
205 DP_INFO(edev, " DPDK version:%s\n", rte_version());
206 DP_INFO(edev, " Chip details : %s%d\n",
207 ECORE_IS_BB(edev) ? "BB" : "AH",
208 CHIP_REV_IS_A0(edev) ? 0 : 1);
209 snprintf(ver_str, QEDE_PMD_DRV_VER_STR_SIZE, "%d.%d.%d.%d",
210 info->fw_major, info->fw_minor, info->fw_rev, info->fw_eng);
211 snprintf(drv_ver, QEDE_PMD_DRV_VER_STR_SIZE, "%s_%s",
212 ver_str, QEDE_PMD_VERSION);
213 DP_INFO(edev, " Driver version : %s\n", drv_ver);
214 DP_INFO(edev, " Firmware version : %s\n", ver_str);
216 snprintf(ver_str, MCP_DRV_VER_STR_SIZE,
218 (info->mfw_rev >> 24) & 0xff,
219 (info->mfw_rev >> 16) & 0xff,
220 (info->mfw_rev >> 8) & 0xff, (info->mfw_rev) & 0xff);
221 DP_INFO(edev, " Management Firmware version : %s\n", ver_str);
222 DP_INFO(edev, " Firmware file : %s\n", fw_file);
223 DP_INFO(edev, "*********************************\n");
226 static void qede_set_ucast_cmn_params(struct ecore_filter_ucast *ucast)
228 memset(ucast, 0, sizeof(struct ecore_filter_ucast));
229 ucast->is_rx_filter = true;
230 ucast->is_tx_filter = true;
231 /* ucast->assert_on_error = true; - For debug */
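/*
 * Maintain a driver-private shadow list of unicast MAC/VLAN filters so that
 * duplicate additions are rejected and removals can locate the matching
 * entry before the corresponding hardware filter is dropped.
 */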
235 qede_ucast_filter(struct rte_eth_dev *eth_dev, struct ecore_filter_ucast *ucast,
238 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
239 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
240 struct qede_ucast_entry *tmp = NULL;
241 struct qede_ucast_entry *u;
242 struct ether_addr *mac_addr;
244 mac_addr = (struct ether_addr *)ucast->mac;
246 SLIST_FOREACH(tmp, &qdev->uc_list_head, list) {
247 if ((memcmp(mac_addr, &tmp->mac,
248 ETHER_ADDR_LEN) == 0) &&
249 ucast->vlan == tmp->vlan) {
250 DP_ERR(edev, "Unicast MAC is already added"
251 " with vlan = %u, vni = %u\n",
252 ucast->vlan, ucast->vni);
256 u = rte_malloc(NULL, sizeof(struct qede_ucast_entry),
257 RTE_CACHE_LINE_SIZE);
259 DP_ERR(edev, "Did not allocate memory for ucast\n");
262 ether_addr_copy(mac_addr, &u->mac);
263 u->vlan = ucast->vlan;
264 SLIST_INSERT_HEAD(&qdev->uc_list_head, u, list);
267 SLIST_FOREACH(tmp, &qdev->uc_list_head, list) {
268 if ((memcmp(mac_addr, &tmp->mac,
269 ETHER_ADDR_LEN) == 0) &&
270 ucast->vlan == tmp->vlan)
274 DP_INFO(edev, "Unicast MAC is not found\n");
277 SLIST_REMOVE(&qdev->uc_list_head, tmp, qede_ucast_entry, list);
285 qede_mcast_filter(struct rte_eth_dev *eth_dev, struct ecore_filter_ucast *mcast,
288 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
289 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
290 struct ether_addr *mac_addr;
291 struct qede_mcast_entry *tmp = NULL;
292 struct qede_mcast_entry *m;
294 mac_addr = (struct ether_addr *)mcast->mac;
296 SLIST_FOREACH(tmp, &qdev->mc_list_head, list) {
297 if (memcmp(mac_addr, &tmp->mac, ETHER_ADDR_LEN) == 0) {
299 "Multicast MAC is already added\n");
303 m = rte_malloc(NULL, sizeof(struct qede_mcast_entry),
304 RTE_CACHE_LINE_SIZE);
307 "Did not allocate memory for mcast\n");
310 ether_addr_copy(mac_addr, &m->mac);
311 SLIST_INSERT_HEAD(&qdev->mc_list_head, m, list);
314 SLIST_FOREACH(tmp, &qdev->mc_list_head, list) {
315 if (memcmp(mac_addr, &tmp->mac, ETHER_ADDR_LEN) == 0)
319 DP_INFO(edev, "Multicast mac is not found\n");
322 SLIST_REMOVE(&qdev->mc_list_head, tmp,
323 qede_mcast_entry, list);
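/*
 * Common MAC filter entry point: multicast addresses go through the shadow
 * mcast list and are re-programmed as a single ecore mcast command, while
 * unicast addresses go through the ucast list and an individual
 * ecore_filter_ucast_cmd().  Both paths enforce the filter-table limits
 * advertised in dev_info and suggest promiscuous mode when they are hit.
 */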
330 static enum _ecore_status_t
331 qede_mac_int_ops(struct rte_eth_dev *eth_dev, struct ecore_filter_ucast *ucast,
334 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
335 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
336 enum _ecore_status_t rc;
337 struct ecore_filter_mcast mcast;
338 struct qede_mcast_entry *tmp;
342 if (is_multicast_ether_addr((struct ether_addr *)ucast->mac)) {
344 if (qdev->num_mc_addr >= ECORE_MAX_MC_ADDRS) {
346 "Mcast filter table limit exceeded, "
347 "Please enable mcast promisc mode\n");
351 rc = qede_mcast_filter(eth_dev, ucast, add);
353 DP_INFO(edev, "num_mc_addrs = %u\n", qdev->num_mc_addr);
354 memset(&mcast, 0, sizeof(mcast));
355 mcast.num_mc_addrs = qdev->num_mc_addr;
356 mcast.opcode = ECORE_FILTER_ADD;
357 SLIST_FOREACH(tmp, &qdev->mc_list_head, list) {
358 ether_addr_copy(&tmp->mac,
359 (struct ether_addr *)&mcast.mac[j]);
362 rc = ecore_filter_mcast_cmd(edev, &mcast,
363 ECORE_SPQ_MODE_CB, NULL);
365 if (rc != ECORE_SUCCESS) {
366 DP_ERR(edev, "Failed to add multicast filter"
367 " rc = %d, op = %d\n", rc, add);
369 } else { /* Unicast */
371 if (qdev->num_uc_addr >= qdev->dev_info.num_mac_addrs) {
373 "Ucast filter table limit exceeded,"
374 " Please enable promisc mode\n");
378 rc = qede_ucast_filter(eth_dev, ucast, add);
380 rc = ecore_filter_ucast_cmd(edev, ucast,
381 ECORE_SPQ_MODE_CB, NULL);
382 if (rc != ECORE_SUCCESS) {
383 DP_ERR(edev, "MAC filter failed, rc = %d, op = %d\n",
392 qede_mac_addr_add(struct rte_eth_dev *eth_dev, struct ether_addr *mac_addr,
393 uint32_t index, __rte_unused uint32_t pool)
395 struct ecore_filter_ucast ucast;
397 qede_set_ucast_cmn_params(&ucast);
398 ucast.type = ECORE_FILTER_MAC;
399 ether_addr_copy(mac_addr, (struct ether_addr *)&ucast.mac);
400 (void)qede_mac_int_ops(eth_dev, &ucast, 1);
404 qede_mac_addr_remove(struct rte_eth_dev *eth_dev, uint32_t index)
406 struct qede_dev *qdev = eth_dev->data->dev_private;
407 struct ecore_dev *edev = &qdev->edev;
408 struct ether_addr mac_addr;
409 struct ecore_filter_ucast ucast;
412 PMD_INIT_FUNC_TRACE(edev);
414 if (index >= qdev->dev_info.num_mac_addrs) {
415 DP_ERR(edev, "Index %u is above MAC filter limit %u\n",
416 index, qdev->dev_info.num_mac_addrs);
420 qede_set_ucast_cmn_params(&ucast);
421 ucast.opcode = ECORE_FILTER_REMOVE;
422 ucast.type = ECORE_FILTER_MAC;
424 /* Use the index maintained by rte */
425 ether_addr_copy(ð_dev->data->mac_addrs[index],
426 (struct ether_addr *)&ucast.mac);
428 ecore_filter_ucast_cmd(edev, &ucast, ECORE_SPQ_MODE_CB, NULL);
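/*
 * Replace the primary MAC address.  On a VF the request may be refused when
 * the PF has forced a MAC via the bulletin board; otherwise the old primary
 * filter is removed first and the new address is added, reverting to the
 * previous default on failure.
 */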
432 qede_mac_addr_set(struct rte_eth_dev *eth_dev, struct ether_addr *mac_addr)
434 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
435 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
436 struct ecore_filter_ucast ucast;
439 if (IS_VF(edev) && !ecore_vf_check_mac(ECORE_LEADING_HWFN(edev),
440 mac_addr->addr_bytes)) {
441 DP_ERR(edev, "Setting MAC address is not allowed\n");
442 ether_addr_copy(&qdev->primary_mac,
443 ð_dev->data->mac_addrs[0]);
447 /* First remove the primary mac */
448 qede_set_ucast_cmn_params(&ucast);
449 ucast.opcode = ECORE_FILTER_REMOVE;
450 ucast.type = ECORE_FILTER_MAC;
451 ether_addr_copy(&qdev->primary_mac,
452 (struct ether_addr *)&ucast.mac);
453 rc = ecore_filter_ucast_cmd(edev, &ucast, ECORE_SPQ_MODE_CB, NULL);
455 DP_ERR(edev, "Unable to remove current macaddr"
456 " Reverting to previous default mac\n");
457 ether_addr_copy(&qdev->primary_mac,
458 ð_dev->data->mac_addrs[0]);
463 ucast.opcode = ECORE_FILTER_ADD;
464 ether_addr_copy(mac_addr, (struct ether_addr *)&ucast.mac);
465 rc = ecore_filter_ucast_cmd(edev, &ucast, ECORE_SPQ_MODE_CB, NULL);
467 DP_ERR(edev, "Unable to add new default mac\n");
469 ether_addr_copy(mac_addr, &qdev->primary_mac);
472 static void qede_config_accept_any_vlan(struct qede_dev *qdev, bool action)
474 struct ecore_dev *edev = &qdev->edev;
475 struct qed_update_vport_params params = {
477 .accept_any_vlan = action,
478 .update_accept_any_vlan_flg = 1,
482 /* Proceed only if action actually needs to be performed */
483 if (qdev->accept_any_vlan == action)
486 rc = qdev->ops->vport_update(edev, ¶ms);
488 DP_ERR(edev, "Failed to %s accept-any-vlan\n",
489 action ? "enable" : "disable");
491 DP_INFO(edev, "%s accept-any-vlan\n",
492 action ? "enabled" : "disabled");
493 qdev->accept_any_vlan = action;
497 static int qede_vlan_stripping(struct rte_eth_dev *eth_dev, bool set_stripping)
499 struct qed_update_vport_params vport_update_params;
500 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
501 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
504 memset(&vport_update_params, 0, sizeof(vport_update_params));
505 vport_update_params.vport_id = 0;
506 vport_update_params.update_inner_vlan_removal_flg = 1;
507 vport_update_params.inner_vlan_removal_flg = set_stripping;
508 rc = qdev->ops->vport_update(edev, &vport_update_params);
510 DP_ERR(edev, "Update V-PORT failed %d\n", rc);
517 static void qede_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask)
519 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
520 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
521 struct rte_eth_rxmode *rxmode = ð_dev->data->dev_conf.rxmode;
523 if (mask & ETH_VLAN_STRIP_MASK) {
524 if (rxmode->hw_vlan_strip)
525 (void)qede_vlan_stripping(eth_dev, 1);
527 (void)qede_vlan_stripping(eth_dev, 0);
530 if (mask & ETH_VLAN_FILTER_MASK) {
531 /* VLAN filtering kicks in when a VLAN is added */
532 if (rxmode->hw_vlan_filter) {
533 qede_vlan_filter_set(eth_dev, 0, 1);
535 if (qdev->configured_vlans > 1) { /* Excluding VLAN0 */
536 DP_NOTICE(edev, false,
537 " Please remove existing VLAN filters"
538 " before disabling VLAN filtering\n");
			/* Signal app that VLAN filtering is still
			 * enabled
			 */
542 rxmode->hw_vlan_filter = true;
544 qede_vlan_filter_set(eth_dev, 0, 0);
549 if (mask & ETH_VLAN_EXTEND_MASK)
550 DP_INFO(edev, "No offloads are supported with VLAN Q-in-Q"
551 " and classification is based on outer tag only\n");
553 DP_INFO(edev, "vlan offload mask %d vlan-strip %d vlan-filter %d\n",
554 mask, rxmode->hw_vlan_strip, rxmode->hw_vlan_filter);
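/*
 * Add or remove a single VLAN filter.  Configured VIDs are mirrored in
 * vlan_list_head so duplicates and unknown VIDs can be detected, and once
 * the hardware filter count is exhausted the port falls back to
 * accept-any-vlan instead of failing the request.
 */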
557 static int qede_vlan_filter_set(struct rte_eth_dev *eth_dev,
558 uint16_t vlan_id, int on)
560 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
561 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
562 struct qed_dev_eth_info *dev_info = &qdev->dev_info;
563 struct qede_vlan_entry *tmp = NULL;
564 struct qede_vlan_entry *vlan;
565 struct ecore_filter_ucast ucast;
569 if (qdev->configured_vlans == dev_info->num_vlan_filters) {
570 DP_INFO(edev, "Reached max VLAN filter limit"
571 " enabling accept_any_vlan\n");
572 qede_config_accept_any_vlan(qdev, true);
576 SLIST_FOREACH(tmp, &qdev->vlan_list_head, list) {
577 if (tmp->vid == vlan_id) {
578 DP_ERR(edev, "VLAN %u already configured\n",
584 vlan = rte_malloc(NULL, sizeof(struct qede_vlan_entry),
585 RTE_CACHE_LINE_SIZE);
588 DP_ERR(edev, "Did not allocate memory for VLAN\n");
592 qede_set_ucast_cmn_params(&ucast);
593 ucast.opcode = ECORE_FILTER_ADD;
594 ucast.type = ECORE_FILTER_VLAN;
595 ucast.vlan = vlan_id;
596 rc = ecore_filter_ucast_cmd(edev, &ucast, ECORE_SPQ_MODE_CB,
599 DP_ERR(edev, "Failed to add VLAN %u rc %d\n", vlan_id,
604 SLIST_INSERT_HEAD(&qdev->vlan_list_head, vlan, list);
605 qdev->configured_vlans++;
606 DP_INFO(edev, "VLAN %u added, configured_vlans %u\n",
607 vlan_id, qdev->configured_vlans);
610 SLIST_FOREACH(tmp, &qdev->vlan_list_head, list) {
611 if (tmp->vid == vlan_id)
616 if (qdev->configured_vlans == 0) {
618 "No VLAN filters configured yet\n");
622 DP_ERR(edev, "VLAN %u not configured\n", vlan_id);
626 SLIST_REMOVE(&qdev->vlan_list_head, tmp, qede_vlan_entry, list);
628 qede_set_ucast_cmn_params(&ucast);
629 ucast.opcode = ECORE_FILTER_REMOVE;
630 ucast.type = ECORE_FILTER_VLAN;
631 ucast.vlan = vlan_id;
632 rc = ecore_filter_ucast_cmd(edev, &ucast, ECORE_SPQ_MODE_CB,
635 DP_ERR(edev, "Failed to delete VLAN %u rc %d\n",
638 qdev->configured_vlans--;
639 DP_INFO(edev, "VLAN %u removed configured_vlans %u\n",
640 vlan_id, qdev->configured_vlans);
647 static int qede_init_vport(struct qede_dev *qdev)
649 struct ecore_dev *edev = &qdev->edev;
650 struct qed_start_vport_params start = {0};
653 start.remove_inner_vlan = 1;
654 start.gro_enable = 0;
655 start.mtu = ETHER_MTU + QEDE_ETH_OVERHEAD;
657 start.drop_ttl0 = false;
658 start.clear_stats = 1;
659 start.handle_ptp_pkts = 0;
661 rc = qdev->ops->vport_start(edev, &start);
663 DP_ERR(edev, "Start V-PORT failed %d\n", rc);
668 "Start vport ramrod passed, vport_id = %d, MTU = %u\n",
669 start.vport_id, ETHER_MTU);
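/*
 * dev_configure: validate queue counts (CMT/100G devices need an even
 * number of at least two Rx/Tx queues each), warn about unsupported rxmode
 * flags, tear down a previously started vport on reconfiguration, allocate
 * fastpath resources and start the vport with defaults so that per-queue
 * and filter configuration can follow.
 */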
674 static int qede_dev_configure(struct rte_eth_dev *eth_dev)
676 struct qede_dev *qdev = eth_dev->data->dev_private;
677 struct ecore_dev *edev = &qdev->edev;
678 struct rte_eth_rxmode *rxmode = ð_dev->data->dev_conf.rxmode;
681 PMD_INIT_FUNC_TRACE(edev);
683 /* Check requirements for 100G mode */
684 if (edev->num_hwfns > 1) {
685 if (eth_dev->data->nb_rx_queues < 2 ||
686 eth_dev->data->nb_tx_queues < 2) {
687 DP_NOTICE(edev, false,
688 "100G mode needs min. 2 RX/TX queues\n");
692 if ((eth_dev->data->nb_rx_queues % 2 != 0) ||
693 (eth_dev->data->nb_tx_queues % 2 != 0)) {
694 DP_NOTICE(edev, false,
695 "100G mode needs even no. of RX/TX queues\n");
700 /* Sanity checks and throw warnings */
701 if (rxmode->enable_scatter == 1)
702 eth_dev->data->scattered_rx = 1;
704 if (rxmode->enable_lro == 1) {
705 DP_INFO(edev, "LRO is not supported\n");
709 if (!rxmode->hw_strip_crc)
710 DP_INFO(edev, "L2 CRC stripping is always enabled in hw\n");
712 if (!rxmode->hw_ip_checksum)
713 DP_INFO(edev, "IP/UDP/TCP checksum offload is always enabled "
716 /* Check for the port restart case */
717 if (qdev->state != QEDE_DEV_INIT) {
718 rc = qdev->ops->vport_stop(edev, 0);
721 qede_dealloc_fp_resc(eth_dev);
724 qdev->fp_num_tx = eth_dev->data->nb_tx_queues;
725 qdev->fp_num_rx = eth_dev->data->nb_rx_queues;
726 qdev->num_queues = qdev->fp_num_tx + qdev->fp_num_rx;
728 /* Fastpath status block should be initialized before sending
729 * VPORT-START in the case of VF. Anyway, do it for both VF/PF.
731 rc = qede_alloc_fp_resc(qdev);
735 /* Issue VPORT-START with default config values to allow
736 * other port configurations early on.
738 rc = qede_init_vport(qdev);
742 SLIST_INIT(&qdev->vlan_list_head);
744 /* Add primary mac for PF */
746 qede_mac_addr_set(eth_dev, &qdev->primary_mac);
748 /* Enable VLAN offloads by default */
749 qede_vlan_offload_set(eth_dev, ETH_VLAN_STRIP_MASK |
750 ETH_VLAN_FILTER_MASK |
751 ETH_VLAN_EXTEND_MASK);
753 qdev->state = QEDE_DEV_CONFIG;
755 DP_INFO(edev, "Allocated RSS=%d TSS=%d (with CoS=%d)\n",
756 (int)QEDE_RSS_COUNT(qdev), (int)QEDE_TSS_COUNT(qdev),
762 /* Info about HW descriptor ring limitations */
763 static const struct rte_eth_desc_lim qede_rx_desc_lim = {
764 .nb_max = NUM_RX_BDS_MAX,
766 .nb_align = 128 /* lowest common multiple */
769 static const struct rte_eth_desc_lim qede_tx_desc_lim = {
770 .nb_max = NUM_TX_BDS_MAX,
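/*
 * Report device capabilities: queue and MAC filter limits, descriptor ring
 * limits, Rx/Tx checksum and VLAN offload flags, RSS indirection table size
 * and a speed capability mask derived from the NVM-advertised link speeds.
 */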
776 qede_dev_info_get(struct rte_eth_dev *eth_dev,
777 struct rte_eth_dev_info *dev_info)
779 struct qede_dev *qdev = eth_dev->data->dev_private;
780 struct ecore_dev *edev = &qdev->edev;
781 struct qed_link_output link;
782 uint32_t speed_cap = 0;
784 PMD_INIT_FUNC_TRACE(edev);
786 dev_info->min_rx_bufsize = (uint32_t)(ETHER_MIN_MTU +
788 dev_info->max_rx_pktlen = (uint32_t)ETH_TX_MAX_NON_LSO_PKT_LEN;
789 dev_info->rx_desc_lim = qede_rx_desc_lim;
790 dev_info->tx_desc_lim = qede_tx_desc_lim;
791 dev_info->max_rx_queues = (uint16_t)QEDE_MAX_RSS_CNT(qdev);
792 dev_info->max_tx_queues = dev_info->max_rx_queues;
793 dev_info->max_mac_addrs = qdev->dev_info.num_mac_addrs;
795 dev_info->max_vfs = 0;
797 dev_info->max_vfs = (uint16_t)NUM_OF_VFS(&qdev->edev);
798 dev_info->driver_name = qdev->drv_ver;
799 dev_info->reta_size = ECORE_RSS_IND_TABLE_SIZE;
800 dev_info->flow_type_rss_offloads = (uint64_t)QEDE_RSS_OFFLOAD_ALL;
802 dev_info->default_txconf = (struct rte_eth_txconf) {
803 .txq_flags = QEDE_TXQ_FLAGS,
806 dev_info->rx_offload_capa = (DEV_RX_OFFLOAD_VLAN_STRIP |
807 DEV_RX_OFFLOAD_IPV4_CKSUM |
808 DEV_RX_OFFLOAD_UDP_CKSUM |
809 DEV_RX_OFFLOAD_TCP_CKSUM);
810 dev_info->tx_offload_capa = (DEV_TX_OFFLOAD_VLAN_INSERT |
811 DEV_TX_OFFLOAD_IPV4_CKSUM |
812 DEV_TX_OFFLOAD_UDP_CKSUM |
813 DEV_TX_OFFLOAD_TCP_CKSUM);
815 memset(&link, 0, sizeof(struct qed_link_output));
816 qdev->ops->common->get_link(edev, &link);
817 if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
818 speed_cap |= ETH_LINK_SPEED_1G;
819 if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
820 speed_cap |= ETH_LINK_SPEED_10G;
821 if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G)
822 speed_cap |= ETH_LINK_SPEED_25G;
823 if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G)
824 speed_cap |= ETH_LINK_SPEED_40G;
825 if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
826 speed_cap |= ETH_LINK_SPEED_50G;
827 if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G)
828 speed_cap |= ETH_LINK_SPEED_100G;
829 dev_info->speed_capa = speed_cap;
832 /* return 0 means link status changed, -1 means not changed */
834 qede_link_update(struct rte_eth_dev *eth_dev, __rte_unused int wait_to_complete)
836 struct qede_dev *qdev = eth_dev->data->dev_private;
837 struct ecore_dev *edev = &qdev->edev;
838 uint16_t link_duplex;
839 struct qed_link_output link;
840 struct rte_eth_link *curr = ð_dev->data->dev_link;
842 memset(&link, 0, sizeof(struct qed_link_output));
843 qdev->ops->common->get_link(edev, &link);
846 curr->link_speed = link.speed;
849 switch (link.duplex) {
850 case QEDE_DUPLEX_HALF:
851 link_duplex = ETH_LINK_HALF_DUPLEX;
853 case QEDE_DUPLEX_FULL:
854 link_duplex = ETH_LINK_FULL_DUPLEX;
856 case QEDE_DUPLEX_UNKNOWN:
860 curr->link_duplex = link_duplex;
863 curr->link_status = (link.link_up) ? ETH_LINK_UP : ETH_LINK_DOWN;
866 curr->link_autoneg = (link.supported_caps & QEDE_SUPPORTED_AUTONEG) ?
867 ETH_LINK_AUTONEG : ETH_LINK_FIXED;
869 DP_INFO(edev, "Link - Speed %u Mode %u AN %u Status %u\n",
870 curr->link_speed, curr->link_duplex,
871 curr->link_autoneg, curr->link_status);
873 /* return 0 means link status changed, -1 means not changed */
874 return ((curr->link_status == link.link_up) ? -1 : 0);
877 static void qede_promiscuous_enable(struct rte_eth_dev *eth_dev)
879 struct qede_dev *qdev = eth_dev->data->dev_private;
880 struct ecore_dev *edev = &qdev->edev;
882 PMD_INIT_FUNC_TRACE(edev);
884 enum qed_filter_rx_mode_type type = QED_FILTER_RX_MODE_TYPE_PROMISC;
886 if (rte_eth_allmulticast_get(eth_dev->data->port_id) == 1)
887 type |= QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC;
889 qed_configure_filter_rx_mode(eth_dev, type);
892 static void qede_promiscuous_disable(struct rte_eth_dev *eth_dev)
894 struct qede_dev *qdev = eth_dev->data->dev_private;
895 struct ecore_dev *edev = &qdev->edev;
897 PMD_INIT_FUNC_TRACE(edev);
899 if (rte_eth_allmulticast_get(eth_dev->data->port_id) == 1)
900 qed_configure_filter_rx_mode(eth_dev,
901 QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC);
903 qed_configure_filter_rx_mode(eth_dev,
904 QED_FILTER_RX_MODE_TYPE_REGULAR);
907 static void qede_poll_sp_sb_cb(void *param)
909 struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param;
910 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
911 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
914 qede_interrupt_action(ECORE_LEADING_HWFN(edev));
915 qede_interrupt_action(&edev->hwfns[1]);
917 rc = rte_eal_alarm_set(timer_period * US_PER_S,
921 DP_ERR(edev, "Unable to start periodic"
922 " timer rc %d\n", rc);
923 assert(false && "Unable to start periodic timer");
927 static void qede_dev_close(struct rte_eth_dev *eth_dev)
929 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
930 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
933 PMD_INIT_FUNC_TRACE(edev);
935 /* dev_stop() shall cleanup fp resources in hw but without releasing
936 * dma memories and sw structures so that dev_start() can be called
937 * by the app without reconfiguration. However, in dev_close() we
938 * can release all the resources and device can be brought up newly
940 if (qdev->state != QEDE_DEV_STOP)
941 qede_dev_stop(eth_dev);
943 DP_INFO(edev, "Device is already stopped\n");
945 rc = qdev->ops->vport_stop(edev, 0);
947 DP_ERR(edev, "Failed to stop VPORT\n");
949 qede_dealloc_fp_resc(eth_dev);
951 qdev->ops->common->slowpath_stop(edev);
953 qdev->ops->common->remove(edev);
955 rte_intr_disable(ð_dev->pci_dev->intr_handle);
957 rte_intr_callback_unregister(ð_dev->pci_dev->intr_handle,
958 qede_interrupt_handler, (void *)eth_dev);
960 if (edev->num_hwfns > 1)
961 rte_eal_alarm_cancel(qede_poll_sp_sb_cb, (void *)eth_dev);
963 qdev->state = QEDE_DEV_INIT; /* Go back to init state */
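/*
 * Fill struct rte_eth_stats from the vport-wide ecore counters, then copy
 * per-queue Rx/Tx counters for at most RTE_ETHDEV_QUEUE_STAT_CNTRS queues;
 * a debug message notes when queues beyond that limit are not reported.
 */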
967 qede_get_stats(struct rte_eth_dev *eth_dev, struct rte_eth_stats *eth_stats)
969 struct qede_dev *qdev = eth_dev->data->dev_private;
970 struct ecore_dev *edev = &qdev->edev;
971 struct ecore_eth_stats stats;
972 unsigned int i = 0, j = 0, qid;
973 unsigned int rxq_stat_cntrs, txq_stat_cntrs;
974 struct qede_tx_queue *txq;
976 qdev->ops->get_vport_stats(edev, &stats);
979 eth_stats->ipackets = stats.rx_ucast_pkts +
980 stats.rx_mcast_pkts + stats.rx_bcast_pkts;
982 eth_stats->ibytes = stats.rx_ucast_bytes +
983 stats.rx_mcast_bytes + stats.rx_bcast_bytes;
985 eth_stats->ierrors = stats.rx_crc_errors +
986 stats.rx_align_errors +
987 stats.rx_carrier_errors +
988 stats.rx_oversize_packets +
989 stats.rx_jabbers + stats.rx_undersize_packets;
991 eth_stats->rx_nombuf = stats.no_buff_discards;
993 eth_stats->imissed = stats.mftag_filter_discards +
994 stats.mac_filter_discards +
995 stats.no_buff_discards + stats.brb_truncates + stats.brb_discards;
998 eth_stats->opackets = stats.tx_ucast_pkts +
999 stats.tx_mcast_pkts + stats.tx_bcast_pkts;
1001 eth_stats->obytes = stats.tx_ucast_bytes +
1002 stats.tx_mcast_bytes + stats.tx_bcast_bytes;
1004 eth_stats->oerrors = stats.tx_err_drop_pkts;
1007 rxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(qdev),
1008 RTE_ETHDEV_QUEUE_STAT_CNTRS);
1009 txq_stat_cntrs = RTE_MIN(QEDE_TSS_COUNT(qdev),
1010 RTE_ETHDEV_QUEUE_STAT_CNTRS);
1011 if ((rxq_stat_cntrs != QEDE_RSS_COUNT(qdev)) ||
1012 (txq_stat_cntrs != QEDE_TSS_COUNT(qdev)))
1013 DP_VERBOSE(edev, ECORE_MSG_DEBUG,
1014 "Not all the queue stats will be displayed. Set"
1015 " RTE_ETHDEV_QUEUE_STAT_CNTRS config param"
1016 " appropriately and retry.\n");
1018 for (qid = 0; qid < QEDE_QUEUE_CNT(qdev); qid++) {
1019 if (qdev->fp_array[qid].type & QEDE_FASTPATH_RX) {
1020 eth_stats->q_ipackets[i] =
1022 ((char *)(qdev->fp_array[(qid)].rxq)) +
1023 offsetof(struct qede_rx_queue,
1025 eth_stats->q_errors[i] =
1027 ((char *)(qdev->fp_array[(qid)].rxq)) +
1028 offsetof(struct qede_rx_queue,
1031 ((char *)(qdev->fp_array[(qid)].rxq)) +
1032 offsetof(struct qede_rx_queue,
1036 if (i == rxq_stat_cntrs)
1040 for (qid = 0; qid < QEDE_QUEUE_CNT(qdev); qid++) {
1041 if (qdev->fp_array[qid].type & QEDE_FASTPATH_TX) {
1042 txq = qdev->fp_array[(qid)].txqs[0];
1043 eth_stats->q_opackets[j] =
1044 *((uint64_t *)(uintptr_t)
1045 (((uint64_t)(uintptr_t)(txq)) +
1046 offsetof(struct qede_tx_queue,
1050 if (j == txq_stat_cntrs)
qede_get_xstats_count(struct qede_dev *qdev)
{
1057 return RTE_DIM(qede_xstats_strings) +
1058 (RTE_DIM(qede_rxq_xstats_strings) *
1059 RTE_MIN(QEDE_RSS_COUNT(qdev),
1060 RTE_ETHDEV_QUEUE_STAT_CNTRS));
qede_get_xstats_names(struct rte_eth_dev *dev,
1065 struct rte_eth_xstat_name *xstats_names, unsigned limit)
1067 struct qede_dev *qdev = dev->data->dev_private;
1068 const unsigned int stat_cnt = qede_get_xstats_count(qdev);
1069 unsigned int i, qid, stat_idx = 0;
1070 unsigned int rxq_stat_cntrs;
1072 if (xstats_names != NULL) {
1073 for (i = 0; i < RTE_DIM(qede_xstats_strings); i++) {
1074 snprintf(xstats_names[stat_idx].name,
1075 sizeof(xstats_names[stat_idx].name),
1077 qede_xstats_strings[i].name);
1081 rxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(qdev),
1082 RTE_ETHDEV_QUEUE_STAT_CNTRS);
1083 for (qid = 0; qid < rxq_stat_cntrs; qid++) {
1084 for (i = 0; i < RTE_DIM(qede_rxq_xstats_strings); i++) {
1085 snprintf(xstats_names[stat_idx].name,
1086 sizeof(xstats_names[stat_idx].name),
1088 qede_rxq_xstats_strings[i].name, qid,
1089 qede_rxq_xstats_strings[i].name + 4);
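/*
 * These callbacks back rte_eth_xstats_get_names()/rte_eth_xstats_get().
 * A minimal application-side sketch (illustrative only, hypothetical
 * variable names) would be:
 *
 *	struct rte_eth_xstat xstats[64];
 *	int n = rte_eth_xstats_get(port_id, xstats, RTE_DIM(xstats));
 *
 * where each returned value is read out of the stats structures via the
 * offset tables defined at the top of this file.
 */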
1099 qede_get_xstats(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
1102 struct qede_dev *qdev = dev->data->dev_private;
1103 struct ecore_dev *edev = &qdev->edev;
1104 struct ecore_eth_stats stats;
1105 const unsigned int num = qede_get_xstats_count(qdev);
1106 unsigned int i, qid, stat_idx = 0;
1107 unsigned int rxq_stat_cntrs;
1112 qdev->ops->get_vport_stats(edev, &stats);
1114 for (i = 0; i < RTE_DIM(qede_xstats_strings); i++) {
1115 xstats[stat_idx].value = *(uint64_t *)(((char *)&stats) +
1116 qede_xstats_strings[i].offset);
1117 xstats[stat_idx].id = stat_idx;
1121 rxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(qdev),
1122 RTE_ETHDEV_QUEUE_STAT_CNTRS);
1123 for (qid = 0; qid < rxq_stat_cntrs; qid++) {
1124 if (qdev->fp_array[qid].type & QEDE_FASTPATH_RX) {
1125 for (i = 0; i < RTE_DIM(qede_rxq_xstats_strings); i++) {
1126 xstats[stat_idx].value = *(uint64_t *)(
1127 ((char *)(qdev->fp_array[(qid)].rxq)) +
1128 qede_rxq_xstats_strings[i].offset);
1129 xstats[stat_idx].id = stat_idx;
1139 qede_reset_xstats(struct rte_eth_dev *dev)
1141 struct qede_dev *qdev = dev->data->dev_private;
1142 struct ecore_dev *edev = &qdev->edev;
1144 ecore_reset_vport_stats(edev);
1147 int qede_dev_set_link_state(struct rte_eth_dev *eth_dev, bool link_up)
1149 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
1150 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
1151 struct qed_link_params link_params;
1154 DP_INFO(edev, "setting link state %d\n", link_up);
1155 memset(&link_params, 0, sizeof(link_params));
1156 link_params.link_up = link_up;
1157 rc = qdev->ops->common->set_link(edev, &link_params);
1158 if (rc != ECORE_SUCCESS)
1159 DP_ERR(edev, "Unable to set link state %d\n", link_up);
1164 static int qede_dev_set_link_up(struct rte_eth_dev *eth_dev)
1166 return qede_dev_set_link_state(eth_dev, true);
1169 static int qede_dev_set_link_down(struct rte_eth_dev *eth_dev)
1171 return qede_dev_set_link_state(eth_dev, false);
1174 static void qede_reset_stats(struct rte_eth_dev *eth_dev)
1176 struct qede_dev *qdev = eth_dev->data->dev_private;
1177 struct ecore_dev *edev = &qdev->edev;
1179 ecore_reset_vport_stats(edev);
1182 static void qede_allmulticast_enable(struct rte_eth_dev *eth_dev)
1184 enum qed_filter_rx_mode_type type =
1185 QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC;
1187 if (rte_eth_promiscuous_get(eth_dev->data->port_id) == 1)
1188 type |= QED_FILTER_RX_MODE_TYPE_PROMISC;
1190 qed_configure_filter_rx_mode(eth_dev, type);
1193 static void qede_allmulticast_disable(struct rte_eth_dev *eth_dev)
1195 if (rte_eth_promiscuous_get(eth_dev->data->port_id) == 1)
1196 qed_configure_filter_rx_mode(eth_dev,
1197 QED_FILTER_RX_MODE_TYPE_PROMISC);
1199 qed_configure_filter_rx_mode(eth_dev,
1200 QED_FILTER_RX_MODE_TYPE_REGULAR);
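/*
 * Translate rte_eth_fc_conf into the qed pause configuration: optional
 * pause autonegotiation plus Rx/Tx pause enable bits, applied through a
 * set_link request with QED_LINK_OVERRIDE_PAUSE_CONFIG.
 */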
1203 static int qede_flow_ctrl_set(struct rte_eth_dev *eth_dev,
1204 struct rte_eth_fc_conf *fc_conf)
1206 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
1207 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
1208 struct qed_link_output current_link;
1209 struct qed_link_params params;
1211 memset(¤t_link, 0, sizeof(current_link));
1212 qdev->ops->common->get_link(edev, ¤t_link);
1214 memset(¶ms, 0, sizeof(params));
1215 params.override_flags |= QED_LINK_OVERRIDE_PAUSE_CONFIG;
1216 if (fc_conf->autoneg) {
1217 if (!(current_link.supported_caps & QEDE_SUPPORTED_AUTONEG)) {
1218 DP_ERR(edev, "Autoneg not supported\n");
1221 params.pause_config |= QED_LINK_PAUSE_AUTONEG_ENABLE;
1224 /* Pause is assumed to be supported (SUPPORTED_Pause) */
1225 if (fc_conf->mode == RTE_FC_FULL)
1226 params.pause_config |= (QED_LINK_PAUSE_TX_ENABLE |
1227 QED_LINK_PAUSE_RX_ENABLE);
1228 if (fc_conf->mode == RTE_FC_TX_PAUSE)
1229 params.pause_config |= QED_LINK_PAUSE_TX_ENABLE;
1230 if (fc_conf->mode == RTE_FC_RX_PAUSE)
1231 params.pause_config |= QED_LINK_PAUSE_RX_ENABLE;
1233 params.link_up = true;
1234 (void)qdev->ops->common->set_link(edev, ¶ms);
1239 static int qede_flow_ctrl_get(struct rte_eth_dev *eth_dev,
1240 struct rte_eth_fc_conf *fc_conf)
1242 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
1243 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
1244 struct qed_link_output current_link;
1246 memset(¤t_link, 0, sizeof(current_link));
1247 qdev->ops->common->get_link(edev, ¤t_link);
1249 if (current_link.pause_config & QED_LINK_PAUSE_AUTONEG_ENABLE)
1250 fc_conf->autoneg = true;
1252 if (current_link.pause_config & (QED_LINK_PAUSE_RX_ENABLE |
1253 QED_LINK_PAUSE_TX_ENABLE))
1254 fc_conf->mode = RTE_FC_FULL;
1255 else if (current_link.pause_config & QED_LINK_PAUSE_RX_ENABLE)
1256 fc_conf->mode = RTE_FC_RX_PAUSE;
1257 else if (current_link.pause_config & QED_LINK_PAUSE_TX_ENABLE)
1258 fc_conf->mode = RTE_FC_TX_PAUSE;
1260 fc_conf->mode = RTE_FC_NONE;
1265 static const uint32_t *
1266 qede_dev_supported_ptypes_get(struct rte_eth_dev *eth_dev)
1268 static const uint32_t ptypes[] = {
1274 if (eth_dev->rx_pkt_burst == qede_recv_pkts)
1280 void qede_init_rss_caps(uint8_t *rss_caps, uint64_t hf)
1283 *rss_caps |= (hf & ETH_RSS_IPV4) ? ECORE_RSS_IPV4 : 0;
1284 *rss_caps |= (hf & ETH_RSS_IPV6) ? ECORE_RSS_IPV6 : 0;
1285 *rss_caps |= (hf & ETH_RSS_IPV6_EX) ? ECORE_RSS_IPV6 : 0;
1286 *rss_caps |= (hf & ETH_RSS_NONFRAG_IPV4_TCP) ? ECORE_RSS_IPV4_TCP : 0;
1287 *rss_caps |= (hf & ETH_RSS_NONFRAG_IPV6_TCP) ? ECORE_RSS_IPV6_TCP : 0;
1288 *rss_caps |= (hf & ETH_RSS_IPV6_TCP_EX) ? ECORE_RSS_IPV6_TCP : 0;
1291 static int qede_rss_hash_update(struct rte_eth_dev *eth_dev,
1292 struct rte_eth_rss_conf *rss_conf)
1294 struct qed_update_vport_params vport_update_params;
1295 struct qede_dev *qdev = eth_dev->data->dev_private;
1296 struct ecore_dev *edev = &qdev->edev;
1297 uint32_t *key = (uint32_t *)rss_conf->rss_key;
1298 uint64_t hf = rss_conf->rss_hf;
1301 memset(&vport_update_params, 0, sizeof(vport_update_params));
1305 qede_init_rss_caps(&qdev->rss_params.rss_caps, hf);
1306 memcpy(&vport_update_params.rss_params, &qdev->rss_params,
1307 sizeof(vport_update_params.rss_params));
1309 memcpy(qdev->rss_params.rss_key, rss_conf->rss_key,
1310 rss_conf->rss_key_len);
1311 vport_update_params.update_rss_flg = 1;
1312 qdev->rss_enabled = 1;
1315 qdev->rss_enabled = 0;
1318 /* If the mapping doesn't fit any supported, return */
1319 if (qdev->rss_params.rss_caps == 0 && hf != 0)
1322 DP_INFO(edev, "%s\n", (vport_update_params.update_rss_flg) ?
1323 "Enabling RSS" : "Disabling RSS");
1325 vport_update_params.vport_id = 0;
1327 return qdev->ops->vport_update(edev, &vport_update_params);
1330 int qede_rss_hash_conf_get(struct rte_eth_dev *eth_dev,
1331 struct rte_eth_rss_conf *rss_conf)
1333 struct qede_dev *qdev = eth_dev->data->dev_private;
1336 if (rss_conf->rss_key_len < sizeof(qdev->rss_params.rss_key))
1339 if (rss_conf->rss_key)
1340 memcpy(rss_conf->rss_key, qdev->rss_params.rss_key,
1341 sizeof(qdev->rss_params.rss_key));
1344 hf |= (qdev->rss_params.rss_caps & ECORE_RSS_IPV4) ?
1346 hf |= (qdev->rss_params.rss_caps & ECORE_RSS_IPV6) ?
1348 hf |= (qdev->rss_params.rss_caps & ECORE_RSS_IPV6) ?
1349 ETH_RSS_IPV6_EX : 0;
1350 hf |= (qdev->rss_params.rss_caps & ECORE_RSS_IPV4_TCP) ?
1351 ETH_RSS_NONFRAG_IPV4_TCP : 0;
1352 hf |= (qdev->rss_params.rss_caps & ECORE_RSS_IPV6_TCP) ?
1353 ETH_RSS_NONFRAG_IPV6_TCP : 0;
1354 hf |= (qdev->rss_params.rss_caps & ECORE_RSS_IPV6_TCP) ?
1355 ETH_RSS_IPV6_TCP_EX : 0;
1357 rss_conf->rss_hf = hf;
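/*
 * Program the RSS redirection table: up to ETH_RSS_RETA_SIZE_128 entries
 * are copied from the rte reta groups into rss_params.rss_ind_table and
 * applied with a vport update.
 */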
1362 static int qede_rss_reta_update(struct rte_eth_dev *eth_dev,
1363 struct rte_eth_rss_reta_entry64 *reta_conf,
1366 struct qed_update_vport_params vport_update_params;
1367 struct qede_dev *qdev = eth_dev->data->dev_private;
1368 struct ecore_dev *edev = &qdev->edev;
1369 uint16_t i, idx, shift;
1371 if (reta_size > ETH_RSS_RETA_SIZE_128) {
1372 DP_ERR(edev, "reta_size %d is not supported by hardware\n",
1377 memset(&vport_update_params, 0, sizeof(vport_update_params));
1378 memcpy(&vport_update_params.rss_params, &qdev->rss_params,
1379 sizeof(vport_update_params.rss_params));
1381 for (i = 0; i < reta_size; i++) {
1382 idx = i / RTE_RETA_GROUP_SIZE;
1383 shift = i % RTE_RETA_GROUP_SIZE;
1384 if (reta_conf[idx].mask & (1ULL << shift)) {
1385 uint8_t entry = reta_conf[idx].reta[shift];
1386 qdev->rss_params.rss_ind_table[i] = entry;
1390 vport_update_params.update_rss_flg = 1;
1391 vport_update_params.vport_id = 0;
1393 return qdev->ops->vport_update(edev, &vport_update_params);
1396 int qede_rss_reta_query(struct rte_eth_dev *eth_dev,
1397 struct rte_eth_rss_reta_entry64 *reta_conf,
1400 struct qede_dev *qdev = eth_dev->data->dev_private;
1401 uint16_t i, idx, shift;
1403 if (reta_size > ETH_RSS_RETA_SIZE_128) {
1404 struct ecore_dev *edev = &qdev->edev;
1405 DP_ERR(edev, "reta_size %d is not supported\n",
1409 for (i = 0; i < reta_size; i++) {
1410 idx = i / RTE_RETA_GROUP_SIZE;
1411 shift = i % RTE_RETA_GROUP_SIZE;
1412 if (reta_conf[idx].mask & (1ULL << shift)) {
1413 uint8_t entry = qdev->rss_params.rss_ind_table[i];
1414 reta_conf[idx].reta[shift] = entry;
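/*
 * MTU change: validate the resulting frame size against the device limits,
 * toggle the jumbo_frame flag around ETHER_MAX_LEN, record the new
 * max_rx_pkt_len, and (re)start the port so the new frame size takes
 * effect.
 */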
1421 int qede_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
1423 uint32_t frame_size;
1424 struct qede_dev *qdev = dev->data->dev_private;
1425 struct rte_eth_dev_info dev_info = {0};
1427 qede_dev_info_get(dev, &dev_info);
1430 frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + 4;
1432 if ((mtu < ETHER_MIN_MTU) || (frame_size > dev_info.max_rx_pktlen))
1435 if (!dev->data->scattered_rx &&
1436 frame_size > dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)
1439 if (frame_size > ETHER_MAX_LEN)
1440 dev->data->dev_conf.rxmode.jumbo_frame = 1;
1442 dev->data->dev_conf.rxmode.jumbo_frame = 0;
1444 /* update max frame size */
1445 dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
1448 qede_dev_start(dev);
1453 static const struct eth_dev_ops qede_eth_dev_ops = {
1454 .dev_configure = qede_dev_configure,
1455 .dev_infos_get = qede_dev_info_get,
1456 .rx_queue_setup = qede_rx_queue_setup,
1457 .rx_queue_release = qede_rx_queue_release,
1458 .tx_queue_setup = qede_tx_queue_setup,
1459 .tx_queue_release = qede_tx_queue_release,
1460 .dev_start = qede_dev_start,
1461 .dev_set_link_up = qede_dev_set_link_up,
1462 .dev_set_link_down = qede_dev_set_link_down,
1463 .link_update = qede_link_update,
1464 .promiscuous_enable = qede_promiscuous_enable,
1465 .promiscuous_disable = qede_promiscuous_disable,
1466 .allmulticast_enable = qede_allmulticast_enable,
1467 .allmulticast_disable = qede_allmulticast_disable,
1468 .dev_stop = qede_dev_stop,
1469 .dev_close = qede_dev_close,
1470 .stats_get = qede_get_stats,
1471 .stats_reset = qede_reset_stats,
1472 .xstats_get = qede_get_xstats,
1473 .xstats_reset = qede_reset_xstats,
1474 .xstats_get_names = qede_get_xstats_names,
1475 .mac_addr_add = qede_mac_addr_add,
1476 .mac_addr_remove = qede_mac_addr_remove,
1477 .mac_addr_set = qede_mac_addr_set,
1478 .vlan_offload_set = qede_vlan_offload_set,
1479 .vlan_filter_set = qede_vlan_filter_set,
1480 .flow_ctrl_set = qede_flow_ctrl_set,
1481 .flow_ctrl_get = qede_flow_ctrl_get,
1482 .dev_supported_ptypes_get = qede_dev_supported_ptypes_get,
1483 .rss_hash_update = qede_rss_hash_update,
1484 .rss_hash_conf_get = qede_rss_hash_conf_get,
1485 .reta_update = qede_rss_reta_update,
1486 .reta_query = qede_rss_reta_query,
1487 .mtu_set = qede_set_mtu,
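/*
 * The VF ops table below mirrors the PF table but omits the MAC filter
 * manipulation and flow control callbacks, which this driver exposes only
 * for the PF.
 */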
1490 static const struct eth_dev_ops qede_eth_vf_dev_ops = {
1491 .dev_configure = qede_dev_configure,
1492 .dev_infos_get = qede_dev_info_get,
1493 .rx_queue_setup = qede_rx_queue_setup,
1494 .rx_queue_release = qede_rx_queue_release,
1495 .tx_queue_setup = qede_tx_queue_setup,
1496 .tx_queue_release = qede_tx_queue_release,
1497 .dev_start = qede_dev_start,
1498 .dev_set_link_up = qede_dev_set_link_up,
1499 .dev_set_link_down = qede_dev_set_link_down,
1500 .link_update = qede_link_update,
1501 .promiscuous_enable = qede_promiscuous_enable,
1502 .promiscuous_disable = qede_promiscuous_disable,
1503 .allmulticast_enable = qede_allmulticast_enable,
1504 .allmulticast_disable = qede_allmulticast_disable,
1505 .dev_stop = qede_dev_stop,
1506 .dev_close = qede_dev_close,
1507 .stats_get = qede_get_stats,
1508 .stats_reset = qede_reset_stats,
1509 .xstats_get = qede_get_xstats,
1510 .xstats_reset = qede_reset_xstats,
1511 .xstats_get_names = qede_get_xstats_names,
1512 .vlan_offload_set = qede_vlan_offload_set,
1513 .vlan_filter_set = qede_vlan_filter_set,
1514 .dev_supported_ptypes_get = qede_dev_supported_ptypes_get,
1515 .rss_hash_update = qede_rss_hash_update,
1516 .rss_hash_conf_get = qede_rss_hash_conf_get,
1517 .reta_update = qede_rss_reta_update,
1518 .reta_query = qede_rss_reta_query,
1519 .mtu_set = qede_set_mtu,
1522 static void qede_update_pf_params(struct ecore_dev *edev)
1524 struct ecore_pf_params pf_params;
1526 memset(&pf_params, 0, sizeof(struct ecore_pf_params));
1527 pf_params.eth_pf_params.num_cons = 64;
1528 qed_ops->common->update_pf_params(edev, &pf_params);
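/*
 * Shared PF/VF init path: probe the ecore device, register the interrupt
 * handler, start the slowpath (with a periodic alarm standing in for the
 * second engine's interrupt on CMT devices), query device info, allocate
 * the MAC address table and pick the PF or VF ops table.  The primary MAC
 * comes from HW info on the PF and from the PF bulletin board on a VF.
 */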
1531 static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
1533 struct rte_pci_device *pci_dev;
1534 struct rte_pci_addr pci_addr;
1535 struct qede_dev *adapter;
1536 struct ecore_dev *edev;
1537 struct qed_dev_eth_info dev_info;
1538 struct qed_slowpath_params params;
1539 static bool do_once = true;
1540 uint8_t bulletin_change;
1541 uint8_t vf_mac[ETHER_ADDR_LEN];
1542 uint8_t is_mac_forced;
1544 /* Fix up ecore debug level */
1545 uint32_t dp_module = ~0 & ~ECORE_MSG_HW;
1546 uint8_t dp_level = ECORE_LEVEL_VERBOSE;
1547 uint32_t max_mac_addrs;
1550 /* Extract key data structures */
1551 adapter = eth_dev->data->dev_private;
1552 edev = &adapter->edev;
1553 pci_addr = eth_dev->pci_dev->addr;
1555 PMD_INIT_FUNC_TRACE(edev);
1557 snprintf(edev->name, NAME_SIZE, PCI_SHORT_PRI_FMT ":dpdk-port-%u",
1558 pci_addr.bus, pci_addr.devid, pci_addr.function,
1559 eth_dev->data->port_id);
1561 eth_dev->rx_pkt_burst = qede_recv_pkts;
1562 eth_dev->tx_pkt_burst = qede_xmit_pkts;
1564 if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
1565 DP_NOTICE(edev, false,
1566 "Skipping device init from secondary process\n");
1570 pci_dev = eth_dev->pci_dev;
1572 rte_eth_copy_pci_info(eth_dev, pci_dev);
1575 edev->vendor_id = pci_dev->id.vendor_id;
1576 edev->device_id = pci_dev->id.device_id;
1578 qed_ops = qed_get_eth_ops();
1580 DP_ERR(edev, "Failed to get qed_eth_ops_pass\n");
1584 DP_INFO(edev, "Starting qede probe\n");
1586 rc = qed_ops->common->probe(edev, pci_dev, QED_PROTOCOL_ETH,
1587 dp_module, dp_level, is_vf);
1590 DP_ERR(edev, "qede probe failed rc %d\n", rc);
1594 qede_update_pf_params(edev);
1596 rte_intr_callback_register(ð_dev->pci_dev->intr_handle,
1597 qede_interrupt_handler, (void *)eth_dev);
1599 if (rte_intr_enable(ð_dev->pci_dev->intr_handle)) {
1600 DP_ERR(edev, "rte_intr_enable() failed\n");
1604 /* Start the Slowpath-process */
1605 memset(¶ms, 0, sizeof(struct qed_slowpath_params));
1606 params.int_mode = ECORE_INT_MODE_MSIX;
1607 params.drv_major = QEDE_PMD_VERSION_MAJOR;
1608 params.drv_minor = QEDE_PMD_VERSION_MINOR;
1609 params.drv_rev = QEDE_PMD_VERSION_REVISION;
1610 params.drv_eng = QEDE_PMD_VERSION_PATCH;
1611 strncpy((char *)params.name, QEDE_PMD_VER_PREFIX,
1612 QEDE_PMD_DRV_VER_STR_SIZE);
1614 /* For CMT mode device do periodic polling for slowpath events.
1615 * This is required since uio device uses only one MSI-x
1616 * interrupt vector but we need one for each engine.
1618 if (edev->num_hwfns > 1 && IS_PF(edev)) {
1619 rc = rte_eal_alarm_set(timer_period * US_PER_S,
1623 DP_ERR(edev, "Unable to start periodic"
1624 " timer rc %d\n", rc);
1629 rc = qed_ops->common->slowpath_start(edev, ¶ms);
1631 DP_ERR(edev, "Cannot start slowpath rc = %d\n", rc);
1632 rte_eal_alarm_cancel(qede_poll_sp_sb_cb,
1637 rc = qed_ops->fill_dev_info(edev, &dev_info);
1639 DP_ERR(edev, "Cannot get device_info rc %d\n", rc);
1640 qed_ops->common->slowpath_stop(edev);
1641 qed_ops->common->remove(edev);
1642 rte_eal_alarm_cancel(qede_poll_sp_sb_cb,
1647 qede_alloc_etherdev(adapter, &dev_info);
1649 adapter->ops->common->set_id(edev, edev->name, QEDE_PMD_VERSION);
1652 adapter->dev_info.num_mac_addrs =
1653 (uint32_t)RESC_NUM(ECORE_LEADING_HWFN(edev),
1656 ecore_vf_get_num_mac_filters(ECORE_LEADING_HWFN(edev),
1657 &adapter->dev_info.num_mac_addrs);
1659 /* Allocate memory for storing MAC addr */
1660 eth_dev->data->mac_addrs = rte_zmalloc(edev->name,
1662 adapter->dev_info.num_mac_addrs),
1663 RTE_CACHE_LINE_SIZE);
1665 if (eth_dev->data->mac_addrs == NULL) {
1666 DP_ERR(edev, "Failed to allocate MAC address\n");
1667 qed_ops->common->slowpath_stop(edev);
1668 qed_ops->common->remove(edev);
1669 rte_eal_alarm_cancel(qede_poll_sp_sb_cb,
1675 ether_addr_copy((struct ether_addr *)edev->hwfns[0].
1676 hw_info.hw_mac_addr,
1677 ð_dev->data->mac_addrs[0]);
1678 ether_addr_copy(ð_dev->data->mac_addrs[0],
1679 &adapter->primary_mac);
1681 ecore_vf_read_bulletin(ECORE_LEADING_HWFN(edev),
1683 if (bulletin_change) {
1685 ecore_vf_bulletin_get_forced_mac(
1686 ECORE_LEADING_HWFN(edev),
1689 if (is_mac_exist && is_mac_forced) {
1690 DP_INFO(edev, "VF macaddr received from PF\n");
1691 ether_addr_copy((struct ether_addr *)&vf_mac,
1692 ð_dev->data->mac_addrs[0]);
1693 ether_addr_copy(ð_dev->data->mac_addrs[0],
1694 &adapter->primary_mac);
1696 DP_NOTICE(edev, false,
1697 "No VF macaddr assigned\n");
1702 eth_dev->dev_ops = (is_vf) ? &qede_eth_vf_dev_ops : &qede_eth_dev_ops;
1705 qede_print_adapter_info(adapter);
1709 adapter->state = QEDE_DEV_INIT;
1711 DP_NOTICE(edev, false, "MAC address : %02x:%02x:%02x:%02x:%02x:%02x\n",
1712 adapter->primary_mac.addr_bytes[0],
1713 adapter->primary_mac.addr_bytes[1],
1714 adapter->primary_mac.addr_bytes[2],
1715 adapter->primary_mac.addr_bytes[3],
1716 adapter->primary_mac.addr_bytes[4],
1717 adapter->primary_mac.addr_bytes[5]);
1722 static int qedevf_eth_dev_init(struct rte_eth_dev *eth_dev)
1724 return qede_common_dev_init(eth_dev, 1);
1727 static int qede_eth_dev_init(struct rte_eth_dev *eth_dev)
1729 return qede_common_dev_init(eth_dev, 0);
1732 static int qede_dev_common_uninit(struct rte_eth_dev *eth_dev)
1734 /* only uninitialize in the primary process */
1735 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1738 /* safe to close dev here */
1739 qede_dev_close(eth_dev);
1741 eth_dev->dev_ops = NULL;
1742 eth_dev->rx_pkt_burst = NULL;
1743 eth_dev->tx_pkt_burst = NULL;
1745 if (eth_dev->data->mac_addrs)
1746 rte_free(eth_dev->data->mac_addrs);
1748 eth_dev->data->mac_addrs = NULL;
1753 static int qede_eth_dev_uninit(struct rte_eth_dev *eth_dev)
1755 return qede_dev_common_uninit(eth_dev);
1758 static int qedevf_eth_dev_uninit(struct rte_eth_dev *eth_dev)
1760 return qede_dev_common_uninit(eth_dev);
1763 static struct rte_pci_id pci_id_qedevf_map[] = {
1764 #define QEDEVF_RTE_PCI_DEVICE(dev) RTE_PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, dev)
1766 QEDEVF_RTE_PCI_DEVICE(PCI_DEVICE_ID_NX2_VF)
1769 QEDEVF_RTE_PCI_DEVICE(PCI_DEVICE_ID_57980S_IOV)
1774 static struct rte_pci_id pci_id_qede_map[] = {
1775 #define QEDE_RTE_PCI_DEVICE(dev) RTE_PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, dev)
1777 QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_NX2_57980E)
1780 QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_NX2_57980S)
1783 QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_57980S_40)
1786 QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_57980S_25)
1789 QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_57980S_100)
1794 static struct eth_driver rte_qedevf_pmd = {
1796 .id_table = pci_id_qedevf_map,
1798 RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
1799 .probe = rte_eth_dev_pci_probe,
1800 .remove = rte_eth_dev_pci_remove,
1802 .eth_dev_init = qedevf_eth_dev_init,
1803 .eth_dev_uninit = qedevf_eth_dev_uninit,
1804 .dev_private_size = sizeof(struct qede_dev),
1807 static struct eth_driver rte_qede_pmd = {
1809 .id_table = pci_id_qede_map,
1811 RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
1812 .probe = rte_eth_dev_pci_probe,
1813 .remove = rte_eth_dev_pci_remove,
1815 .eth_dev_init = qede_eth_dev_init,
1816 .eth_dev_uninit = qede_eth_dev_uninit,
1817 .dev_private_size = sizeof(struct qede_dev),
1820 RTE_PMD_REGISTER_PCI(net_qede, rte_qede_pmd.pci_drv);
1821 RTE_PMD_REGISTER_PCI_TABLE(net_qede, pci_id_qede_map);
1822 RTE_PMD_REGISTER_PCI(net_qede_vf, rte_qedevf_pmd.pci_drv);
1823 RTE_PMD_REGISTER_PCI_TABLE(net_qede_vf, pci_id_qedevf_map);