4 * Copyright (c) 2016-2017 Solarflare Communications Inc.
7 * This software was jointly developed between OKTET Labs (under contract
8 * for Solarflare) and Solarflare Communications, Inc.
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions are met:
13 * 1. Redistributions of source code must retain the above copyright notice,
14 * this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright notice,
16 * this list of conditions and the following disclaimer in the documentation
17 * and/or other materials provided with the distribution.
19 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
21 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
23 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
24 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
25 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
26 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
27 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
28 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
29 * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33 #include <rte_ethdev.h>
34 #include <rte_ethdev_pci.h>
36 #include <rte_bus_pci.h>
37 #include <rte_errno.h>
38 #include <rte_string_fns.h>
43 #include "sfc_debug.h"
45 #include "sfc_kvargs.h"
51 #include "sfc_dp_rx.h"
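/* Registry of Rx/Tx datapath implementations, populated by sfc_register_dp() */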
53 static struct sfc_dp_list sfc_dp_head =
54 TAILQ_HEAD_INITIALIZER(sfc_dp_head);
57 sfc_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
59 struct sfc_adapter *sa = dev->data->dev_private;
60 efx_nic_fw_info_t enfi;
65 * The callback is expected to return a value greater than or
66 * equal to 0; nevertheless, if an error occurs, it is
67 * desirable to pass it to the caller.
69 if ((fw_version == NULL) || (fw_size == 0))
72 rc = efx_nic_get_fw_version(sa->nic, &enfi);
76 ret = snprintf(fw_version, fw_size,
77 "%" PRIu16 ".%" PRIu16 ".%" PRIu16 ".%" PRIu16,
78 enfi.enfi_mc_fw_version[0], enfi.enfi_mc_fw_version[1],
79 enfi.enfi_mc_fw_version[2], enfi.enfi_mc_fw_version[3]);
83 if (enfi.enfi_dpcpu_fw_ids_valid) {
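/*
 * Clamp the offset to the end of the buffer in case the base
 * version string above did not fit and snprintf() truncated it.
 */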
84 size_t dpcpu_fw_ids_offset = MIN(fw_size - 1, (size_t)ret);
87 ret_extra = snprintf(fw_version + dpcpu_fw_ids_offset,
88 fw_size - dpcpu_fw_ids_offset,
89 " rx%" PRIx16 " tx%" PRIx16,
90 enfi.enfi_rx_dpcpu_fw_id,
91 enfi.enfi_tx_dpcpu_fw_id);
98 if (fw_size < (size_t)(++ret))
105 sfc_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
107 struct sfc_adapter *sa = dev->data->dev_private;
108 const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
110 sfc_log_init(sa, "entry");
112 dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(dev);
113 dev_info->max_rx_pktlen = EFX_MAC_PDU_MAX;
115 /* Autonegotiation may be disabled */
116 dev_info->speed_capa = ETH_LINK_SPEED_FIXED;
117 if (sa->port.phy_adv_cap_mask & EFX_PHY_CAP_1000FDX)
118 dev_info->speed_capa |= ETH_LINK_SPEED_1G;
119 if (sa->port.phy_adv_cap_mask & EFX_PHY_CAP_10000FDX)
120 dev_info->speed_capa |= ETH_LINK_SPEED_10G;
121 if (sa->port.phy_adv_cap_mask & EFX_PHY_CAP_40000FDX)
122 dev_info->speed_capa |= ETH_LINK_SPEED_40G;
124 dev_info->max_rx_queues = sa->rxq_max;
125 dev_info->max_tx_queues = sa->txq_max;
127 /* By default packets are dropped if no descriptors are available */
128 dev_info->default_rxconf.rx_drop_en = 1;
130 dev_info->rx_offload_capa =
131 DEV_RX_OFFLOAD_IPV4_CKSUM |
132 DEV_RX_OFFLOAD_UDP_CKSUM |
133 DEV_RX_OFFLOAD_TCP_CKSUM;
135 dev_info->tx_offload_capa =
136 DEV_TX_OFFLOAD_IPV4_CKSUM |
137 DEV_TX_OFFLOAD_UDP_CKSUM |
138 DEV_TX_OFFLOAD_TCP_CKSUM;
140 dev_info->default_txconf.txq_flags = ETH_TXQ_FLAGS_NOXSUMSCTP;
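/*
 * The NO* flags below disable Tx offloads which the selected Tx
 * datapath implementation or the hardware cannot provide.
 */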
141 if ((~sa->dp_tx->features & SFC_DP_TX_FEAT_VLAN_INSERT) ||
142 !encp->enc_hw_tx_insert_vlan_enabled)
143 dev_info->default_txconf.txq_flags |= ETH_TXQ_FLAGS_NOVLANOFFL;
145 dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_VLAN_INSERT;
147 if (~sa->dp_tx->features & SFC_DP_TX_FEAT_MULTI_SEG)
148 dev_info->default_txconf.txq_flags |= ETH_TXQ_FLAGS_NOMULTSEGS;
150 if (~sa->dp_tx->features & SFC_DP_TX_FEAT_MULTI_POOL)
151 dev_info->default_txconf.txq_flags |= ETH_TXQ_FLAGS_NOMULTMEMP;
153 if (~sa->dp_tx->features & SFC_DP_TX_FEAT_REFCNT)
154 dev_info->default_txconf.txq_flags |= ETH_TXQ_FLAGS_NOREFCOUNT;
156 #if EFSYS_OPT_RX_SCALE
157 if (sa->rss_support != EFX_RX_SCALE_UNAVAILABLE) {
158 dev_info->reta_size = EFX_RSS_TBL_SIZE;
159 dev_info->hash_key_size = EFX_RSS_KEY_SIZE;
160 dev_info->flow_type_rss_offloads = SFC_RSS_OFFLOADS;
165 dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_TCP_TSO;
167 dev_info->rx_desc_lim.nb_max = EFX_RXQ_MAXNDESCS;
168 dev_info->rx_desc_lim.nb_min = EFX_RXQ_MINNDESCS;
169 /* The RXQ hardware requires that the descriptor count is a power
170 * of 2, but rx_desc_lim cannot properly describe that constraint.
172 dev_info->rx_desc_lim.nb_align = EFX_RXQ_MINNDESCS;
174 dev_info->tx_desc_lim.nb_max = sa->txq_max_entries;
175 dev_info->tx_desc_lim.nb_min = EFX_TXQ_MINNDESCS;
177 * The TXQ hardware requires that the descriptor count is a power
178 * of 2, but tx_desc_lim cannot properly describe that constraint
180 dev_info->tx_desc_lim.nb_align = EFX_TXQ_MINNDESCS;
183 static const uint32_t *
184 sfc_dev_supported_ptypes_get(struct rte_eth_dev *dev)
186 struct sfc_adapter *sa = dev->data->dev_private;
188 return sa->dp_rx->supported_ptypes_get();
192 sfc_dev_configure(struct rte_eth_dev *dev)
194 struct rte_eth_dev_data *dev_data = dev->data;
195 struct sfc_adapter *sa = dev_data->dev_private;
198 sfc_log_init(sa, "entry n_rxq=%u n_txq=%u",
199 dev_data->nb_rx_queues, dev_data->nb_tx_queues);
201 sfc_adapter_lock(sa);
203 case SFC_ADAPTER_CONFIGURED:
205 case SFC_ADAPTER_INITIALIZED:
206 rc = sfc_configure(sa);
209 sfc_err(sa, "unexpected adapter state %u to configure",
214 sfc_adapter_unlock(sa);
216 sfc_log_init(sa, "done %d", rc);
222 sfc_dev_start(struct rte_eth_dev *dev)
224 struct sfc_adapter *sa = dev->data->dev_private;
227 sfc_log_init(sa, "entry");
229 sfc_adapter_lock(sa);
231 sfc_adapter_unlock(sa);
233 sfc_log_init(sa, "done %d", rc);
239 sfc_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
241 struct sfc_adapter *sa = dev->data->dev_private;
242 struct rte_eth_link *dev_link = &dev->data->dev_link;
243 struct rte_eth_link old_link;
244 struct rte_eth_link current_link;
246 sfc_log_init(sa, "entry");
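/*
 * The link status is published via dev->data->dev_link using 64-bit
 * atomic operations, so the adapter lock is not taken here; the
 * static assert below guarantees that the structure fits into 64 bits.
 */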
249 EFX_STATIC_ASSERT(sizeof(*dev_link) == sizeof(rte_atomic64_t));
250 *(int64_t *)&old_link = rte_atomic64_read((rte_atomic64_t *)dev_link);
252 if (sa->state != SFC_ADAPTER_STARTED) {
253 sfc_port_link_mode_to_info(EFX_LINK_UNKNOWN, &current_link);
254 if (!rte_atomic64_cmpset((volatile uint64_t *)dev_link,
255 *(uint64_t *)&old_link,
256 *(uint64_t *)&current_link))
258 } else if (wait_to_complete) {
259 efx_link_mode_t link_mode;
261 if (efx_port_poll(sa->nic, &link_mode) != 0)
262 link_mode = EFX_LINK_UNKNOWN;
263 sfc_port_link_mode_to_info(link_mode, &current_link);
265 if (!rte_atomic64_cmpset((volatile uint64_t *)dev_link,
266 *(uint64_t *)&old_link,
267 *(uint64_t *)&current_link))
270 sfc_ev_mgmt_qpoll(sa);
271 *(int64_t *)&current_link =
272 rte_atomic64_read((rte_atomic64_t *)dev_link);
275 if (old_link.link_status != current_link.link_status)
276 sfc_info(sa, "Link status is %s",
277 current_link.link_status ? "UP" : "DOWN");
279 return old_link.link_status == current_link.link_status ? 0 : -1;
283 sfc_dev_stop(struct rte_eth_dev *dev)
285 struct sfc_adapter *sa = dev->data->dev_private;
287 sfc_log_init(sa, "entry");
289 sfc_adapter_lock(sa);
291 sfc_adapter_unlock(sa);
293 sfc_log_init(sa, "done");
297 sfc_dev_set_link_up(struct rte_eth_dev *dev)
299 struct sfc_adapter *sa = dev->data->dev_private;
302 sfc_log_init(sa, "entry");
304 sfc_adapter_lock(sa);
306 sfc_adapter_unlock(sa);
313 sfc_dev_set_link_down(struct rte_eth_dev *dev)
315 struct sfc_adapter *sa = dev->data->dev_private;
317 sfc_log_init(sa, "entry");
319 sfc_adapter_lock(sa);
321 sfc_adapter_unlock(sa);
327 sfc_dev_close(struct rte_eth_dev *dev)
329 struct sfc_adapter *sa = dev->data->dev_private;
331 sfc_log_init(sa, "entry");
333 sfc_adapter_lock(sa);
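/*
 * The cases below intentionally fall through: a started adapter is
 * stopped first, then a configured one is closed, leaving the
 * adapter in the initialized state.
 */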
335 case SFC_ADAPTER_STARTED:
337 SFC_ASSERT(sa->state == SFC_ADAPTER_CONFIGURED);
339 case SFC_ADAPTER_CONFIGURED:
341 SFC_ASSERT(sa->state == SFC_ADAPTER_INITIALIZED);
343 case SFC_ADAPTER_INITIALIZED:
346 sfc_err(sa, "unexpected adapter state %u on close", sa->state);
349 sfc_adapter_unlock(sa);
351 sfc_log_init(sa, "done");
355 sfc_dev_filter_set(struct rte_eth_dev *dev, enum sfc_dev_filter_mode mode,
358 struct sfc_port *port;
360 struct sfc_adapter *sa = dev->data->dev_private;
361 boolean_t allmulti = (mode == SFC_DEV_FILTER_MODE_ALLMULTI);
362 const char *desc = (allmulti) ? "all-multi" : "promiscuous";
364 sfc_adapter_lock(sa);
367 toggle = (allmulti) ? (&port->allmulti) : (&port->promisc);
369 if (*toggle != enabled) {
372 if (port->isolated) {
373 sfc_warn(sa, "isolated mode is active on the port");
374 sfc_warn(sa, "the change is to be applied on the next "
375 "start provided that isolated mode is "
376 "disabled prior to the next start");
377 } else if ((sa->state == SFC_ADAPTER_STARTED) &&
378 (sfc_set_rx_mode(sa) != 0)) {
379 *toggle = !(enabled);
380 sfc_warn(sa, "Failed to %s %s mode",
381 ((enabled) ? "enable" : "disable"), desc);
385 sfc_adapter_unlock(sa);
389 sfc_dev_promisc_enable(struct rte_eth_dev *dev)
391 sfc_dev_filter_set(dev, SFC_DEV_FILTER_MODE_PROMISC, B_TRUE);
395 sfc_dev_promisc_disable(struct rte_eth_dev *dev)
397 sfc_dev_filter_set(dev, SFC_DEV_FILTER_MODE_PROMISC, B_FALSE);
401 sfc_dev_allmulti_enable(struct rte_eth_dev *dev)
403 sfc_dev_filter_set(dev, SFC_DEV_FILTER_MODE_ALLMULTI, B_TRUE);
407 sfc_dev_allmulti_disable(struct rte_eth_dev *dev)
409 sfc_dev_filter_set(dev, SFC_DEV_FILTER_MODE_ALLMULTI, B_FALSE);
413 sfc_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
414 uint16_t nb_rx_desc, unsigned int socket_id,
415 const struct rte_eth_rxconf *rx_conf,
416 struct rte_mempool *mb_pool)
418 struct sfc_adapter *sa = dev->data->dev_private;
421 sfc_log_init(sa, "RxQ=%u nb_rx_desc=%u socket_id=%u",
422 rx_queue_id, nb_rx_desc, socket_id);
424 sfc_adapter_lock(sa);
426 rc = sfc_rx_qinit(sa, rx_queue_id, nb_rx_desc, socket_id,
431 dev->data->rx_queues[rx_queue_id] = sa->rxq_info[rx_queue_id].rxq->dp;
433 sfc_adapter_unlock(sa);
438 sfc_adapter_unlock(sa);
444 sfc_rx_queue_release(void *queue)
446 struct sfc_dp_rxq *dp_rxq = queue;
448 struct sfc_adapter *sa;
449 unsigned int sw_index;
454 rxq = sfc_rxq_by_dp_rxq(dp_rxq);
456 sfc_adapter_lock(sa);
458 sw_index = sfc_rxq_sw_index(rxq);
460 sfc_log_init(sa, "RxQ=%u", sw_index);
462 sfc_rx_qfini(sa, sw_index);
464 sfc_adapter_unlock(sa);
468 sfc_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
469 uint16_t nb_tx_desc, unsigned int socket_id,
470 const struct rte_eth_txconf *tx_conf)
472 struct sfc_adapter *sa = dev->data->dev_private;
475 sfc_log_init(sa, "TxQ = %u, nb_tx_desc = %u, socket_id = %u",
476 tx_queue_id, nb_tx_desc, socket_id);
478 sfc_adapter_lock(sa);
480 rc = sfc_tx_qinit(sa, tx_queue_id, nb_tx_desc, socket_id, tx_conf);
484 dev->data->tx_queues[tx_queue_id] = sa->txq_info[tx_queue_id].txq->dp;
486 sfc_adapter_unlock(sa);
490 sfc_adapter_unlock(sa);
496 sfc_tx_queue_release(void *queue)
498 struct sfc_dp_txq *dp_txq = queue;
500 unsigned int sw_index;
501 struct sfc_adapter *sa;
506 txq = sfc_txq_by_dp_txq(dp_txq);
507 sw_index = sfc_txq_sw_index(txq);
509 SFC_ASSERT(txq->evq != NULL);
512 sfc_log_init(sa, "TxQ = %u", sw_index);
514 sfc_adapter_lock(sa);
516 sfc_tx_qfini(sa, sw_index);
518 sfc_adapter_unlock(sa);
522 sfc_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
524 struct sfc_adapter *sa = dev->data->dev_private;
525 struct sfc_port *port = &sa->port;
529 rte_spinlock_lock(&port->mac_stats_lock);
531 ret = sfc_port_update_mac_stats(sa);
535 mac_stats = port->mac_stats_buf;
537 if (EFX_MAC_STAT_SUPPORTED(port->mac_stats_mask,
538 EFX_MAC_VADAPTER_RX_UNICAST_PACKETS)) {
540 mac_stats[EFX_MAC_VADAPTER_RX_UNICAST_PACKETS] +
541 mac_stats[EFX_MAC_VADAPTER_RX_MULTICAST_PACKETS] +
542 mac_stats[EFX_MAC_VADAPTER_RX_BROADCAST_PACKETS];
544 mac_stats[EFX_MAC_VADAPTER_TX_UNICAST_PACKETS] +
545 mac_stats[EFX_MAC_VADAPTER_TX_MULTICAST_PACKETS] +
546 mac_stats[EFX_MAC_VADAPTER_TX_BROADCAST_PACKETS];
548 mac_stats[EFX_MAC_VADAPTER_RX_UNICAST_BYTES] +
549 mac_stats[EFX_MAC_VADAPTER_RX_MULTICAST_BYTES] +
550 mac_stats[EFX_MAC_VADAPTER_RX_BROADCAST_BYTES];
552 mac_stats[EFX_MAC_VADAPTER_TX_UNICAST_BYTES] +
553 mac_stats[EFX_MAC_VADAPTER_TX_MULTICAST_BYTES] +
554 mac_stats[EFX_MAC_VADAPTER_TX_BROADCAST_BYTES];
555 stats->imissed = mac_stats[EFX_MAC_VADAPTER_RX_OVERFLOW];
556 stats->ierrors = mac_stats[EFX_MAC_VADAPTER_RX_BAD_PACKETS];
557 stats->oerrors = mac_stats[EFX_MAC_VADAPTER_TX_BAD_PACKETS];
559 stats->ipackets = mac_stats[EFX_MAC_RX_PKTS];
560 stats->opackets = mac_stats[EFX_MAC_TX_PKTS];
561 stats->ibytes = mac_stats[EFX_MAC_RX_OCTETS];
562 stats->obytes = mac_stats[EFX_MAC_TX_OCTETS];
564 * Take into account the stats which may be supported
565 * on EF10. If some stat is not supported by the current
566 * firmware variant or HW revision, it is guaranteed
567 * to be zero in mac_stats.
570 mac_stats[EFX_MAC_RX_NODESC_DROP_CNT] +
571 mac_stats[EFX_MAC_PM_TRUNC_BB_OVERFLOW] +
572 mac_stats[EFX_MAC_PM_DISCARD_BB_OVERFLOW] +
573 mac_stats[EFX_MAC_PM_TRUNC_VFIFO_FULL] +
574 mac_stats[EFX_MAC_PM_DISCARD_VFIFO_FULL] +
575 mac_stats[EFX_MAC_PM_TRUNC_QBB] +
576 mac_stats[EFX_MAC_PM_DISCARD_QBB] +
577 mac_stats[EFX_MAC_PM_DISCARD_MAPPING] +
578 mac_stats[EFX_MAC_RXDP_Q_DISABLED_PKTS] +
579 mac_stats[EFX_MAC_RXDP_DI_DROPPED_PKTS];
581 mac_stats[EFX_MAC_RX_FCS_ERRORS] +
582 mac_stats[EFX_MAC_RX_ALIGN_ERRORS] +
583 mac_stats[EFX_MAC_RX_JABBER_PKTS];
584 /* no oerrors counters supported on EF10 */
588 rte_spinlock_unlock(&port->mac_stats_lock);
589 SFC_ASSERT(ret >= 0);
594 sfc_stats_reset(struct rte_eth_dev *dev)
596 struct sfc_adapter *sa = dev->data->dev_private;
597 struct sfc_port *port = &sa->port;
600 if (sa->state != SFC_ADAPTER_STARTED) {
602 * The operation cannot be done if the port is not started; it
603 * will be scheduled to be done during the next port start
605 port->mac_stats_reset_pending = B_TRUE;
609 rc = sfc_port_reset_mac_stats(sa);
611 sfc_err(sa, "failed to reset statistics (rc = %d)", rc);
615 sfc_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
616 unsigned int xstats_count)
618 struct sfc_adapter *sa = dev->data->dev_private;
619 struct sfc_port *port = &sa->port;
625 rte_spinlock_lock(&port->mac_stats_lock);
627 rc = sfc_port_update_mac_stats(sa);
634 mac_stats = port->mac_stats_buf;
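/*
 * Only statistics supported by the current firmware variant and port
 * configuration are reported; ids are assigned densely over that set.
 */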
636 for (i = 0; i < EFX_MAC_NSTATS; ++i) {
637 if (EFX_MAC_STAT_SUPPORTED(port->mac_stats_mask, i)) {
638 if (xstats != NULL && nstats < (int)xstats_count) {
639 xstats[nstats].id = nstats;
640 xstats[nstats].value = mac_stats[i];
647 rte_spinlock_unlock(&port->mac_stats_lock);
653 sfc_xstats_get_names(struct rte_eth_dev *dev,
654 struct rte_eth_xstat_name *xstats_names,
655 unsigned int xstats_count)
657 struct sfc_adapter *sa = dev->data->dev_private;
658 struct sfc_port *port = &sa->port;
660 unsigned int nstats = 0;
662 for (i = 0; i < EFX_MAC_NSTATS; ++i) {
663 if (EFX_MAC_STAT_SUPPORTED(port->mac_stats_mask, i)) {
664 if (xstats_names != NULL && nstats < xstats_count)
665 strlcpy(xstats_names[nstats].name,
666 efx_mac_stat_name(sa->nic, i),
667 sizeof(xstats_names[0].name));
676 sfc_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
677 uint64_t *values, unsigned int n)
679 struct sfc_adapter *sa = dev->data->dev_private;
680 struct sfc_port *port = &sa->port;
682 unsigned int nb_supported = 0;
683 unsigned int nb_written = 0;
688 if (unlikely(values == NULL) ||
689 unlikely((ids == NULL) && (n < port->mac_stats_nb_supported)))
690 return port->mac_stats_nb_supported;
692 rte_spinlock_lock(&port->mac_stats_lock);
694 rc = sfc_port_update_mac_stats(sa);
701 mac_stats = port->mac_stats_buf;
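/*
 * The ids are indices in the space of supported statistics; the loop
 * below relies on them being sorted in ascending order. With
 * ids == NULL all supported statistics are returned in order.
 */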
703 for (i = 0; (i < EFX_MAC_NSTATS) && (nb_written < n); ++i) {
704 if (!EFX_MAC_STAT_SUPPORTED(port->mac_stats_mask, i))
707 if ((ids == NULL) || (ids[nb_written] == nb_supported))
708 values[nb_written++] = mac_stats[i];
716 rte_spinlock_unlock(&port->mac_stats_lock);
722 sfc_xstats_get_names_by_id(struct rte_eth_dev *dev,
723 struct rte_eth_xstat_name *xstats_names,
724 const uint64_t *ids, unsigned int size)
726 struct sfc_adapter *sa = dev->data->dev_private;
727 struct sfc_port *port = &sa->port;
728 unsigned int nb_supported = 0;
729 unsigned int nb_written = 0;
732 if (unlikely(xstats_names == NULL) ||
733 unlikely((ids == NULL) && (size < port->mac_stats_nb_supported)))
734 return port->mac_stats_nb_supported;
736 for (i = 0; (i < EFX_MAC_NSTATS) && (nb_written < size); ++i) {
737 if (!EFX_MAC_STAT_SUPPORTED(port->mac_stats_mask, i))
740 if ((ids == NULL) || (ids[nb_written] == nb_supported)) {
741 char *name = xstats_names[nb_written++].name;
743 strlcpy(name, efx_mac_stat_name(sa->nic, i),
744 sizeof(xstats_names[0].name));
754 sfc_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
756 struct sfc_adapter *sa = dev->data->dev_private;
757 unsigned int wanted_fc, link_fc;
759 memset(fc_conf, 0, sizeof(*fc_conf));
761 sfc_adapter_lock(sa);
763 if (sa->state == SFC_ADAPTER_STARTED)
764 efx_mac_fcntl_get(sa->nic, &wanted_fc, &link_fc);
766 link_fc = sa->port.flow_ctrl;
770 fc_conf->mode = RTE_FC_NONE;
772 case EFX_FCNTL_RESPOND:
773 fc_conf->mode = RTE_FC_RX_PAUSE;
775 case EFX_FCNTL_GENERATE:
776 fc_conf->mode = RTE_FC_TX_PAUSE;
778 case (EFX_FCNTL_RESPOND | EFX_FCNTL_GENERATE):
779 fc_conf->mode = RTE_FC_FULL;
782 sfc_err(sa, "%s: unexpected flow control value %#x",
786 fc_conf->autoneg = sa->port.flow_ctrl_autoneg;
788 sfc_adapter_unlock(sa);
794 sfc_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
796 struct sfc_adapter *sa = dev->data->dev_private;
797 struct sfc_port *port = &sa->port;
801 if (fc_conf->high_water != 0 || fc_conf->low_water != 0 ||
802 fc_conf->pause_time != 0 || fc_conf->send_xon != 0 ||
803 fc_conf->mac_ctrl_frame_fwd != 0) {
804 sfc_err(sa, "unsupported flow control settings specified");
809 switch (fc_conf->mode) {
813 case RTE_FC_RX_PAUSE:
814 fcntl = EFX_FCNTL_RESPOND;
816 case RTE_FC_TX_PAUSE:
817 fcntl = EFX_FCNTL_GENERATE;
820 fcntl = EFX_FCNTL_RESPOND | EFX_FCNTL_GENERATE;
827 sfc_adapter_lock(sa);
829 if (sa->state == SFC_ADAPTER_STARTED) {
830 rc = efx_mac_fcntl_set(sa->nic, fcntl, fc_conf->autoneg);
832 goto fail_mac_fcntl_set;
835 port->flow_ctrl = fcntl;
836 port->flow_ctrl_autoneg = fc_conf->autoneg;
838 sfc_adapter_unlock(sa);
843 sfc_adapter_unlock(sa);
850 sfc_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
852 struct sfc_adapter *sa = dev->data->dev_private;
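/* EFX_MAC_PDU() converts the MTU to the full frame (PDU) size checked against the MAC limits below */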
853 size_t pdu = EFX_MAC_PDU(mtu);
857 sfc_log_init(sa, "mtu=%u", mtu);
860 if (pdu < EFX_MAC_PDU_MIN) {
861 sfc_err(sa, "too small MTU %u (PDU size %u less than min %u)",
862 (unsigned int)mtu, (unsigned int)pdu,
866 if (pdu > EFX_MAC_PDU_MAX) {
867 sfc_err(sa, "too big MTU %u (PDU size %u greater than max %u)",
868 (unsigned int)mtu, (unsigned int)pdu,
873 sfc_adapter_lock(sa);
875 if (pdu != sa->port.pdu) {
876 if (sa->state == SFC_ADAPTER_STARTED) {
879 old_pdu = sa->port.pdu;
890 * The driver does not use it, but other PMDs update jumbo_frame
891 * flag and max_rx_pkt_len when MTU is set.
893 dev->data->dev_conf.rxmode.jumbo_frame = (mtu > ETHER_MAX_LEN);
894 dev->data->dev_conf.rxmode.max_rx_pkt_len = sa->port.pdu;
896 sfc_adapter_unlock(sa);
898 sfc_log_init(sa, "done");
902 sa->port.pdu = old_pdu;
903 if (sfc_start(sa) != 0)
904 sfc_err(sa, "cannot start with either new (%u) or old (%u) "
905 "PDU max size - port is stopped",
906 (unsigned int)pdu, (unsigned int)old_pdu);
907 sfc_adapter_unlock(sa);
910 sfc_log_init(sa, "failed %d", rc);
915 sfc_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr)
917 struct sfc_adapter *sa = dev->data->dev_private;
918 const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
919 struct sfc_port *port = &sa->port;
922 sfc_adapter_lock(sa);
925 * Copy the address to the device private data so that
926 * it could be recalled in the case of adapter restart.
928 ether_addr_copy(mac_addr, &port->default_mac_addr);
930 if (port->isolated) {
931 sfc_err(sa, "isolated mode is active on the port");
932 sfc_err(sa, "will not set MAC address");
936 if (sa->state != SFC_ADAPTER_STARTED) {
937 sfc_info(sa, "the port is not started");
938 sfc_info(sa, "the new MAC address will be set on port start");
943 if (encp->enc_allow_set_mac_with_installed_filters) {
944 rc = efx_mac_addr_set(sa->nic, mac_addr->addr_bytes);
946 sfc_err(sa, "cannot set MAC address (rc = %u)", rc);
951 * Changing the MAC address by means of an MCDI request
952 * has no effect on received traffic, therefore
953 * we also need to update unicast filters
955 rc = sfc_set_rx_mode(sa);
957 sfc_err(sa, "cannot set filter (rc = %u)", rc);
959 sfc_warn(sa, "cannot set MAC address with filters installed");
960 sfc_warn(sa, "adapter will be restarted to pick the new MAC");
961 sfc_warn(sa, "(some traffic may be dropped)");
964 * Since setting MAC address with filters installed is not
965 * allowed on the adapter, the new MAC address will be set
966 * by means of adapter restart. sfc_start() shall retrieve
967 * the new address from the device private data and set it.
972 sfc_err(sa, "cannot restart adapter (rc = %u)", rc);
977 * In the case of failure, sa->port.default_mac_addr does not
978 * need rollback since no error code is returned, and the upper
979 * API will anyway update the external MAC address storage.
980 * To be consistent with that new value it is better to keep
981 * the device private value the same.
983 sfc_adapter_unlock(sa);
988 sfc_set_mc_addr_list(struct rte_eth_dev *dev, struct ether_addr *mc_addr_set,
991 struct sfc_adapter *sa = dev->data->dev_private;
992 struct sfc_port *port = &sa->port;
993 uint8_t *mc_addrs = port->mcast_addrs;
997 if (port->isolated) {
998 sfc_err(sa, "isolated mode is active on the port");
999 sfc_err(sa, "will not set multicast address list");
1003 if (mc_addrs == NULL)
1006 if (nb_mc_addr > port->max_mcast_addrs) {
1007 sfc_err(sa, "too many multicast addresses: %u > %u",
1008 nb_mc_addr, port->max_mcast_addrs);
1012 for (i = 0; i < nb_mc_addr; ++i) {
1013 rte_memcpy(mc_addrs, mc_addr_set[i].addr_bytes,
1015 mc_addrs += EFX_MAC_ADDR_LEN;
1018 port->nb_mcast_addrs = nb_mc_addr;
1020 if (sa->state != SFC_ADAPTER_STARTED)
1023 rc = efx_mac_multicast_list_set(sa->nic, port->mcast_addrs,
1024 port->nb_mcast_addrs);
1026 sfc_err(sa, "cannot set multicast address list (rc = %u)", rc);
1028 SFC_ASSERT(rc >= 0);
1033 * The function is used by the secondary process as well. It must not
1034 * use any process-local pointers from the adapter data.
1037 sfc_rx_queue_info_get(struct rte_eth_dev *dev, uint16_t rx_queue_id,
1038 struct rte_eth_rxq_info *qinfo)
1040 struct sfc_adapter *sa = dev->data->dev_private;
1041 struct sfc_rxq_info *rxq_info;
1042 struct sfc_rxq *rxq;
1044 sfc_adapter_lock(sa);
1046 SFC_ASSERT(rx_queue_id < sa->rxq_count);
1048 rxq_info = &sa->rxq_info[rx_queue_id];
1049 rxq = rxq_info->rxq;
1050 SFC_ASSERT(rxq != NULL);
1052 qinfo->mp = rxq->refill_mb_pool;
1053 qinfo->conf.rx_free_thresh = rxq->refill_threshold;
1054 qinfo->conf.rx_drop_en = 1;
1055 qinfo->conf.rx_deferred_start = rxq_info->deferred_start;
1056 qinfo->scattered_rx = (rxq_info->type == EFX_RXQ_TYPE_SCATTER);
1057 qinfo->nb_desc = rxq_info->entries;
1059 sfc_adapter_unlock(sa);
1063 * The function is used by the secondary process as well. It must not
1064 * use any process-local pointers from the adapter data.
1067 sfc_tx_queue_info_get(struct rte_eth_dev *dev, uint16_t tx_queue_id,
1068 struct rte_eth_txq_info *qinfo)
1070 struct sfc_adapter *sa = dev->data->dev_private;
1071 struct sfc_txq_info *txq_info;
1073 sfc_adapter_lock(sa);
1075 SFC_ASSERT(tx_queue_id < sa->txq_count);
1077 txq_info = &sa->txq_info[tx_queue_id];
1078 SFC_ASSERT(txq_info->txq != NULL);
1080 memset(qinfo, 0, sizeof(*qinfo));
1082 qinfo->conf.txq_flags = txq_info->txq->flags;
1083 qinfo->conf.tx_free_thresh = txq_info->txq->free_thresh;
1084 qinfo->conf.tx_deferred_start = txq_info->deferred_start;
1085 qinfo->nb_desc = txq_info->entries;
1087 sfc_adapter_unlock(sa);
1091 sfc_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
1093 struct sfc_adapter *sa = dev->data->dev_private;
1095 sfc_log_init(sa, "RxQ=%u", rx_queue_id);
1097 return sfc_rx_qdesc_npending(sa, rx_queue_id);
1101 sfc_rx_descriptor_done(void *queue, uint16_t offset)
1103 struct sfc_dp_rxq *dp_rxq = queue;
1105 return sfc_rx_qdesc_done(dp_rxq, offset);
1109 sfc_rx_descriptor_status(void *queue, uint16_t offset)
1111 struct sfc_dp_rxq *dp_rxq = queue;
1112 struct sfc_rxq *rxq = sfc_rxq_by_dp_rxq(dp_rxq);
1114 return rxq->evq->sa->dp_rx->qdesc_status(dp_rxq, offset);
1118 sfc_tx_descriptor_status(void *queue, uint16_t offset)
1120 struct sfc_dp_txq *dp_txq = queue;
1121 struct sfc_txq *txq = sfc_txq_by_dp_txq(dp_txq);
1123 return txq->evq->sa->dp_tx->qdesc_status(dp_txq, offset);
1127 sfc_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
1129 struct sfc_adapter *sa = dev->data->dev_private;
1132 sfc_log_init(sa, "RxQ=%u", rx_queue_id);
1134 sfc_adapter_lock(sa);
1137 if (sa->state != SFC_ADAPTER_STARTED)
1138 goto fail_not_started;
1140 rc = sfc_rx_qstart(sa, rx_queue_id);
1142 goto fail_rx_qstart;
1144 sa->rxq_info[rx_queue_id].deferred_started = B_TRUE;
1146 sfc_adapter_unlock(sa);
1152 sfc_adapter_unlock(sa);
1158 sfc_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
1160 struct sfc_adapter *sa = dev->data->dev_private;
1162 sfc_log_init(sa, "RxQ=%u", rx_queue_id);
1164 sfc_adapter_lock(sa);
1165 sfc_rx_qstop(sa, rx_queue_id);
1167 sa->rxq_info[rx_queue_id].deferred_started = B_FALSE;
1169 sfc_adapter_unlock(sa);
1175 sfc_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
1177 struct sfc_adapter *sa = dev->data->dev_private;
1180 sfc_log_init(sa, "TxQ = %u", tx_queue_id);
1182 sfc_adapter_lock(sa);
1185 if (sa->state != SFC_ADAPTER_STARTED)
1186 goto fail_not_started;
1188 rc = sfc_tx_qstart(sa, tx_queue_id);
1190 goto fail_tx_qstart;
1192 sa->txq_info[tx_queue_id].deferred_started = B_TRUE;
1194 sfc_adapter_unlock(sa);
1200 sfc_adapter_unlock(sa);
1206 sfc_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
1208 struct sfc_adapter *sa = dev->data->dev_private;
1210 sfc_log_init(sa, "TxQ = %u", tx_queue_id);
1212 sfc_adapter_lock(sa);
1214 sfc_tx_qstop(sa, tx_queue_id);
1216 sa->txq_info[tx_queue_id].deferred_started = B_FALSE;
1218 sfc_adapter_unlock(sa);
1222 #if EFSYS_OPT_RX_SCALE
1224 sfc_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
1225 struct rte_eth_rss_conf *rss_conf)
1227 struct sfc_adapter *sa = dev->data->dev_private;
1229 if (sa->rss_support != EFX_RX_SCALE_EXCLUSIVE)
1232 sfc_adapter_lock(sa);
1235 * Mapping of hash configuration between RTE and EFX is not one-to-one,
1236 * hence, conversion is done here to derive a correct set of ETH_RSS
1237 * flags which corresponds to the active EFX configuration stored
1238 * locally in 'sfc_adapter' and kept up-to-date
1240 rss_conf->rss_hf = sfc_efx_to_rte_hash_type(sa->rss_hash_types);
1241 rss_conf->rss_key_len = EFX_RSS_KEY_SIZE;
1242 if (rss_conf->rss_key != NULL)
1243 rte_memcpy(rss_conf->rss_key, sa->rss_key, EFX_RSS_KEY_SIZE);
1245 sfc_adapter_unlock(sa);
1251 sfc_dev_rss_hash_update(struct rte_eth_dev *dev,
1252 struct rte_eth_rss_conf *rss_conf)
1254 struct sfc_adapter *sa = dev->data->dev_private;
1255 struct sfc_port *port = &sa->port;
1256 unsigned int efx_hash_types;
1262 if (sa->rss_support != EFX_RX_SCALE_EXCLUSIVE) {
1263 sfc_err(sa, "RSS is not available");
1267 if (sa->rss_channels == 0) {
1268 sfc_err(sa, "RSS is not configured");
1272 if ((rss_conf->rss_key != NULL) &&
1273 (rss_conf->rss_key_len != sizeof(sa->rss_key))) {
1274 sfc_err(sa, "RSS key size is wrong (should be %zu)",
1275 sizeof(sa->rss_key));
1279 if ((rss_conf->rss_hf & ~SFC_RSS_OFFLOADS) != 0) {
1280 sfc_err(sa, "unsupported hash functions requested");
1284 sfc_adapter_lock(sa);
1286 efx_hash_types = sfc_rte_to_efx_hash_type(rss_conf->rss_hf);
1288 rc = efx_rx_scale_mode_set(sa->nic, EFX_RSS_CONTEXT_DEFAULT,
1289 EFX_RX_HASHALG_TOEPLITZ,
1290 efx_hash_types, B_TRUE);
1292 goto fail_scale_mode_set;
1294 if (rss_conf->rss_key != NULL) {
1295 if (sa->state == SFC_ADAPTER_STARTED) {
1296 rc = efx_rx_scale_key_set(sa->nic,
1297 EFX_RSS_CONTEXT_DEFAULT,
1299 sizeof(sa->rss_key));
1301 goto fail_scale_key_set;
1304 rte_memcpy(sa->rss_key, rss_conf->rss_key, sizeof(sa->rss_key));
1307 sa->rss_hash_types = efx_hash_types;
1309 sfc_adapter_unlock(sa);
1314 if (efx_rx_scale_mode_set(sa->nic, EFX_RSS_CONTEXT_DEFAULT,
1315 EFX_RX_HASHALG_TOEPLITZ,
1316 sa->rss_hash_types, B_TRUE) != 0)
1317 sfc_err(sa, "failed to restore RSS mode");
1319 fail_scale_mode_set:
1320 sfc_adapter_unlock(sa);
1325 sfc_dev_rss_reta_query(struct rte_eth_dev *dev,
1326 struct rte_eth_rss_reta_entry64 *reta_conf,
1329 struct sfc_adapter *sa = dev->data->dev_private;
1330 struct sfc_port *port = &sa->port;
1333 if ((sa->rss_support != EFX_RX_SCALE_EXCLUSIVE) || port->isolated)
1336 if (sa->rss_channels == 0)
1339 if (reta_size != EFX_RSS_TBL_SIZE)
1342 sfc_adapter_lock(sa);
1344 for (entry = 0; entry < reta_size; entry++) {
1345 int grp = entry / RTE_RETA_GROUP_SIZE;
1346 int grp_idx = entry % RTE_RETA_GROUP_SIZE;
1348 if ((reta_conf[grp].mask >> grp_idx) & 1)
1349 reta_conf[grp].reta[grp_idx] = sa->rss_tbl[entry];
1352 sfc_adapter_unlock(sa);
1358 sfc_dev_rss_reta_update(struct rte_eth_dev *dev,
1359 struct rte_eth_rss_reta_entry64 *reta_conf,
1362 struct sfc_adapter *sa = dev->data->dev_private;
1363 struct sfc_port *port = &sa->port;
1364 unsigned int *rss_tbl_new;
1372 if (sa->rss_support != EFX_RX_SCALE_EXCLUSIVE) {
1373 sfc_err(sa, "RSS is not available");
1377 if (sa->rss_channels == 0) {
1378 sfc_err(sa, "RSS is not configured");
1382 if (reta_size != EFX_RSS_TBL_SIZE) {
1383 sfc_err(sa, "RETA size is wrong (should be %u)",
1388 rss_tbl_new = rte_zmalloc("rss_tbl_new", sizeof(sa->rss_tbl), 0);
1389 if (rss_tbl_new == NULL)
1392 sfc_adapter_lock(sa);
1394 rte_memcpy(rss_tbl_new, sa->rss_tbl, sizeof(sa->rss_tbl));
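/*
 * Apply the requested updates to a scratch copy of the table first so
 * that the active table is left untouched if any RETA entry is invalid.
 */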
1396 for (entry = 0; entry < reta_size; entry++) {
1397 int grp_idx = entry % RTE_RETA_GROUP_SIZE;
1398 struct rte_eth_rss_reta_entry64 *grp;
1400 grp = &reta_conf[entry / RTE_RETA_GROUP_SIZE];
1402 if (grp->mask & (1ull << grp_idx)) {
1403 if (grp->reta[grp_idx] >= sa->rss_channels) {
1405 goto bad_reta_entry;
1407 rss_tbl_new[entry] = grp->reta[grp_idx];
1411 if (sa->state == SFC_ADAPTER_STARTED) {
1412 rc = efx_rx_scale_tbl_set(sa->nic, EFX_RSS_CONTEXT_DEFAULT,
1413 rss_tbl_new, EFX_RSS_TBL_SIZE);
1415 goto fail_scale_tbl_set;
1418 rte_memcpy(sa->rss_tbl, rss_tbl_new, sizeof(sa->rss_tbl));
1422 sfc_adapter_unlock(sa);
1424 rte_free(rss_tbl_new);
1426 SFC_ASSERT(rc >= 0);
1432 sfc_dev_filter_ctrl(struct rte_eth_dev *dev, enum rte_filter_type filter_type,
1433 enum rte_filter_op filter_op,
1436 struct sfc_adapter *sa = dev->data->dev_private;
1439 sfc_log_init(sa, "entry");
1441 switch (filter_type) {
1442 case RTE_ETH_FILTER_NONE:
1443 sfc_err(sa, "Global filters configuration not supported");
1445 case RTE_ETH_FILTER_MACVLAN:
1446 sfc_err(sa, "MACVLAN filters not supported");
1448 case RTE_ETH_FILTER_ETHERTYPE:
1449 sfc_err(sa, "EtherType filters not supported");
1451 case RTE_ETH_FILTER_FLEXIBLE:
1452 sfc_err(sa, "Flexible filters not supported");
1454 case RTE_ETH_FILTER_SYN:
1455 sfc_err(sa, "SYN filters not supported");
1457 case RTE_ETH_FILTER_NTUPLE:
1458 sfc_err(sa, "NTUPLE filters not supported");
1460 case RTE_ETH_FILTER_TUNNEL:
1461 sfc_err(sa, "Tunnel filters not supported");
1463 case RTE_ETH_FILTER_FDIR:
1464 sfc_err(sa, "Flow Director filters not supported");
1466 case RTE_ETH_FILTER_HASH:
1467 sfc_err(sa, "Hash filters not supported");
1469 case RTE_ETH_FILTER_GENERIC:
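/* Generic (rte_flow) filters are the only supported type: return the flow ops */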
1470 if (filter_op != RTE_ETH_FILTER_GET) {
1473 *(const void **)arg = &sfc_flow_ops;
1478 sfc_err(sa, "Unknown filter type %u", filter_type);
1482 sfc_log_init(sa, "exit: %d", -rc);
1483 SFC_ASSERT(rc >= 0);
1487 static const struct eth_dev_ops sfc_eth_dev_ops = {
1488 .dev_configure = sfc_dev_configure,
1489 .dev_start = sfc_dev_start,
1490 .dev_stop = sfc_dev_stop,
1491 .dev_set_link_up = sfc_dev_set_link_up,
1492 .dev_set_link_down = sfc_dev_set_link_down,
1493 .dev_close = sfc_dev_close,
1494 .promiscuous_enable = sfc_dev_promisc_enable,
1495 .promiscuous_disable = sfc_dev_promisc_disable,
1496 .allmulticast_enable = sfc_dev_allmulti_enable,
1497 .allmulticast_disable = sfc_dev_allmulti_disable,
1498 .link_update = sfc_dev_link_update,
1499 .stats_get = sfc_stats_get,
1500 .stats_reset = sfc_stats_reset,
1501 .xstats_get = sfc_xstats_get,
1502 .xstats_reset = sfc_stats_reset,
1503 .xstats_get_names = sfc_xstats_get_names,
1504 .dev_infos_get = sfc_dev_infos_get,
1505 .dev_supported_ptypes_get = sfc_dev_supported_ptypes_get,
1506 .mtu_set = sfc_dev_set_mtu,
1507 .rx_queue_start = sfc_rx_queue_start,
1508 .rx_queue_stop = sfc_rx_queue_stop,
1509 .tx_queue_start = sfc_tx_queue_start,
1510 .tx_queue_stop = sfc_tx_queue_stop,
1511 .rx_queue_setup = sfc_rx_queue_setup,
1512 .rx_queue_release = sfc_rx_queue_release,
1513 .rx_queue_count = sfc_rx_queue_count,
1514 .rx_descriptor_done = sfc_rx_descriptor_done,
1515 .rx_descriptor_status = sfc_rx_descriptor_status,
1516 .tx_descriptor_status = sfc_tx_descriptor_status,
1517 .tx_queue_setup = sfc_tx_queue_setup,
1518 .tx_queue_release = sfc_tx_queue_release,
1519 .flow_ctrl_get = sfc_flow_ctrl_get,
1520 .flow_ctrl_set = sfc_flow_ctrl_set,
1521 .mac_addr_set = sfc_mac_addr_set,
1522 #if EFSYS_OPT_RX_SCALE
1523 .reta_update = sfc_dev_rss_reta_update,
1524 .reta_query = sfc_dev_rss_reta_query,
1525 .rss_hash_update = sfc_dev_rss_hash_update,
1526 .rss_hash_conf_get = sfc_dev_rss_hash_conf_get,
1528 .filter_ctrl = sfc_dev_filter_ctrl,
1529 .set_mc_addr_list = sfc_set_mc_addr_list,
1530 .rxq_info_get = sfc_rx_queue_info_get,
1531 .txq_info_get = sfc_tx_queue_info_get,
1532 .fw_version_get = sfc_fw_version_get,
1533 .xstats_get_by_id = sfc_xstats_get_by_id,
1534 .xstats_get_names_by_id = sfc_xstats_get_names_by_id,
1538 * Duplicate a string in potentially shared memory required for
1539 * multi-process support.
1541 * strdup() allocates from process-local heap/memory.
1544 sfc_strdup(const char *str)
1552 size = strlen(str) + 1;
1553 copy = rte_malloc(__func__, size, 0);
1555 rte_memcpy(copy, str, size);
1561 sfc_eth_dev_set_ops(struct rte_eth_dev *dev)
1563 struct sfc_adapter *sa = dev->data->dev_private;
1564 unsigned int avail_caps = 0;
1565 const char *rx_name = NULL;
1566 const char *tx_name = NULL;
1569 switch (sa->family) {
1570 case EFX_FAMILY_HUNTINGTON:
1571 case EFX_FAMILY_MEDFORD:
1572 avail_caps |= SFC_DP_HW_FW_CAP_EF10;
1578 rc = sfc_kvargs_process(sa, SFC_KVARG_RX_DATAPATH,
1579 sfc_kvarg_string_handler, &rx_name);
1581 goto fail_kvarg_rx_datapath;
1583 if (rx_name != NULL) {
1584 sa->dp_rx = sfc_dp_find_rx_by_name(&sfc_dp_head, rx_name);
1585 if (sa->dp_rx == NULL) {
1586 sfc_err(sa, "Rx datapath %s not found", rx_name);
1590 if (!sfc_dp_match_hw_fw_caps(&sa->dp_rx->dp, avail_caps)) {
1592 "Insufficient HW/FW capabilities to use Rx datapath %s",
1595 goto fail_dp_rx_caps;
1598 sa->dp_rx = sfc_dp_find_rx_by_caps(&sfc_dp_head, avail_caps);
1599 if (sa->dp_rx == NULL) {
1600 sfc_err(sa, "Rx datapath by caps %#x not found",
1607 sa->dp_rx_name = sfc_strdup(sa->dp_rx->dp.name);
1608 if (sa->dp_rx_name == NULL) {
1610 goto fail_dp_rx_name;
1613 sfc_info(sa, "use %s Rx datapath", sa->dp_rx_name);
1615 dev->rx_pkt_burst = sa->dp_rx->pkt_burst;
1617 rc = sfc_kvargs_process(sa, SFC_KVARG_TX_DATAPATH,
1618 sfc_kvarg_string_handler, &tx_name);
1620 goto fail_kvarg_tx_datapath;
1622 if (tx_name != NULL) {
1623 sa->dp_tx = sfc_dp_find_tx_by_name(&sfc_dp_head, tx_name);
1624 if (sa->dp_tx == NULL) {
1625 sfc_err(sa, "Tx datapath %s not found", tx_name);
1629 if (!sfc_dp_match_hw_fw_caps(&sa->dp_tx->dp, avail_caps)) {
1631 "Insufficient HW/FW capabilities to use Tx datapath %s",
1634 goto fail_dp_tx_caps;
1637 sa->dp_tx = sfc_dp_find_tx_by_caps(&sfc_dp_head, avail_caps);
1638 if (sa->dp_tx == NULL) {
1639 sfc_err(sa, "Tx datapath by caps %#x not found",
1646 sa->dp_tx_name = sfc_strdup(sa->dp_tx->dp.name);
1647 if (sa->dp_tx_name == NULL) {
1649 goto fail_dp_tx_name;
1652 sfc_info(sa, "use %s Tx datapath", sa->dp_tx_name);
1654 dev->tx_pkt_burst = sa->dp_tx->pkt_burst;
1656 dev->dev_ops = &sfc_eth_dev_ops;
1665 fail_kvarg_tx_datapath:
1666 rte_free(sa->dp_rx_name);
1667 sa->dp_rx_name = NULL;
1674 fail_kvarg_rx_datapath:
1679 sfc_eth_dev_clear_ops(struct rte_eth_dev *dev)
1681 struct sfc_adapter *sa = dev->data->dev_private;
1683 dev->dev_ops = NULL;
1684 dev->rx_pkt_burst = NULL;
1685 dev->tx_pkt_burst = NULL;
1687 rte_free(sa->dp_tx_name);
1688 sa->dp_tx_name = NULL;
1691 rte_free(sa->dp_rx_name);
1692 sa->dp_rx_name = NULL;
1696 static const struct eth_dev_ops sfc_eth_dev_secondary_ops = {
1697 .rxq_info_get = sfc_rx_queue_info_get,
1698 .txq_info_get = sfc_tx_queue_info_get,
1702 sfc_eth_dev_secondary_set_ops(struct rte_eth_dev *dev)
1705 * Device private data contains many process-local pointers.
1706 * The code below must be extremely careful to use only data
1707 * located in shared memory.
1709 struct sfc_adapter *sa = dev->data->dev_private;
1710 const struct sfc_dp_rx *dp_rx;
1711 const struct sfc_dp_tx *dp_tx;
1714 dp_rx = sfc_dp_find_rx_by_name(&sfc_dp_head, sa->dp_rx_name);
1715 if (dp_rx == NULL) {
1716 sfc_err(sa, "cannot find %s Rx datapath", sa->dp_rx_name);
1720 if (~dp_rx->features & SFC_DP_RX_FEAT_MULTI_PROCESS) {
1721 sfc_err(sa, "%s Rx datapath does not support multi-process",
1724 goto fail_dp_rx_multi_process;
1727 dp_tx = sfc_dp_find_tx_by_name(&sfc_dp_head, sa->dp_tx_name);
1728 if (dp_tx == NULL) {
1729 sfc_err(sa, "cannot find %s Tx datapath", sa->dp_tx_name);
1733 if (~dp_tx->features & SFC_DP_TX_FEAT_MULTI_PROCESS) {
1734 sfc_err(sa, "%s Tx datapath does not support multi-process",
1737 goto fail_dp_tx_multi_process;
1740 dev->rx_pkt_burst = dp_rx->pkt_burst;
1741 dev->tx_pkt_burst = dp_tx->pkt_burst;
1742 dev->dev_ops = &sfc_eth_dev_secondary_ops;
1746 fail_dp_tx_multi_process:
1748 fail_dp_rx_multi_process:
1754 sfc_eth_dev_secondary_clear_ops(struct rte_eth_dev *dev)
1756 dev->dev_ops = NULL;
1757 dev->tx_pkt_burst = NULL;
1758 dev->rx_pkt_burst = NULL;
1762 sfc_register_dp(void)
1765 if (TAILQ_EMPTY(&sfc_dp_head)) {
1766 /* Prefer EF10 datapath */
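/* (registered ahead of the libefx datapaths so the capability-based lookup can prefer them) */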
1767 sfc_dp_register(&sfc_dp_head, &sfc_ef10_rx.dp);
1768 sfc_dp_register(&sfc_dp_head, &sfc_efx_rx.dp);
1770 sfc_dp_register(&sfc_dp_head, &sfc_ef10_tx.dp);
1771 sfc_dp_register(&sfc_dp_head, &sfc_efx_tx.dp);
1772 sfc_dp_register(&sfc_dp_head, &sfc_ef10_simple_tx.dp);
1777 sfc_eth_dev_init(struct rte_eth_dev *dev)
1779 struct sfc_adapter *sa = dev->data->dev_private;
1780 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1782 const efx_nic_cfg_t *encp;
1783 const struct ether_addr *from;
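/* A secondary process only installs the shared-memory-safe ops and datapath burst functions */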
1787 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1788 return -sfc_eth_dev_secondary_set_ops(dev);
1790 /* Required for logging */
1791 sa->pci_addr = pci_dev->addr;
1792 sa->port_id = dev->data->port_id;
1796 /* Copy PCI device info to the dev->data */
1797 rte_eth_copy_pci_info(dev, pci_dev);
1799 rc = sfc_kvargs_parse(sa);
1801 goto fail_kvargs_parse;
1803 rc = sfc_kvargs_process(sa, SFC_KVARG_DEBUG_INIT,
1804 sfc_kvarg_bool_handler, &sa->debug_init);
1806 goto fail_kvarg_debug_init;
1808 sfc_log_init(sa, "entry");
1810 dev->data->mac_addrs = rte_zmalloc("sfc", ETHER_ADDR_LEN, 0);
1811 if (dev->data->mac_addrs == NULL) {
1813 goto fail_mac_addrs;
1816 sfc_adapter_lock_init(sa);
1817 sfc_adapter_lock(sa);
1819 sfc_log_init(sa, "probing");
1824 sfc_log_init(sa, "set device ops");
1825 rc = sfc_eth_dev_set_ops(dev);
1829 sfc_log_init(sa, "attaching");
1830 rc = sfc_attach(sa);
1834 encp = efx_nic_cfg_get(sa->nic);
1837 * The arguments are really in reverse order in comparison to
1838 * the Linux kernel. Copy from NIC config to Ethernet device data.
1840 from = (const struct ether_addr *)(encp->enc_mac_addr);
1841 ether_addr_copy(from, &dev->data->mac_addrs[0]);
1843 sfc_adapter_unlock(sa);
1845 sfc_log_init(sa, "done");
1849 sfc_eth_dev_clear_ops(dev);
1855 sfc_adapter_unlock(sa);
1856 sfc_adapter_lock_fini(sa);
1857 rte_free(dev->data->mac_addrs);
1858 dev->data->mac_addrs = NULL;
1861 fail_kvarg_debug_init:
1862 sfc_kvargs_cleanup(sa);
1865 sfc_log_init(sa, "failed %d", rc);
1871 sfc_eth_dev_uninit(struct rte_eth_dev *dev)
1873 struct sfc_adapter *sa;
1875 if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
1876 sfc_eth_dev_secondary_clear_ops(dev);
1880 sa = dev->data->dev_private;
1881 sfc_log_init(sa, "entry");
1883 sfc_adapter_lock(sa);
1885 sfc_eth_dev_clear_ops(dev);
1890 rte_free(dev->data->mac_addrs);
1891 dev->data->mac_addrs = NULL;
1893 sfc_kvargs_cleanup(sa);
1895 sfc_adapter_unlock(sa);
1896 sfc_adapter_lock_fini(sa);
1898 sfc_log_init(sa, "done");
1900 /* Required for logging, so cleanup last */
1905 static const struct rte_pci_id pci_id_sfc_efx_map[] = {
1906 { RTE_PCI_DEVICE(EFX_PCI_VENID_SFC, EFX_PCI_DEVID_FARMINGDALE) },
1907 { RTE_PCI_DEVICE(EFX_PCI_VENID_SFC, EFX_PCI_DEVID_FARMINGDALE_VF) },
1908 { RTE_PCI_DEVICE(EFX_PCI_VENID_SFC, EFX_PCI_DEVID_GREENPORT) },
1909 { RTE_PCI_DEVICE(EFX_PCI_VENID_SFC, EFX_PCI_DEVID_GREENPORT_VF) },
1910 { RTE_PCI_DEVICE(EFX_PCI_VENID_SFC, EFX_PCI_DEVID_MEDFORD) },
1911 { RTE_PCI_DEVICE(EFX_PCI_VENID_SFC, EFX_PCI_DEVID_MEDFORD_VF) },
1912 { .vendor_id = 0 /* sentinel */ }
1915 static int sfc_eth_dev_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
1916 struct rte_pci_device *pci_dev)
1918 return rte_eth_dev_pci_generic_probe(pci_dev,
1919 sizeof(struct sfc_adapter), sfc_eth_dev_init);
1922 static int sfc_eth_dev_pci_remove(struct rte_pci_device *pci_dev)
1924 return rte_eth_dev_pci_generic_remove(pci_dev, sfc_eth_dev_uninit);
1927 static struct rte_pci_driver sfc_efx_pmd = {
1928 .id_table = pci_id_sfc_efx_map,
1930 RTE_PCI_DRV_INTR_LSC |
1931 RTE_PCI_DRV_NEED_MAPPING,
1932 .probe = sfc_eth_dev_pci_probe,
1933 .remove = sfc_eth_dev_pci_remove,
1936 RTE_PMD_REGISTER_PCI(net_sfc_efx, sfc_efx_pmd);
1937 RTE_PMD_REGISTER_PCI_TABLE(net_sfc_efx, pci_id_sfc_efx_map);
1938 RTE_PMD_REGISTER_KMOD_DEP(net_sfc_efx, "* igb_uio | uio_pci_generic | vfio-pci");
1939 RTE_PMD_REGISTER_PARAM_STRING(net_sfc_efx,
1940 SFC_KVARG_RX_DATAPATH "=" SFC_KVARG_VALUES_RX_DATAPATH " "
1941 SFC_KVARG_TX_DATAPATH "=" SFC_KVARG_VALUES_TX_DATAPATH " "
1942 SFC_KVARG_PERF_PROFILE "=" SFC_KVARG_VALUES_PERF_PROFILE " "
1943 SFC_KVARG_STATS_UPDATE_PERIOD_MS "=<long> "
1944 SFC_KVARG_MCDI_LOGGING "=" SFC_KVARG_VALUES_BOOL " "
1945 SFC_KVARG_DEBUG_INIT "=" SFC_KVARG_VALUES_BOOL);