 * Copyright(c) 2017 Marvell International Ltd.
 * Copyright(c) 2017 Semihalf.
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *   * Neither the name of Semihalf nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include <rte_ethdev.h>
#include <rte_kvargs.h>
#include <rte_malloc.h>
#include <rte_bus_vdev.h>
/* Unfortunately, container_of is defined by both DPDK and MUSDK,
 * so we'll declare only one version.
 * Note that it is not used in this PMD anyway.
#include <drivers/mv_pp2.h>
#include <drivers/mv_pp2_bpool.h>
#include <drivers/mv_pp2_hif.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>
#include <net/if_arp.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <sys/types.h>
#include "mrvl_ethdev.h"
/* bitmask with reserved hifs */
#define MRVL_MUSDK_HIFS_RESERVED 0x0F
/* bitmask with reserved bpools */
#define MRVL_MUSDK_BPOOLS_RESERVED 0x07
/* bitmask with reserved kernel RSS tables */
#define MRVL_MUSDK_RSS_RESERVED 0x01
/* maximum number of available hifs */
#define MRVL_MUSDK_HIFS_MAX 9
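/* prefetch shift: how many descriptors ahead the rx/tx burst loops prefetch mbufs */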
#define MRVL_MUSDK_PREFETCH_SHIFT 2
/* TCAM has 25 entries reserved for uc/mc filter entries */
#define MRVL_MAC_ADDRS_MAX 25
#define MRVL_MATCH_LEN 16
#define MRVL_PKT_EFFEC_OFFS (MRVL_PKT_OFFS + MV_MH_SIZE)
/* Maximum allowable packet size */
#define MRVL_PKT_SIZE_MAX (10240 - MV_MH_SIZE)
#define MRVL_IFACE_NAME_ARG "iface"
#define MRVL_CFG_ARG "cfg"
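/*
 * Illustrative devargs using the keys above (assumed invocation, not taken
 * from this file):
 *   --vdev=eth_mrvl,iface=eth0,iface=eth2,cfg=/path/to/qos_config.ini
 */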
#define MRVL_BURST_SIZE 64
#define MRVL_ARP_LENGTH 28
#define MRVL_COOKIE_ADDR_INVALID ~0ULL
#define MRVL_COOKIE_HIGH_ADDR_SHIFT (sizeof(pp2_cookie_t) * 8)
#define MRVL_COOKIE_HIGH_ADDR_MASK (~0ULL << MRVL_COOKIE_HIGH_ADDR_SHIFT)
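/*
 * A pp2 cookie is narrower than a virtual address, so only the low bits of
 * an mbuf pointer are stored in it; the common high bits are cached once in
 * cookie_addr_high and OR-ed back when buffers are retrieved (see
 * mrvl_fill_bpool() and the rx/tx burst paths).
 */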
/* Memory size (in bytes) for MUSDK dma buffers */
#define MRVL_MUSDK_DMA_MEMSIZE 41943040
static const char * const valid_args[] = {
static int used_hifs = MRVL_MUSDK_HIFS_RESERVED;
static struct pp2_hif *hifs[RTE_MAX_LCORE];
static int used_bpools[PP2_NUM_PKT_PROC] = {
	MRVL_MUSDK_BPOOLS_RESERVED,
	MRVL_MUSDK_BPOOLS_RESERVED
struct pp2_bpool *mrvl_port_to_bpool_lookup[RTE_MAX_ETHPORTS];
int mrvl_port_bpool_size[PP2_NUM_PKT_PROC][PP2_BPOOL_NUM_POOLS][RTE_MAX_LCORE];
uint64_t cookie_addr_high = MRVL_COOKIE_ADDR_INVALID;
struct mrvl_ifnames {
	const char *names[PP2_NUM_ETH_PPIO * PP2_NUM_PKT_PROC];
 * To support buffer harvesting based on the loopback port, a shadow queue
 * structure was introduced for buffer information bookkeeping.
 * Before a packet is sent, its buffer information (pp2_buff_inf) is
 * stored in the shadow queue. After the packet is transmitted, the no longer
 * used packet buffer is released back to its original hardware pool,
 * provided it originated from an interface.
 * In case it was generated by the application itself, i.e. the mbuf->port
 * field is 0xff, it is released to the software mempool.
struct mrvl_shadow_txq {
	int head;           /* write index - used when sending buffers */
	int tail;           /* read index - used when releasing buffers */
	u16 size;           /* queue occupied size */
	u16 num_to_release; /* number of buffers sent, that can be released */
	struct buff_release_entry ent[MRVL_PP2_TX_SHADOWQ_SIZE]; /* q entries */
	struct mrvl_priv *priv;
	struct rte_mempool *mp;
	struct mrvl_priv *priv;
	struct mrvl_shadow_txq shadow_txqs[RTE_MAX_LCORE];
static int mrvl_lcore_first;
static int mrvl_lcore_last;
static int mrvl_dev_num;
static int mrvl_fill_bpool(struct mrvl_rxq *rxq, int num);
static inline void mrvl_free_sent_buffers(struct pp2_ppio *ppio,
			struct pp2_hif *hif, unsigned int core_id,
			struct mrvl_shadow_txq *sq, int qid, int force);
mrvl_get_bpool_size(int pp2_id, int pool_id)
	for (i = mrvl_lcore_first; i <= mrvl_lcore_last; i++)
		size += mrvl_port_bpool_size[pp2_id][pool_id][i];
mrvl_reserve_bit(int *bitmap, int max)
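	/* first free bit is the one just above the highest bit already set; reservations grow up from bit 0 */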
	int n = sizeof(*bitmap) * 8 - __builtin_clz(*bitmap);
mrvl_init_hif(int core_id)
	struct pp2_hif_params params;
	char match[MRVL_MATCH_LEN];
	ret = mrvl_reserve_bit(&used_hifs, MRVL_MUSDK_HIFS_MAX);
		RTE_LOG(ERR, PMD, "Failed to allocate hif %d\n", core_id);
	snprintf(match, sizeof(match), "hif-%d", ret);
	memset(&params, 0, sizeof(params));
	params.match = match;
	params.out_size = MRVL_PP2_AGGR_TXQD_MAX;
	ret = pp2_hif_init(&params, &hifs[core_id]);
		RTE_LOG(ERR, PMD, "Failed to initialize hif %d\n", core_id);
static inline struct pp2_hif*
mrvl_get_hif(struct mrvl_priv *priv, int core_id)
	if (likely(hifs[core_id] != NULL))
		return hifs[core_id];
	rte_spinlock_lock(&priv->lock);
	ret = mrvl_init_hif(core_id);
		RTE_LOG(ERR, PMD, "Failed to allocate hif %d\n", core_id);
	if (core_id < mrvl_lcore_first)
		mrvl_lcore_first = core_id;
	if (core_id > mrvl_lcore_last)
		mrvl_lcore_last = core_id;
	rte_spinlock_unlock(&priv->lock);
	return hifs[core_id];
 * Configure rss based on dpdk rss configuration.
 *   Pointer to private structure.
 *   Pointer to RSS configuration.
 *   0 on success, negative error value otherwise.
mrvl_configure_rss(struct mrvl_priv *priv, struct rte_eth_rss_conf *rss_conf)
	if (rss_conf->rss_key)
		RTE_LOG(WARNING, PMD, "Changing hash key is not supported\n");
	if (rss_conf->rss_hf == 0) {
		priv->ppio_params.inqs_params.hash_type = PP2_PPIO_HASH_T_NONE;
	} else if (rss_conf->rss_hf & ETH_RSS_IPV4) {
		priv->ppio_params.inqs_params.hash_type =
			PP2_PPIO_HASH_T_2_TUPLE;
	} else if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) {
		priv->ppio_params.inqs_params.hash_type =
			PP2_PPIO_HASH_T_5_TUPLE;
		priv->rss_hf_tcp = 1;
	} else if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV4_UDP) {
		priv->ppio_params.inqs_params.hash_type =
			PP2_PPIO_HASH_T_5_TUPLE;
		priv->rss_hf_tcp = 0;
 * Ethernet device configuration.
 * Prepare the driver for a given number of TX and RX queues and
 *   Pointer to Ethernet device structure.
 *   0 on success, negative error value otherwise.
mrvl_dev_configure(struct rte_eth_dev *dev)
	struct mrvl_priv *priv = dev->data->dev_private;
	if (dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_NONE &&
	    dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_RSS) {
		RTE_LOG(INFO, PMD, "Unsupported rx multi queue mode %d\n",
			dev->data->dev_conf.rxmode.mq_mode);
	if (!dev->data->dev_conf.rxmode.hw_strip_crc) {
			"L2 CRC stripping is always enabled in hw\n");
		dev->data->dev_conf.rxmode.hw_strip_crc = 1;
	if (dev->data->dev_conf.rxmode.hw_vlan_strip) {
		RTE_LOG(INFO, PMD, "VLAN stripping not supported\n");
	if (dev->data->dev_conf.rxmode.split_hdr_size) {
		RTE_LOG(INFO, PMD, "Split headers not supported\n");
	if (dev->data->dev_conf.rxmode.enable_scatter) {
		RTE_LOG(INFO, PMD, "RX Scatter/Gather not supported\n");
	if (dev->data->dev_conf.rxmode.enable_lro) {
		RTE_LOG(INFO, PMD, "LRO not supported\n");
	if (dev->data->dev_conf.rxmode.jumbo_frame)
		dev->data->mtu = dev->data->dev_conf.rxmode.max_rx_pkt_len -
				 ETHER_HDR_LEN - ETHER_CRC_LEN;
	ret = mrvl_configure_rxqs(priv, dev->data->port_id,
				  dev->data->nb_rx_queues);
	priv->ppio_params.outqs_params.num_outqs = dev->data->nb_tx_queues;
	priv->ppio_params.maintain_stats = 1;
	priv->nb_rx_queues = dev->data->nb_rx_queues;
	if (dev->data->nb_rx_queues == 1 &&
	    dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS) {
		RTE_LOG(WARNING, PMD, "Disabling hash for 1 rx queue\n");
		priv->ppio_params.inqs_params.hash_type = PP2_PPIO_HASH_T_NONE;
	return mrvl_configure_rss(priv,
				  &dev->data->dev_conf.rx_adv_conf.rss_conf);
 * DPDK callback to change the MTU.
 * Setting the MTU affects hardware MRU (packets larger than the MRU
 *   Pointer to Ethernet device structure.
 *   0 on success, negative error value otherwise.
mrvl_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
	struct mrvl_priv *priv = dev->data->dev_private;
	/* extra MV_MH_SIZE bytes are required for Marvell tag */
	uint16_t mru = mtu + MV_MH_SIZE + ETHER_HDR_LEN + ETHER_CRC_LEN;
	if (mtu < ETHER_MIN_MTU || mru > MRVL_PKT_SIZE_MAX)
	ret = pp2_ppio_set_mru(priv->ppio, mru);
	return pp2_ppio_set_mtu(priv->ppio, mtu);
 * DPDK callback to bring the link up.
 *   Pointer to Ethernet device structure.
 *   0 on success, negative error value otherwise.
mrvl_dev_set_link_up(struct rte_eth_dev *dev)
	struct mrvl_priv *priv = dev->data->dev_private;
	ret = pp2_ppio_enable(priv->ppio);
	 * mtu/mru can be updated if pp2_ppio_enable() was called at least once
	 * as pp2_ppio_enable() changes port->t_mode from default 0 to
	 * PP2_TRAFFIC_INGRESS_EGRESS.
	 * Set mtu to default DPDK value here.
	ret = mrvl_mtu_set(dev, dev->data->mtu);
		pp2_ppio_disable(priv->ppio);
	dev->data->dev_link.link_status = ETH_LINK_UP;
 * DPDK callback to bring the link down.
 *   Pointer to Ethernet device structure.
 *   0 on success, negative error value otherwise.
mrvl_dev_set_link_down(struct rte_eth_dev *dev)
	struct mrvl_priv *priv = dev->data->dev_private;
	ret = pp2_ppio_disable(priv->ppio);
	dev->data->dev_link.link_status = ETH_LINK_DOWN;
 * DPDK callback to start the device.
 *   Pointer to Ethernet device structure.
 *   0 on success, negative errno value on failure.
mrvl_dev_start(struct rte_eth_dev *dev)
	struct mrvl_priv *priv = dev->data->dev_private;
	char match[MRVL_MATCH_LEN];
	int ret = 0, def_init_size;
	snprintf(match, sizeof(match), "ppio-%d:%d",
		 priv->pp_id, priv->ppio_id);
	priv->ppio_params.match = match;
	 * Calculate the minimum bpool size for the refill feature as follows:
	 * 2 default burst sizes multiplied by the number of rx queues.
	 * If the bpool size drops below this value, new buffers will
	 * be added to the pool.
	priv->bpool_min_size = priv->nb_rx_queues * MRVL_BURST_SIZE * 2;
	/* In case the initial bpool size configured during queue setup is
	 * smaller than the minimum size, add more buffers
	def_init_size = priv->bpool_min_size + MRVL_BURST_SIZE * 2;
	if (priv->bpool_init_size < def_init_size) {
		int buffs_to_add = def_init_size - priv->bpool_init_size;
		priv->bpool_init_size += buffs_to_add;
		ret = mrvl_fill_bpool(dev->data->rx_queues[0], buffs_to_add);
			RTE_LOG(ERR, PMD, "Failed to add buffers to bpool\n");
	 * Calculate the maximum bpool size for the refill feature as follows:
	 * the maximum number of descriptors in an rx queue multiplied by the
	 * number of rx queues, plus the minimum bpool size.
	 * In case the bpool size exceeds this value, superfluous buffers
	priv->bpool_max_size = (priv->nb_rx_queues * MRVL_PP2_RXD_MAX) +
				priv->bpool_min_size;
	ret = pp2_ppio_init(&priv->ppio_params, &priv->ppio);
	 * In case there are some stale uc/mc mac addresses, flush them
	 * here. It cannot be done during mrvl_dev_close() as port information
	 * is already gone at that point (due to pp2_ppio_deinit() in
	if (!priv->uc_mc_flushed) {
		ret = pp2_ppio_flush_mac_addrs(priv->ppio, 1, 1);
				"Failed to flush uc/mc filter list\n");
		priv->uc_mc_flushed = 1;
	if (!priv->vlan_flushed) {
		ret = pp2_ppio_flush_vlan(priv->ppio);
			RTE_LOG(ERR, PMD, "Failed to flush vlan list\n");
			 * once pp2_ppio_flush_vlan() is supported jump to out
		priv->vlan_flushed = 1;
	/* For default QoS config, don't start classifier. */
		ret = mrvl_start_qos_mapping(priv);
			pp2_ppio_deinit(priv->ppio);
	ret = mrvl_dev_set_link_up(dev);
	pp2_ppio_deinit(priv->ppio);
 * Flush receive queues.
 *   Pointer to Ethernet device structure.
mrvl_flush_rx_queues(struct rte_eth_dev *dev)
	RTE_LOG(INFO, PMD, "Flushing rx queues\n");
	for (i = 0; i < dev->data->nb_rx_queues; i++) {
			struct mrvl_rxq *q = dev->data->rx_queues[i];
			struct pp2_ppio_desc descs[MRVL_PP2_RXD_MAX];
			num = MRVL_PP2_RXD_MAX;
			ret = pp2_ppio_recv(q->priv->ppio,
					    q->priv->rxq_map[q->queue_id].tc,
					    q->priv->rxq_map[q->queue_id].inq,
					    descs, (uint16_t *)&num);
		} while (ret == 0 && num);
 * Flush transmit shadow queues.
 *   Pointer to Ethernet device structure.
mrvl_flush_tx_shadow_queues(struct rte_eth_dev *dev)
	struct mrvl_txq *txq;
	RTE_LOG(INFO, PMD, "Flushing tx shadow queues\n");
	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		txq = (struct mrvl_txq *)dev->data->tx_queues[i];
		for (j = 0; j < RTE_MAX_LCORE; j++) {
			struct mrvl_shadow_txq *sq;
			sq = &txq->shadow_txqs[j];
			mrvl_free_sent_buffers(txq->priv->ppio,
				hifs[j], j, sq, txq->queue_id, 1);
			while (sq->tail != sq->head) {
				uint64_t addr = cookie_addr_high |
					sq->ent[sq->tail].buff.cookie;
					(struct rte_mbuf *)addr);
				sq->tail = (sq->tail + 1) &
					    MRVL_PP2_TX_SHADOWQ_MASK;
			memset(sq, 0, sizeof(*sq));
 * Flush hardware bpool (buffer-pool).
 *   Pointer to Ethernet device structure.
mrvl_flush_bpool(struct rte_eth_dev *dev)
	struct mrvl_priv *priv = dev->data->dev_private;
	unsigned int core_id = rte_lcore_id();
	if (core_id == LCORE_ID_ANY)
	hif = mrvl_get_hif(priv, core_id);
	ret = pp2_bpool_get_num_buffs(priv->bpool, &num);
		RTE_LOG(ERR, PMD, "Failed to get bpool buffers number\n");
		struct pp2_buff_inf inf;
		ret = pp2_bpool_get_buff(hif, priv->bpool, &inf);
		addr = cookie_addr_high | inf.cookie;
		rte_pktmbuf_free((struct rte_mbuf *)addr);
 * DPDK callback to stop the device.
 *   Pointer to Ethernet device structure.
mrvl_dev_stop(struct rte_eth_dev *dev)
	struct mrvl_priv *priv = dev->data->dev_private;
	mrvl_dev_set_link_down(dev);
	mrvl_flush_rx_queues(dev);
	mrvl_flush_tx_shadow_queues(dev);
		pp2_cls_qos_tbl_deinit(priv->qos_tbl);
		priv->qos_tbl = NULL;
		pp2_ppio_deinit(priv->ppio);
 * DPDK callback to close the device.
 *   Pointer to Ethernet device structure.
mrvl_dev_close(struct rte_eth_dev *dev)
	struct mrvl_priv *priv = dev->data->dev_private;
	for (i = 0; i < priv->ppio_params.inqs_params.num_tcs; ++i) {
		struct pp2_ppio_tc_params *tc_params =
			&priv->ppio_params.inqs_params.tcs_params[i];
		if (tc_params->inqs_params) {
			rte_free(tc_params->inqs_params);
			tc_params->inqs_params = NULL;
	mrvl_flush_bpool(dev);
 * DPDK callback to retrieve physical link information.
 *   Pointer to Ethernet device structure.
 * @param wait_to_complete
 *   Wait for request completion (ignored).
 *   0 on success, negative error value otherwise.
mrvl_link_update(struct rte_eth_dev *dev, int wait_to_complete __rte_unused)
	 * once MUSDK provides necessary API use it here
	struct ethtool_cmd edata;
	edata.cmd = ETHTOOL_GSET;
	strcpy(req.ifr_name, dev->data->name);
	req.ifr_data = (void *)&edata;
	fd = socket(AF_INET, SOCK_DGRAM, 0);
	ret = ioctl(fd, SIOCETHTOOL, &req);
	switch (ethtool_cmd_speed(&edata)) {
		dev->data->dev_link.link_speed = ETH_SPEED_NUM_10M;
		dev->data->dev_link.link_speed = ETH_SPEED_NUM_100M;
		dev->data->dev_link.link_speed = ETH_SPEED_NUM_1G;
		dev->data->dev_link.link_speed = ETH_SPEED_NUM_10G;
		dev->data->dev_link.link_speed = ETH_SPEED_NUM_NONE;
	dev->data->dev_link.link_duplex = edata.duplex ? ETH_LINK_FULL_DUPLEX :
							 ETH_LINK_HALF_DUPLEX;
	dev->data->dev_link.link_autoneg = edata.autoneg ? ETH_LINK_AUTONEG :
 * DPDK callback to enable promiscuous mode.
 *   Pointer to Ethernet device structure.
mrvl_promiscuous_enable(struct rte_eth_dev *dev)
	struct mrvl_priv *priv = dev->data->dev_private;
	ret = pp2_ppio_set_uc_promisc(priv->ppio, 1);
		RTE_LOG(ERR, PMD, "Failed to enable promiscuous mode\n");
 * DPDK callback to enable allmulti mode.
 *   Pointer to Ethernet device structure.
mrvl_allmulticast_enable(struct rte_eth_dev *dev)
	struct mrvl_priv *priv = dev->data->dev_private;
	ret = pp2_ppio_set_mc_promisc(priv->ppio, 1);
		RTE_LOG(ERR, PMD, "Failed to enable all-multicast mode\n");
 * DPDK callback to disable promiscuous mode.
 *   Pointer to Ethernet device structure.
mrvl_promiscuous_disable(struct rte_eth_dev *dev)
	struct mrvl_priv *priv = dev->data->dev_private;
	ret = pp2_ppio_set_uc_promisc(priv->ppio, 0);
		RTE_LOG(ERR, PMD, "Failed to disable promiscuous mode\n");
 * DPDK callback to disable allmulticast mode.
 *   Pointer to Ethernet device structure.
mrvl_allmulticast_disable(struct rte_eth_dev *dev)
	struct mrvl_priv *priv = dev->data->dev_private;
	ret = pp2_ppio_set_mc_promisc(priv->ppio, 0);
		RTE_LOG(ERR, PMD, "Failed to disable all-multicast mode\n");
 * DPDK callback to remove a MAC address.
 *   Pointer to Ethernet device structure.
mrvl_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
	struct mrvl_priv *priv = dev->data->dev_private;
	char buf[ETHER_ADDR_FMT_SIZE];
	ret = pp2_ppio_remove_mac_addr(priv->ppio,
				       dev->data->mac_addrs[index].addr_bytes);
		ether_format_addr(buf, sizeof(buf),
				  &dev->data->mac_addrs[index]);
		RTE_LOG(ERR, PMD, "Failed to remove mac %s\n", buf);
 * DPDK callback to add a MAC address.
 *   Pointer to Ethernet device structure.
 *   MAC address to register.
 *   VMDq pool index to associate address with (unused).
 *   0 on success, negative error value otherwise.
mrvl_mac_addr_add(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
		  uint32_t index, uint32_t vmdq __rte_unused)
	struct mrvl_priv *priv = dev->data->dev_private;
	char buf[ETHER_ADDR_FMT_SIZE];
	/* For setting index 0, mrvl_mac_addr_set() should be used. */
	 * The maximum number of uc addresses can be tuned via the mvpp2x kernel
	 * module parameter uc_filter_max. The maximum number of mc addresses is
	 * then MRVL_MAC_ADDRS_MAX - uc_filter_max. Currently it defaults to 4 and
	 * If more than uc_filter_max uc addresses are added to the filter list,
	 * the NIC will switch to promiscuous mode automatically.
	 * If more than MRVL_MAC_ADDRS_MAX - uc_filter_max mc addresses are added
	 * to the filter list, the NIC will switch to all-multicast mode
	ret = pp2_ppio_add_mac_addr(priv->ppio, mac_addr->addr_bytes);
		ether_format_addr(buf, sizeof(buf), mac_addr);
		RTE_LOG(ERR, PMD, "Failed to add mac %s\n", buf);
 * DPDK callback to set the primary MAC address.
 *   Pointer to Ethernet device structure.
 *   MAC address to register.
mrvl_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr)
	struct mrvl_priv *priv = dev->data->dev_private;
	pp2_ppio_set_mac_addr(priv->ppio, mac_addr->addr_bytes);
	 * The port stops sending packets if pp2_ppio_set_mac_addr()
	 * is called after pp2_ppio_enable(). As a quick fix, enable
	 * the port once again.
	pp2_ppio_enable(priv->ppio);
 * DPDK callback to get device statistics.
 *   Pointer to Ethernet device structure.
 *   Stats structure output buffer.
 *   0 on success, negative error value otherwise.
mrvl_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
	struct mrvl_priv *priv = dev->data->dev_private;
	struct pp2_ppio_statistics ppio_stats;
	uint64_t drop_mac = 0;
	unsigned int i, idx, ret;
	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		struct mrvl_rxq *rxq = dev->data->rx_queues[i];
		struct pp2_ppio_inq_statistics rx_stats;
		if (unlikely(idx >= RTE_ETHDEV_QUEUE_STAT_CNTRS)) {
				"rx queue %d stats out of range (0 - %d)\n",
				idx, RTE_ETHDEV_QUEUE_STAT_CNTRS - 1);
		ret = pp2_ppio_inq_get_statistics(priv->ppio,
						  priv->rxq_map[idx].tc,
						  priv->rxq_map[idx].inq,
				"Failed to update rx queue %d stats\n", idx);
		stats->q_ibytes[idx] = rxq->bytes_recv;
		stats->q_ipackets[idx] = rx_stats.enq_desc - rxq->drop_mac;
		stats->q_errors[idx] = rx_stats.drop_early +
				       rx_stats.drop_fullq +
		stats->ibytes += rxq->bytes_recv;
		drop_mac += rxq->drop_mac;
	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		struct mrvl_txq *txq = dev->data->tx_queues[i];
		struct pp2_ppio_outq_statistics tx_stats;
		if (unlikely(idx >= RTE_ETHDEV_QUEUE_STAT_CNTRS)) {
				"tx queue %d stats out of range (0 - %d)\n",
				idx, RTE_ETHDEV_QUEUE_STAT_CNTRS - 1);
		ret = pp2_ppio_outq_get_statistics(priv->ppio, idx,
		if (unlikely(ret)) {
				"Failed to update tx queue %d stats\n", idx);
		stats->q_opackets[idx] = tx_stats.deq_desc;
		stats->q_obytes[idx] = txq->bytes_sent;
		stats->obytes += txq->bytes_sent;
	ret = pp2_ppio_get_statistics(priv->ppio, &ppio_stats, 0);
	if (unlikely(ret)) {
		RTE_LOG(ERR, PMD, "Failed to update port statistics\n");
	stats->ipackets += ppio_stats.rx_packets - drop_mac;
	stats->opackets += ppio_stats.tx_packets;
	stats->imissed += ppio_stats.rx_fullq_dropped +
			  ppio_stats.rx_bm_dropped +
			  ppio_stats.rx_early_dropped +
			  ppio_stats.rx_fifo_dropped +
			  ppio_stats.rx_cls_dropped;
	stats->ierrors = drop_mac;
 * DPDK callback to clear device statistics.
 *   Pointer to Ethernet device structure.
mrvl_stats_reset(struct rte_eth_dev *dev)
	struct mrvl_priv *priv = dev->data->dev_private;
	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		struct mrvl_rxq *rxq = dev->data->rx_queues[i];
		pp2_ppio_inq_get_statistics(priv->ppio, priv->rxq_map[i].tc,
					    priv->rxq_map[i].inq, NULL, 1);
		rxq->bytes_recv = 0;
	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		struct mrvl_txq *txq = dev->data->tx_queues[i];
		pp2_ppio_outq_get_statistics(priv->ppio, i, NULL, 1);
		txq->bytes_sent = 0;
	pp2_ppio_get_statistics(priv->ppio, NULL, 1);
 * DPDK callback to get information about the device.
 *   Pointer to Ethernet device structure (unused).
 *   Info structure output buffer.
mrvl_dev_infos_get(struct rte_eth_dev *dev __rte_unused,
		   struct rte_eth_dev_info *info)
	info->speed_capa = ETH_LINK_SPEED_10M |
			   ETH_LINK_SPEED_100M |
	info->max_rx_queues = MRVL_PP2_RXQ_MAX;
	info->max_tx_queues = MRVL_PP2_TXQ_MAX;
	info->max_mac_addrs = MRVL_MAC_ADDRS_MAX;
	info->rx_desc_lim.nb_max = MRVL_PP2_RXD_MAX;
	info->rx_desc_lim.nb_min = MRVL_PP2_RXD_MIN;
	info->rx_desc_lim.nb_align = MRVL_PP2_RXD_ALIGN;
	info->tx_desc_lim.nb_max = MRVL_PP2_TXD_MAX;
	info->tx_desc_lim.nb_min = MRVL_PP2_TXD_MIN;
	info->tx_desc_lim.nb_align = MRVL_PP2_TXD_ALIGN;
	info->rx_offload_capa = DEV_RX_OFFLOAD_JUMBO_FRAME |
				DEV_RX_OFFLOAD_VLAN_FILTER |
				DEV_RX_OFFLOAD_IPV4_CKSUM |
				DEV_RX_OFFLOAD_UDP_CKSUM |
				DEV_RX_OFFLOAD_TCP_CKSUM;
	info->tx_offload_capa = DEV_TX_OFFLOAD_IPV4_CKSUM |
				DEV_TX_OFFLOAD_UDP_CKSUM |
				DEV_TX_OFFLOAD_TCP_CKSUM;
	info->flow_type_rss_offloads = ETH_RSS_IPV4 |
				       ETH_RSS_NONFRAG_IPV4_TCP |
				       ETH_RSS_NONFRAG_IPV4_UDP;
	/* By default packets are dropped if no descriptors are available */
	info->default_rxconf.rx_drop_en = 1;
	info->max_rx_pktlen = MRVL_PKT_SIZE_MAX;
 * Return supported packet types.
 *   Pointer to Ethernet device structure (unused).
 *   Const pointer to the table with supported packet types.
static const uint32_t *
mrvl_dev_supported_ptypes_get(struct rte_eth_dev *dev __rte_unused)
	static const uint32_t ptypes[] = {
		RTE_PTYPE_L3_IPV4_EXT,
		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
		RTE_PTYPE_L3_IPV6_EXT,
		RTE_PTYPE_L2_ETHER_ARP,
 * DPDK callback to get information about specific receive queue.
 *   Pointer to Ethernet device structure.
 * @param rx_queue_id
 *   Receive queue index.
 *   Receive queue information structure.
static void mrvl_rxq_info_get(struct rte_eth_dev *dev, uint16_t rx_queue_id,
			      struct rte_eth_rxq_info *qinfo)
	struct mrvl_rxq *q = dev->data->rx_queues[rx_queue_id];
	struct mrvl_priv *priv = dev->data->dev_private;
	int inq = priv->rxq_map[rx_queue_id].inq;
	int tc = priv->rxq_map[rx_queue_id].tc;
	struct pp2_ppio_tc_params *tc_params =
		&priv->ppio_params.inqs_params.tcs_params[tc];
	qinfo->nb_desc = tc_params->inqs_params[inq].size;
 * DPDK callback to get information about specific transmit queue.
 *   Pointer to Ethernet device structure.
 * @param tx_queue_id
 *   Transmit queue index.
 *   Transmit queue information structure.
static void mrvl_txq_info_get(struct rte_eth_dev *dev, uint16_t tx_queue_id,
			      struct rte_eth_txq_info *qinfo)
	struct mrvl_priv *priv = dev->data->dev_private;
		priv->ppio_params.outqs_params.outqs_params[tx_queue_id].size;
 * DPDK callback to configure a VLAN filter.
 *   Pointer to Ethernet device structure.
 *   VLAN ID to filter.
 *   0 on success, negative error value otherwise.
mrvl_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
	struct mrvl_priv *priv = dev->data->dev_private;
	return on ? pp2_ppio_add_vlan(priv->ppio, vlan_id) :
		    pp2_ppio_remove_vlan(priv->ppio, vlan_id);
 * Release buffers to hardware bpool (buffer-pool)
 *   Receive queue pointer.
 *   Number of buffers to release to bpool.
 *   0 on success, negative error value otherwise.
mrvl_fill_bpool(struct mrvl_rxq *rxq, int num)
	struct buff_release_entry entries[MRVL_PP2_TXD_MAX];
	struct rte_mbuf *mbufs[MRVL_PP2_TXD_MAX];
	unsigned int core_id;
	struct pp2_hif *hif;
	struct pp2_bpool *bpool;
	core_id = rte_lcore_id();
	if (core_id == LCORE_ID_ANY)
	hif = mrvl_get_hif(rxq->priv, core_id);
	bpool = rxq->priv->bpool;
	ret = rte_pktmbuf_alloc_bulk(rxq->mp, mbufs, num);
	if (cookie_addr_high == MRVL_COOKIE_ADDR_INVALID)
			(uint64_t)mbufs[0] & MRVL_COOKIE_HIGH_ADDR_MASK;
	for (i = 0; i < num; i++) {
		if (((uint64_t)mbufs[i] & MRVL_COOKIE_HIGH_ADDR_MASK)
			!= cookie_addr_high) {
				"mbuf virtual addr high 0x%lx out of range\n",
				(uint64_t)mbufs[i] >> 32);
		entries[i].buff.addr =
			rte_mbuf_data_iova_default(mbufs[i]);
		entries[i].buff.cookie = (pp2_cookie_t)(uint64_t)mbufs[i];
		entries[i].bpool = bpool;
	pp2_bpool_put_buffs(hif, entries, (uint16_t *)&i);
	mrvl_port_bpool_size[bpool->pp2_id][bpool->id][core_id] += i;
	for (; i < num; i++)
		rte_pktmbuf_free(mbufs[i]);
 * DPDK callback to configure the receive queue.
 *   Pointer to Ethernet device structure.
 *   Number of descriptors to configure in queue.
 *   NUMA socket on which memory must be allocated.
 *   Thresholds parameters (unused).
 *   Memory pool for buffer allocations.
 *   0 on success, negative error value otherwise.
mrvl_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
		    unsigned int socket,
		    const struct rte_eth_rxconf *conf __rte_unused,
		    struct rte_mempool *mp)
	struct mrvl_priv *priv = dev->data->dev_private;
	struct mrvl_rxq *rxq;
	max_rx_pkt_len = dev->data->dev_conf.rxmode.max_rx_pkt_len;
	if (priv->rxq_map[idx].tc == MRVL_UNKNOWN_TC) {
		 * Unknown TC mapping, mapping will not have a correct queue.
		RTE_LOG(ERR, PMD, "Unknown TC mapping for queue %hu eth%hhu\n",
			idx, priv->ppio_id);
	min_size = rte_pktmbuf_data_room_size(mp) - RTE_PKTMBUF_HEADROOM -
		   MRVL_PKT_EFFEC_OFFS;
	if (min_size < max_rx_pkt_len) {
			"Mbuf size must be increased to %u bytes to hold up to %u bytes of data.\n",
			max_rx_pkt_len + RTE_PKTMBUF_HEADROOM +
			MRVL_PKT_EFFEC_OFFS,
	if (dev->data->rx_queues[idx]) {
		rte_free(dev->data->rx_queues[idx]);
		dev->data->rx_queues[idx] = NULL;
	rxq = rte_zmalloc_socket("rxq", sizeof(*rxq), 0, socket);
	rxq->cksum_enabled = dev->data->dev_conf.rxmode.hw_ip_checksum;
	rxq->queue_id = idx;
	rxq->port_id = dev->data->port_id;
	mrvl_port_to_bpool_lookup[rxq->port_id] = priv->bpool;
	tc = priv->rxq_map[rxq->queue_id].tc,
	inq = priv->rxq_map[rxq->queue_id].inq;
	priv->ppio_params.inqs_params.tcs_params[tc].inqs_params[inq].size =
	ret = mrvl_fill_bpool(rxq, desc);
	priv->bpool_init_size += desc;
	dev->data->rx_queues[idx] = rxq;
 * DPDK callback to release the receive queue.
 *   Generic receive queue pointer.
mrvl_rx_queue_release(void *rxq)
	struct mrvl_rxq *q = rxq;
	struct pp2_ppio_tc_params *tc_params;
	int i, num, tc, inq;
	struct pp2_hif *hif;
	unsigned int core_id = rte_lcore_id();
	if (core_id == LCORE_ID_ANY)
	hif = mrvl_get_hif(q->priv, core_id);
	tc = q->priv->rxq_map[q->queue_id].tc;
	inq = q->priv->rxq_map[q->queue_id].inq;
	tc_params = &q->priv->ppio_params.inqs_params.tcs_params[tc];
	num = tc_params->inqs_params[inq].size;
	for (i = 0; i < num; i++) {
		struct pp2_buff_inf inf;
		pp2_bpool_get_buff(hif, q->priv->bpool, &inf);
		addr = cookie_addr_high | inf.cookie;
		rte_pktmbuf_free((struct rte_mbuf *)addr);
 * DPDK callback to configure the transmit queue.
 *   Pointer to Ethernet device structure.
 *   Transmit queue index.
 *   Number of descriptors to configure in the queue.
 *   NUMA socket on which memory must be allocated.
 *   Thresholds parameters (unused).
 *   0 on success, negative error value otherwise.
mrvl_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
		    unsigned int socket,
		    const struct rte_eth_txconf *conf __rte_unused)
	struct mrvl_priv *priv = dev->data->dev_private;
	struct mrvl_txq *txq;
	if (dev->data->tx_queues[idx]) {
		rte_free(dev->data->tx_queues[idx]);
		dev->data->tx_queues[idx] = NULL;
	txq = rte_zmalloc_socket("txq", sizeof(*txq), 0, socket);
	txq->queue_id = idx;
	txq->port_id = dev->data->port_id;
	dev->data->tx_queues[idx] = txq;
	priv->ppio_params.outqs_params.outqs_params[idx].size = desc;
	priv->ppio_params.outqs_params.outqs_params[idx].weight = 1;
 * DPDK callback to release the transmit queue.
 *   Generic transmit queue pointer.
mrvl_tx_queue_release(void *txq)
	struct mrvl_txq *q = txq;
 * Update RSS hash configuration
 *   Pointer to Ethernet device structure.
 *   Pointer to RSS configuration.
 *   0 on success, negative error value otherwise.
mrvl_rss_hash_update(struct rte_eth_dev *dev,
		     struct rte_eth_rss_conf *rss_conf)
	struct mrvl_priv *priv = dev->data->dev_private;
	return mrvl_configure_rss(priv, rss_conf);
 * DPDK callback to get RSS hash configuration.
 *   Pointer to Ethernet device structure.
 *   Pointer to RSS configuration.
mrvl_rss_hash_conf_get(struct rte_eth_dev *dev,
		       struct rte_eth_rss_conf *rss_conf)
	struct mrvl_priv *priv = dev->data->dev_private;
	enum pp2_ppio_hash_type hash_type =
		priv->ppio_params.inqs_params.hash_type;
	rss_conf->rss_key = NULL;
	if (hash_type == PP2_PPIO_HASH_T_NONE)
		rss_conf->rss_hf = 0;
	else if (hash_type == PP2_PPIO_HASH_T_2_TUPLE)
		rss_conf->rss_hf = ETH_RSS_IPV4;
	else if (hash_type == PP2_PPIO_HASH_T_5_TUPLE && priv->rss_hf_tcp)
		rss_conf->rss_hf = ETH_RSS_NONFRAG_IPV4_TCP;
	else if (hash_type == PP2_PPIO_HASH_T_5_TUPLE && !priv->rss_hf_tcp)
		rss_conf->rss_hf = ETH_RSS_NONFRAG_IPV4_UDP;
static const struct eth_dev_ops mrvl_ops = {
	.dev_configure = mrvl_dev_configure,
	.dev_start = mrvl_dev_start,
	.dev_stop = mrvl_dev_stop,
	.dev_set_link_up = mrvl_dev_set_link_up,
	.dev_set_link_down = mrvl_dev_set_link_down,
	.dev_close = mrvl_dev_close,
	.link_update = mrvl_link_update,
	.promiscuous_enable = mrvl_promiscuous_enable,
	.allmulticast_enable = mrvl_allmulticast_enable,
	.promiscuous_disable = mrvl_promiscuous_disable,
	.allmulticast_disable = mrvl_allmulticast_disable,
	.mac_addr_remove = mrvl_mac_addr_remove,
	.mac_addr_add = mrvl_mac_addr_add,
	.mac_addr_set = mrvl_mac_addr_set,
	.mtu_set = mrvl_mtu_set,
	.stats_get = mrvl_stats_get,
	.stats_reset = mrvl_stats_reset,
	.dev_infos_get = mrvl_dev_infos_get,
	.dev_supported_ptypes_get = mrvl_dev_supported_ptypes_get,
	.rxq_info_get = mrvl_rxq_info_get,
	.txq_info_get = mrvl_txq_info_get,
	.vlan_filter_set = mrvl_vlan_filter_set,
	.rx_queue_setup = mrvl_rx_queue_setup,
	.rx_queue_release = mrvl_rx_queue_release,
	.tx_queue_setup = mrvl_tx_queue_setup,
	.tx_queue_release = mrvl_tx_queue_release,
	.rss_hash_update = mrvl_rss_hash_update,
	.rss_hash_conf_get = mrvl_rss_hash_conf_get,
 * Return packet type information and l3/l4 offsets.
 *   Pointer to the received packet descriptor.
 *   Packet type information.
static inline uint64_t
mrvl_desc_to_packet_type_and_offset(struct pp2_ppio_desc *desc,
				    uint8_t *l3_offset, uint8_t *l4_offset)
	enum pp2_inq_l3_type l3_type;
	enum pp2_inq_l4_type l4_type;
	uint64_t packet_type;
	pp2_ppio_inq_desc_get_l3_info(desc, &l3_type, l3_offset);
	pp2_ppio_inq_desc_get_l4_info(desc, &l4_type, l4_offset);
	packet_type = RTE_PTYPE_L2_ETHER;
	case PP2_INQ_L3_TYPE_IPV4_NO_OPTS:
		packet_type |= RTE_PTYPE_L3_IPV4;
	case PP2_INQ_L3_TYPE_IPV4_OK:
		packet_type |= RTE_PTYPE_L3_IPV4_EXT;
	case PP2_INQ_L3_TYPE_IPV4_TTL_ZERO:
		packet_type |= RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
	case PP2_INQ_L3_TYPE_IPV6_NO_EXT:
		packet_type |= RTE_PTYPE_L3_IPV6;
	case PP2_INQ_L3_TYPE_IPV6_EXT:
		packet_type |= RTE_PTYPE_L3_IPV6_EXT;
	case PP2_INQ_L3_TYPE_ARP:
		packet_type |= RTE_PTYPE_L2_ETHER_ARP;
		 * In case of ARP, l4_offset is set to a wrong value.
		 * Set it to a proper one so that later on mbuf->l3_len can be
		 * calculated by subtracting l3_offset from l4_offset.
		*l4_offset = *l3_offset + MRVL_ARP_LENGTH;
		RTE_LOG(DEBUG, PMD, "Failed to recognise l3 packet type\n");
	case PP2_INQ_L4_TYPE_TCP:
		packet_type |= RTE_PTYPE_L4_TCP;
	case PP2_INQ_L4_TYPE_UDP:
		packet_type |= RTE_PTYPE_L4_UDP;
		RTE_LOG(DEBUG, PMD, "Failed to recognise l4 packet type\n");
 * Get offload information from the received packet descriptor.
 *   Pointer to the received packet descriptor.
 *   Mbuf offload flags.
static inline uint64_t
mrvl_desc_to_ol_flags(struct pp2_ppio_desc *desc)
	enum pp2_inq_desc_status status;
	status = pp2_ppio_inq_desc_get_l3_pkt_error(desc);
	if (unlikely(status != PP2_DESC_ERR_OK))
		flags = PKT_RX_IP_CKSUM_BAD;
		flags = PKT_RX_IP_CKSUM_GOOD;
	status = pp2_ppio_inq_desc_get_l4_pkt_error(desc);
	if (unlikely(status != PP2_DESC_ERR_OK))
		flags |= PKT_RX_L4_CKSUM_BAD;
		flags |= PKT_RX_L4_CKSUM_GOOD;
 * DPDK callback for receive.
 *   Generic pointer to the receive queue.
 *   Array to store received packets.
 *   Maximum number of packets in array.
 *   Number of packets successfully received.
mrvl_rx_pkt_burst(void *rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
	struct mrvl_rxq *q = rxq;
	struct pp2_ppio_desc descs[nb_pkts];
	struct pp2_bpool *bpool;
	int i, ret, rx_done = 0;
	struct pp2_hif *hif;
	unsigned int core_id = rte_lcore_id();
	hif = mrvl_get_hif(q->priv, core_id);
	if (unlikely(!q->priv->ppio || !hif))
	bpool = q->priv->bpool;
	ret = pp2_ppio_recv(q->priv->ppio, q->priv->rxq_map[q->queue_id].tc,
			    q->priv->rxq_map[q->queue_id].inq, descs, &nb_pkts);
	if (unlikely(ret < 0)) {
		RTE_LOG(ERR, PMD, "Failed to receive packets\n");
	mrvl_port_bpool_size[bpool->pp2_id][bpool->id][core_id] -= nb_pkts;
	for (i = 0; i < nb_pkts; i++) {
		struct rte_mbuf *mbuf;
		uint8_t l3_offset, l4_offset;
		enum pp2_inq_desc_status status;
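		/* prefetch the mbuf a few descriptors ahead to hide memory latency */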
		if (likely(nb_pkts - i > MRVL_MUSDK_PREFETCH_SHIFT)) {
			struct pp2_ppio_desc *pref_desc;
			pref_desc = &descs[i + MRVL_MUSDK_PREFETCH_SHIFT];
			pref_addr = cookie_addr_high |
				    pp2_ppio_inq_desc_get_cookie(pref_desc);
			rte_mbuf_prefetch_part1((struct rte_mbuf *)(pref_addr));
			rte_mbuf_prefetch_part2((struct rte_mbuf *)(pref_addr));
		addr = cookie_addr_high |
		       pp2_ppio_inq_desc_get_cookie(&descs[i]);
		mbuf = (struct rte_mbuf *)addr;
		rte_pktmbuf_reset(mbuf);
		/* drop packet in case of mac, overrun or resource error */
		status = pp2_ppio_inq_desc_get_l2_pkt_error(&descs[i]);
		if (unlikely(status != PP2_DESC_ERR_OK)) {
			struct pp2_buff_inf binf = {
				.addr = rte_mbuf_data_iova_default(mbuf),
				.cookie = (pp2_cookie_t)(uint64_t)mbuf,
			pp2_bpool_put_buff(hif, bpool, &binf);
			mrvl_port_bpool_size
				[bpool->pp2_id][bpool->id][core_id]++;
		mbuf->data_off += MRVL_PKT_EFFEC_OFFS;
		mbuf->pkt_len = pp2_ppio_inq_desc_get_pkt_len(&descs[i]);
		mbuf->data_len = mbuf->pkt_len;
		mbuf->port = q->port_id;
			mrvl_desc_to_packet_type_and_offset(&descs[i],
		mbuf->l2_len = l3_offset;
		mbuf->l3_len = l4_offset - l3_offset;
		if (likely(q->cksum_enabled))
			mbuf->ol_flags = mrvl_desc_to_ol_flags(&descs[i]);
		rx_pkts[rx_done++] = mbuf;
		q->bytes_recv += mbuf->pkt_len;
	if (rte_spinlock_trylock(&q->priv->lock) == 1) {
		num = mrvl_get_bpool_size(bpool->pp2_id, bpool->id);
		if (unlikely(num <= q->priv->bpool_min_size ||
			     (!rx_done && num < q->priv->bpool_init_size))) {
			ret = mrvl_fill_bpool(q, MRVL_BURST_SIZE);
				RTE_LOG(ERR, PMD, "Failed to fill bpool\n");
		} else if (unlikely(num > q->priv->bpool_max_size)) {
			int pkt_to_remove = num - q->priv->bpool_init_size;
			struct rte_mbuf *mbuf;
			struct pp2_buff_inf buff;
				"\nport-%d:%d: bpool %d oversize - remove %d buffers (pool size: %d -> %d)\n",
				bpool->pp2_id, q->priv->ppio->port_id,
				bpool->id, pkt_to_remove, num,
				q->priv->bpool_init_size);
			for (i = 0; i < pkt_to_remove; i++) {
				ret = pp2_bpool_get_buff(hif, bpool, &buff);
				mbuf = (struct rte_mbuf *)
					(cookie_addr_high | buff.cookie);
				rte_pktmbuf_free(mbuf);
			mrvl_port_bpool_size
				[bpool->pp2_id][bpool->id][core_id] -= i;
		rte_spinlock_unlock(&q->priv->lock);
 * Prepare offload information.
 * @param packet_type
 *   Packet type bitfield.
 *   Pointer to the pp2_outq_l3_type structure.
 *   Pointer to the pp2_outq_l4_type structure.
 * @param gen_l3_cksum
 *   Will be set to 1 in case l3 checksum is computed.
 *   Will be set to 1 in case l4 checksum is computed.
 *   0 on success, negative error value otherwise.
mrvl_prepare_proto_info(uint64_t ol_flags, uint32_t packet_type,
			enum pp2_outq_l3_type *l3_type,
			enum pp2_outq_l4_type *l4_type,
	 * Based on ol_flags, prepare information
	 * for pp2_ppio_outq_desc_set_proto_info(), which sets up the descriptor
	if (ol_flags & PKT_TX_IPV4) {
		*l3_type = PP2_OUTQ_L3_TYPE_IPV4;
		*gen_l3_cksum = ol_flags & PKT_TX_IP_CKSUM ? 1 : 0;
	} else if (ol_flags & PKT_TX_IPV6) {
		*l3_type = PP2_OUTQ_L3_TYPE_IPV6;
		/* no checksum for ipv6 header */
		/* if something different, stop processing */
	ol_flags &= PKT_TX_L4_MASK;
	if ((packet_type & RTE_PTYPE_L4_TCP) &&
	    ol_flags == PKT_TX_TCP_CKSUM) {
		*l4_type = PP2_OUTQ_L4_TYPE_TCP;
	} else if ((packet_type & RTE_PTYPE_L4_UDP) &&
		   ol_flags == PKT_TX_UDP_CKSUM) {
		*l4_type = PP2_OUTQ_L4_TYPE_UDP;
		*l4_type = PP2_OUTQ_L4_TYPE_OTHER;
		/* no checksum for other type */
 * Release already sent buffers to bpool (buffer-pool).
 *   Pointer to the port structure.
 *   Pointer to the MUSDK hardware interface.
 *   Pointer to the shadow queue.
 *   Force releasing packets.
mrvl_free_sent_buffers(struct pp2_ppio *ppio, struct pp2_hif *hif,
		       unsigned int core_id, struct mrvl_shadow_txq *sq,
	struct buff_release_entry *entry;
	uint16_t nb_done = 0, num = 0, skip_bufs = 0;
	pp2_ppio_get_num_outq_done(ppio, hif, qid, &nb_done);
	sq->num_to_release += nb_done;
	if (likely(!force &&
		   sq->num_to_release < MRVL_PP2_BUF_RELEASE_BURST_SIZE))
	nb_done = sq->num_to_release;
	sq->num_to_release = 0;
	for (i = 0; i < nb_done; i++) {
		entry = &sq->ent[sq->tail + num];
		if (unlikely(!entry->buff.addr)) {
				"Shadow memory @%d: cookie(%lx), pa(%lx)!\n",
				sq->tail, (u64)entry->buff.cookie,
				(u64)entry->buff.addr);
		if (unlikely(!entry->bpool)) {
			struct rte_mbuf *mbuf;
			mbuf = (struct rte_mbuf *)
			       (cookie_addr_high | entry->buff.cookie);
			rte_pktmbuf_free(mbuf);
		mrvl_port_bpool_size
			[entry->bpool->pp2_id][entry->bpool->id][core_id]++;
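		/* pp2_bpool_put_buffs() consumes a contiguous slice of the shadow queue, so flush before the ring wraps */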
		if (unlikely(sq->tail + num == MRVL_PP2_TX_SHADOWQ_SIZE))
	pp2_bpool_put_buffs(hif, &sq->ent[sq->tail], &num);
	sq->tail = (sq->tail + num) & MRVL_PP2_TX_SHADOWQ_MASK;
	pp2_bpool_put_buffs(hif, &sq->ent[sq->tail], &num);
	sq->tail = (sq->tail + num) & MRVL_PP2_TX_SHADOWQ_MASK;
 * DPDK callback for transmit.
 *   Generic pointer to the transmit queue.
 *   Packets to transmit.
 *   Number of packets in array.
 *   Number of packets successfully transmitted.
mrvl_tx_pkt_burst(void *txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
	struct mrvl_txq *q = txq;
	struct mrvl_shadow_txq *sq;
	struct pp2_hif *hif;
	struct pp2_ppio_desc descs[nb_pkts];
	unsigned int core_id = rte_lcore_id();
	int i, ret, bytes_sent = 0;
	uint16_t num, sq_free_size;
	hif = mrvl_get_hif(q->priv, core_id);
	sq = &q->shadow_txqs[core_id];
	if (unlikely(!q->priv->ppio || !hif))
	mrvl_free_sent_buffers(q->priv->ppio, hif, core_id,
			       sq, q->queue_id, 0);
	sq_free_size = MRVL_PP2_TX_SHADOWQ_SIZE - sq->size - 1;
	if (unlikely(nb_pkts > sq_free_size)) {
			"No room in shadow queue for %d packets! %d packets will be sent.\n",
			nb_pkts, sq_free_size);
		nb_pkts = sq_free_size;
	for (i = 0; i < nb_pkts; i++) {
		struct rte_mbuf *mbuf = tx_pkts[i];
		int gen_l3_cksum, gen_l4_cksum;
		enum pp2_outq_l3_type l3_type;
		enum pp2_outq_l4_type l4_type;
		if (likely(nb_pkts - i > MRVL_MUSDK_PREFETCH_SHIFT)) {
			struct rte_mbuf *pref_pkt_hdr;
			pref_pkt_hdr = tx_pkts[i + MRVL_MUSDK_PREFETCH_SHIFT];
			rte_mbuf_prefetch_part1(pref_pkt_hdr);
			rte_mbuf_prefetch_part2(pref_pkt_hdr);
		sq->ent[sq->head].buff.cookie = (pp2_cookie_t)(uint64_t)mbuf;
		sq->ent[sq->head].buff.addr =
			rte_mbuf_data_iova_default(mbuf);
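		/*
		 * mbufs injected by the application itself (port 0xff) or
		 * still referenced elsewhere must not be recycled into the
		 * hardware bpool; they are freed back to the mempool in
		 * mrvl_free_sent_buffers() instead.
		 */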
		sq->ent[sq->head].bpool =
			(unlikely(mbuf->port == 0xff || mbuf->refcnt > 1)) ?
			 NULL : mrvl_port_to_bpool_lookup[mbuf->port];
		sq->head = (sq->head + 1) & MRVL_PP2_TX_SHADOWQ_MASK;
		pp2_ppio_outq_desc_reset(&descs[i]);
		pp2_ppio_outq_desc_set_phys_addr(&descs[i],
						 rte_pktmbuf_iova(mbuf));
		pp2_ppio_outq_desc_set_pkt_offset(&descs[i], 0);
		pp2_ppio_outq_desc_set_pkt_len(&descs[i],
					       rte_pktmbuf_pkt_len(mbuf));
		bytes_sent += rte_pktmbuf_pkt_len(mbuf);
		 * in case unsupported ol_flags were passed
		 * do not update descriptor offload information
		ret = mrvl_prepare_proto_info(mbuf->ol_flags, mbuf->packet_type,
					      &l3_type, &l4_type, &gen_l3_cksum,
		pp2_ppio_outq_desc_set_proto_info(&descs[i], l3_type, l4_type,
						  mbuf->l2_len + mbuf->l3_len,
						  gen_l3_cksum, gen_l4_cksum);
	pp2_ppio_send(q->priv->ppio, hif, q->queue_id, descs, &nb_pkts);
	/* number of packets that were not sent */
	if (unlikely(num > nb_pkts)) {
		for (i = nb_pkts; i < num; i++) {
			sq->head = (MRVL_PP2_TX_SHADOWQ_SIZE + sq->head - 1) &
				MRVL_PP2_TX_SHADOWQ_MASK;
			addr = cookie_addr_high | sq->ent[sq->head].buff.cookie;
				rte_pktmbuf_pkt_len((struct rte_mbuf *)addr);
		sq->size -= num - nb_pkts;
	q->bytes_sent += bytes_sent;
 * Initialize packet processor.
 *   0 on success, negative error value otherwise.
	struct pp2_init_params init_params;
	memset(&init_params, 0, sizeof(init_params));
	init_params.hif_reserved_map = MRVL_MUSDK_HIFS_RESERVED;
	init_params.bm_pool_reserved_map = MRVL_MUSDK_BPOOLS_RESERVED;
	init_params.rss_tbl_reserved_map = MRVL_MUSDK_RSS_RESERVED;
	return pp2_init(&init_params);
 * Deinitialize packet processor.
 *   0 on success, negative error value otherwise.
mrvl_deinit_pp2(void)
 * Create private device structure.
 *   Pointer to the port name passed in the initialization parameters.
 *   Pointer to the newly allocated private device structure.
static struct mrvl_priv *
mrvl_priv_create(const char *dev_name)
	struct pp2_bpool_params bpool_params;
	char match[MRVL_MATCH_LEN];
	struct mrvl_priv *priv;
	priv = rte_zmalloc_socket(dev_name, sizeof(*priv), 0, rte_socket_id());
	ret = pp2_netdev_get_ppio_info((char *)(uintptr_t)dev_name,
				       &priv->pp_id, &priv->ppio_id);
	bpool_bit = mrvl_reserve_bit(&used_bpools[priv->pp_id],
				     PP2_BPOOL_NUM_POOLS);
	priv->bpool_bit = bpool_bit;
	snprintf(match, sizeof(match), "pool-%d:%d", priv->pp_id,
	memset(&bpool_params, 0, sizeof(bpool_params));
	bpool_params.match = match;
	bpool_params.buff_len = MRVL_PKT_SIZE_MAX + MRVL_PKT_EFFEC_OFFS;
	ret = pp2_bpool_init(&bpool_params, &priv->bpool);
		goto out_clear_bpool_bit;
	priv->ppio_params.type = PP2_PPIO_T_NIC;
	rte_spinlock_init(&priv->lock);
out_clear_bpool_bit:
	used_bpools[priv->pp_id] &= ~(1 << priv->bpool_bit);
 * Create device representing Ethernet port.
 *   Pointer to the port's name.
 *   0 on success, negative error value otherwise.
mrvl_eth_dev_create(struct rte_vdev_device *vdev, const char *name)
	int ret, fd = socket(AF_INET, SOCK_DGRAM, 0);
	struct rte_eth_dev *eth_dev;
	struct mrvl_priv *priv;
	eth_dev = rte_eth_dev_allocate(name);
	priv = mrvl_priv_create(name);
	eth_dev->data->mac_addrs =
		rte_zmalloc("mac_addrs",
			    ETHER_ADDR_LEN * MRVL_MAC_ADDRS_MAX, 0);
	if (!eth_dev->data->mac_addrs) {
		RTE_LOG(ERR, PMD, "Failed to allocate space for eth addrs\n");
	memset(&req, 0, sizeof(req));
	strcpy(req.ifr_name, name);
	ret = ioctl(fd, SIOCGIFHWADDR, &req);
	memcpy(eth_dev->data->mac_addrs[0].addr_bytes,
	       req.ifr_addr.sa_data, ETHER_ADDR_LEN);
	eth_dev->rx_pkt_burst = mrvl_rx_pkt_burst;
	eth_dev->tx_pkt_burst = mrvl_tx_pkt_burst;
	eth_dev->data->kdrv = RTE_KDRV_NONE;
	eth_dev->data->dev_private = priv;
	eth_dev->device = &vdev->device;
	eth_dev->dev_ops = &mrvl_ops;
	rte_free(eth_dev->data->mac_addrs);
	rte_eth_dev_release_port(eth_dev);
 * Cleanup previously created device representing Ethernet port.
 *   Pointer to the port name.
mrvl_eth_dev_destroy(const char *name)
	struct rte_eth_dev *eth_dev;
	struct mrvl_priv *priv;
	eth_dev = rte_eth_dev_allocated(name);
	priv = eth_dev->data->dev_private;
	pp2_bpool_deinit(priv->bpool);
	used_bpools[priv->pp_id] &= ~(1 << priv->bpool_bit);
	rte_free(eth_dev->data->mac_addrs);
	rte_eth_dev_release_port(eth_dev);
 * Callback used by rte_kvargs_process() during argument parsing.
 *   Pointer to the parsed key (unused).
 *   Pointer to the parsed value.
 *   Pointer to the extra arguments which contains address of the
 *   table of pointers to parsed interface names.
mrvl_get_ifnames(const char *key __rte_unused, const char *value,
	struct mrvl_ifnames *ifnames = extra_args;
	ifnames->names[ifnames->idx++] = value;
 * Deinitialize per-lcore MUSDK hardware interfaces (hifs).
mrvl_deinit_hifs(void)
	for (i = mrvl_lcore_first; i <= mrvl_lcore_last; i++) {
			pp2_hif_deinit(hifs[i]);
	used_hifs = MRVL_MUSDK_HIFS_RESERVED;
	memset(hifs, 0, sizeof(hifs));
 * DPDK callback to register the virtual device.
 *   Pointer to the virtual device.
 *   0 on success, negative error value otherwise.
rte_pmd_mrvl_probe(struct rte_vdev_device *vdev)
	struct rte_kvargs *kvlist;
	struct mrvl_ifnames ifnames;
	uint32_t i, ifnum, cfgnum;
	params = rte_vdev_device_args(vdev);
	kvlist = rte_kvargs_parse(params, valid_args);
	ifnum = rte_kvargs_count(kvlist, MRVL_IFACE_NAME_ARG);
	if (ifnum > RTE_DIM(ifnames.names))
		goto out_free_kvlist;
	rte_kvargs_process(kvlist, MRVL_IFACE_NAME_ARG,
			   mrvl_get_ifnames, &ifnames);
	 * The system initialization below should be done only once,
	 * on the first provided configuration file
	if (!mrvl_qos_cfg) {
		cfgnum = rte_kvargs_count(kvlist, MRVL_CFG_ARG);
		RTE_LOG(INFO, PMD, "Parsing config file!\n");
			RTE_LOG(ERR, PMD, "Cannot handle more than one config file!\n");
			goto out_free_kvlist;
		} else if (cfgnum == 1) {
			rte_kvargs_process(kvlist, MRVL_CFG_ARG,
					   mrvl_get_qoscfg, &mrvl_qos_cfg);
	RTE_LOG(INFO, PMD, "Perform MUSDK initializations\n");
	 * ret == -EEXIST is correct; it means DMA
	 * has already been initialized (by another PMD).
	ret = mv_sys_dma_mem_init(MRVL_MUSDK_DMA_MEMSIZE);
			goto out_free_kvlist;
			"DMA memory has been already initialized by a different driver.\n");
	ret = mrvl_init_pp2();
		RTE_LOG(ERR, PMD, "Failed to init PP!\n");
		goto out_deinit_dma;
	memset(mrvl_port_bpool_size, 0, sizeof(mrvl_port_bpool_size));
	mrvl_lcore_first = RTE_MAX_LCORE;
	mrvl_lcore_last = 0;
	for (i = 0; i < ifnum; i++) {
		RTE_LOG(INFO, PMD, "Creating %s\n", ifnames.names[i]);
		ret = mrvl_eth_dev_create(vdev, ifnames.names[i]);
	mrvl_dev_num += ifnum;
	rte_kvargs_free(kvlist);
		mrvl_eth_dev_destroy(ifnames.names[i]);
	if (mrvl_dev_num == 0)
	if (mrvl_dev_num == 0)
		mv_sys_dma_mem_destroy();
	rte_kvargs_free(kvlist);
 * DPDK callback to remove virtual device.
 *   Pointer to the removed virtual device.
 *   0 on success, negative error value otherwise.
rte_pmd_mrvl_remove(struct rte_vdev_device *vdev)
	name = rte_vdev_device_name(vdev);
	RTE_LOG(INFO, PMD, "Removing %s\n", name);
	for (i = 0; i < rte_eth_dev_count(); i++) {
		char ifname[RTE_ETH_NAME_MAX_LEN];
		rte_eth_dev_get_name_by_port(i, ifname);
		mrvl_eth_dev_destroy(ifname);
	if (mrvl_dev_num == 0) {
		RTE_LOG(INFO, PMD, "Perform MUSDK deinit\n");
		mv_sys_dma_mem_destroy();
static struct rte_vdev_driver pmd_mrvl_drv = {
	.probe = rte_pmd_mrvl_probe,
	.remove = rte_pmd_mrvl_remove,
RTE_PMD_REGISTER_VDEV(net_mrvl, pmd_mrvl_drv);
RTE_PMD_REGISTER_ALIAS(net_mrvl, eth_mrvl);