#define TX_CONS(txq) (txq->sw_tx_cons & NUM_TX_BDS(txq))
#define TX_PROD(txq) (txq->sw_tx_prod & NUM_TX_BDS(txq))
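+/* Illustrative example (editor's sketch): assuming NUM_TX_BDS(txq) yields
+ * the ring mask (power-of-two ring size minus one), a 512-entry ring gives
+ * a mask of 511, so sw_tx_prod == 513 wraps to TX_PROD(txq) == (513 & 511) == 1
+ */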
-/* Number of TX BDs per packet used currently */
-#define MAX_NUM_TX_BDS 1
-
#define QEDE_DEFAULT_TX_FREE_THRESH 32
#define QEDE_CSUM_ERROR (1 << 0)
#define CQE_HAS_OUTER_VLAN(flags) \
	((flags) & (PARSING_AND_ERR_FLAGS_TUNNEL8021QTAGEXIST_MASK \
		<< PARSING_AND_ERR_FLAGS_TUNNEL8021QTAGEXIST_SHIFT))
+#define QEDE_MIN_RX_BUFF_SIZE (1024)
+#define QEDE_VLAN_TAG_SIZE (4)
+#define QEDE_LLC_SNAP_HDR_LEN (8)
+
/* Max supported alignment is 256 (8 shift);
 * a minimal alignment shift of 6 is optimal for 57xxx HW performance
 */
#define QEDE_L1_CACHE_SHIFT 6
#define QEDE_RX_ALIGN_SHIFT (RTE_MAX(6, RTE_MIN(8, QEDE_L1_CACHE_SHIFT)))
#define QEDE_FW_RX_ALIGN_END (1UL << QEDE_RX_ALIGN_SHIFT)
+#define QEDE_CEIL_TO_CACHE_LINE_SIZE(n) (((n) + (QEDE_FW_RX_ALIGN_END - 1)) & \
+ ~(QEDE_FW_RX_ALIGN_END - 1))
+#define QEDE_FLOOR_TO_CACHE_LINE_SIZE(n) RTE_ALIGN_FLOOR(n, \
+ QEDE_FW_RX_ALIGN_END)
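+/* Worked example (editor's note): with QEDE_RX_ALIGN_SHIFT == 6,
+ * QEDE_FW_RX_ALIGN_END == 64, so for n == 100:
+ *	QEDE_CEIL_TO_CACHE_LINE_SIZE(100)  == (100 + 63) & ~63 == 128
+ *	QEDE_FLOOR_TO_CACHE_LINE_SIZE(100) == 100 & ~63 == 64
+ */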
-#define QEDE_ETH_OVERHEAD (ETHER_HDR_LEN + 8 + 8 + QEDE_FW_RX_ALIGN_END)
+/* Note: QEDE_LLC_SNAP_HDR_LEN is optional;
+ * the +2 accounts for padding in front of the L2 header
+ */
+#define QEDE_ETH_OVERHEAD ((2 * QEDE_VLAN_TAG_SIZE) \
+			   + QEDE_LLC_SNAP_HDR_LEN + 2)
-/* TBD: Excluding IPV6 */
-#define QEDE_RSS_OFFLOAD_ALL (ETH_RSS_IPV4 | ETH_RSS_NONFRAG_IPV4_TCP | \
- ETH_RSS_NONFRAG_IPV4_UDP)
+#define QEDE_MAX_ETHER_HDR_LEN (ETHER_HDR_LEN + QEDE_ETH_OVERHEAD)
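+/* Worked example (editor's note): with ETHER_HDR_LEN == 14,
+ * QEDE_ETH_OVERHEAD == (2 * 4) + 8 + 2 == 18 bytes, so
+ * QEDE_MAX_ETHER_HDR_LEN == 14 + 18 == 32 bytes
+ */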
+
+#define QEDE_RSS_OFFLOAD_ALL (ETH_RSS_IPV4 |\
+ ETH_RSS_NONFRAG_IPV4_TCP |\
+ ETH_RSS_NONFRAG_IPV4_UDP |\
+ ETH_RSS_IPV6 |\
+ ETH_RSS_NONFRAG_IPV6_TCP |\
+ ETH_RSS_NONFRAG_IPV6_UDP |\
+ ETH_RSS_VXLAN)
#define QEDE_TXQ_FLAGS ((uint32_t)ETH_TXQ_FLAGS_NOMULTSEGS)
-#define MAX_NUM_TC 8
+#define for_each_rss(i) for (i = 0; i < qdev->num_rx_queues; i++)
+#define for_each_tss(i) for (i = 0; i < qdev->num_tx_queues; i++)
+#define QEDE_RXTX_MAX(qdev) \
+ (RTE_MAX(QEDE_RSS_COUNT(qdev), QEDE_TSS_COUNT(qdev)))
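+/* Usage sketch (editor's note; assumes a 'qdev' pointer in scope and a
+ * hypothetical helper poll_rxq()):
+ *	int i;
+ *	for_each_rss(i)
+ *		poll_rxq(qdev->fp_array[i].rxq);
+ */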
+
+/* Macros for non-tunnel packet types lookup table */
+#define QEDE_PKT_TYPE_UNKNOWN 0x0
+#define QEDE_PKT_TYPE_MAX 0x3f
+
+#define QEDE_PKT_TYPE_IPV4 0x1
+#define QEDE_PKT_TYPE_IPV6 0x2
+#define QEDE_PKT_TYPE_IPV4_TCP 0x5
+#define QEDE_PKT_TYPE_IPV6_TCP 0x6
+#define QEDE_PKT_TYPE_IPV4_UDP 0x9
+#define QEDE_PKT_TYPE_IPV6_UDP 0xa
+
+/* For fragmented packets, the corresponding IP bit is set along with bit 4 */
+#define QEDE_PKT_TYPE_IPV4_FRAG 0x11
+#define QEDE_PKT_TYPE_IPV6_FRAG 0x12
+
+#define QEDE_PKT_TYPE_IPV4_VLAN 0x21
+#define QEDE_PKT_TYPE_IPV6_VLAN 0x22
+#define QEDE_PKT_TYPE_IPV4_TCP_VLAN 0x25
+#define QEDE_PKT_TYPE_IPV6_TCP_VLAN 0x26
+#define QEDE_PKT_TYPE_IPV4_UDP_VLAN 0x29
+#define QEDE_PKT_TYPE_IPV6_UDP_VLAN 0x2a
+
+#define QEDE_PKT_TYPE_IPV4_VLAN_FRAG 0x31
+#define QEDE_PKT_TYPE_IPV6_VLAN_FRAG 0x32
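+/* Decoding note (editor's): the values above form a 6-bit lookup index
+ * (hence QEDE_PKT_TYPE_MAX == 0x3f): bit 0 IPv4, bit 1 IPv6, bit 2 TCP,
+ * bit 3 UDP, bit 4 frag, bit 5 VLAN; e.g. 0x29 == VLAN | UDP | IPv4
+ */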
+
+/* Macros for tunneled packets with next protocol lookup table */
+#define QEDE_PKT_TYPE_TUNN_GENEVE 0x1
+#define QEDE_PKT_TYPE_TUNN_GRE 0x2
+#define QEDE_PKT_TYPE_TUNN_VXLAN 0x3
+
+/* Bit 2 is a don't-care bit */
+#define QEDE_PKT_TYPE_TUNN_L2_TENID_NOEXIST_GENEVE 0x9
+#define QEDE_PKT_TYPE_TUNN_L2_TENID_NOEXIST_GRE 0xa
+#define QEDE_PKT_TYPE_TUNN_L2_TENID_NOEXIST_VXLAN 0xb
+
+#define QEDE_PKT_TYPE_TUNN_L2_TENID_EXIST_GENEVE 0xd
+#define QEDE_PKT_TYPE_TUNN_L2_TENID_EXIST_GRE 0xe
+#define QEDE_PKT_TYPE_TUNN_L2_TENID_EXIST_VXLAN 0xf
+
+#define QEDE_PKT_TYPE_TUNN_IPV4_TENID_NOEXIST_GENEVE 0x11
+#define QEDE_PKT_TYPE_TUNN_IPV4_TENID_NOEXIST_GRE 0x12
+#define QEDE_PKT_TYPE_TUNN_IPV4_TENID_NOEXIST_VXLAN 0x13
+
+#define QEDE_PKT_TYPE_TUNN_IPV4_TENID_EXIST_GENEVE 0x15
+#define QEDE_PKT_TYPE_TUNN_IPV4_TENID_EXIST_GRE 0x16
+#define QEDE_PKT_TYPE_TUNN_IPV4_TENID_EXIST_VXLAN 0x17
+
+#define QEDE_PKT_TYPE_TUNN_IPV6_TENID_NOEXIST_GENEVE 0x19
+#define QEDE_PKT_TYPE_TUNN_IPV6_TENID_NOEXIST_GRE 0x1a
+#define QEDE_PKT_TYPE_TUNN_IPV6_TENID_NOEXIST_VXLAN 0x1b
-#define for_each_rss(i) for (i = 0; i < qdev->num_rss; i++)
+#define QEDE_PKT_TYPE_TUNN_IPV6_TENID_EXIST_GENEVE 0x1d
+#define QEDE_PKT_TYPE_TUNN_IPV6_TENID_EXIST_GRE 0x1e
+#define QEDE_PKT_TYPE_TUNN_IPV6_TENID_EXIST_VXLAN 0x1f
+
+#define QEDE_PKT_TYPE_TUNN_MAX_TYPE 0x20 /* 2^5 */
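+/* Decoding note (editor's, inferred from the values above): bits 0-1 give
+ * the tunnel type (1 GENEVE, 2 GRE, 3 VXLAN), bit 2 separates the
+ * TENID_EXIST variants from TENID_NOEXIST, and bits 3-4 select the inner
+ * header (01 L2, 10 IPv4, 11 IPv6), giving a 5-bit index below
+ * QEDE_PKT_TYPE_TUNN_MAX_TYPE
+ */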
+
+#define QEDE_TX_CSUM_OFFLOAD_MASK (PKT_TX_IP_CKSUM | \
+ PKT_TX_TCP_CKSUM | \
+ PKT_TX_UDP_CKSUM | \
+ PKT_TX_OUTER_IP_CKSUM | \
+ PKT_TX_TCP_SEG | \
+ PKT_TX_IPV4 | \
+ PKT_TX_IPV6)
+
+#define QEDE_TX_OFFLOAD_MASK (QEDE_TX_CSUM_OFFLOAD_MASK | \
+ PKT_TX_QINQ_PKT | \
+ PKT_TX_VLAN_PKT | \
+ PKT_TX_TUNNEL_MASK)
+
+#define QEDE_TX_OFFLOAD_NOTSUP_MASK \
+ (PKT_TX_OFFLOAD_MASK ^ QEDE_TX_OFFLOAD_MASK)
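+/* Usage sketch (editor's note): a Tx prepare routine can reject mbufs
+ * requesting unsupported offloads with a single mask test, e.g.:
+ *	if (mbuf->ol_flags & QEDE_TX_OFFLOAD_NOTSUP_MASK)
+ *		return -ENOTSUP;
+ */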
/*
 * RX BD descriptor ring
 */
struct qede_rx_entry {
	struct rte_mbuf *mbuf;
	uint32_t page_offset;
	/* allows expansion .. */
};
+/* TPA-related structures */
+struct qede_agg_info {
+ struct rte_mbuf *tpa_head; /* Pointer to first TPA segment */
+ struct rte_mbuf *tpa_tail; /* Pointer to last TPA segment */
+};
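+/* Sketch (editor's note): with head/tail pointers each aggregated segment
+ * can be appended in O(1), e.g.:
+ *	tpa_info->tpa_tail->next = seg;
+ *	tpa_info->tpa_tail = seg;
+ */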
+
/*
 * Structure associated with each RX queue.
 */
struct qede_rx_queue {
	struct rte_mempool *mb_pool;
	struct ecore_chain rx_bd_ring;
	struct ecore_chain rx_comp_ring;
uint16_t *hw_cons_ptr;
void OSAL_IOMEM *hw_rxq_prod_addr;
struct qede_rx_entry *sw_rx_ring;
+ struct ecore_sb_info *sb_info;
uint16_t sw_rx_cons;
uint16_t sw_rx_prod;
uint16_t nb_rx_desc;
uint16_t queue_id;
uint16_t port_id;
uint16_t rx_buf_size;
+ uint64_t rcv_pkts;
+ uint64_t rx_segs;
uint64_t rx_hw_errors;
uint64_t rx_alloc_errors;
+ struct qede_agg_info tpa_info[ETH_TPA_MAX_AGGS_NUM];
struct qede_dev *qdev;
+ void *handle;
};
/*
 * Structure associated with each TX queue.
 */
struct qede_tx_queue {
	struct ecore_chain tx_pbl;
	struct qede_tx_entry *sw_tx_ring;
	uint16_t nb_tx_desc;
	uint16_t nb_tx_avail;
	uint16_t tx_free_thresh;
	uint16_t queue_id;
	uint16_t sw_tx_cons;
	uint16_t sw_tx_prod;
void OSAL_IOMEM *doorbell_addr;
volatile union db_prod tx_db;
uint16_t port_id;
- uint64_t txq_counter;
+ uint64_t xmit_pkts;
+ bool is_legacy;
struct qede_dev *qdev;
+ void *handle;
};
struct qede_fastpath {
- struct qede_dev *qdev;
- uint8_t rss_id;
struct ecore_sb_info *sb_info;
struct qede_rx_queue *rxq;
- struct qede_tx_queue *txqs[MAX_NUM_TC];
- char name[80];
+ struct qede_tx_queue *txq;
};
/*
 * RX/TX function prototypes
 */
void qede_tx_queue_release(void *tx_queue);
-int qede_dev_start(struct rte_eth_dev *eth_dev);
+uint16_t qede_xmit_pkts(void *p_txq, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts);
-void qede_dev_stop(struct rte_eth_dev *eth_dev);
+uint16_t qede_xmit_prep_pkts(void *p_txq, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts);
-void qede_reset_fp_rings(struct qede_dev *qdev);
+uint16_t qede_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts);
-void qede_free_fp_arrays(struct qede_dev *qdev);
+uint16_t qede_rxtx_pkts_dummy(void *p_rxq,
+ struct rte_mbuf **pkts,
+ uint16_t nb_pkts);
-void qede_free_mem_load(struct qede_dev *qdev);
+int qede_start_queues(struct rte_eth_dev *eth_dev);
-uint16_t qede_xmit_pkts(void *p_txq, struct rte_mbuf **tx_pkts,
- uint16_t nb_pkts);
+void qede_stop_queues(struct rte_eth_dev *eth_dev);
+int qede_calc_rx_buf_size(struct rte_eth_dev *dev, uint16_t mbufsz,
+ uint16_t max_frame_size);
-uint16_t qede_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts,
- uint16_t nb_pkts);
+/* Fastpath resource alloc/dealloc helpers */
+int qede_alloc_fp_resc(struct qede_dev *qdev);
+
+void qede_dealloc_fp_resc(struct rte_eth_dev *eth_dev);
#endif /* _QEDE_RXTX_H_ */