New upstream version 17.11.4 (deb_dpdk.git)

diff --git a/drivers/net/mlx5/mlx5_rxtx.c b/drivers/net/mlx5/mlx5_rxtx.c
index 32bfa30..1bbce3b 100644
--- a/drivers/net/mlx5/mlx5_rxtx.c
+++ b/drivers/net/mlx5/mlx5_rxtx.c
@@ -114,6 +114,14 @@ mlx5_set_ptype_table(void)
                     RTE_PTYPE_L4_TCP;
        (*p)[0x06] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
                     RTE_PTYPE_L4_TCP;
+       (*p)[0x0d] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+                    RTE_PTYPE_L4_TCP;
+       (*p)[0x0e] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+                    RTE_PTYPE_L4_TCP;
+       (*p)[0x11] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+                    RTE_PTYPE_L4_TCP;
+       (*p)[0x12] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+                    RTE_PTYPE_L4_TCP;
        /* UDP */
        (*p)[0x09] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
                     RTE_PTYPE_L4_UDP;
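
The four new rows extend TCP classification to the remaining TCP header-type codes the NIC can report. The table index is assembled from CQE fields; going by the bit layout documented alongside rxq_cq_to_pkt_type() in this file, a sketch of the decode (the meaning of codes 3 and 4 is our reading of the hardware encoding, not spelled out in this hunk):

    /*
     * idx bit[1:0] = l3_hdr_type  (1 = IPv6, 2 = IPv4)
     * idx bit[4:2] = l4_hdr_type  (1 = TCP, 2 = UDP,
     *                              3 = TCP empty ACK, 4 = TCP with ACK)
     * idx bit[5] = ip_frag, bit[6] = tunneled, bit[7] = outer_l3_type
     */
    idx = ((pinfo & 0x3) << 6) | ((ptype & 0xfc00) >> 10);

Under that reading, 0x0d/0x0e and 0x11/0x12 are the IPv6/IPv4 variants of the two TCP-with-ACK codes; before this change such segments fell back to RTE_PTYPE_UNKNOWN, the table's default. The hunks below mirror the same four codes in the 0x80 rows and in the tunneled rows.
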
@@ -132,6 +140,14 @@ mlx5_set_ptype_table(void)
                     RTE_PTYPE_L4_TCP;
        (*p)[0x86] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
                     RTE_PTYPE_L4_TCP;
+       (*p)[0x8d] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+                    RTE_PTYPE_L4_TCP;
+       (*p)[0x8e] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+                    RTE_PTYPE_L4_TCP;
+       (*p)[0x91] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+                    RTE_PTYPE_L4_TCP;
+       (*p)[0x92] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+                    RTE_PTYPE_L4_TCP;
        (*p)[0x89] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
                     RTE_PTYPE_L4_UDP;
        (*p)[0x8a] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
@@ -169,12 +185,36 @@ mlx5_set_ptype_table(void)
        (*p)[0x46] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
                     RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
                     RTE_PTYPE_INNER_L4_TCP;
+       (*p)[0x4d] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+                    RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+                    RTE_PTYPE_INNER_L4_TCP;
+       (*p)[0x4e] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+                    RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+                    RTE_PTYPE_INNER_L4_TCP;
+       (*p)[0x51] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+                    RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+                    RTE_PTYPE_INNER_L4_TCP;
+       (*p)[0x52] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+                    RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+                    RTE_PTYPE_INNER_L4_TCP;
        (*p)[0xc5] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
                     RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
                     RTE_PTYPE_INNER_L4_TCP;
        (*p)[0xc6] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
                     RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
                     RTE_PTYPE_INNER_L4_TCP;
+       (*p)[0xcd] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+                    RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+                    RTE_PTYPE_INNER_L4_TCP;
+       (*p)[0xce] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+                    RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+                    RTE_PTYPE_INNER_L4_TCP;
+       (*p)[0xd1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+                    RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+                    RTE_PTYPE_INNER_L4_TCP;
+       (*p)[0xd2] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+                    RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+                    RTE_PTYPE_INNER_L4_TCP;
        /* Tunneled - UDP */
        (*p)[0x49] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
                     RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
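
Here the same four header-type codes gain tunneled rows: 0x4d/0x4e and 0x51/0x52 have bit 6 set (tunneled, outer IPv4), while 0xcd/0xce and 0xd1/0xd2 add bit 7 (outer IPv6). A worked decode of one row, under the same bit-layout assumption as above:

    /* 0x4d = 0100 1101 binary: bit6 set -> tunneled, bit7 clear ->
     * outer IPv4; l4_hdr_type = 3 (TCP empty ACK); l3_hdr_type = 1 ->
     * inner IPv6.  That matches the entry:
     * RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
     * RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_INNER_L4_TCP. */
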
@@ -370,7 +410,6 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
                unsigned int ds = 0;
                unsigned int sg = 0; /* counter of additional segs attached. */
                uintptr_t addr;
-               uint64_t naddr;
                uint16_t pkt_inline_sz = MLX5_WQE_DWORD_SIZE + 2;
                uint16_t tso_header_sz = 0;
                uint16_t ehdr;
@@ -594,12 +633,12 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
                        ds = 3;
 use_dseg:
                        /* Add the remaining packet as a simple ds. */
-                       naddr = rte_cpu_to_be_64(addr);
+                       addr = rte_cpu_to_be_64(addr);
                        *dseg = (rte_v128u32_t){
                                rte_cpu_to_be_32(length),
                                mlx5_tx_mb2mr(txq, buf),
-                               naddr,
-                               naddr >> 32,
+                               addr,
+                               addr >> 32,
                        };
                        ++ds;
                        if (!segs_n)
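
This hunk, and the matching one in the multi-segment path just below, drops the separate naddr temporary and byte-swaps addr in place. The resulting descriptor write, restated as a sketch with our comments added; reusing addr is only safe because uintptr_t is 64 bits wide on every platform this PMD supported at the time (a 32-bit uintptr_t would truncate the rte_cpu_to_be_64() result):

    addr = rte_cpu_to_be_64(addr);      /* big-endian address, in place */
    *dseg = (rte_v128u32_t){
            rte_cpu_to_be_32(length),   /* byte count */
            mlx5_tx_mb2mr(txq, buf),    /* lkey of the buffer's region */
            addr,                       /* low 32 bits of the BE64 value */
            addr >> 32,                 /* high 32 bits */
    };
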
@@ -633,12 +672,12 @@ next_seg:
                total_length += length;
 #endif
                /* Store segment information. */
-               naddr = rte_cpu_to_be_64(rte_pktmbuf_mtod(buf, uintptr_t));
+               addr = rte_cpu_to_be_64(rte_pktmbuf_mtod(buf, uintptr_t));
                *dseg = (rte_v128u32_t){
                        rte_cpu_to_be_32(length),
                        mlx5_tx_mb2mr(txq, buf),
-                       naddr,
-                       naddr >> 32,
+                       addr,
+                       addr >> 32,
                };
                (*txq->elts)[++elts_head & elts_m] = buf;
                ++sg;
@@ -701,6 +740,8 @@ next_wqe:
        /* Check whether completion threshold has been reached. */
        comp = txq->elts_comp + i + j + k;
        if (comp >= MLX5_TX_COMP_THRESH) {
+               /* A CQE slot must always be available. */
+               assert((1u << txq->cqe_n) - (txq->cq_pi++ - txq->cq_ci));
                /* Request completion on last WQE. */
                last_wqe->ctrl2 = rte_cpu_to_be_32(8);
                /* Save elts_head in unused "immediate" field of WQE. */
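
Note that the new assertion carries a side effect: under NDEBUG, assert() compiles away and txq->cq_pi is never incremented. That looks deliberate, since after this patch cq_pi is only ever compared against cq_ci inside these same availability checks, but a side-effect-free formulation would behave identically in both build modes; a sketch, not what upstream wrote:

    /* A CQE slot must always be available. */
    assert((1u << txq->cqe_n) - (txq->cq_pi - txq->cq_ci));
    #ifndef NDEBUG
    ++txq->cq_pi;   /* completion requests issued, debug accounting only */
    #endif

The same assert-with-increment recurs in the MPW, MPW-inline and Enhanced MPW completion blocks below; in the last of those it replaces a previously unconditional txq->cq_pi++.
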
@@ -912,6 +953,8 @@ mlx5_tx_burst_mpw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
        if (comp >= MLX5_TX_COMP_THRESH) {
                volatile struct mlx5_wqe *wqe = mpw.wqe;
 
+               /* A CQE slot must always be available. */
+               assert((1u << txq->cqe_n) - (txq->cq_pi++ - txq->cq_ci));
                /* Request completion on last WQE. */
                wqe->ctrl[2] = rte_cpu_to_be_32(8);
                /* Save elts_head in unused "immediate" field of WQE. */
@@ -1204,6 +1247,8 @@ mlx5_tx_burst_mpw_inline(void *dpdk_txq, struct rte_mbuf **pkts,
        if (comp >= MLX5_TX_COMP_THRESH) {
                volatile struct mlx5_wqe *wqe = mpw.wqe;
 
+               /* A CQE slot must always be available. */
+               assert((1u << txq->cqe_n) - (txq->cq_pi++ - txq->cq_ci));
                /* Request completion on last WQE. */
                wqe->ctrl[2] = rte_cpu_to_be_32(8);
                /* Save elts_head in unused "immediate" field of WQE. */
@@ -1331,16 +1376,12 @@ mlx5_tx_burst_empw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
        /* Start processing. */
        mlx5_tx_complete(txq);
        max_elts = (elts_n - (elts_head - txq->elts_tail));
-       /* A CQE slot must always be available. */
-       assert((1u << txq->cqe_n) - (txq->cq_pi - txq->cq_ci));
        max_wqe = (1u << txq->wqe_n) - (txq->wqe_ci - txq->wqe_pi);
        if (unlikely(!max_wqe))
                return 0;
        do {
                struct rte_mbuf *buf = *(pkts++);
                uintptr_t addr;
-               uint64_t naddr;
-               unsigned int n;
                unsigned int do_inline = 0; /* Whether inline is possible. */
                uint32_t length;
                unsigned int segs_n = buf->nb_segs;
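
Two tidy-ups at the top of the Enhanced MPW burst: the blanket CQE-availability assert moves from function entry into the completion block at the end (same pattern as above), and the naddr and n locals are dropped, naddr for the same reason as in mlx5_tx_burst(), n because its only user, a prefetch loop, is deleted two hunks further down.
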
@@ -1459,7 +1500,7 @@ mlx5_tx_burst_empw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
                                max_wqe--;
                        else
                                max_wqe -= 2;
-               } else if (do_inline) {
+               } else if (max_inline && do_inline) {
                        /* Inline packet into WQE. */
                        unsigned int max;
 
@@ -1517,16 +1558,13 @@ mlx5_tx_burst_empw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
                                        ((uintptr_t)mpw.data.raw +
                                         inl_pad);
                        (*txq->elts)[elts_head++ & elts_m] = buf;
-                       addr = rte_pktmbuf_mtod(buf, uintptr_t);
-                       for (n = 0; n * RTE_CACHE_LINE_SIZE < length; n++)
-                               rte_prefetch2((void *)(addr +
-                                               n * RTE_CACHE_LINE_SIZE));
-                       naddr = rte_cpu_to_be_64(addr);
+                       addr = rte_cpu_to_be_64(rte_pktmbuf_mtod(buf,
+                                                                uintptr_t));
                        *dseg = (rte_v128u32_t) {
                                rte_cpu_to_be_32(length),
                                mlx5_tx_mb2mr(txq, buf),
-                               naddr,
-                               naddr >> 32,
+                               addr,
+                               addr >> 32,
                        };
                        mpw.data.raw = (volatile void *)(dseg + 1);
                        mpw.total_len += (inl_pad + sizeof(*dseg));
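
Besides the same naddr-to-addr simplification, this hunk deletes the rte_prefetch2() loop over the packet payload. In this branch the buffer is attached by pointer and read by the NIC via DMA, never by the CPU, so prefetching every cache line of it into L2 spent cycles and evicted useful lines with no consumer on the CPU side.
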
@@ -1550,13 +1588,14 @@ mlx5_tx_burst_empw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
                         (1 << txq->wqe_n) / MLX5_TX_COMP_THRESH_INLINE_DIV) {
                volatile struct mlx5_wqe *wqe = mpw.wqe;
 
+               /* A CQE slot must always be available. */
+               assert((1u << txq->cqe_n) - (txq->cq_pi++ - txq->cq_ci));
                /* Request completion on last WQE. */
                wqe->ctrl[2] = rte_cpu_to_be_32(8);
                /* Save elts_head in unused "immediate" field of WQE. */
                wqe->ctrl[3] = elts_head;
                txq->elts_comp = 0;
                txq->mpw_comp = txq->wqe_ci;
-               txq->cq_pi++;
        } else {
                txq->elts_comp += j;
        }
@@ -1677,6 +1716,7 @@ mlx5_rx_poll_len(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe,
                        return 0;
                ++rxq->cq_ci;
                op_own = cqe->op_own;
+               rte_io_rmb();
                if (MLX5_CQE_FORMAT(op_own) == MLX5_COMPRESSED) {
                        volatile struct mlx5_mini_cqe8 (*mc)[8] =
                                (volatile struct mlx5_mini_cqe8 (*)[8])
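
The added rte_io_rmb() orders the CQE reads on weakly ordered CPUs (armv8, POWER): the loads that parse the rest of the entry, including the compressed mini-CQE array handled just below, must not be hoisted above the op_own load that establishes the entry as valid. A generic sketch of the pattern, with hypothetical names:

    /* Descriptor-ring consumer ordering (illustrative only). */
    if (desc_owned_by_sw(desc)) {   /* hypothetical ownership check */
            rte_io_rmb();           /* own-bit load before field loads */
            consume(desc);          /* safe to read the rest now */
    }
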
@@ -1934,11 +1974,10 @@ skip:
  *   Number of packets successfully transmitted (<= pkts_n).
  */
 uint16_t
-removed_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
+removed_tx_burst(void *dpdk_txq __rte_unused,
+                struct rte_mbuf **pkts __rte_unused,
+                uint16_t pkts_n __rte_unused)
 {
-       (void)dpdk_txq;
-       (void)pkts;
-       (void)pkts_n;
        return 0;
 }
 
@@ -1959,11 +1998,10 @@ removed_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
  *   Number of packets successfully received (<= pkts_n).
  */
 uint16_t
-removed_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
+removed_rx_burst(void *dpdk_rxq __rte_unused,
+                struct rte_mbuf **pkts __rte_unused,
+                uint16_t pkts_n __rte_unused)
 {
-       (void)dpdk_rxq;
-       (void)pkts;
-       (void)pkts_n;
        return 0;
 }
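
From here to the end of the file, the (void)param idiom gives way to __rte_unused annotations on the parameters themselves; both silence -Wunused-parameter, but the attribute states the intent in the signature. For reference, the macro is defined in rte_common.h:

    #define __rte_unused __attribute__((__unused__))
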
 
@@ -1975,56 +2013,49 @@ removed_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
  */
 
 uint16_t __attribute__((weak))
-mlx5_tx_burst_raw_vec(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
+mlx5_tx_burst_raw_vec(void *dpdk_txq __rte_unused,
+                     struct rte_mbuf **pkts __rte_unused,
+                     uint16_t pkts_n __rte_unused)
 {
-       (void)dpdk_txq;
-       (void)pkts;
-       (void)pkts_n;
        return 0;
 }
 
 uint16_t __attribute__((weak))
-mlx5_tx_burst_vec(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
+mlx5_tx_burst_vec(void *dpdk_txq __rte_unused,
+                 struct rte_mbuf **pkts __rte_unused,
+                 uint16_t pkts_n __rte_unused)
 {
-       (void)dpdk_txq;
-       (void)pkts;
-       (void)pkts_n;
        return 0;
 }
 
 uint16_t __attribute__((weak))
-mlx5_rx_burst_vec(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
+mlx5_rx_burst_vec(void *dpdk_rxq __rte_unused,
+                 struct rte_mbuf **pkts __rte_unused,
+                 uint16_t pkts_n __rte_unused)
 {
-       (void)dpdk_rxq;
-       (void)pkts;
-       (void)pkts_n;
        return 0;
 }
 
 int __attribute__((weak))
-priv_check_raw_vec_tx_support(struct priv *priv)
+mlx5_check_raw_vec_tx_support(struct rte_eth_dev *dev __rte_unused)
 {
-       (void)priv;
        return -ENOTSUP;
 }
 
 int __attribute__((weak))
-priv_check_vec_tx_support(struct priv *priv)
+mlx5_check_vec_tx_support(struct rte_eth_dev *dev __rte_unused)
 {
-       (void)priv;
        return -ENOTSUP;
 }
 
 int __attribute__((weak))
-rxq_check_vec_support(struct mlx5_rxq_data *rxq)
+mlx5_rxq_check_vec_support(struct mlx5_rxq_data *rxq __rte_unused)
 {
-       (void)rxq;
        return -ENOTSUP;
 }
 
 int __attribute__((weak))
-priv_check_vec_rx_support(struct priv *priv)
+mlx5_check_vec_rx_support(struct rte_eth_dev *dev __rte_unused)
 {
-       (void)priv;
        return -ENOTSUP;
 }
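
These weak definitions are the build-time fallback for the vectorized datapath: when the vectorized implementation (mlx5_rxtx_vec.c) is compiled for the target (x86_64 or armv8 with the required support), its strong definitions override the stubs at link time; otherwise the stubs report -ENOTSUP and the scalar burst functions are selected. The renames also continue the file-wide move from the driver-private struct priv to rte_eth_dev handles with an mlx5_ prefix. A minimal, self-contained illustration of the weak-linkage pattern, with names of our own:

    /* fallback.c, always linked */
    int __attribute__((weak))
    vec_supported(void)
    {
            return 0;   /* overridden when the vector object is linked */
    }

    /* vec.c, linked only on supported targets */
    int
    vec_supported(void)
    {
            return 1;
    }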