build: backporting a dpdk i40e TSO pkt exceeds buffer size patch
author     Steven Luong <[email protected]>
           Fri, 5 Jun 2020 13:33:25 +0000 (06:33 -0700)
committer  Andrew Yourtchenko <[email protected]>
           Wed, 1 Jul 2020 10:04:24 +0000 (10:04 +0000)
The subject patch appears to be critical. Backport it to DPDK_19.05.

Type: fix

Signed-off-by: Steven Luong <[email protected]>
Change-Id: Ic25cb8c5798c3218f739c9dd5ce4d70da5782457

build/external/patches/dpdk_19.05/001-net-i40e-fix-TSO-pkt-exceeds-allowed-buf-size-issue.patch [new file with mode: 0644]

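For context before the diff: the heart of the backported fix is descriptor-count
arithmetic. The hardware caps the data carried by one tx descriptor at (16K-1)B,
so a packet needs DIV_ROUND_UP(data_len, limit) descriptors per mbuf segment,
summed over all segments, rather than simply one per segment. A minimal
standalone sketch of that arithmetic (illustration only; the seg_lens array is a
hypothetical stand-in for walking an mbuf chain):

    #include <stdint.h>
    #include <stdio.h>

    /* HW allows at most (16K-1)B of data per tx descriptor. */
    #define MAX_DATA_PER_TXD ((16u * 1024u) - 1u) /* 16383 */
    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
        /* Hypothetical segment lengths of one TSO mbuf chain. */
        uint32_t seg_lens[] = { 65536, 2048 };
        uint16_t count = 0;

        for (size_t i = 0; i < sizeof(seg_lens) / sizeof(seg_lens[0]); i++)
            count += DIV_ROUND_UP(seg_lens[i], MAX_DATA_PER_TXD);

        /* 65536B needs 5 descriptors, 2048B needs 1, so count == 6. */
        printf("tx descriptors needed: %u\n", count);
        return 0;
    }

Without the fix, the 65536B segment above would be assumed to fit in one
descriptor (nb_segs counts segments, not descriptors), underestimating the
ring space reserved and handing the NIC an oversized buffer.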
diff --git a/build/external/patches/dpdk_19.05/001-net-i40e-fix-TSO-pkt-exceeds-allowed-buf-size-issue.patch b/build/external/patches/dpdk_19.05/001-net-i40e-fix-TSO-pkt-exceeds-allowed-buf-size-issue.patch
new file mode 100644
index 0000000..105d60a
--- /dev/null
+++ b/build/external/patches/dpdk_19.05/001-net-i40e-fix-TSO-pkt-exceeds-allowed-buf-size-issue.patch
@@ -0,0 +1,99 @@
+From: Xiaoyun Li <[email protected]>
+Cc: Xiaoyun Li <[email protected]>, [email protected]
+Subject: [dpdk-stable] [PATCH v3] net/i40e: fix TSO pkt exceeds allowed buf size issue
+Date: Thu, 26 Dec 2019 14:45:44 +0800
+Message-ID: <[email protected]>
+In-Reply-To: <[email protected]>
+
+Hardware limits the max buffer size per tx descriptor to (16K-1)B.
+So when TSO is enabled, the mbuf data size may exceed this limit
+and cause malicious behavior on the NIC. This patch fixes the
+issue by using more tx descriptors for such large buffers.
+
+Fixes: 4861cde46116 ("i40e: new poll mode driver")
+
+Signed-off-by: Xiaoyun Li <[email protected]>
+---
+v3:
+ * Reused the existing macros to define I40E_MAX_DATA_PER_TXD
+v2:
+ * Each pkt can have several segments, so the needed tx descs must be
+ * summed over all segments.
+---
+ drivers/net/i40e/i40e_rxtx.c | 45 +++++++++++++++++++++++++++++++++++-
+ 1 file changed, 44 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/net/i40e/i40e_rxtx.c b/drivers/net/i40e/i40e_rxtx.c
+index 17dc8c78f..bbdba39b3 100644
+--- a/drivers/net/i40e/i40e_rxtx.c
++++ b/drivers/net/i40e/i40e_rxtx.c
+@@ -989,6 +989,24 @@ i40e_set_tso_ctx(struct rte_mbuf *mbuf, union i40e_tx_offload tx_offload)
+       return ctx_desc;
+ }
++/* HW requires that Tx buffer size ranges from 1B up to (16K-1)B. */
++#define I40E_MAX_DATA_PER_TXD \
++      (I40E_TXD_QW1_TX_BUF_SZ_MASK >> I40E_TXD_QW1_TX_BUF_SZ_SHIFT)
++/* Calculate the number of TX descriptors needed for each pkt */
++static inline uint16_t
++i40e_calc_pkt_desc(struct rte_mbuf *tx_pkt)
++{
++      struct rte_mbuf *txd = tx_pkt;
++      uint16_t count = 0;
++
++      while (txd != NULL) {
++              count += DIV_ROUND_UP(txd->data_len, I40E_MAX_DATA_PER_TXD);
++              txd = txd->next;
++      }
++
++      return count;
++}
++
+ uint16_t
+ i40e_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
+ {
+@@ -1046,8 +1064,15 @@ i40e_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
+                * The number of descriptors that must be allocated for
+                * a packet equals to the number of the segments of that
+                * packet plus 1 context descriptor if needed.
++               * Recalculate the needed tx descs when TSO enabled in case
++               * the mbuf data size exceeds max data size that hw allows
++               * per tx desc.
+                */
+-              nb_used = (uint16_t)(tx_pkt->nb_segs + nb_ctx);
++              if (ol_flags & PKT_TX_TCP_SEG)
++                      nb_used = (uint16_t)(i40e_calc_pkt_desc(tx_pkt) +
++                                           nb_ctx);
++              else
++                      nb_used = (uint16_t)(tx_pkt->nb_segs + nb_ctx);
+               tx_last = (uint16_t)(tx_id + nb_used - 1);
+
+               /* Circular ring */
+@@ -1160,6 +1185,24 @@ i40e_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
+                       slen = m_seg->data_len;
+                       buf_dma_addr = rte_mbuf_data_iova(m_seg);
++                      while ((ol_flags & PKT_TX_TCP_SEG) &&
++                              unlikely(slen > I40E_MAX_DATA_PER_TXD)) {
++                              txd->buffer_addr =
++                                      rte_cpu_to_le_64(buf_dma_addr);
++                              txd->cmd_type_offset_bsz =
++                                      i40e_build_ctob(td_cmd,
++                                      td_offset, I40E_MAX_DATA_PER_TXD,
++                                      td_tag);
++
++                              buf_dma_addr += I40E_MAX_DATA_PER_TXD;
++                              slen -= I40E_MAX_DATA_PER_TXD;
++
++                              txe->last_id = tx_last;
++                              tx_id = txe->next_id;
++                              txe = txn;
++                              txd = &txr[tx_id];
++                              txn = &sw_ring[txe->next_id];
++                      }
+                       PMD_TX_LOG(DEBUG, "mbuf: %p, TDD[%u]:\n"
+                               "buf_dma_addr: %#"PRIx64";\n"
+                               "td_cmd: %#x;\n"
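The second half of the patch (the while loop in the last hunk) carves an
oversized TSO segment into full (16K-1)B descriptors, advancing the DMA
address and shrinking the remaining length until the tail fits in a single
descriptor, which is then handled by the unchanged path below it. A schematic
sketch of that carving (illustration only; fill_desc() is a hypothetical
stand-in for the real descriptor writes and ring bookkeeping):

    #include <stdint.h>
    #include <stdio.h>

    #define MAX_DATA_PER_TXD ((16u * 1024u) - 1u)

    /* Hypothetical stand-in for programming one tx descriptor; the real
     * code also advances tx_id/txe/txd through the ring here. */
    static void fill_desc(uint64_t dma_addr, uint32_t len)
    {
        printf("desc: addr=%#llx len=%u\n", (unsigned long long)dma_addr, len);
    }

    static void xmit_segment(uint64_t buf_dma_addr, uint32_t slen, int tso)
    {
        /* Carve full-size chunks only for TSO segments that exceed the
         * per-descriptor limit; mirrors the patch's while loop. */
        while (tso && slen > MAX_DATA_PER_TXD) {
            fill_desc(buf_dma_addr, MAX_DATA_PER_TXD);
            buf_dma_addr += MAX_DATA_PER_TXD;
            slen -= MAX_DATA_PER_TXD;
        }
        fill_desc(buf_dma_addr, slen); /* remainder, normal path */
    }

    int main(void)
    {
        /* A 40000B segment splits as 16383 + 16383 + 7234 across 3 descs. */
        xmit_segment(0x100000, 40000, 1);
        return 0;
    }

Gating the loop on PKT_TX_TCP_SEG keeps the extra work off the non-TSO fast
path: ordinary packets still get one descriptor per segment.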