dpdk: net/bonding: fix buffer corruption in packets 07/16407/2
author    Igor Mikhailov (imichail) <imichail@cisco.com>
          Fri, 7 Dec 2018 19:27:24 +0000 (11:27 -0800)
committer Damjan Marion <dmarion@me.com>
          Wed, 12 Dec 2018 00:36:14 +0000 (00:36 +0000)
This fix is needed for setups with bonded interfaces. When a bond slave
device cannot transmit all packets in a burst, the merge logic in DPDK 18.08
writes invalid mbuf addresses back into the bufs array, and VPP crashes when
it later frees those un-transmitted packets. This change carries the upstream
DPDK fix as a patch against DPDK 18.08.

Change-Id: I0dc66e32b6c89dc3f8d552401833d6785a12c978
Signed-off-by: Igor Mikhailov (imichail) <imichail@cisco.com>
build/external/patches/dpdk_18.08/0006-net-bonding-fix-buffer-corruption-in-packets.patch [new file with mode: 0644]
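
For context on the crash described above: a DPDK application hands an array of
mbuf pointers to rte_eth_tx_burst() and is responsible for whatever the PMD did
not send; the bonding PMD keeps all un-transmitted mbufs at the end of that
array. The following caller-side sketch is purely illustrative (tx_and_free and
its parameters are hypothetical, not VPP code) and shows where the invalid
addresses surface: the rte_pktmbuf_free() call on the tail.

#include <rte_ethdev.h>
#include <rte_mbuf.h>

/*
 * Hypothetical caller sketch (illustrative only, not VPP code): transmit a
 * burst on a bonded port and free whatever the PMD could not send.  The
 * bonding PMD keeps all un-transmitted mbufs at the end of pkts[], so if
 * that tail holds corrupted pointers, rte_pktmbuf_free() is where the
 * application crashes.
 */
static void
tx_and_free(uint16_t port_id, uint16_t queue_id,
            struct rte_mbuf **pkts, uint16_t nb_pkts)
{
        uint16_t sent = rte_eth_tx_burst(port_id, queue_id, pkts, nb_pkts);

        /* Everything from index 'sent' onward was not transmitted. */
        for (uint16_t i = sent; i < nb_pkts; i++)
                rte_pktmbuf_free(pkts[i]);
}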

diff --git a/build/external/patches/dpdk_18.08/0006-net-bonding-fix-buffer-corruption-in-packets.patch b/build/external/patches/dpdk_18.08/0006-net-bonding-fix-buffer-corruption-in-packets.patch
new file mode 100644 (file)
index 0000000..3a13a7d
--- /dev/null
@@ -0,0 +1,203 @@
+commit 6b2a47d
+Author:     Jia Yu <jyu@vmware.com>
+AuthorDate: Sun Aug 19 22:18:45 2018 -0700
+Commit:     Ferruh Yigit <ferruh.yigit@intel.com>
+CommitDate: Tue Aug 28 15:27:39 2018 +0200
+
+    net/bonding: fix buffer corruption in packets
+    
+    When bond slave devices cannot transmit all packets in bufs array,
+    tx_burst callback shall merge the un-transmitted packets back to
+    bufs array. Recent merge logic introduced a bug which causes
+    invalid mbuf addresses being written to bufs array.
+    When caller frees the un-transmitted packets, due to invalid addresses,
+    application will crash.
+    
+    The fix is to avoid shifting mbufs and instead write un-transmitted
+    packets directly back to the bufs array.
+    
+    Fixes: 09150784a776 ("net/bonding: burst mode hash calculation")
+    Cc: stable@dpdk.org
+    
+    Signed-off-by: Jia Yu <jyu@vmware.com>
+    Acked-by: Chas Williams <chas3@att.com>
+
+diff --git a/drivers/net/bonding/rte_eth_bond_pmd.c b/drivers/net/bonding/rte_eth_bond_pmd.c
+index 4417422..b84f322 100644
+--- a/drivers/net/bonding/rte_eth_bond_pmd.c
++++ b/drivers/net/bonding/rte_eth_bond_pmd.c
+@@ -301,10 +301,10 @@ bond_ethdev_tx_burst_8023ad_fast_queue(void *queue, struct rte_mbuf **bufs,
+       /* Mapping array generated by hash function to map mbufs to slaves */
+       uint16_t bufs_slave_port_idxs[RTE_MAX_ETHPORTS] = { 0 };
+-      uint16_t slave_tx_count, slave_tx_fail_count[RTE_MAX_ETHPORTS] = { 0 };
++      uint16_t slave_tx_count;
+       uint16_t total_tx_count = 0, total_tx_fail_count = 0;
+-      uint16_t i, j;
++      uint16_t i;
+       if (unlikely(nb_bufs == 0))
+               return 0;
+@@ -359,34 +359,12 @@ bond_ethdev_tx_burst_8023ad_fast_queue(void *queue, struct rte_mbuf **bufs,
+               /* If tx burst fails move packets to end of bufs */
+               if (unlikely(slave_tx_count < slave_nb_bufs[i])) {
+-                      slave_tx_fail_count[i] = slave_nb_bufs[i] -
++                      int slave_tx_fail_count = slave_nb_bufs[i] -
+                                       slave_tx_count;
+-                      total_tx_fail_count += slave_tx_fail_count[i];
+-
+-                      /*
+-                       * Shift bufs to beginning of array to allow reordering
+-                       * later
+-                       */
+-                      for (j = 0; j < slave_tx_fail_count[i]; j++) {
+-                              slave_bufs[i][j] =
+-                                      slave_bufs[i][(slave_tx_count - 1) + j];
+-                      }
+-              }
+-      }
+-
+-      /*
+-       * If there are tx burst failures we move packets to end of bufs to
+-       * preserve expected PMD behaviour of all failed transmitted being
+-       * at the end of the input mbuf array
+-       */
+-      if (unlikely(total_tx_fail_count > 0)) {
+-              int bufs_idx = nb_bufs - total_tx_fail_count - 1;
+-
+-              for (i = 0; i < slave_count; i++) {
+-                      if (slave_tx_fail_count[i] > 0) {
+-                              for (j = 0; j < slave_tx_fail_count[i]; j++)
+-                                      bufs[bufs_idx++] = slave_bufs[i][j];
+-                      }
++                      total_tx_fail_count += slave_tx_fail_count;
++                      memcpy(&bufs[nb_bufs - total_tx_fail_count],
++                             &slave_bufs[i][slave_tx_count],
++                             slave_tx_fail_count * sizeof(bufs[0]));
+               }
+       }
+@@ -716,8 +694,8 @@ bond_ethdev_tx_burst_round_robin(void *queue, struct rte_mbuf **bufs,
+                               tx_fail_total += tx_fail_slave;
+                               memcpy(&bufs[nb_pkts - tx_fail_total],
+-                                              &slave_bufs[i][num_tx_slave],
+-                                              tx_fail_slave * sizeof(bufs[0]));
++                                     &slave_bufs[i][num_tx_slave],
++                                     tx_fail_slave * sizeof(bufs[0]));
+                       }
+                       num_tx_total += num_tx_slave;
+               }
+@@ -1222,10 +1200,10 @@ bond_ethdev_tx_burst_balance(void *queue, struct rte_mbuf **bufs,
+       /* Mapping array generated by hash function to map mbufs to slaves */
+       uint16_t bufs_slave_port_idxs[nb_bufs];
+-      uint16_t slave_tx_count, slave_tx_fail_count[RTE_MAX_ETHPORTS] = { 0 };
++      uint16_t slave_tx_count;
+       uint16_t total_tx_count = 0, total_tx_fail_count = 0;
+-      uint16_t i, j;
++      uint16_t i;
+       if (unlikely(nb_bufs == 0))
+               return 0;
+@@ -1266,34 +1244,12 @@ bond_ethdev_tx_burst_balance(void *queue, struct rte_mbuf **bufs,
+               /* If tx burst fails move packets to end of bufs */
+               if (unlikely(slave_tx_count < slave_nb_bufs[i])) {
+-                      slave_tx_fail_count[i] = slave_nb_bufs[i] -
++                      int slave_tx_fail_count = slave_nb_bufs[i] -
+                                       slave_tx_count;
+-                      total_tx_fail_count += slave_tx_fail_count[i];
+-
+-                      /*
+-                       * Shift bufs to beginning of array to allow reordering
+-                       * later
+-                       */
+-                      for (j = 0; j < slave_tx_fail_count[i]; j++) {
+-                              slave_bufs[i][j] =
+-                                      slave_bufs[i][(slave_tx_count - 1) + j];
+-                      }
+-              }
+-      }
+-
+-      /*
+-       * If there are tx burst failures we move packets to end of bufs to
+-       * preserve expected PMD behaviour of all failed transmitted being
+-       * at the end of the input mbuf array
+-       */
+-      if (unlikely(total_tx_fail_count > 0)) {
+-              int bufs_idx = nb_bufs - total_tx_fail_count - 1;
+-
+-              for (i = 0; i < slave_count; i++) {
+-                      if (slave_tx_fail_count[i] > 0) {
+-                              for (j = 0; j < slave_tx_fail_count[i]; j++)
+-                                      bufs[bufs_idx++] = slave_bufs[i][j];
+-                      }
++                      total_tx_fail_count += slave_tx_fail_count;
++                      memcpy(&bufs[nb_bufs - total_tx_fail_count],
++                             &slave_bufs[i][slave_tx_count],
++                             slave_tx_fail_count * sizeof(bufs[0]));
+               }
+       }
+@@ -1320,10 +1276,10 @@ bond_ethdev_tx_burst_8023ad(void *queue, struct rte_mbuf **bufs,
+       /* Mapping array generated by hash function to map mbufs to slaves */
+       uint16_t bufs_slave_port_idxs[RTE_MAX_ETHPORTS] = { 0 };
+-      uint16_t slave_tx_count, slave_tx_fail_count[RTE_MAX_ETHPORTS] = { 0 };
++      uint16_t slave_tx_count;
+       uint16_t total_tx_count = 0, total_tx_fail_count = 0;
+-      uint16_t i, j;
++      uint16_t i;
+       if (unlikely(nb_bufs == 0))
+               return 0;
+@@ -1381,39 +1337,13 @@ bond_ethdev_tx_burst_8023ad(void *queue, struct rte_mbuf **bufs,
+                       /* If tx burst fails move packets to end of bufs */
+                       if (unlikely(slave_tx_count < slave_nb_bufs[i])) {
+-                              slave_tx_fail_count[i] = slave_nb_bufs[i] -
++                              int slave_tx_fail_count = slave_nb_bufs[i] -
+                                               slave_tx_count;
+-                              total_tx_fail_count += slave_tx_fail_count[i];
+-
+-                              /*
+-                               * Shift bufs to beginning of array to allow
+-                               * reordering later
+-                               */
+-                              for (j = 0; j < slave_tx_fail_count[i]; j++)
+-                                      slave_bufs[i][j] =
+-                                              slave_bufs[i]
+-                                                      [(slave_tx_count - 1)
+-                                                      + j];
+-                      }
+-              }
++                              total_tx_fail_count += slave_tx_fail_count;
+-              /*
+-               * If there are tx burst failures we move packets to end of
+-               * bufs to preserve expected PMD behaviour of all failed
+-               * transmitted being at the end of the input mbuf array
+-               */
+-              if (unlikely(total_tx_fail_count > 0)) {
+-                      int bufs_idx = nb_bufs - total_tx_fail_count - 1;
+-
+-                      for (i = 0; i < slave_count; i++) {
+-                              if (slave_tx_fail_count[i] > 0) {
+-                                      for (j = 0;
+-                                              j < slave_tx_fail_count[i];
+-                                              j++) {
+-                                              bufs[bufs_idx++] =
+-                                                      slave_bufs[i][j];
+-                                      }
+-                              }
++                              memcpy(&bufs[nb_bufs - total_tx_fail_count],
++                                     &slave_bufs[i][slave_tx_count],
++                                     slave_tx_fail_count * sizeof(bufs[0]));
+                       }
+               }
+       }
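
The core of the fix is the same in all three tx_burst variants above: instead
of shifting each slave's failed mbufs to the front of its per-slave array and
reordering them in a second pass, the failed mbufs are memcpy'd straight into
the tail of the caller's bufs[] array. The standalone sketch below illustrates
that tail-merge under stated assumptions: plain void pointers stand in for
struct rte_mbuf *, and merge_tx_failures / MAX_SLAVES are hypothetical names,
not the DPDK code itself.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define MAX_SLAVES 4

/*
 * Copy each slave's un-transmitted packets straight to the tail of bufs[],
 * mirroring the memcpy() added by the patch.  Returns the number of packets
 * that were actually sent; the failed ones end up in
 * bufs[total_tx_count .. nb_bufs - 1], all at valid addresses.
 */
static uint16_t
merge_tx_failures(void **bufs, uint16_t nb_bufs,
                  void **slave_bufs[MAX_SLAVES],
                  const uint16_t slave_nb_bufs[MAX_SLAVES],
                  const uint16_t slave_tx_count[MAX_SLAVES],
                  uint16_t slave_count)
{
        uint16_t total_tx_count = 0, total_tx_fail_count = 0;

        for (uint16_t i = 0; i < slave_count; i++) {
                total_tx_count += slave_tx_count[i];
                if (slave_tx_count[i] < slave_nb_bufs[i]) {
                        uint16_t fail = slave_nb_bufs[i] - slave_tx_count[i];

                        total_tx_fail_count += fail;
                        /* The tail of bufs[] fills backwards as each slave
                         * is processed; no intermediate shifting needed. */
                        memcpy(&bufs[nb_bufs - total_tx_fail_count],
                               &slave_bufs[i][slave_tx_count[i]],
                               fail * sizeof(bufs[0]));
                }
        }
        return total_tx_count;
}

int main(void)
{
        /* Six fake "mbufs"; the hash split 1-3 to slave 0 and 4-6 to
         * slave 1, and each slave managed to send only its first two. */
        void *pkts[6] = { (void *)0x1, (void *)0x2, (void *)0x3,
                          (void *)0x4, (void *)0x5, (void *)0x6 };
        void *s0[3] = { pkts[0], pkts[1], pkts[2] };
        void *s1[3] = { pkts[3], pkts[4], pkts[5] };
        void **slave_bufs[MAX_SLAVES] = { s0, s1 };
        const uint16_t slave_nb_bufs[MAX_SLAVES] = { 3, 3 };
        const uint16_t slave_tx_count[MAX_SLAVES] = { 2, 2 };

        uint16_t sent = merge_tx_failures(pkts, 6, slave_bufs,
                                          slave_nb_bufs, slave_tx_count, 2);

        /* Prints "sent 4"; pkts[4] and pkts[5] now hold the two failed
         * mbufs (originally s1[2] and s0[2]). */
        printf("sent %u, tail: %p %p\n", (unsigned)sent, pkts[4], pkts[5]);
        return 0;
}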