2 Author: Jia Yu <jyu@vmware.com>
3 AuthorDate: Sun Aug 19 22:18:45 2018 -0700
4 Commit: Ferruh Yigit <ferruh.yigit@intel.com>
5 CommitDate: Tue Aug 28 15:27:39 2018 +0200
7 net/bonding: fix buffer corruption in packets
9 When bond slave devices cannot transmit all packets in bufs array,
10 tx_burst callback shall merge the un-transmitted packets back to
11 bufs array. Recent merge logic introduced a bug which causes
12 invalid mbuf addresses to be written to the bufs array.
13 When caller frees the un-transmitted packets, due to invalid addresses,
14 application will crash.
16 The fix is to avoid shifting mbufs, and directly write un-transmitted
17 packets back to the bufs array.
19 Fixes: 09150784a776 ("net/bonding: burst mode hash calculation")
22 Signed-off-by: Jia Yu <jyu@vmware.com>
23 Acked-by: Chas Williams <chas3@att.com>
25 diff --git a/drivers/net/bonding/rte_eth_bond_pmd.c b/drivers/net/bonding/rte_eth_bond_pmd.c
26 index 4417422..b84f322 100644
27 --- a/drivers/net/bonding/rte_eth_bond_pmd.c
28 +++ b/drivers/net/bonding/rte_eth_bond_pmd.c
29 @@ -301,10 +301,10 @@ bond_ethdev_tx_burst_8023ad_fast_queue(void *queue, struct rte_mbuf **bufs,
30 /* Mapping array generated by hash function to map mbufs to slaves */
31 uint16_t bufs_slave_port_idxs[RTE_MAX_ETHPORTS] = { 0 };
33 - uint16_t slave_tx_count, slave_tx_fail_count[RTE_MAX_ETHPORTS] = { 0 };
34 + uint16_t slave_tx_count;
35 uint16_t total_tx_count = 0, total_tx_fail_count = 0;
40 if (unlikely(nb_bufs == 0))
42 @@ -359,34 +359,12 @@ bond_ethdev_tx_burst_8023ad_fast_queue(void *queue, struct rte_mbuf **bufs,
44 /* If tx burst fails move packets to end of bufs */
45 if (unlikely(slave_tx_count < slave_nb_bufs[i])) {
46 - slave_tx_fail_count[i] = slave_nb_bufs[i] -
47 + int slave_tx_fail_count = slave_nb_bufs[i] -
49 - total_tx_fail_count += slave_tx_fail_count[i];
52 - * Shift bufs to beginning of array to allow reordering
55 - for (j = 0; j < slave_tx_fail_count[i]; j++) {
57 - slave_bufs[i][(slave_tx_count - 1) + j];
63 - * If there are tx burst failures we move packets to end of bufs to
64 - * preserve expected PMD behaviour of all failed transmitted being
65 - * at the end of the input mbuf array
67 - if (unlikely(total_tx_fail_count > 0)) {
68 - int bufs_idx = nb_bufs - total_tx_fail_count - 1;
70 - for (i = 0; i < slave_count; i++) {
71 - if (slave_tx_fail_count[i] > 0) {
72 - for (j = 0; j < slave_tx_fail_count[i]; j++)
73 - bufs[bufs_idx++] = slave_bufs[i][j];
75 + total_tx_fail_count += slave_tx_fail_count;
76 + memcpy(&bufs[nb_bufs - total_tx_fail_count],
77 + &slave_bufs[i][slave_tx_count],
78 + slave_tx_fail_count * sizeof(bufs[0]));
82 @@ -716,8 +694,8 @@ bond_ethdev_tx_burst_round_robin(void *queue, struct rte_mbuf **bufs,
83 tx_fail_total += tx_fail_slave;
85 memcpy(&bufs[nb_pkts - tx_fail_total],
86 - &slave_bufs[i][num_tx_slave],
87 - tx_fail_slave * sizeof(bufs[0]));
88 + &slave_bufs[i][num_tx_slave],
89 + tx_fail_slave * sizeof(bufs[0]));
91 num_tx_total += num_tx_slave;
93 @@ -1222,10 +1200,10 @@ bond_ethdev_tx_burst_balance(void *queue, struct rte_mbuf **bufs,
94 /* Mapping array generated by hash function to map mbufs to slaves */
95 uint16_t bufs_slave_port_idxs[nb_bufs];
97 - uint16_t slave_tx_count, slave_tx_fail_count[RTE_MAX_ETHPORTS] = { 0 };
98 + uint16_t slave_tx_count;
99 uint16_t total_tx_count = 0, total_tx_fail_count = 0;
104 if (unlikely(nb_bufs == 0))
106 @@ -1266,34 +1244,12 @@ bond_ethdev_tx_burst_balance(void *queue, struct rte_mbuf **bufs,
108 /* If tx burst fails move packets to end of bufs */
109 if (unlikely(slave_tx_count < slave_nb_bufs[i])) {
110 - slave_tx_fail_count[i] = slave_nb_bufs[i] -
111 + int slave_tx_fail_count = slave_nb_bufs[i] -
113 - total_tx_fail_count += slave_tx_fail_count[i];
116 - * Shift bufs to beginning of array to allow reordering
119 - for (j = 0; j < slave_tx_fail_count[i]; j++) {
121 - slave_bufs[i][(slave_tx_count - 1) + j];
127 - * If there are tx burst failures we move packets to end of bufs to
128 - * preserve expected PMD behaviour of all failed transmitted being
129 - * at the end of the input mbuf array
131 - if (unlikely(total_tx_fail_count > 0)) {
132 - int bufs_idx = nb_bufs - total_tx_fail_count - 1;
134 - for (i = 0; i < slave_count; i++) {
135 - if (slave_tx_fail_count[i] > 0) {
136 - for (j = 0; j < slave_tx_fail_count[i]; j++)
137 - bufs[bufs_idx++] = slave_bufs[i][j];
139 + total_tx_fail_count += slave_tx_fail_count;
140 + memcpy(&bufs[nb_bufs - total_tx_fail_count],
141 + &slave_bufs[i][slave_tx_count],
142 + slave_tx_fail_count * sizeof(bufs[0]));
146 @@ -1320,10 +1276,10 @@ bond_ethdev_tx_burst_8023ad(void *queue, struct rte_mbuf **bufs,
147 /* Mapping array generated by hash function to map mbufs to slaves */
148 uint16_t bufs_slave_port_idxs[RTE_MAX_ETHPORTS] = { 0 };
150 - uint16_t slave_tx_count, slave_tx_fail_count[RTE_MAX_ETHPORTS] = { 0 };
151 + uint16_t slave_tx_count;
152 uint16_t total_tx_count = 0, total_tx_fail_count = 0;
157 if (unlikely(nb_bufs == 0))
159 @@ -1381,39 +1337,13 @@ bond_ethdev_tx_burst_8023ad(void *queue, struct rte_mbuf **bufs,
161 /* If tx burst fails move packets to end of bufs */
162 if (unlikely(slave_tx_count < slave_nb_bufs[i])) {
163 - slave_tx_fail_count[i] = slave_nb_bufs[i] -
164 + int slave_tx_fail_count = slave_nb_bufs[i] -
166 - total_tx_fail_count += slave_tx_fail_count[i];
169 - * Shift bufs to beginning of array to allow
172 - for (j = 0; j < slave_tx_fail_count[i]; j++)
175 - [(slave_tx_count - 1)
179 + total_tx_fail_count += slave_tx_fail_count;
182 - * If there are tx burst failures we move packets to end of
183 - * bufs to preserve expected PMD behaviour of all failed
184 - * transmitted being at the end of the input mbuf array
186 - if (unlikely(total_tx_fail_count > 0)) {
187 - int bufs_idx = nb_bufs - total_tx_fail_count - 1;
189 - for (i = 0; i < slave_count; i++) {
190 - if (slave_tx_fail_count[i] > 0) {
192 - j < slave_tx_fail_count[i];
198 + memcpy(&bufs[nb_bufs - total_tx_fail_count],
199 + &slave_bufs[i][slave_tx_count],
200 + slave_tx_fail_count * sizeof(bufs[0]));