From e25bad4a287924d26627ffe307f8a12824b87054 Mon Sep 17 00:00:00 2001
From: Shahaf Shuler <shahafs@mellanox.com>
Date: Thu, 2 Mar 2017 11:01:31 +0200
Subject: [PATCH] net/mlx5: add hardware TSO support

Implement support for hardware TSO.

Signed-off-by: Shahaf Shuler <shahafs@mellanox.com>
Acked-by: Nelio Laranjeiro <nelio.laranjeiro@6wind.com>
---
 doc/guides/nics/features/mlx5.ini |   1 +
 doc/guides/nics/mlx5.rst          |  12 ++++
 drivers/net/mlx5/mlx5.c           |  18 ++++++
 drivers/net/mlx5/mlx5.h           |   2 +
 drivers/net/mlx5/mlx5_defs.h      |   3 +
 drivers/net/mlx5/mlx5_ethdev.c    |   2 +
 drivers/net/mlx5/mlx5_rxtx.c      | 123 +++++++++++++++++++++++++++++++++-----
 drivers/net/mlx5/mlx5_rxtx.h      |   2 +
 drivers/net/mlx5/mlx5_txq.c       |  13 ++++
 9 files changed, 160 insertions(+), 16 deletions(-)

diff --git a/doc/guides/nics/features/mlx5.ini b/doc/guides/nics/features/mlx5.ini
index f20d214..8df25ce 100644
--- a/doc/guides/nics/features/mlx5.ini
+++ b/doc/guides/nics/features/mlx5.ini
@@ -11,6 +11,7 @@ Queue start/stop = Y
 Unicast MAC filter = Y
diff --git a/doc/guides/nics/mlx5.rst b/doc/guides/nics/mlx5.rst
index 5f6e594..9b0ba29 100644
--- a/doc/guides/nics/mlx5.rst
+++ b/doc/guides/nics/mlx5.rst
@@ -90,6 +90,7 @@ Features
 - Secondary process TX is supported.
 - KVM and VMware ESX SR-IOV modes are supported.
 - RSS hash result is supported.
@@ -186,9 +187,20 @@ Run-time configuration
 save PCI bandwidth and improve performance at the cost of a slightly
+ This option cannot be used in conjunction with ``tso`` below. When ``tso``
+ is set, ``txq_mpw_en`` is disabled.
 It is currently only supported on the ConnectX-4 Lx and ConnectX-5
 families of adapters. Enabled by default.
+- ``tso`` parameter [int]
+ A nonzero value enables hardware TSO.
+ When hardware TSO is enabled, packets marked with TCP segmentation
+ offload will be divided into segments by the hardware.
+ Disabled by default.
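A minimal application-side sketch of what "marked with TCP segmentation offload" means in practice, assuming a contiguous IPv4/TCP frame already built in "m" and an MSS of 1460; the helper name and values are illustrative, the fields and flags are standard rte_mbuf TSO metadata and are not part of this patch:

#include <rte_mbuf.h>
#include <rte_ether.h>
#include <rte_ip.h>
#include <rte_tcp.h>

/* Illustrative helper: request TSO for an IPv4/TCP mbuf. */
static void
request_tso(struct rte_mbuf *m)
{
	m->l2_len = sizeof(struct ether_hdr); /* 14 bytes, no VLAN assumed */
	m->l3_len = sizeof(struct ipv4_hdr);  /* 20 bytes, no IP options   */
	m->l4_len = sizeof(struct tcp_hdr);   /* 20 bytes, no TCP options  */
	m->tso_segsz = 1460;                  /* TCP payload per segment   */
	m->ol_flags |= PKT_TX_TCP_SEG | PKT_TX_IPV4 |
		       PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM;
}

Per the mbuf API documentation, the application also prepares the pseudo-header checksum in the TCP header (for example with rte_ipv4_phdr_cksum()) before handing the packet to the TX burst function.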
diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index d4bd469..03ed3b3 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
 /* Device parameter to enable multi-packet send WQEs. */
 #define MLX5_TXQ_MPW_EN "txq_mpw_en"
+/* Device parameter to enable hardware TSO offload. */
+#define MLX5_TSO "tso"
 * Retrieve integer value from environment variable.
@@ -290,6 +293,8 @@ mlx5_args_check(const char *key, const char *val, void *opaque)
 priv->txqs_inline = tmp;
 } else if (strcmp(MLX5_TXQ_MPW_EN, key) == 0) {
 priv->mps &= !!tmp; /* Enable MPW only if HW supports */
+ } else if (strcmp(MLX5_TSO, key) == 0) {
 WARN("%s: unknown parameter", key);
@@ -316,6 +321,7 @@ mlx5_args(struct priv *priv, struct rte_devargs *devargs)
 struct rte_kvargs *kvlist;
@@ -479,6 +485,7 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
 IBV_EXP_DEVICE_ATTR_RX_HASH |
 IBV_EXP_DEVICE_ATTR_VLAN_OFFLOADS |
 IBV_EXP_DEVICE_ATTR_RX_PAD_END_ALIGN |
+ IBV_EXP_DEVICE_ATTR_TSO_CAPS |
 DEBUG("using port %u (%08" PRIx32 ")", port, test);
@@ -580,11 +587,22 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
 priv_get_num_vfs(priv, &num_vfs);
 priv->sriov = (num_vfs || sriov);
+ priv->tso = ((priv->tso) &&
+ (exp_device_attr.tso_caps.max_tso > 0) &&
+ (exp_device_attr.tso_caps.supported_qpts &
+ (1 << IBV_QPT_RAW_ETH)));
+ priv->max_tso_payload_sz =
+ exp_device_attr.tso_caps.max_tso;
 if (priv->mps && !mps) {
 ERROR("multi-packet send not supported on this device"
 " (" MLX5_TXQ_MPW_EN ")");
+ } else if (priv->mps && priv->tso) {
+ WARN("multi-packet send not supported in conjunction "
+ "with TSO. MPS disabled");
 /* Allocate and register default RSS hash keys. */
 priv->rss_conf = rte_calloc(__func__, hash_rxq_init_n,
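A standalone sketch of the gating the probe code above performs: the "tso" devarg only takes effect when the device reports a nonzero max_tso and lists raw Ethernet QPs among the supported QP types. The helper name and the exp verbs header are assumptions; the field names mirror the tso_caps attributes used above:

#include <stdint.h>
#include <infiniband/verbs_exp.h> /* assumed MLNX OFED exp verbs header */

/* Illustrative only: returns nonzero when TSO can stay enabled. */
static int
tso_usable(int tso_requested, uint32_t max_tso, uint32_t supported_qpts)
{
	return tso_requested &&
	       max_tso > 0 &&
	       (supported_qpts & (1 << IBV_QPT_RAW_ETH));
}

When both "txq_mpw_en" and "tso" end up set, the probe keeps TSO and warns that MPS is disabled, matching the documentation change above.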
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 4c4b9d4..93f129b 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -126,6 +126,8 @@ struct priv {
 unsigned int mps:1; /* Whether multi-packet send is supported. */
 unsigned int cqe_comp:1; /* Whether CQE compression is enabled. */
 unsigned int pending_alarm:1; /* An alarm is pending. */
+ unsigned int tso:1; /* Whether TSO is supported. */
+ unsigned int max_tso_payload_sz; /* Maximum TCP payload for TSO. */
 unsigned int txq_inline; /* Maximum packet size for inlining. */
 unsigned int txqs_inline; /* Queue number threshold for inlining. */
diff --git a/drivers/net/mlx5/mlx5_defs.h b/drivers/net/mlx5/mlx5_defs.h
index e91d245..eecb908 100644
--- a/drivers/net/mlx5/mlx5_defs.h
+++ b/drivers/net/mlx5/mlx5_defs.h
 /* Maximum number of extended statistics counters. */
 #define MLX5_MAX_XSTATS 32
+/* Maximum Packet headers size (L2+L3+L4) for TSO. */
+#define MLX5_MAX_TSO_HEADER 128
 #endif /* RTE_PMD_MLX5_DEFS_H_ */
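A rough sanity check of the 128-byte budget, with header sizes assumed rather than taken from the patch: a fairly deep stack of Ethernet + VLAN + IPv6 + TCP with timestamp and SACK options still fits, so only unusually large header stacks hit the limit checked later in the TX path.

#include <assert.h>

#define MLX5_MAX_TSO_HEADER 128 /* as defined above */

/* 14 (Ethernet) + 4 (VLAN) + 40 (IPv6) + 32 (TCP + options) = 90 bytes */
static_assert(14 + 4 + 40 + 32 <= MLX5_MAX_TSO_HEADER,
	      "typical TSO header stack must fit in the inline budget");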
diff --git a/drivers/net/mlx5/mlx5_ethdev.c b/drivers/net/mlx5/mlx5_ethdev.c
index 5677f03..5542193 100644
--- a/drivers/net/mlx5/mlx5_ethdev.c
+++ b/drivers/net/mlx5/mlx5_ethdev.c
@@ -693,6 +693,8 @@ mlx5_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info)
 (DEV_TX_OFFLOAD_IPV4_CKSUM |
 DEV_TX_OFFLOAD_UDP_CKSUM |
 DEV_TX_OFFLOAD_TCP_CKSUM);
+ info->tx_offload_capa |= DEV_TX_OFFLOAD_TCP_TSO;
 if (priv_get_ifname(priv, &ifname) == 0)
 info->if_index = if_nametoindex(ifname);
 /* FIXME: RETA update/query API expects the callee to know the size of
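On the application side, the newly advertised capability bit can be checked before relying on TSO; a minimal sketch using the standard ethdev info query (the port id and helper name are illustrative):

#include <rte_ethdev.h>

/* Illustrative helper: does this port advertise TCP TSO? */
static int
port_supports_tso(uint8_t port_id)
{
	struct rte_eth_dev_info info;

	rte_eth_dev_info_get(port_id, &info);
	return (info.tx_offload_capa & DEV_TX_OFFLOAD_TCP_TSO) != 0;
}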
diff --git a/drivers/net/mlx5/mlx5_rxtx.c b/drivers/net/mlx5/mlx5_rxtx.c
index 4d5455b..98889f6 100644
--- a/drivers/net/mlx5/mlx5_rxtx.c
+++ b/drivers/net/mlx5/mlx5_rxtx.c
@@ -365,6 +365,7 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
 const unsigned int elts_n = 1 << txq->elts_n;
+ unsigned int k = 0;
@@ -392,8 +393,10 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
 uint16_t pkt_inline_sz = MLX5_WQE_DWORD_SIZE + 2;
+ uint16_t tso_header_sz = 0;
 uint8_t cs_flags = 0;
 #ifdef MLX5_PMD_SOFT_COUNTERS
 uint32_t total_length = 0;
@@ -465,14 +468,74 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
 length -= pkt_inline_sz;
 addr += pkt_inline_sz;
+ tso = buf->ol_flags & PKT_TX_TCP_SEG;
+ uintptr_t end = (uintptr_t)
+ (((uintptr_t)txq->wqes) +
+ (1 << txq->wqe_n) *
+ unsigned int copy_b;
+ uint8_t vlan_sz = (buf->ol_flags &
+ PKT_TX_VLAN_PKT) ? 4 : 0;
+ tso_header_sz = buf->l2_len + vlan_sz +
+ buf->l3_len + buf->l4_len;
+ if (unlikely(tso_header_sz >
+ MLX5_MAX_TSO_HEADER))
+ copy_b = tso_header_sz - pkt_inline_sz;
+ /* First seg must contain all headers. */
+ assert(copy_b <= length);
+ raw += MLX5_WQE_DWORD_SIZE;
+ ((end - (uintptr_t)raw) > copy_b)) {
+ uint16_t n = (MLX5_WQE_DS(copy_b) -
+ if (unlikely(max_wqe < n))
+ rte_memcpy((void *)raw,
+ (void *)addr, copy_b);
+ pkt_inline_sz += copy_b;
+ * Another DWORD will be added
+ * in the inline part.
+ raw += MLX5_WQE_DS(copy_b) *
+ MLX5_WQE_DWORD_SIZE -
+ MLX5_WQE_DWORD_SIZE;
+ wqe->ctrl = (rte_v128u32_t){
+ htonl(txq->wqe_ci << 8),
+ htonl(txq->qp_num_8s | 1),
+ elts_head = (elts_head - 1) &
 /* Inline if enough room. */
- if (txq->max_inline) {
+ if (txq->inline_en || tso) {
 uintptr_t end = (uintptr_t)
 (((uintptr_t)txq->wqes) +
 (1 << txq->wqe_n) * MLX5_WQE_SIZE);
 unsigned int max_inline = txq->max_inline *
 RTE_CACHE_LINE_SIZE -
- MLX5_WQE_DWORD_SIZE;
+ (pkt_inline_sz - 2);
 uintptr_t addr_end = (addr + max_inline) &
 ~(RTE_CACHE_LINE_SIZE - 1);
 unsigned int copy_b = (addr_end > addr) ?
@@ -491,6 +554,18 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
 if (unlikely(max_wqe < n))
+ htonl(copy_b | MLX5_INLINE_SEG);
+ MLX5_WQE_DS(tso_header_sz) *
+ MLX5_WQE_DWORD_SIZE;
+ rte_memcpy((void *)raw,
+ (void *)&inl, sizeof(inl));
+ raw += sizeof(inl);
+ pkt_inline_sz += sizeof(inl);
 rte_memcpy((void *)raw, (void *)addr, copy_b);
@@ -591,18 +666,34 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
 /* Initialize known and common part of the WQE structure. */
- wqe->ctrl = (rte_v128u32_t){
- htonl((txq->wqe_ci << 8) | MLX5_OPCODE_SEND),
- htonl(txq->qp_num_8s | ds),
- wqe->eseg = (rte_v128u32_t){
- (ehdr << 16) | htons(pkt_inline_sz),
+ wqe->ctrl = (rte_v128u32_t){
+ htonl((txq->wqe_ci << 8) | MLX5_OPCODE_TSO),
+ htonl(txq->qp_num_8s | ds),
+ wqe->eseg = (rte_v128u32_t){
+ cs_flags | (htons(buf->tso_segsz) << 16),
+ (ehdr << 16) | htons(tso_header_sz),
+ wqe->ctrl = (rte_v128u32_t){
+ htonl((txq->wqe_ci << 8) | MLX5_OPCODE_SEND),
+ htonl(txq->qp_num_8s | ds),
+ wqe->eseg = (rte_v128u32_t){
+ (ehdr << 16) | htons(pkt_inline_sz),
 txq->wqe_ci += (ds + 3) / 4;
 #ifdef MLX5_PMD_SOFT_COUNTERS
 /* Increment sent bytes counter. */
@@ -610,10 +701,10 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
 /* Take a shortcut if nothing must be sent. */
- if (unlikely(i == 0))
+ if (unlikely((i + k) == 0))
 /* Check whether completion threshold has been reached. */
- comp = txq->elts_comp + i + j;
+ comp = txq->elts_comp + i + j + k;
 if (comp >= MLX5_TX_COMP_THRESH) {
 volatile struct mlx5_wqe_ctrl *w =
 (volatile struct mlx5_wqe_ctrl *)wqe;
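The per-packet handling above hinges on the whole L2+L3+L4 header fitting into the WQE so the device can replicate it on every generated segment. A compact restatement of that check as an illustrative helper, using standard mbuf fields and the MLX5_MAX_TSO_HEADER value from mlx5_defs.h (the helper itself is not driver code):

#include <rte_mbuf.h>

#ifndef MLX5_MAX_TSO_HEADER
#define MLX5_MAX_TSO_HEADER 128 /* value from mlx5_defs.h */
#endif

/* Illustrative only: mirrors the tso_header_sz computation above. */
static inline int
tso_header_fits(const struct rte_mbuf *m)
{
	uint8_t vlan_sz = (m->ol_flags & PKT_TX_VLAN_PKT) ? 4 : 0;
	uint16_t hdr_sz = m->l2_len + vlan_sz + m->l3_len + m->l4_len;

	return hdr_sz <= MLX5_MAX_TSO_HEADER;
}

The MSS the hardware applies per segment comes directly from buf->tso_segsz, which the burst routine writes into the Ethernet segment of the WQE (the htons(buf->tso_segsz) term above).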
diff --git a/drivers/net/mlx5/mlx5_rxtx.h b/drivers/net/mlx5/mlx5_rxtx.h
index 41a34d7..6b328cf 100644
--- a/drivers/net/mlx5/mlx5_rxtx.h
+++ b/drivers/net/mlx5/mlx5_rxtx.h
@@ -254,6 +254,8 @@ struct txq {
 uint16_t cqe_n:4; /* Number of CQ elements (in log2). */
 uint16_t wqe_n:4; /* Number of WQ elements (in log2). */
 uint16_t max_inline; /* Multiple of RTE_CACHE_LINE_SIZE to inline. */
+ uint16_t inline_en:1; /* When set inline is enabled. */
+ uint16_t tso_en:1; /* When set hardware TSO is enabled. */
 uint32_t qp_num_8s; /* QP number shifted by 8. */
 volatile struct mlx5_cqe (*cqes)[]; /* Completion queue. */
 volatile void *wqes; /* Work queue (use volatile to write into). */
diff --git a/drivers/net/mlx5/mlx5_txq.c b/drivers/net/mlx5/mlx5_txq.c
index 949035b..995b763 100644
--- a/drivers/net/mlx5/mlx5_txq.c
+++ b/drivers/net/mlx5/mlx5_txq.c
@@ -342,6 +342,19 @@ txq_ctrl_setup(struct rte_eth_dev *dev, struct txq_ctrl *txq_ctrl,
 RTE_CACHE_LINE_SIZE);
 attr.init.cap.max_inline_data =
 tmpl.txq.max_inline * RTE_CACHE_LINE_SIZE;
+ tmpl.txq.inline_en = 1;
+ uint16_t max_tso_inline = ((MLX5_MAX_TSO_HEADER +
+ (RTE_CACHE_LINE_SIZE - 1)) /
+ RTE_CACHE_LINE_SIZE);
+ attr.init.max_tso_header =
+ max_tso_inline * RTE_CACHE_LINE_SIZE;
+ attr.init.comp_mask |= IBV_EXP_QP_INIT_ATTR_MAX_TSO_HEADER;
+ tmpl.txq.max_inline = RTE_MAX(tmpl.txq.max_inline,
+ tmpl.txq.tso_en = 1;
 tmpl.qp = ibv_exp_create_qp(priv->ctx, &attr.init);
 if (tmpl.qp == NULL) {
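A worked example of the rounding in the queue setup above, assuming 64-byte cache lines (the usual RTE_CACHE_LINE_SIZE on x86); the constants are pasted in so the snippet stands alone:

#include <stdio.h>

int main(void)
{
	const unsigned int cache_line = 64;      /* RTE_CACHE_LINE_SIZE  */
	const unsigned int max_tso_header = 128; /* MLX5_MAX_TSO_HEADER  */
	/* (128 + 63) / 64 = 2 cache lines, so max_tso_header is
	 * programmed as 2 * 64 = 128 bytes of inline header space. */
	unsigned int max_tso_inline =
		(max_tso_header + cache_line - 1) / cache_line;

	printf("max_tso_inline = %u cache lines (%u bytes)\n",
	       max_tso_inline, max_tso_inline * cache_line);
	return 0;
}

The RTE_MAX() in the hunk above then ensures the queue's inline setting is not smaller than what the TSO header region requires, even when the user configured a lower txq_inline value.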