dpdk: add support for Mellanox ConnectX-5 devices
File: dpdk/dpdk-17.02_patches/0006-net-mlx5-add-hardware-TSO-support.patch (vpp.git)
From e25bad4a287924d26627ffe307f8a12824b87054 Mon Sep 17 00:00:00 2001
From: Shahaf Shuler <shahafs@mellanox.com>
Date: Thu, 2 Mar 2017 11:01:31 +0200
Subject: [PATCH] net/mlx5: add hardware TSO support

Implement support for hardware TSO.

Signed-off-by: Shahaf Shuler <shahafs@mellanox.com>
Acked-by: Nelio Laranjeiro <nelio.laranjeiro@6wind.com>
---
 doc/guides/nics/features/mlx5.ini |   1 +
 doc/guides/nics/mlx5.rst          |  12 ++++
 drivers/net/mlx5/mlx5.c           |  18 ++++++
 drivers/net/mlx5/mlx5.h           |   2 +
 drivers/net/mlx5/mlx5_defs.h      |   3 +
 drivers/net/mlx5/mlx5_ethdev.c    |   2 +
 drivers/net/mlx5/mlx5_rxtx.c      | 123 +++++++++++++++++++++++++++++++++-----
 drivers/net/mlx5/mlx5_rxtx.h      |   2 +
 drivers/net/mlx5/mlx5_txq.c       |  13 ++++
 9 files changed, 160 insertions(+), 16 deletions(-)

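Usage sketch (illustrative, not part of the patch): once this series is applied,
hardware TSO is opt-in through the new ``tso`` device parameter. A minimal EAL
initialization passing it as a devarg could look as follows; the PCI address and
core list are placeholders.

#include <rte_eal.h>

int
main(void)
{
	/* Hypothetical arguments: whitelist one ConnectX-4/5 port and
	 * request the "tso" devarg introduced by this patch. */
	char *argv[] = {
		"app", "-l", "0-1",
		"-w", "0000:05:00.0,tso=1",
	};
	int argc = sizeof(argv) / sizeof(argv[0]);

	if (rte_eal_init(argc, argv) < 0)
		return -1;
	return 0;
}
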
diff --git a/doc/guides/nics/features/mlx5.ini b/doc/guides/nics/features/mlx5.ini
index f20d214..8df25ce 100644
--- a/doc/guides/nics/features/mlx5.ini
+++ b/doc/guides/nics/features/mlx5.ini
@@ -11,6 +11,7 @@ Queue start/stop     = Y
 MTU update           = Y
 Jumbo frame          = Y
 Scattered Rx         = Y
+TSO                  = Y
 Promiscuous mode     = Y
 Allmulticast mode    = Y
 Unicast MAC filter   = Y
diff --git a/doc/guides/nics/mlx5.rst b/doc/guides/nics/mlx5.rst
index 5f6e594..9b0ba29 100644
--- a/doc/guides/nics/mlx5.rst
+++ b/doc/guides/nics/mlx5.rst
@@ -90,6 +90,7 @@ Features
 - Secondary process TX is supported.
 - KVM and VMware ESX SR-IOV modes are supported.
 - RSS hash result is supported.
+- Hardware TSO.
 
 Limitations
 -----------
@@ -186,9 +187,20 @@ Run-time configuration
   save PCI bandwidth and improve performance at the cost of a slightly
   higher CPU usage.
 
+  This option cannot be used in conjunction with ``tso`` below. When ``tso``
+  is set, ``txq_mpw_en`` is disabled.
+
   It is currently only supported on the ConnectX-4 Lx and ConnectX-5
   families of adapters. Enabled by default.
 
+- ``tso`` parameter [int]
+
+  A nonzero value enables hardware TSO.
+  When hardware TSO is enabled, packets marked with TCP segmentation
+  offload will be divided into segments by the hardware.
+
+  Disabled by default.
+
 Prerequisites
 -------------
 
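The documentation above says packets "marked with TCP segmentation offload" are
split by the NIC. In mbuf terms that means the application sets PKT_TX_TCP_SEG
together with valid l2/l3/l4 header lengths and an MSS in tso_segsz, roughly as
sketched below (header sizes are illustrative; this helper is not part of the
patch):

#include <rte_mbuf.h>

static void
mark_mbuf_for_tso(struct rte_mbuf *m)
{
	m->l2_len = 14;        /* Ethernet header */
	m->l3_len = 20;        /* IPv4 header, no options */
	m->l4_len = 20;        /* TCP header, no options */
	m->tso_segsz = 1460;   /* MSS of each resulting segment */
	m->ol_flags |= PKT_TX_TCP_SEG | PKT_TX_IPV4 |
		       PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM;
}
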
diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index d4bd469..03ed3b3 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -84,6 +84,9 @@
 /* Device parameter to enable multi-packet send WQEs. */
 #define MLX5_TXQ_MPW_EN "txq_mpw_en"
 
+/* Device parameter to enable hardware TSO offload. */
+#define MLX5_TSO "tso"
+
 /**
  * Retrieve integer value from environment variable.
  *
@@ -290,6 +293,8 @@ mlx5_args_check(const char *key, const char *val, void *opaque)
                priv->txqs_inline = tmp;
        } else if (strcmp(MLX5_TXQ_MPW_EN, key) == 0) {
                priv->mps &= !!tmp; /* Enable MPW only if HW supports */
+       } else if (strcmp(MLX5_TSO, key) == 0) {
+               priv->tso = !!tmp;
        } else {
                WARN("%s: unknown parameter", key);
                return -EINVAL;
@@ -316,6 +321,7 @@ mlx5_args(struct priv *priv, struct rte_devargs *devargs)
                MLX5_TXQ_INLINE,
                MLX5_TXQS_MIN_INLINE,
                MLX5_TXQ_MPW_EN,
+               MLX5_TSO,
                NULL,
        };
        struct rte_kvargs *kvlist;
@@ -479,6 +485,7 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
                        IBV_EXP_DEVICE_ATTR_RX_HASH |
                        IBV_EXP_DEVICE_ATTR_VLAN_OFFLOADS |
                        IBV_EXP_DEVICE_ATTR_RX_PAD_END_ALIGN |
+                       IBV_EXP_DEVICE_ATTR_TSO_CAPS |
                        0;
 
                DEBUG("using port %u (%08" PRIx32 ")", port, test);
@@ -580,11 +587,22 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
 
                priv_get_num_vfs(priv, &num_vfs);
                priv->sriov = (num_vfs || sriov);
+               priv->tso = ((priv->tso) &&
+                           (exp_device_attr.tso_caps.max_tso > 0) &&
+                           (exp_device_attr.tso_caps.supported_qpts &
+                           (1 << IBV_QPT_RAW_ETH)));
+               if (priv->tso)
+                       priv->max_tso_payload_sz =
+                               exp_device_attr.tso_caps.max_tso;
                if (priv->mps && !mps) {
                        ERROR("multi-packet send not supported on this device"
                              " (" MLX5_TXQ_MPW_EN ")");
                        err = ENOTSUP;
                        goto port_error;
+               } else if (priv->mps && priv->tso) {
+                       WARN("multi-packet send not supported in conjunction "
+                             "with TSO. MPS disabled");
+                       priv->mps = 0;
                }
                /* Allocate and register default RSS hash keys. */
                priv->rss_conf = rte_calloc(__func__, hash_rxq_init_n,
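For readers unfamiliar with the devargs plumbing touched above: mlx5_args()
builds a key list and lets librte_kvargs dispatch each ``key=value`` pair to
mlx5_args_check(), which is where the new MLX5_TSO branch sets priv->tso. A
stripped-down sketch of that pattern (simplified, not the driver's exact code):

#include <stdlib.h>
#include <string.h>
#include <rte_kvargs.h>

static int
args_check(const char *key, const char *val, void *opaque)
{
	int *tso = opaque;

	if (strcmp(key, "tso") == 0)
		*tso = (atoi(val) != 0);
	return 0;
}

static int
parse_tso_devarg(const char *devargs_str, int *tso)
{
	const char *keys[] = { "tso", NULL };
	struct rte_kvargs *kvlist = rte_kvargs_parse(devargs_str, keys);

	if (kvlist == NULL)
		return -1;
	rte_kvargs_process(kvlist, "tso", args_check, tso);
	rte_kvargs_free(kvlist);
	return 0;
}
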
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 4c4b9d4..93f129b 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -126,6 +126,8 @@ struct priv {
        unsigned int mps:1; /* Whether multi-packet send is supported. */
        unsigned int cqe_comp:1; /* Whether CQE compression is enabled. */
        unsigned int pending_alarm:1; /* An alarm is pending. */
+       unsigned int tso:1; /* Whether TSO is supported. */
+       unsigned int max_tso_payload_sz; /* Maximum TCP payload for TSO. */
        unsigned int txq_inline; /* Maximum packet size for inlining. */
        unsigned int txqs_inline; /* Queue number threshold for inlining. */
        /* RX/TX queues. */
diff --git a/drivers/net/mlx5/mlx5_defs.h b/drivers/net/mlx5/mlx5_defs.h
index e91d245..eecb908 100644
--- a/drivers/net/mlx5/mlx5_defs.h
+++ b/drivers/net/mlx5/mlx5_defs.h
@@ -79,4 +79,7 @@
 /* Maximum number of extended statistics counters. */
 #define MLX5_MAX_XSTATS 32
 
+/* Maximum Packet headers size (L2+L3+L4) for TSO. */
+#define MLX5_MAX_TSO_HEADER 128
+
 #endif /* RTE_PMD_MLX5_DEFS_H_ */
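MLX5_MAX_TSO_HEADER caps the bytes the PMD will inline into the WQE for header
replication: if l2+vlan+l3+l4 exceeds 128 bytes, mlx5_tx_burst() stops
processing the burst (see the mlx5_rxtx.c hunk below). An equivalent standalone
check, for illustration only:

#include <stdint.h>

#define MLX5_MAX_TSO_HEADER 128 /* as defined by this patch */

static inline int
tso_headers_fit(uint16_t l2, uint16_t vlan, uint16_t l3, uint16_t l4)
{
	return (uint16_t)(l2 + vlan + l3 + l4) <= MLX5_MAX_TSO_HEADER;
}
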
diff --git a/drivers/net/mlx5/mlx5_ethdev.c b/drivers/net/mlx5/mlx5_ethdev.c
index 5677f03..5542193 100644
--- a/drivers/net/mlx5/mlx5_ethdev.c
+++ b/drivers/net/mlx5/mlx5_ethdev.c
@@ -693,6 +693,8 @@ mlx5_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info)
                        (DEV_TX_OFFLOAD_IPV4_CKSUM |
                         DEV_TX_OFFLOAD_UDP_CKSUM |
                         DEV_TX_OFFLOAD_TCP_CKSUM);
+       if (priv->tso)
+               info->tx_offload_capa |= DEV_TX_OFFLOAD_TCP_TSO;
        if (priv_get_ifname(priv, &ifname) == 0)
                info->if_index = if_nametoindex(ifname);
        /* FIXME: RETA update/query API expects the callee to know the size of
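Once priv->tso is set, the port advertises DEV_TX_OFFLOAD_TCP_TSO, so a
portable application can probe for the capability before using it. A minimal
sketch (port_id 0 is a placeholder; the 17.02 API takes a uint8_t port id):

#include <rte_ethdev.h>

static int
port_has_tso(uint8_t port_id)
{
	struct rte_eth_dev_info info;

	rte_eth_dev_info_get(port_id, &info);
	return (info.tx_offload_capa & DEV_TX_OFFLOAD_TCP_TSO) != 0;
}
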
diff --git a/drivers/net/mlx5/mlx5_rxtx.c b/drivers/net/mlx5/mlx5_rxtx.c
index 4d5455b..98889f6 100644
--- a/drivers/net/mlx5/mlx5_rxtx.c
+++ b/drivers/net/mlx5/mlx5_rxtx.c
@@ -365,6 +365,7 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
        const unsigned int elts_n = 1 << txq->elts_n;
        unsigned int i = 0;
        unsigned int j = 0;
+       unsigned int k = 0;
        unsigned int max;
        uint16_t max_wqe;
        unsigned int comp;
@@ -392,8 +393,10 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
                uintptr_t addr;
                uint64_t naddr;
                uint16_t pkt_inline_sz = MLX5_WQE_DWORD_SIZE + 2;
+               uint16_t tso_header_sz = 0;
                uint16_t ehdr;
                uint8_t cs_flags = 0;
+               uint64_t tso = 0;
 #ifdef MLX5_PMD_SOFT_COUNTERS
                uint32_t total_length = 0;
 #endif
@@ -465,14 +468,74 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
                        length -= pkt_inline_sz;
                        addr += pkt_inline_sz;
                }
+               if (txq->tso_en) {
+                       tso = buf->ol_flags & PKT_TX_TCP_SEG;
+                       if (tso) {
+                               uintptr_t end = (uintptr_t)
+                                               (((uintptr_t)txq->wqes) +
+                                               (1 << txq->wqe_n) *
+                                               MLX5_WQE_SIZE);
+                               unsigned int copy_b;
+                               uint8_t vlan_sz = (buf->ol_flags &
+                                                 PKT_TX_VLAN_PKT) ? 4 : 0;
+
+                               tso_header_sz = buf->l2_len + vlan_sz +
+                                               buf->l3_len + buf->l4_len;
+
+                               if (unlikely(tso_header_sz >
+                                            MLX5_MAX_TSO_HEADER))
+                                       break;
+                               copy_b = tso_header_sz - pkt_inline_sz;
+                               /* First seg must contain all headers. */
+                               assert(copy_b <= length);
+                               raw += MLX5_WQE_DWORD_SIZE;
+                               if (copy_b &&
+                                  ((end - (uintptr_t)raw) > copy_b)) {
+                                       uint16_t n = (MLX5_WQE_DS(copy_b) -
+                                                     1 + 3) / 4;
+
+                                       if (unlikely(max_wqe < n))
+                                               break;
+                                       max_wqe -= n;
+                                       rte_memcpy((void *)raw,
+                                                  (void *)addr, copy_b);
+                                       addr += copy_b;
+                                       length -= copy_b;
+                                       pkt_inline_sz += copy_b;
+                                       /*
+                                        * Another DWORD will be added
+                                        * in the inline part.
+                                        */
+                                       raw += MLX5_WQE_DS(copy_b) *
+                                              MLX5_WQE_DWORD_SIZE -
+                                              MLX5_WQE_DWORD_SIZE;
+                               } else {
+                                       /* NOP WQE. */
+                                       wqe->ctrl = (rte_v128u32_t){
+                                                    htonl(txq->wqe_ci << 8),
+                                                    htonl(txq->qp_num_8s | 1),
+                                                    0,
+                                                    0,
+                                       };
+                                       ds = 1;
+                                       total_length = 0;
+                                       pkts--;
+                                       pkts_n++;
+                                       elts_head = (elts_head - 1) &
+                                                   (elts_n - 1);
+                                       k++;
+                                       goto next_wqe;
+                               }
+                       }
+               }
                /* Inline if enough room. */
-               if (txq->max_inline) {
+               if (txq->inline_en || tso) {
                        uintptr_t end = (uintptr_t)
                                (((uintptr_t)txq->wqes) +
                                 (1 << txq->wqe_n) * MLX5_WQE_SIZE);
                        unsigned int max_inline = txq->max_inline *
                                                  RTE_CACHE_LINE_SIZE -
-                                                 MLX5_WQE_DWORD_SIZE;
+                                                 (pkt_inline_sz - 2);
                        uintptr_t addr_end = (addr + max_inline) &
                                             ~(RTE_CACHE_LINE_SIZE - 1);
                        unsigned int copy_b = (addr_end > addr) ?
@@ -491,6 +554,18 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
                                if (unlikely(max_wqe < n))
                                        break;
                                max_wqe -= n;
+                               if (tso) {
+                                       uint32_t inl =
+                                               htonl(copy_b | MLX5_INLINE_SEG);
+
+                                       pkt_inline_sz =
+                                               MLX5_WQE_DS(tso_header_sz) *
+                                               MLX5_WQE_DWORD_SIZE;
+                                       rte_memcpy((void *)raw,
+                                                  (void *)&inl, sizeof(inl));
+                                       raw += sizeof(inl);
+                                       pkt_inline_sz += sizeof(inl);
+                               }
                                rte_memcpy((void *)raw, (void *)addr, copy_b);
                                addr += copy_b;
                                length -= copy_b;
@@ -591,18 +666,34 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
 next_pkt:
                ++i;
                /* Initialize known and common part of the WQE structure. */
-               wqe->ctrl = (rte_v128u32_t){
-                       htonl((txq->wqe_ci << 8) | MLX5_OPCODE_SEND),
-                       htonl(txq->qp_num_8s | ds),
-                       0,
-                       0,
-               };
-               wqe->eseg = (rte_v128u32_t){
-                       0,
-                       cs_flags,
-                       0,
-                       (ehdr << 16) | htons(pkt_inline_sz),
-               };
+               if (tso) {
+                       wqe->ctrl = (rte_v128u32_t){
+                               htonl((txq->wqe_ci << 8) | MLX5_OPCODE_TSO),
+                               htonl(txq->qp_num_8s | ds),
+                               0,
+                               0,
+                       };
+                       wqe->eseg = (rte_v128u32_t){
+                               0,
+                               cs_flags | (htons(buf->tso_segsz) << 16),
+                               0,
+                               (ehdr << 16) | htons(tso_header_sz),
+                       };
+               } else {
+                       wqe->ctrl = (rte_v128u32_t){
+                               htonl((txq->wqe_ci << 8) | MLX5_OPCODE_SEND),
+                               htonl(txq->qp_num_8s | ds),
+                               0,
+                               0,
+                       };
+                       wqe->eseg = (rte_v128u32_t){
+                               0,
+                               cs_flags,
+                               0,
+                               (ehdr << 16) | htons(pkt_inline_sz),
+                       };
+               }
+next_wqe:
                txq->wqe_ci += (ds + 3) / 4;
 #ifdef MLX5_PMD_SOFT_COUNTERS
                /* Increment sent bytes counter. */
@@ -610,10 +701,10 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
 #endif
        } while (pkts_n);
        /* Take a shortcut if nothing must be sent. */
-       if (unlikely(i == 0))
+       if (unlikely((i + k) == 0))
                return 0;
        /* Check whether completion threshold has been reached. */
-       comp = txq->elts_comp + i + j;
+       comp = txq->elts_comp + i + j + k;
        if (comp >= MLX5_TX_COMP_THRESH) {
                volatile struct mlx5_wqe_ctrl *w =
                        (volatile struct mlx5_wqe_ctrl *)wqe;
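In the TSO path above, the WQE is built with MLX5_OPCODE_TSO, the eseg carries
the MSS (buf->tso_segsz) and the inlined header length (tso_header_sz), and the
hardware then replicates those headers onto every MSS-sized slice of the
remaining payload. As a back-of-envelope model of the result (illustrative
only, not driver code):

#include <stdint.h>

/* Number of wire segments the NIC produces for one TSO mbuf:
 * everything past the inlined headers is cut into MSS-sized pieces. */
static inline uint32_t
tso_nb_segments(uint32_t pkt_len, uint16_t tso_header_sz, uint16_t mss)
{
	uint32_t payload = pkt_len - tso_header_sz;

	return (payload + mss - 1) / mss; /* ceiling division */
}

The new k counter exists because the NOP-WQE fallback consumes a WQE and a
completion slot without transmitting a packet; it therefore has to be folded
into the completion threshold (comp = txq->elts_comp + i + j + k) even though
i, the sent-packet count, is unchanged.
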
diff --git a/drivers/net/mlx5/mlx5_rxtx.h b/drivers/net/mlx5/mlx5_rxtx.h
index 41a34d7..6b328cf 100644
--- a/drivers/net/mlx5/mlx5_rxtx.h
+++ b/drivers/net/mlx5/mlx5_rxtx.h
@@ -254,6 +254,8 @@ struct txq {
        uint16_t cqe_n:4; /* Number of CQ elements (in log2). */
        uint16_t wqe_n:4; /* Number of of WQ elements (in log2). */
        uint16_t max_inline; /* Multiple of RTE_CACHE_LINE_SIZE to inline. */
+       uint16_t inline_en:1; /* When set inline is enabled. */
+       uint16_t tso_en:1; /* When set hardware TSO is enabled. */
        uint32_t qp_num_8s; /* QP number shifted by 8. */
        volatile struct mlx5_cqe (*cqes)[]; /* Completion queue. */
        volatile void *wqes; /* Work queue (use volatile to write into). */
diff --git a/drivers/net/mlx5/mlx5_txq.c b/drivers/net/mlx5/mlx5_txq.c
index 949035b..995b763 100644
--- a/drivers/net/mlx5/mlx5_txq.c
+++ b/drivers/net/mlx5/mlx5_txq.c
@@ -342,6 +342,19 @@ txq_ctrl_setup(struct rte_eth_dev *dev, struct txq_ctrl *txq_ctrl,
                         RTE_CACHE_LINE_SIZE);
                attr.init.cap.max_inline_data =
                        tmpl.txq.max_inline * RTE_CACHE_LINE_SIZE;
+               tmpl.txq.inline_en = 1;
+       }
+       if (priv->tso) {
+               uint16_t max_tso_inline = ((MLX5_MAX_TSO_HEADER +
+                                          (RTE_CACHE_LINE_SIZE - 1)) /
+                                           RTE_CACHE_LINE_SIZE);
+
+               attr.init.max_tso_header =
+                       max_tso_inline * RTE_CACHE_LINE_SIZE;
+               attr.init.comp_mask |= IBV_EXP_QP_INIT_ATTR_MAX_TSO_HEADER;
+               tmpl.txq.max_inline = RTE_MAX(tmpl.txq.max_inline,
+                                             max_tso_inline);
+               tmpl.txq.tso_en = 1;
        }
        tmpl.qp = ibv_exp_create_qp(priv->ctx, &attr.init);
        if (tmpl.qp == NULL) {
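The rounding above guarantees the QP reserves whole cache lines for the inlined
TSO header: with MLX5_MAX_TSO_HEADER = 128 and a 64-byte RTE_CACHE_LINE_SIZE
this is ceil(128 / 64) = 2 lines, and max_inline is raised to at least that
value. Worked out in isolation (the 64-byte line size is an assumption, typical
on x86):

#include <stdint.h>

#define MLX5_MAX_TSO_HEADER 128
#define CACHE_LINE_SIZE 64

static inline uint16_t
max_tso_inline_lines(void)
{
	/* (128 + 63) / 64 == 2 cache lines reserved for headers. */
	return (MLX5_MAX_TSO_HEADER + CACHE_LINE_SIZE - 1) / CACHE_LINE_SIZE;
}
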
-- 
2.7.4