/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2008-2017 Cisco Systems, Inc.  All rights reserved.
 * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
 */

#include <rte_mbuf.h>
#include <rte_ethdev_driver.h>
#include <rte_net.h>
#include <rte_prefetch.h>

#include "enic_compat.h"
#include "rq_enet_desc.h"
#include "enic.h"
#include <rte_ether.h>
#include <rte_ip.h>
#include <rte_tcp.h>

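/*
 * Tx offload flags this PMD can handle; anything outside this set is
 * rejected by enic_prep_pkts() via ENIC_TX_OFFLOAD_NOTSUP_MASK.
 */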
#define ENIC_TX_OFFLOAD_MASK (                   \
		PKT_TX_VLAN_PKT |                \
		PKT_TX_IP_CKSUM |                \
		PKT_TX_L4_MASK |                 \
		PKT_TX_TCP_SEG)

#define ENIC_TX_OFFLOAD_NOTSUP_MASK \
	(PKT_TX_OFFLOAD_MASK ^ ENIC_TX_OFFLOAD_MASK)

#define RTE_PMD_USE_PREFETCH

#ifdef RTE_PMD_USE_PREFETCH
/* Prefetch a cache line into all cache levels. */
#define rte_enic_prefetch(p) rte_prefetch0(p)
#else
#define rte_enic_prefetch(p) do {} while (0)
#endif

#ifdef RTE_PMD_PACKET_PREFETCH
#define rte_packet_prefetch(p) rte_prefetch1(p)
#else
#define rte_packet_prefetch(p) do {} while (0)
#endif

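/*
 * Helpers to pull individual fields out of the Rx completion (CQ)
 * descriptor. Multi-byte fields are little-endian on the ring.
 */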
static inline uint16_t
enic_cq_rx_desc_ciflags(struct cq_enet_rq_desc *crd)
{
	return le16_to_cpu(crd->completed_index_flags) & ~CQ_DESC_COMP_NDX_MASK;
}

static inline uint16_t
enic_cq_rx_desc_bwflags(struct cq_enet_rq_desc *crd)
{
	return le16_to_cpu(crd->bytes_written_flags) &
			   ~CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK;
}

static inline uint8_t
enic_cq_rx_desc_packet_error(uint16_t bwflags)
{
	return (bwflags & CQ_ENET_RQ_DESC_FLAGS_TRUNCATED) ==
		CQ_ENET_RQ_DESC_FLAGS_TRUNCATED;
}

static inline uint8_t
enic_cq_rx_desc_eop(uint16_t ciflags)
{
	return (ciflags & CQ_ENET_RQ_DESC_FLAGS_EOP)
		== CQ_ENET_RQ_DESC_FLAGS_EOP;
}

static inline uint8_t
enic_cq_rx_desc_csum_not_calc(struct cq_enet_rq_desc *cqrd)
{
	return (le16_to_cpu(cqrd->q_number_rss_type_flags) &
		CQ_ENET_RQ_DESC_FLAGS_CSUM_NOT_CALC) ==
		CQ_ENET_RQ_DESC_FLAGS_CSUM_NOT_CALC;
}

static inline uint8_t
enic_cq_rx_desc_ipv4_csum_ok(struct cq_enet_rq_desc *cqrd)
{
	return (cqrd->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4_CSUM_OK) ==
		CQ_ENET_RQ_DESC_FLAGS_IPV4_CSUM_OK;
}

static inline uint8_t
enic_cq_rx_desc_tcp_udp_csum_ok(struct cq_enet_rq_desc *cqrd)
{
	return (cqrd->flags & CQ_ENET_RQ_DESC_FLAGS_TCP_UDP_CSUM_OK) ==
		CQ_ENET_RQ_DESC_FLAGS_TCP_UDP_CSUM_OK;
}

static inline uint8_t
enic_cq_rx_desc_rss_type(struct cq_enet_rq_desc *cqrd)
{
	return (uint8_t)((le16_to_cpu(cqrd->q_number_rss_type_flags) >>
		CQ_DESC_Q_NUM_BITS) & CQ_ENET_RQ_DESC_RSS_TYPE_MASK);
}

static inline uint32_t
enic_cq_rx_desc_rss_hash(struct cq_enet_rq_desc *cqrd)
{
	return le32_to_cpu(cqrd->rss_hash);
}

static inline uint16_t
enic_cq_rx_desc_vlan(struct cq_enet_rq_desc *cqrd)
{
	return le16_to_cpu(cqrd->vlan);
}

static inline uint16_t
enic_cq_rx_desc_n_bytes(struct cq_desc *cqd)
{
	struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd;
	return le16_to_cpu(cqrd->bytes_written_flags) &
		CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK;
}

static inline uint8_t
enic_cq_rx_check_err(struct cq_desc *cqd)
{
	struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd;
	uint16_t bwflags;

	bwflags = enic_cq_rx_desc_bwflags(cqrd);
	if (unlikely(enic_cq_rx_desc_packet_error(bwflags)))
		return 1;
	return 0;
}

/* Lookup table to translate Rx CQ flags to an mbuf packet type. */
static inline uint32_t
enic_cq_rx_flags_to_pkt_type(struct cq_desc *cqd)
{
	struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd;
	uint8_t cqrd_flags = cqrd->flags;
	static const uint32_t cq_type_table[128] __rte_cache_aligned = {
		[0x00] = RTE_PTYPE_UNKNOWN,
		[0x20] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_NONFRAG,
		[0x22] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP,
		[0x24] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_TCP,
		[0x60] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG,
		[0x62] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP,
		[0x64] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_TCP,
		[0x10] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_NONFRAG,
		[0x12] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_UDP,
		[0x14] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_TCP,
		[0x50] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG,
		[0x52] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_UDP,
		[0x54] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_TCP,
		/* All others reserved */
	};
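	/* Keep only the IPv4/IPv6/fragment/TCP/UDP bits that index the table. */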
	cqrd_flags &= CQ_ENET_RQ_DESC_FLAGS_IPV4_FRAGMENT
		| CQ_ENET_RQ_DESC_FLAGS_IPV4 | CQ_ENET_RQ_DESC_FLAGS_IPV6
		| CQ_ENET_RQ_DESC_FLAGS_TCP | CQ_ENET_RQ_DESC_FLAGS_UDP;
	return cq_type_table[cqrd_flags];
}

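/*
 * Convert Rx completion flags into mbuf ol_flags: VLAN stripping,
 * flow-director match, RSS hash and IP/L4 checksum status.
 */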
static inline void
enic_cq_rx_to_pkt_flags(struct cq_desc *cqd, struct rte_mbuf *mbuf)
{
	struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd;
	uint16_t bwflags, pkt_flags = 0, vlan_tci;
	bwflags = enic_cq_rx_desc_bwflags(cqrd);
	vlan_tci = enic_cq_rx_desc_vlan(cqrd);

	/* VLAN STRIPPED flag. The L2 packet type is updated here as well. */
	if (bwflags & CQ_ENET_RQ_DESC_FLAGS_VLAN_STRIPPED) {
		pkt_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
		mbuf->packet_type |= RTE_PTYPE_L2_ETHER;
	} else {
		if (vlan_tci != 0)
			mbuf->packet_type |= RTE_PTYPE_L2_ETHER_VLAN;
		else
			mbuf->packet_type |= RTE_PTYPE_L2_ETHER;
	}
	mbuf->vlan_tci = vlan_tci;

	if ((cqd->type_color & CQ_DESC_TYPE_MASK) == CQ_DESC_TYPE_CLASSIFIER) {
		struct cq_enet_rq_clsf_desc *clsf_cqd;
		uint16_t filter_id;
		clsf_cqd = (struct cq_enet_rq_clsf_desc *)cqd;
		filter_id = clsf_cqd->filter_id;
		if (filter_id) {
			pkt_flags |= PKT_RX_FDIR;
			if (filter_id != ENIC_MAGIC_FILTER_ID) {
				mbuf->hash.fdir.hi = clsf_cqd->filter_id;
				pkt_flags |= PKT_RX_FDIR_ID;
			}
		}
	} else if (enic_cq_rx_desc_rss_type(cqrd)) {
		/* RSS flag */
		pkt_flags |= PKT_RX_RSS_HASH;
		mbuf->hash.rss = enic_cq_rx_desc_rss_hash(cqrd);
	}

	/* checksum flags */
	if (mbuf->packet_type & (RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L3_IPV6)) {
		if (!enic_cq_rx_desc_csum_not_calc(cqrd)) {
			uint32_t l4_flags;
			l4_flags = mbuf->packet_type & RTE_PTYPE_L4_MASK;

			if (enic_cq_rx_desc_ipv4_csum_ok(cqrd))
				pkt_flags |= PKT_RX_IP_CKSUM_GOOD;
			else if (mbuf->packet_type & RTE_PTYPE_L3_IPV4)
				pkt_flags |= PKT_RX_IP_CKSUM_BAD;

			if (l4_flags == RTE_PTYPE_L4_UDP ||
			    l4_flags == RTE_PTYPE_L4_TCP) {
				if (enic_cq_rx_desc_tcp_udp_csum_ok(cqrd))
					pkt_flags |= PKT_RX_L4_CKSUM_GOOD;
				else
					pkt_flags |= PKT_RX_L4_CKSUM_BAD;
			}
		}
	}

	mbuf->ol_flags = pkt_flags;
}

/* Dummy receive function that temporarily replaces the real one so
 * that reconfiguration can be done safely.
 */
uint16_t
enic_dummy_recv_pkts(__rte_unused void *rx_queue,
		     __rte_unused struct rte_mbuf **rx_pkts,
		     __rte_unused uint16_t nb_pkts)
{
	return 0;
}

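/*
 * Receive burst. Completions are taken from the CQ shared by the
 * start-of-packet (sop) RQ and, when Rx scatter is in use, its companion
 * data RQ. Each completed descriptor has its mbuf replaced with a freshly
 * allocated one, segments are chained until EOP, and the RQ doorbells are
 * rung once enough descriptors have been reclaimed.
 */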
uint16_t
enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
	       uint16_t nb_pkts)
{
	struct vnic_rq *sop_rq = rx_queue;
	struct vnic_rq *data_rq;
	struct vnic_rq *rq;
	struct enic *enic = vnic_dev_priv(sop_rq->vdev);
	uint16_t cq_idx;
	uint16_t rq_idx;
	uint16_t rq_num;
	struct rte_mbuf *nmb, *rxmb;
	uint16_t nb_rx = 0;
	struct vnic_cq *cq;
	volatile struct cq_desc *cqd_ptr;
	uint8_t color;
	uint16_t seg_length;
	struct rte_mbuf *first_seg = sop_rq->pkt_first_seg;
	struct rte_mbuf *last_seg = sop_rq->pkt_last_seg;

	cq = &enic->cq[enic_cq_rq(enic, sop_rq->index)];
	cq_idx = cq->to_clean;		/* index of cqd, rqd, mbuf_table */
	cqd_ptr = (struct cq_desc *)(cq->ring.descs) + cq_idx;

	data_rq = &enic->rq[sop_rq->data_queue_idx];

	while (nb_rx < nb_pkts) {
		volatile struct rq_enet_desc *rqd_ptr;
		struct cq_desc cqd;
		uint8_t packet_error;
		uint16_t ciflags;

		/* Check for pkts available */
		color = (cqd_ptr->type_color >> CQ_DESC_COLOR_SHIFT)
			& CQ_DESC_COLOR_MASK;
		if (color == cq->last_color)
			break;

		/* Get the cq descriptor and extract rq info from it */
		cqd = *cqd_ptr;
		rq_num = cqd.q_number & CQ_DESC_Q_NUM_MASK;
		rq_idx = cqd.completed_index & CQ_DESC_COMP_NDX_MASK;

		rq = &enic->rq[rq_num];
		rqd_ptr = ((struct rq_enet_desc *)rq->ring.descs) + rq_idx;

		/* allocate a new mbuf */
		nmb = rte_mbuf_raw_alloc(rq->mp);
		if (nmb == NULL) {
			rte_atomic64_inc(&enic->soft_stats.rx_nombuf);
			break;
		}

		/* A packet error means descriptor and data are untrusted */
		packet_error = enic_cq_rx_check_err(&cqd);

		/* Get the mbuf to return and replace with one just allocated */
		rxmb = rq->mbuf_ring[rq_idx];
		rq->mbuf_ring[rq_idx] = nmb;

		/* Increment cqd, rqd, mbuf_table index */
		cq_idx++;
		if (unlikely(cq_idx == cq->ring.desc_count)) {
			cq_idx = 0;
			cq->last_color = cq->last_color ? 0 : 1;
		}

		/* Prefetch next mbuf & desc while processing current one */
		cqd_ptr = (struct cq_desc *)(cq->ring.descs) + cq_idx;
		rte_enic_prefetch(cqd_ptr);

		ciflags = enic_cq_rx_desc_ciflags(
			(struct cq_enet_rq_desc *)&cqd);

		/* Push descriptor for newly allocated mbuf */
		nmb->data_off = RTE_PKTMBUF_HEADROOM;
		/*
		 * Only the address needs to be refilled. length_type of the
		 * descriptor is set during initialization
		 * (enic_alloc_rx_queue_mbufs) and does not change.
		 */
		rqd_ptr->address = rte_cpu_to_le_64(nmb->buf_iova +
						    RTE_PKTMBUF_HEADROOM);

		/* Fill in the rest of the mbuf */
		seg_length = enic_cq_rx_desc_n_bytes(&cqd);

		if (rq->is_sop) {
			first_seg = rxmb;
			first_seg->pkt_len = seg_length;
		} else {
			first_seg->pkt_len = (uint16_t)(first_seg->pkt_len
							+ seg_length);
			first_seg->nb_segs++;
			last_seg->next = rxmb;
		}

		rxmb->port = enic->port_id;
		rxmb->data_len = seg_length;

		rq->rx_nb_hold++;

		if (!(enic_cq_rx_desc_eop(ciflags))) {
			last_seg = rxmb;
			continue;
		}

		/* cq rx flags are only valid if eop bit is set */
		first_seg->packet_type = enic_cq_rx_flags_to_pkt_type(&cqd);
		enic_cq_rx_to_pkt_flags(&cqd, first_seg);

		if (unlikely(packet_error)) {
			rte_pktmbuf_free(first_seg);
			rte_atomic64_inc(&enic->soft_stats.rx_packet_errors);
			continue;
		}

		/* prefetch mbuf data for caller */
		rte_packet_prefetch(RTE_PTR_ADD(first_seg->buf_addr,
				    RTE_PKTMBUF_HEADROOM));

		/* store the mbuf address into the next entry of the array */
		rx_pkts[nb_rx++] = first_seg;
	}

	sop_rq->pkt_first_seg = first_seg;
	sop_rq->pkt_last_seg = last_seg;

	cq->to_clean = cq_idx;

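	/* Refill and ring the RQ doorbells once enough buffers are held back */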
	if ((sop_rq->rx_nb_hold + data_rq->rx_nb_hold) >
	    sop_rq->rx_free_thresh) {
		if (data_rq->in_use) {
			data_rq->posted_index =
				enic_ring_add(data_rq->ring.desc_count,
					      data_rq->posted_index,
					      data_rq->rx_nb_hold);
			data_rq->rx_nb_hold = 0;
		}
		sop_rq->posted_index = enic_ring_add(sop_rq->ring.desc_count,
						     sop_rq->posted_index,
						     sop_rq->rx_nb_hold);
		sop_rq->rx_nb_hold = 0;

		rte_mb();
		if (data_rq->in_use)
			iowrite32_relaxed(data_rq->posted_index,
					  &data_rq->ctrl->posted_index);
		rte_compiler_barrier();
		iowrite32_relaxed(sop_rq->posted_index,
				  &sop_rq->ctrl->posted_index);
	}

	return nb_rx;
}

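/*
 * Release the mbufs of all WQ descriptors the NIC has completed, up to and
 * including completed_index. Frees are batched per mempool via
 * rte_mempool_put_bulk(); the batch is flushed whenever the pool changes.
 */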
static inline void enic_free_wq_bufs(struct vnic_wq *wq, u16 completed_index)
{
	struct vnic_wq_buf *buf;
	struct rte_mbuf *m, *free[ENIC_MAX_WQ_DESCS];
	unsigned int nb_to_free, nb_free = 0, i;
	struct rte_mempool *pool;
	unsigned int tail_idx;
	unsigned int desc_count = wq->ring.desc_count;

	nb_to_free = enic_ring_sub(desc_count, wq->tail_idx, completed_index)
				   + 1;
	tail_idx = wq->tail_idx;
	buf = &wq->bufs[tail_idx];
	pool = ((struct rte_mbuf *)buf->mb)->pool;
	for (i = 0; i < nb_to_free; i++) {
		buf = &wq->bufs[tail_idx];
		m = rte_pktmbuf_prefree_seg((struct rte_mbuf *)(buf->mb));
		buf->mb = NULL;

		if (unlikely(m == NULL)) {
			tail_idx = enic_ring_incr(desc_count, tail_idx);
			continue;
		}

		if (likely(m->pool == pool)) {
			RTE_ASSERT(nb_free < ENIC_MAX_WQ_DESCS);
			free[nb_free++] = m;
		} else {
			rte_mempool_put_bulk(pool, (void **)free, nb_free);
			free[0] = m;
			nb_free = 1;
			pool = m->pool;
		}
		tail_idx = enic_ring_incr(desc_count, tail_idx);
	}

	if (nb_free > 0)
		rte_mempool_put_bulk(pool, (void **)free, nb_free);

	wq->tail_idx = tail_idx;
	wq->ring.desc_avail += nb_to_free;
}

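/*
 * Reclaim completed Tx descriptors. The NIC writes the latest completed
 * index into the cqmsg area; buffers are freed only when it has moved.
 */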
unsigned int enic_cleanup_wq(__rte_unused struct enic *enic, struct vnic_wq *wq)
{
	u16 completed_index;

	completed_index = *((uint32_t *)wq->cqmsg_rz->addr) & 0xffff;

	if (wq->last_completed_index != completed_index) {
		enic_free_wq_bufs(wq, completed_index);
		wq->last_completed_index = completed_index;
	}
	return 0;
}

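/*
 * Tx prepare handler: reject mbufs requesting offloads this PMD does not
 * support and let rte_net_intel_cksum_prepare() fill in the pseudo-header
 * checksums required for the hardware checksum/TSO offloads.
 */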
uint16_t enic_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
			uint16_t nb_pkts)
{
	int32_t ret;
	uint16_t i;
	uint64_t ol_flags;
	struct rte_mbuf *m;

	for (i = 0; i != nb_pkts; i++) {
		m = tx_pkts[i];
		ol_flags = m->ol_flags;
		if (ol_flags & ENIC_TX_OFFLOAD_NOTSUP_MASK) {
			rte_errno = ENOTSUP;
			return i;
		}
#ifdef RTE_LIBRTE_ETHDEV_DEBUG
		ret = rte_validate_tx_offload(m);
		if (ret != 0) {
			rte_errno = -ret;
			return i;
		}
#endif
		ret = rte_net_intel_cksum_prepare(m);
		if (ret != 0) {
			rte_errno = -ret;
			return i;
		}
	}

	return i;
}

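/*
 * Transmit burst. Completed descriptors are reclaimed first, then each
 * mbuf (and its chained segments) is encoded into WQ descriptors with the
 * requested VLAN/checksum/TSO offloads, and the WQ doorbell is posted once
 * for the whole burst.
 */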
uint16_t enic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
	uint16_t nb_pkts)
{
	uint16_t index;
	unsigned int pkt_len, data_len;
	unsigned int nb_segs;
	struct rte_mbuf *tx_pkt;
	struct vnic_wq *wq = (struct vnic_wq *)tx_queue;
	struct enic *enic = vnic_dev_priv(wq->vdev);
	unsigned short vlan_id;
	uint64_t ol_flags;
	uint64_t ol_flags_mask;
	unsigned int wq_desc_avail;
	int head_idx;
	struct vnic_wq_buf *buf;
	unsigned int desc_count;
	struct wq_enet_desc *descs, *desc_p, desc_tmp;
	uint16_t mss;
	uint8_t vlan_tag_insert;
	uint8_t eop;
	uint64_t bus_addr;
	uint8_t offload_mode;
	uint16_t header_len;
	uint64_t tso;
	rte_atomic64_t *tx_oversized;

	enic_cleanup_wq(enic, wq);
	wq_desc_avail = vnic_wq_desc_avail(wq);
	head_idx = wq->head_idx;
	desc_count = wq->ring.desc_count;
	ol_flags_mask = PKT_TX_VLAN_PKT | PKT_TX_IP_CKSUM | PKT_TX_L4_MASK;
	tx_oversized = &enic->soft_stats.tx_oversized;

	nb_pkts = RTE_MIN(nb_pkts, ENIC_TX_XMIT_MAX);

	for (index = 0; index < nb_pkts; index++) {
		tx_pkt = *tx_pkts++;
		pkt_len = tx_pkt->pkt_len;
		data_len = tx_pkt->data_len;
		ol_flags = tx_pkt->ol_flags;
		nb_segs = tx_pkt->nb_segs;
		tso = ol_flags & PKT_TX_TCP_SEG;

		/* drop packet if it's too big to send */
		if (unlikely(!tso && pkt_len > ENIC_TX_MAX_PKT_SIZE)) {
			rte_pktmbuf_free(tx_pkt);
			rte_atomic64_inc(tx_oversized);
			continue;
		}

		if (nb_segs > wq_desc_avail) {
			if (index > 0)
				goto post;
			goto done;
		}

		mss = 0;
		vlan_id = tx_pkt->vlan_tci;
		vlan_tag_insert = !!(ol_flags & PKT_TX_VLAN_PKT);
		bus_addr = (dma_addr_t)
			   (tx_pkt->buf_iova + tx_pkt->data_off);

		descs = (struct wq_enet_desc *)wq->ring.descs;
		desc_p = descs + head_idx;

		eop = (data_len == pkt_len);
		offload_mode = WQ_ENET_OFFLOAD_MODE_CSUM;
		header_len = 0;

		if (tso) {
			header_len = tx_pkt->l2_len + tx_pkt->l3_len +
				     tx_pkt->l4_len;

			/* Drop if non-TCP packet or TSO seg size is too big */
			if (unlikely(header_len == 0 || ((tx_pkt->tso_segsz +
			    header_len) > ENIC_TX_MAX_PKT_SIZE))) {
				rte_pktmbuf_free(tx_pkt);
				rte_atomic64_inc(tx_oversized);
				continue;
			}

			offload_mode = WQ_ENET_OFFLOAD_MODE_TSO;
			mss = tx_pkt->tso_segsz;
		}

		if ((ol_flags & ol_flags_mask) && (header_len == 0)) {
			if (ol_flags & PKT_TX_IP_CKSUM)
				mss |= ENIC_CALC_IP_CKSUM;

			/* NIC uses just 1 bit for UDP and TCP */
			switch (ol_flags & PKT_TX_L4_MASK) {
			case PKT_TX_TCP_CKSUM:
			case PKT_TX_UDP_CKSUM:
				mss |= ENIC_CALC_TCP_UDP_CKSUM;
				break;
			}
		}

		wq_enet_desc_enc(&desc_tmp, bus_addr, data_len, mss, header_len,
				 offload_mode, eop, eop, 0, vlan_tag_insert,
				 vlan_id, 0);

		*desc_p = desc_tmp;
		buf = &wq->bufs[head_idx];
		buf->mb = (void *)tx_pkt;
		head_idx = enic_ring_incr(desc_count, head_idx);
		wq_desc_avail--;

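		/* Chain the remaining segments; only the last one gets EOP. */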
		if (!eop) {
			for (tx_pkt = tx_pkt->next; tx_pkt; tx_pkt =
			    tx_pkt->next) {
				data_len = tx_pkt->data_len;

				if (tx_pkt->next == NULL)
					eop = 1;
				desc_p = descs + head_idx;
				bus_addr = (dma_addr_t)(tx_pkt->buf_iova
					   + tx_pkt->data_off);
				wq_enet_desc_enc((struct wq_enet_desc *)
						 &desc_tmp, bus_addr, data_len,
						 mss, 0, offload_mode, eop, eop,
						 0, vlan_tag_insert, vlan_id,
						 0);

				*desc_p = desc_tmp;
				buf = &wq->bufs[head_idx];
				buf->mb = (void *)tx_pkt;
				head_idx = enic_ring_incr(desc_count, head_idx);
				wq_desc_avail--;
			}
		}
	}
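	/* Make the descriptor writes visible, then ring the WQ doorbell once. */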
 post:
	rte_wmb();
	iowrite32_relaxed(head_idx, &wq->ctrl->posted_index);
 done:
	wq->ring.desc_avail = wq_desc_avail;
	wq->head_idx = head_idx;

	return index;
}