[deb_dpdk.git] / drivers/net/enic/enic_rxtx.c
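Rx/Tx burst handlers for the Cisco enic poll-mode driver.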
/* Copyright 2008-2016 Cisco Systems, Inc.  All rights reserved.
 * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
 *
 * Copyright (c) 2014, Cisco Systems, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in
 * the documentation and/or other materials provided with the
 * distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <rte_mbuf.h>
#include <rte_ethdev.h>
#include <rte_prefetch.h>

#include "enic_compat.h"
#include "rq_enet_desc.h"
#include "enic.h"
#include <rte_ether.h>
#include <rte_ip.h>
#include <rte_tcp.h>

#define RTE_PMD_USE_PREFETCH

#ifdef RTE_PMD_USE_PREFETCH
/* Prefetch a cache line into all cache levels. */
#define rte_enic_prefetch(p) rte_prefetch0(p)
#else
#define rte_enic_prefetch(p) do {} while (0)
#endif

#ifdef RTE_PMD_PACKET_PREFETCH
#define rte_packet_prefetch(p) rte_prefetch1(p)
#else
#define rte_packet_prefetch(p) do {} while (0)
#endif

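/* Accessors for fields of the enet RQ completion descriptor
 * (struct cq_enet_rq_desc).  Multi-byte fields are little-endian in the
 * descriptor, hence the le16/le32 conversions.
 */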
static inline uint16_t
enic_cq_rx_desc_ciflags(struct cq_enet_rq_desc *crd)
{
	return le16_to_cpu(crd->completed_index_flags) & ~CQ_DESC_COMP_NDX_MASK;
}

static inline uint16_t
enic_cq_rx_desc_bwflags(struct cq_enet_rq_desc *crd)
{
	return le16_to_cpu(crd->bytes_written_flags) &
			   ~CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK;
}

static inline uint8_t
enic_cq_rx_desc_packet_error(uint16_t bwflags)
{
	return (bwflags & CQ_ENET_RQ_DESC_FLAGS_TRUNCATED) ==
		CQ_ENET_RQ_DESC_FLAGS_TRUNCATED;
}

static inline uint8_t
enic_cq_rx_desc_eop(uint16_t ciflags)
{
	return (ciflags & CQ_ENET_RQ_DESC_FLAGS_EOP)
		== CQ_ENET_RQ_DESC_FLAGS_EOP;
}

static inline uint8_t
enic_cq_rx_desc_csum_not_calc(struct cq_enet_rq_desc *cqrd)
{
	return (le16_to_cpu(cqrd->q_number_rss_type_flags) &
		CQ_ENET_RQ_DESC_FLAGS_CSUM_NOT_CALC) ==
		CQ_ENET_RQ_DESC_FLAGS_CSUM_NOT_CALC;
}

static inline uint8_t
enic_cq_rx_desc_ipv4_csum_ok(struct cq_enet_rq_desc *cqrd)
{
	return (cqrd->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4_CSUM_OK) ==
		CQ_ENET_RQ_DESC_FLAGS_IPV4_CSUM_OK;
}

static inline uint8_t
enic_cq_rx_desc_tcp_udp_csum_ok(struct cq_enet_rq_desc *cqrd)
{
	return (cqrd->flags & CQ_ENET_RQ_DESC_FLAGS_TCP_UDP_CSUM_OK) ==
		CQ_ENET_RQ_DESC_FLAGS_TCP_UDP_CSUM_OK;
}

static inline uint8_t
enic_cq_rx_desc_rss_type(struct cq_enet_rq_desc *cqrd)
{
	return (uint8_t)((le16_to_cpu(cqrd->q_number_rss_type_flags) >>
		CQ_DESC_Q_NUM_BITS) & CQ_ENET_RQ_DESC_RSS_TYPE_MASK);
}

static inline uint32_t
enic_cq_rx_desc_rss_hash(struct cq_enet_rq_desc *cqrd)
{
	return le32_to_cpu(cqrd->rss_hash);
}

static inline uint16_t
enic_cq_rx_desc_vlan(struct cq_enet_rq_desc *cqrd)
{
	return le16_to_cpu(cqrd->vlan);
}

static inline uint16_t
enic_cq_rx_desc_n_bytes(struct cq_desc *cqd)
{
	struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd;
	return le16_to_cpu(cqrd->bytes_written_flags) &
		CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK;
}

/* Find the offset to L5 (the start of the TCP payload).  This is needed by
 * the enic TSO implementation.  Return 0 if the packet is not TCP or the
 * header length cannot be determined.
 */
static inline uint8_t tso_header_len(struct rte_mbuf *mbuf)
{
	struct ether_hdr *eh;
	struct vlan_hdr *vh;
	struct ipv4_hdr *ip4;
	struct ipv6_hdr *ip6;
	struct tcp_hdr *th;
	uint8_t hdr_len;
	uint16_t ether_type;

	/* offset past Ethernet header */
	eh = rte_pktmbuf_mtod(mbuf, struct ether_hdr *);
	ether_type = eh->ether_type;
	hdr_len = sizeof(struct ether_hdr);
	if (ether_type == rte_cpu_to_be_16(ETHER_TYPE_VLAN)) {
		vh = rte_pktmbuf_mtod_offset(mbuf, struct vlan_hdr *, hdr_len);
		ether_type = vh->eth_proto;
		hdr_len += sizeof(struct vlan_hdr);
	}

	/* offset past IP header */
	switch (rte_be_to_cpu_16(ether_type)) {
	case ETHER_TYPE_IPv4:
		ip4 = rte_pktmbuf_mtod_offset(mbuf, struct ipv4_hdr *, hdr_len);
		if (ip4->next_proto_id != IPPROTO_TCP)
			return 0;
		/* IHL is the low nibble, in units of 32-bit words */
		hdr_len += (ip4->version_ihl & 0xf) * 4;
		break;
	case ETHER_TYPE_IPv6:
		ip6 = rte_pktmbuf_mtod_offset(mbuf, struct ipv6_hdr *, hdr_len);
		if (ip6->proto != IPPROTO_TCP)
			return 0;
		hdr_len += sizeof(struct ipv6_hdr);
		break;
	default:
		return 0;
	}

	if ((hdr_len + sizeof(struct tcp_hdr)) > mbuf->pkt_len)
		return 0;

	/* offset past TCP header */
	th = rte_pktmbuf_mtod_offset(mbuf, struct tcp_hdr *, hdr_len);
	hdr_len += (th->data_off >> 4) * 4;

	if (hdr_len > mbuf->pkt_len)
		return 0;

	return hdr_len;
}

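/* Return 1 if the completion reports a packet error (truncated frame),
 * 0 otherwise.
 */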
static inline uint8_t
enic_cq_rx_check_err(struct cq_desc *cqd)
{
	struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd;
	uint16_t bwflags;

	bwflags = enic_cq_rx_desc_bwflags(cqrd);
	if (unlikely(enic_cq_rx_desc_packet_error(bwflags)))
		return 1;
	return 0;
}

/* Lookup table to translate RX CQ flags to mbuf packet types.  The table is
 * indexed by the descriptor flags byte masked down to the fragment, IPv4,
 * IPv6, TCP and UDP bits.
 */
static inline uint32_t
enic_cq_rx_flags_to_pkt_type(struct cq_desc *cqd)
{
	struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd;
	uint8_t cqrd_flags = cqrd->flags;
	static const uint32_t cq_type_table[128] __rte_cache_aligned = {
		[0x00] = RTE_PTYPE_UNKNOWN,
		[0x20] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_NONFRAG,
		[0x22] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP,
		[0x24] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_TCP,
		[0x60] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG,
		[0x62] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP,
		[0x64] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_TCP,
		[0x10] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_NONFRAG,
		[0x12] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_UDP,
		[0x14] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_TCP,
		[0x50] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG,
		[0x52] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_UDP,
		[0x54] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_TCP,
		/* All others reserved */
	};
	cqrd_flags &= CQ_ENET_RQ_DESC_FLAGS_IPV4_FRAGMENT
		| CQ_ENET_RQ_DESC_FLAGS_IPV4 | CQ_ENET_RQ_DESC_FLAGS_IPV6
		| CQ_ENET_RQ_DESC_FLAGS_TCP | CQ_ENET_RQ_DESC_FLAGS_UDP;
	return cq_type_table[cqrd_flags];
}

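/* Convert completion descriptor flags into mbuf->ol_flags and the L2/VLAN
 * packet type bits: VLAN stripping, flow director match, RSS hash and
 * IPv4/L4 checksum status.
 */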
static inline void
enic_cq_rx_to_pkt_flags(struct cq_desc *cqd, struct rte_mbuf *mbuf)
{
	struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd;
	uint16_t ciflags, bwflags, pkt_flags = 0, vlan_tci;
	ciflags = enic_cq_rx_desc_ciflags(cqrd);
	bwflags = enic_cq_rx_desc_bwflags(cqrd);
	vlan_tci = enic_cq_rx_desc_vlan(cqrd);

	mbuf->ol_flags = 0;

	/* flags are meaningless if !EOP */
	if (unlikely(!enic_cq_rx_desc_eop(ciflags)))
		goto mbuf_flags_done;

	/* VLAN STRIPPED flag. The L2 packet type is updated here as well. */
	if (bwflags & CQ_ENET_RQ_DESC_FLAGS_VLAN_STRIPPED) {
		pkt_flags |= PKT_RX_VLAN_PKT | PKT_RX_VLAN_STRIPPED;
		mbuf->packet_type |= RTE_PTYPE_L2_ETHER;
	} else {
		if (vlan_tci != 0)
			mbuf->packet_type |= RTE_PTYPE_L2_ETHER_VLAN;
		else
			mbuf->packet_type |= RTE_PTYPE_L2_ETHER;
	}
	mbuf->vlan_tci = vlan_tci;

	if ((cqd->type_color & CQ_DESC_TYPE_MASK) == CQ_DESC_TYPE_CLASSIFIER) {
		struct cq_enet_rq_clsf_desc *clsf_cqd;
		uint16_t filter_id;
		clsf_cqd = (struct cq_enet_rq_clsf_desc *)cqd;
		filter_id = clsf_cqd->filter_id;
		if (filter_id) {
			pkt_flags |= PKT_RX_FDIR;
			if (filter_id != ENIC_MAGIC_FILTER_ID) {
				mbuf->hash.fdir.hi = clsf_cqd->filter_id;
				pkt_flags |= PKT_RX_FDIR_ID;
			}
		}
	} else if (enic_cq_rx_desc_rss_type(cqrd)) {
		/* RSS flag */
		pkt_flags |= PKT_RX_RSS_HASH;
		mbuf->hash.rss = enic_cq_rx_desc_rss_hash(cqrd);
	}

	/* checksum flags */
	if (mbuf->packet_type & RTE_PTYPE_L3_IPV4) {
		if (enic_cq_rx_desc_csum_not_calc(cqrd))
			/* Both UNKNOWN flags are zero, so this is a no-op */
			pkt_flags |= (PKT_RX_IP_CKSUM_UNKNOWN |
				     PKT_RX_L4_CKSUM_UNKNOWN);
		else {
			uint32_t l4_flags;
			l4_flags = mbuf->packet_type & RTE_PTYPE_L4_MASK;

			if (enic_cq_rx_desc_ipv4_csum_ok(cqrd))
				pkt_flags |= PKT_RX_IP_CKSUM_GOOD;
			else
				pkt_flags |= PKT_RX_IP_CKSUM_BAD;

			if (l4_flags & (RTE_PTYPE_L4_UDP | RTE_PTYPE_L4_TCP)) {
				if (enic_cq_rx_desc_tcp_udp_csum_ok(cqrd))
					pkt_flags |= PKT_RX_L4_CKSUM_GOOD;
				else
					pkt_flags |= PKT_RX_L4_CKSUM_BAD;
			}
		}
	}

 mbuf_flags_done:
	mbuf->ol_flags = pkt_flags;
}

/* Dummy receive function installed in place of the real one so that
 * reconfiguration operations can be carried out safely.
 */
uint16_t
enic_dummy_recv_pkts(__rte_unused void *rx_queue,
		     __rte_unused struct rte_mbuf **rx_pkts,
		     __rte_unused uint16_t nb_pkts)
{
	return 0;
}

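/* Receive burst.  Packets arrive on the start-of-packet (sop) RQ and, when
 * the data RQ is in use (Rx scatter), continuation buffers arrive on that
 * data RQ.  Completions are detected via the CQ color bit, ring mbufs are
 * replenished in place, and the NIC is re-posted once rx_free_thresh
 * buffers are held.
 */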
uint16_t
enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
	       uint16_t nb_pkts)
{
	struct vnic_rq *sop_rq = rx_queue;
	struct vnic_rq *data_rq;
	struct vnic_rq *rq;
	struct enic *enic = vnic_dev_priv(sop_rq->vdev);
	uint16_t cq_idx;
	uint16_t rq_idx;
	uint16_t rq_num;
	struct rte_mbuf *nmb, *rxmb;
	uint16_t nb_rx = 0;
	struct vnic_cq *cq;
	volatile struct cq_desc *cqd_ptr;
	uint8_t color;
	uint16_t seg_length;
	struct rte_mbuf *first_seg = sop_rq->pkt_first_seg;
	struct rte_mbuf *last_seg = sop_rq->pkt_last_seg;

	cq = &enic->cq[enic_cq_rq(enic, sop_rq->index)];
	cq_idx = cq->to_clean;		/* index of cqd, rqd, mbuf_table */
	cqd_ptr = (struct cq_desc *)(cq->ring.descs) + cq_idx;

	data_rq = &enic->rq[sop_rq->data_queue_idx];

	while (nb_rx < nb_pkts) {
		volatile struct rq_enet_desc *rqd_ptr;
		dma_addr_t dma_addr;
		struct cq_desc cqd;
		uint8_t packet_error;
		uint16_t ciflags;

		/* Check for pkts available: a completion is new only when its
		 * color bit differs from the color seen on the last pass.
		 */
		color = (cqd_ptr->type_color >> CQ_DESC_COLOR_SHIFT)
			& CQ_DESC_COLOR_MASK;
		if (color == cq->last_color)
			break;

		/* Get the cq descriptor and extract rq info from it */
		cqd = *cqd_ptr;
		rq_num = cqd.q_number & CQ_DESC_Q_NUM_MASK;
		rq_idx = cqd.completed_index & CQ_DESC_COMP_NDX_MASK;

		rq = &enic->rq[rq_num];
		rqd_ptr = ((struct rq_enet_desc *)rq->ring.descs) + rq_idx;

		/* allocate a new mbuf */
		nmb = rte_mbuf_raw_alloc(rq->mp);
		if (nmb == NULL) {
			rte_atomic64_inc(&enic->soft_stats.rx_nombuf);
			break;
		}

		/* A packet error means descriptor and data are untrusted */
		packet_error = enic_cq_rx_check_err(&cqd);

		/* Get the mbuf to return and replace with one just allocated */
		rxmb = rq->mbuf_ring[rq_idx];
		rq->mbuf_ring[rq_idx] = nmb;

		/* Increment cqd, rqd, mbuf_table index */
		cq_idx++;
		if (unlikely(cq_idx == cq->ring.desc_count)) {
			cq_idx = 0;
			cq->last_color = cq->last_color ? 0 : 1;
		}

		/* Prefetch next mbuf & desc while processing current one */
		cqd_ptr = (struct cq_desc *)(cq->ring.descs) + cq_idx;
		rte_enic_prefetch(cqd_ptr);

		ciflags = enic_cq_rx_desc_ciflags(
			(struct cq_enet_rq_desc *)&cqd);

		/* Push descriptor for newly allocated mbuf */
		nmb->data_off = RTE_PKTMBUF_HEADROOM;
		dma_addr = (dma_addr_t)(nmb->buf_physaddr +
					RTE_PKTMBUF_HEADROOM);
		rq_enet_desc_enc(rqd_ptr, dma_addr,
				(rq->is_sop ? RQ_ENET_TYPE_ONLY_SOP
				: RQ_ENET_TYPE_NOT_SOP),
				nmb->buf_len - RTE_PKTMBUF_HEADROOM);

		/* Fill in the rest of the mbuf */
		seg_length = enic_cq_rx_desc_n_bytes(&cqd);

		if (rq->is_sop) {
			first_seg = rxmb;
			first_seg->pkt_len = seg_length;
		} else {
			first_seg->pkt_len = (uint16_t)(first_seg->pkt_len
							+ seg_length);
			first_seg->nb_segs++;
			last_seg->next = rxmb;
		}

		rxmb->port = enic->port_id;
		rxmb->data_len = seg_length;

		rq->rx_nb_hold++;

		if (!(enic_cq_rx_desc_eop(ciflags))) {
			last_seg = rxmb;
			continue;
		}

		/* cq rx flags are only valid if eop bit is set */
		first_seg->packet_type = enic_cq_rx_flags_to_pkt_type(&cqd);
		enic_cq_rx_to_pkt_flags(&cqd, first_seg);

		if (unlikely(packet_error)) {
			rte_pktmbuf_free(first_seg);
			rte_atomic64_inc(&enic->soft_stats.rx_packet_errors);
			continue;
		}

		/* prefetch mbuf data for caller */
		rte_packet_prefetch(RTE_PTR_ADD(first_seg->buf_addr,
				    RTE_PKTMBUF_HEADROOM));

		/* store the mbuf address into the next entry of the array */
		rx_pkts[nb_rx++] = first_seg;
	}

	sop_rq->pkt_first_seg = first_seg;
	sop_rq->pkt_last_seg = last_seg;

	cq->to_clean = cq_idx;

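	/* Refill the NIC once more than rx_free_thresh buffers have been
	 * consumed.  The data RQ (when in use) is posted before the sop RQ.
	 */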
	if ((sop_rq->rx_nb_hold + data_rq->rx_nb_hold) >
	    sop_rq->rx_free_thresh) {
		if (data_rq->in_use) {
			data_rq->posted_index =
				enic_ring_add(data_rq->ring.desc_count,
					      data_rq->posted_index,
					      data_rq->rx_nb_hold);
			data_rq->rx_nb_hold = 0;
		}
		sop_rq->posted_index = enic_ring_add(sop_rq->ring.desc_count,
						     sop_rq->posted_index,
						     sop_rq->rx_nb_hold);
		sop_rq->rx_nb_hold = 0;

		rte_mb();
		if (data_rq->in_use)
			iowrite32_relaxed(data_rq->posted_index,
					  &data_rq->ctrl->posted_index);
		rte_compiler_barrier();
		iowrite32_relaxed(sop_rq->posted_index,
				  &sop_rq->ctrl->posted_index);
	}

	return nb_rx;
}

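/* Release transmitted mbufs from the WQ ring, from tail_idx up to and
 * including completed_index.  Frees are batched per mempool to reduce the
 * number of rte_mempool_put_bulk() calls.
 */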
static inline void enic_free_wq_bufs(struct vnic_wq *wq, u16 completed_index)
{
	struct vnic_wq_buf *buf;
	struct rte_mbuf *m, *free[ENIC_MAX_WQ_DESCS];
	unsigned int nb_to_free, nb_free = 0, i;
	struct rte_mempool *pool;
	unsigned int tail_idx;
	unsigned int desc_count = wq->ring.desc_count;

	nb_to_free = enic_ring_sub(desc_count, wq->tail_idx, completed_index)
				   + 1;
	tail_idx = wq->tail_idx;
	buf = &wq->bufs[tail_idx];
	pool = ((struct rte_mbuf *)buf->mb)->pool;
	for (i = 0; i < nb_to_free; i++) {
		buf = &wq->bufs[tail_idx];
		m = rte_pktmbuf_prefree_seg((struct rte_mbuf *)(buf->mb));
		buf->mb = NULL;

		if (unlikely(m == NULL)) {
			tail_idx = enic_ring_incr(desc_count, tail_idx);
			continue;
		}

		if (likely(m->pool == pool)) {
			RTE_ASSERT(nb_free < ENIC_MAX_WQ_DESCS);
			free[nb_free++] = m;
		} else {
			rte_mempool_put_bulk(pool, (void *)free, nb_free);
			free[0] = m;
			nb_free = 1;
			pool = m->pool;
		}
		tail_idx = enic_ring_incr(desc_count, tail_idx);
	}

	if (nb_free > 0)
		rte_mempool_put_bulk(pool, (void **)free, nb_free);

	wq->tail_idx = tail_idx;
	wq->ring.desc_avail += nb_to_free;
}

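/* Read the completion index recorded in the WQ completion message area
 * (cqmsg_rz) and free any newly completed Tx buffers.
 */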
unsigned int enic_cleanup_wq(__rte_unused struct enic *enic, struct vnic_wq *wq)
{
	u16 completed_index;

	completed_index = *((uint32_t *)wq->cqmsg_rz->addr) & 0xffff;

	if (wq->last_completed_index != completed_index) {
		enic_free_wq_bufs(wq, completed_index);
		wq->last_completed_index = completed_index;
	}
	return 0;
}

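/* Transmit burst.  Completed descriptors are reclaimed first; each mbuf is
 * then encoded into one WQ descriptor per segment, with optional VLAN tag
 * insertion, IP/TCP/UDP checksum offload and TSO.  The new head index is
 * posted to the NIC once at the end of the burst.
 */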
uint16_t enic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
	uint16_t nb_pkts)
{
	uint16_t index;
	unsigned int pkt_len, data_len;
	unsigned int nb_segs;
	struct rte_mbuf *tx_pkt;
	struct vnic_wq *wq = (struct vnic_wq *)tx_queue;
	struct enic *enic = vnic_dev_priv(wq->vdev);
	unsigned short vlan_id;
	uint64_t ol_flags;
	uint64_t ol_flags_mask;
	unsigned int wq_desc_avail;
	int head_idx;
	struct vnic_wq_buf *buf;
	unsigned int desc_count;
	struct wq_enet_desc *descs, *desc_p, desc_tmp;
	uint16_t mss;
	uint8_t vlan_tag_insert;
	uint8_t eop;
	uint64_t bus_addr;
	uint8_t offload_mode;
	uint16_t header_len;

	enic_cleanup_wq(enic, wq);
	wq_desc_avail = vnic_wq_desc_avail(wq);
	head_idx = wq->head_idx;
	desc_count = wq->ring.desc_count;
	ol_flags_mask = PKT_TX_VLAN_PKT | PKT_TX_IP_CKSUM | PKT_TX_L4_MASK;

	nb_pkts = RTE_MIN(nb_pkts, ENIC_TX_XMIT_MAX);

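	/* Main transmit loop: one WQ descriptor is built per mbuf segment;
	 * oversized packets are dropped and the loop stops early if the
	 * ring runs out of descriptors.
	 */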
	for (index = 0; index < nb_pkts; index++) {
		tx_pkt = *tx_pkts++;
		pkt_len = tx_pkt->pkt_len;
		data_len = tx_pkt->data_len;
		ol_flags = tx_pkt->ol_flags;
		nb_segs = tx_pkt->nb_segs;

		if (pkt_len > ENIC_TX_MAX_PKT_SIZE) {
			rte_pktmbuf_free(tx_pkt);
			rte_atomic64_inc(&enic->soft_stats.tx_oversized);
			continue;
		}

		if (nb_segs > wq_desc_avail) {
			if (index > 0)
				goto post;
			goto done;
		}

		mss = 0;
		vlan_id = 0;
		vlan_tag_insert = 0;
		bus_addr = (dma_addr_t)
			   (tx_pkt->buf_physaddr + tx_pkt->data_off);

		descs = (struct wq_enet_desc *)wq->ring.descs;
		desc_p = descs + head_idx;

		eop = (data_len == pkt_len);
		offload_mode = WQ_ENET_OFFLOAD_MODE_CSUM;
		header_len = 0;

		if (tx_pkt->tso_segsz) {
			header_len = tso_header_len(tx_pkt);
			if (header_len) {
				offload_mode = WQ_ENET_OFFLOAD_MODE_TSO;
				mss = tx_pkt->tso_segsz;
			}
		}
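		/* In CSUM mode the mss field of the descriptor carries the
		 * checksum-offload request bits rather than an MSS.
		 */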
		if ((ol_flags & ol_flags_mask) && (header_len == 0)) {
			if (ol_flags & PKT_TX_IP_CKSUM)
				mss |= ENIC_CALC_IP_CKSUM;

			/* The NIC has only one bit for TCP and UDP checksum */
			switch (ol_flags & PKT_TX_L4_MASK) {
			case PKT_TX_TCP_CKSUM:
			case PKT_TX_UDP_CKSUM:
				mss |= ENIC_CALC_TCP_UDP_CKSUM;
				break;
			}
		}

		if (ol_flags & PKT_TX_VLAN_PKT) {
			vlan_tag_insert = 1;
			vlan_id = tx_pkt->vlan_tci;
		}

		wq_enet_desc_enc(&desc_tmp, bus_addr, data_len, mss, header_len,
				 offload_mode, eop, eop, 0, vlan_tag_insert,
				 vlan_id, 0);

		*desc_p = desc_tmp;
		buf = &wq->bufs[head_idx];
		buf->mb = (void *)tx_pkt;
		head_idx = enic_ring_incr(desc_count, head_idx);
		wq_desc_avail--;

		if (!eop) {
			for (tx_pkt = tx_pkt->next; tx_pkt; tx_pkt =
			    tx_pkt->next) {
				data_len = tx_pkt->data_len;

				if (tx_pkt->next == NULL)
					eop = 1;
				desc_p = descs + head_idx;
				bus_addr = (dma_addr_t)(tx_pkt->buf_physaddr
					   + tx_pkt->data_off);
				wq_enet_desc_enc((struct wq_enet_desc *)
						 &desc_tmp, bus_addr, data_len,
						 mss, 0, offload_mode, eop, eop,
						 0, vlan_tag_insert, vlan_id,
						 0);

				*desc_p = desc_tmp;
				buf = &wq->bufs[head_idx];
				buf->mb = (void *)tx_pkt;
				head_idx = enic_ring_incr(desc_count, head_idx);
				wq_desc_avail--;
			}
		}
	}
 post:
	rte_wmb();
	iowrite32_relaxed(head_idx, &wq->ctrl->posted_index);
 done:
	wq->ring.desc_avail = wq_desc_avail;
	wq->head_idx = head_idx;

	return index;
}