drivers/net/enic/enic_rxtx.c (DPDK 18.11-rc1, deb_dpdk.git)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2008-2017 Cisco Systems, Inc.  All rights reserved.
 * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
 */

#include <rte_mbuf.h>
#include <rte_ethdev_driver.h>
#include <rte_net.h>
#include <rte_prefetch.h>

#include "enic_compat.h"
#include "rq_enet_desc.h"
#include "enic.h"
#include "enic_rxtx_common.h"
#include <rte_ether.h>
#include <rte_ip.h>
#include <rte_tcp.h>

#define RTE_PMD_USE_PREFETCH

#ifdef RTE_PMD_USE_PREFETCH
/* Prefetch a cache line into all cache levels. */
#define rte_enic_prefetch(p) rte_prefetch0(p)
#else
#define rte_enic_prefetch(p) do {} while (0)
#endif

#ifdef RTE_PMD_PACKET_PREFETCH
#define rte_packet_prefetch(p) rte_prefetch1(p)
#else
#define rte_packet_prefetch(p) do {} while (0)
#endif
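/*
 * Note: rte_prefetch0() pulls a line into all cache levels, while
 * rte_prefetch1() skips the innermost level. Completion descriptors are
 * prefetched with the former because they are read on the very next loop
 * iteration; packet data is prefetched with the latter only for the caller.
 */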

/*
 * Dummy receive function, installed in place of the real receive handler
 * so that reconfiguration operations can be performed safely.
 */
uint16_t
enic_dummy_recv_pkts(__rte_unused void *rx_queue,
                     __rte_unused struct rte_mbuf **rx_pkts,
                     __rte_unused uint16_t nb_pkts)
{
        return 0;
}

uint16_t
enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
               uint16_t nb_pkts)
{
        struct vnic_rq *sop_rq = rx_queue;
        struct vnic_rq *data_rq;
        struct vnic_rq *rq;
        struct enic *enic = vnic_dev_priv(sop_rq->vdev);
        uint16_t cq_idx;
        uint16_t rq_idx, max_rx;
        uint16_t rq_num;
        struct rte_mbuf *nmb, *rxmb;
        uint16_t nb_rx = 0;
        struct vnic_cq *cq;
        volatile struct cq_desc *cqd_ptr;
        uint8_t color;
        uint8_t tnl;
        uint16_t seg_length;
        struct rte_mbuf *first_seg = sop_rq->pkt_first_seg;
        struct rte_mbuf *last_seg = sop_rq->pkt_last_seg;

        cq = &enic->cq[enic_cq_rq(enic, sop_rq->index)];
        cq_idx = cq->to_clean;          /* index of cqd, rqd, mbuf_table */
        cqd_ptr = (struct cq_desc *)(cq->ring.descs) + cq_idx;
        color = cq->last_color;

        data_rq = &enic->rq[sop_rq->data_queue_idx];

        /* Receive until the end of the ring, at most. */
        max_rx = RTE_MIN(nb_pkts, cq->ring.desc_count - cq_idx);

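        /*
         * Limiting the burst to the descriptors left before the ring end
         * means the wrap-around (and the matching color-bit flip) only has
         * to be handled once, after the loop below.
         */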
        while (max_rx) {
                volatile struct rq_enet_desc *rqd_ptr;
                struct cq_desc cqd;
                uint8_t packet_error;
                uint16_t ciflags;

                max_rx--;

                /* Check for pkts available */
                if ((cqd_ptr->type_color & CQ_DESC_COLOR_MASK_NOSHIFT) == color)
                        break;

                /* Get the cq descriptor and extract rq info from it */
                cqd = *cqd_ptr;
                rq_num = cqd.q_number & CQ_DESC_Q_NUM_MASK;
                rq_idx = cqd.completed_index & CQ_DESC_COMP_NDX_MASK;

                rq = &enic->rq[rq_num];
                rqd_ptr = ((struct rq_enet_desc *)rq->ring.descs) + rq_idx;

                /* allocate a new mbuf */
                nmb = rte_mbuf_raw_alloc(rq->mp);
                if (nmb == NULL) {
                        rte_atomic64_inc(&enic->soft_stats.rx_nombuf);
                        break;
                }

                /* A packet error means descriptor and data are untrusted */
                packet_error = enic_cq_rx_check_err(&cqd);

                /* Get the mbuf to return and replace with one just allocated */
                rxmb = rq->mbuf_ring[rq_idx];
                rq->mbuf_ring[rq_idx] = nmb;
                cq_idx++;

                /* Prefetch next mbuf & desc while processing current one */
                cqd_ptr = (struct cq_desc *)(cq->ring.descs) + cq_idx;
                rte_enic_prefetch(cqd_ptr);

                ciflags = enic_cq_rx_desc_ciflags(
                        (struct cq_enet_rq_desc *)&cqd);

                /* Push descriptor for newly allocated mbuf */
                nmb->data_off = RTE_PKTMBUF_HEADROOM;
                /*
                 * Only the address needs to be refilled. length_type of the
                 * descriptor is set during initialization
                 * (enic_alloc_rx_queue_mbufs) and does not change.
                 */
                rqd_ptr->address = rte_cpu_to_le_64(nmb->buf_iova +
                                                    RTE_PKTMBUF_HEADROOM);

                /* Fill in the rest of the mbuf */
                seg_length = enic_cq_rx_desc_n_bytes(&cqd);

                if (rq->is_sop) {
                        first_seg = rxmb;
                        first_seg->pkt_len = seg_length;
                } else {
                        first_seg->pkt_len = (uint16_t)(first_seg->pkt_len
                                                        + seg_length);
                        first_seg->nb_segs++;
                        last_seg->next = rxmb;
                }
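                /*
                 * Scatter Rx: the first segment of a packet comes from the
                 * start-of-packet (SOP) RQ and any further segments from the
                 * data RQ; the chain built above is only handed to the caller
                 * once the end-of-packet flag is seen below.
                 */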

                rxmb->port = enic->port_id;
                rxmb->data_len = seg_length;

                rq->rx_nb_hold++;

                if (!(enic_cq_rx_desc_eop(ciflags))) {
                        last_seg = rxmb;
                        continue;
                }

                /*
                 * When overlay offload is enabled, CQ.fcoe indicates the
                 * packet is tunnelled.
                 */
                tnl = enic->overlay_offload &&
                        (ciflags & CQ_ENET_RQ_DESC_FLAGS_FCOE) != 0;
                /* cq rx flags are only valid if eop bit is set */
                first_seg->packet_type =
                        enic_cq_rx_flags_to_pkt_type(&cqd, tnl);
                enic_cq_rx_to_pkt_flags(&cqd, first_seg);

                /* Wipe the outer types set by enic_cq_rx_flags_to_pkt_type() */
                if (tnl) {
                        first_seg->packet_type &= ~(RTE_PTYPE_L3_MASK |
                                                    RTE_PTYPE_L4_MASK);
                }
                if (unlikely(packet_error)) {
                        rte_pktmbuf_free(first_seg);
                        rte_atomic64_inc(&enic->soft_stats.rx_packet_errors);
                        continue;
                }

                /* prefetch mbuf data for caller */
                rte_packet_prefetch(RTE_PTR_ADD(first_seg->buf_addr,
                                    RTE_PKTMBUF_HEADROOM));

                /* store the mbuf address into the next entry of the array */
                rx_pkts[nb_rx++] = first_seg;
        }
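        /*
         * Completion descriptors carry a color bit that alternates on every
         * pass over the ring; when the consumer index wraps, flip the color
         * we expect so that stale descriptors from the previous pass are not
         * mistaken for new completions.
         */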
        if (unlikely(cq_idx == cq->ring.desc_count)) {
                cq_idx = 0;
                cq->last_color ^= CQ_DESC_COLOR_MASK_NOSHIFT;
        }

        sop_rq->pkt_first_seg = first_seg;
        sop_rq->pkt_last_seg = last_seg;

        cq->to_clean = cq_idx;

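        /*
         * Post the refilled descriptors once enough have accumulated.
         * rte_mb() makes the descriptor writes visible before the doorbells,
         * and the data RQ is posted before the SOP RQ (with a compiler
         * barrier in between), presumably so the NIC never sees a
         * start-of-packet descriptor whose data descriptors are missing.
         */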
        if ((sop_rq->rx_nb_hold + data_rq->rx_nb_hold) >
            sop_rq->rx_free_thresh) {
                if (data_rq->in_use) {
                        data_rq->posted_index =
                                enic_ring_add(data_rq->ring.desc_count,
                                              data_rq->posted_index,
                                              data_rq->rx_nb_hold);
                        data_rq->rx_nb_hold = 0;
                }
                sop_rq->posted_index = enic_ring_add(sop_rq->ring.desc_count,
                                                     sop_rq->posted_index,
                                                     sop_rq->rx_nb_hold);
                sop_rq->rx_nb_hold = 0;

                rte_mb();
                if (data_rq->in_use)
                        iowrite32_relaxed(data_rq->posted_index,
                                          &data_rq->ctrl->posted_index);
                rte_compiler_barrier();
                iowrite32_relaxed(sop_rq->posted_index,
                                  &sop_rq->ctrl->posted_index);
        }

        return nb_rx;
}

uint16_t
enic_noscatter_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
                         uint16_t nb_pkts)
{
        struct rte_mbuf *mb, **rx, **rxmb;
        uint16_t cq_idx, nb_rx, max_rx;
        struct cq_enet_rq_desc *cqd;
        struct rq_enet_desc *rqd;
        unsigned int port_id;
        struct vnic_cq *cq;
        struct vnic_rq *rq;
        struct enic *enic;
        uint8_t color;
        bool overlay;
        bool tnl;

        rq = rx_queue;
        enic = vnic_dev_priv(rq->vdev);
        cq = &enic->cq[enic_cq_rq(enic, rq->index)];
        cq_idx = cq->to_clean;

        /*
         * Fill up the reserve of free mbufs. Below, we restock the receive
         * ring with these mbufs to avoid allocation failures.
         */
        if (rq->num_free_mbufs == 0) {
                if (rte_mempool_get_bulk(rq->mp, (void **)rq->free_mbufs,
                                         ENIC_RX_BURST_MAX))
                        return 0;
                rq->num_free_mbufs = ENIC_RX_BURST_MAX;
        }
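        /*
         * Taking ENIC_RX_BURST_MAX mbufs in one rte_mempool_get_bulk() call
         * amortizes mempool overhead across bursts; if the bulk get fails,
         * the burst is skipped entirely rather than receiving into buffers
         * that could not be replaced.
         */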

        /* Receive until the end of the ring, at most. */
        max_rx = RTE_MIN(nb_pkts, rq->num_free_mbufs);
        max_rx = RTE_MIN(max_rx, cq->ring.desc_count - cq_idx);

        cqd = (struct cq_enet_rq_desc *)(cq->ring.descs) + cq_idx;
        color = cq->last_color;
        rxmb = rq->mbuf_ring + cq_idx;
        port_id = enic->port_id;
        overlay = enic->overlay_offload;

        rx = rx_pkts;
        while (max_rx) {
                max_rx--;
                if ((cqd->type_color & CQ_DESC_COLOR_MASK_NOSHIFT) == color)
                        break;
                if (unlikely(cqd->bytes_written_flags &
                             CQ_ENET_RQ_DESC_FLAGS_TRUNCATED)) {
                        rte_pktmbuf_free(*rxmb++);
                        rte_atomic64_inc(&enic->soft_stats.rx_packet_errors);
                        cqd++;
                        continue;
                }

                mb = *rxmb++;
                /* prefetch mbuf data for caller */
                rte_packet_prefetch(RTE_PTR_ADD(mb->buf_addr,
                                    RTE_PKTMBUF_HEADROOM));
                mb->data_len = cqd->bytes_written_flags &
                        CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK;
                mb->pkt_len = mb->data_len;
                mb->port = port_id;
                tnl = overlay && (cqd->completed_index_flags &
                                  CQ_ENET_RQ_DESC_FLAGS_FCOE) != 0;
                mb->packet_type =
                        enic_cq_rx_flags_to_pkt_type((struct cq_desc *)cqd,
                                                     tnl);
                enic_cq_rx_to_pkt_flags((struct cq_desc *)cqd, mb);
                /* Wipe the outer types set by enic_cq_rx_flags_to_pkt_type() */
                if (tnl) {
                        mb->packet_type &= ~(RTE_PTYPE_L3_MASK |
                                             RTE_PTYPE_L4_MASK);
                }
                cqd++;
                *rx++ = mb;
        }
        /* Number of descriptors visited */
        nb_rx = cqd - (struct cq_enet_rq_desc *)(cq->ring.descs) - cq_idx;
        if (nb_rx == 0)
                return 0;
        rqd = ((struct rq_enet_desc *)rq->ring.descs) + cq_idx;
        rxmb = rq->mbuf_ring + cq_idx;
        cq_idx += nb_rx;
        rq->rx_nb_hold += nb_rx;
        if (unlikely(cq_idx == cq->ring.desc_count)) {
                cq_idx = 0;
                cq->last_color ^= CQ_DESC_COLOR_MASK_NOSHIFT;
        }
        cq->to_clean = cq_idx;

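        /*
         * Restock the ring: copy mbuf pointers from the tail of the reserve
         * into mbuf_ring and rewrite only the buffer address of each RQ
         * descriptor; as in the scatter path, length_type was programmed at
         * queue setup and does not change.
         */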
        memcpy(rxmb, rq->free_mbufs + ENIC_RX_BURST_MAX - rq->num_free_mbufs,
               sizeof(struct rte_mbuf *) * nb_rx);
        rq->num_free_mbufs -= nb_rx;
        while (nb_rx) {
                nb_rx--;
                mb = *rxmb++;
                mb->data_off = RTE_PKTMBUF_HEADROOM;
                rqd->address = mb->buf_iova + RTE_PKTMBUF_HEADROOM;
                rqd++;
        }
        if (rq->rx_nb_hold > rq->rx_free_thresh) {
                rq->posted_index = enic_ring_add(rq->ring.desc_count,
                                                 rq->posted_index,
                                                 rq->rx_nb_hold);
                rq->rx_nb_hold = 0;
                rte_wmb();
                iowrite32_relaxed(rq->posted_index,
                                  &rq->ctrl->posted_index);
        }

        return rx - rx_pkts;
}

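/*
 * Free transmitted mbufs up to (and including) completed_index. Consecutive
 * mbufs that belong to the same mempool are batched in free[] and released
 * with a single rte_mempool_put_bulk() call; a pool change flushes the batch
 * and starts a new one.
 */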
static inline void enic_free_wq_bufs(struct vnic_wq *wq, u16 completed_index)
{
        struct rte_mbuf *buf;
        struct rte_mbuf *m, *free[ENIC_MAX_WQ_DESCS];
        unsigned int nb_to_free, nb_free = 0, i;
        struct rte_mempool *pool;
        unsigned int tail_idx;
        unsigned int desc_count = wq->ring.desc_count;

        nb_to_free = enic_ring_sub(desc_count, wq->tail_idx, completed_index)
                                   + 1;
        tail_idx = wq->tail_idx;
        pool = wq->bufs[tail_idx]->pool;
        for (i = 0; i < nb_to_free; i++) {
                buf = wq->bufs[tail_idx];
                m = rte_pktmbuf_prefree_seg(buf);
                if (unlikely(m == NULL)) {
                        tail_idx = enic_ring_incr(desc_count, tail_idx);
                        continue;
                }

                if (likely(m->pool == pool)) {
                        RTE_ASSERT(nb_free < ENIC_MAX_WQ_DESCS);
                        free[nb_free++] = m;
                } else {
                        rte_mempool_put_bulk(pool, (void *)free, nb_free);
                        free[0] = m;
                        nb_free = 1;
                        pool = m->pool;
                }
                tail_idx = enic_ring_incr(desc_count, tail_idx);
        }

        if (nb_free > 0)
                rte_mempool_put_bulk(pool, (void **)free, nb_free);

        wq->tail_idx = tail_idx;
        wq->ring.desc_avail += nb_to_free;
}

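/*
 * Tx cleanup: the completed index is read from the CQ message area
 * (wq->cqmsg_rz), which the adapter appears to update directly in host
 * memory, so no CQ descriptors have to be processed here. The return value
 * is always 0 and is not used by callers.
 */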
unsigned int enic_cleanup_wq(__rte_unused struct enic *enic, struct vnic_wq *wq)
{
        u16 completed_index;

        completed_index = *((uint32_t *)wq->cqmsg_rz->addr) & 0xffff;

        if (wq->last_completed_index != completed_index) {
                enic_free_wq_bufs(wq, completed_index);
                wq->last_completed_index = completed_index;
        }
        return 0;
}

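/*
 * tx_pkt_prepare handler: checks packet length and offload flags and lets
 * rte_net_intel_cksum_prepare() fix up the checksum fields the NIC expects.
 * On the first failing packet, rte_errno is set and the number of packets
 * that passed is returned.
 */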
uint16_t enic_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
                        uint16_t nb_pkts)
{
        struct vnic_wq *wq = (struct vnic_wq *)tx_queue;
        int32_t ret;
        uint16_t i;
        uint64_t ol_flags;
        struct rte_mbuf *m;

        for (i = 0; i != nb_pkts; i++) {
                m = tx_pkts[i];
                if (unlikely(m->pkt_len > ENIC_TX_MAX_PKT_SIZE)) {
                        rte_errno = EINVAL;
                        return i;
                }
                ol_flags = m->ol_flags;
                if (ol_flags & wq->tx_offload_notsup_mask) {
                        rte_errno = ENOTSUP;
                        return i;
                }
#ifdef RTE_LIBRTE_ETHDEV_DEBUG
                ret = rte_validate_tx_offload(m);
                if (ret != 0) {
                        rte_errno = ret;
                        return i;
                }
#endif
                ret = rte_net_intel_cksum_prepare(m);
                if (ret != 0) {
                        rte_errno = ret;
                        return i;
                }
        }

        return i;
}

uint16_t enic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
        uint16_t nb_pkts)
{
        uint16_t index;
        unsigned int pkt_len, data_len;
        unsigned int nb_segs;
        struct rte_mbuf *tx_pkt;
        struct vnic_wq *wq = (struct vnic_wq *)tx_queue;
        struct enic *enic = vnic_dev_priv(wq->vdev);
        unsigned short vlan_id;
        uint64_t ol_flags;
        uint64_t ol_flags_mask;
        unsigned int wq_desc_avail;
        int head_idx;
        unsigned int desc_count;
        struct wq_enet_desc *descs, *desc_p, desc_tmp;
        uint16_t mss;
        uint8_t vlan_tag_insert;
        uint8_t eop, cq;
        uint64_t bus_addr;
        uint8_t offload_mode;
        uint16_t header_len;
        uint64_t tso;
        rte_atomic64_t *tx_oversized;

        enic_cleanup_wq(enic, wq);
        wq_desc_avail = vnic_wq_desc_avail(wq);
        head_idx = wq->head_idx;
        desc_count = wq->ring.desc_count;
        ol_flags_mask = PKT_TX_VLAN | PKT_TX_IP_CKSUM | PKT_TX_L4_MASK;
        tx_oversized = &enic->soft_stats.tx_oversized;

        nb_pkts = RTE_MIN(nb_pkts, ENIC_TX_XMIT_MAX);

        for (index = 0; index < nb_pkts; index++) {
                tx_pkt = *tx_pkts++;
                pkt_len = tx_pkt->pkt_len;
                data_len = tx_pkt->data_len;
                ol_flags = tx_pkt->ol_flags;
                nb_segs = tx_pkt->nb_segs;
                tso = ol_flags & PKT_TX_TCP_SEG;

                /* drop packet if it's too big to send */
                if (unlikely(!tso && pkt_len > ENIC_TX_MAX_PKT_SIZE)) {
                        rte_pktmbuf_free(tx_pkt);
                        rte_atomic64_inc(tx_oversized);
                        continue;
                }

                if (nb_segs > wq_desc_avail) {
                        if (index > 0)
                                goto post;
                        goto done;
                }

                mss = 0;
                vlan_id = tx_pkt->vlan_tci;
                vlan_tag_insert = !!(ol_flags & PKT_TX_VLAN);
                bus_addr = (dma_addr_t)
                           (tx_pkt->buf_iova + tx_pkt->data_off);

                descs = (struct wq_enet_desc *)wq->ring.descs;
                desc_p = descs + head_idx;

                eop = (data_len == pkt_len);
                offload_mode = WQ_ENET_OFFLOAD_MODE_CSUM;
                header_len = 0;

                if (tso) {
                        header_len = tx_pkt->l2_len + tx_pkt->l3_len +
                                     tx_pkt->l4_len;

                        /* Drop if non-TCP packet or TSO seg size is too big */
                        if (unlikely(header_len == 0 || ((tx_pkt->tso_segsz +
                            header_len) > ENIC_TX_MAX_PKT_SIZE))) {
                                rte_pktmbuf_free(tx_pkt);
                                rte_atomic64_inc(tx_oversized);
                                continue;
                        }

                        offload_mode = WQ_ENET_OFFLOAD_MODE_TSO;
                        mss = tx_pkt->tso_segsz;
                        /* For tunnel, need the size of outer+inner headers */
                        if (ol_flags & PKT_TX_TUNNEL_MASK) {
                                header_len += tx_pkt->outer_l2_len +
                                        tx_pkt->outer_l3_len;
                        }
                }

                if ((ol_flags & ol_flags_mask) && (header_len == 0)) {
                        if (ol_flags & PKT_TX_IP_CKSUM)
                                mss |= ENIC_CALC_IP_CKSUM;

                        /* NIC uses just 1 bit for UDP and TCP */
                        switch (ol_flags & PKT_TX_L4_MASK) {
                        case PKT_TX_TCP_CKSUM:
                        case PKT_TX_UDP_CKSUM:
                                mss |= ENIC_CALC_TCP_UDP_CKSUM;
                                break;
                        }
                }
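                /*
                 * In WQ_ENET_OFFLOAD_MODE_CSUM the mss bits of the descriptor
                 * are not a segment size: ENIC_CALC_IP_CKSUM and
                 * ENIC_CALC_TCP_UDP_CKSUM request the corresponding checksum
                 * offloads instead. The header_len == 0 test above keeps this
                 * from clobbering the real MSS in the TSO case.
                 */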
                wq->cq_pend++;
                cq = 0;
                if (eop && wq->cq_pend >= ENIC_WQ_CQ_THRESH) {
                        cq = 1;
                        wq->cq_pend = 0;
                }
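                /*
                 * Completion coalescing: a CQ entry is requested only on EOP
                 * and only once every ENIC_WQ_CQ_THRESH descriptors, so
                 * enic_cleanup_wq() has few completions to process.
                 */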
                wq_enet_desc_enc(&desc_tmp, bus_addr, data_len, mss, header_len,
                                 offload_mode, eop, cq, 0, vlan_tag_insert,
                                 vlan_id, 0);

                *desc_p = desc_tmp;
                wq->bufs[head_idx] = tx_pkt;
                head_idx = enic_ring_incr(desc_count, head_idx);
                wq_desc_avail--;

                if (!eop) {
                        for (tx_pkt = tx_pkt->next; tx_pkt; tx_pkt =
                            tx_pkt->next) {
                                data_len = tx_pkt->data_len;

                                wq->cq_pend++;
                                cq = 0;
                                if (tx_pkt->next == NULL) {
                                        eop = 1;
                                        if (wq->cq_pend >= ENIC_WQ_CQ_THRESH) {
                                                cq = 1;
                                                wq->cq_pend = 0;
                                        }
                                }
                                desc_p = descs + head_idx;
                                bus_addr = (dma_addr_t)(tx_pkt->buf_iova
                                           + tx_pkt->data_off);
                                wq_enet_desc_enc((struct wq_enet_desc *)
                                                 &desc_tmp, bus_addr, data_len,
                                                 mss, 0, offload_mode, eop, cq,
                                                 0, vlan_tag_insert, vlan_id,
                                                 0);

                                *desc_p = desc_tmp;
                                wq->bufs[head_idx] = tx_pkt;
                                head_idx = enic_ring_incr(desc_count, head_idx);
                                wq_desc_avail--;
                        }
                }
        }
 post:
        rte_wmb();
        iowrite32_relaxed(head_idx, &wq->ctrl->posted_index);
 done:
        wq->ring.desc_avail = wq_desc_avail;
        wq->head_idx = head_idx;

        return index;
}

static void enqueue_simple_pkts(struct rte_mbuf **pkts,
                                struct wq_enet_desc *desc,
                                uint16_t n,
                                struct enic *enic)
{
        struct rte_mbuf *p;
        uint16_t mss;

        while (n) {
                n--;
                p = *pkts++;
                desc->address = p->buf_iova + p->data_off;
                desc->length = p->pkt_len;
                /* VLAN insert */
                desc->vlan_tag = p->vlan_tci;
                desc->header_length_flags &=
                        ((1 << WQ_ENET_FLAGS_EOP_SHIFT) |
                         (1 << WQ_ENET_FLAGS_CQ_ENTRY_SHIFT));
                if (p->ol_flags & PKT_TX_VLAN) {
                        desc->header_length_flags |=
                                1 << WQ_ENET_FLAGS_VLAN_TAG_INSERT_SHIFT;
                }
                /*
                 * Checksum offload. We use WQ_ENET_OFFLOAD_MODE_CSUM, which
                 * is 0, so no need to set offload_mode.
                 */
                mss = 0;
                if (p->ol_flags & PKT_TX_IP_CKSUM)
                        mss |= ENIC_CALC_IP_CKSUM << WQ_ENET_MSS_SHIFT;
                if (p->ol_flags & PKT_TX_L4_MASK)
                        mss |= ENIC_CALC_TCP_UDP_CKSUM << WQ_ENET_MSS_SHIFT;
                desc->mss_loopback = mss;

                /*
                 * The app should not send oversized packets; tx_pkt_prepare
                 * includes a check as well. But some apps ignore the device
                 * max size and tx_pkt_prepare, and oversized packets cause WQ
                 * errors that end up disabling the whole WQ. So truncate such
                 * packets.
                 */
                if (unlikely(p->pkt_len > ENIC_TX_MAX_PKT_SIZE)) {
                        desc->length = ENIC_TX_MAX_PKT_SIZE;
                        rte_atomic64_inc(&enic->soft_stats.tx_oversized);
                }
                desc++;
        }
}

uint16_t enic_simple_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
                               uint16_t nb_pkts)
{
        unsigned int head_idx, desc_count;
        struct wq_enet_desc *desc;
        struct vnic_wq *wq;
        struct enic *enic;
        uint16_t rem, n;

        wq = (struct vnic_wq *)tx_queue;
        enic = vnic_dev_priv(wq->vdev);
        enic_cleanup_wq(enic, wq);
        /* Will enqueue this many packets in this call */
        nb_pkts = RTE_MIN(nb_pkts, wq->ring.desc_avail);
        if (nb_pkts == 0)
                return 0;

        head_idx = wq->head_idx;
        desc_count = wq->ring.desc_count;

        /* Descriptors until the end of the ring */
        n = desc_count - head_idx;
        n = RTE_MIN(nb_pkts, n);

        /* Save mbuf pointers to free later */
        memcpy(wq->bufs + head_idx, tx_pkts, sizeof(struct rte_mbuf *) * n);

        /* Enqueue until the ring end */
        rem = nb_pkts - n;
        desc = ((struct wq_enet_desc *)wq->ring.descs) + head_idx;
        enqueue_simple_pkts(tx_pkts, desc, n, enic);

        /* Wrap to the start of the ring */
        if (rem) {
                tx_pkts += n;
                memcpy(wq->bufs, tx_pkts, sizeof(struct rte_mbuf *) * rem);
                desc = (struct wq_enet_desc *)wq->ring.descs;
                enqueue_simple_pkts(tx_pkts, desc, rem, enic);
        }
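        /*
         * The two passes above handle ring wrap-around: enqueue_simple_pkts()
         * writes a contiguous run of descriptors, so a burst that crosses the
         * end of the ring is split into a tail piece and a piece that starts
         * again at index 0.
         */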
        rte_wmb();

        /* Update head_idx and desc_avail */
        wq->ring.desc_avail -= nb_pkts;
        head_idx += nb_pkts;
        if (head_idx >= desc_count)
                head_idx -= desc_count;
        wq->head_idx = head_idx;
        iowrite32_relaxed(head_idx, &wq->ctrl->posted_index);
        return nb_pkts;
}