deb_dpdk.git: drivers/net/fm10k/fm10k_rxtx.c (imported upstream DPDK 16.07.2)
/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <inttypes.h>

#include <rte_ethdev.h>
#include <rte_common.h>
#include "fm10k.h"
#include "base/fm10k_type.h"

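/*
 * Optional payload prefetch: when RTE_PMD_PACKET_PREFETCH is enabled in the
 * DPDK build configuration, rte_packet_prefetch() maps to rte_prefetch1();
 * otherwise it compiles to a no-op.
 */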
#ifdef RTE_PMD_PACKET_PREFETCH
#define rte_packet_prefetch(p)  rte_prefetch1(p)
#else
#define rte_packet_prefetch(p)  do {} while (0)
#endif

#ifdef RTE_LIBRTE_FM10K_DEBUG_RX
static inline void dump_rxd(union fm10k_rx_desc *rxd)
{
        PMD_RX_LOG(DEBUG, "+----------------|----------------+");
        PMD_RX_LOG(DEBUG, "|     GLORT      | PKT HDR & TYPE |");
        PMD_RX_LOG(DEBUG, "|   0x%08x   |   0x%08x   |", rxd->d.glort,
                        rxd->d.data);
        PMD_RX_LOG(DEBUG, "+----------------|----------------+");
        PMD_RX_LOG(DEBUG, "|   VLAN & LEN   |     STATUS     |");
        PMD_RX_LOG(DEBUG, "|   0x%08x   |   0x%08x   |", rxd->d.vlan_len,
                        rxd->d.staterr);
        PMD_RX_LOG(DEBUG, "+----------------|----------------+");
        PMD_RX_LOG(DEBUG, "|    RESERVED    |    RSS_HASH    |");
        PMD_RX_LOG(DEBUG, "|   0x%08x   |   0x%08x   |", 0, rxd->d.rss);
        PMD_RX_LOG(DEBUG, "+----------------|----------------+");
        PMD_RX_LOG(DEBUG, "|            TIME TAG             |");
        PMD_RX_LOG(DEBUG, "|       0x%016"PRIx64"        |", rxd->q.timestamp);
        PMD_RX_LOG(DEBUG, "+----------------|----------------+");
}
#endif

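/*
 * Translate RX descriptor status into mbuf metadata: look up the mbuf
 * packet_type from the descriptor's packet-type bits via ptype_table, and
 * set the RSS-hash and IP/L4 checksum-error offload flags when the
 * corresponding status/error bits are reported by the hardware.
 */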
/* @note: When this function is changed, make corresponding change to
 * fm10k_dev_supported_ptypes_get()
 */
static inline void
rx_desc_to_ol_flags(struct rte_mbuf *m, const union fm10k_rx_desc *d)
{
        static const uint32_t
                ptype_table[FM10K_RXD_PKTTYPE_MASK >> FM10K_RXD_PKTTYPE_SHIFT]
                        __rte_cache_aligned = {
                [FM10K_PKTTYPE_OTHER] = RTE_PTYPE_L2_ETHER,
                [FM10K_PKTTYPE_IPV4] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4,
                [FM10K_PKTTYPE_IPV4_EX] = RTE_PTYPE_L2_ETHER |
                        RTE_PTYPE_L3_IPV4_EXT,
                [FM10K_PKTTYPE_IPV6] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6,
                [FM10K_PKTTYPE_IPV6_EX] = RTE_PTYPE_L2_ETHER |
                        RTE_PTYPE_L3_IPV6_EXT,
                [FM10K_PKTTYPE_IPV4 | FM10K_PKTTYPE_TCP] = RTE_PTYPE_L2_ETHER |
                        RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP,
                [FM10K_PKTTYPE_IPV6 | FM10K_PKTTYPE_TCP] = RTE_PTYPE_L2_ETHER |
                        RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP,
                [FM10K_PKTTYPE_IPV4 | FM10K_PKTTYPE_UDP] = RTE_PTYPE_L2_ETHER |
                        RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP,
                [FM10K_PKTTYPE_IPV6 | FM10K_PKTTYPE_UDP] = RTE_PTYPE_L2_ETHER |
                        RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP,
        };

        m->packet_type = ptype_table[(d->w.pkt_info & FM10K_RXD_PKTTYPE_MASK)
                                                >> FM10K_RXD_PKTTYPE_SHIFT];

        if (d->w.pkt_info & FM10K_RXD_RSSTYPE_MASK)
                m->ol_flags |= PKT_RX_RSS_HASH;

        if (unlikely((d->d.staterr &
                (FM10K_RXD_STATUS_IPCS | FM10K_RXD_STATUS_IPE)) ==
                (FM10K_RXD_STATUS_IPCS | FM10K_RXD_STATUS_IPE)))
                m->ol_flags |= PKT_RX_IP_CKSUM_BAD;

        if (unlikely((d->d.staterr &
                (FM10K_RXD_STATUS_L4CS | FM10K_RXD_STATUS_L4E)) ==
                (FM10K_RXD_STATUS_L4CS | FM10K_RXD_STATUS_L4E)))
                m->ol_flags |= PKT_RX_L4_CKSUM_BAD;
}

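/*
 * Non-scattered RX burst: walk the descriptor ring from next_dd, stopping at
 * the first descriptor whose DD (descriptor done) bit is not yet set, and
 * hand back at most min(nb_pkts, alloc_thresh) single-mbuf packets. Consumed
 * slots are refilled in bulk once next_dd moves past next_trigger (or the
 * ring wraps), and the tail register is then advanced so the hardware can
 * reuse them.
 */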
uint16_t
fm10k_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
        uint16_t nb_pkts)
{
        struct rte_mbuf *mbuf;
        union fm10k_rx_desc desc;
        struct fm10k_rx_queue *q = rx_queue;
        uint16_t count = 0;
        int alloc = 0;
        uint16_t next_dd;
        int ret;

        next_dd = q->next_dd;

        nb_pkts = RTE_MIN(nb_pkts, q->alloc_thresh);
        for (count = 0; count < nb_pkts; ++count) {
                if (!(q->hw_ring[next_dd].d.staterr & FM10K_RXD_STATUS_DD))
                        break;
                mbuf = q->sw_ring[next_dd];
                desc = q->hw_ring[next_dd];
#ifdef RTE_LIBRTE_FM10K_DEBUG_RX
                dump_rxd(&desc);
#endif
                rte_pktmbuf_pkt_len(mbuf) = desc.w.length;
                rte_pktmbuf_data_len(mbuf) = desc.w.length;

                mbuf->ol_flags = 0;
#ifdef RTE_LIBRTE_FM10K_RX_OLFLAGS_ENABLE
                rx_desc_to_ol_flags(mbuf, &desc);
#endif

                mbuf->hash.rss = desc.d.rss;
                /**
                 * Packets received by the fm10k device always carry at least
                 * one VLAN tag; packets that arrive untagged get the port's
                 * default VLAN tag. The PKT_RX_VLAN_PKT flag is therefore
                 * always set and vlan_tci is valid for every RX mbuf.
                 */
                mbuf->ol_flags |= PKT_RX_VLAN_PKT;
                mbuf->vlan_tci = desc.w.vlan;
                /**
                 * mbuf->vlan_tci_outer is otherwise unused by the fm10k
                 * driver, so it is reused to hold the sglort value.
                 */
                if (q->rx_ftag_en)
                        mbuf->vlan_tci_outer = rte_le_to_cpu_16(desc.w.sglort);

                rx_pkts[count] = mbuf;
                if (++next_dd == q->nb_desc) {
                        next_dd = 0;
                        alloc = 1;
                }

                /* Prefetch next mbuf while processing current one. */
                rte_prefetch0(q->sw_ring[next_dd]);

                /*
                 * When next RX descriptor is on a cache-line boundary,
                 * prefetch the next 4 RX descriptors and the next 8 pointers
                 * to mbufs.
                 */
                if ((next_dd & 0x3) == 0) {
                        rte_prefetch0(&q->hw_ring[next_dd]);
                        rte_prefetch0(&q->sw_ring[next_dd]);
                }
        }

        q->next_dd = next_dd;

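        /*
         * Replenish in bulk: once enough descriptors have been consumed
         * (next_dd has passed next_trigger, or the ring wrapped), grab
         * alloc_thresh fresh mbufs from the mempool, rewrite the freed
         * descriptors with their buffer addresses, and bump the tail
         * register so the hardware sees the new buffers. On allocation
         * failure, next_dd is rolled back and no packets are returned.
         */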
        if ((q->next_dd > q->next_trigger) || (alloc == 1)) {
                ret = rte_mempool_get_bulk(q->mp,
                                        (void **)&q->sw_ring[q->next_alloc],
                                        q->alloc_thresh);

                if (unlikely(ret != 0)) {
                        uint8_t port = q->port_id;
                        PMD_RX_LOG(ERR, "Failed to alloc mbuf");
                        /*
                         * Need to restore next_dd if we cannot allocate new
                         * buffers to replenish the old ones.
                         */
                        q->next_dd = (q->next_dd + q->nb_desc - count) %
                                                                q->nb_desc;
                        rte_eth_devices[port].data->rx_mbuf_alloc_failed++;
                        return 0;
                }

                for (; q->next_alloc <= q->next_trigger; ++q->next_alloc) {
                        mbuf = q->sw_ring[q->next_alloc];

                        /* setup static mbuf fields */
                        fm10k_pktmbuf_reset(mbuf, q->port_id);

                        /* write descriptor */
                        desc.q.pkt_addr = MBUF_DMA_ADDR_DEFAULT(mbuf);
                        desc.q.hdr_addr = MBUF_DMA_ADDR_DEFAULT(mbuf);
                        q->hw_ring[q->next_alloc] = desc;
                }
                FM10K_PCI_REG_WRITE(q->tail_ptr, q->next_trigger);
                q->next_trigger += q->alloc_thresh;
                if (q->next_trigger >= q->nb_desc) {
                        q->next_trigger = q->alloc_thresh - 1;
                        q->next_alloc = 0;
                }
        }

        return count;
}

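/*
 * Scattered RX burst: same ring handling as fm10k_recv_pkts(), but a packet
 * may span several descriptors. Segments are chained onto first_seg until a
 * descriptor with the EOP (end of packet) bit is seen; only then is the
 * assembled chain handed to the caller. A partially received chain is saved
 * in pkt_first_seg/pkt_last_seg so it can be completed on the next call.
 */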
uint16_t
fm10k_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
                                uint16_t nb_pkts)
{
        struct rte_mbuf *mbuf;
        union fm10k_rx_desc desc;
        struct fm10k_rx_queue *q = rx_queue;
        uint16_t count = 0;
        uint16_t nb_rcv, nb_seg;
        int alloc = 0;
        uint16_t next_dd;
        struct rte_mbuf *first_seg = q->pkt_first_seg;
        struct rte_mbuf *last_seg = q->pkt_last_seg;
        int ret;

        next_dd = q->next_dd;
        nb_rcv = 0;

        nb_seg = RTE_MIN(nb_pkts, q->alloc_thresh);
        for (count = 0; count < nb_seg; count++) {
                if (!(q->hw_ring[next_dd].d.staterr & FM10K_RXD_STATUS_DD))
                        break;
                mbuf = q->sw_ring[next_dd];
                desc = q->hw_ring[next_dd];
#ifdef RTE_LIBRTE_FM10K_DEBUG_RX
                dump_rxd(&desc);
#endif

                if (++next_dd == q->nb_desc) {
                        next_dd = 0;
                        alloc = 1;
                }

                /* Prefetch next mbuf while processing current one. */
                rte_prefetch0(q->sw_ring[next_dd]);

                /*
                 * When next RX descriptor is on a cache-line boundary,
                 * prefetch the next 4 RX descriptors and the next 8 pointers
                 * to mbufs.
                 */
                if ((next_dd & 0x3) == 0) {
                        rte_prefetch0(&q->hw_ring[next_dd]);
                        rte_prefetch0(&q->sw_ring[next_dd]);
                }

                /* Fill data length */
                rte_pktmbuf_data_len(mbuf) = desc.w.length;

                /*
                 * If this is the first buffer of the received packet,
                 * set the pointer to the first mbuf of the packet and
                 * initialize its context.
                 * Otherwise, update the total length and the number of
                 * segments of the current scattered packet, and update the
                 * pointer to the last mbuf of the current packet.
                 */
                if (!first_seg) {
                        first_seg = mbuf;
                        first_seg->pkt_len = desc.w.length;
                } else {
                        first_seg->pkt_len =
                                        (uint16_t)(first_seg->pkt_len +
                                        rte_pktmbuf_data_len(mbuf));
                        first_seg->nb_segs++;
                        last_seg->next = mbuf;
                }

                /*
                 * If this is not the last buffer of the received packet,
                 * update the pointer to the last mbuf of the current scattered
                 * packet and continue to parse the RX ring.
                 */
                if (!(desc.d.staterr & FM10K_RXD_STATUS_EOP)) {
                        last_seg = mbuf;
                        continue;
                }

                first_seg->ol_flags = 0;
#ifdef RTE_LIBRTE_FM10K_RX_OLFLAGS_ENABLE
                rx_desc_to_ol_flags(first_seg, &desc);
#endif
                first_seg->hash.rss = desc.d.rss;
                /**
                 * Packets received by the fm10k device always carry at least
                 * one VLAN tag; packets that arrive untagged get the port's
                 * default VLAN tag. The PKT_RX_VLAN_PKT flag is therefore
                 * always set and vlan_tci is valid for every RX mbuf.
                 */
                first_seg->ol_flags |= PKT_RX_VLAN_PKT;
                first_seg->vlan_tci = desc.w.vlan;
                /**
                 * mbuf->vlan_tci_outer is otherwise unused by the fm10k
                 * driver, so it is reused to hold the sglort value.
                 */
                if (q->rx_ftag_en)
                        first_seg->vlan_tci_outer =
                                rte_le_to_cpu_16(desc.w.sglort);

                /* Prefetch data of first segment, if configured to do so. */
                rte_packet_prefetch((char *)first_seg->buf_addr +
                        first_seg->data_off);

                /*
                 * Store the mbuf address into the next entry of the array
                 * of returned packets.
                 */
                rx_pkts[nb_rcv++] = first_seg;

                /*
                 * Setup receipt context for a new packet.
                 */
                first_seg = NULL;
        }

        q->next_dd = next_dd;

        if ((q->next_dd > q->next_trigger) || (alloc == 1)) {
                ret = rte_mempool_get_bulk(q->mp,
                                        (void **)&q->sw_ring[q->next_alloc],
                                        q->alloc_thresh);

                if (unlikely(ret != 0)) {
                        uint8_t port = q->port_id;
                        PMD_RX_LOG(ERR, "Failed to alloc mbuf");
                        /*
                         * Need to restore next_dd if we cannot allocate new
                         * buffers to replenish the old ones.
                         */
                        q->next_dd = (q->next_dd + q->nb_desc - count) %
                                                                q->nb_desc;
                        rte_eth_devices[port].data->rx_mbuf_alloc_failed++;
                        return 0;
                }

                for (; q->next_alloc <= q->next_trigger; ++q->next_alloc) {
                        mbuf = q->sw_ring[q->next_alloc];

                        /* setup static mbuf fields */
                        fm10k_pktmbuf_reset(mbuf, q->port_id);

                        /* write descriptor */
                        desc.q.pkt_addr = MBUF_DMA_ADDR_DEFAULT(mbuf);
                        desc.q.hdr_addr = MBUF_DMA_ADDR_DEFAULT(mbuf);
                        q->hw_ring[q->next_alloc] = desc;
                }
                FM10K_PCI_REG_WRITE(q->tail_ptr, q->next_trigger);
                q->next_trigger += q->alloc_thresh;
                if (q->next_trigger >= q->nb_desc) {
                        q->next_trigger = q->alloc_thresh - 1;
                        q->next_alloc = 0;
                }
        }

        q->pkt_first_seg = first_seg;
        q->pkt_last_seg = last_seg;

        return nb_rcv;
}

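/*
 * rx_descriptor_done callback: report whether the descriptor at the given
 * offset from the queue's current read position (next_dd) has its DD bit
 * set, i.e. whether the hardware has already written a packet into it.
 */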
int
fm10k_dev_rx_descriptor_done(void *rx_queue, uint16_t offset)
{
        volatile union fm10k_rx_desc *rxdp;
        struct fm10k_rx_queue *rxq = rx_queue;
        uint16_t desc;
        int ret;

        if (unlikely(offset >= rxq->nb_desc)) {
                PMD_DRV_LOG(ERR, "Invalid RX descriptor offset %u", offset);
                return 0;
        }

        desc = rxq->next_dd + offset;
        if (desc >= rxq->nb_desc)
                desc -= rxq->nb_desc;

        rxdp = &rxq->hw_ring[desc];

        ret = !!(rxdp->w.status &
                        rte_cpu_to_le_16(FM10K_RXD_STATUS_DD));

        return ret;
}

/*
 * Free multiple TX mbufs at a time. Mbufs that belong to the same mempool
 * are batched and returned with a single rte_mempool_put_bulk() call; the
 * batch is flushed whenever an mbuf from a different pool is encountered.
 *
 * @txep: entry in the software ring at which to start freeing
 * @num: number of descriptors to free
 */
static inline void tx_free_bulk_mbuf(struct rte_mbuf **txep, int num)
{
        struct rte_mbuf *m, *free[RTE_FM10K_TX_MAX_FREE_BUF_SZ];
        int i;
        int nb_free = 0;

        if (unlikely(num == 0))
                return;

        m = __rte_pktmbuf_prefree_seg(txep[0]);
        if (likely(m != NULL)) {
                free[0] = m;
                nb_free = 1;
                for (i = 1; i < num; i++) {
                        m = __rte_pktmbuf_prefree_seg(txep[i]);
                        if (likely(m != NULL)) {
                                if (likely(m->pool == free[0]->pool))
                                        free[nb_free++] = m;
                                else {
                                        rte_mempool_put_bulk(free[0]->pool,
                                                        (void *)free, nb_free);
                                        free[0] = m;
                                        nb_free = 1;
                                }
                        }
                        txep[i] = NULL;
                }
                rte_mempool_put_bulk(free[0]->pool, (void **)free, nb_free);
        } else {
                for (i = 1; i < num; i++) {
                        m = __rte_pktmbuf_prefree_seg(txep[i]);
                        if (m != NULL)
                                rte_mempool_put(m->pool, m);
                        txep[i] = NULL;
                }
        }
}

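/*
 * Reclaim completed TX descriptors. The queue keeps a FIFO (rs_tracker) of
 * descriptor indexes on which the RS (report status) flag was set; once the
 * hardware marks the index at the head of that FIFO as DONE, every
 * descriptor from last_free up to and including that index has completed,
 * so the associated mbufs are freed and nb_free is credited, with the ring
 * wrap-around handled explicitly.
 */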
static inline void tx_free_descriptors(struct fm10k_tx_queue *q)
{
        uint16_t next_rs, count = 0;

        next_rs = fifo_peek(&q->rs_tracker);
        if (!(q->hw_ring[next_rs].flags & FM10K_TXD_FLAG_DONE))
                return;

        /* the DONE flag is set on this descriptor so remove the ID
         * from the RS bit tracker and free the buffers */
        fifo_remove(&q->rs_tracker);

        /* wrap around? if so, free buffers from last_free up to but NOT
         * including nb_desc */
        if (q->last_free > next_rs) {
                count = q->nb_desc - q->last_free;
                tx_free_bulk_mbuf(&q->sw_ring[q->last_free], count);
                q->last_free = 0;
        }

        /* adjust free descriptor count before the next loop */
        q->nb_free += count + (next_rs + 1 - q->last_free);

        /* free buffers from last_free, up to and including next_rs */
        if (q->last_free <= next_rs) {
                count = next_rs - q->last_free + 1;
                tx_free_bulk_mbuf(&q->sw_ring[q->last_free], count);
                q->last_free += count;
        }

        if (q->last_free == q->nb_desc)
                q->last_free = 0;
}

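/*
 * Place one packet (possibly multi-segment) on the TX ring. The LAST flag is
 * set on the packet's final descriptor, and the RS flag is added roughly
 * every rs_thresh descriptors so that only those descriptors need to be
 * checked for completion (see tx_free_descriptors()). Checksum, VLAN, FTAG
 * and TSO fields are filled from the mbuf offload metadata on the packet's
 * first descriptor.
 */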
static inline void tx_xmit_pkt(struct fm10k_tx_queue *q, struct rte_mbuf *mb)
{
        uint16_t last_id;
        uint8_t flags, hdrlen;

        /* always set the LAST flag on the last descriptor used to
         * transmit the packet */
        flags = FM10K_TXD_FLAG_LAST;
        last_id = q->next_free + mb->nb_segs - 1;
        if (last_id >= q->nb_desc)
                last_id = last_id - q->nb_desc;

        /* but only set the RS flag on the last descriptor if rs_thresh
         * descriptors will be used since the RS flag was last set */
        if ((q->nb_used + mb->nb_segs) >= q->rs_thresh) {
                flags |= FM10K_TXD_FLAG_RS;
                fifo_insert(&q->rs_tracker, last_id);
                q->nb_used = 0;
        } else {
                q->nb_used = q->nb_used + mb->nb_segs;
        }

        q->nb_free -= mb->nb_segs;

        q->hw_ring[q->next_free].flags = 0;
        if (q->tx_ftag_en)
                q->hw_ring[q->next_free].flags |= FM10K_TXD_FLAG_FTAG;
        /* set checksum flags on first descriptor of packet. SCTP checksum
         * offload is not supported, but we do not explicitly check for this
         * case in favor of greatly simplified processing. */
        if (mb->ol_flags & (PKT_TX_IP_CKSUM | PKT_TX_L4_MASK | PKT_TX_TCP_SEG))
                q->hw_ring[q->next_free].flags |= FM10K_TXD_FLAG_CSUM;

        /* set vlan if requested */
        if (mb->ol_flags & PKT_TX_VLAN_PKT)
                q->hw_ring[q->next_free].vlan = mb->vlan_tci;

        q->sw_ring[q->next_free] = mb;
        q->hw_ring[q->next_free].buffer_addr =
                        rte_cpu_to_le_64(MBUF_DMA_ADDR(mb));
        q->hw_ring[q->next_free].buflen =
                        rte_cpu_to_le_16(rte_pktmbuf_data_len(mb));

        if (mb->ol_flags & PKT_TX_TCP_SEG) {
                hdrlen = mb->outer_l2_len + mb->outer_l3_len + mb->l2_len +
                        mb->l3_len + mb->l4_len;
                if (q->hw_ring[q->next_free].flags & FM10K_TXD_FLAG_FTAG)
                        hdrlen += sizeof(struct fm10k_ftag);

                if (likely((hdrlen >= FM10K_TSO_MIN_HEADERLEN) &&
                                (hdrlen <= FM10K_TSO_MAX_HEADERLEN) &&
                                (mb->tso_segsz >= FM10K_TSO_MINMSS))) {
                        q->hw_ring[q->next_free].mss = mb->tso_segsz;
                        q->hw_ring[q->next_free].hdrlen = hdrlen;
                }
        }

        if (++q->next_free == q->nb_desc)
                q->next_free = 0;

        /* fill up the rings */
        for (mb = mb->next; mb != NULL; mb = mb->next) {
                q->sw_ring[q->next_free] = mb;
                q->hw_ring[q->next_free].buffer_addr =
                                rte_cpu_to_le_64(MBUF_DMA_ADDR(mb));
                q->hw_ring[q->next_free].buflen =
                                rte_cpu_to_le_16(rte_pktmbuf_data_len(mb));
                q->hw_ring[q->next_free].flags = 0;
                if (++q->next_free == q->nb_desc)
                        q->next_free = 0;
        }

        q->hw_ring[last_id].flags |= flags;
}

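/*
 * TX burst: for each packet, reclaim completed descriptors when the number
 * of free slots drops below free_thresh, stop early if the whole packet no
 * longer fits (or the mbuf segment chain looks inconsistent), then queue it
 * with tx_xmit_pkt(). A single tail-register write at the end kicks the
 * hardware for everything queued in this call.
 */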
uint16_t
fm10k_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
        uint16_t nb_pkts)
{
        struct fm10k_tx_queue *q = tx_queue;
        struct rte_mbuf *mb;
        uint16_t count;

        for (count = 0; count < nb_pkts; ++count) {
                mb = tx_pkts[count];

                /* running low on descriptors? try to free some... */
                if (q->nb_free < q->free_thresh)
                        tx_free_descriptors(q);

                /* make sure there are enough free descriptors to transmit the
                 * entire packet before doing anything */
                if (q->nb_free < mb->nb_segs)
                        break;

                /* sanity check to make sure the mbuf is valid */
                if ((mb->nb_segs == 0) ||
                    ((mb->nb_segs > 1) && (mb->next == NULL)))
                        break;

                /* process the packet */
                tx_xmit_pkt(q, mb);
        }

        /* update the tail pointer if any packets were processed */
        if (likely(count > 0))
                FM10K_PCI_REG_WRITE(q->tail_ptr, q->next_free);

        return count;
}