New upstream version 17.05.1
[deb_dpdk.git] / drivers / net / e1000 / igb_rxtx.c
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33
34 #include <sys/queue.h>
35
36 #include <stdio.h>
37 #include <stdlib.h>
38 #include <string.h>
39 #include <errno.h>
40 #include <stdint.h>
41 #include <stdarg.h>
42 #include <inttypes.h>
43
44 #include <rte_interrupts.h>
45 #include <rte_byteorder.h>
46 #include <rte_common.h>
47 #include <rte_log.h>
48 #include <rte_debug.h>
49 #include <rte_pci.h>
50 #include <rte_memory.h>
51 #include <rte_memcpy.h>
52 #include <rte_memzone.h>
53 #include <rte_launch.h>
54 #include <rte_eal.h>
55 #include <rte_per_lcore.h>
56 #include <rte_lcore.h>
57 #include <rte_atomic.h>
58 #include <rte_branch_prediction.h>
59 #include <rte_mempool.h>
60 #include <rte_malloc.h>
61 #include <rte_mbuf.h>
62 #include <rte_ether.h>
63 #include <rte_ethdev.h>
64 #include <rte_prefetch.h>
65 #include <rte_udp.h>
66 #include <rte_tcp.h>
67 #include <rte_sctp.h>
68 #include <rte_net.h>
69 #include <rte_string_fns.h>
70
71 #include "e1000_logs.h"
72 #include "base/e1000_api.h"
73 #include "e1000_ethdev.h"
74
75 #ifdef RTE_LIBRTE_IEEE1588
76 #define IGB_TX_IEEE1588_TMST PKT_TX_IEEE1588_TMST
77 #else
78 #define IGB_TX_IEEE1588_TMST 0
79 #endif
80 /* Bit mask to indicate what bits are required for building the TX context */
81 #define IGB_TX_OFFLOAD_MASK (                    \
82                 PKT_TX_VLAN_PKT |                \
83                 PKT_TX_IP_CKSUM |                \
84                 PKT_TX_L4_MASK |                 \
85                 PKT_TX_TCP_SEG |                 \
86                 IGB_TX_IEEE1588_TMST)
87
88 #define IGB_TX_OFFLOAD_NOTSUP_MASK \
89                 (PKT_TX_OFFLOAD_MASK ^ IGB_TX_OFFLOAD_MASK)
90
91 /**
92  * Structure associated with each descriptor of the RX ring of a RX queue.
93  */
94 struct igb_rx_entry {
95         struct rte_mbuf *mbuf; /**< mbuf associated with RX descriptor. */
96 };
97
98 /**
99  * Structure associated with each descriptor of the TX ring of a TX queue.
100  */
101 struct igb_tx_entry {
102         struct rte_mbuf *mbuf; /**< mbuf associated with TX desc, if any. */
103         uint16_t next_id; /**< Index of next descriptor in ring. */
104         uint16_t last_id; /**< Index of last scattered descriptor. */
105 };
106
107 /**
108  * Structure associated with each RX queue.
109  */
110 struct igb_rx_queue {
111         struct rte_mempool  *mb_pool;   /**< mbuf pool to populate RX ring. */
112         volatile union e1000_adv_rx_desc *rx_ring; /**< RX ring virtual address. */
113         uint64_t            rx_ring_phys_addr; /**< RX ring DMA address. */
114         volatile uint32_t   *rdt_reg_addr; /**< RDT register address. */
115         volatile uint32_t   *rdh_reg_addr; /**< RDH register address. */
116         struct igb_rx_entry *sw_ring;   /**< address of RX software ring. */
117         struct rte_mbuf *pkt_first_seg; /**< First segment of current packet. */
118         struct rte_mbuf *pkt_last_seg;  /**< Last segment of current packet. */
119         uint16_t            nb_rx_desc; /**< number of RX descriptors. */
120         uint16_t            rx_tail;    /**< current value of RDT register. */
121         uint16_t            nb_rx_hold; /**< number of held free RX desc. */
122         uint16_t            rx_free_thresh; /**< max free RX desc to hold. */
123         uint16_t            queue_id;   /**< RX queue index. */
124         uint16_t            reg_idx;    /**< RX queue register index. */
125         uint8_t             port_id;    /**< Device port identifier. */
126         uint8_t             pthresh;    /**< Prefetch threshold register. */
127         uint8_t             hthresh;    /**< Host threshold register. */
128         uint8_t             wthresh;    /**< Write-back threshold register. */
129         uint8_t             crc_len;    /**< 0 if CRC stripped, 4 otherwise. */
130         uint8_t             drop_en;  /**< If not 0, set SRRCTL.Drop_En. */
131 };
132
133 /**
134  * Hardware context number
135  */
136 enum igb_advctx_num {
137         IGB_CTX_0    = 0, /**< CTX0    */
138         IGB_CTX_1    = 1, /**< CTX1    */
139         IGB_CTX_NUM  = 2, /**< CTX_NUM */
140 };
141
142 /** Offload features */
143 union igb_tx_offload {
144         uint64_t data;
145         struct {
146                 uint64_t l3_len:9; /**< L3 (IP) Header Length. */
147                 uint64_t l2_len:7; /**< L2 (MAC) Header Length. */
148                 uint64_t vlan_tci:16;  /**< VLAN Tag Control Identifier(CPU order). */
149                 uint64_t l4_len:8; /**< L4 (TCP/UDP) Header Length. */
150                 uint64_t tso_segsz:16; /**< TCP TSO segment size. */
151
152                 /* uint64_t unused:8; */
153         };
154 };
155
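/*
 * Bit layout of igb_tx_offload.data (low bits first), as assumed by the
 * compare masks below:
 *   bits  0-8  : l3_len
 *   bits  9-15 : l2_len
 *   bits 16-31 : vlan_tci
 *   bits 32-39 : l4_len
 *   bits 40-55 : tso_segsz
 */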
156 /*
157  * Compare masks for igb_tx_offload.data;
158  * must stay in sync with the igb_tx_offload layout.
159  */
160 #define TX_MACIP_LEN_CMP_MASK   0x000000000000FFFFULL /**< L2L3 header mask. */
161 #define TX_VLAN_CMP_MASK                0x00000000FFFF0000ULL /**< Vlan mask. */
162 #define TX_TCP_LEN_CMP_MASK             0x000000FF00000000ULL /**< TCP header mask. */
163 #define TX_TSO_MSS_CMP_MASK             0x00FFFF0000000000ULL /**< TSO segsz mask. */
164 /** Mac + IP + TCP + Mss mask. */
165 #define TX_TSO_CMP_MASK \
166         (TX_MACIP_LEN_CMP_MASK | TX_TCP_LEN_CMP_MASK | TX_TSO_MSS_CMP_MASK)
167
168 /**
169  * Structure used to check whether a new context descriptor needs to be built
170  */
171 struct igb_advctx_info {
172         uint64_t flags;           /**< ol_flags related to context build. */
173         /** tx offload: vlan, tso, l2-l3-l4 lengths. */
174         union igb_tx_offload tx_offload;
175         /** compare mask for tx offload. */
176         union igb_tx_offload tx_offload_mask;
177 };
178
179 /**
180  * Structure associated with each TX queue.
181  */
182 struct igb_tx_queue {
183         volatile union e1000_adv_tx_desc *tx_ring; /**< TX ring address */
184         uint64_t               tx_ring_phys_addr; /**< TX ring DMA address. */
185         struct igb_tx_entry    *sw_ring; /**< virtual address of SW ring. */
186         volatile uint32_t      *tdt_reg_addr; /**< Address of TDT register. */
187         uint32_t               txd_type;      /**< Device-specific TXD type */
188         uint16_t               nb_tx_desc;    /**< number of TX descriptors. */
189         uint16_t               tx_tail; /**< Current value of TDT register. */
190         uint16_t               tx_head;
191         /**< Index of first used TX descriptor. */
192         uint16_t               queue_id; /**< TX queue index. */
193         uint16_t               reg_idx;  /**< TX queue register index. */
194         uint8_t                port_id;  /**< Device port identifier. */
195         uint8_t                pthresh;  /**< Prefetch threshold register. */
196         uint8_t                hthresh;  /**< Host threshold register. */
197         uint8_t                wthresh;  /**< Write-back threshold register. */
198         uint32_t               ctx_curr;
199         /**< Index of the hardware context currently in use. */
200         uint32_t               ctx_start;
201         /**< Start context position for transmit queue. */
202         struct igb_advctx_info ctx_cache[IGB_CTX_NUM];
203         /**< Hardware context history.*/
204 };
205
206 #if 1
207 #define RTE_PMD_USE_PREFETCH
208 #endif
209
210 #ifdef RTE_PMD_USE_PREFETCH
211 #define rte_igb_prefetch(p)     rte_prefetch0(p)
212 #else
213 #define rte_igb_prefetch(p)     do {} while(0)
214 #endif
215
216 #ifdef RTE_PMD_PACKET_PREFETCH
217 #define rte_packet_prefetch(p) rte_prefetch1(p)
218 #else
219 #define rte_packet_prefetch(p)  do {} while(0)
220 #endif
221
222 /*
223  * Macro for VMDq feature for 1 GbE NIC.
224  */
225 #define E1000_VMOLR_SIZE                        (8)
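/* Hardware limits for TCP segmentation offload (see check_tso_para()). */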
226 #define IGB_TSO_MAX_HDRLEN                      (512)
227 #define IGB_TSO_MAX_MSS                         (9216)
228
229 /*********************************************************************
230  *
231  *  TX function
232  *
233  **********************************************************************/
234
235 /*
236  * There are some hardware limitations for TCP segmentation offload, so we
237  * should check whether the requested parameters are valid.
238  */
239 static inline uint64_t
240 check_tso_para(uint64_t ol_req, union igb_tx_offload ol_para)
241 {
242         if (!(ol_req & PKT_TX_TCP_SEG))
243                 return ol_req;
244         if ((ol_para.tso_segsz > IGB_TSO_MAX_MSS) || (ol_para.l2_len +
245                         ol_para.l3_len + ol_para.l4_len > IGB_TSO_MAX_HDRLEN)) {
246                 ol_req &= ~PKT_TX_TCP_SEG;
247                 ol_req |= PKT_TX_TCP_CKSUM;
248         }
249         return ol_req;
250 }
251
252 /*
253  * Advanced context descriptors are almost the same between igb and ixgbe.
254  * This is kept as a separate function to look for optimization opportunities;
255  * rework is required to use the pre-defined values.
256  */
257
258 static inline void
259 igbe_set_xmit_ctx(struct igb_tx_queue* txq,
260                 volatile struct e1000_adv_tx_context_desc *ctx_txd,
261                 uint64_t ol_flags, union igb_tx_offload tx_offload)
262 {
263         uint32_t type_tucmd_mlhl;
264         uint32_t mss_l4len_idx;
265         uint32_t ctx_idx, ctx_curr;
266         uint32_t vlan_macip_lens;
267         union igb_tx_offload tx_offload_mask;
268
269         ctx_curr = txq->ctx_curr;
270         ctx_idx = ctx_curr + txq->ctx_start;
271
272         tx_offload_mask.data = 0;
273         type_tucmd_mlhl = 0;
274
275         /* Specify which HW CTX to upload. */
276         mss_l4len_idx = (ctx_idx << E1000_ADVTXD_IDX_SHIFT);
277
278         if (ol_flags & PKT_TX_VLAN_PKT)
279                 tx_offload_mask.data |= TX_VLAN_CMP_MASK;
280
281                 /* check if TCP segmentation is required for this packet */
282         if (ol_flags & PKT_TX_TCP_SEG) {
283                 /* PKT_TX_IP_CKSUM implies an IPv4 packet */
284                 if (ol_flags & PKT_TX_IP_CKSUM)
285                         type_tucmd_mlhl = E1000_ADVTXD_TUCMD_IPV4 |
286                                 E1000_ADVTXD_TUCMD_L4T_TCP |
287                                 E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
288                 else
289                         type_tucmd_mlhl = E1000_ADVTXD_TUCMD_IPV6 |
290                                 E1000_ADVTXD_TUCMD_L4T_TCP |
291                                 E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
292
293                 tx_offload_mask.data |= TX_TSO_CMP_MASK;
294                 mss_l4len_idx |= tx_offload.tso_segsz << E1000_ADVTXD_MSS_SHIFT;
295                 mss_l4len_idx |= tx_offload.l4_len << E1000_ADVTXD_L4LEN_SHIFT;
296         } else { /* no TSO, check if hardware checksum is needed */
297                 if (ol_flags & (PKT_TX_IP_CKSUM | PKT_TX_L4_MASK))
298                         tx_offload_mask.data |= TX_MACIP_LEN_CMP_MASK;
299
300                 if (ol_flags & PKT_TX_IP_CKSUM)
301                         type_tucmd_mlhl = E1000_ADVTXD_TUCMD_IPV4;
302
303                 switch (ol_flags & PKT_TX_L4_MASK) {
304                 case PKT_TX_UDP_CKSUM:
305                         type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_UDP |
306                                 E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
307                         mss_l4len_idx |= sizeof(struct udp_hdr) << E1000_ADVTXD_L4LEN_SHIFT;
308                         break;
309                 case PKT_TX_TCP_CKSUM:
310                         type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_TCP |
311                                 E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
312                         mss_l4len_idx |= sizeof(struct tcp_hdr) << E1000_ADVTXD_L4LEN_SHIFT;
313                         break;
314                 case PKT_TX_SCTP_CKSUM:
315                         type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_SCTP |
316                                 E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
317                         mss_l4len_idx |= sizeof(struct sctp_hdr) << E1000_ADVTXD_L4LEN_SHIFT;
318                         break;
319                 default:
320                         type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_RSV |
321                                 E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
322                         break;
323                 }
324         }
325
326         txq->ctx_cache[ctx_curr].flags = ol_flags;
327         txq->ctx_cache[ctx_curr].tx_offload.data =
328                 tx_offload_mask.data & tx_offload.data;
329         txq->ctx_cache[ctx_curr].tx_offload_mask = tx_offload_mask;
330
331         ctx_txd->type_tucmd_mlhl = rte_cpu_to_le_32(type_tucmd_mlhl);
332         vlan_macip_lens = (uint32_t)tx_offload.data;
333         ctx_txd->vlan_macip_lens = rte_cpu_to_le_32(vlan_macip_lens);
334         ctx_txd->mss_l4len_idx = rte_cpu_to_le_32(mss_l4len_idx);
335         ctx_txd->seqnum_seed = 0;
336 }
337
338 /*
339  * Check which hardware context can be used. Use the existing match
340  * or create a new context descriptor.
341  */
342 static inline uint32_t
343 what_advctx_update(struct igb_tx_queue *txq, uint64_t flags,
344                 union igb_tx_offload tx_offload)
345 {
346         /* Check for a match with the currently selected context. */
347         if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) &&
348                 (txq->ctx_cache[txq->ctx_curr].tx_offload.data ==
349                 (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data & tx_offload.data)))) {
350                         return txq->ctx_curr;
351         }
352
353         /* Toggle to the other cached context and check it for a match. */
354         txq->ctx_curr ^= 1;
355         if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) &&
356                 (txq->ctx_cache[txq->ctx_curr].tx_offload.data ==
357                 (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data & tx_offload.data)))) {
358                         return txq->ctx_curr;
359         }
360
361         /* No match: a new context descriptor must be built. */
362         return IGB_CTX_NUM;
363 }
364
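/*
 * Translate checksum-related offload flags into the POPTS bits of the
 * olinfo_status field of an advanced TX data descriptor.
 */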
365 static inline uint32_t
366 tx_desc_cksum_flags_to_olinfo(uint64_t ol_flags)
367 {
368         static const uint32_t l4_olinfo[2] = {0, E1000_ADVTXD_POPTS_TXSM};
369         static const uint32_t l3_olinfo[2] = {0, E1000_ADVTXD_POPTS_IXSM};
370         uint32_t tmp;
371
372         tmp  = l4_olinfo[(ol_flags & PKT_TX_L4_MASK)  != PKT_TX_L4_NO_CKSUM];
373         tmp |= l3_olinfo[(ol_flags & PKT_TX_IP_CKSUM) != 0];
374         tmp |= l4_olinfo[(ol_flags & PKT_TX_TCP_SEG) != 0];
375         return tmp;
376 }
377
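/*
 * Translate VLAN insertion and TSO offload flags into the DCMD bits
 * (VLE, TSE) of an advanced TX data descriptor.
 */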
378 static inline uint32_t
379 tx_desc_vlan_flags_to_cmdtype(uint64_t ol_flags)
380 {
381         uint32_t cmdtype;
382         static uint32_t vlan_cmd[2] = {0, E1000_ADVTXD_DCMD_VLE};
383         static uint32_t tso_cmd[2] = {0, E1000_ADVTXD_DCMD_TSE};
384         cmdtype = vlan_cmd[(ol_flags & PKT_TX_VLAN_PKT) != 0];
385         cmdtype |= tso_cmd[(ol_flags & PKT_TX_TCP_SEG) != 0];
386         return cmdtype;
387 }
388
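/*
 * PMD transmit burst function: for each packet, build an optional context
 * descriptor plus one data descriptor per segment, then advance the TDT
 * register once for the whole burst.
 */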
389 uint16_t
390 eth_igb_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
391                uint16_t nb_pkts)
392 {
393         struct igb_tx_queue *txq;
394         struct igb_tx_entry *sw_ring;
395         struct igb_tx_entry *txe, *txn;
396         volatile union e1000_adv_tx_desc *txr;
397         volatile union e1000_adv_tx_desc *txd;
398         struct rte_mbuf     *tx_pkt;
399         struct rte_mbuf     *m_seg;
400         uint64_t buf_dma_addr;
401         uint32_t olinfo_status;
402         uint32_t cmd_type_len;
403         uint32_t pkt_len;
404         uint16_t slen;
405         uint64_t ol_flags;
406         uint16_t tx_end;
407         uint16_t tx_id;
408         uint16_t tx_last;
409         uint16_t nb_tx;
410         uint64_t tx_ol_req;
411         uint32_t new_ctx = 0;
412         uint32_t ctx = 0;
413         union igb_tx_offload tx_offload = {0};
414
415         txq = tx_queue;
416         sw_ring = txq->sw_ring;
417         txr     = txq->tx_ring;
418         tx_id   = txq->tx_tail;
419         txe = &sw_ring[tx_id];
420
421         for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
422                 tx_pkt = *tx_pkts++;
423                 pkt_len = tx_pkt->pkt_len;
424
425                 RTE_MBUF_PREFETCH_TO_FREE(txe->mbuf);
426
427                 /*
428                  * The number of descriptors that must be allocated for a
429                  * packet is the number of segments of that packet, plus 1
430                  * Context Descriptor for the VLAN Tag Identifier, if any.
431                  * Determine the last TX descriptor to allocate in the TX ring
432                  * for the packet, starting from the current position (tx_id)
433                  * in the ring.
434                  */
435                 tx_last = (uint16_t) (tx_id + tx_pkt->nb_segs - 1);
436
437                 ol_flags = tx_pkt->ol_flags;
438                 tx_ol_req = ol_flags & IGB_TX_OFFLOAD_MASK;
439
440                 /* Check whether a context descriptor needs to be built. */
441                 if (tx_ol_req) {
442                         tx_offload.l2_len = tx_pkt->l2_len;
443                         tx_offload.l3_len = tx_pkt->l3_len;
444                         tx_offload.l4_len = tx_pkt->l4_len;
445                         tx_offload.vlan_tci = tx_pkt->vlan_tci;
446                         tx_offload.tso_segsz = tx_pkt->tso_segsz;
447                         tx_ol_req = check_tso_para(tx_ol_req, tx_offload);
448
449                         ctx = what_advctx_update(txq, tx_ol_req, tx_offload);
450                         /* Only allocate a context descriptor if required. */
451                         new_ctx = (ctx == IGB_CTX_NUM);
452                         ctx = txq->ctx_curr + txq->ctx_start;
453                         tx_last = (uint16_t) (tx_last + new_ctx);
454                 }
455                 if (tx_last >= txq->nb_tx_desc)
456                         tx_last = (uint16_t) (tx_last - txq->nb_tx_desc);
457
458                 PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u pktlen=%u"
459                            " tx_first=%u tx_last=%u",
460                            (unsigned) txq->port_id,
461                            (unsigned) txq->queue_id,
462                            (unsigned) pkt_len,
463                            (unsigned) tx_id,
464                            (unsigned) tx_last);
465
466                 /*
467                  * Check if there are enough free descriptors in the TX ring
468                  * to transmit the next packet.
469                  * This operation is based on the two following rules:
470                  *
471                  *   1- Only check that the last needed TX descriptor can be
472                  *      allocated (by construction, if that descriptor is free,
473                  *      all intermediate ones are also free).
474                  *
475                  *      For this purpose, the index of the last TX descriptor
476                  *      used for a packet (the "last descriptor" of a packet)
477                  *      is recorded in the TX entries (the last one included)
478                  *      that are associated with all TX descriptors allocated
479                  *      for that packet.
480                  *
481                  *   2- Avoid allocating the last free TX descriptor of the
482                  *      ring, so that the TDT register is never set to the
483                  *      same value stored in parallel by the NIC in the TDH
484                  *      register, which would make the TX engine of the NIC
485                  *      enter a deadlock situation.
486                  *
487                  *      By extension, avoid allocating a free descriptor that
488                  *      belongs to the last set of free descriptors allocated
489                  *      to the same packet previously transmitted.
490                  */
491
492                 /*
493                  * The "last descriptor" of the packet that previously used
494                  * the last descriptor we are about to allocate, if any.
495                  */
496                 tx_end = sw_ring[tx_last].last_id;
497
498                 /*
499                  * The next descriptor following that "last descriptor" in the
500                  * ring.
501                  */
502                 tx_end = sw_ring[tx_end].next_id;
503
504                 /*
505                  * The "last descriptor" associated with that next descriptor.
506                  */
507                 tx_end = sw_ring[tx_end].last_id;
508
509                 /*
510                  * Check that this descriptor is free.
511                  */
512                 if (! (txr[tx_end].wb.status & E1000_TXD_STAT_DD)) {
513                         if (nb_tx == 0)
514                                 return 0;
515                         goto end_of_tx;
516                 }
517
518                 /*
519                  * Set common flags of all TX Data Descriptors.
520                  *
521                  * The following bits must be set in all Data Descriptors:
522                  *   - E1000_ADVTXD_DTYP_DATA
523                  *   - E1000_ADVTXD_DCMD_DEXT
524                  *
525                  * The following bits must be set in the first Data Descriptor
526                  * and are ignored in the other ones:
527                  *   - E1000_ADVTXD_DCMD_IFCS
528                  *   - E1000_ADVTXD_MAC_1588
529                  *   - E1000_ADVTXD_DCMD_VLE
530                  *
531                  * The following bits must only be set in the last Data
532                  * Descriptor:
533                  *   - E1000_TXD_CMD_EOP
534                  *
535                  * The following bits can be set in any Data Descriptor, but
536                  * are only set in the last Data Descriptor:
537                  *   - E1000_TXD_CMD_RS
538                  */
539                 cmd_type_len = txq->txd_type |
540                         E1000_ADVTXD_DCMD_IFCS | E1000_ADVTXD_DCMD_DEXT;
541                 if (tx_ol_req & PKT_TX_TCP_SEG)
542                         pkt_len -= (tx_pkt->l2_len + tx_pkt->l3_len + tx_pkt->l4_len);
543                 olinfo_status = (pkt_len << E1000_ADVTXD_PAYLEN_SHIFT);
544 #if defined(RTE_LIBRTE_IEEE1588)
545                 if (ol_flags & PKT_TX_IEEE1588_TMST)
546                         cmd_type_len |= E1000_ADVTXD_MAC_TSTAMP;
547 #endif
548                 if (tx_ol_req) {
549                         /* Setup TX Advanced context descriptor if required */
550                         if (new_ctx) {
551                                 volatile struct e1000_adv_tx_context_desc *
552                                     ctx_txd;
553
554                                 ctx_txd = (volatile struct
555                                     e1000_adv_tx_context_desc *)
556                                     &txr[tx_id];
557
558                                 txn = &sw_ring[txe->next_id];
559                                 RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);
560
561                                 if (txe->mbuf != NULL) {
562                                         rte_pktmbuf_free_seg(txe->mbuf);
563                                         txe->mbuf = NULL;
564                                 }
565
566                                 igbe_set_xmit_ctx(txq, ctx_txd, tx_ol_req, tx_offload);
567
568                                 txe->last_id = tx_last;
569                                 tx_id = txe->next_id;
570                                 txe = txn;
571                         }
572
573                         /* Setup the TX Advanced Data Descriptor */
574                         cmd_type_len  |= tx_desc_vlan_flags_to_cmdtype(tx_ol_req);
575                         olinfo_status |= tx_desc_cksum_flags_to_olinfo(tx_ol_req);
576                         olinfo_status |= (ctx << E1000_ADVTXD_IDX_SHIFT);
577                 }
578
579                 m_seg = tx_pkt;
580                 do {
581                         txn = &sw_ring[txe->next_id];
582                         txd = &txr[tx_id];
583
584                         if (txe->mbuf != NULL)
585                                 rte_pktmbuf_free_seg(txe->mbuf);
586                         txe->mbuf = m_seg;
587
588                         /*
589                          * Set up transmit descriptor.
590                          */
591                         slen = (uint16_t) m_seg->data_len;
592                         buf_dma_addr = rte_mbuf_data_dma_addr(m_seg);
593                         txd->read.buffer_addr =
594                                 rte_cpu_to_le_64(buf_dma_addr);
595                         txd->read.cmd_type_len =
596                                 rte_cpu_to_le_32(cmd_type_len | slen);
597                         txd->read.olinfo_status =
598                                 rte_cpu_to_le_32(olinfo_status);
599                         txe->last_id = tx_last;
600                         tx_id = txe->next_id;
601                         txe = txn;
602                         m_seg = m_seg->next;
603                 } while (m_seg != NULL);
604
605                 /*
606                  * The last packet data descriptor needs End Of Packet (EOP)
607                  * and Report Status (RS).
608                  */
609                 txd->read.cmd_type_len |=
610                         rte_cpu_to_le_32(E1000_TXD_CMD_EOP | E1000_TXD_CMD_RS);
611         }
612  end_of_tx:
613         rte_wmb();
614
615         /*
616          * Set the Transmit Descriptor Tail (TDT).
617          */
618         E1000_PCI_REG_WRITE_RELAXED(txq->tdt_reg_addr, tx_id);
619         PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u",
620                    (unsigned) txq->port_id, (unsigned) txq->queue_id,
621                    (unsigned) tx_id, (unsigned) nb_tx);
622         txq->tx_tail = tx_id;
623
624         return nb_tx;
625 }
626
627 /*********************************************************************
628  *
629  *  TX prep functions
630  *
631  **********************************************************************/
632 uint16_t
633 eth_igb_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
634                 uint16_t nb_pkts)
635 {
636         int i, ret;
637         struct rte_mbuf *m;
638
639         for (i = 0; i < nb_pkts; i++) {
640                 m = tx_pkts[i];
641
642                 /* Check some limitations for TSO in hardware */
643                 if (m->ol_flags & PKT_TX_TCP_SEG)
644                         if ((m->tso_segsz > IGB_TSO_MAX_MSS) ||
645                                         (m->l2_len + m->l3_len + m->l4_len >
646                                         IGB_TSO_MAX_HDRLEN)) {
647                                 rte_errno = EINVAL;
648                                 return i;
649                         }
650
651                 if (m->ol_flags & IGB_TX_OFFLOAD_NOTSUP_MASK) {
652                         rte_errno = ENOTSUP;
653                         return i;
654                 }
655
656 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
657                 ret = rte_validate_tx_offload(m);
658                 if (ret != 0) {
659                         rte_errno = ret;
660                         return i;
661                 }
662 #endif
663                 ret = rte_net_intel_cksum_prepare(m);
664                 if (ret != 0) {
665                         rte_errno = ret;
666                         return i;
667                 }
668         }
669
670         return i;
671 }
672
673 /*********************************************************************
674  *
675  *  RX functions
676  *
677  **********************************************************************/
678 #define IGB_PACKET_TYPE_IPV4              0X01
679 #define IGB_PACKET_TYPE_IPV4_TCP          0X11
680 #define IGB_PACKET_TYPE_IPV4_UDP          0X21
681 #define IGB_PACKET_TYPE_IPV4_SCTP         0X41
682 #define IGB_PACKET_TYPE_IPV4_EXT          0X03
683 #define IGB_PACKET_TYPE_IPV4_EXT_SCTP     0X43
684 #define IGB_PACKET_TYPE_IPV6              0X04
685 #define IGB_PACKET_TYPE_IPV6_TCP          0X14
686 #define IGB_PACKET_TYPE_IPV6_UDP          0X24
687 #define IGB_PACKET_TYPE_IPV6_EXT          0X0C
688 #define IGB_PACKET_TYPE_IPV6_EXT_TCP      0X1C
689 #define IGB_PACKET_TYPE_IPV6_EXT_UDP      0X2C
690 #define IGB_PACKET_TYPE_IPV4_IPV6         0X05
691 #define IGB_PACKET_TYPE_IPV4_IPV6_TCP     0X15
692 #define IGB_PACKET_TYPE_IPV4_IPV6_UDP     0X25
693 #define IGB_PACKET_TYPE_IPV4_IPV6_EXT     0X0D
694 #define IGB_PACKET_TYPE_IPV4_IPV6_EXT_TCP 0X1D
695 #define IGB_PACKET_TYPE_IPV4_IPV6_EXT_UDP 0X2D
696 #define IGB_PACKET_TYPE_MAX               0X80
697 #define IGB_PACKET_TYPE_MASK              0X7F
698 #define IGB_PACKET_TYPE_SHIFT             0X04
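/*
 * Translate the packet-type field of an RX descriptor into an RTE_PTYPE_*
 * value using a lookup table; ETQF-matched packets are reported as unknown.
 */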
699 static inline uint32_t
700 igb_rxd_pkt_info_to_pkt_type(uint16_t pkt_info)
701 {
702         static const uint32_t
703                 ptype_table[IGB_PACKET_TYPE_MAX] __rte_cache_aligned = {
704                 [IGB_PACKET_TYPE_IPV4] = RTE_PTYPE_L2_ETHER |
705                         RTE_PTYPE_L3_IPV4,
706                 [IGB_PACKET_TYPE_IPV4_EXT] = RTE_PTYPE_L2_ETHER |
707                         RTE_PTYPE_L3_IPV4_EXT,
708                 [IGB_PACKET_TYPE_IPV6] = RTE_PTYPE_L2_ETHER |
709                         RTE_PTYPE_L3_IPV6,
710                 [IGB_PACKET_TYPE_IPV4_IPV6] = RTE_PTYPE_L2_ETHER |
711                         RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
712                         RTE_PTYPE_INNER_L3_IPV6,
713                 [IGB_PACKET_TYPE_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
714                         RTE_PTYPE_L3_IPV6_EXT,
715                 [IGB_PACKET_TYPE_IPV4_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
716                         RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
717                         RTE_PTYPE_INNER_L3_IPV6_EXT,
718                 [IGB_PACKET_TYPE_IPV4_TCP] = RTE_PTYPE_L2_ETHER |
719                         RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP,
720                 [IGB_PACKET_TYPE_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
721                         RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP,
722                 [IGB_PACKET_TYPE_IPV4_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
723                         RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
724                         RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_TCP,
725                 [IGB_PACKET_TYPE_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER |
726                         RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_TCP,
727                 [IGB_PACKET_TYPE_IPV4_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER |
728                         RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
729                         RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_TCP,
730                 [IGB_PACKET_TYPE_IPV4_UDP] = RTE_PTYPE_L2_ETHER |
731                         RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP,
732                 [IGB_PACKET_TYPE_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
733                         RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP,
734                 [IGB_PACKET_TYPE_IPV4_IPV6_UDP] =  RTE_PTYPE_L2_ETHER |
735                         RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
736                         RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_UDP,
737                 [IGB_PACKET_TYPE_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER |
738                         RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_UDP,
739                 [IGB_PACKET_TYPE_IPV4_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER |
740                         RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
741                         RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_UDP,
742                 [IGB_PACKET_TYPE_IPV4_SCTP] = RTE_PTYPE_L2_ETHER |
743                         RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_SCTP,
744                 [IGB_PACKET_TYPE_IPV4_EXT_SCTP] = RTE_PTYPE_L2_ETHER |
745                         RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_SCTP,
746         };
747         if (unlikely(pkt_info & E1000_RXDADV_PKTTYPE_ETQF))
748                 return RTE_PTYPE_UNKNOWN;
749
750         pkt_info = (pkt_info >> IGB_PACKET_TYPE_SHIFT) & IGB_PACKET_TYPE_MASK;
751
752         return ptype_table[pkt_info];
753 }
754
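/*
 * Derive RSS (and, when IEEE 1588 support is compiled in, PTP) ol_flags
 * from the hlen_type_rss field of an RX descriptor.
 */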
755 static inline uint64_t
756 rx_desc_hlen_type_rss_to_pkt_flags(struct igb_rx_queue *rxq, uint32_t hl_tp_rs)
757 {
758         uint64_t pkt_flags = ((hl_tp_rs & 0x0F) == 0) ?  0 : PKT_RX_RSS_HASH;
759
760 #if defined(RTE_LIBRTE_IEEE1588)
761         static uint32_t ip_pkt_etqf_map[8] = {
762                 0, 0, 0, PKT_RX_IEEE1588_PTP,
763                 0, 0, 0, 0,
764         };
765
766         struct rte_eth_dev dev = rte_eth_devices[rxq->port_id];
767         struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev.data->dev_private);
768
769         /* EtherType is in bits 8:10 in Packet Type, and not in the default 0:2 */
770         if (hw->mac.type == e1000_i210)
771                 pkt_flags |= ip_pkt_etqf_map[(hl_tp_rs >> 12) & 0x07];
772         else
773                 pkt_flags |= ip_pkt_etqf_map[(hl_tp_rs >> 4) & 0x07];
774 #else
775         RTE_SET_USED(rxq);
776 #endif
777
778         return pkt_flags;
779 }
780
781 static inline uint64_t
782 rx_desc_status_to_pkt_flags(uint32_t rx_status)
783 {
784         uint64_t pkt_flags;
785
786         /* Check if VLAN present */
787         pkt_flags = ((rx_status & E1000_RXD_STAT_VP) ?
788                 PKT_RX_VLAN_PKT | PKT_RX_VLAN_STRIPPED : 0);
789
790 #if defined(RTE_LIBRTE_IEEE1588)
791         if (rx_status & E1000_RXD_STAT_TMST)
792                 pkt_flags = pkt_flags | PKT_RX_IEEE1588_TMST;
793 #endif
794         return pkt_flags;
795 }
796
797 static inline uint64_t
798 rx_desc_error_to_pkt_flags(uint32_t rx_status)
799 {
800         /*
801          * Bit 30: IPE, IPv4 checksum error
802          * Bit 29: L4I, L4 integrity error
803          */
804
805         static uint64_t error_to_pkt_flags_map[4] = {
806                 PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD,
807                 PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD,
808                 PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD,
809                 PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD
810         };
811         return error_to_pkt_flags_map[(rx_status >>
812                 E1000_RXD_ERR_CKSUM_BIT) & E1000_RXD_ERR_CKSUM_MSK];
813 }
814
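/*
 * PMD receive burst function for packets that fit in a single descriptor;
 * scattered packets are handled by eth_igb_recv_scattered_pkts().
 */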
815 uint16_t
816 eth_igb_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
817                uint16_t nb_pkts)
818 {
819         struct igb_rx_queue *rxq;
820         volatile union e1000_adv_rx_desc *rx_ring;
821         volatile union e1000_adv_rx_desc *rxdp;
822         struct igb_rx_entry *sw_ring;
823         struct igb_rx_entry *rxe;
824         struct rte_mbuf *rxm;
825         struct rte_mbuf *nmb;
826         union e1000_adv_rx_desc rxd;
827         uint64_t dma_addr;
828         uint32_t staterr;
829         uint32_t hlen_type_rss;
830         uint16_t pkt_len;
831         uint16_t rx_id;
832         uint16_t nb_rx;
833         uint16_t nb_hold;
834         uint64_t pkt_flags;
835
836         nb_rx = 0;
837         nb_hold = 0;
838         rxq = rx_queue;
839         rx_id = rxq->rx_tail;
840         rx_ring = rxq->rx_ring;
841         sw_ring = rxq->sw_ring;
842         while (nb_rx < nb_pkts) {
843                 /*
844                  * The order of operations here is important as the DD status
845                  * bit must not be read after any other descriptor fields.
846                  * rx_ring and rxdp are pointing to volatile data so the order
847                  * of accesses cannot be reordered by the compiler. If they were
848                  * not volatile, they could be reordered which could lead to
849                  * using invalid descriptor fields when read from rxd.
850                  */
851                 rxdp = &rx_ring[rx_id];
852                 staterr = rxdp->wb.upper.status_error;
853                 if (! (staterr & rte_cpu_to_le_32(E1000_RXD_STAT_DD)))
854                         break;
855                 rxd = *rxdp;
856
857                 /*
858                  * End of packet.
859                  *
860                  * If the E1000_RXD_STAT_EOP flag is not set, the RX packet is
861                  * likely to be invalid and to be dropped by the various
862                  * validation checks performed by the network stack.
863                  *
864                  * Allocate a new mbuf to replenish the RX ring descriptor.
865                  * If the allocation fails:
866                  *    - arrange for that RX descriptor to be the first one
867                  *      being parsed the next time the receive function is
868                  *      invoked [on the same queue].
869                  *
870                  *    - Stop parsing the RX ring and return immediately.
871                  *
872                  * This policy does not drop the packet received in the RX
873                  * descriptor for which the allocation of a new mbuf failed.
874                  * Thus, it allows that packet to be later retrieved once
875                  * mbufs have been freed in the meantime.
876                  * As a side effect, holding RX descriptors instead of
877                  * systematically giving them back to the NIC may lead to
878                  * RX ring exhaustion situations.
879                  * However, the NIC can gracefully prevent such situations
880                  * from happening by sending specific "back-pressure" flow
881                  * control frames to its peer(s).
882                  */
883                 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_id=%u "
884                            "staterr=0x%x pkt_len=%u",
885                            (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
886                            (unsigned) rx_id, (unsigned) staterr,
887                            (unsigned) rte_le_to_cpu_16(rxd.wb.upper.length));
888
889                 nmb = rte_mbuf_raw_alloc(rxq->mb_pool);
890                 if (nmb == NULL) {
891                         PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
892                                    "queue_id=%u", (unsigned) rxq->port_id,
893                                    (unsigned) rxq->queue_id);
894                         rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
895                         break;
896                 }
897
898                 nb_hold++;
899                 rxe = &sw_ring[rx_id];
900                 rx_id++;
901                 if (rx_id == rxq->nb_rx_desc)
902                         rx_id = 0;
903
904                 /* Prefetch next mbuf while processing current one. */
905                 rte_igb_prefetch(sw_ring[rx_id].mbuf);
906
907                 /*
908                  * When next RX descriptor is on a cache-line boundary,
909                  * prefetch the next 4 RX descriptors and the next 8 pointers
910                  * to mbufs.
911                  */
912                 if ((rx_id & 0x3) == 0) {
913                         rte_igb_prefetch(&rx_ring[rx_id]);
914                         rte_igb_prefetch(&sw_ring[rx_id]);
915                 }
916
917                 rxm = rxe->mbuf;
918                 rxe->mbuf = nmb;
919                 dma_addr =
920                         rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(nmb));
921                 rxdp->read.hdr_addr = 0;
922                 rxdp->read.pkt_addr = dma_addr;
923
924                 /*
925                  * Initialize the returned mbuf.
926                  * 1) setup generic mbuf fields:
927                  *    - number of segments,
928                  *    - next segment,
929                  *    - packet length,
930                  *    - RX port identifier.
931                  * 2) integrate hardware offload data, if any:
932                  *    - RSS flag & hash,
933                  *    - IP checksum flag,
934                  *    - VLAN TCI, if any,
935                  *    - error flags.
936                  */
937                 pkt_len = (uint16_t) (rte_le_to_cpu_16(rxd.wb.upper.length) -
938                                       rxq->crc_len);
939                 rxm->data_off = RTE_PKTMBUF_HEADROOM;
940                 rte_packet_prefetch((char *)rxm->buf_addr + rxm->data_off);
941                 rxm->nb_segs = 1;
942                 rxm->next = NULL;
943                 rxm->pkt_len = pkt_len;
944                 rxm->data_len = pkt_len;
945                 rxm->port = rxq->port_id;
946
947                 rxm->hash.rss = rxd.wb.lower.hi_dword.rss;
948                 hlen_type_rss = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
949                 /* Only valid if PKT_RX_VLAN_PKT set in pkt_flags */
950                 rxm->vlan_tci = rte_le_to_cpu_16(rxd.wb.upper.vlan);
951
952                 pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(rxq, hlen_type_rss);
953                 pkt_flags = pkt_flags | rx_desc_status_to_pkt_flags(staterr);
954                 pkt_flags = pkt_flags | rx_desc_error_to_pkt_flags(staterr);
955                 rxm->ol_flags = pkt_flags;
956                 rxm->packet_type = igb_rxd_pkt_info_to_pkt_type(rxd.wb.lower.
957                                                 lo_dword.hs_rss.pkt_info);
958
959                 /*
960                  * Store the mbuf address into the next entry of the array
961                  * of returned packets.
962                  */
963                 rx_pkts[nb_rx++] = rxm;
964         }
965         rxq->rx_tail = rx_id;
966
967         /*
968          * If the number of free RX descriptors is greater than the RX free
969          * threshold of the queue, advance the Receive Descriptor Tail (RDT)
970          * register.
971          * Update the RDT with the value of the last processed RX descriptor
972          * minus 1, to guarantee that the RDT register is never equal to the
973          * RDH register, which creates a "full" ring situation from the
974          * hardware point of view...
975          */
976         nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold);
977         if (nb_hold > rxq->rx_free_thresh) {
978                 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
979                            "nb_hold=%u nb_rx=%u",
980                            (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
981                            (unsigned) rx_id, (unsigned) nb_hold,
982                            (unsigned) nb_rx);
983                 rx_id = (uint16_t) ((rx_id == 0) ?
984                                      (rxq->nb_rx_desc - 1) : (rx_id - 1));
985                 E1000_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id);
986                 nb_hold = 0;
987         }
988         rxq->nb_rx_hold = nb_hold;
989         return nb_rx;
990 }
991
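/*
 * PMD receive burst function for scattered packets: segments spanning
 * several descriptors are chained into a single multi-segment mbuf.
 */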
992 uint16_t
993 eth_igb_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
994                          uint16_t nb_pkts)
995 {
996         struct igb_rx_queue *rxq;
997         volatile union e1000_adv_rx_desc *rx_ring;
998         volatile union e1000_adv_rx_desc *rxdp;
999         struct igb_rx_entry *sw_ring;
1000         struct igb_rx_entry *rxe;
1001         struct rte_mbuf *first_seg;
1002         struct rte_mbuf *last_seg;
1003         struct rte_mbuf *rxm;
1004         struct rte_mbuf *nmb;
1005         union e1000_adv_rx_desc rxd;
1006         uint64_t dma; /* Physical address of mbuf data buffer */
1007         uint32_t staterr;
1008         uint32_t hlen_type_rss;
1009         uint16_t rx_id;
1010         uint16_t nb_rx;
1011         uint16_t nb_hold;
1012         uint16_t data_len;
1013         uint64_t pkt_flags;
1014
1015         nb_rx = 0;
1016         nb_hold = 0;
1017         rxq = rx_queue;
1018         rx_id = rxq->rx_tail;
1019         rx_ring = rxq->rx_ring;
1020         sw_ring = rxq->sw_ring;
1021
1022         /*
1023          * Retrieve RX context of current packet, if any.
1024          */
1025         first_seg = rxq->pkt_first_seg;
1026         last_seg = rxq->pkt_last_seg;
1027
1028         while (nb_rx < nb_pkts) {
1029         next_desc:
1030                 /*
1031                  * The order of operations here is important as the DD status
1032                  * bit must not be read after any other descriptor fields.
1033                  * rx_ring and rxdp are pointing to volatile data so the order
1034                  * of accesses cannot be reordered by the compiler. If they were
1035                  * not volatile, they could be reordered which could lead to
1036                  * using invalid descriptor fields when read from rxd.
1037                  */
1038                 rxdp = &rx_ring[rx_id];
1039                 staterr = rxdp->wb.upper.status_error;
1040                 if (! (staterr & rte_cpu_to_le_32(E1000_RXD_STAT_DD)))
1041                         break;
1042                 rxd = *rxdp;
1043
1044                 /*
1045                  * Descriptor done.
1046                  *
1047                  * Allocate a new mbuf to replenish the RX ring descriptor.
1048                  * If the allocation fails:
1049                  *    - arrange for that RX descriptor to be the first one
1050                  *      being parsed the next time the receive function is
1051                  *      invoked [on the same queue].
1052                  *
1053                  *    - Stop parsing the RX ring and return immediately.
1054                  *
1055                  * This policy does not drop the packet received in the RX
1056                  * descriptor for which the allocation of a new mbuf failed.
1057                  * Thus, it allows that packet to be later retrieved once
1058                  * mbufs have been freed in the meantime.
1059                  * As a side effect, holding RX descriptors instead of
1060                  * systematically giving them back to the NIC may lead to
1061                  * RX ring exhaustion situations.
1062                  * However, the NIC can gracefully prevent such situations
1063                  * from happening by sending specific "back-pressure" flow
1064                  * control frames to its peer(s).
1065                  */
1066                 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_id=%u "
1067                            "staterr=0x%x data_len=%u",
1068                            (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
1069                            (unsigned) rx_id, (unsigned) staterr,
1070                            (unsigned) rte_le_to_cpu_16(rxd.wb.upper.length));
1071
1072                 nmb = rte_mbuf_raw_alloc(rxq->mb_pool);
1073                 if (nmb == NULL) {
1074                         PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
1075                                    "queue_id=%u", (unsigned) rxq->port_id,
1076                                    (unsigned) rxq->queue_id);
1077                         rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
1078                         break;
1079                 }
1080
1081                 nb_hold++;
1082                 rxe = &sw_ring[rx_id];
1083                 rx_id++;
1084                 if (rx_id == rxq->nb_rx_desc)
1085                         rx_id = 0;
1086
1087                 /* Prefetch next mbuf while processing current one. */
1088                 rte_igb_prefetch(sw_ring[rx_id].mbuf);
1089
1090                 /*
1091                  * When next RX descriptor is on a cache-line boundary,
1092                  * prefetch the next 4 RX descriptors and the next 8 pointers
1093                  * to mbufs.
1094                  */
1095                 if ((rx_id & 0x3) == 0) {
1096                         rte_igb_prefetch(&rx_ring[rx_id]);
1097                         rte_igb_prefetch(&sw_ring[rx_id]);
1098                 }
1099
1100                 /*
1101                  * Update RX descriptor with the physical address of the new
1102                  * data buffer of the new allocated mbuf.
1103                  */
1104                 rxm = rxe->mbuf;
1105                 rxe->mbuf = nmb;
1106                 dma = rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(nmb));
1107                 rxdp->read.pkt_addr = dma;
1108                 rxdp->read.hdr_addr = 0;
1109
1110                 /*
1111                  * Set data length & data buffer address of mbuf.
1112                  */
1113                 data_len = rte_le_to_cpu_16(rxd.wb.upper.length);
1114                 rxm->data_len = data_len;
1115                 rxm->data_off = RTE_PKTMBUF_HEADROOM;
1116
1117                 /*
1118                  * If this is the first buffer of the received packet,
1119                  * set the pointer to the first mbuf of the packet and
1120                  * initialize its context.
1121                  * Otherwise, update the total length and the number of segments
1122                  * of the current scattered packet, and update the pointer to
1123                  * the last mbuf of the current packet.
1124                  */
1125                 if (first_seg == NULL) {
1126                         first_seg = rxm;
1127                         first_seg->pkt_len = data_len;
1128                         first_seg->nb_segs = 1;
1129                 } else {
1130                         first_seg->pkt_len += data_len;
1131                         first_seg->nb_segs++;
1132                         last_seg->next = rxm;
1133                 }
1134
1135                 /*
1136                  * If this is not the last buffer of the received packet,
1137                  * update the pointer to the last mbuf of the current scattered
1138                  * packet and continue to parse the RX ring.
1139                  */
1140                 if (! (staterr & E1000_RXD_STAT_EOP)) {
1141                         last_seg = rxm;
1142                         goto next_desc;
1143                 }
1144
1145                 /*
1146                  * This is the last buffer of the received packet.
1147                  * If the CRC is not stripped by the hardware:
1148                  *   - Subtract the CRC length from the total packet length.
1149                  *   - If the last buffer only contains the whole CRC or a part
1150                  *     of it, free the mbuf associated to the last buffer.
1151                  *     If part of the CRC is also contained in the previous
1152                  *     mbuf, subtract the length of that CRC part from the
1153                  *     data length of the previous mbuf.
1154                  */
1155                 rxm->next = NULL;
1156                 if (unlikely(rxq->crc_len > 0)) {
1157                         first_seg->pkt_len -= ETHER_CRC_LEN;
1158                         if (data_len <= ETHER_CRC_LEN) {
1159                                 rte_pktmbuf_free_seg(rxm);
1160                                 first_seg->nb_segs--;
1161                                 last_seg->data_len = (uint16_t)
1162                                         (last_seg->data_len -
1163                                          (ETHER_CRC_LEN - data_len));
1164                                 last_seg->next = NULL;
1165                         } else
1166                                 rxm->data_len =
1167                                         (uint16_t) (data_len - ETHER_CRC_LEN);
1168                 }
1169
1170                 /*
1171                  * Initialize the first mbuf of the returned packet:
1172                  *    - RX port identifier,
1173                  *    - hardware offload data, if any:
1174                  *      - RSS flag & hash,
1175                  *      - IP checksum flag,
1176                  *      - VLAN TCI, if any,
1177                  *      - error flags.
1178                  */
1179                 first_seg->port = rxq->port_id;
1180                 first_seg->hash.rss = rxd.wb.lower.hi_dword.rss;
1181
1182                 /*
1183                  * The vlan_tci field is only valid when PKT_RX_VLAN_PKT is
1184                  * set in the pkt_flags field.
1185                  */
1186                 first_seg->vlan_tci = rte_le_to_cpu_16(rxd.wb.upper.vlan);
1187                 hlen_type_rss = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
1188                 pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(rxq, hlen_type_rss);
1189                 pkt_flags = pkt_flags | rx_desc_status_to_pkt_flags(staterr);
1190                 pkt_flags = pkt_flags | rx_desc_error_to_pkt_flags(staterr);
1191                 first_seg->ol_flags = pkt_flags;
1192                 first_seg->packet_type = igb_rxd_pkt_info_to_pkt_type(rxd.wb.
1193                                         lower.lo_dword.hs_rss.pkt_info);
1194
1195                 /* Prefetch data of first segment, if configured to do so. */
1196                 rte_packet_prefetch((char *)first_seg->buf_addr +
1197                         first_seg->data_off);
1198
1199                 /*
1200                  * Store the mbuf address into the next entry of the array
1201                  * of returned packets.
1202                  */
1203                 rx_pkts[nb_rx++] = first_seg;
1204
1205                 /*
1206                  * Setup receipt context for a new packet.
1207                  */
1208                 first_seg = NULL;
1209         }
1210
1211         /*
1212          * Record index of the next RX descriptor to probe.
1213          */
1214         rxq->rx_tail = rx_id;
1215
1216         /*
1217          * Save receive context.
1218          */
1219         rxq->pkt_first_seg = first_seg;
1220         rxq->pkt_last_seg = last_seg;
1221
1222         /*
1223          * If the number of free RX descriptors is greater than the RX free
1224          * threshold of the queue, advance the Receive Descriptor Tail (RDT)
1225          * register.
1226          * Update the RDT with the value of the last processed RX descriptor
1227          * minus 1, to guarantee that the RDT register is never equal to the
1228          * RDH register, which creates a "full" ring situation from the
1229          * hardware point of view...
1230          */
1231         nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold);
1232         if (nb_hold > rxq->rx_free_thresh) {
1233                 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
1234                            "nb_hold=%u nb_rx=%u",
1235                            (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
1236                            (unsigned) rx_id, (unsigned) nb_hold,
1237                            (unsigned) nb_rx);
1238                 rx_id = (uint16_t) ((rx_id == 0) ?
1239                                      (rxq->nb_rx_desc - 1) : (rx_id - 1));
1240                 E1000_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id);
1241                 nb_hold = 0;
1242         }
1243         rxq->nb_rx_hold = nb_hold;
1244         return nb_rx;
1245 }
1246
1247 /*
1248  * Maximum number of Ring Descriptors.
1249  *
1250  * Since RDLEN/TDLEN should be a multiple of 128 bytes, the number of ring
1251  * descriptors should meet the following condition:
1252  *      (num_ring_desc * sizeof(struct e1000_rx/tx_desc)) % 128 == 0
1253  */
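/*
 * For instance, both the advanced RX and TX descriptors used here
 * (union e1000_adv_rx_desc / union e1000_adv_tx_desc) are 16 bytes, so the
 * condition reduces to the descriptor count being a multiple of
 * 128 / 16 = 8, which is what the IGB_RXD_ALIGN / IGB_TXD_ALIGN checks in
 * the queue setup functions below enforce.
 */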
1254
1255 static void
1256 igb_tx_queue_release_mbufs(struct igb_tx_queue *txq)
1257 {
1258         unsigned i;
1259
1260         if (txq->sw_ring != NULL) {
1261                 for (i = 0; i < txq->nb_tx_desc; i++) {
1262                         if (txq->sw_ring[i].mbuf != NULL) {
1263                                 rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
1264                                 txq->sw_ring[i].mbuf = NULL;
1265                         }
1266                 }
1267         }
1268 }
1269
1270 static void
1271 igb_tx_queue_release(struct igb_tx_queue *txq)
1272 {
1273         if (txq != NULL) {
1274                 igb_tx_queue_release_mbufs(txq);
1275                 rte_free(txq->sw_ring);
1276                 rte_free(txq);
1277         }
1278 }
1279
1280 void
1281 eth_igb_tx_queue_release(void *txq)
1282 {
1283         igb_tx_queue_release(txq);
1284 }
1285
1286 static int
1287 igb_tx_done_cleanup(struct igb_tx_queue *txq, uint32_t free_cnt)
1288 {
1289         struct igb_tx_entry *sw_ring;
1290         volatile union e1000_adv_tx_desc *txr;
1291         uint16_t tx_first; /* First segment analyzed. */
1292         uint16_t tx_id;    /* Current segment being processed. */
1293         uint16_t tx_last;  /* Last segment in the current packet. */
1294         uint16_t tx_next;  /* First segment of the next packet. */
1295         int count;
1296
1297         if (txq != NULL) {
1298                 count = 0;
1299                 sw_ring = txq->sw_ring;
1300                 txr = txq->tx_ring;
1301
1302                 /*
1303                  * tx_tail is the last sent packet on the sw_ring. Go to the end
1304                  * of that packet (the last segment in the packet chain); the
1305                  * segment after it is the first segment of the oldest packet
1306                  * in the sw_ring, and it is the first packet we attempt to
1307                  * free.
1308                  */
1309
1310                 /* Get last segment in most recently added packet. */
1311                 tx_first = sw_ring[txq->tx_tail].last_id;
1312
1313                 /* Get the next segment, which is the oldest segment in ring. */
1314                 tx_first = sw_ring[tx_first].next_id;
1315
1316                 /* Set the current index to the first. */
1317                 tx_id = tx_first;
1318
1319                 /*
1320                  * Loop through each packet. For each packet, verify that an
1321                  * mbuf exists and that the last segment is free. If so, free
1322                  * it and move on.
1323                  */
1324                 while (1) {
1325                         tx_last = sw_ring[tx_id].last_id;
1326
1327                         if (sw_ring[tx_last].mbuf) {
1328                                 if (txr[tx_last].wb.status &
1329                                                 E1000_TXD_STAT_DD) {
1330                                         /*
1331                                          * Increment the number of packets
1332                                          * freed.
1333                                          */
1334                                         count++;
1335
1336                                         /* Get the start of the next packet. */
1337                                         tx_next = sw_ring[tx_last].next_id;
1338
1339                                         /*
1340                                          * Loop through all segments in a
1341                                          * packet.
1342                                          */
1343                                         do {
1344                                                 rte_pktmbuf_free_seg(sw_ring[tx_id].mbuf);
1345                                                 sw_ring[tx_id].mbuf = NULL;
1346                                                 sw_ring[tx_id].last_id = tx_id;
1347
1348                                                 /* Move to the next segment. */
1349                                                 tx_id = sw_ring[tx_id].next_id;
1350
1351                                         } while (tx_id != tx_next);
1352
1353                                         if (unlikely(count == (int)free_cnt))
1354                                                 break;
1355                                 } else
1356                                         /*
1357                                          * mbuf still in use, nothing left to
1358                                          * free.
1359                                          */
1360                                         break;
1361                         } else {
1362                                 /*
1363                                  * There are multiple reasons to be here:
1364                                  * 1) All the packets on the ring have been
1365                                  *    freed - tx_id is equal to tx_first
1366                                  *    and some packets have been freed.
1367                                  *    - Done, exit
1368                  * 2) The interface has not sent a ring's worth of
1369                  *    packets yet, so the segment after the tail is
1370                  *    still empty. Or a previous call to this
1371                  *    function freed some of the segments but
1372                  *    not all of them, so there is a hole in the list.
1373                                  *    Hopefully this is a rare case.
1374                                  *    - Walk the list and find the next mbuf. If
1375                                  *      there isn't one, then done.
1376                                  */
1377                                 if (likely((tx_id == tx_first) && (count != 0)))
1378                                         break;
1379
1380                                 /*
1381                                  * Walk the list and find the next mbuf, if any.
1382                                  */
1383                                 do {
1384                                         /* Move to the next segment. */
1385                                         tx_id = sw_ring[tx_id].next_id;
1386
1387                                         if (sw_ring[tx_id].mbuf)
1388                                                 break;
1389
1390                                 } while (tx_id != tx_first);
1391
1392                                 /*
1393                                  * Determine why previous loop bailed. If there
1394                                  * is not an mbuf, done.
1395                                  */
1396                                 if (sw_ring[tx_id].mbuf == NULL)
1397                                         break;
1398                         }
1399                 }
1400         } else
1401                 count = -ENODEV;
1402
1403         return count;
1404 }
1405
1406 int
1407 eth_igb_tx_done_cleanup(void *txq, uint32_t free_cnt)
1408 {
1409         return igb_tx_done_cleanup(txq, free_cnt);
1410 }
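
/*
 * Illustrative usage from the application side (a sketch, not part of the
 * driver): this callback is normally reached through the ethdev API, e.g.
 *
 *     ret = rte_eth_tx_done_cleanup(port_id, queue_id, 0);
 *
 * Passing 0 as free_cnt effectively asks for all completed mbufs to be
 * freed; with the implementation above, the count == free_cnt comparison
 * then never ends the loop early.
 */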
1411
1412 static void
1413 igb_reset_tx_queue_stat(struct igb_tx_queue *txq)
1414 {
1415         txq->tx_head = 0;
1416         txq->tx_tail = 0;
1417         txq->ctx_curr = 0;
1418         memset((void*)&txq->ctx_cache, 0,
1419                 IGB_CTX_NUM * sizeof(struct igb_advctx_info));
1420 }
1421
1422 static void
1423 igb_reset_tx_queue(struct igb_tx_queue *txq, struct rte_eth_dev *dev)
1424 {
1425         static const union e1000_adv_tx_desc zeroed_desc = {{0}};
1426         struct igb_tx_entry *txe = txq->sw_ring;
1427         uint16_t i, prev;
1428         struct e1000_hw *hw;
1429
1430         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1431         /* Zero out HW ring memory */
1432         for (i = 0; i < txq->nb_tx_desc; i++) {
1433                 txq->tx_ring[i] = zeroed_desc;
1434         }
1435
1436         /* Initialize ring entries */
1437         prev = (uint16_t)(txq->nb_tx_desc - 1);
1438         for (i = 0; i < txq->nb_tx_desc; i++) {
1439                 volatile union e1000_adv_tx_desc *txd = &(txq->tx_ring[i]);
1440
1441                 txd->wb.status = E1000_TXD_STAT_DD;
1442                 txe[i].mbuf = NULL;
1443                 txe[i].last_id = i;
1444                 txe[prev].next_id = i;
1445                 prev = i;
1446         }
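        /*
         * The loop above links the software ring into a circular chain via
         * next_id (e.g. with 4 descriptors: 0 -> 1 -> 2 -> 3 -> 0) and marks
         * every descriptor as done (DD set), so the transmit and cleanup
         * paths initially see a fully free ring.
         */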
1447
1448         txq->txd_type = E1000_ADVTXD_DTYP_DATA;
1449         /* 82575 specific, each tx queue will use 2 hw contexts */
1450         if (hw->mac.type == e1000_82575)
1451                 txq->ctx_start = txq->queue_id * IGB_CTX_NUM;
1452
1453         igb_reset_tx_queue_stat(txq);
1454 }
1455
1456 int
1457 eth_igb_tx_queue_setup(struct rte_eth_dev *dev,
1458                          uint16_t queue_idx,
1459                          uint16_t nb_desc,
1460                          unsigned int socket_id,
1461                          const struct rte_eth_txconf *tx_conf)
1462 {
1463         const struct rte_memzone *tz;
1464         struct igb_tx_queue *txq;
1465         struct e1000_hw     *hw;
1466         uint32_t size;
1467
1468         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1469
1470         /*
1471          * Validate number of transmit descriptors.
1472          * It must not exceed the hardware maximum and must be a multiple
1473          * of IGB_TXD_ALIGN, so the ring size is a multiple of E1000_ALIGN bytes.
1474          */
1475         if (nb_desc % IGB_TXD_ALIGN != 0 ||
1476                         (nb_desc > E1000_MAX_RING_DESC) ||
1477                         (nb_desc < E1000_MIN_RING_DESC)) {
1478                 return -EINVAL;
1479         }
1480
1481         /*
1482          * The tx_free_thresh and tx_rs_thresh values are not used in the 1G
1483          * driver.
1484          */
1485         if (tx_conf->tx_free_thresh != 0)
1486                 PMD_INIT_LOG(INFO, "The tx_free_thresh parameter is not "
1487                              "used for the 1G driver.");
1488         if (tx_conf->tx_rs_thresh != 0)
1489                 PMD_INIT_LOG(INFO, "The tx_rs_thresh parameter is not "
1490                              "used for the 1G driver.");
1491         if (tx_conf->tx_thresh.wthresh == 0 && hw->mac.type != e1000_82576)
1492                 PMD_INIT_LOG(INFO, "To improve 1G driver performance, "
1493                              "consider setting the TX WTHRESH value to 4, 8, "
1494                              "or 16.");
1495
1496         /* Free memory prior to re-allocation if needed */
1497         if (dev->data->tx_queues[queue_idx] != NULL) {
1498                 igb_tx_queue_release(dev->data->tx_queues[queue_idx]);
1499                 dev->data->tx_queues[queue_idx] = NULL;
1500         }
1501
1502         /* First allocate the tx queue data structure */
1503         txq = rte_zmalloc("ethdev TX queue", sizeof(struct igb_tx_queue),
1504                                                         RTE_CACHE_LINE_SIZE);
1505         if (txq == NULL)
1506                 return -ENOMEM;
1507
1508         /*
1509          * Allocate TX ring hardware descriptors. A memzone large enough to
1510          * handle the maximum ring size is allocated in order to allow for
1511          * resizing in later calls to the queue setup function.
1512          */
1513         size = sizeof(union e1000_adv_tx_desc) * E1000_MAX_RING_DESC;
1514         tz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx, size,
1515                                       E1000_ALIGN, socket_id);
1516         if (tz == NULL) {
1517                 igb_tx_queue_release(txq);
1518                 return -ENOMEM;
1519         }
1520
1521         txq->nb_tx_desc = nb_desc;
1522         txq->pthresh = tx_conf->tx_thresh.pthresh;
1523         txq->hthresh = tx_conf->tx_thresh.hthresh;
1524         txq->wthresh = tx_conf->tx_thresh.wthresh;
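        /*
         * Note: a non-zero WTHRESH is clamped to 1 on the 82576 below,
         * presumably for the same descriptor write-back issue noted for the
         * 82576 VF RX path later in this file.
         */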
1525         if (txq->wthresh > 0 && hw->mac.type == e1000_82576)
1526                 txq->wthresh = 1;
1527         txq->queue_id = queue_idx;
1528         txq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
1529                 queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
1530         txq->port_id = dev->data->port_id;
1531
1532         txq->tdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_TDT(txq->reg_idx));
1533         txq->tx_ring_phys_addr = rte_mem_phy2mch(tz->memseg_id, tz->phys_addr);
1534
1535         txq->tx_ring = (union e1000_adv_tx_desc *) tz->addr;
1536         /* Allocate software ring */
1537         txq->sw_ring = rte_zmalloc("txq->sw_ring",
1538                                    sizeof(struct igb_tx_entry) * nb_desc,
1539                                    RTE_CACHE_LINE_SIZE);
1540         if (txq->sw_ring == NULL) {
1541                 igb_tx_queue_release(txq);
1542                 return -ENOMEM;
1543         }
1544         PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64,
1545                      txq->sw_ring, txq->tx_ring, txq->tx_ring_phys_addr);
1546
1547         igb_reset_tx_queue(txq, dev);
1548         dev->tx_pkt_burst = eth_igb_xmit_pkts;
1549         dev->tx_pkt_prepare = &eth_igb_prep_pkts;
1550         dev->data->tx_queues[queue_idx] = txq;
1551
1552         return 0;
1553 }
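
/*
 * Illustrative application-side usage (a sketch; the port, queue index and
 * descriptor count are arbitrary examples): this function is reached
 * through the ethdev API, e.g.
 *
 *     struct rte_eth_txconf txconf = { .tx_thresh = { .wthresh = 8 } };
 *     ret = rte_eth_tx_queue_setup(port_id, 0, 512, rte_socket_id(),
 *                                  &txconf);
 *
 * 512 descriptors satisfy the IGB_TXD_ALIGN multiple and the min/max ring
 * size checks above.
 */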
1554
1555 static void
1556 igb_rx_queue_release_mbufs(struct igb_rx_queue *rxq)
1557 {
1558         unsigned i;
1559
1560         if (rxq->sw_ring != NULL) {
1561                 for (i = 0; i < rxq->nb_rx_desc; i++) {
1562                         if (rxq->sw_ring[i].mbuf != NULL) {
1563                                 rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
1564                                 rxq->sw_ring[i].mbuf = NULL;
1565                         }
1566                 }
1567         }
1568 }
1569
1570 static void
1571 igb_rx_queue_release(struct igb_rx_queue *rxq)
1572 {
1573         if (rxq != NULL) {
1574                 igb_rx_queue_release_mbufs(rxq);
1575                 rte_free(rxq->sw_ring);
1576                 rte_free(rxq);
1577         }
1578 }
1579
1580 void
1581 eth_igb_rx_queue_release(void *rxq)
1582 {
1583         igb_rx_queue_release(rxq);
1584 }
1585
1586 static void
1587 igb_reset_rx_queue(struct igb_rx_queue *rxq)
1588 {
1589         static const union e1000_adv_rx_desc zeroed_desc = {{0}};
1590         unsigned i;
1591
1592         /* Zero out HW ring memory */
1593         for (i = 0; i < rxq->nb_rx_desc; i++) {
1594                 rxq->rx_ring[i] = zeroed_desc;
1595         }
1596
1597         rxq->rx_tail = 0;
1598         rxq->pkt_first_seg = NULL;
1599         rxq->pkt_last_seg = NULL;
1600 }
1601
1602 int
1603 eth_igb_rx_queue_setup(struct rte_eth_dev *dev,
1604                          uint16_t queue_idx,
1605                          uint16_t nb_desc,
1606                          unsigned int socket_id,
1607                          const struct rte_eth_rxconf *rx_conf,
1608                          struct rte_mempool *mp)
1609 {
1610         const struct rte_memzone *rz;
1611         struct igb_rx_queue *rxq;
1612         struct e1000_hw     *hw;
1613         unsigned int size;
1614
1615         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1616
1617         /*
1618          * Validate number of receive descriptors.
1619          * It must not exceed the hardware maximum and must be a multiple
1620          * of IGB_RXD_ALIGN, so the ring size is a multiple of E1000_ALIGN bytes.
1621          */
1622         if (nb_desc % IGB_RXD_ALIGN != 0 ||
1623                         (nb_desc > E1000_MAX_RING_DESC) ||
1624                         (nb_desc < E1000_MIN_RING_DESC)) {
1625                 return -EINVAL;
1626         }
1627
1628         /* Free memory prior to re-allocation if needed */
1629         if (dev->data->rx_queues[queue_idx] != NULL) {
1630                 igb_rx_queue_release(dev->data->rx_queues[queue_idx]);
1631                 dev->data->rx_queues[queue_idx] = NULL;
1632         }
1633
1634         /* First allocate the RX queue data structure. */
1635         rxq = rte_zmalloc("ethdev RX queue", sizeof(struct igb_rx_queue),
1636                           RTE_CACHE_LINE_SIZE);
1637         if (rxq == NULL)
1638                 return -ENOMEM;
1639         rxq->mb_pool = mp;
1640         rxq->nb_rx_desc = nb_desc;
1641         rxq->pthresh = rx_conf->rx_thresh.pthresh;
1642         rxq->hthresh = rx_conf->rx_thresh.hthresh;
1643         rxq->wthresh = rx_conf->rx_thresh.wthresh;
1644         if (rxq->wthresh > 0 &&
1645             (hw->mac.type == e1000_82576 || hw->mac.type == e1000_vfadapt_i350))
1646                 rxq->wthresh = 1;
1647         rxq->drop_en = rx_conf->rx_drop_en;
1648         rxq->rx_free_thresh = rx_conf->rx_free_thresh;
1649         rxq->queue_id = queue_idx;
1650         rxq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
1651                 queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
1652         rxq->port_id = dev->data->port_id;
1653         rxq->crc_len = (uint8_t) ((dev->data->dev_conf.rxmode.hw_strip_crc) ? 0 :
1654                                   ETHER_CRC_LEN);
1655
1656         /*
1657          *  Allocate RX ring hardware descriptors. A memzone large enough to
1658          *  handle the maximum ring size is allocated in order to allow for
1659          *  resizing in later calls to the queue setup function.
1660          */
1661         size = sizeof(union e1000_adv_rx_desc) * E1000_MAX_RING_DESC;
1662         rz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx, size,
1663                                       E1000_ALIGN, socket_id);
1664         if (rz == NULL) {
1665                 igb_rx_queue_release(rxq);
1666                 return -ENOMEM;
1667         }
1668         rxq->rdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDT(rxq->reg_idx));
1669         rxq->rdh_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDH(rxq->reg_idx));
1670         rxq->rx_ring_phys_addr = rte_mem_phy2mch(rz->memseg_id, rz->phys_addr);
1671         rxq->rx_ring = (union e1000_adv_rx_desc *) rz->addr;
1672
1673         /* Allocate software ring. */
1674         rxq->sw_ring = rte_zmalloc("rxq->sw_ring",
1675                                    sizeof(struct igb_rx_entry) * nb_desc,
1676                                    RTE_CACHE_LINE_SIZE);
1677         if (rxq->sw_ring == NULL) {
1678                 igb_rx_queue_release(rxq);
1679                 return -ENOMEM;
1680         }
1681         PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64,
1682                      rxq->sw_ring, rxq->rx_ring, rxq->rx_ring_phys_addr);
1683
1684         dev->data->rx_queues[queue_idx] = rxq;
1685         igb_reset_rx_queue(rxq);
1686
1687         return 0;
1688 }
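
/*
 * Illustrative application-side usage (a sketch; the pool name, sizes and
 * queue index are arbitrary examples):
 *
 *     struct rte_mempool *mp = rte_pktmbuf_pool_create("rx_pool", 8192,
 *                     256, 0, RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
 *     ret = rte_eth_rx_queue_setup(port_id, 0, 512, rte_socket_id(),
 *                                  NULL, mp);
 *
 * Passing NULL for rx_conf makes the ethdev layer use the driver's default
 * RX configuration.
 */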
1689
1690 uint32_t
1691 eth_igb_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
1692 {
1693 #define IGB_RXQ_SCAN_INTERVAL 4
1694         volatile union e1000_adv_rx_desc *rxdp;
1695         struct igb_rx_queue *rxq;
1696         uint32_t desc = 0;
1697
1698         rxq = dev->data->rx_queues[rx_queue_id];
1699         rxdp = &(rxq->rx_ring[rxq->rx_tail]);
1700
1701         while ((desc < rxq->nb_rx_desc) &&
1702                 (rxdp->wb.upper.status_error & E1000_RXD_STAT_DD)) {
1703                 desc += IGB_RXQ_SCAN_INTERVAL;
1704                 rxdp += IGB_RXQ_SCAN_INTERVAL;
1705                 if (rxq->rx_tail + desc >= rxq->nb_rx_desc)
1706                         rxdp = &(rxq->rx_ring[rxq->rx_tail +
1707                                 desc - rxq->nb_rx_desc]);
1708         }
1709
1710         return desc;
1711 }
1712
1713 int
1714 eth_igb_rx_descriptor_done(void *rx_queue, uint16_t offset)
1715 {
1716         volatile union e1000_adv_rx_desc *rxdp;
1717         struct igb_rx_queue *rxq = rx_queue;
1718         uint32_t desc;
1719
1720         if (unlikely(offset >= rxq->nb_rx_desc))
1721                 return 0;
1722         desc = rxq->rx_tail + offset;
1723         if (desc >= rxq->nb_rx_desc)
1724                 desc -= rxq->nb_rx_desc;
1725
1726         rxdp = &rxq->rx_ring[desc];
1727         return !!(rxdp->wb.upper.status_error & E1000_RXD_STAT_DD);
1728 }
1729
1730 int
1731 eth_igb_rx_descriptor_status(void *rx_queue, uint16_t offset)
1732 {
1733         struct igb_rx_queue *rxq = rx_queue;
1734         volatile uint32_t *status;
1735         uint32_t desc;
1736
1737         if (unlikely(offset >= rxq->nb_rx_desc))
1738                 return -EINVAL;
1739
1740         if (offset >= rxq->nb_rx_desc - rxq->nb_rx_hold)
1741                 return RTE_ETH_RX_DESC_UNAVAIL;
1742
1743         desc = rxq->rx_tail + offset;
1744         if (desc >= rxq->nb_rx_desc)
1745                 desc -= rxq->nb_rx_desc;
1746
1747         status = &rxq->rx_ring[desc].wb.upper.status_error;
1748         if (*status & rte_cpu_to_le_32(E1000_RXD_STAT_DD))
1749                 return RTE_ETH_RX_DESC_DONE;
1750
1751         return RTE_ETH_RX_DESC_AVAIL;
1752 }
1753
1754 int
1755 eth_igb_tx_descriptor_status(void *tx_queue, uint16_t offset)
1756 {
1757         struct igb_tx_queue *txq = tx_queue;
1758         volatile uint32_t *status;
1759         uint32_t desc;
1760
1761         if (unlikely(offset >= txq->nb_tx_desc))
1762                 return -EINVAL;
1763
1764         desc = txq->tx_tail + offset;
1765         if (desc >= txq->nb_tx_desc)
1766                 desc -= txq->nb_tx_desc;
1767
1768         status = &txq->tx_ring[desc].wb.status;
1769         if (*status & rte_cpu_to_le_32(E1000_TXD_STAT_DD))
1770                 return RTE_ETH_TX_DESC_DONE;
1771
1772         return RTE_ETH_TX_DESC_FULL;
1773 }
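
/*
 * The descriptor status callbacks above are normally reached through the
 * ethdev API (a sketch):
 *
 *     int st = rte_eth_rx_descriptor_status(port_id, queue_id, offset);
 *
 * which returns RTE_ETH_RX_DESC_DONE once the descriptor at
 * rx_tail + offset has been written back by hardware;
 * rte_eth_tx_descriptor_status() plays the same role for the TX ring.
 */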
1774
1775 void
1776 igb_dev_clear_queues(struct rte_eth_dev *dev)
1777 {
1778         uint16_t i;
1779         struct igb_tx_queue *txq;
1780         struct igb_rx_queue *rxq;
1781
1782         for (i = 0; i < dev->data->nb_tx_queues; i++) {
1783                 txq = dev->data->tx_queues[i];
1784                 if (txq != NULL) {
1785                         igb_tx_queue_release_mbufs(txq);
1786                         igb_reset_tx_queue(txq, dev);
1787                 }
1788         }
1789
1790         for (i = 0; i < dev->data->nb_rx_queues; i++) {
1791                 rxq = dev->data->rx_queues[i];
1792                 if (rxq != NULL) {
1793                         igb_rx_queue_release_mbufs(rxq);
1794                         igb_reset_rx_queue(rxq);
1795                 }
1796         }
1797 }
1798
1799 void
1800 igb_dev_free_queues(struct rte_eth_dev *dev)
1801 {
1802         uint16_t i;
1803
1804         for (i = 0; i < dev->data->nb_rx_queues; i++) {
1805                 eth_igb_rx_queue_release(dev->data->rx_queues[i]);
1806                 dev->data->rx_queues[i] = NULL;
1807         }
1808         dev->data->nb_rx_queues = 0;
1809
1810         for (i = 0; i < dev->data->nb_tx_queues; i++) {
1811                 eth_igb_tx_queue_release(dev->data->tx_queues[i]);
1812                 dev->data->tx_queues[i] = NULL;
1813         }
1814         dev->data->nb_tx_queues = 0;
1815 }
1816
1817 /**
1818  * Receive Side Scaling (RSS).
1819  * See section 7.1.1.7 in the following document:
1820  *     "Intel 82576 GbE Controller Datasheet" - Revision 2.45 October 2009
1821  *
1822  * Principles:
1823  * The source and destination IP addresses of the IP header and the source and
1824  * destination ports of TCP/UDP headers, if any, of received packets are hashed
1825  * against a configurable random key to compute a 32-bit RSS hash result.
1826  * The seven (7) LSBs of the 32-bit hash result are used as an index into a
1827  * 128-entry redirection table (RETA).  Each entry of the RETA provides a 3-bit
1828  * RSS output index, which is used as the index of the RX queue in which to
1829  * store the received packet.
1830  * The following output is supplied in the RX write-back descriptor:
1831  *     - 32-bit result of the Microsoft RSS hash function,
1832  *     - 4-bit RSS type field.
1833  */
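
/*
 * In effect, for a received packet (a sketch of the lookup described above):
 *
 *     rx_queue = RETA[rss_hash & 0x7F];   (the 7 LSBs index the 128 entries)
 *
 * where each RETA entry holds a 3-bit queue number (0..7).
 */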
1834
1835 /*
1836  * RSS random key supplied in section 7.1.1.7.3 of the Intel 82576 datasheet.
1837  * Used as the default key.
1838  */
1839 static uint8_t rss_intel_key[40] = {
1840         0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
1841         0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
1842         0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
1843         0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
1844         0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA,
1845 };
1846
1847 static void
1848 igb_rss_disable(struct rte_eth_dev *dev)
1849 {
1850         struct e1000_hw *hw;
1851         uint32_t mrqc;
1852
1853         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1854         mrqc = E1000_READ_REG(hw, E1000_MRQC);
1855         mrqc &= ~E1000_MRQC_ENABLE_MASK;
1856         E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
1857 }
1858
1859 static void
1860 igb_hw_rss_hash_set(struct e1000_hw *hw, struct rte_eth_rss_conf *rss_conf)
1861 {
1862         uint8_t  *hash_key;
1863         uint32_t rss_key;
1864         uint32_t mrqc;
1865         uint64_t rss_hf;
1866         uint16_t i;
1867
1868         hash_key = rss_conf->rss_key;
1869         if (hash_key != NULL) {
1870                 /* Fill in RSS hash key */
1871                 for (i = 0; i < 10; i++) {
1872                         rss_key  = hash_key[(i * 4)];
1873                         rss_key |= hash_key[(i * 4) + 1] << 8;
1874                         rss_key |= hash_key[(i * 4) + 2] << 16;
1875                         rss_key |= hash_key[(i * 4) + 3] << 24;
1876                         E1000_WRITE_REG_ARRAY(hw, E1000_RSSRK(0), i, rss_key);
1877                 }
1878         }
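        /*
         * Each RSSRK register packs four consecutive key bytes in
         * little-endian order; with the default rss_intel_key above, for
         * example, RSSRK(0) = 0x6D | 0x5A << 8 | 0x56 << 16 | 0xDA << 24
         * = 0xDA565A6D.
         */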
1879
1880         /* Set configured hashing protocols in MRQC register */
1881         rss_hf = rss_conf->rss_hf;
1882         mrqc = E1000_MRQC_ENABLE_RSS_4Q; /* RSS enabled. */
1883         if (rss_hf & ETH_RSS_IPV4)
1884                 mrqc |= E1000_MRQC_RSS_FIELD_IPV4;
1885         if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
1886                 mrqc |= E1000_MRQC_RSS_FIELD_IPV4_TCP;
1887         if (rss_hf & ETH_RSS_IPV6)
1888                 mrqc |= E1000_MRQC_RSS_FIELD_IPV6;
1889         if (rss_hf & ETH_RSS_IPV6_EX)
1890                 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_EX;
1891         if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP)
1892                 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_TCP;
1893         if (rss_hf & ETH_RSS_IPV6_TCP_EX)
1894                 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_TCP_EX;
1895         if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
1896                 mrqc |= E1000_MRQC_RSS_FIELD_IPV4_UDP;
1897         if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP)
1898                 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_UDP;
1899         if (rss_hf & ETH_RSS_IPV6_UDP_EX)
1900                 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_UDP_EX;
1901         E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
1902 }
1903
1904 int
1905 eth_igb_rss_hash_update(struct rte_eth_dev *dev,
1906                         struct rte_eth_rss_conf *rss_conf)
1907 {
1908         struct e1000_hw *hw;
1909         uint32_t mrqc;
1910         uint64_t rss_hf;
1911
1912         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1913
1914         /*
1915          * Before changing anything, first check that the requested update
1916          * does not attempt to disable RSS if RSS was enabled at
1917          * initialization time, nor attempt to enable RSS if RSS was
1918          * disabled at initialization time.
1919          */
1920         rss_hf = rss_conf->rss_hf & IGB_RSS_OFFLOAD_ALL;
1921         mrqc = E1000_READ_REG(hw, E1000_MRQC);
1922         if (!(mrqc & E1000_MRQC_ENABLE_MASK)) { /* RSS disabled */
1923                 if (rss_hf != 0) /* Enable RSS */
1924                         return -(EINVAL);
1925                 return 0; /* Nothing to do */
1926         }
1927         /* RSS enabled */
1928         if (rss_hf == 0) /* Disable RSS */
1929                 return -(EINVAL);
1930         igb_hw_rss_hash_set(hw, rss_conf);
1931         return 0;
1932 }
1933
1934 int eth_igb_rss_hash_conf_get(struct rte_eth_dev *dev,
1935                               struct rte_eth_rss_conf *rss_conf)
1936 {
1937         struct e1000_hw *hw;
1938         uint8_t *hash_key;
1939         uint32_t rss_key;
1940         uint32_t mrqc;
1941         uint64_t rss_hf;
1942         uint16_t i;
1943
1944         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1945         hash_key = rss_conf->rss_key;
1946         if (hash_key != NULL) {
1947                 /* Return RSS hash key */
1948                 for (i = 0; i < 10; i++) {
1949                         rss_key = E1000_READ_REG_ARRAY(hw, E1000_RSSRK(0), i);
1950                         hash_key[(i * 4)] = rss_key & 0x000000FF;
1951                         hash_key[(i * 4) + 1] = (rss_key >> 8) & 0x000000FF;
1952                         hash_key[(i * 4) + 2] = (rss_key >> 16) & 0x000000FF;
1953                         hash_key[(i * 4) + 3] = (rss_key >> 24) & 0x000000FF;
1954                 }
1955         }
1956
1957         /* Get RSS functions configured in MRQC register */
1958         mrqc = E1000_READ_REG(hw, E1000_MRQC);
1959         if ((mrqc & E1000_MRQC_ENABLE_RSS_4Q) == 0) { /* RSS is disabled */
1960                 rss_conf->rss_hf = 0;
1961                 return 0;
1962         }
1963         rss_hf = 0;
1964         if (mrqc & E1000_MRQC_RSS_FIELD_IPV4)
1965                 rss_hf |= ETH_RSS_IPV4;
1966         if (mrqc & E1000_MRQC_RSS_FIELD_IPV4_TCP)
1967                 rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
1968         if (mrqc & E1000_MRQC_RSS_FIELD_IPV6)
1969                 rss_hf |= ETH_RSS_IPV6;
1970         if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_EX)
1971                 rss_hf |= ETH_RSS_IPV6_EX;
1972         if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_TCP)
1973                 rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
1974         if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_TCP_EX)
1975                 rss_hf |= ETH_RSS_IPV6_TCP_EX;
1976         if (mrqc & E1000_MRQC_RSS_FIELD_IPV4_UDP)
1977                 rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
1978         if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_UDP)
1979                 rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
1980         if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_UDP_EX)
1981                 rss_hf |= ETH_RSS_IPV6_UDP_EX;
1982         rss_conf->rss_hf = rss_hf;
1983         return 0;
1984 }
1985
1986 static void
1987 igb_rss_configure(struct rte_eth_dev *dev)
1988 {
1989         struct rte_eth_rss_conf rss_conf;
1990         struct e1000_hw *hw;
1991         uint32_t shift;
1992         uint16_t i;
1993
1994         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1995
1996         /* Fill in redirection table. */
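        /*
         * Four 8-bit RETA entries are packed per 32-bit register write. For
         * example, with two RX queues and shift == 0, queue indices cycle
         * 0, 1, 0, 1, so the first write (i == 3) stores reta.dword ==
         * 0x01000100 (on a little-endian host) into E1000_RETA(0).
         */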
1997         shift = (hw->mac.type == e1000_82575) ? 6 : 0;
1998         for (i = 0; i < 128; i++) {
1999                 union e1000_reta {
2000                         uint32_t dword;
2001                         uint8_t  bytes[4];
2002                 } reta;
2003                 uint8_t q_idx;
2004
2005                 q_idx = (uint8_t) ((dev->data->nb_rx_queues > 1) ?
2006                                    i % dev->data->nb_rx_queues : 0);
2007                 reta.bytes[i & 3] = (uint8_t) (q_idx << shift);
2008                 if ((i & 3) == 3)
2009                         E1000_WRITE_REG(hw, E1000_RETA(i >> 2), reta.dword);
2010         }
2011
2012         /*
2013          * Configure the RSS key and the RSS protocols used to compute
2014          * the RSS hash of input packets.
2015          */
2016         rss_conf = dev->data->dev_conf.rx_adv_conf.rss_conf;
2017         if ((rss_conf.rss_hf & IGB_RSS_OFFLOAD_ALL) == 0) {
2018                 igb_rss_disable(dev);
2019                 return;
2020         }
2021         if (rss_conf.rss_key == NULL)
2022                 rss_conf.rss_key = rss_intel_key; /* Default hash key */
2023         igb_hw_rss_hash_set(hw, &rss_conf);
2024 }
2025
2026 /*
2027  * Check whether the MAC type supports VMDq.
2028  * Return 1 if it does; otherwise return 0.
2029  */
2030 static int
2031 igb_is_vmdq_supported(const struct rte_eth_dev *dev)
2032 {
2033         const struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2034
2035         switch (hw->mac.type) {
2036         case e1000_82576:
2037         case e1000_82580:
2038         case e1000_i350:
2039                 return 1;
2040         case e1000_82540:
2041         case e1000_82541:
2042         case e1000_82542:
2043         case e1000_82543:
2044         case e1000_82544:
2045         case e1000_82545:
2046         case e1000_82546:
2047         case e1000_82547:
2048         case e1000_82571:
2049         case e1000_82572:
2050         case e1000_82573:
2051         case e1000_82574:
2052         case e1000_82583:
2053         case e1000_i210:
2054         case e1000_i211:
2055         default:
2056                 PMD_INIT_LOG(ERR, "Cannot support VMDq feature");
2057                 return 0;
2058         }
2059 }
2060
2061 static int
2062 igb_vmdq_rx_hw_configure(struct rte_eth_dev *dev)
2063 {
2064         struct rte_eth_vmdq_rx_conf *cfg;
2065         struct e1000_hw *hw;
2066         uint32_t mrqc, vt_ctl, vmolr, rctl;
2067         int i;
2068
2069         PMD_INIT_FUNC_TRACE();
2070
2071         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2072         cfg = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;
2073
2074         /* Check whether the MAC type supports VMDq; 0 means it is not supported */
2075         if (igb_is_vmdq_supported(dev) == 0)
2076                 return -1;
2077
2078         igb_rss_disable(dev);
2079
2080         /* RCTL: enable VLAN filter */
2081         rctl = E1000_READ_REG(hw, E1000_RCTL);
2082         rctl |= E1000_RCTL_VFE;
2083         E1000_WRITE_REG(hw, E1000_RCTL, rctl);
2084
2085         /* MRQC: enable vmdq */
2086         mrqc = E1000_READ_REG(hw, E1000_MRQC);
2087         mrqc |= E1000_MRQC_ENABLE_VMDQ;
2088         E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
2089
2090         /* VTCTL:  pool selection according to VLAN tag */
2091         vt_ctl = E1000_READ_REG(hw, E1000_VT_CTL);
2092         if (cfg->enable_default_pool)
2093                 vt_ctl |= (cfg->default_pool << E1000_VT_CTL_DEFAULT_POOL_SHIFT);
2094         vt_ctl |= E1000_VT_CTL_IGNORE_MAC;
2095         E1000_WRITE_REG(hw, E1000_VT_CTL, vt_ctl);
2096
2097         for (i = 0; i < E1000_VMOLR_SIZE; i++) {
2098                 vmolr = E1000_READ_REG(hw, E1000_VMOLR(i));
2099                 vmolr &= ~(E1000_VMOLR_AUPE | E1000_VMOLR_ROMPE |
2100                         E1000_VMOLR_ROPE | E1000_VMOLR_BAM |
2101                         E1000_VMOLR_MPME);
2102
2103                 if (cfg->rx_mode & ETH_VMDQ_ACCEPT_UNTAG)
2104                         vmolr |= E1000_VMOLR_AUPE;
2105                 if (cfg->rx_mode & ETH_VMDQ_ACCEPT_HASH_MC)
2106                         vmolr |= E1000_VMOLR_ROMPE;
2107                 if (cfg->rx_mode & ETH_VMDQ_ACCEPT_HASH_UC)
2108                         vmolr |= E1000_VMOLR_ROPE;
2109                 if (cfg->rx_mode & ETH_VMDQ_ACCEPT_BROADCAST)
2110                         vmolr |= E1000_VMOLR_BAM;
2111                 if (cfg->rx_mode & ETH_VMDQ_ACCEPT_MULTICAST)
2112                         vmolr |= E1000_VMOLR_MPME;
2113
2114                 E1000_WRITE_REG(hw, E1000_VMOLR(i), vmolr);
2115         }
2116
2117         /*
2118          * VMOLR: set STRVLAN to 1 if IGMAC in VTCTL is set to 1.
2119          * Both the 82576 and the 82580 support it.
2120          */
2121         if (hw->mac.type != e1000_i350) {
2122                 for (i = 0; i < E1000_VMOLR_SIZE; i++) {
2123                         vmolr = E1000_READ_REG(hw, E1000_VMOLR(i));
2124                         vmolr |= E1000_VMOLR_STRVLAN;
2125                         E1000_WRITE_REG(hw, E1000_VMOLR(i), vmolr);
2126                 }
2127         }
2128
2129         /* VFTA - enable all vlan filters */
2130         for (i = 0; i < IGB_VFTA_SIZE; i++)
2131                 E1000_WRITE_REG(hw, (E1000_VFTA+(i*4)), UINT32_MAX);
2132
2133         /* VFRE: enable 8 pools for RX; both 82576 and i350 support it */
2134         if (hw->mac.type != e1000_82580)
2135                 E1000_WRITE_REG(hw, E1000_VFRE, E1000_MBVFICR_VFREQ_MASK);
2136
2137         /*
2138          * RAH/RAL - allow pools to read specific mac addresses
2139          * In this case, all pools should be able to read from mac addr 0
2140          */
2141         E1000_WRITE_REG(hw, E1000_RAH(0), (E1000_RAH_AV | UINT16_MAX));
2142         E1000_WRITE_REG(hw, E1000_RAL(0), UINT32_MAX);
2143
2144         /* VLVF: set up filters for vlan tags as configured */
2145         for (i = 0; i < cfg->nb_pool_maps; i++) {
2146                 /* set vlan id in VF register and set the valid bit */
2147                 E1000_WRITE_REG(hw, E1000_VLVF(i), (E1000_VLVF_VLANID_ENABLE | \
2148                         (cfg->pool_map[i].vlan_id & ETH_VLAN_ID_MAX) | \
2149                         ((cfg->pool_map[i].pools << E1000_VLVF_POOLSEL_SHIFT ) & \
2150                         E1000_VLVF_POOLSEL_MASK)));
2151         }
2152
2153         E1000_WRITE_FLUSH(hw);
2154
2155         return 0;
2156 }
2157
2158
2159 /*********************************************************************
2160  *
2161  *  Enable receive unit.
2162  *
2163  **********************************************************************/
2164
2165 static int
2166 igb_alloc_rx_queue_mbufs(struct igb_rx_queue *rxq)
2167 {
2168         struct igb_rx_entry *rxe = rxq->sw_ring;
2169         uint64_t dma_addr;
2170         unsigned i;
2171
2172         /* Initialize software ring entries. */
2173         for (i = 0; i < rxq->nb_rx_desc; i++) {
2174                 volatile union e1000_adv_rx_desc *rxd;
2175                 struct rte_mbuf *mbuf = rte_mbuf_raw_alloc(rxq->mb_pool);
2176
2177                 if (mbuf == NULL) {
2178                         PMD_INIT_LOG(ERR, "RX mbuf alloc failed "
2179                                      "queue_id=%hu", rxq->queue_id);
2180                         return -ENOMEM;
2181                 }
2182                 dma_addr =
2183                         rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(mbuf));
2184                 rxd = &rxq->rx_ring[i];
2185                 rxd->read.hdr_addr = 0;
2186                 rxd->read.pkt_addr = dma_addr;
2187                 rxe[i].mbuf = mbuf;
2188         }
2189
2190         return 0;
2191 }
2192
2193 #define E1000_MRQC_DEF_Q_SHIFT               (3)
2194 static int
2195 igb_dev_mq_rx_configure(struct rte_eth_dev *dev)
2196 {
2197         struct e1000_hw *hw =
2198                 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2199         uint32_t mrqc;
2200
2201         if (RTE_ETH_DEV_SRIOV(dev).active == ETH_8_POOLS) {
2202                 /*
2203                  * SRIOV active scheme
2204                  * FIXME: add support for RSS together with VMDq & SRIOV
2205                  */
2206                 mrqc = E1000_MRQC_ENABLE_VMDQ;
2207                 /* 011b Def_Q ignore, according to VT_CTL.DEF_PL */
2208                 mrqc |= 0x3 << E1000_MRQC_DEF_Q_SHIFT;
2209                 E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
2210         } else if (RTE_ETH_DEV_SRIOV(dev).active == 0) {
2211                 /*
2212                  * SRIOV inactive scheme
2213                  */
2214                 switch (dev->data->dev_conf.rxmode.mq_mode) {
2215                         case ETH_MQ_RX_RSS:
2216                                 igb_rss_configure(dev);
2217                                 break;
2218                         case ETH_MQ_RX_VMDQ_ONLY:
2219                                 /* Configure general VMDq-only RX parameters */
2220                                 igb_vmdq_rx_hw_configure(dev);
2221                                 break;
2222                         case ETH_MQ_RX_NONE:
2223                                 /* If mq_mode is none, disable RSS. */
2224                         default:
2225                                 igb_rss_disable(dev);
2226                                 break;
2227                 }
2228         }
2229
2230         return 0;
2231 }
2232
2233 int
2234 eth_igb_rx_init(struct rte_eth_dev *dev)
2235 {
2236         struct e1000_hw     *hw;
2237         struct igb_rx_queue *rxq;
2238         uint32_t rctl;
2239         uint32_t rxcsum;
2240         uint32_t srrctl;
2241         uint16_t buf_size;
2242         uint16_t rctl_bsize;
2243         uint16_t i;
2244         int ret;
2245
2246         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2247         srrctl = 0;
2248
2249         /*
2250          * Make sure receives are disabled while setting
2251          * up the descriptor ring.
2252          */
2253         rctl = E1000_READ_REG(hw, E1000_RCTL);
2254         E1000_WRITE_REG(hw, E1000_RCTL, rctl & ~E1000_RCTL_EN);
2255
2256         /*
2257          * Configure support of jumbo frames, if any.
2258          */
2259         if (dev->data->dev_conf.rxmode.jumbo_frame == 1) {
2260                 rctl |= E1000_RCTL_LPE;
2261
2262                 /*
2263                  * Set the maximum packet length by default; it may be updated
2264                  * later when dual VLAN is enabled or disabled.
2265                  */
2266                 E1000_WRITE_REG(hw, E1000_RLPML,
2267                         dev->data->dev_conf.rxmode.max_rx_pkt_len +
2268                                                 VLAN_TAG_SIZE);
2269         } else
2270                 rctl &= ~E1000_RCTL_LPE;
2271
2272         /* Configure and enable each RX queue. */
2273         rctl_bsize = 0;
2274         dev->rx_pkt_burst = eth_igb_recv_pkts;
2275         for (i = 0; i < dev->data->nb_rx_queues; i++) {
2276                 uint64_t bus_addr;
2277                 uint32_t rxdctl;
2278
2279                 rxq = dev->data->rx_queues[i];
2280
2281                 /* Allocate buffers for descriptor rings and set up queue */
2282                 ret = igb_alloc_rx_queue_mbufs(rxq);
2283                 if (ret)
2284                         return ret;
2285
2286                 /*
2287                  * Reset crc_len in case it was changed after queue setup by a
2288                  * call to rte_eth_dev_configure().
2289                  */
2290                 rxq->crc_len =
2291                         (uint8_t)(dev->data->dev_conf.rxmode.hw_strip_crc ?
2292                                                         0 : ETHER_CRC_LEN);
2293
2294                 bus_addr = rxq->rx_ring_phys_addr;
2295                 E1000_WRITE_REG(hw, E1000_RDLEN(rxq->reg_idx),
2296                                 rxq->nb_rx_desc *
2297                                 sizeof(union e1000_adv_rx_desc));
2298                 E1000_WRITE_REG(hw, E1000_RDBAH(rxq->reg_idx),
2299                                 (uint32_t)(bus_addr >> 32));
2300                 E1000_WRITE_REG(hw, E1000_RDBAL(rxq->reg_idx), (uint32_t)bus_addr);
2301
2302                 srrctl = E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
2303
2304                 /*
2305                  * Configure RX buffer size.
2306                  */
2307                 buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) -
2308                         RTE_PKTMBUF_HEADROOM);
2309                 if (buf_size >= 1024) {
2310                         /*
2311                          * Configure the BSIZEPACKET field of the SRRCTL
2312                          * register of the queue.
2313                          * Value is in 1 KB resolution, from 1 KB to 127 KB.
2314                          * If this field is equal to 0b, then RCTL.BSIZE
2315                          * determines the RX packet buffer size.
2316                          */
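                        /*
                         * For example, a 2 KB mbuf data room gives
                         * buf_size = 2048, a BSIZEPKT value of
                         * 2048 / 1024 = 2 (the field is in 1 KB units), and
                         * the rounded-back buf_size below stays 2048.
                         */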
2317                         srrctl |= ((buf_size >> E1000_SRRCTL_BSIZEPKT_SHIFT) &
2318                                    E1000_SRRCTL_BSIZEPKT_MASK);
2319                         buf_size = (uint16_t) ((srrctl &
2320                                                 E1000_SRRCTL_BSIZEPKT_MASK) <<
2321                                                E1000_SRRCTL_BSIZEPKT_SHIFT);
2322
2323                         /* Add the dual VLAN length to support dual VLAN frames */
2324                         if ((dev->data->dev_conf.rxmode.max_rx_pkt_len +
2325                                                 2 * VLAN_TAG_SIZE) > buf_size){
2326                                 if (!dev->data->scattered_rx)
2327                                         PMD_INIT_LOG(DEBUG,
2328                                                      "forcing scatter mode");
2329                                 dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
2330                                 dev->data->scattered_rx = 1;
2331                         }
2332                 } else {
2333                         /*
2334                          * Use BSIZE field of the device RCTL register.
2335                          */
2336                         if ((rctl_bsize == 0) || (rctl_bsize > buf_size))
2337                                 rctl_bsize = buf_size;
2338                         if (!dev->data->scattered_rx)
2339                                 PMD_INIT_LOG(DEBUG, "forcing scatter mode");
2340                         dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
2341                         dev->data->scattered_rx = 1;
2342                 }
2343
2344                 /* Set whether packets are dropped when no descriptors are available */
2345                 if (rxq->drop_en)
2346                         srrctl |= E1000_SRRCTL_DROP_EN;
2347
2348                 E1000_WRITE_REG(hw, E1000_SRRCTL(rxq->reg_idx), srrctl);
2349
2350                 /* Enable this RX queue. */
2351                 rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(rxq->reg_idx));
2352                 rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
2353                 rxdctl &= 0xFFF00000;
2354                 rxdctl |= (rxq->pthresh & 0x1F);
2355                 rxdctl |= ((rxq->hthresh & 0x1F) << 8);
2356                 rxdctl |= ((rxq->wthresh & 0x1F) << 16);
2357                 E1000_WRITE_REG(hw, E1000_RXDCTL(rxq->reg_idx), rxdctl);
2358         }
2359
2360         if (dev->data->dev_conf.rxmode.enable_scatter) {
2361                 if (!dev->data->scattered_rx)
2362                         PMD_INIT_LOG(DEBUG, "forcing scatter mode");
2363                 dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
2364                 dev->data->scattered_rx = 1;
2365         }
2366
2367         /*
2368          * Setup BSIZE field of RCTL register, if needed.
2369          * Buffer sizes >= 1024 are not [supposed to be] set up in the RCTL
2370          * register, since the code above configures the SRRCTL register of
2371          * the RX queue in such a case.
2372          * All configurable sizes are:
2373          * 16384: rctl |= (E1000_RCTL_SZ_16384 | E1000_RCTL_BSEX);
2374          *  8192: rctl |= (E1000_RCTL_SZ_8192  | E1000_RCTL_BSEX);
2375          *  4096: rctl |= (E1000_RCTL_SZ_4096  | E1000_RCTL_BSEX);
2376          *  2048: rctl |= E1000_RCTL_SZ_2048;
2377          *  1024: rctl |= E1000_RCTL_SZ_1024;
2378          *   512: rctl |= E1000_RCTL_SZ_512;
2379          *   256: rctl |= E1000_RCTL_SZ_256;
2380          */
2381         if (rctl_bsize > 0) {
2382                 if (rctl_bsize >= 512) /* 512 <= buf_size < 1024 - use 512 */
2383                         rctl |= E1000_RCTL_SZ_512;
2384                 else /* 256 <= buf_size < 512 - use 256 */
2385                         rctl |= E1000_RCTL_SZ_256;
2386         }
2387
2388         /*
2389          * Configure RSS if the device is configured with multiple RX queues.
2390          */
2391         igb_dev_mq_rx_configure(dev);
2392
2393         /* Update the rctl since igb_dev_mq_rx_configure may change its value */
2394         rctl |= E1000_READ_REG(hw, E1000_RCTL);
2395
2396         /*
2397          * Setup the Checksum Register.
2398          * Receive Full-Packet Checksum Offload is mutually exclusive with RSS.
2399          */
2400         rxcsum = E1000_READ_REG(hw, E1000_RXCSUM);
2401         rxcsum |= E1000_RXCSUM_PCSD;
2402
2403         /* Enable both L3/L4 rx checksum offload */
2404         if (dev->data->dev_conf.rxmode.hw_ip_checksum)
2405                 rxcsum |= (E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL |
2406                                 E1000_RXCSUM_CRCOFL);
2407         else
2408                 rxcsum &= ~(E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL |
2409                                 E1000_RXCSUM_CRCOFL);
2410         E1000_WRITE_REG(hw, E1000_RXCSUM, rxcsum);
2411
2412         /* Setup the Receive Control Register. */
2413         if (dev->data->dev_conf.rxmode.hw_strip_crc) {
2414                 rctl |= E1000_RCTL_SECRC; /* Strip Ethernet CRC. */
2415
2416                 /* set STRCRC bit in all queues */
2417                 if (hw->mac.type == e1000_i350 ||
2418                     hw->mac.type == e1000_i210 ||
2419                     hw->mac.type == e1000_i211 ||
2420                     hw->mac.type == e1000_i354) {
2421                         for (i = 0; i < dev->data->nb_rx_queues; i++) {
2422                                 rxq = dev->data->rx_queues[i];
2423                                 uint32_t dvmolr = E1000_READ_REG(hw,
2424                                         E1000_DVMOLR(rxq->reg_idx));
2425                                 dvmolr |= E1000_DVMOLR_STRCRC;
2426                                 E1000_WRITE_REG(hw, E1000_DVMOLR(rxq->reg_idx), dvmolr);
2427                         }
2428                 }
2429         } else {
2430                 rctl &= ~E1000_RCTL_SECRC; /* Do not Strip Ethernet CRC. */
2431
2432                 /* clear STRCRC bit in all queues */
2433                 if (hw->mac.type == e1000_i350 ||
2434                     hw->mac.type == e1000_i210 ||
2435                     hw->mac.type == e1000_i211 ||
2436                     hw->mac.type == e1000_i354) {
2437                         for (i = 0; i < dev->data->nb_rx_queues; i++) {
2438                                 rxq = dev->data->rx_queues[i];
2439                                 uint32_t dvmolr = E1000_READ_REG(hw,
2440                                         E1000_DVMOLR(rxq->reg_idx));
2441                                 dvmolr &= ~E1000_DVMOLR_STRCRC;
2442                                 E1000_WRITE_REG(hw, E1000_DVMOLR(rxq->reg_idx), dvmolr);
2443                         }
2444                 }
2445         }
2446
2447         rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
2448         rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
2449                 E1000_RCTL_RDMTS_HALF |
2450                 (hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
2451
2452         /* Make sure VLAN Filters are off. */
2453         if (dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_VMDQ_ONLY)
2454                 rctl &= ~E1000_RCTL_VFE;
2455         /* Don't store bad packets. */
2456         rctl &= ~E1000_RCTL_SBP;
2457
2458         /* Enable Receives. */
2459         E1000_WRITE_REG(hw, E1000_RCTL, rctl);
2460
2461         /*
2462          * Setup the HW Rx Head and Tail Descriptor Pointers.
2463          * This needs to be done after enable.
2464          */
2465         for (i = 0; i < dev->data->nb_rx_queues; i++) {
2466                 rxq = dev->data->rx_queues[i];
2467                 E1000_WRITE_REG(hw, E1000_RDH(rxq->reg_idx), 0);
2468                 E1000_WRITE_REG(hw, E1000_RDT(rxq->reg_idx), rxq->nb_rx_desc - 1);
2469         }
2470
2471         return 0;
2472 }
2473
2474 /*********************************************************************
2475  *
2476  *  Enable transmit unit.
2477  *
2478  **********************************************************************/
2479 void
2480 eth_igb_tx_init(struct rte_eth_dev *dev)
2481 {
2482         struct e1000_hw     *hw;
2483         struct igb_tx_queue *txq;
2484         uint32_t tctl;
2485         uint32_t txdctl;
2486         uint16_t i;
2487
2488         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2489
2490         /* Setup the Base and Length of the Tx Descriptor Rings. */
2491         for (i = 0; i < dev->data->nb_tx_queues; i++) {
2492                 uint64_t bus_addr;
2493                 txq = dev->data->tx_queues[i];
2494                 bus_addr = txq->tx_ring_phys_addr;
2495
2496                 E1000_WRITE_REG(hw, E1000_TDLEN(txq->reg_idx),
2497                                 txq->nb_tx_desc *
2498                                 sizeof(union e1000_adv_tx_desc));
2499                 E1000_WRITE_REG(hw, E1000_TDBAH(txq->reg_idx),
2500                                 (uint32_t)(bus_addr >> 32));
2501                 E1000_WRITE_REG(hw, E1000_TDBAL(txq->reg_idx), (uint32_t)bus_addr);
2502
2503                 /* Setup the HW Tx Head and Tail descriptor pointers. */
2504                 E1000_WRITE_REG(hw, E1000_TDT(txq->reg_idx), 0);
2505                 E1000_WRITE_REG(hw, E1000_TDH(txq->reg_idx), 0);
2506
2507                 /* Setup Transmit threshold registers. */
2508                 txdctl = E1000_READ_REG(hw, E1000_TXDCTL(txq->reg_idx));
2509                 txdctl |= txq->pthresh & 0x1F;
2510                 txdctl |= ((txq->hthresh & 0x1F) << 8);
2511                 txdctl |= ((txq->wthresh & 0x1F) << 16);
2512                 txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
2513                 E1000_WRITE_REG(hw, E1000_TXDCTL(txq->reg_idx), txdctl);
2514         }
2515
2516         /* Program the Transmit Control Register. */
2517         tctl = E1000_READ_REG(hw, E1000_TCTL);
2518         tctl &= ~E1000_TCTL_CT;
2519         tctl |= (E1000_TCTL_PSP | E1000_TCTL_RTLC | E1000_TCTL_EN |
2520                  (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT));
2521
2522         e1000_config_collision_dist(hw);
2523
2524         /* This write will effectively turn on the transmit unit. */
2525         E1000_WRITE_REG(hw, E1000_TCTL, tctl);
2526 }
2527
2528 /*********************************************************************
2529  *
2530  *  Enable VF receive unit.
2531  *
2532  **********************************************************************/
2533 int
2534 eth_igbvf_rx_init(struct rte_eth_dev *dev)
2535 {
2536         struct e1000_hw     *hw;
2537         struct igb_rx_queue *rxq;
2538         uint32_t srrctl;
2539         uint16_t buf_size;
2540         uint16_t rctl_bsize;
2541         uint16_t i;
2542         int ret;
2543
2544         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2545
2546         /* setup MTU */
2547         e1000_rlpml_set_vf(hw,
2548                 (uint16_t)(dev->data->dev_conf.rxmode.max_rx_pkt_len +
2549                 VLAN_TAG_SIZE));
2550
2551         /* Configure and enable each RX queue. */
2552         rctl_bsize = 0;
2553         dev->rx_pkt_burst = eth_igb_recv_pkts;
2554         for (i = 0; i < dev->data->nb_rx_queues; i++) {
2555                 uint64_t bus_addr;
2556                 uint32_t rxdctl;
2557
2558                 rxq = dev->data->rx_queues[i];
2559
2560                 /* Allocate buffers for descriptor rings and set up queue */
2561                 ret = igb_alloc_rx_queue_mbufs(rxq);
2562                 if (ret)
2563                         return ret;
2564
2565                 bus_addr = rxq->rx_ring_phys_addr;
2566                 E1000_WRITE_REG(hw, E1000_RDLEN(i),
2567                                 rxq->nb_rx_desc *
2568                                 sizeof(union e1000_adv_rx_desc));
2569                 E1000_WRITE_REG(hw, E1000_RDBAH(i),
2570                                 (uint32_t)(bus_addr >> 32));
2571                 E1000_WRITE_REG(hw, E1000_RDBAL(i), (uint32_t)bus_addr);
2572
2573                 srrctl = E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
2574
2575                 /*
2576                  * Configure RX buffer size.
2577                  */
2578                 buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) -
2579                         RTE_PKTMBUF_HEADROOM);
2580                 if (buf_size >= 1024) {
2581                         /*
2582                          * Configure the BSIZEPACKET field of the SRRCTL
2583                          * register of the queue.
2584                          * Value is in 1 KB resolution, from 1 KB to 127 KB.
2585                          * If this field is equal to 0b, then RCTL.BSIZE
2586                          * determines the RX packet buffer size.
2587                          */
2588                         srrctl |= ((buf_size >> E1000_SRRCTL_BSIZEPKT_SHIFT) &
2589                                    E1000_SRRCTL_BSIZEPKT_MASK);
                        buf_size = (uint16_t) ((srrctl &
                                                E1000_SRRCTL_BSIZEPKT_MASK) <<
                                               E1000_SRRCTL_BSIZEPKT_SHIFT);

                        /* Allow for a double VLAN tag (QinQ support). */
                        if ((dev->data->dev_conf.rxmode.max_rx_pkt_len +
                                                2 * VLAN_TAG_SIZE) > buf_size) {
                                if (!dev->data->scattered_rx)
                                        PMD_INIT_LOG(DEBUG,
                                                     "forcing scatter mode");
                                dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
                                dev->data->scattered_rx = 1;
                        }
                } else {
                        /*
                         * Use BSIZE field of the device RCTL register.
                         */
                        if ((rctl_bsize == 0) || (rctl_bsize > buf_size))
                                rctl_bsize = buf_size;
                        if (!dev->data->scattered_rx)
                                PMD_INIT_LOG(DEBUG, "forcing scatter mode");
                        dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
                        dev->data->scattered_rx = 1;
                }

                /* Set whether packets are dropped when no descriptors are available. */
                if (rxq->drop_en)
                        srrctl |= E1000_SRRCTL_DROP_EN;

                E1000_WRITE_REG(hw, E1000_SRRCTL(i), srrctl);

                /* Enable this RX queue. */
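                /*
                 * RXDCTL packs the prefetch, host and write-back thresholds
                 * into its low bits (at offsets 0, 8 and 16); the mask below
                 * clears them before the per-queue values are applied.
                 */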
                rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(i));
                rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
                rxdctl &= 0xFFF00000;
                rxdctl |= (rxq->pthresh & 0x1F);
                rxdctl |= ((rxq->hthresh & 0x1F) << 8);
                if (hw->mac.type == e1000_vfadapt) {
                        /*
                         * Workaround for the 82576 VF erratum: force WTHRESH
                         * to 1 to avoid descriptor write-back occasionally
                         * not being triggered.
                         */
                        rxdctl |= 0x10000;
                        PMD_INIT_LOG(DEBUG, "Forcing RX WTHRESH to 1");
                } else {
                        rxdctl |= ((rxq->wthresh & 0x1F) << 16);
                }
                E1000_WRITE_REG(hw, E1000_RXDCTL(i), rxdctl);
        }

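        /*
         * Honor an explicit request for scattered RX even if every queue's
         * buffers turned out to be large enough for max_rx_pkt_len.
         */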
        if (dev->data->dev_conf.rxmode.enable_scatter) {
                if (!dev->data->scattered_rx)
                        PMD_INIT_LOG(DEBUG, "forcing scatter mode");
                dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
                dev->data->scattered_rx = 1;
        }

        /*
         * Set up the HW Rx Head and Tail Descriptor Pointers.
         * This needs to be done after enable.
         */
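        /*
         * RDH starts at 0 and RDT is written with the last descriptor index,
         * which hands all but one descriptor to the hardware (the gap is what
         * distinguishes a full ring from an empty one).
         */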
        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                rxq = dev->data->rx_queues[i];
                E1000_WRITE_REG(hw, E1000_RDH(i), 0);
                E1000_WRITE_REG(hw, E1000_RDT(i), rxq->nb_rx_desc - 1);
        }

        return 0;
}

/*********************************************************************
 *
 *  Enable VF transmit unit.
 *
 **********************************************************************/
void
eth_igbvf_tx_init(struct rte_eth_dev *dev)
{
        struct e1000_hw     *hw;
        struct igb_tx_queue *txq;
        uint32_t txdctl;
        uint16_t i;

        hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

        /* Set up the base and length of the Tx descriptor rings. */
        for (i = 0; i < dev->data->nb_tx_queues; i++) {
                uint64_t bus_addr;

                txq = dev->data->tx_queues[i];
                bus_addr = txq->tx_ring_phys_addr;
                E1000_WRITE_REG(hw, E1000_TDLEN(i),
                                txq->nb_tx_desc *
                                sizeof(union e1000_adv_tx_desc));
                E1000_WRITE_REG(hw, E1000_TDBAH(i),
                                (uint32_t)(bus_addr >> 32));
                E1000_WRITE_REG(hw, E1000_TDBAL(i), (uint32_t)bus_addr);

                /* Set up the HW Tx head and tail descriptor pointers. */
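                /*
                 * Head == tail == 0 starts the queue empty; the transmit path
                 * advances TDT as it hands descriptors to the hardware.
                 */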
                E1000_WRITE_REG(hw, E1000_TDT(i), 0);
                E1000_WRITE_REG(hw, E1000_TDH(i), 0);

                /* Set up the transmit threshold registers. */
                txdctl = E1000_READ_REG(hw, E1000_TXDCTL(i));
                txdctl |= txq->pthresh & 0x1F;
                txdctl |= ((txq->hthresh & 0x1F) << 8);
                if (hw->mac.type == e1000_82576) {
                        /*
                         * Workaround for the 82576 VF erratum: force WTHRESH
                         * to 1 to avoid descriptor write-back occasionally
                         * not being triggered.
                         */
                        txdctl |= 0x10000;
                        PMD_INIT_LOG(DEBUG, "Forcing TX WTHRESH to 1");
                } else {
                        txdctl |= ((txq->wthresh & 0x1F) << 16);
                }
                txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
                E1000_WRITE_REG(hw, E1000_TXDCTL(i), txdctl);
        }
}

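/*
 * Fill rte_eth_rxq_info for an igb RX queue; exposed to applications through
 * the PMD's .rxq_info_get hook, i.e. rte_eth_rx_queue_info_get().
 *
 * Illustrative application-side sketch (port_id is assumed to be a valid,
 * configured port):
 *
 *     struct rte_eth_rxq_info info;
 *
 *     if (rte_eth_rx_queue_info_get(port_id, 0, &info) == 0)
 *             printf("RX queue 0: %u descriptors, drop_en=%u\n",
 *                    info.nb_desc, info.conf.rx_drop_en);
 */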
void
igb_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
        struct rte_eth_rxq_info *qinfo)
{
        struct igb_rx_queue *rxq;

        rxq = dev->data->rx_queues[queue_id];

        qinfo->mp = rxq->mb_pool;
        qinfo->scattered_rx = dev->data->scattered_rx;
        qinfo->nb_desc = rxq->nb_rx_desc;

        qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
        qinfo->conf.rx_drop_en = rxq->drop_en;
}

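/*
 * TX-side counterpart of igb_rxq_info_get(): reports the ring size and the
 * prefetch/host/write-back thresholds through rte_eth_tx_queue_info_get().
 */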
void
igb_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
        struct rte_eth_txq_info *qinfo)
{
        struct igb_tx_queue *txq;

        txq = dev->data->tx_queues[queue_id];

        qinfo->nb_desc = txq->nb_tx_desc;

        qinfo->conf.tx_thresh.pthresh = txq->pthresh;
        qinfo->conf.tx_thresh.hthresh = txq->hthresh;
        qinfo->conf.tx_thresh.wthresh = txq->wthresh;
}