1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33
34 #include <sys/queue.h>
35
36 #include <stdio.h>
37 #include <stdlib.h>
38 #include <string.h>
39 #include <errno.h>
40 #include <stdint.h>
41 #include <stdarg.h>
42 #include <inttypes.h>
43
44 #include <rte_interrupts.h>
45 #include <rte_byteorder.h>
46 #include <rte_common.h>
47 #include <rte_log.h>
48 #include <rte_debug.h>
49 #include <rte_pci.h>
50 #include <rte_memory.h>
51 #include <rte_memcpy.h>
52 #include <rte_memzone.h>
53 #include <rte_launch.h>
54 #include <rte_eal.h>
55 #include <rte_per_lcore.h>
56 #include <rte_lcore.h>
57 #include <rte_atomic.h>
58 #include <rte_branch_prediction.h>
59 #include <rte_ring.h>
60 #include <rte_mempool.h>
61 #include <rte_malloc.h>
62 #include <rte_mbuf.h>
63 #include <rte_ether.h>
64 #include <rte_ethdev.h>
65 #include <rte_prefetch.h>
66 #include <rte_udp.h>
67 #include <rte_tcp.h>
68 #include <rte_sctp.h>
69 #include <rte_string_fns.h>
70
71 #include "e1000_logs.h"
72 #include "base/e1000_api.h"
73 #include "e1000_ethdev.h"
74
75 /* Bit Mask to indicate what bits required for building TX context */
76 #define IGB_TX_OFFLOAD_MASK (                    \
77                 PKT_TX_VLAN_PKT |                \
78                 PKT_TX_IP_CKSUM |                \
79                 PKT_TX_L4_MASK |                 \
80                 PKT_TX_TCP_SEG)
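/*
 * Illustrative sketch (not part of the upstream driver): a caller typically
 * requests the offloads covered by IGB_TX_OFFLOAD_MASK by filling the mbuf
 * metadata before handing the packet to the PMD, e.g. for TSO over IPv4
 * (tcp_header_len and mss are hypothetical application variables):
 *
 *      m->ol_flags |= PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM | PKT_TX_TCP_SEG;
 *      m->l2_len    = sizeof(struct ether_hdr);
 *      m->l3_len    = sizeof(struct ipv4_hdr);
 *      m->l4_len    = tcp_header_len;
 *      m->tso_segsz = mss;
 *
 * Only the bits selected by IGB_TX_OFFLOAD_MASK are honoured by
 * eth_igb_xmit_pkts() below.
 */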
81
82 static inline struct rte_mbuf *
83 rte_rxmbuf_alloc(struct rte_mempool *mp)
84 {
85         struct rte_mbuf *m;
86
87         m = __rte_mbuf_raw_alloc(mp);
88         __rte_mbuf_sanity_check_raw(m, 0);
89         return m;
90 }
91
92 /**
93  * Structure associated with each descriptor of the RX ring of a RX queue.
94  */
95 struct igb_rx_entry {
96         struct rte_mbuf *mbuf; /**< mbuf associated with RX descriptor. */
97 };
98
99 /**
100  * Structure associated with each descriptor of the TX ring of a TX queue.
101  */
102 struct igb_tx_entry {
103         struct rte_mbuf *mbuf; /**< mbuf associated with TX desc, if any. */
104         uint16_t next_id; /**< Index of next descriptor in ring. */
105         uint16_t last_id; /**< Index of last scattered descriptor. */
106 };
107
108 /**
109  * Structure associated with each RX queue.
110  */
111 struct igb_rx_queue {
112         struct rte_mempool  *mb_pool;   /**< mbuf pool to populate RX ring. */
113         volatile union e1000_adv_rx_desc *rx_ring; /**< RX ring virtual address. */
114         uint64_t            rx_ring_phys_addr; /**< RX ring DMA address. */
115         volatile uint32_t   *rdt_reg_addr; /**< RDT register address. */
116         volatile uint32_t   *rdh_reg_addr; /**< RDH register address. */
117         struct igb_rx_entry *sw_ring;   /**< address of RX software ring. */
118         struct rte_mbuf *pkt_first_seg; /**< First segment of current packet. */
119         struct rte_mbuf *pkt_last_seg;  /**< Last segment of current packet. */
120         uint16_t            nb_rx_desc; /**< number of RX descriptors. */
121         uint16_t            rx_tail;    /**< current value of RDT register. */
122         uint16_t            nb_rx_hold; /**< number of held free RX desc. */
123         uint16_t            rx_free_thresh; /**< max free RX desc to hold. */
124         uint16_t            queue_id;   /**< RX queue index. */
125         uint16_t            reg_idx;    /**< RX queue register index. */
126         uint8_t             port_id;    /**< Device port identifier. */
127         uint8_t             pthresh;    /**< Prefetch threshold register. */
128         uint8_t             hthresh;    /**< Host threshold register. */
129         uint8_t             wthresh;    /**< Write-back threshold register. */
130         uint8_t             crc_len;    /**< 0 if CRC stripped, 4 otherwise. */
131         uint8_t             drop_en;  /**< If not 0, set SRRCTL.Drop_En. */
132 };
133
134 /**
135  * Hardware context number
136  */
137 enum igb_advctx_num {
138         IGB_CTX_0    = 0, /**< CTX0    */
139         IGB_CTX_1    = 1, /**< CTX1    */
140         IGB_CTX_NUM  = 2, /**< CTX_NUM */
141 };
142
143 /** Offload features */
144 union igb_tx_offload {
145         uint64_t data;
146         struct {
147                 uint64_t l3_len:9; /**< L3 (IP) Header Length. */
148                 uint64_t l2_len:7; /**< L2 (MAC) Header Length. */
149                 uint64_t vlan_tci:16;  /**< VLAN Tag Control Identifier(CPU order). */
150                 uint64_t l4_len:8; /**< L4 (TCP/UDP) Header Length. */
151                 uint64_t tso_segsz:16; /**< TCP TSO segment size. */
152
153                 /* uint64_t unused:8; */
154         };
155 };
156
157 /*
158  * Compare mask for igb_tx_offload.data,
159  * should be in sync with igb_tx_offload layout.
160  */
161 #define TX_MACIP_LEN_CMP_MASK   0x000000000000FFFFULL /**< L2L3 header mask. */
162 #define TX_VLAN_CMP_MASK                0x00000000FFFF0000ULL /**< Vlan mask. */
163 #define TX_TCP_LEN_CMP_MASK             0x000000FF00000000ULL /**< TCP header mask. */
164 #define TX_TSO_MSS_CMP_MASK             0x00FFFF0000000000ULL /**< TSO segsz mask. */
165 /** MAC + IP + TCP + MSS mask. */
166 #define TX_TSO_CMP_MASK \
167         (TX_MACIP_LEN_CMP_MASK | TX_TCP_LEN_CMP_MASK | TX_TSO_MSS_CMP_MASK)
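/*
 * Note (added for clarity): the compare masks above select whole fields of
 * the igb_tx_offload bit-field, assuming the low-to-high bit-field layout
 * used by the compilers DPDK targets:
 *   l3_len(9) + l2_len(7)  -> bits  0-15 -> TX_MACIP_LEN_CMP_MASK,
 *   vlan_tci(16)           -> bits 16-31 -> TX_VLAN_CMP_MASK,
 *   l4_len(8)              -> bits 32-39 -> TX_TCP_LEN_CMP_MASK,
 *   tso_segsz(16)          -> bits 40-55 -> TX_TSO_MSS_CMP_MASK.
 */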
168
169 /**
170  * Structure to check whether a new context descriptor needs to be built.
171  */
172 struct igb_advctx_info {
173         uint64_t flags;           /**< ol_flags related to context build. */
174         /** tx offload: vlan, tso, l2-l3-l4 lengths. */
175         union igb_tx_offload tx_offload;
176         /** compare mask for tx offload. */
177         union igb_tx_offload tx_offload_mask;
178 };
179
180 /**
181  * Structure associated with each TX queue.
182  */
183 struct igb_tx_queue {
184         volatile union e1000_adv_tx_desc *tx_ring; /**< TX ring address */
185         uint64_t               tx_ring_phys_addr; /**< TX ring DMA address. */
186         struct igb_tx_entry    *sw_ring; /**< virtual address of SW ring. */
187         volatile uint32_t      *tdt_reg_addr; /**< Address of TDT register. */
188         uint32_t               txd_type;      /**< Device-specific TXD type */
189         uint16_t               nb_tx_desc;    /**< number of TX descriptors. */
190         uint16_t               tx_tail; /**< Current value of TDT register. */
191         uint16_t               tx_head;
192         /**< Index of first used TX descriptor. */
193         uint16_t               queue_id; /**< TX queue index. */
194         uint16_t               reg_idx;  /**< TX queue register index. */
195         uint8_t                port_id;  /**< Device port identifier. */
196         uint8_t                pthresh;  /**< Prefetch threshold register. */
197         uint8_t                hthresh;  /**< Host threshold register. */
198         uint8_t                wthresh;  /**< Write-back threshold register. */
199         uint32_t               ctx_curr;
200         /**< Index of the currently used hardware context. */
201         uint32_t               ctx_start;
202         /**< Start context position for transmit queue. */
203         struct igb_advctx_info ctx_cache[IGB_CTX_NUM];
204         /**< Hardware context history.*/
205 };
206
207 #if 1
208 #define RTE_PMD_USE_PREFETCH
209 #endif
210
211 #ifdef RTE_PMD_USE_PREFETCH
212 #define rte_igb_prefetch(p)     rte_prefetch0(p)
213 #else
214 #define rte_igb_prefetch(p)     do {} while(0)
215 #endif
216
217 #ifdef RTE_PMD_PACKET_PREFETCH
218 #define rte_packet_prefetch(p) rte_prefetch1(p)
219 #else
220 #define rte_packet_prefetch(p)  do {} while(0)
221 #endif
222
223 /*
224  * Macros for the VMDq feature and TSO limits of the 1 GbE NIC.
225  */
226 #define E1000_VMOLR_SIZE                        (8)
227 #define IGB_TSO_MAX_HDRLEN                      (512)
228 #define IGB_TSO_MAX_MSS                         (9216)
229
230 /*********************************************************************
231  *
232  *  TX function
233  *
234  **********************************************************************/
235
236 /*
237  * The hardware imposes some limitations on TCP segmentation offload, so we
238  * should check whether the requested parameters are valid.
239  */
240 static inline uint64_t
241 check_tso_para(uint64_t ol_req, union igb_tx_offload ol_para)
242 {
243         if (!(ol_req & PKT_TX_TCP_SEG))
244                 return ol_req;
245         if ((ol_para.tso_segsz > IGB_TSO_MAX_MSS) || (ol_para.l2_len +
246                         ol_para.l3_len + ol_para.l4_len > IGB_TSO_MAX_HDRLEN)) {
247                 ol_req &= ~PKT_TX_TCP_SEG;
248                 ol_req |= PKT_TX_TCP_CKSUM;
249         }
250         return ol_req;
251 }
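/*
 * Example (informational): a request with tso_segsz = 16000 exceeds
 * IGB_TSO_MAX_MSS (9216), so check_tso_para() clears PKT_TX_TCP_SEG and
 * falls back to plain TCP checksum offload (PKT_TX_TCP_CKSUM); the same
 * happens when l2_len + l3_len + l4_len exceeds IGB_TSO_MAX_HDRLEN (512).
 */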
252
253 /*
254  * Advanced context descriptors are almost the same between igb and ixgbe.
255  * This is kept as a separate function to leave room for optimization here;
256  * rework is required to go with the pre-defined values.
257  */
258
259 static inline void
260 igbe_set_xmit_ctx(struct igb_tx_queue* txq,
261                 volatile struct e1000_adv_tx_context_desc *ctx_txd,
262                 uint64_t ol_flags, union igb_tx_offload tx_offload)
263 {
264         uint32_t type_tucmd_mlhl;
265         uint32_t mss_l4len_idx;
266         uint32_t ctx_idx, ctx_curr;
267         uint32_t vlan_macip_lens;
268         union igb_tx_offload tx_offload_mask;
269
270         ctx_curr = txq->ctx_curr;
271         ctx_idx = ctx_curr + txq->ctx_start;
272
273         tx_offload_mask.data = 0;
274         type_tucmd_mlhl = 0;
275
276         /* Specify which HW CTX to upload. */
277         mss_l4len_idx = (ctx_idx << E1000_ADVTXD_IDX_SHIFT);
278
279         if (ol_flags & PKT_TX_VLAN_PKT)
280                 tx_offload_mask.data |= TX_VLAN_CMP_MASK;
281
282         /* check if TCP segmentation is required for this packet */
283         if (ol_flags & PKT_TX_TCP_SEG) {
284                 /* implies IP cksum in IPv4 */
285                 if (ol_flags & PKT_TX_IP_CKSUM)
286                         type_tucmd_mlhl = E1000_ADVTXD_TUCMD_IPV4 |
287                                 E1000_ADVTXD_TUCMD_L4T_TCP |
288                                 E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
289                 else
290                         type_tucmd_mlhl = E1000_ADVTXD_TUCMD_IPV6 |
291                                 E1000_ADVTXD_TUCMD_L4T_TCP |
292                                 E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
293
294                 tx_offload_mask.data |= TX_TSO_CMP_MASK;
295                 mss_l4len_idx |= tx_offload.tso_segsz << E1000_ADVTXD_MSS_SHIFT;
296                 mss_l4len_idx |= tx_offload.l4_len << E1000_ADVTXD_L4LEN_SHIFT;
297         } else { /* no TSO, check if hardware checksum is needed */
298                 if (ol_flags & (PKT_TX_IP_CKSUM | PKT_TX_L4_MASK))
299                         tx_offload_mask.data |= TX_MACIP_LEN_CMP_MASK;
300
301                 if (ol_flags & PKT_TX_IP_CKSUM)
302                         type_tucmd_mlhl = E1000_ADVTXD_TUCMD_IPV4;
303
304                 switch (ol_flags & PKT_TX_L4_MASK) {
305                 case PKT_TX_UDP_CKSUM:
306                         type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_UDP |
307                                 E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
308                         mss_l4len_idx |= sizeof(struct udp_hdr) << E1000_ADVTXD_L4LEN_SHIFT;
309                         break;
310                 case PKT_TX_TCP_CKSUM:
311                         type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_TCP |
312                                 E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
313                         mss_l4len_idx |= sizeof(struct tcp_hdr) << E1000_ADVTXD_L4LEN_SHIFT;
314                         break;
315                 case PKT_TX_SCTP_CKSUM:
316                         type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_SCTP |
317                                 E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
318                         mss_l4len_idx |= sizeof(struct sctp_hdr) << E1000_ADVTXD_L4LEN_SHIFT;
319                         break;
320                 default:
321                         type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_RSV |
322                                 E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
323                         break;
324                 }
325         }
326
327         txq->ctx_cache[ctx_curr].flags = ol_flags;
328         txq->ctx_cache[ctx_curr].tx_offload.data =
329                 tx_offload_mask.data & tx_offload.data;
330         txq->ctx_cache[ctx_curr].tx_offload_mask = tx_offload_mask;
331
332         ctx_txd->type_tucmd_mlhl = rte_cpu_to_le_32(type_tucmd_mlhl);
333         vlan_macip_lens = (uint32_t)tx_offload.data;
334         ctx_txd->vlan_macip_lens = rte_cpu_to_le_32(vlan_macip_lens);
335         ctx_txd->mss_l4len_idx = rte_cpu_to_le_32(mss_l4len_idx);
336         ctx_txd->seqnum_seed = 0;
337 }
338
339 /*
340  * Check which hardware context can be used. Use the existing match
341  * or create a new context descriptor.
342  */
343 static inline uint32_t
344 what_advctx_update(struct igb_tx_queue *txq, uint64_t flags,
345                 union igb_tx_offload tx_offload)
346 {
347         /* If it matches the current context */
348         if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) &&
349                 (txq->ctx_cache[txq->ctx_curr].tx_offload.data ==
350                 (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data & tx_offload.data)))) {
351                         return txq->ctx_curr;
352         }
353
354         /* If it matches the second context */
355         txq->ctx_curr ^= 1;
356         if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) &&
357                 (txq->ctx_cache[txq->ctx_curr].tx_offload.data ==
358                 (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data & tx_offload.data)))) {
359                         return txq->ctx_curr;
360         }
361
362         /* Mismatch: a new context descriptor must be built */
363         return IGB_CTX_NUM;
364 }
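/*
 * Note (added for clarity): a return value of IGB_CTX_NUM means neither of
 * the two cached contexts matches, so eth_igb_xmit_pkts() must write a fresh
 * context descriptor via igbe_set_xmit_ctx() before the data descriptors.
 */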
365
366 static inline uint32_t
367 tx_desc_cksum_flags_to_olinfo(uint64_t ol_flags)
368 {
369         static const uint32_t l4_olinfo[2] = {0, E1000_ADVTXD_POPTS_TXSM};
370         static const uint32_t l3_olinfo[2] = {0, E1000_ADVTXD_POPTS_IXSM};
371         uint32_t tmp;
372
373         tmp  = l4_olinfo[(ol_flags & PKT_TX_L4_MASK)  != PKT_TX_L4_NO_CKSUM];
374         tmp |= l3_olinfo[(ol_flags & PKT_TX_IP_CKSUM) != 0];
375         tmp |= l4_olinfo[(ol_flags & PKT_TX_TCP_SEG) != 0];
376         return tmp;
377 }
378
379 static inline uint32_t
380 tx_desc_vlan_flags_to_cmdtype(uint64_t ol_flags)
381 {
382         uint32_t cmdtype;
383         static uint32_t vlan_cmd[2] = {0, E1000_ADVTXD_DCMD_VLE};
384         static uint32_t tso_cmd[2] = {0, E1000_ADVTXD_DCMD_TSE};
385         cmdtype = vlan_cmd[(ol_flags & PKT_TX_VLAN_PKT) != 0];
386         cmdtype |= tso_cmd[(ol_flags & PKT_TX_TCP_SEG) != 0];
387         return cmdtype;
388 }
389
390 uint16_t
391 eth_igb_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
392                uint16_t nb_pkts)
393 {
394         struct igb_tx_queue *txq;
395         struct igb_tx_entry *sw_ring;
396         struct igb_tx_entry *txe, *txn;
397         volatile union e1000_adv_tx_desc *txr;
398         volatile union e1000_adv_tx_desc *txd;
399         struct rte_mbuf     *tx_pkt;
400         struct rte_mbuf     *m_seg;
401         uint64_t buf_dma_addr;
402         uint32_t olinfo_status;
403         uint32_t cmd_type_len;
404         uint32_t pkt_len;
405         uint16_t slen;
406         uint64_t ol_flags;
407         uint16_t tx_end;
408         uint16_t tx_id;
409         uint16_t tx_last;
410         uint16_t nb_tx;
411         uint64_t tx_ol_req;
412         uint32_t new_ctx = 0;
413         uint32_t ctx = 0;
414         union igb_tx_offload tx_offload = {0};
415
416         txq = tx_queue;
417         sw_ring = txq->sw_ring;
418         txr     = txq->tx_ring;
419         tx_id   = txq->tx_tail;
420         txe = &sw_ring[tx_id];
421
422         for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
423                 tx_pkt = *tx_pkts++;
424                 pkt_len = tx_pkt->pkt_len;
425
426                 RTE_MBUF_PREFETCH_TO_FREE(txe->mbuf);
427
428                 /*
429                  * The number of descriptors that must be allocated for a
430                  * packet is the number of segments of that packet, plus 1
431                  * Context Descriptor if any hardware offload is requested.
432                  * Determine the last TX descriptor to allocate in the TX ring
433                  * for the packet, starting from the current position (tx_id)
434                  * in the ring.
435                  */
436                 tx_last = (uint16_t) (tx_id + tx_pkt->nb_segs - 1);
437
438                 ol_flags = tx_pkt->ol_flags;
439                 tx_ol_req = ol_flags & IGB_TX_OFFLOAD_MASK;
440
441                 /* If a Context Descriptor needs to be built. */
442                 if (tx_ol_req) {
443                         tx_offload.l2_len = tx_pkt->l2_len;
444                         tx_offload.l3_len = tx_pkt->l3_len;
445                         tx_offload.l4_len = tx_pkt->l4_len;
446                         tx_offload.vlan_tci = tx_pkt->vlan_tci;
447                         tx_offload.tso_segsz = tx_pkt->tso_segsz;
448                         tx_ol_req = check_tso_para(tx_ol_req, tx_offload);
449
450                         ctx = what_advctx_update(txq, tx_ol_req, tx_offload);
451                         /* Only allocate a context descriptor if required. */
452                         new_ctx = (ctx == IGB_CTX_NUM);
453                         ctx = txq->ctx_curr + txq->ctx_start;
454                         tx_last = (uint16_t) (tx_last + new_ctx);
455                 }
456                 if (tx_last >= txq->nb_tx_desc)
457                         tx_last = (uint16_t) (tx_last - txq->nb_tx_desc);
458
459                 PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u pktlen=%u"
460                            " tx_first=%u tx_last=%u",
461                            (unsigned) txq->port_id,
462                            (unsigned) txq->queue_id,
463                            (unsigned) pkt_len,
464                            (unsigned) tx_id,
465                            (unsigned) tx_last);
466
467                 /*
468                  * Check if there are enough free descriptors in the TX ring
469                  * to transmit the next packet.
470                  * This operation is based on the two following rules:
471                  *
472                  *   1- Only check that the last needed TX descriptor can be
473                  *      allocated (by construction, if that descriptor is free,
474                  *      all intermediate ones are also free).
475                  *
476                  *      For this purpose, the index of the last TX descriptor
477                  *      used for a packet (the "last descriptor" of a packet)
478                  *      is recorded in the TX entries (the last one included)
479                  *      that are associated with all TX descriptors allocated
480                  *      for that packet.
481                  *
482                  *   2- Avoid allocating the last free TX descriptor of the
483                  *      ring, in order to never set the TDT register with the
484                  *      same value stored in parallel by the NIC in the TDH
485                  *      register, which would make the TX engine of the NIC
486                  *      enter a deadlock situation.
487                  *
488                  *      By extension, avoid allocating a free descriptor that
489                  *      belongs to the last set of free descriptors allocated
490                  *      to the same packet previously transmitted.
491                  */
492
493                 /*
494                  * The "last descriptor" of the packet, if any, that previously
495                  * used the descriptor we now want to allocate as our last one.
496                  */
497                 tx_end = sw_ring[tx_last].last_id;
498
499                 /*
500                  * The next descriptor following that "last descriptor" in the
501                  * ring.
502                  */
503                 tx_end = sw_ring[tx_end].next_id;
504
505                 /*
506                  * The "last descriptor" associated with that next descriptor.
507                  */
508                 tx_end = sw_ring[tx_end].last_id;
509
510                 /*
511                  * Check that this descriptor is free.
512                  */
513                 if (! (txr[tx_end].wb.status & E1000_TXD_STAT_DD)) {
514                         if (nb_tx == 0)
515                                 return 0;
516                         goto end_of_tx;
517                 }
518
519                 /*
520                  * Set common flags of all TX Data Descriptors.
521                  *
522                  * The following bits must be set in all Data Descriptors:
523                  *   - E1000_ADVTXD_DTYP_DATA
524                  *   - E1000_ADVTXD_DCMD_DEXT
525                  *
526                  * The following bits must be set in the first Data Descriptor
527                  * and are ignored in the other ones:
528                  *   - E1000_ADVTXD_DCMD_IFCS
529                  *   - E1000_ADVTXD_MAC_1588
530                  *   - E1000_ADVTXD_DCMD_VLE
531                  *
532                  * The following bits must only be set in the last Data
533                  * Descriptor:
534                  *   - E1000_TXD_CMD_EOP
535                  *
536                  * The following bits can be set in any Data Descriptor, but
537                  * are only set in the last Data Descriptor:
538                  *   - E1000_TXD_CMD_RS
539                  */
540                 cmd_type_len = txq->txd_type |
541                         E1000_ADVTXD_DCMD_IFCS | E1000_ADVTXD_DCMD_DEXT;
542                 if (tx_ol_req & PKT_TX_TCP_SEG)
543                         pkt_len -= (tx_pkt->l2_len + tx_pkt->l3_len + tx_pkt->l4_len);
544                 olinfo_status = (pkt_len << E1000_ADVTXD_PAYLEN_SHIFT);
545 #if defined(RTE_LIBRTE_IEEE1588)
546                 if (ol_flags & PKT_TX_IEEE1588_TMST)
547                         cmd_type_len |= E1000_ADVTXD_MAC_TSTAMP;
548 #endif
549                 if (tx_ol_req) {
550                         /* Setup TX Advanced context descriptor if required */
551                         if (new_ctx) {
552                                 volatile struct e1000_adv_tx_context_desc *
553                                     ctx_txd;
554
555                                 ctx_txd = (volatile struct
556                                     e1000_adv_tx_context_desc *)
557                                     &txr[tx_id];
558
559                                 txn = &sw_ring[txe->next_id];
560                                 RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);
561
562                                 if (txe->mbuf != NULL) {
563                                         rte_pktmbuf_free_seg(txe->mbuf);
564                                         txe->mbuf = NULL;
565                                 }
566
567                                 igbe_set_xmit_ctx(txq, ctx_txd, tx_ol_req, tx_offload);
568
569                                 txe->last_id = tx_last;
570                                 tx_id = txe->next_id;
571                                 txe = txn;
572                         }
573
574                         /* Setup the TX Advanced Data Descriptor */
575                         cmd_type_len  |= tx_desc_vlan_flags_to_cmdtype(tx_ol_req);
576                         olinfo_status |= tx_desc_cksum_flags_to_olinfo(tx_ol_req);
577                         olinfo_status |= (ctx << E1000_ADVTXD_IDX_SHIFT);
578                 }
579
580                 m_seg = tx_pkt;
581                 do {
582                         txn = &sw_ring[txe->next_id];
583                         txd = &txr[tx_id];
584
585                         if (txe->mbuf != NULL)
586                                 rte_pktmbuf_free_seg(txe->mbuf);
587                         txe->mbuf = m_seg;
588
589                         /*
590                          * Set up transmit descriptor.
591                          */
592                         slen = (uint16_t) m_seg->data_len;
593                         buf_dma_addr = rte_mbuf_data_dma_addr(m_seg);
594                         txd->read.buffer_addr =
595                                 rte_cpu_to_le_64(buf_dma_addr);
596                         txd->read.cmd_type_len =
597                                 rte_cpu_to_le_32(cmd_type_len | slen);
598                         txd->read.olinfo_status =
599                                 rte_cpu_to_le_32(olinfo_status);
600                         txe->last_id = tx_last;
601                         tx_id = txe->next_id;
602                         txe = txn;
603                         m_seg = m_seg->next;
604                 } while (m_seg != NULL);
605
606                 /*
607                  * The last packet data descriptor needs End Of Packet (EOP)
608                  * and Report Status (RS).
609                  */
610                 txd->read.cmd_type_len |=
611                         rte_cpu_to_le_32(E1000_TXD_CMD_EOP | E1000_TXD_CMD_RS);
612         }
613  end_of_tx:
614         rte_wmb();
615
616         /*
617          * Set the Transmit Descriptor Tail (TDT).
618          */
619         E1000_PCI_REG_WRITE(txq->tdt_reg_addr, tx_id);
620         PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u",
621                    (unsigned) txq->port_id, (unsigned) txq->queue_id,
622                    (unsigned) tx_id, (unsigned) nb_tx);
623         txq->tx_tail = tx_id;
624
625         return nb_tx;
626 }
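/*
 * Usage sketch (not part of the upstream file): this function is installed
 * as the device's tx_pkt_burst callback by the igb ethdev code, so an
 * application reaches it indirectly through the generic burst API, e.g.:
 *
 *      uint16_t sent = rte_eth_tx_burst(port_id, queue_id, pkts, nb_pkts);
 *
 * where port_id, queue_id, pkts and nb_pkts are application-supplied values.
 */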
627
628 /*********************************************************************
629  *
630  *  RX functions
631  *
632  **********************************************************************/
633 #define IGB_PACKET_TYPE_IPV4              0X01
634 #define IGB_PACKET_TYPE_IPV4_TCP          0X11
635 #define IGB_PACKET_TYPE_IPV4_UDP          0X21
636 #define IGB_PACKET_TYPE_IPV4_SCTP         0X41
637 #define IGB_PACKET_TYPE_IPV4_EXT          0X03
638 #define IGB_PACKET_TYPE_IPV4_EXT_SCTP     0X43
639 #define IGB_PACKET_TYPE_IPV6              0X04
640 #define IGB_PACKET_TYPE_IPV6_TCP          0X14
641 #define IGB_PACKET_TYPE_IPV6_UDP          0X24
642 #define IGB_PACKET_TYPE_IPV6_EXT          0X0C
643 #define IGB_PACKET_TYPE_IPV6_EXT_TCP      0X1C
644 #define IGB_PACKET_TYPE_IPV6_EXT_UDP      0X2C
645 #define IGB_PACKET_TYPE_IPV4_IPV6         0X05
646 #define IGB_PACKET_TYPE_IPV4_IPV6_TCP     0X15
647 #define IGB_PACKET_TYPE_IPV4_IPV6_UDP     0X25
648 #define IGB_PACKET_TYPE_IPV4_IPV6_EXT     0X0D
649 #define IGB_PACKET_TYPE_IPV4_IPV6_EXT_TCP 0X1D
650 #define IGB_PACKET_TYPE_IPV4_IPV6_EXT_UDP 0X2D
651 #define IGB_PACKET_TYPE_MAX               0X80
652 #define IGB_PACKET_TYPE_MASK              0X7F
653 #define IGB_PACKET_TYPE_SHIFT             0X04
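/*
 * Note (added for clarity): the values above mirror the Packet Type field of
 * the advanced RX descriptor. igb_rxd_pkt_info_to_pkt_type() below drops the
 * low IGB_PACKET_TYPE_SHIFT bits of pkt_info and masks the result with
 * IGB_PACKET_TYPE_MASK to index ptype_table, i.e. (pkt_info >> 4) & 0x7F.
 */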
654 static inline uint32_t
655 igb_rxd_pkt_info_to_pkt_type(uint16_t pkt_info)
656 {
657         static const uint32_t
658                 ptype_table[IGB_PACKET_TYPE_MAX] __rte_cache_aligned = {
659                 [IGB_PACKET_TYPE_IPV4] = RTE_PTYPE_L2_ETHER |
660                         RTE_PTYPE_L3_IPV4,
661                 [IGB_PACKET_TYPE_IPV4_EXT] = RTE_PTYPE_L2_ETHER |
662                         RTE_PTYPE_L3_IPV4_EXT,
663                 [IGB_PACKET_TYPE_IPV6] = RTE_PTYPE_L2_ETHER |
664                         RTE_PTYPE_L3_IPV6,
665                 [IGB_PACKET_TYPE_IPV4_IPV6] = RTE_PTYPE_L2_ETHER |
666                         RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
667                         RTE_PTYPE_INNER_L3_IPV6,
668                 [IGB_PACKET_TYPE_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
669                         RTE_PTYPE_L3_IPV6_EXT,
670                 [IGB_PACKET_TYPE_IPV4_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
671                         RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
672                         RTE_PTYPE_INNER_L3_IPV6_EXT,
673                 [IGB_PACKET_TYPE_IPV4_TCP] = RTE_PTYPE_L2_ETHER |
674                         RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP,
675                 [IGB_PACKET_TYPE_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
676                         RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP,
677                 [IGB_PACKET_TYPE_IPV4_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
678                         RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
679                         RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_TCP,
680                 [IGB_PACKET_TYPE_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER |
681                         RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_TCP,
682                 [IGB_PACKET_TYPE_IPV4_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER |
683                         RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
684                         RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_TCP,
685                 [IGB_PACKET_TYPE_IPV4_UDP] = RTE_PTYPE_L2_ETHER |
686                         RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP,
687                 [IGB_PACKET_TYPE_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
688                         RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP,
689                 [IGB_PACKET_TYPE_IPV4_IPV6_UDP] =  RTE_PTYPE_L2_ETHER |
690                         RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
691                         RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_UDP,
692                 [IGB_PACKET_TYPE_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER |
693                         RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_UDP,
694                 [IGB_PACKET_TYPE_IPV4_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER |
695                         RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
696                         RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_UDP,
697                 [IGB_PACKET_TYPE_IPV4_SCTP] = RTE_PTYPE_L2_ETHER |
698                         RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_SCTP,
699                 [IGB_PACKET_TYPE_IPV4_EXT_SCTP] = RTE_PTYPE_L2_ETHER |
700                         RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_SCTP,
701         };
702         if (unlikely(pkt_info & E1000_RXDADV_PKTTYPE_ETQF))
703                 return RTE_PTYPE_UNKNOWN;
704
705         pkt_info = (pkt_info >> IGB_PACKET_TYPE_SHIFT) & IGB_PACKET_TYPE_MASK;
706
707         return ptype_table[pkt_info];
708 }
709
710 static inline uint64_t
711 rx_desc_hlen_type_rss_to_pkt_flags(struct igb_rx_queue *rxq, uint32_t hl_tp_rs)
712 {
713         uint64_t pkt_flags = ((hl_tp_rs & 0x0F) == 0) ?  0 : PKT_RX_RSS_HASH;
714
715 #if defined(RTE_LIBRTE_IEEE1588)
716         static uint32_t ip_pkt_etqf_map[8] = {
717                 0, 0, 0, PKT_RX_IEEE1588_PTP,
718                 0, 0, 0, 0,
719         };
720
721         struct rte_eth_dev dev = rte_eth_devices[rxq->port_id];
722         struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev.data->dev_private);
723
724         /* EtherType is in bits 8:10 in Packet Type, and not in the default 0:2 */
725         if (hw->mac.type == e1000_i210)
726                 pkt_flags |= ip_pkt_etqf_map[(hl_tp_rs >> 12) & 0x07];
727         else
728                 pkt_flags |= ip_pkt_etqf_map[(hl_tp_rs >> 4) & 0x07];
729 #else
730         RTE_SET_USED(rxq);
731 #endif
732
733         return pkt_flags;
734 }
735
736 static inline uint64_t
737 rx_desc_status_to_pkt_flags(uint32_t rx_status)
738 {
739         uint64_t pkt_flags;
740
741         /* Check if VLAN present */
742         pkt_flags = (rx_status & E1000_RXD_STAT_VP) ?  PKT_RX_VLAN_PKT : 0;
743
744 #if defined(RTE_LIBRTE_IEEE1588)
745         if (rx_status & E1000_RXD_STAT_TMST)
746                 pkt_flags = pkt_flags | PKT_RX_IEEE1588_TMST;
747 #endif
748         return pkt_flags;
749 }
750
751 static inline uint64_t
752 rx_desc_error_to_pkt_flags(uint32_t rx_status)
753 {
754         /*
755          * Bit 30: IPE, IPv4 checksum error
756          * Bit 29: L4I, L4 integrity error
757          */
758
759         static uint64_t error_to_pkt_flags_map[4] = {
760                 0,  PKT_RX_L4_CKSUM_BAD, PKT_RX_IP_CKSUM_BAD,
761                 PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD
762         };
763         return error_to_pkt_flags_map[(rx_status >>
764                 E1000_RXD_ERR_CKSUM_BIT) & E1000_RXD_ERR_CKSUM_MSK];
765 }
766
767 uint16_t
768 eth_igb_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
769                uint16_t nb_pkts)
770 {
771         struct igb_rx_queue *rxq;
772         volatile union e1000_adv_rx_desc *rx_ring;
773         volatile union e1000_adv_rx_desc *rxdp;
774         struct igb_rx_entry *sw_ring;
775         struct igb_rx_entry *rxe;
776         struct rte_mbuf *rxm;
777         struct rte_mbuf *nmb;
778         union e1000_adv_rx_desc rxd;
779         uint64_t dma_addr;
780         uint32_t staterr;
781         uint32_t hlen_type_rss;
782         uint16_t pkt_len;
783         uint16_t rx_id;
784         uint16_t nb_rx;
785         uint16_t nb_hold;
786         uint64_t pkt_flags;
787
788         nb_rx = 0;
789         nb_hold = 0;
790         rxq = rx_queue;
791         rx_id = rxq->rx_tail;
792         rx_ring = rxq->rx_ring;
793         sw_ring = rxq->sw_ring;
794         while (nb_rx < nb_pkts) {
795                 /*
796                  * The order of operations here is important as the DD status
797                  * bit must not be read after any other descriptor fields.
798                  * rx_ring and rxdp are pointing to volatile data so the order
799                  * of accesses cannot be reordered by the compiler. If they were
800                  * not volatile, they could be reordered which could lead to
801                  * using invalid descriptor fields when read from rxd.
802                  */
803                 rxdp = &rx_ring[rx_id];
804                 staterr = rxdp->wb.upper.status_error;
805                 if (! (staterr & rte_cpu_to_le_32(E1000_RXD_STAT_DD)))
806                         break;
807                 rxd = *rxdp;
808
809                 /*
810                  * End of packet.
811                  *
812                  * If the E1000_RXD_STAT_EOP flag is not set, the RX packet is
813                  * likely to be invalid and to be dropped by the various
814                  * validation checks performed by the network stack.
815                  *
816                  * Allocate a new mbuf to replenish the RX ring descriptor.
817                  * If the allocation fails:
818                  *    - arrange for that RX descriptor to be the first one
819                  *      being parsed the next time the receive function is
820                  *      invoked [on the same queue].
821                  *
822                  *    - Stop parsing the RX ring and return immediately.
823                  *
824                  * This policy does not drop the packet received in the RX
825                  * descriptor for which the allocation of a new mbuf failed.
826                  * Thus, it allows that packet to be retrieved later, once
827                  * mbufs have been freed in the meantime.
828                  * As a side effect, holding RX descriptors instead of
829                  * systematically giving them back to the NIC may lead to
830                  * RX ring exhaustion situations.
831                  * However, the NIC can gracefully prevent such situations
832                  * from happening by sending specific "back-pressure" flow
833                  * control frames to its peer(s).
834                  */
835                 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_id=%u "
836                            "staterr=0x%x pkt_len=%u",
837                            (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
838                            (unsigned) rx_id, (unsigned) staterr,
839                            (unsigned) rte_le_to_cpu_16(rxd.wb.upper.length));
840
841                 nmb = rte_rxmbuf_alloc(rxq->mb_pool);
842                 if (nmb == NULL) {
843                         PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
844                                    "queue_id=%u", (unsigned) rxq->port_id,
845                                    (unsigned) rxq->queue_id);
846                         rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
847                         break;
848                 }
849
850                 nb_hold++;
851                 rxe = &sw_ring[rx_id];
852                 rx_id++;
853                 if (rx_id == rxq->nb_rx_desc)
854                         rx_id = 0;
855
856                 /* Prefetch next mbuf while processing current one. */
857                 rte_igb_prefetch(sw_ring[rx_id].mbuf);
858
859                 /*
860                  * When next RX descriptor is on a cache-line boundary,
861                  * prefetch the next 4 RX descriptors and the next 8 pointers
862                  * to mbufs.
863                  */
864                 if ((rx_id & 0x3) == 0) {
865                         rte_igb_prefetch(&rx_ring[rx_id]);
866                         rte_igb_prefetch(&sw_ring[rx_id]);
867                 }
868
869                 rxm = rxe->mbuf;
870                 rxe->mbuf = nmb;
871                 dma_addr =
872                         rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(nmb));
873                 rxdp->read.hdr_addr = 0;
874                 rxdp->read.pkt_addr = dma_addr;
875
876                 /*
877                  * Initialize the returned mbuf.
878                  * 1) setup generic mbuf fields:
879                  *    - number of segments,
880                  *    - next segment,
881                  *    - packet length,
882                  *    - RX port identifier.
883                  * 2) integrate hardware offload data, if any:
884                  *    - RSS flag & hash,
885                  *    - IP checksum flag,
886                  *    - VLAN TCI, if any,
887                  *    - error flags.
888                  */
889                 pkt_len = (uint16_t) (rte_le_to_cpu_16(rxd.wb.upper.length) -
890                                       rxq->crc_len);
891                 rxm->data_off = RTE_PKTMBUF_HEADROOM;
892                 rte_packet_prefetch((char *)rxm->buf_addr + rxm->data_off);
893                 rxm->nb_segs = 1;
894                 rxm->next = NULL;
895                 rxm->pkt_len = pkt_len;
896                 rxm->data_len = pkt_len;
897                 rxm->port = rxq->port_id;
898
899                 rxm->hash.rss = rxd.wb.lower.hi_dword.rss;
900                 hlen_type_rss = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
901                 /* Only valid if PKT_RX_VLAN_PKT set in pkt_flags */
902                 rxm->vlan_tci = rte_le_to_cpu_16(rxd.wb.upper.vlan);
903
904                 pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(rxq, hlen_type_rss);
905                 pkt_flags = pkt_flags | rx_desc_status_to_pkt_flags(staterr);
906                 pkt_flags = pkt_flags | rx_desc_error_to_pkt_flags(staterr);
907                 rxm->ol_flags = pkt_flags;
908                 rxm->packet_type = igb_rxd_pkt_info_to_pkt_type(rxd.wb.lower.
909                                                 lo_dword.hs_rss.pkt_info);
910
911                 /*
912                  * Store the mbuf address into the next entry of the array
913                  * of returned packets.
914                  */
915                 rx_pkts[nb_rx++] = rxm;
916         }
917         rxq->rx_tail = rx_id;
918
919         /*
920          * If the number of free RX descriptors is greater than the RX free
921          * threshold of the queue, advance the Receive Descriptor Tail (RDT)
922          * register.
923          * Update the RDT with the value of the last processed RX descriptor
924          * minus 1, to guarantee that the RDT register is never equal to the
925                  * RDH register, which creates a "full" ring situation from the
926          * hardware point of view...
927          */
928         nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold);
929         if (nb_hold > rxq->rx_free_thresh) {
930                 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
931                            "nb_hold=%u nb_rx=%u",
932                            (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
933                            (unsigned) rx_id, (unsigned) nb_hold,
934                            (unsigned) nb_rx);
935                 rx_id = (uint16_t) ((rx_id == 0) ?
936                                      (rxq->nb_rx_desc - 1) : (rx_id - 1));
937                 E1000_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id);
938                 nb_hold = 0;
939         }
940         rxq->nb_rx_hold = nb_hold;
941         return nb_rx;
942 }
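/*
 * Usage sketch (not part of the upstream file): eth_igb_recv_pkts() is the
 * non-scattered rx_pkt_burst callback; applications receive through the
 * generic API, e.g.:
 *
 *      struct rte_mbuf *bufs[32];
 *      uint16_t n = rte_eth_rx_burst(port_id, queue_id, bufs, 32);
 *
 * For frames spanning several mbufs, the scattered variant below is used
 * instead.
 */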
943
944 uint16_t
945 eth_igb_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
946                          uint16_t nb_pkts)
947 {
948         struct igb_rx_queue *rxq;
949         volatile union e1000_adv_rx_desc *rx_ring;
950         volatile union e1000_adv_rx_desc *rxdp;
951         struct igb_rx_entry *sw_ring;
952         struct igb_rx_entry *rxe;
953         struct rte_mbuf *first_seg;
954         struct rte_mbuf *last_seg;
955         struct rte_mbuf *rxm;
956         struct rte_mbuf *nmb;
957         union e1000_adv_rx_desc rxd;
958         uint64_t dma; /* Physical address of mbuf data buffer */
959         uint32_t staterr;
960         uint32_t hlen_type_rss;
961         uint16_t rx_id;
962         uint16_t nb_rx;
963         uint16_t nb_hold;
964         uint16_t data_len;
965         uint64_t pkt_flags;
966
967         nb_rx = 0;
968         nb_hold = 0;
969         rxq = rx_queue;
970         rx_id = rxq->rx_tail;
971         rx_ring = rxq->rx_ring;
972         sw_ring = rxq->sw_ring;
973
974         /*
975          * Retrieve RX context of current packet, if any.
976          */
977         first_seg = rxq->pkt_first_seg;
978         last_seg = rxq->pkt_last_seg;
979
980         while (nb_rx < nb_pkts) {
981         next_desc:
982                 /*
983                  * The order of operations here is important as the DD status
984                  * bit must not be read after any other descriptor fields.
985                  * rx_ring and rxdp are pointing to volatile data so the order
986                  * of accesses cannot be reordered by the compiler. If they were
987                  * not volatile, they could be reordered which could lead to
988                  * using invalid descriptor fields when read from rxd.
989                  */
990                 rxdp = &rx_ring[rx_id];
991                 staterr = rxdp->wb.upper.status_error;
992                 if (! (staterr & rte_cpu_to_le_32(E1000_RXD_STAT_DD)))
993                         break;
994                 rxd = *rxdp;
995
996                 /*
997                  * Descriptor done.
998                  *
999                  * Allocate a new mbuf to replenish the RX ring descriptor.
1000                  * If the allocation fails:
1001                  *    - arrange for that RX descriptor to be the first one
1002                  *      being parsed the next time the receive function is
1003                  *      invoked [on the same queue].
1004                  *
1005                  *    - Stop parsing the RX ring and return immediately.
1006                  *
1007                  * This policy does not drop the packet received in the RX
1008                  * descriptor for which the allocation of a new mbuf failed.
1009                  * Thus, it allows that packet to be retrieved later, once
1010                  * mbufs have been freed in the meantime.
1011                  * As a side effect, holding RX descriptors instead of
1012                  * systematically giving them back to the NIC may lead to
1013                  * RX ring exhaustion situations.
1014                  * However, the NIC can gracefully prevent such situations
1015                  * from happening by sending specific "back-pressure" flow
1016                  * control frames to its peer(s).
1017                  */
1018                 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_id=%u "
1019                            "staterr=0x%x data_len=%u",
1020                            (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
1021                            (unsigned) rx_id, (unsigned) staterr,
1022                            (unsigned) rte_le_to_cpu_16(rxd.wb.upper.length));
1023
1024                 nmb = rte_rxmbuf_alloc(rxq->mb_pool);
1025                 if (nmb == NULL) {
1026                         PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
1027                                    "queue_id=%u", (unsigned) rxq->port_id,
1028                                    (unsigned) rxq->queue_id);
1029                         rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
1030                         break;
1031                 }
1032
1033                 nb_hold++;
1034                 rxe = &sw_ring[rx_id];
1035                 rx_id++;
1036                 if (rx_id == rxq->nb_rx_desc)
1037                         rx_id = 0;
1038
1039                 /* Prefetch next mbuf while processing current one. */
1040                 rte_igb_prefetch(sw_ring[rx_id].mbuf);
1041
1042                 /*
1043                  * When next RX descriptor is on a cache-line boundary,
1044                  * prefetch the next 4 RX descriptors and the next 8 pointers
1045                  * to mbufs.
1046                  */
1047                 if ((rx_id & 0x3) == 0) {
1048                         rte_igb_prefetch(&rx_ring[rx_id]);
1049                         rte_igb_prefetch(&sw_ring[rx_id]);
1050                 }
1051
1052                 /*
1053                  * Update RX descriptor with the physical address of the new
1054                  * data buffer of the new allocated mbuf.
1055                  */
1056                 rxm = rxe->mbuf;
1057                 rxe->mbuf = nmb;
1058                 dma = rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(nmb));
1059                 rxdp->read.pkt_addr = dma;
1060                 rxdp->read.hdr_addr = 0;
1061
1062                 /*
1063                  * Set data length & data buffer address of mbuf.
1064                  */
1065                 data_len = rte_le_to_cpu_16(rxd.wb.upper.length);
1066                 rxm->data_len = data_len;
1067                 rxm->data_off = RTE_PKTMBUF_HEADROOM;
1068
1069                 /*
1070                  * If this is the first buffer of the received packet,
1071                  * set the pointer to the first mbuf of the packet and
1072                  * initialize its context.
1073                  * Otherwise, update the total length and the number of segments
1074                  * of the current scattered packet, and update the pointer to
1075                  * the last mbuf of the current packet.
1076                  */
1077                 if (first_seg == NULL) {
1078                         first_seg = rxm;
1079                         first_seg->pkt_len = data_len;
1080                         first_seg->nb_segs = 1;
1081                 } else {
1082                         first_seg->pkt_len += data_len;
1083                         first_seg->nb_segs++;
1084                         last_seg->next = rxm;
1085                 }
1086
1087                 /*
1088                  * If this is not the last buffer of the received packet,
1089                  * update the pointer to the last mbuf of the current scattered
1090                  * packet and continue to parse the RX ring.
1091                  */
1092                 if (! (staterr & E1000_RXD_STAT_EOP)) {
1093                         last_seg = rxm;
1094                         goto next_desc;
1095                 }
1096
1097                 /*
1098                  * This is the last buffer of the received packet.
1099                  * If the CRC is not stripped by the hardware:
1100                  *   - Subtract the CRC length from the total packet length.
1101                  *   - If the last buffer only contains the whole CRC or a part
1102                  *     of it, free the mbuf associated to the last buffer.
1103                  *     of it, free the mbuf associated with the last buffer.
1104                  *     mbuf, subtract the length of that CRC part from the
1105                  *     data length of the previous mbuf.
1106                  */
1107                 rxm->next = NULL;
1108                 if (unlikely(rxq->crc_len > 0)) {
1109                         first_seg->pkt_len -= ETHER_CRC_LEN;
1110                         if (data_len <= ETHER_CRC_LEN) {
1111                                 rte_pktmbuf_free_seg(rxm);
1112                                 first_seg->nb_segs--;
1113                                 last_seg->data_len = (uint16_t)
1114                                         (last_seg->data_len -
1115                                          (ETHER_CRC_LEN - data_len));
1116                                 last_seg->next = NULL;
1117                         } else
1118                                 rxm->data_len =
1119                                         (uint16_t) (data_len - ETHER_CRC_LEN);
1120                 }
1121
1122                 /*
1123                  * Initialize the first mbuf of the returned packet:
1124                  *    - RX port identifier,
1125                  *    - hardware offload data, if any:
1126                  *      - RSS flag & hash,
1127                  *      - IP checksum flag,
1128                  *      - VLAN TCI, if any,
1129                  *      - error flags.
1130                  */
1131                 first_seg->port = rxq->port_id;
1132                 first_seg->hash.rss = rxd.wb.lower.hi_dword.rss;
1133
1134                 /*
1135                  * The vlan_tci field is only valid when PKT_RX_VLAN_PKT is
1136                  * set in the pkt_flags field.
1137                  */
1138                 first_seg->vlan_tci = rte_le_to_cpu_16(rxd.wb.upper.vlan);
1139                 hlen_type_rss = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
1140                 pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(rxq, hlen_type_rss);
1141                 pkt_flags = pkt_flags | rx_desc_status_to_pkt_flags(staterr);
1142                 pkt_flags = pkt_flags | rx_desc_error_to_pkt_flags(staterr);
1143                 first_seg->ol_flags = pkt_flags;
1144                 first_seg->packet_type = igb_rxd_pkt_info_to_pkt_type(rxd.wb.
1145                                         lower.lo_dword.hs_rss.pkt_info);
1146
1147                 /* Prefetch data of first segment, if configured to do so. */
1148                 rte_packet_prefetch((char *)first_seg->buf_addr +
1149                         first_seg->data_off);
1150
1151                 /*
1152                  * Store the mbuf address into the next entry of the array
1153                  * of returned packets.
1154                  */
1155                 rx_pkts[nb_rx++] = first_seg;
1156
1157                 /*
1158                  * Set up the receive context for a new packet.
1159                  */
1160                 first_seg = NULL;
1161         }
1162
1163         /*
1164          * Record index of the next RX descriptor to probe.
1165          */
1166         rxq->rx_tail = rx_id;
1167
1168         /*
1169          * Save receive context.
1170          */
1171         rxq->pkt_first_seg = first_seg;
1172         rxq->pkt_last_seg = last_seg;
1173
1174         /*
1175          * If the number of free RX descriptors is greater than the RX free
1176          * threshold of the queue, advance the Receive Descriptor Tail (RDT)
1177          * register.
1178          * Update the RDT with the value of the last processed RX descriptor
1179          * minus 1, to guarantee that the RDT register is never equal to the
1180                  * RDH register, which creates a "full" ring situation from the
1181          * hardware point of view...
1182          */
1183         nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold);
1184         if (nb_hold > rxq->rx_free_thresh) {
1185                 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
1186                            "nb_hold=%u nb_rx=%u",
1187                            (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
1188                            (unsigned) rx_id, (unsigned) nb_hold,
1189                            (unsigned) nb_rx);
1190                 rx_id = (uint16_t) ((rx_id == 0) ?
1191                                      (rxq->nb_rx_desc - 1) : (rx_id - 1));
1192                 E1000_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id);
1193                 nb_hold = 0;
1194         }
1195         rxq->nb_rx_hold = nb_hold;
1196         return nb_rx;
1197 }
1198
1199 /*
1200  * Maximum number of Ring Descriptors.
1201  *
1202  * Since RDLEN/TDLEN should be a multiple of 128 bytes, the number of ring
1203  * descriptors should meet the following condition:
1204  *      (num_ring_desc * sizeof(struct e1000_rx/tx_desc)) % 128 == 0
1205  */
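/*
 * Worked example (informational): both union e1000_adv_rx_desc and
 * union e1000_adv_tx_desc are 16 bytes, so the condition above holds
 * whenever the descriptor count is a multiple of 128 / 16 = 8, which is
 * what the IGB_RXD_ALIGN / IGB_TXD_ALIGN checks in the queue setup
 * functions enforce.
 */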
1206
1207 static void
1208 igb_tx_queue_release_mbufs(struct igb_tx_queue *txq)
1209 {
1210         unsigned i;
1211
1212         if (txq->sw_ring != NULL) {
1213                 for (i = 0; i < txq->nb_tx_desc; i++) {
1214                         if (txq->sw_ring[i].mbuf != NULL) {
1215                                 rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
1216                                 txq->sw_ring[i].mbuf = NULL;
1217                         }
1218                 }
1219         }
1220 }
1221
1222 static void
1223 igb_tx_queue_release(struct igb_tx_queue *txq)
1224 {
1225         if (txq != NULL) {
1226                 igb_tx_queue_release_mbufs(txq);
1227                 rte_free(txq->sw_ring);
1228                 rte_free(txq);
1229         }
1230 }
1231
1232 void
1233 eth_igb_tx_queue_release(void *txq)
1234 {
1235         igb_tx_queue_release(txq);
1236 }
1237
1238 static void
1239 igb_reset_tx_queue_stat(struct igb_tx_queue *txq)
1240 {
1241         txq->tx_head = 0;
1242         txq->tx_tail = 0;
1243         txq->ctx_curr = 0;
1244         memset((void*)&txq->ctx_cache, 0,
1245                 IGB_CTX_NUM * sizeof(struct igb_advctx_info));
1246 }
1247
1248 static void
1249 igb_reset_tx_queue(struct igb_tx_queue *txq, struct rte_eth_dev *dev)
1250 {
1251         static const union e1000_adv_tx_desc zeroed_desc = {{0}};
1252         struct igb_tx_entry *txe = txq->sw_ring;
1253         uint16_t i, prev;
1254         struct e1000_hw *hw;
1255
1256         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1257         /* Zero out HW ring memory */
1258         for (i = 0; i < txq->nb_tx_desc; i++) {
1259                 txq->tx_ring[i] = zeroed_desc;
1260         }
1261
1262         /* Initialize ring entries */
1263         prev = (uint16_t)(txq->nb_tx_desc - 1);
1264         for (i = 0; i < txq->nb_tx_desc; i++) {
1265                 volatile union e1000_adv_tx_desc *txd = &(txq->tx_ring[i]);
1266
1267                 txd->wb.status = E1000_TXD_STAT_DD;
1268                 txe[i].mbuf = NULL;
1269                 txe[i].last_id = i;
1270                 txe[prev].next_id = i;
1271                 prev = i;
1272         }
1273
1274         txq->txd_type = E1000_ADVTXD_DTYP_DATA;
1275         /* 82575 specific, each tx queue will use 2 hw contexts */
1276         if (hw->mac.type == e1000_82575)
1277                 txq->ctx_start = txq->queue_id * IGB_CTX_NUM;
1278
1279         igb_reset_tx_queue_stat(txq);
1280 }
1281
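/*
 * Set up a TX queue: validate the requested ring size, release any queue
 * previously configured at this index, allocate the queue structure, a
 * memzone large enough for the maximum ring size and the software ring,
 * then reset the queue to its initial state.
 */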
1282 int
1283 eth_igb_tx_queue_setup(struct rte_eth_dev *dev,
1284                          uint16_t queue_idx,
1285                          uint16_t nb_desc,
1286                          unsigned int socket_id,
1287                          const struct rte_eth_txconf *tx_conf)
1288 {
1289         const struct rte_memzone *tz;
1290         struct igb_tx_queue *txq;
1291         struct e1000_hw     *hw;
1292         uint32_t size;
1293
1294         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1295
1296         /*
1297          * Validate number of transmit descriptors.
1298          * It must not exceed the hardware maximum, and must be a multiple
1299          * of IGB_TXD_ALIGN, which keeps the ring size a multiple of E1000_ALIGN.
1300          */
1301         if (nb_desc % IGB_TXD_ALIGN != 0 ||
1302                         (nb_desc > E1000_MAX_RING_DESC) ||
1303                         (nb_desc < E1000_MIN_RING_DESC)) {
1304                 return -EINVAL;
1305         }
1306
1307         /*
1308          * The tx_free_thresh and tx_rs_thresh values are not used in the 1G
1309          * driver.
1310          */
1311         if (tx_conf->tx_free_thresh != 0)
1312                 PMD_INIT_LOG(INFO, "The tx_free_thresh parameter is not "
1313                              "used for the 1G driver.");
1314         if (tx_conf->tx_rs_thresh != 0)
1315                 PMD_INIT_LOG(INFO, "The tx_rs_thresh parameter is not "
1316                              "used for the 1G driver.");
1317         if (tx_conf->tx_thresh.wthresh == 0 && hw->mac.type != e1000_82576)
1318                 PMD_INIT_LOG(INFO, "To improve 1G driver performance, "
1319                              "consider setting the TX WTHRESH value to 4, 8, "
1320                              "or 16.");
1321
1322         /* Free memory prior to re-allocation if needed */
1323         if (dev->data->tx_queues[queue_idx] != NULL) {
1324                 igb_tx_queue_release(dev->data->tx_queues[queue_idx]);
1325                 dev->data->tx_queues[queue_idx] = NULL;
1326         }
1327
1328         /* First allocate the tx queue data structure */
1329         txq = rte_zmalloc("ethdev TX queue", sizeof(struct igb_tx_queue),
1330                                                         RTE_CACHE_LINE_SIZE);
1331         if (txq == NULL)
1332                 return -ENOMEM;
1333
1334         /*
1335          * Allocate TX ring hardware descriptors. A memzone large enough to
1336          * handle the maximum ring size is allocated in order to allow for
1337          * resizing in later calls to the queue setup function.
1338          */
1339         size = sizeof(union e1000_adv_tx_desc) * E1000_MAX_RING_DESC;
1340         tz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx, size,
1341                                       E1000_ALIGN, socket_id);
1342         if (tz == NULL) {
1343                 igb_tx_queue_release(txq);
1344                 return -ENOMEM;
1345         }
1346
1347         txq->nb_tx_desc = nb_desc;
1348         txq->pthresh = tx_conf->tx_thresh.pthresh;
1349         txq->hthresh = tx_conf->tx_thresh.hthresh;
1350         txq->wthresh = tx_conf->tx_thresh.wthresh;
1351         if (txq->wthresh > 0 && hw->mac.type == e1000_82576)
1352                 txq->wthresh = 1;
1353         txq->queue_id = queue_idx;
1354         txq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
1355                 queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
1356         txq->port_id = dev->data->port_id;
1357
1358         txq->tdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_TDT(txq->reg_idx));
1359         txq->tx_ring_phys_addr = rte_mem_phy2mch(tz->memseg_id, tz->phys_addr);
1360
1361         txq->tx_ring = (union e1000_adv_tx_desc *) tz->addr;
1362         /* Allocate software ring */
1363         txq->sw_ring = rte_zmalloc("txq->sw_ring",
1364                                    sizeof(struct igb_tx_entry) * nb_desc,
1365                                    RTE_CACHE_LINE_SIZE);
1366         if (txq->sw_ring == NULL) {
1367                 igb_tx_queue_release(txq);
1368                 return -ENOMEM;
1369         }
1370         PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64,
1371                      txq->sw_ring, txq->tx_ring, txq->tx_ring_phys_addr);
1372
1373         igb_reset_tx_queue(txq, dev);
1374         dev->tx_pkt_burst = eth_igb_xmit_pkts;
1375         dev->data->tx_queues[queue_idx] = txq;
1376
1377         return 0;
1378 }
1379
1380 static void
1381 igb_rx_queue_release_mbufs(struct igb_rx_queue *rxq)
1382 {
1383         unsigned i;
1384
1385         if (rxq->sw_ring != NULL) {
1386                 for (i = 0; i < rxq->nb_rx_desc; i++) {
1387                         if (rxq->sw_ring[i].mbuf != NULL) {
1388                                 rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
1389                                 rxq->sw_ring[i].mbuf = NULL;
1390                         }
1391                 }
1392         }
1393 }
1394
1395 static void
1396 igb_rx_queue_release(struct igb_rx_queue *rxq)
1397 {
1398         if (rxq != NULL) {
1399                 igb_rx_queue_release_mbufs(rxq);
1400                 rte_free(rxq->sw_ring);
1401                 rte_free(rxq);
1402         }
1403 }
1404
1405 void
1406 eth_igb_rx_queue_release(void *rxq)
1407 {
1408         igb_rx_queue_release(rxq);
1409 }
1410
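/*
 * Reset an RX queue: clear the hardware descriptor ring and reset the
 * software state (tail index and scattered-packet reassembly pointers).
 */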
1411 static void
1412 igb_reset_rx_queue(struct igb_rx_queue *rxq)
1413 {
1414         static const union e1000_adv_rx_desc zeroed_desc = {{0}};
1415         unsigned i;
1416
1417         /* Zero out HW ring memory */
1418         for (i = 0; i < rxq->nb_rx_desc; i++) {
1419                 rxq->rx_ring[i] = zeroed_desc;
1420         }
1421
1422         rxq->rx_tail = 0;
1423         rxq->pkt_first_seg = NULL;
1424         rxq->pkt_last_seg = NULL;
1425 }
1426
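/*
 * Set up an RX queue: validate the requested ring size, release any queue
 * previously configured at this index, allocate the queue structure and a
 * descriptor memzone sized for the maximum ring, record the threshold and
 * CRC stripping settings, allocate the software ring and reset the queue.
 */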
1427 int
1428 eth_igb_rx_queue_setup(struct rte_eth_dev *dev,
1429                          uint16_t queue_idx,
1430                          uint16_t nb_desc,
1431                          unsigned int socket_id,
1432                          const struct rte_eth_rxconf *rx_conf,
1433                          struct rte_mempool *mp)
1434 {
1435         const struct rte_memzone *rz;
1436         struct igb_rx_queue *rxq;
1437         struct e1000_hw     *hw;
1438         unsigned int size;
1439
1440         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1441
1442         /*
1443          * Validate number of receive descriptors.
1444          * It must not exceed the hardware maximum, and must be a multiple
1445          * of IGB_RXD_ALIGN, which keeps the ring size a multiple of E1000_ALIGN.
1446          */
1447         if (nb_desc % IGB_RXD_ALIGN != 0 ||
1448                         (nb_desc > E1000_MAX_RING_DESC) ||
1449                         (nb_desc < E1000_MIN_RING_DESC)) {
1450                 return -EINVAL;
1451         }
1452
1453         /* Free memory prior to re-allocation if needed */
1454         if (dev->data->rx_queues[queue_idx] != NULL) {
1455                 igb_rx_queue_release(dev->data->rx_queues[queue_idx]);
1456                 dev->data->rx_queues[queue_idx] = NULL;
1457         }
1458
1459         /* First allocate the RX queue data structure. */
1460         rxq = rte_zmalloc("ethdev RX queue", sizeof(struct igb_rx_queue),
1461                           RTE_CACHE_LINE_SIZE);
1462         if (rxq == NULL)
1463                 return -ENOMEM;
1464         rxq->mb_pool = mp;
1465         rxq->nb_rx_desc = nb_desc;
1466         rxq->pthresh = rx_conf->rx_thresh.pthresh;
1467         rxq->hthresh = rx_conf->rx_thresh.hthresh;
1468         rxq->wthresh = rx_conf->rx_thresh.wthresh;
1469         if (rxq->wthresh > 0 &&
1470             (hw->mac.type == e1000_82576 || hw->mac.type == e1000_vfadapt_i350))
1471                 rxq->wthresh = 1;
1472         rxq->drop_en = rx_conf->rx_drop_en;
1473         rxq->rx_free_thresh = rx_conf->rx_free_thresh;
1474         rxq->queue_id = queue_idx;
1475         rxq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
1476                 queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
1477         rxq->port_id = dev->data->port_id;
1478         rxq->crc_len = (uint8_t) ((dev->data->dev_conf.rxmode.hw_strip_crc) ? 0 :
1479                                   ETHER_CRC_LEN);
1480
1481         /*
1482          *  Allocate RX ring hardware descriptors. A memzone large enough to
1483          *  handle the maximum ring size is allocated in order to allow for
1484          *  resizing in later calls to the queue setup function.
1485          */
1486         size = sizeof(union e1000_adv_rx_desc) * E1000_MAX_RING_DESC;
1487         rz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx, size,
1488                                       E1000_ALIGN, socket_id);
1489         if (rz == NULL) {
1490                 igb_rx_queue_release(rxq);
1491                 return -ENOMEM;
1492         }
1493         rxq->rdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDT(rxq->reg_idx));
1494         rxq->rdh_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDH(rxq->reg_idx));
1495         rxq->rx_ring_phys_addr = rte_mem_phy2mch(rz->memseg_id, rz->phys_addr);
1496         rxq->rx_ring = (union e1000_adv_rx_desc *) rz->addr;
1497
1498         /* Allocate software ring. */
1499         rxq->sw_ring = rte_zmalloc("rxq->sw_ring",
1500                                    sizeof(struct igb_rx_entry) * nb_desc,
1501                                    RTE_CACHE_LINE_SIZE);
1502         if (rxq->sw_ring == NULL) {
1503                 igb_rx_queue_release(rxq);
1504                 return -ENOMEM;
1505         }
1506         PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64,
1507                      rxq->sw_ring, rxq->rx_ring, rxq->rx_ring_phys_addr);
1508
1509         dev->data->rx_queues[queue_idx] = rxq;
1510         igb_reset_rx_queue(rxq);
1511
1512         return 0;
1513 }
1514
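/*
 * Return an estimate of the number of used (DD) descriptors on an RX
 * queue, scanning the ring from rx_tail in steps of IGB_RXQ_SCAN_INTERVAL.
 */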
1515 uint32_t
1516 eth_igb_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
1517 {
1518 #define IGB_RXQ_SCAN_INTERVAL 4
1519         volatile union e1000_adv_rx_desc *rxdp;
1520         struct igb_rx_queue *rxq;
1521         uint32_t desc = 0;
1522
1523         if (rx_queue_id >= dev->data->nb_rx_queues) {
1524                 PMD_RX_LOG(ERR, "Invalid RX queue id=%d", rx_queue_id);
1525                 return 0;
1526         }
1527
1528         rxq = dev->data->rx_queues[rx_queue_id];
1529         rxdp = &(rxq->rx_ring[rxq->rx_tail]);
1530
1531         while ((desc < rxq->nb_rx_desc) &&
1532                 (rxdp->wb.upper.status_error & E1000_RXD_STAT_DD)) {
1533                 desc += IGB_RXQ_SCAN_INTERVAL;
1534                 rxdp += IGB_RXQ_SCAN_INTERVAL;
1535                 if (rxq->rx_tail + desc >= rxq->nb_rx_desc)
1536                         rxdp = &(rxq->rx_ring[rxq->rx_tail +
1537                                 desc - rxq->nb_rx_desc]);
1538         }
1539
1540         return desc;
1541 }
1542
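/*
 * Report whether the descriptor at rx_tail + offset (modulo the ring size)
 * has been written back by the hardware, i.e. its DD bit is set.
 */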
1543 int
1544 eth_igb_rx_descriptor_done(void *rx_queue, uint16_t offset)
1545 {
1546         volatile union e1000_adv_rx_desc *rxdp;
1547         struct igb_rx_queue *rxq = rx_queue;
1548         uint32_t desc;
1549
1550         if (unlikely(offset >= rxq->nb_rx_desc))
1551                 return 0;
1552         desc = rxq->rx_tail + offset;
1553         if (desc >= rxq->nb_rx_desc)
1554                 desc -= rxq->nb_rx_desc;
1555
1556         rxdp = &rxq->rx_ring[desc];
1557         return !!(rxdp->wb.upper.status_error & E1000_RXD_STAT_DD);
1558 }
1559
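/*
 * Release the mbufs of every configured TX and RX queue and reset the
 * queues to their initial state, without freeing the queue structures.
 */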
1560 void
1561 igb_dev_clear_queues(struct rte_eth_dev *dev)
1562 {
1563         uint16_t i;
1564         struct igb_tx_queue *txq;
1565         struct igb_rx_queue *rxq;
1566
1567         for (i = 0; i < dev->data->nb_tx_queues; i++) {
1568                 txq = dev->data->tx_queues[i];
1569                 if (txq != NULL) {
1570                         igb_tx_queue_release_mbufs(txq);
1571                         igb_reset_tx_queue(txq, dev);
1572                 }
1573         }
1574
1575         for (i = 0; i < dev->data->nb_rx_queues; i++) {
1576                 rxq = dev->data->rx_queues[i];
1577                 if (rxq != NULL) {
1578                         igb_rx_queue_release_mbufs(rxq);
1579                         igb_reset_rx_queue(rxq);
1580                 }
1581         }
1582 }
1583
1584 void
1585 igb_dev_free_queues(struct rte_eth_dev *dev)
1586 {
1587         uint16_t i;
1588
1589         for (i = 0; i < dev->data->nb_rx_queues; i++) {
1590                 eth_igb_rx_queue_release(dev->data->rx_queues[i]);
1591                 dev->data->rx_queues[i] = NULL;
1592         }
1593         dev->data->nb_rx_queues = 0;
1594
1595         for (i = 0; i < dev->data->nb_tx_queues; i++) {
1596                 eth_igb_tx_queue_release(dev->data->tx_queues[i]);
1597                 dev->data->tx_queues[i] = NULL;
1598         }
1599         dev->data->nb_tx_queues = 0;
1600 }
1601
1602 /**
1603  * Receive Side Scaling (RSS).
1604  * See section 7.1.1.7 in the following document:
1605  *     "Intel 82576 GbE Controller Datasheet" - Revision 2.45 October 2009
1606  *
1607  * Principles:
1608  * The source and destination IP addresses of the IP header and the source and
1609  * destination ports of TCP/UDP headers, if any, of received packets are hashed
1610  * against a configurable random key to compute a 32-bit RSS hash result.
1611  * The seven (7) LSBs of the 32-bit hash result are used as an index into a
1612  * 128-entry redirection table (RETA).  Each entry of the RETA provides a 3-bit
1613  * RSS output index which is used as the RX queue index where to store the
1614  * received packets.
1615  * The following output is supplied in the RX write-back descriptor:
1616  *     - 32-bit result of the Microsoft RSS hash function,
1617  *     - 4-bit RSS type field.
1618  */
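/*
 * Illustrative lookup as performed by the hardware (not by this driver):
 * the 7 LSBs of the hash select one of the 128 RETA entries, whose 3-bit
 * value is the destination RX queue:
 *
 *     rx_queue = reta[rss_hash & 0x7F];
 */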
1619
1620 /*
1621  * RSS random key supplied in section 7.1.1.7.3 of the Intel 82576 datasheet.
1622  * Used as the default key.
1623  */
1624 static uint8_t rss_intel_key[40] = {
1625         0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
1626         0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
1627         0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
1628         0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
1629         0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA,
1630 };
1631
1632 static void
1633 igb_rss_disable(struct rte_eth_dev *dev)
1634 {
1635         struct e1000_hw *hw;
1636         uint32_t mrqc;
1637
1638         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1639         mrqc = E1000_READ_REG(hw, E1000_MRQC);
1640         mrqc &= ~E1000_MRQC_ENABLE_MASK;
1641         E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
1642 }
1643
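/*
 * Program the 40-byte RSS hash key into the ten RSSRK registers as
 * little-endian 32-bit words and enable the requested hash fields in MRQC.
 */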
1644 static void
1645 igb_hw_rss_hash_set(struct e1000_hw *hw, struct rte_eth_rss_conf *rss_conf)
1646 {
1647         uint8_t  *hash_key;
1648         uint32_t rss_key;
1649         uint32_t mrqc;
1650         uint64_t rss_hf;
1651         uint16_t i;
1652
1653         hash_key = rss_conf->rss_key;
1654         if (hash_key != NULL) {
1655                 /* Fill in RSS hash key */
1656                 for (i = 0; i < 10; i++) {
1657                         rss_key  = hash_key[(i * 4)];
1658                         rss_key |= hash_key[(i * 4) + 1] << 8;
1659                         rss_key |= hash_key[(i * 4) + 2] << 16;
1660                         rss_key |= hash_key[(i * 4) + 3] << 24;
1661                         E1000_WRITE_REG_ARRAY(hw, E1000_RSSRK(0), i, rss_key);
1662                 }
1663         }
1664
1665         /* Set configured hashing protocols in MRQC register */
1666         rss_hf = rss_conf->rss_hf;
1667         mrqc = E1000_MRQC_ENABLE_RSS_4Q; /* RSS enabled. */
1668         if (rss_hf & ETH_RSS_IPV4)
1669                 mrqc |= E1000_MRQC_RSS_FIELD_IPV4;
1670         if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
1671                 mrqc |= E1000_MRQC_RSS_FIELD_IPV4_TCP;
1672         if (rss_hf & ETH_RSS_IPV6)
1673                 mrqc |= E1000_MRQC_RSS_FIELD_IPV6;
1674         if (rss_hf & ETH_RSS_IPV6_EX)
1675                 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_EX;
1676         if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP)
1677                 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_TCP;
1678         if (rss_hf & ETH_RSS_IPV6_TCP_EX)
1679                 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_TCP_EX;
1680         if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
1681                 mrqc |= E1000_MRQC_RSS_FIELD_IPV4_UDP;
1682         if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP)
1683                 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_UDP;
1684         if (rss_hf & ETH_RSS_IPV6_UDP_EX)
1685                 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_UDP_EX;
1686         E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
1687 }
1688
1689 int
1690 eth_igb_rss_hash_update(struct rte_eth_dev *dev,
1691                         struct rte_eth_rss_conf *rss_conf)
1692 {
1693         struct e1000_hw *hw;
1694         uint32_t mrqc;
1695         uint64_t rss_hf;
1696
1697         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1698
1699         /*
1700          * Before changing anything, first check that the update RSS operation
1701          * does not attempt to disable RSS, if RSS was enabled at
1702          * initialization time, or does not attempt to enable RSS, if RSS was
1703          * disabled at initialization time.
1704          */
1705         rss_hf = rss_conf->rss_hf & IGB_RSS_OFFLOAD_ALL;
1706         mrqc = E1000_READ_REG(hw, E1000_MRQC);
1707         if (!(mrqc & E1000_MRQC_ENABLE_MASK)) { /* RSS disabled */
1708                 if (rss_hf != 0) /* Enable RSS */
1709                         return -(EINVAL);
1710                 return 0; /* Nothing to do */
1711         }
1712         /* RSS enabled */
1713         if (rss_hf == 0) /* Disable RSS */
1714                 return -(EINVAL);
1715         igb_hw_rss_hash_set(hw, rss_conf);
1716         return 0;
1717 }
1718
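/*
 * Return the current RSS configuration: read the hash key back from the
 * RSSRK registers (if a buffer is supplied) and translate the MRQC hash
 * field bits into rss_hf flags.
 */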
1719 int eth_igb_rss_hash_conf_get(struct rte_eth_dev *dev,
1720                               struct rte_eth_rss_conf *rss_conf)
1721 {
1722         struct e1000_hw *hw;
1723         uint8_t *hash_key;
1724         uint32_t rss_key;
1725         uint32_t mrqc;
1726         uint64_t rss_hf;
1727         uint16_t i;
1728
1729         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1730         hash_key = rss_conf->rss_key;
1731         if (hash_key != NULL) {
1732                 /* Return RSS hash key */
1733                 for (i = 0; i < 10; i++) {
1734                         rss_key = E1000_READ_REG_ARRAY(hw, E1000_RSSRK(0), i);
1735                         hash_key[(i * 4)] = rss_key & 0x000000FF;
1736                         hash_key[(i * 4) + 1] = (rss_key >> 8) & 0x000000FF;
1737                         hash_key[(i * 4) + 2] = (rss_key >> 16) & 0x000000FF;
1738                         hash_key[(i * 4) + 3] = (rss_key >> 24) & 0x000000FF;
1739                 }
1740         }
1741
1742         /* Get RSS functions configured in MRQC register */
1743         mrqc = E1000_READ_REG(hw, E1000_MRQC);
1744         if ((mrqc & E1000_MRQC_ENABLE_RSS_4Q) == 0) { /* RSS is disabled */
1745                 rss_conf->rss_hf = 0;
1746                 return 0;
1747         }
1748         rss_hf = 0;
1749         if (mrqc & E1000_MRQC_RSS_FIELD_IPV4)
1750                 rss_hf |= ETH_RSS_IPV4;
1751         if (mrqc & E1000_MRQC_RSS_FIELD_IPV4_TCP)
1752                 rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
1753         if (mrqc & E1000_MRQC_RSS_FIELD_IPV6)
1754                 rss_hf |= ETH_RSS_IPV6;
1755         if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_EX)
1756                 rss_hf |= ETH_RSS_IPV6_EX;
1757         if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_TCP)
1758                 rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
1759         if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_TCP_EX)
1760                 rss_hf |= ETH_RSS_IPV6_TCP_EX;
1761         if (mrqc & E1000_MRQC_RSS_FIELD_IPV4_UDP)
1762                 rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
1763         if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_UDP)
1764                 rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
1765         if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_UDP_EX)
1766                 rss_hf |= ETH_RSS_IPV6_UDP_EX;
1767         rss_conf->rss_hf = rss_hf;
1768         return 0;
1769 }
1770
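/*
 * Configure RSS from the device configuration: fill the 128-entry
 * redirection table round-robin over the configured RX queues (82575
 * needs the entries shifted by 6 bits), then program the hash key
 * (falling back to the default Intel key) and the hash fields, or
 * disable RSS entirely when no supported hash function is requested.
 */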
1771 static void
1772 igb_rss_configure(struct rte_eth_dev *dev)
1773 {
1774         struct rte_eth_rss_conf rss_conf;
1775         struct e1000_hw *hw;
1776         uint32_t shift;
1777         uint16_t i;
1778
1779         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1780
1781         /* Fill in redirection table. */
1782         shift = (hw->mac.type == e1000_82575) ? 6 : 0;
1783         for (i = 0; i < 128; i++) {
1784                 union e1000_reta {
1785                         uint32_t dword;
1786                         uint8_t  bytes[4];
1787                 } reta;
1788                 uint8_t q_idx;
1789
1790                 q_idx = (uint8_t) ((dev->data->nb_rx_queues > 1) ?
1791                                    i % dev->data->nb_rx_queues : 0);
1792                 reta.bytes[i & 3] = (uint8_t) (q_idx << shift);
1793                 if ((i & 3) == 3)
1794                         E1000_WRITE_REG(hw, E1000_RETA(i >> 2), reta.dword);
1795         }
1796
1797         /*
1798          * Configure the RSS key and the RSS protocols used to compute
1799          * the RSS hash of input packets.
1800          */
1801         rss_conf = dev->data->dev_conf.rx_adv_conf.rss_conf;
1802         if ((rss_conf.rss_hf & IGB_RSS_OFFLOAD_ALL) == 0) {
1803                 igb_rss_disable(dev);
1804                 return;
1805         }
1806         if (rss_conf.rss_key == NULL)
1807                 rss_conf.rss_key = rss_intel_key; /* Default hash key */
1808         igb_hw_rss_hash_set(hw, &rss_conf);
1809 }
1810
1811 /*
1812  * Check whether the mac type supports VMDq or not.
1813  * Return 1 if it does; otherwise, return 0.
1814  */
1815 static int
1816 igb_is_vmdq_supported(const struct rte_eth_dev *dev)
1817 {
1818         const struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1819
1820         switch (hw->mac.type) {
1821         case e1000_82576:
1822         case e1000_82580:
1823         case e1000_i350:
1824                 return 1;
1825         case e1000_82540:
1826         case e1000_82541:
1827         case e1000_82542:
1828         case e1000_82543:
1829         case e1000_82544:
1830         case e1000_82545:
1831         case e1000_82546:
1832         case e1000_82547:
1833         case e1000_82571:
1834         case e1000_82572:
1835         case e1000_82573:
1836         case e1000_82574:
1837         case e1000_82583:
1838         case e1000_i210:
1839         case e1000_i211:
1840         default:
1841                 PMD_INIT_LOG(ERR, "Cannot support VMDq feature");
1842                 return 0;
1843         }
1844 }
1845
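/*
 * Configure the receive side for VMDq pooling: enable VLAN filtering,
 * switch MRQC to VMDq mode, select pools by VLAN tag, program per-pool
 * acceptance bits (VMOLR), open all VLAN filter table entries, enable
 * receive for the pools and set up the configured VLAN-to-pool maps.
 */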
1846 static int
1847 igb_vmdq_rx_hw_configure(struct rte_eth_dev *dev)
1848 {
1849         struct rte_eth_vmdq_rx_conf *cfg;
1850         struct e1000_hw *hw;
1851         uint32_t mrqc, vt_ctl, vmolr, rctl;
1852         int i;
1853
1854         PMD_INIT_FUNC_TRACE();
1855
1856         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1857         cfg = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;
1858
1859         /* Check if the mac type can support VMDq; a return value of 0 means it cannot */
1860         if (igb_is_vmdq_supported(dev) == 0)
1861                 return -1;
1862
1863         igb_rss_disable(dev);
1864
1865         /* RCTL: enable VLAN filter */
1866         rctl = E1000_READ_REG(hw, E1000_RCTL);
1867         rctl |= E1000_RCTL_VFE;
1868         E1000_WRITE_REG(hw, E1000_RCTL, rctl);
1869
1870         /* MRQC: enable vmdq */
1871         mrqc = E1000_READ_REG(hw, E1000_MRQC);
1872         mrqc |= E1000_MRQC_ENABLE_VMDQ;
1873         E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
1874
1875         /* VTCTL:  pool selection according to VLAN tag */
1876         vt_ctl = E1000_READ_REG(hw, E1000_VT_CTL);
1877         if (cfg->enable_default_pool)
1878                 vt_ctl |= (cfg->default_pool << E1000_VT_CTL_DEFAULT_POOL_SHIFT);
1879         vt_ctl |= E1000_VT_CTL_IGNORE_MAC;
1880         E1000_WRITE_REG(hw, E1000_VT_CTL, vt_ctl);
1881
1882         for (i = 0; i < E1000_VMOLR_SIZE; i++) {
1883                 vmolr = E1000_READ_REG(hw, E1000_VMOLR(i));
1884                 vmolr &= ~(E1000_VMOLR_AUPE | E1000_VMOLR_ROMPE |
1885                         E1000_VMOLR_ROPE | E1000_VMOLR_BAM |
1886                         E1000_VMOLR_MPME);
1887
1888                 if (cfg->rx_mode & ETH_VMDQ_ACCEPT_UNTAG)
1889                         vmolr |= E1000_VMOLR_AUPE;
1890                 if (cfg->rx_mode & ETH_VMDQ_ACCEPT_HASH_MC)
1891                         vmolr |= E1000_VMOLR_ROMPE;
1892                 if (cfg->rx_mode & ETH_VMDQ_ACCEPT_HASH_UC)
1893                         vmolr |= E1000_VMOLR_ROPE;
1894                 if (cfg->rx_mode & ETH_VMDQ_ACCEPT_BROADCAST)
1895                         vmolr |= E1000_VMOLR_BAM;
1896                 if (cfg->rx_mode & ETH_VMDQ_ACCEPT_MULTICAST)
1897                         vmolr |= E1000_VMOLR_MPME;
1898
1899                 E1000_WRITE_REG(hw, E1000_VMOLR(i), vmolr);
1900         }
1901
1902         /*
1903          * VMOLR: set STRVLAN to 1 if IGMAC in VTCTL is set to 1.
1904          * Both 82576 and 82580 support it.
1905          */
1906         if (hw->mac.type != e1000_i350) {
1907                 for (i = 0; i < E1000_VMOLR_SIZE; i++) {
1908                         vmolr = E1000_READ_REG(hw, E1000_VMOLR(i));
1909                         vmolr |= E1000_VMOLR_STRVLAN;
1910                         E1000_WRITE_REG(hw, E1000_VMOLR(i), vmolr);
1911                 }
1912         }
1913
1914         /* VFTA - enable all vlan filters */
1915         for (i = 0; i < IGB_VFTA_SIZE; i++)
1916                 E1000_WRITE_REG(hw, (E1000_VFTA+(i*4)), UINT32_MAX);
1917
1918         /* VFRE: enable RX for 8 pools; both 82576 and i350 support it */
1919         if (hw->mac.type != e1000_82580)
1920                 E1000_WRITE_REG(hw, E1000_VFRE, E1000_MBVFICR_VFREQ_MASK);
1921
1922         /*
1923          * RAH/RAL - allow pools to read specific mac addresses
1924          * In this case, all pools should be able to read from mac addr 0
1925          */
1926         E1000_WRITE_REG(hw, E1000_RAH(0), (E1000_RAH_AV | UINT16_MAX));
1927         E1000_WRITE_REG(hw, E1000_RAL(0), UINT32_MAX);
1928
1929         /* VLVF: set up filters for vlan tags as configured */
1930         for (i = 0; i < cfg->nb_pool_maps; i++) {
1931                 /* set vlan id in VF register and set the valid bit */
1932                 E1000_WRITE_REG(hw, E1000_VLVF(i), (E1000_VLVF_VLANID_ENABLE | \
1933                         (cfg->pool_map[i].vlan_id & ETH_VLAN_ID_MAX) | \
1934                         ((cfg->pool_map[i].pools << E1000_VLVF_POOLSEL_SHIFT ) & \
1935                         E1000_VLVF_POOLSEL_MASK)));
1936         }
1937
1938         E1000_WRITE_FLUSH(hw);
1939
1940         return 0;
1941 }
1942
1943
1944 /*********************************************************************
1945  *
1946  *  Enable receive unit.
1947  *
1948  **********************************************************************/
1949
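/*
 * Allocate one mbuf per RX descriptor, program its DMA address into the
 * descriptor and record it in the software ring.
 */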
1950 static int
1951 igb_alloc_rx_queue_mbufs(struct igb_rx_queue *rxq)
1952 {
1953         struct igb_rx_entry *rxe = rxq->sw_ring;
1954         uint64_t dma_addr;
1955         unsigned i;
1956
1957         /* Initialize software ring entries. */
1958         for (i = 0; i < rxq->nb_rx_desc; i++) {
1959                 volatile union e1000_adv_rx_desc *rxd;
1960                 struct rte_mbuf *mbuf = rte_rxmbuf_alloc(rxq->mb_pool);
1961
1962                 if (mbuf == NULL) {
1963                         PMD_INIT_LOG(ERR, "RX mbuf alloc failed "
1964                                      "queue_id=%hu", rxq->queue_id);
1965                         return -ENOMEM;
1966                 }
1967                 dma_addr =
1968                         rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(mbuf));
1969                 rxd = &rxq->rx_ring[i];
1970                 rxd->read.hdr_addr = 0;
1971                 rxd->read.pkt_addr = dma_addr;
1972                 rxe[i].mbuf = mbuf;
1973         }
1974
1975         return 0;
1976 }
1977
1978 #define E1000_MRQC_DEF_Q_SHIFT               (3)
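/*
 * Select the multi-queue RX scheme: VMDq when SR-IOV is active with eight
 * pools, otherwise RSS, VMDq-only or no multi-queue mode according to
 * rxmode.mq_mode.
 */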
1979 static int
1980 igb_dev_mq_rx_configure(struct rte_eth_dev *dev)
1981 {
1982         struct e1000_hw *hw =
1983                 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1984         uint32_t mrqc;
1985
1986         if (RTE_ETH_DEV_SRIOV(dev).active == ETH_8_POOLS) {
1987                 /*
1988                  * SRIOV active scheme
1989                  * FIXME if support RSS together with VMDq & SRIOV
1990                  */
1991                 mrqc = E1000_MRQC_ENABLE_VMDQ;
1992                 /* 011b Def_Q ignore, according to VT_CTL.DEF_PL */
1993                 mrqc |= 0x3 << E1000_MRQC_DEF_Q_SHIFT;
1994                 E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
1995         } else if (RTE_ETH_DEV_SRIOV(dev).active == 0) {
1996                 /*
1997                  * SRIOV inactive scheme
1998                  */
1999                 switch (dev->data->dev_conf.rxmode.mq_mode) {
2000                         case ETH_MQ_RX_RSS:
2001                                 igb_rss_configure(dev);
2002                                 break;
2003                         case ETH_MQ_RX_VMDQ_ONLY:
2004                                 /*Configure general VMDQ only RX parameters*/
2005                                 igb_vmdq_rx_hw_configure(dev);
2006                                 break;
2007                         case ETH_MQ_RX_NONE:
2008                                 /* if mq_mode is none, disable rss mode.*/
2009                         default:
2010                                 igb_rss_disable(dev);
2011                                 break;
2012                 }
2013         }
2014
2015         return 0;
2016 }
2017
2018 int
2019 eth_igb_rx_init(struct rte_eth_dev *dev)
2020 {
2021         struct e1000_hw     *hw;
2022         struct igb_rx_queue *rxq;
2023         uint32_t rctl;
2024         uint32_t rxcsum;
2025         uint32_t srrctl;
2026         uint16_t buf_size;
2027         uint16_t rctl_bsize;
2028         uint16_t i;
2029         int ret;
2030
2031         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2032         srrctl = 0;
2033
2034         /*
2035          * Make sure receives are disabled while setting
2036          * up the descriptor ring.
2037          */
2038         rctl = E1000_READ_REG(hw, E1000_RCTL);
2039         E1000_WRITE_REG(hw, E1000_RCTL, rctl & ~E1000_RCTL_EN);
2040
2041         /*
2042          * Configure support of jumbo frames, if any.
2043          */
2044         if (dev->data->dev_conf.rxmode.jumbo_frame == 1) {
2045                 rctl |= E1000_RCTL_LPE;
2046
2047                 /*
2048                  * Set maximum packet length by default, and might be updated
2049                  * together with enabling/disabling dual VLAN.
2050                  */
2051                 E1000_WRITE_REG(hw, E1000_RLPML,
2052                         dev->data->dev_conf.rxmode.max_rx_pkt_len +
2053                                                 VLAN_TAG_SIZE);
2054         } else
2055                 rctl &= ~E1000_RCTL_LPE;
2056
2057         /* Configure and enable each RX queue. */
2058         rctl_bsize = 0;
2059         dev->rx_pkt_burst = eth_igb_recv_pkts;
2060         for (i = 0; i < dev->data->nb_rx_queues; i++) {
2061                 uint64_t bus_addr;
2062                 uint32_t rxdctl;
2063
2064                 rxq = dev->data->rx_queues[i];
2065
2066                 /* Allocate buffers for descriptor rings and set up queue */
2067                 ret = igb_alloc_rx_queue_mbufs(rxq);
2068                 if (ret)
2069                         return ret;
2070
2071                 /*
2072                  * Reset crc_len in case it was changed after queue setup by a
2073                  *  call to configure
2074                  */
2075                 rxq->crc_len =
2076                         (uint8_t)(dev->data->dev_conf.rxmode.hw_strip_crc ?
2077                                                         0 : ETHER_CRC_LEN);
2078
2079                 bus_addr = rxq->rx_ring_phys_addr;
2080                 E1000_WRITE_REG(hw, E1000_RDLEN(rxq->reg_idx),
2081                                 rxq->nb_rx_desc *
2082                                 sizeof(union e1000_adv_rx_desc));
2083                 E1000_WRITE_REG(hw, E1000_RDBAH(rxq->reg_idx),
2084                                 (uint32_t)(bus_addr >> 32));
2085                 E1000_WRITE_REG(hw, E1000_RDBAL(rxq->reg_idx), (uint32_t)bus_addr);
2086
2087                 srrctl = E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
2088
2089                 /*
2090                  * Configure RX buffer size.
2091                  */
2092                 buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) -
2093                         RTE_PKTMBUF_HEADROOM);
2094                 if (buf_size >= 1024) {
2095                         /*
2096                          * Configure the BSIZEPACKET field of the SRRCTL
2097                          * register of the queue.
2098                          * Value is in 1 KB resolution, from 1 KB to 127 KB.
2099                          * If this field is equal to 0b, then RCTL.BSIZE
2100                          * determines the RX packet buffer size.
2101                          */
2102                         srrctl |= ((buf_size >> E1000_SRRCTL_BSIZEPKT_SHIFT) &
2103                                    E1000_SRRCTL_BSIZEPKT_MASK);
2104                         buf_size = (uint16_t) ((srrctl &
2105                                                 E1000_SRRCTL_BSIZEPKT_MASK) <<
2106                                                E1000_SRRCTL_BSIZEPKT_SHIFT);
2107
2108                         /* Add the dual VLAN tag length to support dual VLAN */
2109                         if ((dev->data->dev_conf.rxmode.max_rx_pkt_len +
2110                                                 2 * VLAN_TAG_SIZE) > buf_size){
2111                                 if (!dev->data->scattered_rx)
2112                                         PMD_INIT_LOG(DEBUG,
2113                                                      "forcing scatter mode");
2114                                 dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
2115                                 dev->data->scattered_rx = 1;
2116                         }
2117                 } else {
2118                         /*
2119                          * Use BSIZE field of the device RCTL register.
2120                          */
2121                         if ((rctl_bsize == 0) || (rctl_bsize > buf_size))
2122                                 rctl_bsize = buf_size;
2123                         if (!dev->data->scattered_rx)
2124                                 PMD_INIT_LOG(DEBUG, "forcing scatter mode");
2125                         dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
2126                         dev->data->scattered_rx = 1;
2127                 }
2128
2129                 /* Set if packets are dropped when no descriptors available */
2130                 if (rxq->drop_en)
2131                         srrctl |= E1000_SRRCTL_DROP_EN;
2132
2133                 E1000_WRITE_REG(hw, E1000_SRRCTL(rxq->reg_idx), srrctl);
2134
2135                 /* Enable this RX queue. */
2136                 rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(rxq->reg_idx));
2137                 rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
2138                 rxdctl &= 0xFFF00000;
2139                 rxdctl |= (rxq->pthresh & 0x1F);
2140                 rxdctl |= ((rxq->hthresh & 0x1F) << 8);
2141                 rxdctl |= ((rxq->wthresh & 0x1F) << 16);
2142                 E1000_WRITE_REG(hw, E1000_RXDCTL(rxq->reg_idx), rxdctl);
2143         }
2144
2145         if (dev->data->dev_conf.rxmode.enable_scatter) {
2146                 if (!dev->data->scattered_rx)
2147                         PMD_INIT_LOG(DEBUG, "forcing scatter mode");
2148                 dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
2149                 dev->data->scattered_rx = 1;
2150         }
2151
2152         /*
2153          * Setup BSIZE field of RCTL register, if needed.
2154          * Buffer sizes >= 1024 are not [supposed to be] set up in the RCTL
2155          * register, since the code above configures the SRRCTL register of
2156          * the RX queue in such a case.
2157          * All configurable sizes are:
2158          * 16384: rctl |= (E1000_RCTL_SZ_16384 | E1000_RCTL_BSEX);
2159          *  8192: rctl |= (E1000_RCTL_SZ_8192  | E1000_RCTL_BSEX);
2160          *  4096: rctl |= (E1000_RCTL_SZ_4096  | E1000_RCTL_BSEX);
2161          *  2048: rctl |= E1000_RCTL_SZ_2048;
2162          *  1024: rctl |= E1000_RCTL_SZ_1024;
2163          *   512: rctl |= E1000_RCTL_SZ_512;
2164          *   256: rctl |= E1000_RCTL_SZ_256;
2165          */
2166         if (rctl_bsize > 0) {
2167                 if (rctl_bsize >= 512) /* 512 <= buf_size < 1024 - use 512 */
2168                         rctl |= E1000_RCTL_SZ_512;
2169                 else /* 256 <= buf_size < 512 - use 256 */
2170                         rctl |= E1000_RCTL_SZ_256;
2171         }
2172
2173         /*
2174          * Configure RSS if device configured with multiple RX queues.
2175          */
2176         igb_dev_mq_rx_configure(dev);
2177
2178         /* Update the rctl since igb_dev_mq_rx_configure may change its value */
2179         rctl |= E1000_READ_REG(hw, E1000_RCTL);
2180
2181         /*
2182          * Setup the Checksum Register.
2183          * Receive Full-Packet Checksum Offload is mutually exclusive with RSS.
2184          */
2185         rxcsum = E1000_READ_REG(hw, E1000_RXCSUM);
2186         rxcsum |= E1000_RXCSUM_PCSD;
2187
2188         /* Enable both L3/L4 rx checksum offload */
2189         if (dev->data->dev_conf.rxmode.hw_ip_checksum)
2190                 rxcsum |= (E1000_RXCSUM_IPOFL  | E1000_RXCSUM_TUOFL);
2191         else
2192                 rxcsum &= ~(E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL);
2193         E1000_WRITE_REG(hw, E1000_RXCSUM, rxcsum);
2194
2195         /* Setup the Receive Control Register. */
2196         if (dev->data->dev_conf.rxmode.hw_strip_crc) {
2197                 rctl |= E1000_RCTL_SECRC; /* Strip Ethernet CRC. */
2198
2199                 /* set STRCRC bit in all queues */
2200                 if (hw->mac.type == e1000_i350 ||
2201                     hw->mac.type == e1000_i210 ||
2202                     hw->mac.type == e1000_i211 ||
2203                     hw->mac.type == e1000_i354) {
2204                         for (i = 0; i < dev->data->nb_rx_queues; i++) {
2205                                 rxq = dev->data->rx_queues[i];
2206                                 uint32_t dvmolr = E1000_READ_REG(hw,
2207                                         E1000_DVMOLR(rxq->reg_idx));
2208                                 dvmolr |= E1000_DVMOLR_STRCRC;
2209                                 E1000_WRITE_REG(hw, E1000_DVMOLR(rxq->reg_idx), dvmolr);
2210                         }
2211                 }
2212         } else {
2213                 rctl &= ~E1000_RCTL_SECRC; /* Do not Strip Ethernet CRC. */
2214
2215                 /* clear STRCRC bit in all queues */
2216                 if (hw->mac.type == e1000_i350 ||
2217                     hw->mac.type == e1000_i210 ||
2218                     hw->mac.type == e1000_i211 ||
2219                     hw->mac.type == e1000_i354) {
2220                         for (i = 0; i < dev->data->nb_rx_queues; i++) {
2221                                 rxq = dev->data->rx_queues[i];
2222                                 uint32_t dvmolr = E1000_READ_REG(hw,
2223                                         E1000_DVMOLR(rxq->reg_idx));
2224                                 dvmolr &= ~E1000_DVMOLR_STRCRC;
2225                                 E1000_WRITE_REG(hw, E1000_DVMOLR(rxq->reg_idx), dvmolr);
2226                         }
2227                 }
2228         }
2229
2230         rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
2231         rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
2232                 E1000_RCTL_RDMTS_HALF |
2233                 (hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
2234
2235         /* Make sure VLAN Filters are off. */
2236         if (dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_VMDQ_ONLY)
2237                 rctl &= ~E1000_RCTL_VFE;
2238         /* Don't store bad packets. */
2239         rctl &= ~E1000_RCTL_SBP;
2240
2241         /* Enable Receives. */
2242         E1000_WRITE_REG(hw, E1000_RCTL, rctl);
2243
2244         /*
2245          * Setup the HW Rx Head and Tail Descriptor Pointers.
2246          * This needs to be done after enable.
2247          */
2248         for (i = 0; i < dev->data->nb_rx_queues; i++) {
2249                 rxq = dev->data->rx_queues[i];
2250                 E1000_WRITE_REG(hw, E1000_RDH(rxq->reg_idx), 0);
2251                 E1000_WRITE_REG(hw, E1000_RDT(rxq->reg_idx), rxq->nb_rx_desc - 1);
2252         }
2253
2254         return 0;
2255 }
2256
2257 /*********************************************************************
2258  *
2259  *  Enable transmit unit.
2260  *
2261  **********************************************************************/
2262 void
2263 eth_igb_tx_init(struct rte_eth_dev *dev)
2264 {
2265         struct e1000_hw     *hw;
2266         struct igb_tx_queue *txq;
2267         uint32_t tctl;
2268         uint32_t txdctl;
2269         uint16_t i;
2270
2271         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2272
2273         /* Setup the Base and Length of the Tx Descriptor Rings. */
2274         for (i = 0; i < dev->data->nb_tx_queues; i++) {
2275                 uint64_t bus_addr;
2276                 txq = dev->data->tx_queues[i];
2277                 bus_addr = txq->tx_ring_phys_addr;
2278
2279                 E1000_WRITE_REG(hw, E1000_TDLEN(txq->reg_idx),
2280                                 txq->nb_tx_desc *
2281                                 sizeof(union e1000_adv_tx_desc));
2282                 E1000_WRITE_REG(hw, E1000_TDBAH(txq->reg_idx),
2283                                 (uint32_t)(bus_addr >> 32));
2284                 E1000_WRITE_REG(hw, E1000_TDBAL(txq->reg_idx), (uint32_t)bus_addr);
2285
2286                 /* Setup the HW Tx Head and Tail descriptor pointers. */
2287                 E1000_WRITE_REG(hw, E1000_TDT(txq->reg_idx), 0);
2288                 E1000_WRITE_REG(hw, E1000_TDH(txq->reg_idx), 0);
2289
2290                 /* Setup Transmit threshold registers. */
2291                 txdctl = E1000_READ_REG(hw, E1000_TXDCTL(txq->reg_idx));
2292                 txdctl |= txq->pthresh & 0x1F;
2293                 txdctl |= ((txq->hthresh & 0x1F) << 8);
2294                 txdctl |= ((txq->wthresh & 0x1F) << 16);
2295                 txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
2296                 E1000_WRITE_REG(hw, E1000_TXDCTL(txq->reg_idx), txdctl);
2297         }
2298
2299         /* Program the Transmit Control Register. */
2300         tctl = E1000_READ_REG(hw, E1000_TCTL);
2301         tctl &= ~E1000_TCTL_CT;
2302         tctl |= (E1000_TCTL_PSP | E1000_TCTL_RTLC | E1000_TCTL_EN |
2303                  (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT));
2304
2305         e1000_config_collision_dist(hw);
2306
2307         /* This write will effectively turn on the transmit unit. */
2308         E1000_WRITE_REG(hw, E1000_TCTL, tctl);
2309 }
2310
2311 /*********************************************************************
2312  *
2313  *  Enable VF receive unit.
2314  *
2315  **********************************************************************/
2316 int
2317 eth_igbvf_rx_init(struct rte_eth_dev *dev)
2318 {
2319         struct e1000_hw     *hw;
2320         struct igb_rx_queue *rxq;
2321         uint32_t srrctl;
2322         uint16_t buf_size;
2323         uint16_t rctl_bsize;
2324         uint16_t i;
2325         int ret;
2326
2327         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2328
2329         /* setup MTU */
2330         e1000_rlpml_set_vf(hw,
2331                 (uint16_t)(dev->data->dev_conf.rxmode.max_rx_pkt_len +
2332                 VLAN_TAG_SIZE));
2333
2334         /* Configure and enable each RX queue. */
2335         rctl_bsize = 0;
2336         dev->rx_pkt_burst = eth_igb_recv_pkts;
2337         for (i = 0; i < dev->data->nb_rx_queues; i++) {
2338                 uint64_t bus_addr;
2339                 uint32_t rxdctl;
2340
2341                 rxq = dev->data->rx_queues[i];
2342
2343                 /* Allocate buffers for descriptor rings and set up queue */
2344                 ret = igb_alloc_rx_queue_mbufs(rxq);
2345                 if (ret)
2346                         return ret;
2347
2348                 bus_addr = rxq->rx_ring_phys_addr;
2349                 E1000_WRITE_REG(hw, E1000_RDLEN(i),
2350                                 rxq->nb_rx_desc *
2351                                 sizeof(union e1000_adv_rx_desc));
2352                 E1000_WRITE_REG(hw, E1000_RDBAH(i),
2353                                 (uint32_t)(bus_addr >> 32));
2354                 E1000_WRITE_REG(hw, E1000_RDBAL(i), (uint32_t)bus_addr);
2355
2356                 srrctl = E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
2357
2358                 /*
2359                  * Configure RX buffer size.
2360                  */
2361                 buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) -
2362                         RTE_PKTMBUF_HEADROOM);
2363                 if (buf_size >= 1024) {
2364                         /*
2365                          * Configure the BSIZEPACKET field of the SRRCTL
2366                          * register of the queue.
2367                          * Value is in 1 KB resolution, from 1 KB to 127 KB.
2368                          * If this field is equal to 0b, then RCTL.BSIZE
2369                          * determines the RX packet buffer size.
2370                          */
2371                         srrctl |= ((buf_size >> E1000_SRRCTL_BSIZEPKT_SHIFT) &
2372                                    E1000_SRRCTL_BSIZEPKT_MASK);
2373                         buf_size = (uint16_t) ((srrctl &
2374                                                 E1000_SRRCTL_BSIZEPKT_MASK) <<
2375                                                E1000_SRRCTL_BSIZEPKT_SHIFT);
2376
2377                         /* Add the dual VLAN tag length to support dual VLAN */
2378                         if ((dev->data->dev_conf.rxmode.max_rx_pkt_len +
2379                                                 2 * VLAN_TAG_SIZE) > buf_size){
2380                                 if (!dev->data->scattered_rx)
2381                                         PMD_INIT_LOG(DEBUG,
2382                                                      "forcing scatter mode");
2383                                 dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
2384                                 dev->data->scattered_rx = 1;
2385                         }
2386                 } else {
2387                         /*
2388                          * Use BSIZE field of the device RCTL register.
2389                          */
2390                         if ((rctl_bsize == 0) || (rctl_bsize > buf_size))
2391                                 rctl_bsize = buf_size;
2392                         if (!dev->data->scattered_rx)
2393                                 PMD_INIT_LOG(DEBUG, "forcing scatter mode");
2394                         dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
2395                         dev->data->scattered_rx = 1;
2396                 }
2397
2398                 /* Set if packets are dropped when no descriptors available */
2399                 if (rxq->drop_en)
2400                         srrctl |= E1000_SRRCTL_DROP_EN;
2401
2402                 E1000_WRITE_REG(hw, E1000_SRRCTL(i), srrctl);
2403
2404                 /* Enable this RX queue. */
2405                 rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(i));
2406                 rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
2407                 rxdctl &= 0xFFF00000;
2408                 rxdctl |= (rxq->pthresh & 0x1F);
2409                 rxdctl |= ((rxq->hthresh & 0x1F) << 8);
2410                 if (hw->mac.type == e1000_vfadapt) {
2411                         /*
2412                          * Workaround of 82576 VF Erratum
2413                          * force set WTHRESH to 1
2414                          * to avoid Write-Back not triggered sometimes
2415                          */
2416                         rxdctl |= 0x10000;
2417                         PMD_INIT_LOG(DEBUG, "Force set RX WTHRESH to 1 !");
2418                 }
2419                 else
2420                         rxdctl |= ((rxq->wthresh & 0x1F) << 16);
2421                 E1000_WRITE_REG(hw, E1000_RXDCTL(i), rxdctl);
2422         }
2423
2424         if (dev->data->dev_conf.rxmode.enable_scatter) {
2425                 if (!dev->data->scattered_rx)
2426                         PMD_INIT_LOG(DEBUG, "forcing scatter mode");
2427                 dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
2428                 dev->data->scattered_rx = 1;
2429         }
2430
2431         /*
2432          * Setup the HW Rx Head and Tail Descriptor Pointers.
2433          * This needs to be done after enable.
2434          */
2435         for (i = 0; i < dev->data->nb_rx_queues; i++) {
2436                 rxq = dev->data->rx_queues[i];
2437                 E1000_WRITE_REG(hw, E1000_RDH(i), 0);
2438                 E1000_WRITE_REG(hw, E1000_RDT(i), rxq->nb_rx_desc - 1);
2439         }
2440
2441         return 0;
2442 }
2443
2444 /*********************************************************************
2445  *
2446  *  Enable VF transmit unit.
2447  *
2448  **********************************************************************/
2449 void
2450 eth_igbvf_tx_init(struct rte_eth_dev *dev)
2451 {
2452         struct e1000_hw     *hw;
2453         struct igb_tx_queue *txq;
2454         uint32_t txdctl;
2455         uint16_t i;
2456
2457         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2458
2459         /* Setup the Base and Length of the Tx Descriptor Rings. */
2460         for (i = 0; i < dev->data->nb_tx_queues; i++) {
2461                 uint64_t bus_addr;
2462
2463                 txq = dev->data->tx_queues[i];
2464                 bus_addr = txq->tx_ring_phys_addr;
2465                 E1000_WRITE_REG(hw, E1000_TDLEN(i),
2466                                 txq->nb_tx_desc *
2467                                 sizeof(union e1000_adv_tx_desc));
2468                 E1000_WRITE_REG(hw, E1000_TDBAH(i),
2469                                 (uint32_t)(bus_addr >> 32));
2470                 E1000_WRITE_REG(hw, E1000_TDBAL(i), (uint32_t)bus_addr);
2471
2472                 /* Setup the HW Tx Head and Tail descriptor pointers. */
2473                 E1000_WRITE_REG(hw, E1000_TDT(i), 0);
2474                 E1000_WRITE_REG(hw, E1000_TDH(i), 0);
2475
2476                 /* Setup Transmit threshold registers. */
2477                 txdctl = E1000_READ_REG(hw, E1000_TXDCTL(i));
2478                 txdctl |= txq->pthresh & 0x1F;
2479                 txdctl |= ((txq->hthresh & 0x1F) << 8);
2480                 if (hw->mac.type == e1000_82576) {
2481                         /*
2482                          * Workaround of 82576 VF Erratum
2483                          * force set WTHRESH to 1
2484                          * to avoid Write-Back not triggered sometimes
2485                          */
2486                         txdctl |= 0x10000;
2487                         PMD_INIT_LOG(DEBUG, "Force set TX WTHRESH to 1 !");
2488                 }
2489                 else
2490                         txdctl |= ((txq->wthresh & 0x1F) << 16);
2491                 txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
2492                 E1000_WRITE_REG(hw, E1000_TXDCTL(i), txdctl);
2493         }
2494
2495 }
2496
2497 void
2498 igb_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
2499         struct rte_eth_rxq_info *qinfo)
2500 {
2501         struct igb_rx_queue *rxq;
2502
2503         rxq = dev->data->rx_queues[queue_id];
2504
2505         qinfo->mp = rxq->mb_pool;
2506         qinfo->scattered_rx = dev->data->scattered_rx;
2507         qinfo->nb_desc = rxq->nb_rx_desc;
2508
2509         qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
2510         qinfo->conf.rx_drop_en = rxq->drop_en;
2511 }
2512
2513 void
2514 igb_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
2515         struct rte_eth_txq_info *qinfo)
2516 {
2517         struct igb_tx_queue *txq;
2518
2519         txq = dev->data->tx_queues[queue_id];
2520
2521         qinfo->nb_desc = txq->nb_tx_desc;
2522
2523         qinfo->conf.tx_thresh.pthresh = txq->pthresh;
2524         qinfo->conf.tx_thresh.hthresh = txq->hthresh;
2525         qinfo->conf.tx_thresh.wthresh = txq->wthresh;
2526 }