deb_dpdk.git: drivers/net/i40e/i40e_rxtx.c (imported upstream version 16.07-rc1)
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33
34 #include <stdio.h>
35 #include <stdlib.h>
36 #include <string.h>
37 #include <errno.h>
38 #include <stdint.h>
39 #include <stdarg.h>
40 #include <unistd.h>
41 #include <inttypes.h>
42 #include <sys/queue.h>
43
44 #include <rte_string_fns.h>
45 #include <rte_memzone.h>
46 #include <rte_mbuf.h>
47 #include <rte_malloc.h>
48 #include <rte_ether.h>
49 #include <rte_ethdev.h>
50 #include <rte_tcp.h>
51 #include <rte_sctp.h>
52 #include <rte_udp.h>
53
54 #include "i40e_logs.h"
55 #include "base/i40e_prototype.h"
56 #include "base/i40e_type.h"
57 #include "i40e_ethdev.h"
58 #include "i40e_rxtx.h"
59
60 #define DEFAULT_TX_RS_THRESH   32
61 #define DEFAULT_TX_FREE_THRESH 32
62 #define I40E_MAX_PKT_TYPE      256
63
64 #define I40E_TX_MAX_BURST  32
65
66 #define I40E_DMA_MEM_ALIGN 4096
67
68 /* Base address of the HW descriptor ring should be 128B aligned. */
69 #define I40E_RING_BASE_ALIGN    128
70
71 #define I40E_SIMPLE_FLAGS ((uint32_t)ETH_TXQ_FLAGS_NOMULTSEGS | \
72                                         ETH_TXQ_FLAGS_NOOFFLOADS)
73
74 #define I40E_TXD_CMD (I40E_TX_DESC_CMD_EOP | I40E_TX_DESC_CMD_RS)
75
76 #define I40E_TX_CKSUM_OFFLOAD_MASK (             \
77                 PKT_TX_IP_CKSUM |                \
78                 PKT_TX_L4_MASK |                 \
79                 PKT_TX_TCP_SEG |                 \
80                 PKT_TX_OUTER_IP_CKSUM)
81
82 static uint16_t i40e_xmit_pkts_simple(void *tx_queue,
83                                       struct rte_mbuf **tx_pkts,
84                                       uint16_t nb_pkts);
85
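/* Extract the stripped VLAN tag(s) from the RX descriptor into the mbuf:
 * L2TAG1 provides the VLAN TCI; with 32-byte descriptors a second stripped
 * tag in L2TAG2 marks the packet as QinQ, the first tag then becoming the
 * outer one.
 */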
86 static inline void
87 i40e_rxd_to_vlan_tci(struct rte_mbuf *mb, volatile union i40e_rx_desc *rxdp)
88 {
89         if (rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len) &
90                 (1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)) {
91                 mb->ol_flags |= PKT_RX_VLAN_PKT | PKT_RX_VLAN_STRIPPED;
92                 mb->vlan_tci =
93                         rte_le_to_cpu_16(rxdp->wb.qword0.lo_dword.l2tag1);
94                 PMD_RX_LOG(DEBUG, "Descriptor l2tag1: %u",
95                            rte_le_to_cpu_16(rxdp->wb.qword0.lo_dword.l2tag1));
96         } else {
97                 mb->vlan_tci = 0;
98         }
99 #ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC
100         if (rte_le_to_cpu_16(rxdp->wb.qword2.ext_status) &
101                 (1 << I40E_RX_DESC_EXT_STATUS_L2TAG2P_SHIFT)) {
102                 mb->ol_flags |= PKT_RX_QINQ_STRIPPED;
103                 mb->vlan_tci_outer = mb->vlan_tci;
104                 mb->vlan_tci = rte_le_to_cpu_16(rxdp->wb.qword2.l2tag2_2);
105                 PMD_RX_LOG(DEBUG, "Descriptor l2tag2_1: %u, l2tag2_2: %u",
106                            rte_le_to_cpu_16(rxdp->wb.qword2.l2tag2_1),
107                            rte_le_to_cpu_16(rxdp->wb.qword2.l2tag2_2));
108         } else {
109                 mb->vlan_tci_outer = 0;
110         }
111 #endif
112         PMD_RX_LOG(DEBUG, "Mbuf vlan_tci: %u, vlan_tci_outer: %u",
113                    mb->vlan_tci, mb->vlan_tci_outer);
114 }
115
116 /* Translate the rx descriptor status to pkt flags */
117 static inline uint64_t
118 i40e_rxd_status_to_pkt_flags(uint64_t qword)
119 {
120         uint64_t flags;
121
122         /* Check if RSS_HASH */
123         flags = (((qword >> I40E_RX_DESC_STATUS_FLTSTAT_SHIFT) &
124                                         I40E_RX_DESC_FLTSTAT_RSS_HASH) ==
125                         I40E_RX_DESC_FLTSTAT_RSS_HASH) ? PKT_RX_RSS_HASH : 0;
126
127         /* Check if FDIR Match */
128         flags |= (qword & (1 << I40E_RX_DESC_STATUS_FLM_SHIFT) ?
129                                                         PKT_RX_FDIR : 0);
130
131         return flags;
132 }
133
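/* Translate the RX descriptor error bits into checksum offload flags
 * (bad IP, L4 or outer IP checksum).
 */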
134 static inline uint64_t
135 i40e_rxd_error_to_pkt_flags(uint64_t qword)
136 {
137         uint64_t flags = 0;
138         uint64_t error_bits = (qword >> I40E_RXD_QW1_ERROR_SHIFT);
139
140 #define I40E_RX_ERR_BITS 0x3f
141         if (likely((error_bits & I40E_RX_ERR_BITS) == 0))
142                 return flags;
143         if (unlikely(error_bits & (1 << I40E_RX_DESC_ERROR_IPE_SHIFT)))
144                 flags |= PKT_RX_IP_CKSUM_BAD;
145         if (unlikely(error_bits & (1 << I40E_RX_DESC_ERROR_L4E_SHIFT)))
146                 flags |= PKT_RX_L4_CKSUM_BAD;
147         if (unlikely(error_bits & (1 << I40E_RX_DESC_ERROR_EIPE_SHIFT)))
148                 flags |= PKT_RX_EIP_CKSUM_BAD;
149
150         return flags;
151 }
152
153 /* Check and set the IEEE 1588 timesync index from the descriptor and
154  * return the appropriate timestamp offload flags.
155  */
156 #ifdef RTE_LIBRTE_IEEE1588
157 static inline uint64_t
158 i40e_get_iee15888_flags(struct rte_mbuf *mb, uint64_t qword)
159 {
160         uint64_t pkt_flags = 0;
161         uint16_t tsyn = (qword & (I40E_RXD_QW1_STATUS_TSYNVALID_MASK
162                                   | I40E_RXD_QW1_STATUS_TSYNINDX_MASK))
163                                     >> I40E_RX_DESC_STATUS_TSYNINDX_SHIFT;
164
165         if ((mb->packet_type & RTE_PTYPE_L2_MASK)
166                         == RTE_PTYPE_L2_ETHER_TIMESYNC)
167                 pkt_flags = PKT_RX_IEEE1588_PTP;
168         if (tsyn & 0x04) {
169                 pkt_flags |= PKT_RX_IEEE1588_TMST;
170                 mb->timesync = tsyn & 0x03;
171         }
172
173         return pkt_flags;
174 }
175 #endif
176
177 /* The hardware datasheet describes in detail what each ptype value means.
178  *
179  * @note: update i40e_dev_supported_ptypes_get() if anything changes here.
180  */
181 static inline uint32_t
182 i40e_rxd_pkt_type_mapping(uint8_t ptype)
183 {
184         static const uint32_t type_table[UINT8_MAX + 1] __rte_cache_aligned = {
185                 /* L2 types */
186                 /* [0] reserved */
187                 [1] = RTE_PTYPE_L2_ETHER,
188                 [2] = RTE_PTYPE_L2_ETHER_TIMESYNC,
189                 /* [3] - [5] reserved */
190                 [6] = RTE_PTYPE_L2_ETHER_LLDP,
191                 /* [7] - [10] reserved */
192                 [11] = RTE_PTYPE_L2_ETHER_ARP,
193                 /* [12] - [21] reserved */
194
195                 /* Non tunneled IPv4 */
196                 [22] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
197                         RTE_PTYPE_L4_FRAG,
198                 [23] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
199                         RTE_PTYPE_L4_NONFRAG,
200                 [24] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
201                         RTE_PTYPE_L4_UDP,
202                 /* [25] reserved */
203                 [26] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
204                         RTE_PTYPE_L4_TCP,
205                 [27] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
206                         RTE_PTYPE_L4_SCTP,
207                 [28] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
208                         RTE_PTYPE_L4_ICMP,
209
210                 /* IPv4 --> IPv4 */
211                 [29] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
212                         RTE_PTYPE_TUNNEL_IP |
213                         RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
214                         RTE_PTYPE_INNER_L4_FRAG,
215                 [30] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
216                         RTE_PTYPE_TUNNEL_IP |
217                         RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
218                         RTE_PTYPE_INNER_L4_NONFRAG,
219                 [31] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
220                         RTE_PTYPE_TUNNEL_IP |
221                         RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
222                         RTE_PTYPE_INNER_L4_UDP,
223                 /* [32] reserved */
224                 [33] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
225                         RTE_PTYPE_TUNNEL_IP |
226                         RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
227                         RTE_PTYPE_INNER_L4_TCP,
228                 [34] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
229                         RTE_PTYPE_TUNNEL_IP |
230                         RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
231                         RTE_PTYPE_INNER_L4_SCTP,
232                 [35] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
233                         RTE_PTYPE_TUNNEL_IP |
234                         RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
235                         RTE_PTYPE_INNER_L4_ICMP,
236
237                 /* IPv4 --> IPv6 */
238                 [36] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
239                         RTE_PTYPE_TUNNEL_IP |
240                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
241                         RTE_PTYPE_INNER_L4_FRAG,
242                 [37] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
243                         RTE_PTYPE_TUNNEL_IP |
244                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
245                         RTE_PTYPE_INNER_L4_NONFRAG,
246                 [38] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
247                         RTE_PTYPE_TUNNEL_IP |
248                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
249                         RTE_PTYPE_INNER_L4_UDP,
250                 /* [39] reserved */
251                 [40] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
252                         RTE_PTYPE_TUNNEL_IP |
253                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
254                         RTE_PTYPE_INNER_L4_TCP,
255                 [41] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
256                         RTE_PTYPE_TUNNEL_IP |
257                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
258                         RTE_PTYPE_INNER_L4_SCTP,
259                 [42] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
260                         RTE_PTYPE_TUNNEL_IP |
261                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
262                         RTE_PTYPE_INNER_L4_ICMP,
263
264                 /* IPv4 --> GRE/Teredo/VXLAN */
265                 [43] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
266                         RTE_PTYPE_TUNNEL_GRENAT,
267
268                 /* IPv4 --> GRE/Teredo/VXLAN --> IPv4 */
269                 [44] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
270                         RTE_PTYPE_TUNNEL_GRENAT |
271                         RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
272                         RTE_PTYPE_INNER_L4_FRAG,
273                 [45] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
274                         RTE_PTYPE_TUNNEL_GRENAT |
275                         RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
276                         RTE_PTYPE_INNER_L4_NONFRAG,
277                 [46] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
278                         RTE_PTYPE_TUNNEL_GRENAT |
279                         RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
280                         RTE_PTYPE_INNER_L4_UDP,
281                 /* [47] reserved */
282                 [48] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
283                         RTE_PTYPE_TUNNEL_GRENAT |
284                         RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
285                         RTE_PTYPE_INNER_L4_TCP,
286                 [49] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
287                         RTE_PTYPE_TUNNEL_GRENAT |
288                         RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
289                         RTE_PTYPE_INNER_L4_SCTP,
290                 [50] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
291                         RTE_PTYPE_TUNNEL_GRENAT |
292                         RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
293                         RTE_PTYPE_INNER_L4_ICMP,
294
295                 /* IPv4 --> GRE/Teredo/VXLAN --> IPv6 */
296                 [51] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
297                         RTE_PTYPE_TUNNEL_GRENAT |
298                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
299                         RTE_PTYPE_INNER_L4_FRAG,
300                 [52] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
301                         RTE_PTYPE_TUNNEL_GRENAT |
302                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
303                         RTE_PTYPE_INNER_L4_NONFRAG,
304                 [53] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
305                         RTE_PTYPE_TUNNEL_GRENAT |
306                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
307                         RTE_PTYPE_INNER_L4_UDP,
308                 /* [54] reserved */
309                 [55] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
310                         RTE_PTYPE_TUNNEL_GRENAT |
311                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
312                         RTE_PTYPE_INNER_L4_TCP,
313                 [56] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
314                         RTE_PTYPE_TUNNEL_GRENAT |
315                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
316                         RTE_PTYPE_INNER_L4_SCTP,
317                 [57] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
318                         RTE_PTYPE_TUNNEL_GRENAT |
319                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
320                         RTE_PTYPE_INNER_L4_ICMP,
321
322                 /* IPv4 --> GRE/Teredo/VXLAN --> MAC */
323                 [58] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
324                         RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER,
325
326                 /* IPv4 --> GRE/Teredo/VXLAN --> MAC --> IPv4 */
327                 [59] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
328                         RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
329                         RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
330                         RTE_PTYPE_INNER_L4_FRAG,
331                 [60] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
332                         RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
333                         RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
334                         RTE_PTYPE_INNER_L4_NONFRAG,
335                 [61] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
336                         RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
337                         RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
338                         RTE_PTYPE_INNER_L4_UDP,
339                 /* [62] reserved */
340                 [63] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
341                         RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
342                         RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
343                         RTE_PTYPE_INNER_L4_TCP,
344                 [64] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
345                         RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
346                         RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
347                         RTE_PTYPE_INNER_L4_SCTP,
348                 [65] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
349                         RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
350                         RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
351                         RTE_PTYPE_INNER_L4_ICMP,
352
353                 /* IPv4 --> GRE/Teredo/VXLAN --> MAC --> IPv6 */
354                 [66] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
355                         RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
356                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
357                         RTE_PTYPE_INNER_L4_FRAG,
358                 [67] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
359                         RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
360                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
361                         RTE_PTYPE_INNER_L4_NONFRAG,
362                 [68] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
363                         RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
364                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
365                         RTE_PTYPE_INNER_L4_UDP,
366                 /* [69] reserved */
367                 [70] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
368                         RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
369                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
370                         RTE_PTYPE_INNER_L4_TCP,
371                 [71] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
372                         RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
373                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
374                         RTE_PTYPE_INNER_L4_SCTP,
375                 [72] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
376                         RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
377                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
378                         RTE_PTYPE_INNER_L4_ICMP,
379
380                 /* IPv4 --> GRE/Teredo/VXLAN --> MAC/VLAN */
381                 [73] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
382                         RTE_PTYPE_TUNNEL_GRENAT |
383                         RTE_PTYPE_INNER_L2_ETHER_VLAN,
384
385                 /* IPv4 --> GRE/Teredo/VXLAN --> MAC/VLAN --> IPv4 */
386                 [74] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
387                         RTE_PTYPE_TUNNEL_GRENAT |
388                         RTE_PTYPE_INNER_L2_ETHER_VLAN |
389                         RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
390                         RTE_PTYPE_INNER_L4_FRAG,
391                 [75] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
392                         RTE_PTYPE_TUNNEL_GRENAT |
393                         RTE_PTYPE_INNER_L2_ETHER_VLAN |
394                         RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
395                         RTE_PTYPE_INNER_L4_NONFRAG,
396                 [76] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
397                         RTE_PTYPE_TUNNEL_GRENAT |
398                         RTE_PTYPE_INNER_L2_ETHER_VLAN |
399                         RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
400                         RTE_PTYPE_INNER_L4_UDP,
401                 /* [77] reserved */
402                 [78] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
403                         RTE_PTYPE_TUNNEL_GRENAT |
404                         RTE_PTYPE_INNER_L2_ETHER_VLAN |
405                         RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
406                         RTE_PTYPE_INNER_L4_TCP,
407                 [79] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
408                         RTE_PTYPE_TUNNEL_GRENAT |
409                         RTE_PTYPE_INNER_L2_ETHER_VLAN |
410                         RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
411                         RTE_PTYPE_INNER_L4_SCTP,
412                 [80] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
413                         RTE_PTYPE_TUNNEL_GRENAT |
414                         RTE_PTYPE_INNER_L2_ETHER_VLAN |
415                         RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
416                         RTE_PTYPE_INNER_L4_ICMP,
417
418                 /* IPv4 --> GRE/Teredo/VXLAN --> MAC/VLAN --> IPv6 */
419                 [81] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
420                         RTE_PTYPE_TUNNEL_GRENAT |
421                         RTE_PTYPE_INNER_L2_ETHER_VLAN |
422                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
423                         RTE_PTYPE_INNER_L4_FRAG,
424                 [82] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
425                         RTE_PTYPE_TUNNEL_GRENAT |
426                         RTE_PTYPE_INNER_L2_ETHER_VLAN |
427                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
428                         RTE_PTYPE_INNER_L4_NONFRAG,
429                 [83] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
430                         RTE_PTYPE_TUNNEL_GRENAT |
431                         RTE_PTYPE_INNER_L2_ETHER_VLAN |
432                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
433                         RTE_PTYPE_INNER_L4_UDP,
434                 /* [84] reserved */
435                 [85] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
436                         RTE_PTYPE_TUNNEL_GRENAT |
437                         RTE_PTYPE_INNER_L2_ETHER_VLAN |
438                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
439                         RTE_PTYPE_INNER_L4_TCP,
440                 [86] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
441                         RTE_PTYPE_TUNNEL_GRENAT |
442                         RTE_PTYPE_INNER_L2_ETHER_VLAN |
443                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
444                         RTE_PTYPE_INNER_L4_SCTP,
445                 [87] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
446                         RTE_PTYPE_TUNNEL_GRENAT |
447                         RTE_PTYPE_INNER_L2_ETHER_VLAN |
448                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
449                         RTE_PTYPE_INNER_L4_ICMP,
450
451                 /* Non tunneled IPv6 */
452                 [88] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
453                         RTE_PTYPE_L4_FRAG,
454                 [89] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
455                         RTE_PTYPE_L4_NONFRAG,
456                 [90] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
457                         RTE_PTYPE_L4_UDP,
458                 /* [91] reserved */
459                 [92] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
460                         RTE_PTYPE_L4_TCP,
461                 [93] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
462                         RTE_PTYPE_L4_SCTP,
463                 [94] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
464                         RTE_PTYPE_L4_ICMP,
465
466                 /* IPv6 --> IPv4 */
467                 [95] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
468                         RTE_PTYPE_TUNNEL_IP |
469                         RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
470                         RTE_PTYPE_INNER_L4_FRAG,
471                 [96] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
472                         RTE_PTYPE_TUNNEL_IP |
473                         RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
474                         RTE_PTYPE_INNER_L4_NONFRAG,
475                 [97] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
476                         RTE_PTYPE_TUNNEL_IP |
477                         RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
478                         RTE_PTYPE_INNER_L4_UDP,
479                 /* [98] reserved */
480                 [99] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
481                         RTE_PTYPE_TUNNEL_IP |
482                         RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
483                         RTE_PTYPE_INNER_L4_TCP,
484                 [100] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
485                         RTE_PTYPE_TUNNEL_IP |
486                         RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
487                         RTE_PTYPE_INNER_L4_SCTP,
488                 [101] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
489                         RTE_PTYPE_TUNNEL_IP |
490                         RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
491                         RTE_PTYPE_INNER_L4_ICMP,
492
493                 /* IPv6 --> IPv6 */
494                 [102] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
495                         RTE_PTYPE_TUNNEL_IP |
496                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
497                         RTE_PTYPE_INNER_L4_FRAG,
498                 [103] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
499                         RTE_PTYPE_TUNNEL_IP |
500                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
501                         RTE_PTYPE_INNER_L4_NONFRAG,
502                 [104] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
503                         RTE_PTYPE_TUNNEL_IP |
504                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
505                         RTE_PTYPE_INNER_L4_UDP,
506                 /* [105] reserved */
507                 [106] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
508                         RTE_PTYPE_TUNNEL_IP |
509                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
510                         RTE_PTYPE_INNER_L4_TCP,
511                 [107] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
512                         RTE_PTYPE_TUNNEL_IP |
513                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
514                         RTE_PTYPE_INNER_L4_SCTP,
515                 [108] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
516                         RTE_PTYPE_TUNNEL_IP |
517                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
518                         RTE_PTYPE_INNER_L4_ICMP,
519
520                 /* IPv6 --> GRE/Teredo/VXLAN */
521                 [109] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
522                         RTE_PTYPE_TUNNEL_GRENAT,
523
524                 /* IPv6 --> GRE/Teredo/VXLAN --> IPv4 */
525                 [110] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
526                         RTE_PTYPE_TUNNEL_GRENAT |
527                         RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
528                         RTE_PTYPE_INNER_L4_FRAG,
529                 [111] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
530                         RTE_PTYPE_TUNNEL_GRENAT |
531                         RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
532                         RTE_PTYPE_INNER_L4_NONFRAG,
533                 [112] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
534                         RTE_PTYPE_TUNNEL_GRENAT |
535                         RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
536                         RTE_PTYPE_INNER_L4_UDP,
537                 /* [113] reserved */
538                 [114] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
539                         RTE_PTYPE_TUNNEL_GRENAT |
540                         RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
541                         RTE_PTYPE_INNER_L4_TCP,
542                 [115] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
543                         RTE_PTYPE_TUNNEL_GRENAT |
544                         RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
545                         RTE_PTYPE_INNER_L4_SCTP,
546                 [116] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
547                         RTE_PTYPE_TUNNEL_GRENAT |
548                         RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
549                         RTE_PTYPE_INNER_L4_ICMP,
550
551                 /* IPv6 --> GRE/Teredo/VXLAN --> IPv6 */
552                 [117] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
553                         RTE_PTYPE_TUNNEL_GRENAT |
554                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
555                         RTE_PTYPE_INNER_L4_FRAG,
556                 [118] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
557                         RTE_PTYPE_TUNNEL_GRENAT |
558                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
559                         RTE_PTYPE_INNER_L4_NONFRAG,
560                 [119] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
561                         RTE_PTYPE_TUNNEL_GRENAT |
562                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
563                         RTE_PTYPE_INNER_L4_UDP,
564                 /* [120] reserved */
565                 [121] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
566                         RTE_PTYPE_TUNNEL_GRENAT |
567                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
568                         RTE_PTYPE_INNER_L4_TCP,
569                 [122] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
570                         RTE_PTYPE_TUNNEL_GRENAT |
571                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
572                         RTE_PTYPE_INNER_L4_SCTP,
573                 [123] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
574                         RTE_PTYPE_TUNNEL_GRENAT |
575                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
576                         RTE_PTYPE_INNER_L4_ICMP,
577
578                 /* IPv6 --> GRE/Teredo/VXLAN --> MAC */
579                 [124] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
580                         RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER,
581
582                 /* IPv6 --> GRE/Teredo/VXLAN --> MAC --> IPv4 */
583                 [125] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
584                         RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
585                         RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
586                         RTE_PTYPE_INNER_L4_FRAG,
587                 [126] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
588                         RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
589                         RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
590                         RTE_PTYPE_INNER_L4_NONFRAG,
591                 [127] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
592                         RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
593                         RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
594                         RTE_PTYPE_INNER_L4_UDP,
595                 /* [128] reserved */
596                 [129] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
597                         RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
598                         RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
599                         RTE_PTYPE_INNER_L4_TCP,
600                 [130] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
601                         RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
602                         RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
603                         RTE_PTYPE_INNER_L4_SCTP,
604                 [131] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
605                         RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
606                         RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
607                         RTE_PTYPE_INNER_L4_ICMP,
608
609                 /* IPv6 --> GRE/Teredo/VXLAN --> MAC --> IPv6 */
610                 [132] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
611                         RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
612                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
613                         RTE_PTYPE_INNER_L4_FRAG,
614                 [133] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
615                         RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
616                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
617                         RTE_PTYPE_INNER_L4_NONFRAG,
618                 [134] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
619                         RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
620                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
621                         RTE_PTYPE_INNER_L4_UDP,
622                 /* [135] reserved */
623                 [136] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
624                         RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
625                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
626                         RTE_PTYPE_INNER_L4_TCP,
627                 [137] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
628                         RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
629                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
630                         RTE_PTYPE_INNER_L4_SCTP,
631                 [138] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
632                         RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
633                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
634                         RTE_PTYPE_INNER_L4_ICMP,
635
636                 /* IPv6 --> GRE/Teredo/VXLAN --> MAC/VLAN */
637                 [139] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
638                         RTE_PTYPE_TUNNEL_GRENAT |
639                         RTE_PTYPE_INNER_L2_ETHER_VLAN,
640
641                 /* IPv6 --> GRE/Teredo/VXLAN --> MAC/VLAN --> IPv4 */
642                 [140] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
643                         RTE_PTYPE_TUNNEL_GRENAT |
644                         RTE_PTYPE_INNER_L2_ETHER_VLAN |
645                         RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
646                         RTE_PTYPE_INNER_L4_FRAG,
647                 [141] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
648                         RTE_PTYPE_TUNNEL_GRENAT |
649                         RTE_PTYPE_INNER_L2_ETHER_VLAN |
650                         RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
651                         RTE_PTYPE_INNER_L4_NONFRAG,
652                 [142] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
653                         RTE_PTYPE_TUNNEL_GRENAT |
654                         RTE_PTYPE_INNER_L2_ETHER_VLAN |
655                         RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
656                         RTE_PTYPE_INNER_L4_UDP,
657                 /* [143] reserved */
658                 [144] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
659                         RTE_PTYPE_TUNNEL_GRENAT |
660                         RTE_PTYPE_INNER_L2_ETHER_VLAN |
661                         RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
662                         RTE_PTYPE_INNER_L4_TCP,
663                 [145] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
664                         RTE_PTYPE_TUNNEL_GRENAT |
665                         RTE_PTYPE_INNER_L2_ETHER_VLAN |
666                         RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
667                         RTE_PTYPE_INNER_L4_SCTP,
668                 [146] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
669                         RTE_PTYPE_TUNNEL_GRENAT |
670                         RTE_PTYPE_INNER_L2_ETHER_VLAN |
671                         RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
672                         RTE_PTYPE_INNER_L4_ICMP,
673
674                 /* IPv6 --> GRE/Teredo/VXLAN --> MAC/VLAN --> IPv6 */
675                 [147] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
676                         RTE_PTYPE_TUNNEL_GRENAT |
677                         RTE_PTYPE_INNER_L2_ETHER_VLAN |
678                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
679                         RTE_PTYPE_INNER_L4_FRAG,
680                 [148] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
681                         RTE_PTYPE_TUNNEL_GRENAT |
682                         RTE_PTYPE_INNER_L2_ETHER_VLAN |
683                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
684                         RTE_PTYPE_INNER_L4_NONFRAG,
685                 [149] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
686                         RTE_PTYPE_TUNNEL_GRENAT |
687                         RTE_PTYPE_INNER_L2_ETHER_VLAN |
688                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
689                         RTE_PTYPE_INNER_L4_UDP,
690                 /* [150] reserved */
691                 [151] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
692                         RTE_PTYPE_TUNNEL_GRENAT |
693                         RTE_PTYPE_INNER_L2_ETHER_VLAN |
694                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
695                         RTE_PTYPE_INNER_L4_TCP,
696                 [152] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
697                         RTE_PTYPE_TUNNEL_GRENAT |
698                         RTE_PTYPE_INNER_L2_ETHER_VLAN |
699                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
700                         RTE_PTYPE_INNER_L4_SCTP,
701                 [153] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
702                         RTE_PTYPE_TUNNEL_GRENAT |
703                         RTE_PTYPE_INNER_L2_ETHER_VLAN |
704                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
705                         RTE_PTYPE_INNER_L4_ICMP,
706
707                 /* L2 NSH packet type */
708                 [154] = RTE_PTYPE_L2_ETHER_NSH,
709                 [155] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
710                         RTE_PTYPE_L4_FRAG,
711                 [156] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
712                         RTE_PTYPE_L4_NONFRAG,
713                 [157] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
714                         RTE_PTYPE_L4_UDP,
715                 [158] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
716                         RTE_PTYPE_L4_TCP,
717                 [159] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
718                         RTE_PTYPE_L4_SCTP,
719                 [160] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
720                         RTE_PTYPE_L4_ICMP,
721                 [161] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
722                         RTE_PTYPE_L4_FRAG,
723                 [162] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
724                         RTE_PTYPE_L4_NONFRAG,
725                 [163] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
726                         RTE_PTYPE_L4_UDP,
727                 [164] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
728                         RTE_PTYPE_L4_TCP,
729                 [165] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
730                         RTE_PTYPE_L4_SCTP,
731                 [166] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
732                         RTE_PTYPE_L4_ICMP,
733
734                 /* All others reserved */
735         };
736
737         return type_table[ptype];
738 }
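/* Illustrative sketch (not part of the driver): an application receiving
 * from this PMD can consume the packet type and offload flags filled in by
 * the RX routines below roughly as follows. The port id, queue id, burst
 * size and handle_tcp() helper are assumptions made for the example only.
 *
 *      struct rte_mbuf *pkts[32];
 *      uint16_t i, nb = rte_eth_rx_burst(port_id, 0, pkts, 32);
 *
 *      for (i = 0; i < nb; i++) {
 *              if ((pkts[i]->packet_type & RTE_PTYPE_L4_MASK) ==
 *                              RTE_PTYPE_L4_TCP &&
 *                  !(pkts[i]->ol_flags & PKT_RX_L4_CKSUM_BAD))
 *                      handle_tcp(pkts[i]);
 *              else
 *                      rte_pktmbuf_free(pkts[i]);
 *      }
 */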
739
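/* FLEXBH/FLEXBL in qword2.ext_status of the 32-byte RX descriptor indicate
 * whether qword3 carries a Flow Director filter ID and/or flexible payload
 * bytes.
 */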
740 #define I40E_RX_DESC_EXT_STATUS_FLEXBH_MASK   0x03
741 #define I40E_RX_DESC_EXT_STATUS_FLEXBH_FD_ID  0x01
742 #define I40E_RX_DESC_EXT_STATUS_FLEXBH_FLEX   0x02
743 #define I40E_RX_DESC_EXT_STATUS_FLEXBL_MASK   0x03
744 #define I40E_RX_DESC_EXT_STATUS_FLEXBL_FLEX   0x01
745
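/* Fill mb->hash.fdir from the descriptor (Flow Director ID or flexible
 * payload bytes) and return the matching PKT_RX_FDIR_ID / PKT_RX_FDIR_FLX
 * flags.
 */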
746 static inline uint64_t
747 i40e_rxd_build_fdir(volatile union i40e_rx_desc *rxdp, struct rte_mbuf *mb)
748 {
749         uint64_t flags = 0;
750 #ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC
751         uint16_t flexbh, flexbl;
752
753         flexbh = (rte_le_to_cpu_32(rxdp->wb.qword2.ext_status) >>
754                 I40E_RX_DESC_EXT_STATUS_FLEXBH_SHIFT) &
755                 I40E_RX_DESC_EXT_STATUS_FLEXBH_MASK;
756         flexbl = (rte_le_to_cpu_32(rxdp->wb.qword2.ext_status) >>
757                 I40E_RX_DESC_EXT_STATUS_FLEXBL_SHIFT) &
758                 I40E_RX_DESC_EXT_STATUS_FLEXBL_MASK;
759
760
761         if (flexbh == I40E_RX_DESC_EXT_STATUS_FLEXBH_FD_ID) {
762                 mb->hash.fdir.hi =
763                         rte_le_to_cpu_32(rxdp->wb.qword3.hi_dword.fd_id);
764                 flags |= PKT_RX_FDIR_ID;
765         } else if (flexbh == I40E_RX_DESC_EXT_STATUS_FLEXBH_FLEX) {
766                 mb->hash.fdir.hi =
767                         rte_le_to_cpu_32(rxdp->wb.qword3.hi_dword.flex_bytes_hi);
768                 flags |= PKT_RX_FDIR_FLX;
769         }
770         if (flexbl == I40E_RX_DESC_EXT_STATUS_FLEXBL_FLEX) {
771                 mb->hash.fdir.lo =
772                         rte_le_to_cpu_32(rxdp->wb.qword3.lo_dword.flex_bytes_lo);
773                 flags |= PKT_RX_FDIR_FLX;
774         }
775 #else
776         mb->hash.fdir.hi =
777                 rte_le_to_cpu_32(rxdp->wb.qword0.hi_dword.fd_id);
778         flags |= PKT_RX_FDIR_ID;
779 #endif
780         return flags;
781 }
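/* Translate the checksum/TSO request in ol_flags into the TX descriptor
 * command and offset fields, and into the context descriptor tunneling
 * field for outer IP offloads.
 */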
782 static inline void
783 i40e_txd_enable_checksum(uint64_t ol_flags,
784                         uint32_t *td_cmd,
785                         uint32_t *td_offset,
786                         union i40e_tx_offload tx_offload,
787                         uint32_t *cd_tunneling)
788 {
789         /* UDP tunneling packet TX checksum offload */
790         if (ol_flags & PKT_TX_OUTER_IP_CKSUM) {
791
792                 *td_offset |= (tx_offload.outer_l2_len >> 1)
793                                 << I40E_TX_DESC_LENGTH_MACLEN_SHIFT;
794
795                 if (ol_flags & PKT_TX_OUTER_IP_CKSUM)
796                         *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV4;
797                 else if (ol_flags & PKT_TX_OUTER_IPV4)
798                         *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM;
799                 else if (ol_flags & PKT_TX_OUTER_IPV6)
800                         *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV6;
801
802                 /* Now set the ctx descriptor fields */
803                 *cd_tunneling |= (tx_offload.outer_l3_len >> 2) <<
804                                 I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT |
805                                 (tx_offload.l2_len >> 1) <<
806                                 I40E_TXD_CTX_QW0_NATLEN_SHIFT;
807
808         } else
809                 *td_offset |= (tx_offload.l2_len >> 1)
810                         << I40E_TX_DESC_LENGTH_MACLEN_SHIFT;
811
812         /* Enable L3 checksum offloads */
813         if (ol_flags & PKT_TX_IP_CKSUM) {
814                 *td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV4_CSUM;
815                 *td_offset |= (tx_offload.l3_len >> 2)
816                                 << I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
817         } else if (ol_flags & PKT_TX_IPV4) {
818                 *td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV4;
819                 *td_offset |= (tx_offload.l3_len >> 2)
820                                 << I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
821         } else if (ol_flags & PKT_TX_IPV6) {
822                 *td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV6;
823                 *td_offset |= (tx_offload.l3_len >> 2)
824                                 << I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
825         }
826
827         if (ol_flags & PKT_TX_TCP_SEG) {
828                 *td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_TCP;
829                 *td_offset |= (tx_offload.l4_len >> 2)
830                         << I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
831                 return;
832         }
833
834         /* Enable L4 checksum offloads */
835         switch (ol_flags & PKT_TX_L4_MASK) {
836         case PKT_TX_TCP_CKSUM:
837                 *td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_TCP;
838                 *td_offset |= (sizeof(struct tcp_hdr) >> 2) <<
839                                 I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
840                 break;
841         case PKT_TX_SCTP_CKSUM:
842                 *td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_SCTP;
843                 *td_offset |= (sizeof(struct sctp_hdr) >> 2) <<
844                                 I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
845                 break;
846         case PKT_TX_UDP_CKSUM:
847                 *td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_UDP;
848                 *td_offset |= (sizeof(struct udp_hdr) >> 2) <<
849                                 I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
850                 break;
851         default:
852                 break;
853         }
854 }
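/* Illustrative sketch (assumption, not driver code): for the routine above
 * to program IPv4 + TCP checksum offload on a non-tunneled packet, an
 * application would typically prepare the mbuf before rte_eth_tx_burst()
 * like this:
 *
 *      m->l2_len = sizeof(struct ether_hdr);
 *      m->l3_len = sizeof(struct ipv4_hdr);
 *      m->ol_flags |= PKT_TX_IPV4 | PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM;
 *
 * The TX path copies those lengths into tx_offload, and the routine then
 * sets the IIPT/L4T command bits and the MACLEN/IPLEN/L4LEN offsets.
 */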
855
856 /* Construct the cmd_type_offset_bsz quadword of a TX data descriptor */
857 static inline uint64_t
858 i40e_build_ctob(uint32_t td_cmd,
859                 uint32_t td_offset,
860                 unsigned int size,
861                 uint32_t td_tag)
862 {
863         return rte_cpu_to_le_64(I40E_TX_DESC_DTYPE_DATA |
864                         ((uint64_t)td_cmd  << I40E_TXD_QW1_CMD_SHIFT) |
865                         ((uint64_t)td_offset << I40E_TXD_QW1_OFFSET_SHIFT) |
866                         ((uint64_t)size  << I40E_TXD_QW1_TX_BUF_SZ_SHIFT) |
867                         ((uint64_t)td_tag  << I40E_TXD_QW1_L2TAG1_SHIFT));
868 }
869
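/* Reclaim transmitted descriptors: if the descriptor tx_rs_thresh entries
 * past the last cleaned one has been written back as done, advance
 * last_desc_cleaned and credit the freed entries to nb_tx_free; return -1
 * if hardware has not finished it yet.
 */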
870 static inline int
871 i40e_xmit_cleanup(struct i40e_tx_queue *txq)
872 {
873         struct i40e_tx_entry *sw_ring = txq->sw_ring;
874         volatile struct i40e_tx_desc *txd = txq->tx_ring;
875         uint16_t last_desc_cleaned = txq->last_desc_cleaned;
876         uint16_t nb_tx_desc = txq->nb_tx_desc;
877         uint16_t desc_to_clean_to;
878         uint16_t nb_tx_to_clean;
879
880         desc_to_clean_to = (uint16_t)(last_desc_cleaned + txq->tx_rs_thresh);
881         if (desc_to_clean_to >= nb_tx_desc)
882                 desc_to_clean_to = (uint16_t)(desc_to_clean_to - nb_tx_desc);
883
884         desc_to_clean_to = sw_ring[desc_to_clean_to].last_id;
885         if ((txd[desc_to_clean_to].cmd_type_offset_bsz &
886                         rte_cpu_to_le_64(I40E_TXD_QW1_DTYPE_MASK)) !=
887                         rte_cpu_to_le_64(I40E_TX_DESC_DTYPE_DESC_DONE)) {
888                 PMD_TX_FREE_LOG(DEBUG, "TX descriptor %4u is not done "
889                         "(port=%d queue=%d)", desc_to_clean_to,
890                                 txq->port_id, txq->queue_id);
891                 return -1;
892         }
893
894         if (last_desc_cleaned > desc_to_clean_to)
895                 nb_tx_to_clean = (uint16_t)((nb_tx_desc - last_desc_cleaned) +
896                                                         desc_to_clean_to);
897         else
898                 nb_tx_to_clean = (uint16_t)(desc_to_clean_to -
899                                         last_desc_cleaned);
900
901         txd[desc_to_clean_to].cmd_type_offset_bsz = 0;
902
903         txq->last_desc_cleaned = desc_to_clean_to;
904         txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + nb_tx_to_clean);
905
906         return 0;
907 }
908
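/* The bulk-allocation RX path requires rx_free_thresh to be at least
 * RTE_PMD_I40E_RX_MAX_BURST, smaller than nb_rx_desc and a divisor of it,
 * and nb_rx_desc to stay below I40E_MAX_RING_DESC - RTE_PMD_I40E_RX_MAX_BURST.
 */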
909 static inline int
910 #ifdef RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC
911 check_rx_burst_bulk_alloc_preconditions(struct i40e_rx_queue *rxq)
912 #else
913 check_rx_burst_bulk_alloc_preconditions(__rte_unused struct i40e_rx_queue *rxq)
914 #endif
915 {
916         int ret = 0;
917
918 #ifdef RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC
919         if (!(rxq->rx_free_thresh >= RTE_PMD_I40E_RX_MAX_BURST)) {
920                 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
921                              "rxq->rx_free_thresh=%d, "
922                              "RTE_PMD_I40E_RX_MAX_BURST=%d",
923                              rxq->rx_free_thresh, RTE_PMD_I40E_RX_MAX_BURST);
924                 ret = -EINVAL;
925         } else if (!(rxq->rx_free_thresh < rxq->nb_rx_desc)) {
926                 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
927                              "rxq->rx_free_thresh=%d, "
928                              "rxq->nb_rx_desc=%d",
929                              rxq->rx_free_thresh, rxq->nb_rx_desc);
930                 ret = -EINVAL;
931         } else if (rxq->nb_rx_desc % rxq->rx_free_thresh != 0) {
932                 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
933                              "rxq->nb_rx_desc=%d, "
934                              "rxq->rx_free_thresh=%d",
935                              rxq->nb_rx_desc, rxq->rx_free_thresh);
936                 ret = -EINVAL;
937         } else if (!(rxq->nb_rx_desc < (I40E_MAX_RING_DESC -
938                                 RTE_PMD_I40E_RX_MAX_BURST))) {
939                 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
940                              "rxq->nb_rx_desc=%d, "
941                              "I40E_MAX_RING_DESC=%d, "
942                              "RTE_PMD_I40E_RX_MAX_BURST=%d",
943                              rxq->nb_rx_desc, I40E_MAX_RING_DESC,
944                              RTE_PMD_I40E_RX_MAX_BURST);
945                 ret = -EINVAL;
946         }
947 #else
948         ret = -EINVAL;
949 #endif
950
951         return ret;
952 }
953
954 #ifdef RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC
955 #define I40E_LOOK_AHEAD 8
956 #if (I40E_LOOK_AHEAD != 8)
957 #error "PMD I40E: I40E_LOOK_AHEAD must be 8\n"
958 #endif
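/* Scan up to RTE_PMD_I40E_RX_MAX_BURST descriptors, I40E_LOOK_AHEAD at a
 * time, turn every completed one into a filled mbuf staged in rx_stage[]
 * and return the number of packets found.
 */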
959 static inline int
960 i40e_rx_scan_hw_ring(struct i40e_rx_queue *rxq)
961 {
962         volatile union i40e_rx_desc *rxdp;
963         struct i40e_rx_entry *rxep;
964         struct rte_mbuf *mb;
965         uint16_t pkt_len;
966         uint64_t qword1;
967         uint32_t rx_status;
968         int32_t s[I40E_LOOK_AHEAD], nb_dd;
969         int32_t i, j, nb_rx = 0;
970         uint64_t pkt_flags;
971
972         rxdp = &rxq->rx_ring[rxq->rx_tail];
973         rxep = &rxq->sw_ring[rxq->rx_tail];
974
975         qword1 = rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len);
976         rx_status = (qword1 & I40E_RXD_QW1_STATUS_MASK) >>
977                                 I40E_RXD_QW1_STATUS_SHIFT;
978
979         /* Make sure there is at least 1 packet to receive */
980         if (!(rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT)))
981                 return 0;
982
983         /**
984          * Scan LOOK_AHEAD descriptors at a time to determine which
985          * descriptors reference packets that are ready to be received.
986          */
987                         for (i = 0; i < RTE_PMD_I40E_RX_MAX_BURST; i += I40E_LOOK_AHEAD,
988                         rxdp += I40E_LOOK_AHEAD, rxep += I40E_LOOK_AHEAD) {
989                 /* Read desc statuses backwards to avoid race condition */
990                 for (j = I40E_LOOK_AHEAD - 1; j >= 0; j--) {
991                         qword1 = rte_le_to_cpu_64(\
992                                 rxdp[j].wb.qword1.status_error_len);
993                         s[j] = (qword1 & I40E_RXD_QW1_STATUS_MASK) >>
994                                         I40E_RXD_QW1_STATUS_SHIFT;
995                 }
996
997                 /* Count how many of these descriptors have the DD bit set */
998                 for (j = 0, nb_dd = 0; j < I40E_LOOK_AHEAD; j++)
999                         nb_dd += s[j] & (1 << I40E_RX_DESC_STATUS_DD_SHIFT);
1000
1001                 nb_rx += nb_dd;
1002
1003                 /* Translate descriptor info to mbuf parameters */
1004                 for (j = 0; j < nb_dd; j++) {
1005                         mb = rxep[j].mbuf;
1006                         qword1 = rte_le_to_cpu_64(\
1007                                 rxdp[j].wb.qword1.status_error_len);
1008                         pkt_len = ((qword1 & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
1009                                 I40E_RXD_QW1_LENGTH_PBUF_SHIFT) - rxq->crc_len;
1010                         mb->data_len = pkt_len;
1011                         mb->pkt_len = pkt_len;
1012                         mb->ol_flags = 0;
1013                         i40e_rxd_to_vlan_tci(mb, &rxdp[j]);
1014                         pkt_flags = i40e_rxd_status_to_pkt_flags(qword1);
1015                         pkt_flags |= i40e_rxd_error_to_pkt_flags(qword1);
1016                         mb->packet_type =
1017                                 i40e_rxd_pkt_type_mapping((uint8_t)((qword1 &
1018                                                 I40E_RXD_QW1_PTYPE_MASK) >>
1019                                                 I40E_RXD_QW1_PTYPE_SHIFT));
1020                         if (pkt_flags & PKT_RX_RSS_HASH)
1021                                 mb->hash.rss = rte_le_to_cpu_32(\
1022                                         rxdp[j].wb.qword0.hi_dword.rss);
1023                         if (pkt_flags & PKT_RX_FDIR)
1024                                 pkt_flags |= i40e_rxd_build_fdir(&rxdp[j], mb);
1025
1026 #ifdef RTE_LIBRTE_IEEE1588
1027                         pkt_flags |= i40e_get_iee15888_flags(mb, qword1);
1028 #endif
1029                         mb->ol_flags |= pkt_flags;
1030
1031                 }
1032
1033                 for (j = 0; j < I40E_LOOK_AHEAD; j++)
1034                         rxq->rx_stage[i + j] = rxep[j].mbuf;
1035
1036                 if (nb_dd != I40E_LOOK_AHEAD)
1037                         break;
1038         }
1039
1040         /* Clear software ring entries */
1041         for (i = 0; i < nb_rx; i++)
1042                 rxq->sw_ring[rxq->rx_tail + i].mbuf = NULL;
1043
1044         return nb_rx;
1045 }
1046
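/* Hand out up to nb_pkts mbufs previously staged in rx_stage[] and advance
 * the staging counters.
 */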
1047 static inline uint16_t
1048 i40e_rx_fill_from_stage(struct i40e_rx_queue *rxq,
1049                         struct rte_mbuf **rx_pkts,
1050                         uint16_t nb_pkts)
1051 {
1052         uint16_t i;
1053         struct rte_mbuf **stage = &rxq->rx_stage[rxq->rx_next_avail];
1054
1055         nb_pkts = (uint16_t)RTE_MIN(nb_pkts, rxq->rx_nb_avail);
1056
1057         for (i = 0; i < nb_pkts; i++)
1058                 rx_pkts[i] = stage[i];
1059
1060         rxq->rx_nb_avail = (uint16_t)(rxq->rx_nb_avail - nb_pkts);
1061         rxq->rx_next_avail = (uint16_t)(rxq->rx_next_avail + nb_pkts);
1062
1063         return nb_pkts;
1064 }
1065
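/* Refill rx_free_thresh ring entries starting at the free trigger with
 * mbufs taken in bulk from the mempool, then update the RX tail register
 * and the next free trigger.
 */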
1066 static inline int
1067 i40e_rx_alloc_bufs(struct i40e_rx_queue *rxq)
1068 {
1069         volatile union i40e_rx_desc *rxdp;
1070         struct i40e_rx_entry *rxep;
1071         struct rte_mbuf *mb;
1072         uint16_t alloc_idx, i;
1073         uint64_t dma_addr;
1074         int diag;
1075
1076         /* Allocate buffers in bulk */
1077         alloc_idx = (uint16_t)(rxq->rx_free_trigger -
1078                                 (rxq->rx_free_thresh - 1));
1079         rxep = &(rxq->sw_ring[alloc_idx]);
1080         diag = rte_mempool_get_bulk(rxq->mp, (void *)rxep,
1081                                         rxq->rx_free_thresh);
1082         if (unlikely(diag != 0)) {
1083                 PMD_DRV_LOG(ERR, "Failed to get mbufs in bulk");
1084                 return -ENOMEM;
1085         }
1086
1087         rxdp = &rxq->rx_ring[alloc_idx];
1088         for (i = 0; i < rxq->rx_free_thresh; i++) {
1089                 if (likely(i < (rxq->rx_free_thresh - 1)))
1090                         /* Prefetch next mbuf */
1091                         rte_prefetch0(rxep[i + 1].mbuf);
1092
1093                 mb = rxep[i].mbuf;
1094                 rte_mbuf_refcnt_set(mb, 1);
1095                 mb->next = NULL;
1096                 mb->data_off = RTE_PKTMBUF_HEADROOM;
1097                 mb->nb_segs = 1;
1098                 mb->port = rxq->port_id;
1099                 dma_addr = rte_cpu_to_le_64(\
1100                         rte_mbuf_data_dma_addr_default(mb));
1101                 rxdp[i].read.hdr_addr = 0;
1102                 rxdp[i].read.pkt_addr = dma_addr;
1103         }
1104
1105         /* Update the RX tail register */
1106         rte_wmb();
1107         I40E_PCI_REG_WRITE(rxq->qrx_tail, rxq->rx_free_trigger);
1108
1109         rxq->rx_free_trigger =
1110                 (uint16_t)(rxq->rx_free_trigger + rxq->rx_free_thresh);
1111         if (rxq->rx_free_trigger >= rxq->nb_rx_desc)
1112                 rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);
1113
1114         return 0;
1115 }
1116
1117 static inline uint16_t
1118 rx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
1119 {
1120         struct i40e_rx_queue *rxq = (struct i40e_rx_queue *)rx_queue;
1121         uint16_t nb_rx = 0;
1122
1123         if (!nb_pkts)
1124                 return 0;
1125
1126         if (rxq->rx_nb_avail)
1127                 return i40e_rx_fill_from_stage(rxq, rx_pkts, nb_pkts);
1128
1129         nb_rx = (uint16_t)i40e_rx_scan_hw_ring(rxq);
1130         rxq->rx_next_avail = 0;
1131         rxq->rx_nb_avail = nb_rx;
1132         rxq->rx_tail = (uint16_t)(rxq->rx_tail + nb_rx);
1133
1134         if (rxq->rx_tail > rxq->rx_free_trigger) {
1135                 if (i40e_rx_alloc_bufs(rxq) != 0) {
1136                         uint16_t i, j;
1137
1138                         PMD_RX_LOG(DEBUG, "Rx mbuf alloc failed for "
1139                                    "port_id=%u, queue_id=%u",
1140                                    rxq->port_id, rxq->queue_id);
1141                         rxq->rx_nb_avail = 0;
1142                         rxq->rx_tail = (uint16_t)(rxq->rx_tail - nb_rx);
1143                         for (i = 0, j = rxq->rx_tail; i < nb_rx; i++, j++)
1144                                 rxq->sw_ring[j].mbuf = rxq->rx_stage[i];
1145
1146                         return 0;
1147                 }
1148         }
1149
1150         if (rxq->rx_tail >= rxq->nb_rx_desc)
1151                 rxq->rx_tail = 0;
1152
1153         if (rxq->rx_nb_avail)
1154                 return i40e_rx_fill_from_stage(rxq, rx_pkts, nb_pkts);
1155
1156         return 0;
1157 }
1158
1159 static uint16_t
1160 i40e_recv_pkts_bulk_alloc(void *rx_queue,
1161                           struct rte_mbuf **rx_pkts,
1162                           uint16_t nb_pkts)
1163 {
1164         uint16_t nb_rx = 0, n, count;
1165
1166         if (unlikely(nb_pkts == 0))
1167                 return 0;
1168
1169         if (likely(nb_pkts <= RTE_PMD_I40E_RX_MAX_BURST))
1170                 return rx_recv_pkts(rx_queue, rx_pkts, nb_pkts);
1171
1172         while (nb_pkts) {
1173                 n = RTE_MIN(nb_pkts, RTE_PMD_I40E_RX_MAX_BURST);
1174                 count = rx_recv_pkts(rx_queue, &rx_pkts[nb_rx], n);
1175                 nb_rx = (uint16_t)(nb_rx + count);
1176                 nb_pkts = (uint16_t)(nb_pkts - count);
1177                 if (count < n)
1178                         break;
1179         }
1180
1181         return nb_rx;
1182 }
1183 #else
1184 static uint16_t
1185 i40e_recv_pkts_bulk_alloc(void __rte_unused *rx_queue,
1186                           struct rte_mbuf __rte_unused **rx_pkts,
1187                           uint16_t __rte_unused nb_pkts)
1188 {
1189         return 0;
1190 }
1191 #endif /* RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC */
1192
1193 uint16_t
1194 i40e_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
1195 {
1196         struct i40e_rx_queue *rxq;
1197         volatile union i40e_rx_desc *rx_ring;
1198         volatile union i40e_rx_desc *rxdp;
1199         union i40e_rx_desc rxd;
1200         struct i40e_rx_entry *sw_ring;
1201         struct i40e_rx_entry *rxe;
1202         struct rte_mbuf *rxm;
1203         struct rte_mbuf *nmb;
1204         uint16_t nb_rx;
1205         uint32_t rx_status;
1206         uint64_t qword1;
1207         uint16_t rx_packet_len;
1208         uint16_t rx_id, nb_hold;
1209         uint64_t dma_addr;
1210         uint64_t pkt_flags;
1211
1212         nb_rx = 0;
1213         nb_hold = 0;
1214         rxq = rx_queue;
1215         rx_id = rxq->rx_tail;
1216         rx_ring = rxq->rx_ring;
1217         sw_ring = rxq->sw_ring;
1218
1219         while (nb_rx < nb_pkts) {
1220                 rxdp = &rx_ring[rx_id];
1221                 qword1 = rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len);
1222                 rx_status = (qword1 & I40E_RXD_QW1_STATUS_MASK)
1223                                 >> I40E_RXD_QW1_STATUS_SHIFT;
1224
1225                 /* Check the DD bit first */
1226                 if (!(rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT)))
1227                         break;
1228
1229                 nmb = rte_mbuf_raw_alloc(rxq->mp);
1230                 if (unlikely(!nmb))
1231                         break;
1232                 rxd = *rxdp;
1233
1234                 nb_hold++;
1235                 rxe = &sw_ring[rx_id];
1236                 rx_id++;
1237                 if (unlikely(rx_id == rxq->nb_rx_desc))
1238                         rx_id = 0;
1239
1240                 /* Prefetch next mbuf */
1241                 rte_prefetch0(sw_ring[rx_id].mbuf);
1242
1243                 /**
1244                  * When the next RX descriptor is on a cache line boundary,
1245                  * prefetch the next 4 RX descriptors and the next 8 mbuf
1246                  * pointers.
1247                  */
1248                 if ((rx_id & 0x3) == 0) {
1249                         rte_prefetch0(&rx_ring[rx_id]);
1250                         rte_prefetch0(&sw_ring[rx_id]);
1251                 }
1252                 rxm = rxe->mbuf;
1253                 rxe->mbuf = nmb;
1254                 dma_addr =
1255                         rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(nmb));
1256                 rxdp->read.hdr_addr = 0;
1257                 rxdp->read.pkt_addr = dma_addr;
1258
1259                 rx_packet_len = ((qword1 & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
1260                                 I40E_RXD_QW1_LENGTH_PBUF_SHIFT) - rxq->crc_len;
1261
1262                 rxm->data_off = RTE_PKTMBUF_HEADROOM;
1263                 rte_prefetch0(RTE_PTR_ADD(rxm->buf_addr, RTE_PKTMBUF_HEADROOM));
1264                 rxm->nb_segs = 1;
1265                 rxm->next = NULL;
1266                 rxm->pkt_len = rx_packet_len;
1267                 rxm->data_len = rx_packet_len;
1268                 rxm->port = rxq->port_id;
1269                 rxm->ol_flags = 0;
1270                 i40e_rxd_to_vlan_tci(rxm, &rxd);
1271                 pkt_flags = i40e_rxd_status_to_pkt_flags(qword1);
1272                 pkt_flags |= i40e_rxd_error_to_pkt_flags(qword1);
1273                 rxm->packet_type =
1274                         i40e_rxd_pkt_type_mapping((uint8_t)((qword1 &
1275                         I40E_RXD_QW1_PTYPE_MASK) >> I40E_RXD_QW1_PTYPE_SHIFT));
1276                 if (pkt_flags & PKT_RX_RSS_HASH)
1277                         rxm->hash.rss =
1278                                 rte_le_to_cpu_32(rxd.wb.qword0.hi_dword.rss);
1279                 if (pkt_flags & PKT_RX_FDIR)
1280                         pkt_flags |= i40e_rxd_build_fdir(&rxd, rxm);
1281
1282 #ifdef RTE_LIBRTE_IEEE1588
1283                 pkt_flags |= i40e_get_iee15888_flags(rxm, qword1);
1284 #endif
1285                 rxm->ol_flags |= pkt_flags;
1286
1287                 rx_pkts[nb_rx++] = rxm;
1288         }
1289         rxq->rx_tail = rx_id;
1290
1291         /**
1292          * If the number of free RX descriptors is greater than the RX free
1293          * threshold of the queue, advance the receive tail register of
1294          * the queue. Update that register with the value of the last
1295          * processed RX descriptor minus 1.
1296          */
1297         nb_hold = (uint16_t)(nb_hold + rxq->nb_rx_hold);
1298         if (nb_hold > rxq->rx_free_thresh) {
1299                 rx_id = (uint16_t) ((rx_id == 0) ?
1300                         (rxq->nb_rx_desc - 1) : (rx_id - 1));
1301                 I40E_PCI_REG_WRITE(rxq->qrx_tail, rx_id);
1302                 nb_hold = 0;
1303         }
1304         rxq->nb_rx_hold = nb_hold;
1305
1306         return nb_rx;
1307 }
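
/*
 * Usage sketch (illustrative; not part of the driver): applications do not
 * call i40e_recv_pkts() directly. The ethdev layer dispatches to it through
 * dev->rx_pkt_burst, e.g.:
 *
 *     struct rte_mbuf *pkts[32];
 *     uint16_t nb = rte_eth_rx_burst(port_id, queue_id, pkts, 32);
 *
 * where port_id and queue_id are application-chosen values.
 */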
1308
1309 uint16_t
1310 i40e_recv_scattered_pkts(void *rx_queue,
1311                          struct rte_mbuf **rx_pkts,
1312                          uint16_t nb_pkts)
1313 {
1314         struct i40e_rx_queue *rxq = rx_queue;
1315         volatile union i40e_rx_desc *rx_ring = rxq->rx_ring;
1316         volatile union i40e_rx_desc *rxdp;
1317         union i40e_rx_desc rxd;
1318         struct i40e_rx_entry *sw_ring = rxq->sw_ring;
1319         struct i40e_rx_entry *rxe;
1320         struct rte_mbuf *first_seg = rxq->pkt_first_seg;
1321         struct rte_mbuf *last_seg = rxq->pkt_last_seg;
1322         struct rte_mbuf *nmb, *rxm;
1323         uint16_t rx_id = rxq->rx_tail;
1324         uint16_t nb_rx = 0, nb_hold = 0, rx_packet_len;
1325         uint32_t rx_status;
1326         uint64_t qword1;
1327         uint64_t dma_addr;
1328         uint64_t pkt_flags;
1329
1330         while (nb_rx < nb_pkts) {
1331                 rxdp = &rx_ring[rx_id];
1332                 qword1 = rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len);
1333                 rx_status = (qword1 & I40E_RXD_QW1_STATUS_MASK) >>
1334                                         I40E_RXD_QW1_STATUS_SHIFT;
1335
1336                 /* Check the DD bit */
1337                 if (!(rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT)))
1338                         break;
1339
1340                 nmb = rte_mbuf_raw_alloc(rxq->mp);
1341                 if (unlikely(!nmb))
1342                         break;
1343                 rxd = *rxdp;
1344                 nb_hold++;
1345                 rxe = &sw_ring[rx_id];
1346                 rx_id++;
1347                 if (rx_id == rxq->nb_rx_desc)
1348                         rx_id = 0;
1349
1350                 /* Prefetch next mbuf */
1351                 rte_prefetch0(sw_ring[rx_id].mbuf);
1352
1353                 /**
1354                  * When the next RX descriptor is on a cache line boundary,
1355                  * prefetch the next 4 RX descriptors and the next 8 mbuf
1356                  * pointers.
1357                  */
1358                 if ((rx_id & 0x3) == 0) {
1359                         rte_prefetch0(&rx_ring[rx_id]);
1360                         rte_prefetch0(&sw_ring[rx_id]);
1361                 }
1362
1363                 rxm = rxe->mbuf;
1364                 rxe->mbuf = nmb;
1365                 dma_addr =
1366                         rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(nmb));
1367
1368                 /* Set data buffer address and data length of the mbuf */
1369                 rxdp->read.hdr_addr = 0;
1370                 rxdp->read.pkt_addr = dma_addr;
1371                 rx_packet_len = (qword1 & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
1372                                         I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
1373                 rxm->data_len = rx_packet_len;
1374                 rxm->data_off = RTE_PKTMBUF_HEADROOM;
1375
1376                 /**
1377                  * If this is the first buffer of the received packet, set the
1378                  * pointer to the first mbuf of the packet and initialize its
1379                  * context. Otherwise, update the total length and the number
1380                  * of segments of the current scattered packet, and update the
1381                  * pointer to the last mbuf of the current packet.
1382                  */
1383                 if (!first_seg) {
1384                         first_seg = rxm;
1385                         first_seg->nb_segs = 1;
1386                         first_seg->pkt_len = rx_packet_len;
1387                 } else {
1388                         first_seg->pkt_len =
1389                                 (uint16_t)(first_seg->pkt_len +
1390                                                 rx_packet_len);
1391                         first_seg->nb_segs++;
1392                         last_seg->next = rxm;
1393                 }
1394
1395                 /**
1396                  * If this is not the last buffer of the received packet,
1397                  * update the pointer to the last mbuf of the current scattered
1398                  * packet and continue to parse the RX ring.
1399                  */
1400                 if (!(rx_status & (1 << I40E_RX_DESC_STATUS_EOF_SHIFT))) {
1401                         last_seg = rxm;
1402                         continue;
1403                 }
1404
1405                 /**
1406                  * This is the last buffer of the received packet. If the CRC
1407                  * is not stripped by the hardware:
1408                  *  - Subtract the CRC length from the total packet length.
1409                  *  - If the last buffer only contains the whole CRC or a part
1410                  *  of it, free the mbuf associated with the last buffer. If part
1411                  *  of the CRC is also contained in the previous mbuf, subtract
1412                  *  the length of that CRC part from the data length of the
1413                  *  previous mbuf.
1414                  */
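                /*
                 * Illustrative example (not from the original code): with
                 * ETHER_CRC_LEN = 4, if the last segment holds only 2 bytes,
                 * that segment is freed and the remaining 2 CRC bytes are
                 * trimmed from the data length of the previous segment.
                 */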
1415                 rxm->next = NULL;
1416                 if (unlikely(rxq->crc_len > 0)) {
1417                         first_seg->pkt_len -= ETHER_CRC_LEN;
1418                         if (rx_packet_len <= ETHER_CRC_LEN) {
1419                                 rte_pktmbuf_free_seg(rxm);
1420                                 first_seg->nb_segs--;
1421                                 last_seg->data_len =
1422                                         (uint16_t)(last_seg->data_len -
1423                                         (ETHER_CRC_LEN - rx_packet_len));
1424                                 last_seg->next = NULL;
1425                         } else
1426                                 rxm->data_len = (uint16_t)(rx_packet_len -
1427                                                                 ETHER_CRC_LEN);
1428                 }
1429
1430                 first_seg->port = rxq->port_id;
1431                 first_seg->ol_flags = 0;
1432                 i40e_rxd_to_vlan_tci(first_seg, &rxd);
1433                 pkt_flags = i40e_rxd_status_to_pkt_flags(qword1);
1434                 pkt_flags |= i40e_rxd_error_to_pkt_flags(qword1);
1435                 first_seg->packet_type =
1436                         i40e_rxd_pkt_type_mapping((uint8_t)((qword1 &
1437                         I40E_RXD_QW1_PTYPE_MASK) >> I40E_RXD_QW1_PTYPE_SHIFT));
1438                 if (pkt_flags & PKT_RX_RSS_HASH)
1439                         rxm->hash.rss =
1440                                 rte_le_to_cpu_32(rxd.wb.qword0.hi_dword.rss);
1441                 if (pkt_flags & PKT_RX_FDIR)
1442                         pkt_flags |= i40e_rxd_build_fdir(&rxd, rxm);
1443
1444 #ifdef RTE_LIBRTE_IEEE1588
1445                 pkt_flags |= i40e_get_iee15888_flags(first_seg, qword1);
1446 #endif
1447                 first_seg->ol_flags |= pkt_flags;
1448
1449                 /* Prefetch the data of the first segment. */
1450                 rte_prefetch0(RTE_PTR_ADD(first_seg->buf_addr,
1451                         first_seg->data_off));
1452                 rx_pkts[nb_rx++] = first_seg;
1453                 first_seg = NULL;
1454         }
1455
1456         /* Record index of the next RX descriptor to probe. */
1457         rxq->rx_tail = rx_id;
1458         rxq->pkt_first_seg = first_seg;
1459         rxq->pkt_last_seg = last_seg;
1460
1461         /**
1462          * If the number of free RX descriptors is greater than the RX free
1463          * threshold of the queue, advance the Receive Descriptor Tail (RDT)
1464          * register. Update the RDT with the value of the last processed RX
1465          * descriptor minus 1, to guarantee that the RDT register is never
1466          * equal to the RDH register, which creates a "full" ring situation
1467          * from the hardware point of view.
1468          */
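        /*
         * Illustrative example (threshold value is an assumption): with
         * rx_free_thresh = 32, the tail register is written only after more
         * than 32 descriptors have accumulated in nb_hold, rather than once
         * per received packet, amortizing the cost of the MMIO write.
         */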
1469         nb_hold = (uint16_t)(nb_hold + rxq->nb_rx_hold);
1470         if (nb_hold > rxq->rx_free_thresh) {
1471                 rx_id = (uint16_t)(rx_id == 0 ?
1472                         (rxq->nb_rx_desc - 1) : (rx_id - 1));
1473                 I40E_PCI_REG_WRITE(rxq->qrx_tail, rx_id);
1474                 nb_hold = 0;
1475         }
1476         rxq->nb_rx_hold = nb_hold;
1477
1478         return nb_rx;
1479 }
1480
1481 /* Check if the context descriptor is needed for TX offloading */
1482 static inline uint16_t
1483 i40e_calc_context_desc(uint64_t flags)
1484 {
1485         static uint64_t mask = PKT_TX_OUTER_IP_CKSUM |
1486                 PKT_TX_TCP_SEG |
1487                 PKT_TX_QINQ_PKT;
1488
1489 #ifdef RTE_LIBRTE_IEEE1588
1490         mask |= PKT_TX_IEEE1588_TMST;
1491 #endif
1492
1493         return (flags & mask) ? 1 : 0;
1494 }
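
/*
 * Illustrative note: a packet with PKT_TX_TCP_SEG (TSO) or PKT_TX_QINQ_PKT
 * set needs a context descriptor, so i40e_calc_context_desc() returns 1,
 * while a packet requesting only L3/L4 checksum offload returns 0.
 */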
1495
1496 /* set i40e TSO context descriptor */
1497 static inline uint64_t
1498 i40e_set_tso_ctx(struct rte_mbuf *mbuf, union i40e_tx_offload tx_offload)
1499 {
1500         uint64_t ctx_desc = 0;
1501         uint32_t cd_cmd, hdr_len, cd_tso_len;
1502
1503         if (!tx_offload.l4_len) {
1504                 PMD_DRV_LOG(DEBUG, "L4 length set to 0");
1505                 return ctx_desc;
1506         }
1507
1508         /**
1509          * in case of a non-tunneling packet, the outer_l2_len and
1510          * outer_l3_len must be 0.
1511          */
1512         hdr_len = tx_offload.outer_l2_len +
1513                 tx_offload.outer_l3_len +
1514                 tx_offload.l2_len +
1515                 tx_offload.l3_len +
1516                 tx_offload.l4_len;
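        /*
         * Worked example (illustrative values): for a plain TCP packet with
         * l2_len = 14, l3_len = 20, l4_len = 20 and tso_segsz = 1460,
         * hdr_len is 54 and cd_tso_len = pkt_len - 54 is the TCP payload
         * length that the hardware splits into 1460-byte segments.
         */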
1517
1518         cd_cmd = I40E_TX_CTX_DESC_TSO;
1519         cd_tso_len = mbuf->pkt_len - hdr_len;
1520         ctx_desc |= ((uint64_t)cd_cmd << I40E_TXD_CTX_QW1_CMD_SHIFT) |
1521                 ((uint64_t)cd_tso_len <<
1522                  I40E_TXD_CTX_QW1_TSO_LEN_SHIFT) |
1523                 ((uint64_t)mbuf->tso_segsz <<
1524                  I40E_TXD_CTX_QW1_MSS_SHIFT);
1525
1526         return ctx_desc;
1527 }
1528
1529 uint16_t
1530 i40e_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
1531 {
1532         struct i40e_tx_queue *txq;
1533         struct i40e_tx_entry *sw_ring;
1534         struct i40e_tx_entry *txe, *txn;
1535         volatile struct i40e_tx_desc *txd;
1536         volatile struct i40e_tx_desc *txr;
1537         struct rte_mbuf *tx_pkt;
1538         struct rte_mbuf *m_seg;
1539         uint32_t cd_tunneling_params;
1540         uint16_t tx_id;
1541         uint16_t nb_tx;
1542         uint32_t td_cmd;
1543         uint32_t td_offset;
1544         uint32_t tx_flags;
1545         uint32_t td_tag;
1546         uint64_t ol_flags;
1547         uint16_t nb_used;
1548         uint16_t nb_ctx;
1549         uint16_t tx_last;
1550         uint16_t slen;
1551         uint64_t buf_dma_addr;
1552         union i40e_tx_offload tx_offload = {0};
1553
1554         txq = tx_queue;
1555         sw_ring = txq->sw_ring;
1556         txr = txq->tx_ring;
1557         tx_id = txq->tx_tail;
1558         txe = &sw_ring[tx_id];
1559
1560         /* Check if the descriptor ring needs to be cleaned. */
1561         if (txq->nb_tx_free < txq->tx_free_thresh)
1562                 i40e_xmit_cleanup(txq);
1563
1564         for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
1565                 td_cmd = 0;
1566                 td_tag = 0;
1567                 td_offset = 0;
1568                 tx_flags = 0;
1569
1570                 tx_pkt = *tx_pkts++;
1571                 RTE_MBUF_PREFETCH_TO_FREE(txe->mbuf);
1572
1573                 ol_flags = tx_pkt->ol_flags;
1574                 tx_offload.l2_len = tx_pkt->l2_len;
1575                 tx_offload.l3_len = tx_pkt->l3_len;
1576                 tx_offload.outer_l2_len = tx_pkt->outer_l2_len;
1577                 tx_offload.outer_l3_len = tx_pkt->outer_l3_len;
1578                 tx_offload.l4_len = tx_pkt->l4_len;
1579                 tx_offload.tso_segsz = tx_pkt->tso_segsz;
1580
1581                 /* Calculate the number of context descriptors needed. */
1582                 nb_ctx = i40e_calc_context_desc(ol_flags);
1583
1584                 /**
1585                  * The number of descriptors that must be allocated for
1586                  * a packet equals the number of segments of that packet,
1587                  * plus 1 context descriptor if needed.
1588                  */
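                /*
                 * Example (illustrative): a 3-segment TSO packet needs one
                 * context descriptor plus 3 data descriptors, so nb_used = 4.
                 */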
1589                 nb_used = (uint16_t)(tx_pkt->nb_segs + nb_ctx);
1590                 tx_last = (uint16_t)(tx_id + nb_used - 1);
1591
1592                 /* Circular ring */
1593                 if (tx_last >= txq->nb_tx_desc)
1594                         tx_last = (uint16_t)(tx_last - txq->nb_tx_desc);
1595
1596                 if (nb_used > txq->nb_tx_free) {
1597                         if (i40e_xmit_cleanup(txq) != 0) {
1598                                 if (nb_tx == 0)
1599                                         return 0;
1600                                 goto end_of_tx;
1601                         }
1602                         if (unlikely(nb_used > txq->tx_rs_thresh)) {
1603                                 while (nb_used > txq->nb_tx_free) {
1604                                         if (i40e_xmit_cleanup(txq) != 0) {
1605                                                 if (nb_tx == 0)
1606                                                         return 0;
1607                                                 goto end_of_tx;
1608                                         }
1609                                 }
1610                         }
1611                 }
1612
1613                 /* Descriptor based VLAN insertion */
1614                 if (ol_flags & (PKT_TX_VLAN_PKT | PKT_TX_QINQ_PKT)) {
1615                         tx_flags |= tx_pkt->vlan_tci <<
1616                                 I40E_TX_FLAG_L2TAG1_SHIFT;
1617                         tx_flags |= I40E_TX_FLAG_INSERT_VLAN;
1618                         td_cmd |= I40E_TX_DESC_CMD_IL2TAG1;
1619                         td_tag = (tx_flags & I40E_TX_FLAG_L2TAG1_MASK) >>
1620                                                 I40E_TX_FLAG_L2TAG1_SHIFT;
1621                 }
1622
1623                 /* Always enable CRC insertion offload */
1624                 td_cmd |= I40E_TX_DESC_CMD_ICRC;
1625
1626                 /* Enable checksum offloading */
1627                 cd_tunneling_params = 0;
1628                 if (ol_flags & I40E_TX_CKSUM_OFFLOAD_MASK) {
1629                         i40e_txd_enable_checksum(ol_flags, &td_cmd, &td_offset,
1630                                 tx_offload, &cd_tunneling_params);
1631                 }
1632
1633                 if (nb_ctx) {
1634                         /* Setup TX context descriptor if required */
1635                         volatile struct i40e_tx_context_desc *ctx_txd =
1636                                 (volatile struct i40e_tx_context_desc *)\
1637                                                         &txr[tx_id];
1638                         uint16_t cd_l2tag2 = 0;
1639                         uint64_t cd_type_cmd_tso_mss =
1640                                 I40E_TX_DESC_DTYPE_CONTEXT;
1641
1642                         txn = &sw_ring[txe->next_id];
1643                         RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);
1644                         if (txe->mbuf != NULL) {
1645                                 rte_pktmbuf_free_seg(txe->mbuf);
1646                                 txe->mbuf = NULL;
1647                         }
1648
1649                         /* TSO enabled means no timestamp */
1650                         if (ol_flags & PKT_TX_TCP_SEG)
1651                                 cd_type_cmd_tso_mss |=
1652                                         i40e_set_tso_ctx(tx_pkt, tx_offload);
1653                         else {
1654 #ifdef RTE_LIBRTE_IEEE1588
1655                                 if (ol_flags & PKT_TX_IEEE1588_TMST)
1656                                         cd_type_cmd_tso_mss |=
1657                                                 ((uint64_t)I40E_TX_CTX_DESC_TSYN <<
1658                                                  I40E_TXD_CTX_QW1_CMD_SHIFT);
1659 #endif
1660                         }
1661
1662                         ctx_txd->tunneling_params =
1663                                 rte_cpu_to_le_32(cd_tunneling_params);
1664                         if (ol_flags & PKT_TX_QINQ_PKT) {
1665                                 cd_l2tag2 = tx_pkt->vlan_tci_outer;
1666                                 cd_type_cmd_tso_mss |=
1667                                         ((uint64_t)I40E_TX_CTX_DESC_IL2TAG2 <<
1668                                                 I40E_TXD_CTX_QW1_CMD_SHIFT);
1669                         }
1670                         ctx_txd->l2tag2 = rte_cpu_to_le_16(cd_l2tag2);
1671                         ctx_txd->type_cmd_tso_mss =
1672                                 rte_cpu_to_le_64(cd_type_cmd_tso_mss);
1673
1674                         PMD_TX_LOG(DEBUG, "mbuf: %p, TCD[%u]:\n"
1675                                 "tunneling_params: %#x;\n"
1676                                 "l2tag2: %#hx;\n"
1677                                 "rsvd: %#hx;\n"
1678                                 "type_cmd_tso_mss: %#"PRIx64";\n",
1679                                 tx_pkt, tx_id,
1680                                 ctx_txd->tunneling_params,
1681                                 ctx_txd->l2tag2,
1682                                 ctx_txd->rsvd,
1683                                 ctx_txd->type_cmd_tso_mss);
1684
1685                         txe->last_id = tx_last;
1686                         tx_id = txe->next_id;
1687                         txe = txn;
1688                 }
1689
1690                 m_seg = tx_pkt;
1691                 do {
1692                         txd = &txr[tx_id];
1693                         txn = &sw_ring[txe->next_id];
1694
1695                         if (txe->mbuf)
1696                                 rte_pktmbuf_free_seg(txe->mbuf);
1697                         txe->mbuf = m_seg;
1698
1699                         /* Setup TX Descriptor */
1700                         slen = m_seg->data_len;
1701                         buf_dma_addr = rte_mbuf_data_dma_addr(m_seg);
1702
1703                         PMD_TX_LOG(DEBUG, "mbuf: %p, TDD[%u]:\n"
1704                                 "buf_dma_addr: %#"PRIx64";\n"
1705                                 "td_cmd: %#x;\n"
1706                                 "td_offset: %#x;\n"
1707                                 "td_len: %u;\n"
1708                                 "td_tag: %#x;\n",
1709                                 tx_pkt, tx_id, buf_dma_addr,
1710                                 td_cmd, td_offset, slen, td_tag);
1711
1712                         txd->buffer_addr = rte_cpu_to_le_64(buf_dma_addr);
1713                         txd->cmd_type_offset_bsz = i40e_build_ctob(td_cmd,
1714                                                 td_offset, slen, td_tag);
1715                         txe->last_id = tx_last;
1716                         tx_id = txe->next_id;
1717                         txe = txn;
1718                         m_seg = m_seg->next;
1719                 } while (m_seg != NULL);
1720
1721                 /* The last packet data descriptor needs End Of Packet (EOP) */
1722                 td_cmd |= I40E_TX_DESC_CMD_EOP;
1723                 txq->nb_tx_used = (uint16_t)(txq->nb_tx_used + nb_used);
1724                 txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_used);
1725
1726                 if (txq->nb_tx_used >= txq->tx_rs_thresh) {
1727                         PMD_TX_FREE_LOG(DEBUG,
1728                                         "Setting RS bit on TXD id="
1729                                         "%4u (port=%d queue=%d)",
1730                                         tx_last, txq->port_id, txq->queue_id);
1731
1732                         td_cmd |= I40E_TX_DESC_CMD_RS;
1733
1734                         /* Update txq RS bit counters */
1735                         txq->nb_tx_used = 0;
1736                 }
1737
1738                 txd->cmd_type_offset_bsz |=
1739                         rte_cpu_to_le_64(((uint64_t)td_cmd) <<
1740                                         I40E_TXD_QW1_CMD_SHIFT);
1741         }
1742
1743 end_of_tx:
1744         rte_wmb();
1745
1746         PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u",
1747                    (unsigned) txq->port_id, (unsigned) txq->queue_id,
1748                    (unsigned) tx_id, (unsigned) nb_tx);
1749
1750         I40E_PCI_REG_WRITE(txq->qtx_tail, tx_id);
1751         txq->tx_tail = tx_id;
1752
1753         return nb_tx;
1754 }
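
/*
 * Usage sketch (illustrative; not part of the driver): applications reach
 * i40e_xmit_pkts() through dev->tx_pkt_burst, e.g.:
 *
 *     uint16_t sent = rte_eth_tx_burst(port_id, queue_id, pkts, nb_pkts);
 *
 * Packets not accepted (sent < nb_pkts) remain owned by the caller.
 */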
1755
1756 static inline int __attribute__((always_inline))
1757 i40e_tx_free_bufs(struct i40e_tx_queue *txq)
1758 {
1759         struct i40e_tx_entry *txep;
1760         uint16_t i;
1761
1762         if ((txq->tx_ring[txq->tx_next_dd].cmd_type_offset_bsz &
1763                         rte_cpu_to_le_64(I40E_TXD_QW1_DTYPE_MASK)) !=
1764                         rte_cpu_to_le_64(I40E_TX_DESC_DTYPE_DESC_DONE))
1765                 return 0;
1766
1767         txep = &(txq->sw_ring[txq->tx_next_dd - (txq->tx_rs_thresh - 1)]);
1768
1769         for (i = 0; i < txq->tx_rs_thresh; i++)
1770                 rte_prefetch0((txep + i)->mbuf);
1771
1772         if (txq->txq_flags & (uint32_t)ETH_TXQ_FLAGS_NOREFCOUNT) {
1773                 for (i = 0; i < txq->tx_rs_thresh; ++i, ++txep) {
1774                         rte_mempool_put(txep->mbuf->pool, txep->mbuf);
1775                         txep->mbuf = NULL;
1776                 }
1777         } else {
1778                 for (i = 0; i < txq->tx_rs_thresh; ++i, ++txep) {
1779                         rte_pktmbuf_free_seg(txep->mbuf);
1780                         txep->mbuf = NULL;
1781                 }
1782         }
1783
1784         txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + txq->tx_rs_thresh);
1785         txq->tx_next_dd = (uint16_t)(txq->tx_next_dd + txq->tx_rs_thresh);
1786         if (txq->tx_next_dd >= txq->nb_tx_desc)
1787                 txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);
1788
1789         return txq->tx_rs_thresh;
1790 }
1791
1792 /* Populate 4 descriptors with data from 4 mbufs */
1793 static inline void
1794 tx4(volatile struct i40e_tx_desc *txdp, struct rte_mbuf **pkts)
1795 {
1796         uint64_t dma_addr;
1797         uint32_t i;
1798
1799         for (i = 0; i < 4; i++, txdp++, pkts++) {
1800                 dma_addr = rte_mbuf_data_dma_addr(*pkts);
1801                 txdp->buffer_addr = rte_cpu_to_le_64(dma_addr);
1802                 txdp->cmd_type_offset_bsz =
1803                         i40e_build_ctob((uint32_t)I40E_TD_CMD, 0,
1804                                         (*pkts)->data_len, 0);
1805         }
1806 }
1807
1808 /* Populate 1 descriptor with data from 1 mbuf */
1809 static inline void
1810 tx1(volatile struct i40e_tx_desc *txdp, struct rte_mbuf **pkts)
1811 {
1812         uint64_t dma_addr;
1813
1814         dma_addr = rte_mbuf_data_dma_addr(*pkts);
1815         txdp->buffer_addr = rte_cpu_to_le_64(dma_addr);
1816         txdp->cmd_type_offset_bsz =
1817                 i40e_build_ctob((uint32_t)I40E_TD_CMD, 0,
1818                                 (*pkts)->data_len, 0);
1819 }
1820
1821 /* Fill hardware descriptor ring with mbuf data */
1822 static inline void
1823 i40e_tx_fill_hw_ring(struct i40e_tx_queue *txq,
1824                      struct rte_mbuf **pkts,
1825                      uint16_t nb_pkts)
1826 {
1827         volatile struct i40e_tx_desc *txdp = &(txq->tx_ring[txq->tx_tail]);
1828         struct i40e_tx_entry *txep = &(txq->sw_ring[txq->tx_tail]);
1829         const int N_PER_LOOP = 4;
1830         const int N_PER_LOOP_MASK = N_PER_LOOP - 1;
1831         int mainpart, leftover;
1832         int i, j;
1833
1834         mainpart = (nb_pkts & ((uint32_t) ~N_PER_LOOP_MASK));
1835         leftover = (nb_pkts & ((uint32_t)  N_PER_LOOP_MASK));
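        /*
         * Illustrative example: for nb_pkts = 7, mainpart = 4 (one tx4()
         * call) and leftover = 3 (three tx1() calls).
         */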
1836         for (i = 0; i < mainpart; i += N_PER_LOOP) {
1837                 for (j = 0; j < N_PER_LOOP; ++j) {
1838                         (txep + i + j)->mbuf = *(pkts + i + j);
1839                 }
1840                 tx4(txdp + i, pkts + i);
1841         }
1842         if (unlikely(leftover > 0)) {
1843                 for (i = 0; i < leftover; ++i) {
1844                         (txep + mainpart + i)->mbuf = *(pkts + mainpart + i);
1845                         tx1(txdp + mainpart + i, pkts + mainpart + i);
1846                 }
1847         }
1848 }
1849
1850 static inline uint16_t
1851 tx_xmit_pkts(struct i40e_tx_queue *txq,
1852              struct rte_mbuf **tx_pkts,
1853              uint16_t nb_pkts)
1854 {
1855         volatile struct i40e_tx_desc *txr = txq->tx_ring;
1856         uint16_t n = 0;
1857
1858         /**
1859          * Begin scanning the H/W ring for done descriptors when the number
1860          * of available descriptors drops below tx_free_thresh. For each done
1861          * descriptor, free the associated buffer.
1862          */
1863         if (txq->nb_tx_free < txq->tx_free_thresh)
1864                 i40e_tx_free_bufs(txq);
1865
1866         /* Use available descriptors only */
1867         nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts);
1868         if (unlikely(!nb_pkts))
1869                 return 0;
1870
1871         txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts);
1872         if ((txq->tx_tail + nb_pkts) > txq->nb_tx_desc) {
1873                 n = (uint16_t)(txq->nb_tx_desc - txq->tx_tail);
1874                 i40e_tx_fill_hw_ring(txq, tx_pkts, n);
1875                 txr[txq->tx_next_rs].cmd_type_offset_bsz |=
1876                         rte_cpu_to_le_64(((uint64_t)I40E_TX_DESC_CMD_RS) <<
1877                                                 I40E_TXD_QW1_CMD_SHIFT);
1878                 txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
1879                 txq->tx_tail = 0;
1880         }
1881
1882         /* Fill hardware descriptor ring with mbuf data */
1883         i40e_tx_fill_hw_ring(txq, tx_pkts + n, (uint16_t)(nb_pkts - n));
1884         txq->tx_tail = (uint16_t)(txq->tx_tail + (nb_pkts - n));
1885
1886         /* Determine whether the RS bit needs to be set */
1887         if (txq->tx_tail > txq->tx_next_rs) {
1888                 txr[txq->tx_next_rs].cmd_type_offset_bsz |=
1889                         rte_cpu_to_le_64(((uint64_t)I40E_TX_DESC_CMD_RS) <<
1890                                                 I40E_TXD_QW1_CMD_SHIFT);
1891                 txq->tx_next_rs =
1892                         (uint16_t)(txq->tx_next_rs + txq->tx_rs_thresh);
1893                 if (txq->tx_next_rs >= txq->nb_tx_desc)
1894                         txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
1895         }
1896
1897         if (txq->tx_tail >= txq->nb_tx_desc)
1898                 txq->tx_tail = 0;
1899
1900         /* Update the tx tail register */
1901         rte_wmb();
1902         I40E_PCI_REG_WRITE(txq->qtx_tail, txq->tx_tail);
1903
1904         return nb_pkts;
1905 }
1906
1907 static uint16_t
1908 i40e_xmit_pkts_simple(void *tx_queue,
1909                       struct rte_mbuf **tx_pkts,
1910                       uint16_t nb_pkts)
1911 {
1912         uint16_t nb_tx = 0;
1913
1914         if (likely(nb_pkts <= I40E_TX_MAX_BURST))
1915                 return tx_xmit_pkts((struct i40e_tx_queue *)tx_queue,
1916                                                 tx_pkts, nb_pkts);
1917
1918         while (nb_pkts) {
1919                 uint16_t ret, num = (uint16_t)RTE_MIN(nb_pkts,
1920                                                 I40E_TX_MAX_BURST);
1921
1922                 ret = tx_xmit_pkts((struct i40e_tx_queue *)tx_queue,
1923                                                 &tx_pkts[nb_tx], num);
1924                 nb_tx = (uint16_t)(nb_tx + ret);
1925                 nb_pkts = (uint16_t)(nb_pkts - ret);
1926                 if (ret < num)
1927                         break;
1928         }
1929
1930         return nb_tx;
1931 }
1932
1933 /*
1934  * Find the VSI a queue belongs to. 'queue_idx' is the queue index the
1935  * application uses, assuming the indexes are sequential. From the driver's
1936  * perspective they are not: for example, q0 belongs to the FDIR VSI,
1937  * q1-q64 to the MAIN VSI, q65-q96 to SRIOV VSIs and q97-q128 to VMDQ VSIs.
1938  * An application running on the host can use q1-q64 and q97-q128, 96
1939  * queues in total, and addresses them with queue_idx 0 to 95, while the
1940  * real queue indexes differ. This function performs the queue mapping to
1941  * find the VSI the queue belongs to.
1942  */
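/*
 * Worked example (numbers are assumptions): with main_vsi->nb_qps = 64,
 * no SRIOV VFs and vmdq_nb_qps = 4, queue_idx 10 maps to the MAIN VSI,
 * while queue_idx 66 maps to pf->vmdq[(66 - 64) / 4].vsi, i.e. the first
 * VMDQ VSI.
 */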
1943 static struct i40e_vsi*
1944 i40e_pf_get_vsi_by_qindex(struct i40e_pf *pf, uint16_t queue_idx)
1945 {
1946         /* the queue in MAIN VSI range */
1947         if (queue_idx < pf->main_vsi->nb_qps)
1948                 return pf->main_vsi;
1949
1950         queue_idx -= pf->main_vsi->nb_qps;
1951
1952         /* queue_idx is out of the VMDQ VSIs range */
1953         if (queue_idx > pf->nb_cfg_vmdq_vsi * pf->vmdq_nb_qps - 1) {
1954                 PMD_INIT_LOG(ERR, "queue_idx out of range. VMDQ configured?");
1955                 return NULL;
1956         }
1957
1958         return pf->vmdq[queue_idx / pf->vmdq_nb_qps].vsi;
1959 }
1960
1961 static uint16_t
1962 i40e_get_queue_offset_by_qindex(struct i40e_pf *pf, uint16_t queue_idx)
1963 {
1964         /* the queue in MAIN VSI range */
1965         if (queue_idx < pf->main_vsi->nb_qps)
1966                 return queue_idx;
1967
1968         /* It's VMDQ queues */
1969         queue_idx -= pf->main_vsi->nb_qps;
1970
1971         if (pf->nb_cfg_vmdq_vsi)
1972                 return queue_idx % pf->vmdq_nb_qps;
1973         else {
1974                 PMD_INIT_LOG(ERR, "Fail to get queue offset");
1975                 return (uint16_t)(-1);
1976         }
1977 }
1978
1979 int
1980 i40e_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
1981 {
1982         struct i40e_rx_queue *rxq;
1983         int err = -1;
1984         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1985
1986         PMD_INIT_FUNC_TRACE();
1987
1988         if (rx_queue_id < dev->data->nb_rx_queues) {
1989                 rxq = dev->data->rx_queues[rx_queue_id];
1990
1991                 err = i40e_alloc_rx_queue_mbufs(rxq);
1992                 if (err) {
1993                         PMD_DRV_LOG(ERR, "Failed to allocate RX queue mbuf");
1994                         return err;
1995                 }
1996
1997                 rte_wmb();
1998
1999                 /* Init the RX tail register. */
2000                 I40E_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
2001
2002                 err = i40e_switch_rx_queue(hw, rxq->reg_idx, TRUE);
2003
2004                 if (err) {
2005                         PMD_DRV_LOG(ERR, "Failed to switch RX queue %u on",
2006                                     rx_queue_id);
2007
2008                         i40e_rx_queue_release_mbufs(rxq);
2009                         i40e_reset_rx_queue(rxq);
2010                 } else
2011                         dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
2012         }
2013
2014         return err;
2015 }
2016
2017 int
2018 i40e_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
2019 {
2020         struct i40e_rx_queue *rxq;
2021         int err;
2022         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2023
2024         if (rx_queue_id < dev->data->nb_rx_queues) {
2025                 rxq = dev->data->rx_queues[rx_queue_id];
2026
2027                 /*
2028                  * rx_queue_id is the queue id the application refers to,
2029                  * while rxq->reg_idx is the real queue index.
2030                  */
2031                 err = i40e_switch_rx_queue(hw, rxq->reg_idx, FALSE);
2032
2033                 if (err) {
2034                         PMD_DRV_LOG(ERR, "Failed to switch RX queue %u off",
2035                                     rx_queue_id);
2036                         return err;
2037                 }
2038                 i40e_rx_queue_release_mbufs(rxq);
2039                 i40e_reset_rx_queue(rxq);
2040                 dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
2041         }
2042
2043         return 0;
2044 }
2045
2046 int
2047 i40e_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
2048 {
2049         int err = -1;
2050         struct i40e_tx_queue *txq;
2051         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2052
2053         PMD_INIT_FUNC_TRACE();
2054
2055         if (tx_queue_id < dev->data->nb_tx_queues) {
2056                 txq = dev->data->tx_queues[tx_queue_id];
2057
2058                 /*
2059                  * tx_queue_id is the queue id the application refers to,
2060                  * while txq->reg_idx is the real queue index.
2061                  */
2062                 err = i40e_switch_tx_queue(hw, txq->reg_idx, TRUE);
2063                 if (err)
2064                         PMD_DRV_LOG(ERR, "Failed to switch TX queue %u on",
2065                                     tx_queue_id);
2066                 else
2067                         dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
2068         }
2069
2070         return err;
2071 }
2072
2073 int
2074 i40e_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
2075 {
2076         struct i40e_tx_queue *txq;
2077         int err;
2078         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2079
2080         if (tx_queue_id < dev->data->nb_tx_queues) {
2081                 txq = dev->data->tx_queues[tx_queue_id];
2082
2083                 /*
2084                  * tx_queue_id is the queue id the application refers to,
2085                  * while txq->reg_idx is the real queue index.
2086                  */
2087                 err = i40e_switch_tx_queue(hw, txq->reg_idx, FALSE);
2088
2089                 if (err) {
2090                         PMD_DRV_LOG(ERR, "Failed to switch TX queue %u off",
2091                                     tx_queue_id);
2092                         return err;
2093                 }
2094
2095                 i40e_tx_queue_release_mbufs(txq);
2096                 i40e_reset_tx_queue(txq);
2097                 dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
2098         }
2099
2100         return 0;
2101 }
2102
2103 const uint32_t *
2104 i40e_dev_supported_ptypes_get(struct rte_eth_dev *dev)
2105 {
2106         static const uint32_t ptypes[] = {
2107                 /* refers to i40e_rxd_pkt_type_mapping() */
2108                 RTE_PTYPE_L2_ETHER,
2109                 RTE_PTYPE_L2_ETHER_TIMESYNC,
2110                 RTE_PTYPE_L2_ETHER_LLDP,
2111                 RTE_PTYPE_L2_ETHER_ARP,
2112                 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
2113                 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
2114                 RTE_PTYPE_L4_FRAG,
2115                 RTE_PTYPE_L4_ICMP,
2116                 RTE_PTYPE_L4_NONFRAG,
2117                 RTE_PTYPE_L4_SCTP,
2118                 RTE_PTYPE_L4_TCP,
2119                 RTE_PTYPE_L4_UDP,
2120                 RTE_PTYPE_TUNNEL_GRENAT,
2121                 RTE_PTYPE_TUNNEL_IP,
2122                 RTE_PTYPE_INNER_L2_ETHER,
2123                 RTE_PTYPE_INNER_L2_ETHER_VLAN,
2124                 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN,
2125                 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN,
2126                 RTE_PTYPE_INNER_L4_FRAG,
2127                 RTE_PTYPE_INNER_L4_ICMP,
2128                 RTE_PTYPE_INNER_L4_NONFRAG,
2129                 RTE_PTYPE_INNER_L4_SCTP,
2130                 RTE_PTYPE_INNER_L4_TCP,
2131                 RTE_PTYPE_INNER_L4_UDP,
2132                 RTE_PTYPE_UNKNOWN
2133         };
2134
2135         if (dev->rx_pkt_burst == i40e_recv_pkts ||
2136 #ifdef RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC
2137             dev->rx_pkt_burst == i40e_recv_pkts_bulk_alloc ||
2138 #endif
2139             dev->rx_pkt_burst == i40e_recv_scattered_pkts)
2140                 return ptypes;
2141         return NULL;
2142 }
2143
2144 int
2145 i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
2146                         uint16_t queue_idx,
2147                         uint16_t nb_desc,
2148                         unsigned int socket_id,
2149                         const struct rte_eth_rxconf *rx_conf,
2150                         struct rte_mempool *mp)
2151 {
2152         struct i40e_vsi *vsi;
2153         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2154         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2155         struct i40e_adapter *ad =
2156                 I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
2157         struct i40e_rx_queue *rxq;
2158         const struct rte_memzone *rz;
2159         uint32_t ring_size;
2160         uint16_t len, i;
2161         uint16_t base, bsf, tc_mapping;
2162         int use_def_burst_func = 1;
2163
2164         if (hw->mac.type == I40E_MAC_VF || hw->mac.type == I40E_MAC_X722_VF) {
2165                 struct i40e_vf *vf =
2166                         I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
2167                 vsi = &vf->vsi;
2168         } else
2169                 vsi = i40e_pf_get_vsi_by_qindex(pf, queue_idx);
2170
2171         if (vsi == NULL) {
2172                 PMD_DRV_LOG(ERR, "VSI not available or queue "
2173                             "index exceeds the maximum");
2174                 return I40E_ERR_PARAM;
2175         }
2176         if (nb_desc % I40E_ALIGN_RING_DESC != 0 ||
2177                         (nb_desc > I40E_MAX_RING_DESC) ||
2178                         (nb_desc < I40E_MIN_RING_DESC)) {
2179                 PMD_DRV_LOG(ERR, "Number (%u) of receive descriptors is "
2180                             "invalid", nb_desc);
2181                 return I40E_ERR_PARAM;
2182         }
2183
2184         /* Free memory if needed */
2185         if (dev->data->rx_queues[queue_idx]) {
2186                 i40e_dev_rx_queue_release(dev->data->rx_queues[queue_idx]);
2187                 dev->data->rx_queues[queue_idx] = NULL;
2188         }
2189
2190         /* Allocate the rx queue data structure */
2191         rxq = rte_zmalloc_socket("i40e rx queue",
2192                                  sizeof(struct i40e_rx_queue),
2193                                  RTE_CACHE_LINE_SIZE,
2194                                  socket_id);
2195         if (!rxq) {
2196                 PMD_DRV_LOG(ERR, "Failed to allocate memory for "
2197                             "rx queue data structure");
2198                 return -ENOMEM;
2199         }
2200         rxq->mp = mp;
2201         rxq->nb_rx_desc = nb_desc;
2202         rxq->rx_free_thresh = rx_conf->rx_free_thresh;
2203         rxq->queue_id = queue_idx;
2204         if (hw->mac.type == I40E_MAC_VF || hw->mac.type == I40E_MAC_X722_VF)
2205                 rxq->reg_idx = queue_idx;
2206         else /* PF device */
2207                 rxq->reg_idx = vsi->base_queue +
2208                         i40e_get_queue_offset_by_qindex(pf, queue_idx);
2209
2210         rxq->port_id = dev->data->port_id;
2211         rxq->crc_len = (uint8_t) ((dev->data->dev_conf.rxmode.hw_strip_crc) ?
2212                                                         0 : ETHER_CRC_LEN);
2213         rxq->drop_en = rx_conf->rx_drop_en;
2214         rxq->vsi = vsi;
2215         rxq->rx_deferred_start = rx_conf->rx_deferred_start;
2216
2217         /* Allocate the maximum number of RX ring hardware descriptors. */
2218         ring_size = sizeof(union i40e_rx_desc) * I40E_MAX_RING_DESC;
2219         ring_size = RTE_ALIGN(ring_size, I40E_DMA_MEM_ALIGN);
2220         rz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx,
2221                               ring_size, I40E_RING_BASE_ALIGN, socket_id);
2222         if (!rz) {
2223                 i40e_dev_rx_queue_release(rxq);
2224                 PMD_DRV_LOG(ERR, "Failed to reserve DMA memory for RX");
2225                 return -ENOMEM;
2226         }
2227
2228         /* Zero all the descriptors in the ring. */
2229         memset(rz->addr, 0, ring_size);
2230
2231         rxq->rx_ring_phys_addr = rte_mem_phy2mch(rz->memseg_id, rz->phys_addr);
2232         rxq->rx_ring = (union i40e_rx_desc *)rz->addr;
2233
2234 #ifdef RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC
2235         len = (uint16_t)(nb_desc + RTE_PMD_I40E_RX_MAX_BURST);
2236 #else
2237         len = nb_desc;
2238 #endif
2239
2240         /* Allocate the software ring. */
2241         rxq->sw_ring =
2242                 rte_zmalloc_socket("i40e rx sw ring",
2243                                    sizeof(struct i40e_rx_entry) * len,
2244                                    RTE_CACHE_LINE_SIZE,
2245                                    socket_id);
2246         if (!rxq->sw_ring) {
2247                 i40e_dev_rx_queue_release(rxq);
2248                 PMD_DRV_LOG(ERR, "Failed to allocate memory for SW ring");
2249                 return -ENOMEM;
2250         }
2251
2252         i40e_reset_rx_queue(rxq);
2253         rxq->q_set = TRUE;
2254         dev->data->rx_queues[queue_idx] = rxq;
2255
2256         use_def_burst_func = check_rx_burst_bulk_alloc_preconditions(rxq);
2257
2258         if (!use_def_burst_func) {
2259 #ifdef RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC
2260                 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
2261                              "satisfied. Rx Burst Bulk Alloc function will be "
2262                              "used on port=%d, queue=%d.",
2263                              rxq->port_id, rxq->queue_id);
2264 #endif /* RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC */
2265         } else {
2266                 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
2267                              "not satisfied, Scattered Rx is requested, "
2268                              "or RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC is "
2269                              "not enabled on port=%d, queue=%d.",
2270                              rxq->port_id, rxq->queue_id);
2271                 ad->rx_bulk_alloc_allowed = false;
2272         }
2273
2274         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
2275                 if (!(vsi->enabled_tc & (1 << i)))
2276                         continue;
2277                 tc_mapping = rte_le_to_cpu_16(vsi->info.tc_mapping[i]);
2278                 base = (tc_mapping & I40E_AQ_VSI_TC_QUE_OFFSET_MASK) >>
2279                         I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT;
2280                 bsf = (tc_mapping & I40E_AQ_VSI_TC_QUE_NUMBER_MASK) >>
2281                         I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT;
2282
2283                 if (queue_idx >= base && queue_idx < (base + BIT(bsf)))
2284                         rxq->dcb_tc = i;
2285         }
2286
2287         return 0;
2288 }
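
/*
 * Usage sketch (illustrative; not part of the driver): this setup path is
 * reached through the ethdev API, e.g.:
 *
 *     struct rte_mempool *mp = rte_pktmbuf_pool_create("rx_pool", 4096, 256,
 *                     0, RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
 *     rte_eth_rx_queue_setup(port_id, 0, 1024, rte_socket_id(), NULL, mp);
 *
 * The pool name, sizes and descriptor count are assumptions for the example.
 */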
2289
2290 void
2291 i40e_dev_rx_queue_release(void *rxq)
2292 {
2293         struct i40e_rx_queue *q = (struct i40e_rx_queue *)rxq;
2294
2295         if (!q) {
2296                 PMD_DRV_LOG(DEBUG, "Pointer to rxq is NULL");
2297                 return;
2298         }
2299
2300         i40e_rx_queue_release_mbufs(q);
2301         rte_free(q->sw_ring);
2302         rte_free(q);
2303 }
2304
2305 uint32_t
2306 i40e_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
2307 {
2308 #define I40E_RXQ_SCAN_INTERVAL 4
2309         volatile union i40e_rx_desc *rxdp;
2310         struct i40e_rx_queue *rxq;
2311         uint16_t desc = 0;
2312
2313         if (unlikely(rx_queue_id >= dev->data->nb_rx_queues)) {
2314                 PMD_DRV_LOG(ERR, "Invalid RX queue id %u", rx_queue_id);
2315                 return 0;
2316         }
2317
2318         rxq = dev->data->rx_queues[rx_queue_id];
2319         rxdp = &(rxq->rx_ring[rxq->rx_tail]);
2320         while ((desc < rxq->nb_rx_desc) &&
2321                 ((rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len) &
2322                 I40E_RXD_QW1_STATUS_MASK) >> I40E_RXD_QW1_STATUS_SHIFT) &
2323                                 (1 << I40E_RX_DESC_STATUS_DD_SHIFT)) {
2324                 /**
2325                  * Check the DD bit of every fourth RX descriptor to avoid
2326                  * checking too frequently and degrading performance too
2327                  * much.
2328                  */
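                /*
                 * Illustrative consequence: the returned count advances in
                 * steps of I40E_RXQ_SCAN_INTERVAL, e.g. 5 or 6 completed
                 * descriptors are both reported as 8.
                 */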
2329                 desc += I40E_RXQ_SCAN_INTERVAL;
2330                 rxdp += I40E_RXQ_SCAN_INTERVAL;
2331                 if (rxq->rx_tail + desc >= rxq->nb_rx_desc)
2332                         rxdp = &(rxq->rx_ring[rxq->rx_tail +
2333                                         desc - rxq->nb_rx_desc]);
2334         }
2335
2336         return desc;
2337 }
2338
2339 int
2340 i40e_dev_rx_descriptor_done(void *rx_queue, uint16_t offset)
2341 {
2342         volatile union i40e_rx_desc *rxdp;
2343         struct i40e_rx_queue *rxq = rx_queue;
2344         uint16_t desc;
2345         int ret;
2346
2347         if (unlikely(offset >= rxq->nb_rx_desc)) {
2348                 PMD_DRV_LOG(ERR, "Invalid RX queue id %u", offset);
2349                 return 0;
2350         }
2351
2352         desc = rxq->rx_tail + offset;
2353         if (desc >= rxq->nb_rx_desc)
2354                 desc -= rxq->nb_rx_desc;
2355
2356         rxdp = &(rxq->rx_ring[desc]);
2357
2358         ret = !!(((rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len) &
2359                 I40E_RXD_QW1_STATUS_MASK) >> I40E_RXD_QW1_STATUS_SHIFT) &
2360                                 (1 << I40E_RX_DESC_STATUS_DD_SHIFT));
2361
2362         return ret;
2363 }
2364
2365 int
2366 i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,
2367                         uint16_t queue_idx,
2368                         uint16_t nb_desc,
2369                         unsigned int socket_id,
2370                         const struct rte_eth_txconf *tx_conf)
2371 {
2372         struct i40e_vsi *vsi;
2373         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2374         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2375         struct i40e_tx_queue *txq;
2376         const struct rte_memzone *tz;
2377         uint32_t ring_size;
2378         uint16_t tx_rs_thresh, tx_free_thresh;
2379         uint16_t i, base, bsf, tc_mapping;
2380
2381         if (hw->mac.type == I40E_MAC_VF || hw->mac.type == I40E_MAC_X722_VF) {
2382                 struct i40e_vf *vf =
2383                         I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
2384                 vsi = &vf->vsi;
2385         } else
2386                 vsi = i40e_pf_get_vsi_by_qindex(pf, queue_idx);
2387
2388         if (vsi == NULL) {
2389                 PMD_DRV_LOG(ERR, "VSI is NULL, or queue index (%u) "
2390                             "exceeds the maximum", queue_idx);
2391                 return I40E_ERR_PARAM;
2392         }
2393
2394         if (nb_desc % I40E_ALIGN_RING_DESC != 0 ||
2395                         (nb_desc > I40E_MAX_RING_DESC) ||
2396                         (nb_desc < I40E_MIN_RING_DESC)) {
2397                 PMD_DRV_LOG(ERR, "Number (%u) of transmit descriptors is "
2398                             "invalid", nb_desc);
2399                 return I40E_ERR_PARAM;
2400         }
2401
2402         /**
2403          * The following two parameters control the setting of the RS bit on
2404          * transmit descriptors. TX descriptors will have their RS bit set
2405          * after txq->tx_rs_thresh descriptors have been used. The TX
2406          * descriptor ring will be cleaned after txq->tx_free_thresh
2407          * descriptors are used or if the number of descriptors required to
2408          * transmit a packet is greater than the number of free TX descriptors.
2409          *
2410          * The following constraints must be satisfied:
2411          *  - tx_rs_thresh must be greater than 0.
2412          *  - tx_rs_thresh must be less than the size of the ring minus 2.
2413          *  - tx_rs_thresh must be less than or equal to tx_free_thresh.
2414          *  - tx_rs_thresh must be a divisor of the ring size.
2415          *  - tx_free_thresh must be greater than 0.
2416          *  - tx_free_thresh must be less than the size of the ring minus 3.
2417          *
2418          * One descriptor in the TX ring is used as a sentinel to avoid a H/W
2419          * race condition, hence the maximum threshold constraints. When set
2420          * to zero use default values.
2421          */
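        /*
         * Worked example (values are assumptions): with nb_desc = 1024 and
         * the defaults tx_rs_thresh = 32 and tx_free_thresh = 32, all of the
         * constraints hold: 32 < 1022, 32 <= 32, 1024 % 32 == 0 and
         * 32 < 1021.
         */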
2422         tx_rs_thresh = (uint16_t)((tx_conf->tx_rs_thresh) ?
2423                 tx_conf->tx_rs_thresh : DEFAULT_TX_RS_THRESH);
2424         tx_free_thresh = (uint16_t)((tx_conf->tx_free_thresh) ?
2425                 tx_conf->tx_free_thresh : DEFAULT_TX_FREE_THRESH);
2426         if (tx_rs_thresh >= (nb_desc - 2)) {
2427                 PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than the "
2428                              "number of TX descriptors minus 2. "
2429                              "(tx_rs_thresh=%u port=%d queue=%d)",
2430                              (unsigned int)tx_rs_thresh,
2431                              (int)dev->data->port_id,
2432                              (int)queue_idx);
2433                 return I40E_ERR_PARAM;
2434         }
2435         if (tx_free_thresh >= (nb_desc - 3)) {
2436                 PMD_INIT_LOG(ERR, "tx_free_thresh must be less "
2437                              "than the number of TX descriptors "
2438                              "minus 3. "
2439                              "(tx_free_thresh=%u port=%d queue=%d)",
2440                              (unsigned int)tx_free_thresh,
2441                              (int)dev->data->port_id,
2442                              (int)queue_idx);
2443                 return I40E_ERR_PARAM;
2444         }
2445         if (tx_rs_thresh > tx_free_thresh) {
2446                 PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than or "
2447                              "equal to tx_free_thresh. (tx_free_thresh=%u"
2448                              " tx_rs_thresh=%u port=%d queue=%d)",
2449                              (unsigned int)tx_free_thresh,
2450                              (unsigned int)tx_rs_thresh,
2451                              (int)dev->data->port_id,
2452                              (int)queue_idx);
2453                 return I40E_ERR_PARAM;
2454         }
2455         if ((nb_desc % tx_rs_thresh) != 0) {
2456                 PMD_INIT_LOG(ERR, "tx_rs_thresh must be a divisor of the "
2457                              "number of TX descriptors. (tx_rs_thresh=%u"
2458                              " port=%d queue=%d)",
2459                              (unsigned int)tx_rs_thresh,
2460                              (int)dev->data->port_id,
2461                              (int)queue_idx);
2462                 return I40E_ERR_PARAM;
2463         }
2464         if ((tx_rs_thresh > 1) && (tx_conf->tx_thresh.wthresh != 0)) {
2465                 PMD_INIT_LOG(ERR, "TX WTHRESH must be set to 0 if "
2466                              "tx_rs_thresh is greater than 1. "
2467                              "(tx_rs_thresh=%u port=%d queue=%d)",
2468                              (unsigned int)tx_rs_thresh,
2469                              (int)dev->data->port_id,
2470                              (int)queue_idx);
2471                 return I40E_ERR_PARAM;
2472         }
2473
2474         /* Free memory if needed. */
2475         if (dev->data->tx_queues[queue_idx]) {
2476                 i40e_dev_tx_queue_release(dev->data->tx_queues[queue_idx]);
2477                 dev->data->tx_queues[queue_idx] = NULL;
2478         }
2479
2480         /* Allocate the TX queue data structure. */
2481         txq = rte_zmalloc_socket("i40e tx queue",
2482                                   sizeof(struct i40e_tx_queue),
2483                                   RTE_CACHE_LINE_SIZE,
2484                                   socket_id);
2485         if (!txq) {
2486                 PMD_DRV_LOG(ERR, "Failed to allocate memory for "
2487                             "tx queue structure");
2488                 return -ENOMEM;
2489         }
2490
2491         /* Allocate TX hardware ring descriptors. */
2492         ring_size = sizeof(struct i40e_tx_desc) * I40E_MAX_RING_DESC;
2493         ring_size = RTE_ALIGN(ring_size, I40E_DMA_MEM_ALIGN);
2494         tz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx,
2495                               ring_size, I40E_RING_BASE_ALIGN, socket_id);
2496         if (!tz) {
2497                 i40e_dev_tx_queue_release(txq);
2498                 PMD_DRV_LOG(ERR, "Failed to reserve DMA memory for TX");
2499                 return -ENOMEM;
2500         }
2501
2502         txq->nb_tx_desc = nb_desc;
2503         txq->tx_rs_thresh = tx_rs_thresh;
2504         txq->tx_free_thresh = tx_free_thresh;
2505         txq->pthresh = tx_conf->tx_thresh.pthresh;
2506         txq->hthresh = tx_conf->tx_thresh.hthresh;
2507         txq->wthresh = tx_conf->tx_thresh.wthresh;
2508         txq->queue_id = queue_idx;
2509         if (hw->mac.type == I40E_MAC_VF || hw->mac.type == I40E_MAC_X722_VF)
2510                 txq->reg_idx = queue_idx;
2511         else /* PF device */
2512                 txq->reg_idx = vsi->base_queue +
2513                         i40e_get_queue_offset_by_qindex(pf, queue_idx);
2514
2515         txq->port_id = dev->data->port_id;
2516         txq->txq_flags = tx_conf->txq_flags;
2517         txq->vsi = vsi;
2518         txq->tx_deferred_start = tx_conf->tx_deferred_start;
2519
2520         txq->tx_ring_phys_addr = rte_mem_phy2mch(tz->memseg_id, tz->phys_addr);
2521         txq->tx_ring = (struct i40e_tx_desc *)tz->addr;
2522
2523         /* Allocate software ring */
2524         txq->sw_ring =
2525                 rte_zmalloc_socket("i40e tx sw ring",
2526                                    sizeof(struct i40e_tx_entry) * nb_desc,
2527                                    RTE_CACHE_LINE_SIZE,
2528                                    socket_id);
2529         if (!txq->sw_ring) {
2530                 i40e_dev_tx_queue_release(txq);
2531                 PMD_DRV_LOG(ERR, "Failed to allocate memory for SW TX ring");
2532                 return -ENOMEM;
2533         }
2534
2535         i40e_reset_tx_queue(txq);
2536         txq->q_set = TRUE;
2537         dev->data->tx_queues[queue_idx] = txq;
2538
2539         /* Use a simple TX queue without offloads or multi segs if possible */
2540         i40e_set_tx_function_flag(dev, txq);
2541
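        /*
         * Record which DCB traffic class this queue belongs to: each enabled
         * TC maps a contiguous range of queues (a base offset plus a
         * power-of-two count) in the VSI TC mapping.
         */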
2542         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
2543                 if (!(vsi->enabled_tc & (1 << i)))
2544                         continue;
2545                 tc_mapping = rte_le_to_cpu_16(vsi->info.tc_mapping[i]);
2546                 base = (tc_mapping & I40E_AQ_VSI_TC_QUE_OFFSET_MASK) >>
2547                         I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT;
2548                 bsf = (tc_mapping & I40E_AQ_VSI_TC_QUE_NUMBER_MASK) >>
2549                         I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT;
2550
2551                 if (queue_idx >= base && queue_idx < (base + BIT(bsf)))
2552                         txq->dcb_tc = i;
2553         }
2554
2555         return 0;
2556 }
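/*
 * Usage sketch (hypothetical application code, not part of this driver):
 * the ethdev layer dispatches rte_eth_tx_queue_setup() to the function
 * above, so a setup that satisfies the threshold checks and keeps the
 * simple/vector Tx path available could look like this (port_id is
 * assumed to be a valid, configured port):
 *
 *      struct rte_eth_txconf txconf = {
 *              .tx_rs_thresh = 32,
 *              .tx_free_thresh = 32,
 *              .txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
 *                           ETH_TXQ_FLAGS_NOOFFLOADS,
 *      };
 *
 *      if (rte_eth_tx_queue_setup(port_id, 0, 1024, rte_socket_id(),
 *                                 &txconf) != 0)
 *              rte_exit(EXIT_FAILURE, "TX queue setup failed\n");
 */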
2557
2558 void
2559 i40e_dev_tx_queue_release(void *txq)
2560 {
2561         struct i40e_tx_queue *q = (struct i40e_tx_queue *)txq;
2562
2563         if (!q) {
2564                 PMD_DRV_LOG(DEBUG, "Pointer to TX queue is NULL");
2565                 return;
2566         }
2567
2568         i40e_tx_queue_release_mbufs(q);
2569         rte_free(q->sw_ring);
2570         rte_free(q);
2571 }
2572
2573 const struct rte_memzone *
2574 i40e_memzone_reserve(const char *name, uint32_t len, int socket_id)
2575 {
2576         const struct rte_memzone *mz;
2577
2578         mz = rte_memzone_lookup(name);
2579         if (mz)
2580                 return mz;
2581
2582         if (rte_xen_dom0_supported())
2583                 mz = rte_memzone_reserve_bounded(name, len,
2584                                 socket_id, 0, I40E_RING_BASE_ALIGN, RTE_PGSIZE_2M);
2585         else
2586                 mz = rte_memzone_reserve_aligned(name, len,
2587                                 socket_id, 0, I40E_RING_BASE_ALIGN);
2588         return mz;
2589 }
2590
2591 void
2592 i40e_rx_queue_release_mbufs(struct i40e_rx_queue *rxq)
2593 {
2594         uint16_t i;
2595
2596         /* SSE Vector driver has a different way of releasing mbufs. */
2597         if (rxq->rx_using_sse) {
2598                 i40e_rx_queue_release_mbufs_vec(rxq);
2599                 return;
2600         }
2601
2602         if (!rxq->sw_ring) {
2603                 PMD_DRV_LOG(DEBUG, "Pointer to sw_ring is NULL");
2604                 return;
2605         }
2606
2607         for (i = 0; i < rxq->nb_rx_desc; i++) {
2608                 if (rxq->sw_ring[i].mbuf) {
2609                         rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
2610                         rxq->sw_ring[i].mbuf = NULL;
2611                 }
2612         }
2613 #ifdef RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC
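        /* Also free any mbufs still staged by the bulk-allocation Rx path. */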
2614         if (rxq->rx_nb_avail == 0)
2615                 return;
2616         for (i = 0; i < rxq->rx_nb_avail; i++) {
2617                 struct rte_mbuf *mbuf;
2618
2619                 mbuf = rxq->rx_stage[rxq->rx_next_avail + i];
2620                 rte_pktmbuf_free_seg(mbuf);
2621         }
2622         rxq->rx_nb_avail = 0;
2623 #endif /* RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC */
2624 }
2625
2626 void
2627 i40e_reset_rx_queue(struct i40e_rx_queue *rxq)
2628 {
2629         unsigned i;
2630         uint16_t len;
2631
2632         if (!rxq) {
2633                 PMD_DRV_LOG(DEBUG, "Pointer to rxq is NULL");
2634                 return;
2635         }
2636
2637 #ifdef RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC
2638         if (check_rx_burst_bulk_alloc_preconditions(rxq) == 0)
2639                 len = (uint16_t)(rxq->nb_rx_desc + RTE_PMD_I40E_RX_MAX_BURST);
2640         else
2641 #endif /* RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC */
2642                 len = rxq->nb_rx_desc;
2643
2644         for (i = 0; i < len * sizeof(union i40e_rx_desc); i++)
2645                 ((volatile char *)rxq->rx_ring)[i] = 0;
2646
2647 #ifdef RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC
2648         memset(&rxq->fake_mbuf, 0x0, sizeof(rxq->fake_mbuf));
2649         for (i = 0; i < RTE_PMD_I40E_RX_MAX_BURST; ++i)
2650                 rxq->sw_ring[rxq->nb_rx_desc + i].mbuf = &rxq->fake_mbuf;
2651
2652         rxq->rx_nb_avail = 0;
2653         rxq->rx_next_avail = 0;
2654         rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);
2655 #endif /* RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC */
2656         rxq->rx_tail = 0;
2657         rxq->nb_rx_hold = 0;
2658         rxq->pkt_first_seg = NULL;
2659         rxq->pkt_last_seg = NULL;
2660
2661         rxq->rxrearm_start = 0;
2662         rxq->rxrearm_nb = 0;
2663 }
2664
2665 void
2666 i40e_tx_queue_release_mbufs(struct i40e_tx_queue *txq)
2667 {
2668         uint16_t i;
2669
2670         if (!txq || !txq->sw_ring) {
2671                 PMD_DRV_LOG(DEBUG, "Pointer to txq or sw_ring is NULL");
2672                 return;
2673         }
2674
2675         for (i = 0; i < txq->nb_tx_desc; i++) {
2676                 if (txq->sw_ring[i].mbuf) {
2677                         rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
2678                         txq->sw_ring[i].mbuf = NULL;
2679                 }
2680         }
2681 }
2682
2683 void
2684 i40e_reset_tx_queue(struct i40e_tx_queue *txq)
2685 {
2686         struct i40e_tx_entry *txe;
2687         uint16_t i, prev, size;
2688
2689         if (!txq) {
2690                 PMD_DRV_LOG(DEBUG, "Pointer to txq is NULL");
2691                 return;
2692         }
2693
2694         txe = txq->sw_ring;
2695         size = sizeof(struct i40e_tx_desc) * txq->nb_tx_desc;
2696         for (i = 0; i < size; i++)
2697                 ((volatile char *)txq->tx_ring)[i] = 0;
2698
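        /*
         * Mark every descriptor as done and link the software ring entries
         * into a circular list (the last entry points back to the first) so
         * completed mbufs can be walked and freed in order.
         */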
2699         prev = (uint16_t)(txq->nb_tx_desc - 1);
2700         for (i = 0; i < txq->nb_tx_desc; i++) {
2701                 volatile struct i40e_tx_desc *txd = &txq->tx_ring[i];
2702
2703                 txd->cmd_type_offset_bsz =
2704                         rte_cpu_to_le_64(I40E_TX_DESC_DTYPE_DESC_DONE);
2705                 txe[i].mbuf = NULL;
2706                 txe[i].last_id = i;
2707                 txe[prev].next_id = i;
2708                 prev = i;
2709         }
2710
2711         txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);
2712         txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
2713
2714         txq->tx_tail = 0;
2715         txq->nb_tx_used = 0;
2716
2717         txq->last_desc_cleaned = (uint16_t)(txq->nb_tx_desc - 1);
2718         txq->nb_tx_free = (uint16_t)(txq->nb_tx_desc - 1);
2719 }
2720
2721 /* Init the TX queue in hardware */
2722 int
2723 i40e_tx_queue_init(struct i40e_tx_queue *txq)
2724 {
2725         enum i40e_status_code err = I40E_SUCCESS;
2726         struct i40e_vsi *vsi = txq->vsi;
2727         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
2728         uint16_t pf_q = txq->reg_idx;
2729         struct i40e_hmc_obj_txq tx_ctx;
2730         uint32_t qtx_ctl;
2731
2732         /* clear the context structure first */
2733         memset(&tx_ctx, 0, sizeof(tx_ctx));
2734         tx_ctx.new_context = 1;
2735         tx_ctx.base = txq->tx_ring_phys_addr / I40E_QUEUE_BASE_ADDR_UNIT;
2736         tx_ctx.qlen = txq->nb_tx_desc;
2737
2738 #ifdef RTE_LIBRTE_IEEE1588
2739         tx_ctx.timesync_ena = 1;
2740 #endif
2741         tx_ctx.rdylist = rte_le_to_cpu_16(vsi->info.qs_handle[txq->dcb_tc]);
2742         if (vsi->type == I40E_VSI_FDIR)
2743                 tx_ctx.fd_ena = TRUE;
2744
2745         err = i40e_clear_lan_tx_queue_context(hw, pf_q);
2746         if (err != I40E_SUCCESS) {
2747                 PMD_DRV_LOG(ERR, "Failure of clean lan tx queue context");
2748                 return err;
2749         }
2750
2751         err = i40e_set_lan_tx_queue_context(hw, pf_q, &tx_ctx);
2752         if (err != I40E_SUCCESS) {
2753                 PMD_DRV_LOG(ERR, "Failure of set lan tx queue context");
2754                 return err;
2755         }
2756
2757         /* Now associate this queue with this PCI function */
2758         qtx_ctl = I40E_QTX_CTL_PF_QUEUE;
2759         qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) &
2760                                         I40E_QTX_CTL_PF_INDX_MASK);
2761         I40E_WRITE_REG(hw, I40E_QTX_CTL(pf_q), qtx_ctl);
2762         I40E_WRITE_FLUSH(hw);
2763
2764         txq->qtx_tail = hw->hw_addr + I40E_QTX_TAIL(pf_q);
2765
2766         return err;
2767 }
2768
2769 int
2770 i40e_alloc_rx_queue_mbufs(struct i40e_rx_queue *rxq)
2771 {
2772         struct i40e_rx_entry *rxe = rxq->sw_ring;
2773         uint64_t dma_addr;
2774         uint16_t i;
2775
2776         for (i = 0; i < rxq->nb_rx_desc; i++) {
2777                 volatile union i40e_rx_desc *rxd;
2778                 struct rte_mbuf *mbuf = rte_mbuf_raw_alloc(rxq->mp);
2779
2780                 if (unlikely(!mbuf)) {
2781                         PMD_DRV_LOG(ERR, "Failed to allocate mbuf for RX");
2782                         return -ENOMEM;
2783                 }
2784
2785                 rte_mbuf_refcnt_set(mbuf, 1);
2786                 mbuf->next = NULL;
2787                 mbuf->data_off = RTE_PKTMBUF_HEADROOM;
2788                 mbuf->nb_segs = 1;
2789                 mbuf->port = rxq->port_id;
2790
2791                 dma_addr =
2792                         rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(mbuf));
2793
2794                 rxd = &rxq->rx_ring[i];
2795                 rxd->read.pkt_addr = dma_addr;
2796                 rxd->read.hdr_addr = 0;
2797 #ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC
2798                 rxd->read.rsvd1 = 0;
2799                 rxd->read.rsvd2 = 0;
2800 #endif /* RTE_LIBRTE_I40E_16BYTE_RX_DESC */
2801
2802                 rxe[i].mbuf = mbuf;
2803         }
2804
2805         return 0;
2806 }
2807
2808 /*
2809  * Calculate the buffer length, and check the jumbo frame
2810  * and maximum packet length.
2811  */
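/*
 * Worked example (hypothetical values): with the common mbuf layout of a
 * 2176-byte data room and 128 bytes of headroom, buf_size is 2048. With
 * header split disabled that value is already a multiple of the hardware
 * buffer granularity, so rx_buf_len stays 2048 and max_pkt_len becomes the
 * smaller of rx_buf_chain_len * 2048 and the configured max_rx_pkt_len.
 */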
2812 static int
2813 i40e_rx_queue_config(struct i40e_rx_queue *rxq)
2814 {
2815         struct i40e_pf *pf = I40E_VSI_TO_PF(rxq->vsi);
2816         struct i40e_hw *hw = I40E_VSI_TO_HW(rxq->vsi);
2817         struct rte_eth_dev_data *data = pf->dev_data;
2818         uint16_t buf_size, len;
2819
2820         buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mp) -
2821                 RTE_PKTMBUF_HEADROOM);
2822
2823         switch (pf->flags & (I40E_FLAG_HEADER_SPLIT_DISABLED |
2824                         I40E_FLAG_HEADER_SPLIT_ENABLED)) {
2825         case I40E_FLAG_HEADER_SPLIT_ENABLED: /* Not supported */
2826                 rxq->rx_hdr_len = RTE_ALIGN(I40E_RXBUF_SZ_1024,
2827                                 (1 << I40E_RXQ_CTX_HBUFF_SHIFT));
2828                 rxq->rx_buf_len = RTE_ALIGN(I40E_RXBUF_SZ_2048,
2829                                 (1 << I40E_RXQ_CTX_DBUFF_SHIFT));
2830                 rxq->hs_mode = i40e_header_split_enabled;
2831                 break;
2832         case I40E_FLAG_HEADER_SPLIT_DISABLED:
2833         default:
2834                 rxq->rx_hdr_len = 0;
2835                 rxq->rx_buf_len = RTE_ALIGN(buf_size,
2836                         (1 << I40E_RXQ_CTX_DBUFF_SHIFT));
2837                 rxq->hs_mode = i40e_header_split_none;
2838                 break;
2839         }
2840
2841         len = hw->func_caps.rx_buf_chain_len * rxq->rx_buf_len;
2842         rxq->max_pkt_len = RTE_MIN(len, data->dev_conf.rxmode.max_rx_pkt_len);
2843         if (data->dev_conf.rxmode.jumbo_frame == 1) {
2844                 if (rxq->max_pkt_len <= ETHER_MAX_LEN ||
2845                         rxq->max_pkt_len > I40E_FRAME_SIZE_MAX) {
2846                         PMD_DRV_LOG(ERR, "maximum packet length must "
2847                                     "be larger than %u and smaller than %u,"
2848                                     "as jumbo frame is enabled",
2849                                     (uint32_t)ETHER_MAX_LEN,
2850                                     (uint32_t)I40E_FRAME_SIZE_MAX);
2851                         return I40E_ERR_CONFIG;
2852                 }
2853         } else {
2854                 if (rxq->max_pkt_len < ETHER_MIN_LEN ||
2855                         rxq->max_pkt_len > ETHER_MAX_LEN) {
2856                         PMD_DRV_LOG(ERR, "maximum packet length must be "
2857                                     "larger than %u and smaller than %u, "
2858                                     "as jumbo frame is disabled",
2859                                     (uint32_t)ETHER_MIN_LEN,
2860                                     (uint32_t)ETHER_MAX_LEN);
2861                         return I40E_ERR_CONFIG;
2862                 }
2863         }
2864
2865         return 0;
2866 }
2867
2868 /* Init the RX queue in hardware */
2869 int
2870 i40e_rx_queue_init(struct i40e_rx_queue *rxq)
2871 {
2872         int err = I40E_SUCCESS;
2873         struct i40e_hw *hw = I40E_VSI_TO_HW(rxq->vsi);
2874         struct rte_eth_dev_data *dev_data = I40E_VSI_TO_DEV_DATA(rxq->vsi);
2875         uint16_t pf_q = rxq->reg_idx;
2876         uint16_t buf_size;
2877         struct i40e_hmc_obj_rxq rx_ctx;
2878
2879         err = i40e_rx_queue_config(rxq);
2880         if (err < 0) {
2881                 PMD_DRV_LOG(ERR, "Failed to config RX queue");
2882                 return err;
2883         }
2884
2885         /* Clear the context structure first */
2886         memset(&rx_ctx, 0, sizeof(struct i40e_hmc_obj_rxq));
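        /*
         * The buffer sizes are programmed into the HMC context in hardware
         * units: rx_buf_len and rx_hdr_len are shifted right by the DBUFF
         * and HBUFF shifts respectively.
         */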
2887         rx_ctx.dbuff = rxq->rx_buf_len >> I40E_RXQ_CTX_DBUFF_SHIFT;
2888         rx_ctx.hbuff = rxq->rx_hdr_len >> I40E_RXQ_CTX_HBUFF_SHIFT;
2889
2890         rx_ctx.base = rxq->rx_ring_phys_addr / I40E_QUEUE_BASE_ADDR_UNIT;
2891         rx_ctx.qlen = rxq->nb_rx_desc;
2892 #ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC
2893         rx_ctx.dsize = 1;
2894 #endif
2895         rx_ctx.dtype = rxq->hs_mode;
2896         if (rxq->hs_mode)
2897                 rx_ctx.hsplit_0 = I40E_HEADER_SPLIT_ALL;
2898         else
2899                 rx_ctx.hsplit_0 = I40E_HEADER_SPLIT_NONE;
2900         rx_ctx.rxmax = rxq->max_pkt_len;
2901         rx_ctx.tphrdesc_ena = 1;
2902         rx_ctx.tphwdesc_ena = 1;
2903         rx_ctx.tphdata_ena = 1;
2904         rx_ctx.tphhead_ena = 1;
2905         rx_ctx.lrxqthresh = 2;
2906         rx_ctx.crcstrip = (rxq->crc_len == 0) ? 1 : 0;
2907         rx_ctx.l2tsel = 1;
2908         /* showiv indicates whether the inner VLAN is stripped from
2909          * tunnelled packets. When set to 1, the VLAN tag is stripped from
2910          * the inner header but the hardware does not report it in the
2911          * descriptor, so keep it at zero by default.
2912          */
2913         rx_ctx.showiv = 0;
2914         rx_ctx.prefena = 1;
2915
2916         err = i40e_clear_lan_rx_queue_context(hw, pf_q);
2917         if (err != I40E_SUCCESS) {
2918                 PMD_DRV_LOG(ERR, "Failed to clear LAN RX queue context");
2919                 return err;
2920         }
2921         err = i40e_set_lan_rx_queue_context(hw, pf_q, &rx_ctx);
2922         if (err != I40E_SUCCESS) {
2923                 PMD_DRV_LOG(ERR, "Failed to set LAN RX queue context");
2924                 return err;
2925         }
2926
2927         rxq->qrx_tail = hw->hw_addr + I40E_QRX_TAIL(pf_q);
2928
2929         buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mp) -
2930                 RTE_PKTMBUF_HEADROOM);
2931
2932         /* Check if scattered RX needs to be used. */
2933         if ((rxq->max_pkt_len + 2 * I40E_VLAN_TAG_SIZE) > buf_size) {
2934                 dev_data->scattered_rx = 1;
2935         }
2936
2937         /* Init the RX tail register. */
2938         I40E_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
2939
2940         return 0;
2941 }
2942
2943 void
2944 i40e_dev_clear_queues(struct rte_eth_dev *dev)
2945 {
2946         uint16_t i;
2947
2948         PMD_INIT_FUNC_TRACE();
2949
2950         for (i = 0; i < dev->data->nb_tx_queues; i++) {
2951                 i40e_tx_queue_release_mbufs(dev->data->tx_queues[i]);
2952                 i40e_reset_tx_queue(dev->data->tx_queues[i]);
2953         }
2954
2955         for (i = 0; i < dev->data->nb_rx_queues; i++) {
2956                 i40e_rx_queue_release_mbufs(dev->data->rx_queues[i]);
2957                 i40e_reset_rx_queue(dev->data->rx_queues[i]);
2958         }
2959 }
2960
2961 void
2962 i40e_dev_free_queues(struct rte_eth_dev *dev)
2963 {
2964         uint16_t i;
2965
2966         PMD_INIT_FUNC_TRACE();
2967
2968         for (i = 0; i < dev->data->nb_rx_queues; i++) {
2969                 i40e_dev_rx_queue_release(dev->data->rx_queues[i]);
2970                 dev->data->rx_queues[i] = NULL;
2971         }
2972         dev->data->nb_rx_queues = 0;
2973
2974         for (i = 0; i < dev->data->nb_tx_queues; i++) {
2975                 i40e_dev_tx_queue_release(dev->data->tx_queues[i]);
2976                 dev->data->tx_queues[i] = NULL;
2977         }
2978         dev->data->nb_tx_queues = 0;
2979 }
2980
2981 #define I40E_FDIR_NUM_TX_DESC  I40E_MIN_RING_DESC
2982 #define I40E_FDIR_NUM_RX_DESC  I40E_MIN_RING_DESC
2983
2984 enum i40e_status_code
2985 i40e_fdir_setup_tx_resources(struct i40e_pf *pf)
2986 {
2987         struct i40e_tx_queue *txq;
2988         const struct rte_memzone *tz = NULL;
2989         uint32_t ring_size;
2990         struct rte_eth_dev *dev;
2991
2992         if (!pf) {
2993                 PMD_DRV_LOG(ERR, "PF is not available");
2994                 return I40E_ERR_BAD_PTR;
2995         }
2996
2997         dev = pf->adapter->eth_dev;
2998
2999         /* Allocate the TX queue data structure. */
3000         txq = rte_zmalloc_socket("i40e fdir tx queue",
3001                                   sizeof(struct i40e_tx_queue),
3002                                   RTE_CACHE_LINE_SIZE,
3003                                   SOCKET_ID_ANY);
3004         if (!txq) {
3005                 PMD_DRV_LOG(ERR, "Failed to allocate memory for "
3006                                         "tx queue structure.");
3007                 return I40E_ERR_NO_MEMORY;
3008         }
3009
3010         /* Allocate TX hardware ring descriptors. */
3011         ring_size = sizeof(struct i40e_tx_desc) * I40E_FDIR_NUM_TX_DESC;
3012         ring_size = RTE_ALIGN(ring_size, I40E_DMA_MEM_ALIGN);
3013
3014         tz = rte_eth_dma_zone_reserve(dev, "fdir_tx_ring",
3015                                       I40E_FDIR_QUEUE_ID, ring_size,
3016                                       I40E_RING_BASE_ALIGN, SOCKET_ID_ANY);
3017         if (!tz) {
3018                 i40e_dev_tx_queue_release(txq);
3019                 PMD_DRV_LOG(ERR, "Failed to reserve DMA memory for TX.");
3020                 return I40E_ERR_NO_MEMORY;
3021         }
3022
3023         txq->nb_tx_desc = I40E_FDIR_NUM_TX_DESC;
3024         txq->queue_id = I40E_FDIR_QUEUE_ID;
3025         txq->reg_idx = pf->fdir.fdir_vsi->base_queue;
3026         txq->vsi = pf->fdir.fdir_vsi;
3027
3028         txq->tx_ring_phys_addr = rte_mem_phy2mch(tz->memseg_id, tz->phys_addr);
3029         txq->tx_ring = (struct i40e_tx_desc *)tz->addr;
3030         /*
3031          * No software ring allocation or queue reset is needed for the
3032          * FDIR program queue; just mark the queue as configured.
3033          */
3034         txq->q_set = TRUE;
3035         pf->fdir.txq = txq;
3036
3037         return I40E_SUCCESS;
3038 }
3039
3040 enum i40e_status_code
3041 i40e_fdir_setup_rx_resources(struct i40e_pf *pf)
3042 {
3043         struct i40e_rx_queue *rxq;
3044         const struct rte_memzone *rz = NULL;
3045         uint32_t ring_size;
3046         struct rte_eth_dev *dev;
3047
3048         if (!pf) {
3049                 PMD_DRV_LOG(ERR, "PF is not available");
3050                 return I40E_ERR_BAD_PTR;
3051         }
3052
3053         dev = pf->adapter->eth_dev;
3054
3055         /* Allocate the RX queue data structure. */
3056         rxq = rte_zmalloc_socket("i40e fdir rx queue",
3057                                   sizeof(struct i40e_rx_queue),
3058                                   RTE_CACHE_LINE_SIZE,
3059                                   SOCKET_ID_ANY);
3060         if (!rxq) {
3061                 PMD_DRV_LOG(ERR, "Failed to allocate memory for "
3062                                         "rx queue structure.");
3063                 return I40E_ERR_NO_MEMORY;
3064         }
3065
3066         /* Allocate RX hardware ring descriptors. */
3067         ring_size = sizeof(union i40e_rx_desc) * I40E_FDIR_NUM_RX_DESC;
3068         ring_size = RTE_ALIGN(ring_size, I40E_DMA_MEM_ALIGN);
3069
3070         rz = rte_eth_dma_zone_reserve(dev, "fdir_rx_ring",
3071                                       I40E_FDIR_QUEUE_ID, ring_size,
3072                                       I40E_RING_BASE_ALIGN, SOCKET_ID_ANY);
3073         if (!rz) {
3074                 i40e_dev_rx_queue_release(rxq);
3075                 PMD_DRV_LOG(ERR, "Failed to reserve DMA memory for RX.");
3076                 return I40E_ERR_NO_MEMORY;
3077         }
3078
3079         rxq->nb_rx_desc = I40E_FDIR_NUM_RX_DESC;
3080         rxq->queue_id = I40E_FDIR_QUEUE_ID;
3081         rxq->reg_idx = pf->fdir.fdir_vsi->base_queue;
3082         rxq->vsi = pf->fdir.fdir_vsi;
3083
3084         rxq->rx_ring_phys_addr = rte_mem_phy2mch(rz->memseg_id, rz->phys_addr);
3085         rxq->rx_ring = (union i40e_rx_desc *)rz->addr;
3086
3087         /*
3088          * No software ring allocation or queue reset is needed for the
3089          * FDIR RX queue; just mark the queue as configured.
3090          */
3091         rxq->q_set = TRUE;
3092         pf->fdir.rxq = rxq;
3093
3094         return I40E_SUCCESS;
3095 }
3096
3097 void
3098 i40e_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
3099         struct rte_eth_rxq_info *qinfo)
3100 {
3101         struct i40e_rx_queue *rxq;
3102
3103         rxq = dev->data->rx_queues[queue_id];
3104
3105         qinfo->mp = rxq->mp;
3106         qinfo->scattered_rx = dev->data->scattered_rx;
3107         qinfo->nb_desc = rxq->nb_rx_desc;
3108
3109         qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
3110         qinfo->conf.rx_drop_en = rxq->drop_en;
3111         qinfo->conf.rx_deferred_start = rxq->rx_deferred_start;
3112 }
3113
3114 void
3115 i40e_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
3116         struct rte_eth_txq_info *qinfo)
3117 {
3118         struct i40e_tx_queue *txq;
3119
3120         txq = dev->data->tx_queues[queue_id];
3121
3122         qinfo->nb_desc = txq->nb_tx_desc;
3123
3124         qinfo->conf.tx_thresh.pthresh = txq->pthresh;
3125         qinfo->conf.tx_thresh.hthresh = txq->hthresh;
3126         qinfo->conf.tx_thresh.wthresh = txq->wthresh;
3127
3128         qinfo->conf.tx_free_thresh = txq->tx_free_thresh;
3129         qinfo->conf.tx_rs_thresh = txq->tx_rs_thresh;
3130         qinfo->conf.txq_flags = txq->txq_flags;
3131         qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
3132 }
3133
3134 void __attribute__((cold))
3135 i40e_set_rx_function(struct rte_eth_dev *dev)
3136 {
3137         struct i40e_adapter *ad =
3138                 I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
3139         uint16_t rx_using_sse, i;
3140         /* Vector Rx can only be used if a few configuration conditions
3141          * are met and Rx Bulk Allocation is allowed.
3142          */
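        /*
         * The receive callback is then chosen in order of preference:
         * vector scattered, scattered, vector, bulk allocation, and finally
         * the basic i40e_recv_pkts.
         */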
3143         if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
3144                 if (i40e_rx_vec_dev_conf_condition_check(dev) ||
3145                     !ad->rx_bulk_alloc_allowed) {
3146                         PMD_INIT_LOG(DEBUG, "Port[%d] doesn't meet"
3147                                      " Vector Rx preconditions",
3148                                      dev->data->port_id);
3149
3150                         ad->rx_vec_allowed = false;
3151                 }
3152                 if (ad->rx_vec_allowed) {
3153                         for (i = 0; i < dev->data->nb_rx_queues; i++) {
3154                                 struct i40e_rx_queue *rxq =
3155                                         dev->data->rx_queues[i];
3156
3157                                 if (i40e_rxq_vec_setup(rxq)) {
3158                                         ad->rx_vec_allowed = false;
3159                                         break;
3160                                 }
3161                         }
3162                 }
3163         }
3164
3165         if (dev->data->scattered_rx) {
3166                 /* Set the non-LRO scattered callback: there are Vector and
3167                  * single allocation versions.
3168                  */
3169                 if (ad->rx_vec_allowed) {
3170                         PMD_INIT_LOG(DEBUG, "Using Vector Scattered Rx "
3171                                             "callback (port=%d).",
3172                                      dev->data->port_id);
3173
3174                         dev->rx_pkt_burst = i40e_recv_scattered_pkts_vec;
3175                 } else {
3176                         PMD_INIT_LOG(DEBUG, "Using a Scattered with bulk "
3177                                            "allocation callback (port=%d).",
3178                                      dev->data->port_id);
3179                         dev->rx_pkt_burst = i40e_recv_scattered_pkts;
3180                 }
3181         /* If parameters allow we are going to choose between the following
3182          * callbacks:
3183          *    - Vector
3184          *    - Bulk Allocation
3185          *    - Single buffer allocation (the simplest one)
3186          */
3187         } else if (ad->rx_vec_allowed) {
3188                 PMD_INIT_LOG(DEBUG, "Vector rx enabled, please make sure RX "
3189                                     "burst size no less than %d (port=%d).",
3190                              RTE_I40E_DESCS_PER_LOOP,
3191                              dev->data->port_id);
3192
3193                 dev->rx_pkt_burst = i40e_recv_pkts_vec;
3194         } else if (ad->rx_bulk_alloc_allowed) {
3195                 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
3196                                     "satisfied. Rx Burst Bulk Alloc function "
3197                                     "will be used on port=%d.",
3198                              dev->data->port_id);
3199
3200                 dev->rx_pkt_burst = i40e_recv_pkts_bulk_alloc;
3201         } else {
3202                 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are not "
3203                                     "satisfied, or Scattered Rx is requested "
3204                                     "(port=%d).",
3205                              dev->data->port_id);
3206
3207                 dev->rx_pkt_burst = i40e_recv_pkts;
3208         }
3209
3210         /* Propagate information about RX function choice through all queues. */
3211         if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
3212                 rx_using_sse =
3213                         (dev->rx_pkt_burst == i40e_recv_scattered_pkts_vec ||
3214                          dev->rx_pkt_burst == i40e_recv_pkts_vec);
3215
3216                 for (i = 0; i < dev->data->nb_rx_queues; i++) {
3217                         struct i40e_rx_queue *rxq = dev->data->rx_queues[i];
3218
3219                         rxq->rx_using_sse = rx_using_sse;
3220                 }
3221         }
3222 }
3223
3224 void __attribute__((cold))
3225 i40e_set_tx_function_flag(struct rte_eth_dev *dev, struct i40e_tx_queue *txq)
3226 {
3227         struct i40e_adapter *ad =
3228                 I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
3229
3230         /* Use a simple Tx queue (no offloads, no multi segs) if possible */
3231         if (((txq->txq_flags & I40E_SIMPLE_FLAGS) == I40E_SIMPLE_FLAGS)
3232                         && (txq->tx_rs_thresh >= RTE_PMD_I40E_TX_MAX_BURST)) {
3233                 if (txq->tx_rs_thresh <= RTE_I40E_TX_MAX_FREE_BUF_SZ) {
3234                         PMD_INIT_LOG(DEBUG, "Vector tx"
3235                                      " can be enabled on this txq.");
3236
3237                 } else {
3238                         ad->tx_vec_allowed = false;
3239                 }
3240         } else {
3241                 ad->tx_simple_allowed = false;
3242         }
3243 }
3244
3245 void __attribute__((cold))
3246 i40e_set_tx_function(struct rte_eth_dev *dev)
3247 {
3248         struct i40e_adapter *ad =
3249                 I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
3250         int i;
3251
3252         if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
3253                 if (ad->tx_vec_allowed) {
3254                         for (i = 0; i < dev->data->nb_tx_queues; i++) {
3255                                 struct i40e_tx_queue *txq =
3256                                         dev->data->tx_queues[i];
3257
3258                                 if (i40e_txq_vec_setup(txq)) {
3259                                         ad->tx_vec_allowed = false;
3260                                         break;
3261                                 }
3262                         }
3263                 }
3264         }
3265
3266         if (ad->tx_simple_allowed) {
3267                 if (ad->tx_vec_allowed) {
3268                         PMD_INIT_LOG(DEBUG, "Vector tx finally be used.");
3269                         dev->tx_pkt_burst = i40e_xmit_pkts_vec;
3270                 } else {
3271                         PMD_INIT_LOG(DEBUG, "Simple tx finally be used.");
3272                         dev->tx_pkt_burst = i40e_xmit_pkts_simple;
3273                 }
3274         } else {
3275                 PMD_INIT_LOG(DEBUG, "Xmit tx finally be used.");
3276                 dev->tx_pkt_burst = i40e_xmit_pkts;
3277         }
3278 }
3279
3280 /* Stubs needed for linkage when CONFIG_RTE_I40E_INC_VECTOR is set to 'n' */
3281 int __attribute__((weak))
3282 i40e_rx_vec_dev_conf_condition_check(struct rte_eth_dev __rte_unused *dev)
3283 {
3284         return -1;
3285 }
3286
3287 uint16_t __attribute__((weak))
3288 i40e_recv_pkts_vec(
3289         void __rte_unused *rx_queue,
3290         struct rte_mbuf __rte_unused **rx_pkts,
3291         uint16_t __rte_unused nb_pkts)
3292 {
3293         return 0;
3294 }
3295
3296 uint16_t __attribute__((weak))
3297 i40e_recv_scattered_pkts_vec(
3298         void __rte_unused *rx_queue,
3299         struct rte_mbuf __rte_unused **rx_pkts,
3300         uint16_t __rte_unused nb_pkts)
3301 {
3302         return 0;
3303 }
3304
3305 int __attribute__((weak))
3306 i40e_rxq_vec_setup(struct i40e_rx_queue __rte_unused *rxq)
3307 {
3308         return -1;
3309 }
3310
3311 int __attribute__((weak))
3312 i40e_txq_vec_setup(struct i40e_tx_queue __rte_unused *txq)
3313 {
3314         return -1;
3315 }
3316
3317 void __attribute__((weak))
3318 i40e_rx_queue_release_mbufs_vec(struct i40e_rx_queue __rte_unused *rxq)
3319 {
3320         return;
3321 }
3322
3323 uint16_t __attribute__((weak))
3324 i40e_xmit_pkts_vec(void __rte_unused *tx_queue,
3325                    struct rte_mbuf __rte_unused **tx_pkts,
3326                    uint16_t __rte_unused nb_pkts)
3327 {
3328         return 0;
3329 }