Imported Upstream version 16.11.2
[deb_dpdk.git] / drivers / net / i40e / i40e_ethdev.c
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33
34 #include <stdio.h>
35 #include <errno.h>
36 #include <stdint.h>
37 #include <string.h>
38 #include <unistd.h>
39 #include <stdarg.h>
40 #include <inttypes.h>
41 #include <assert.h>
42
43 #include <rte_string_fns.h>
44 #include <rte_pci.h>
45 #include <rte_ether.h>
46 #include <rte_ethdev.h>
47 #include <rte_memzone.h>
48 #include <rte_malloc.h>
49 #include <rte_memcpy.h>
50 #include <rte_alarm.h>
51 #include <rte_dev.h>
52 #include <rte_eth_ctrl.h>
53 #include <rte_tailq.h>
54
55 #include "i40e_logs.h"
56 #include "base/i40e_prototype.h"
57 #include "base/i40e_adminq_cmd.h"
58 #include "base/i40e_type.h"
59 #include "base/i40e_register.h"
60 #include "base/i40e_dcb.h"
61 #include "i40e_ethdev.h"
62 #include "i40e_rxtx.h"
63 #include "i40e_pf.h"
64 #include "i40e_regs.h"
65
/* devargs keys controlling floating-VEB operation */
#define ETH_I40E_FLOATING_VEB_ARG       "enable_floating_veb"
#define ETH_I40E_FLOATING_VEB_LIST_ARG  "floating_veb_list"

/* Time to wait after requesting the firmware to clear PXE mode */
#define I40E_CLEAR_PXE_WAIT_MS     200

/* Maximum number of capability elements */
#define I40E_MAX_CAP_ELE_NUM       128

/* Wait count and interval (used when polling queue enable/disable) */
#define I40E_CHK_Q_ENA_COUNT       1000
#define I40E_CHK_Q_ENA_INTERVAL_US 1000

/* Maximum number of VSI */
#define I40E_MAX_NUM_VSIS          (384UL)

#define I40E_PRE_TX_Q_CFG_WAIT_US       10 /* 10 us */

/* Flow control default timer */
#define I40E_DEFAULT_PAUSE_TIME 0xFFFFU

/* Flow control default high water (register value is in kilobytes) */
#define I40E_DEFAULT_HIGH_WATER (0x1C40/1024)

/* Flow control default low water (register value is in kilobytes) */
#define I40E_DEFAULT_LOW_WATER  (0x1A40/1024)

/* Flow control enable fwd bit */
#define I40E_PRTMAC_FWD_CTRL   0x00000001

/* Receive Packet Buffer size */
#define I40E_RXPBSIZE (968 * 1024)

/* Kilobytes shift (divide/multiply by 1024 via shifting) */
#define I40E_KILOSHIFT 10

/* Receive Average Packet Size in Byte */
#define I40E_PACKET_AVERAGE_SIZE 128

/* Mask of PF interrupt causes enabled in ICR0 */
#define I40E_PFINT_ICR0_ENA_MASK ( \
                I40E_PFINT_ICR0_ENA_ECC_ERR_MASK | \
                I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK | \
                I40E_PFINT_ICR0_ENA_GRST_MASK | \
                I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK | \
                I40E_PFINT_ICR0_ENA_STORM_DETECT_MASK | \
                I40E_PFINT_ICR0_ENA_HMC_ERR_MASK | \
                I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK | \
                I40E_PFINT_ICR0_ENA_VFLR_MASK | \
                I40E_PFINT_ICR0_ENA_ADMINQ_MASK)

/* Bitmap of the rte_eth flow types this driver supports (RSS/FDIR) */
#define I40E_FLOW_TYPES ( \
        (1UL << RTE_ETH_FLOW_FRAG_IPV4) | \
        (1UL << RTE_ETH_FLOW_NONFRAG_IPV4_TCP) | \
        (1UL << RTE_ETH_FLOW_NONFRAG_IPV4_UDP) | \
        (1UL << RTE_ETH_FLOW_NONFRAG_IPV4_SCTP) | \
        (1UL << RTE_ETH_FLOW_NONFRAG_IPV4_OTHER) | \
        (1UL << RTE_ETH_FLOW_FRAG_IPV6) | \
        (1UL << RTE_ETH_FLOW_NONFRAG_IPV6_TCP) | \
        (1UL << RTE_ETH_FLOW_NONFRAG_IPV6_UDP) | \
        (1UL << RTE_ETH_FLOW_NONFRAG_IPV6_SCTP) | \
        (1UL << RTE_ETH_FLOW_NONFRAG_IPV6_OTHER) | \
        (1UL << RTE_ETH_FLOW_L2_PAYLOAD))

/* Additional timesync values: PTP clock increment per link speed. */
#define I40E_PTP_40GB_INCVAL     0x0199999999ULL
#define I40E_PTP_10GB_INCVAL     0x0333333333ULL
#define I40E_PTP_1GB_INCVAL      0x2000000000ULL
#define I40E_PRTTSYN_TSYNENA     0x80000000
#define I40E_PRTTSYN_TSYNTYPE    0x0e000000
#define I40E_CYCLECOUNTER_MASK   0xffffffffffffffffULL

/* DCB configuration bounds/defaults */
#define I40E_MAX_PERCENT            100
#define I40E_DEFAULT_DCB_APP_NUM    1
#define I40E_DEFAULT_DCB_APP_PRIO   3
140
/*
 * Input-set field bits: an abstract, hardware-independent bitmap of the
 * packet fields that can feed flow director / hashing.  Translated to the
 * raw register values (I40E_REG_INSET_*) before programming hardware.
 *
 * Fixed typo: I40E_INSET_NONE was written with 17 hex digits
 * (0x00000000000000000ULL); the value is still zero, but it now uses the
 * same 16-digit 64-bit format as every other constant in this table.
 */
#define I40E_INSET_NONE            0x0000000000000000ULL

/* bit0 ~ bit 7: L2 fields */
#define I40E_INSET_DMAC            0x0000000000000001ULL
#define I40E_INSET_SMAC            0x0000000000000002ULL
#define I40E_INSET_VLAN_OUTER      0x0000000000000004ULL
#define I40E_INSET_VLAN_INNER      0x0000000000000008ULL
#define I40E_INSET_VLAN_TUNNEL     0x0000000000000010ULL

/* bit 8 ~ bit 15: L3/L4 address and port fields */
#define I40E_INSET_IPV4_SRC        0x0000000000000100ULL
#define I40E_INSET_IPV4_DST        0x0000000000000200ULL
#define I40E_INSET_IPV6_SRC        0x0000000000000400ULL
#define I40E_INSET_IPV6_DST        0x0000000000000800ULL
#define I40E_INSET_SRC_PORT        0x0000000000001000ULL
#define I40E_INSET_DST_PORT        0x0000000000002000ULL
#define I40E_INSET_SCTP_VT         0x0000000000004000ULL

/* bit 16 ~ bit 31: IP header sub-fields and TCP flags */
#define I40E_INSET_IPV4_TOS        0x0000000000010000ULL
#define I40E_INSET_IPV4_PROTO      0x0000000000020000ULL
#define I40E_INSET_IPV4_TTL        0x0000000000040000ULL
#define I40E_INSET_IPV6_TC         0x0000000000080000ULL
#define I40E_INSET_IPV6_FLOW       0x0000000000100000ULL
#define I40E_INSET_IPV6_NEXT_HDR   0x0000000000200000ULL
#define I40E_INSET_IPV6_HOP_LIMIT  0x0000000000400000ULL
#define I40E_INSET_TCP_FLAGS       0x0000000000800000ULL

/* bit 32 ~ bit 47, tunnel fields */
#define I40E_INSET_TUNNEL_IPV4_DST       0x0000000100000000ULL
#define I40E_INSET_TUNNEL_IPV6_DST       0x0000000200000000ULL
#define I40E_INSET_TUNNEL_DMAC           0x0000000400000000ULL
#define I40E_INSET_TUNNEL_SRC_PORT       0x0000000800000000ULL
#define I40E_INSET_TUNNEL_DST_PORT       0x0000001000000000ULL
#define I40E_INSET_TUNNEL_ID             0x0000002000000000ULL

/* bit 48 ~ bit 55 */
#define I40E_INSET_LAST_ETHER_TYPE 0x0001000000000000ULL

/* bit 56 ~ bit 63, Flex Payload words */
#define I40E_INSET_FLEX_PAYLOAD_W1 0x0100000000000000ULL
#define I40E_INSET_FLEX_PAYLOAD_W2 0x0200000000000000ULL
#define I40E_INSET_FLEX_PAYLOAD_W3 0x0400000000000000ULL
#define I40E_INSET_FLEX_PAYLOAD_W4 0x0800000000000000ULL
#define I40E_INSET_FLEX_PAYLOAD_W5 0x1000000000000000ULL
#define I40E_INSET_FLEX_PAYLOAD_W6 0x2000000000000000ULL
#define I40E_INSET_FLEX_PAYLOAD_W7 0x4000000000000000ULL
#define I40E_INSET_FLEX_PAYLOAD_W8 0x8000000000000000ULL
/* Union of all eight flex-payload words */
#define I40E_INSET_FLEX_PAYLOAD \
        (I40E_INSET_FLEX_PAYLOAD_W1 | I40E_INSET_FLEX_PAYLOAD_W2 | \
        I40E_INSET_FLEX_PAYLOAD_W3 | I40E_INSET_FLEX_PAYLOAD_W4 | \
        I40E_INSET_FLEX_PAYLOAD_W5 | I40E_INSET_FLEX_PAYLOAD_W6 | \
        I40E_INSET_FLEX_PAYLOAD_W7 | I40E_INSET_FLEX_PAYLOAD_W8)
194
/**
 * Below are values for writing un-exposed registers suggested
 * by silicon experts.  These are the raw bit patterns programmed into the
 * hardware input-set registers; the abstract I40E_INSET_* bits above are
 * translated into these before being written.
 */
/* Destination MAC address */
#define I40E_REG_INSET_L2_DMAC                   0xE000000000000000ULL
/* Source MAC address */
#define I40E_REG_INSET_L2_SMAC                   0x1C00000000000000ULL
/* Outer (S-Tag) VLAN tag in the outer L2 header */
#define I40E_REG_INSET_L2_OUTER_VLAN             0x0000000004000000ULL
/* Inner (C-Tag) or single VLAN tag in the outer L2 header */
#define I40E_REG_INSET_L2_INNER_VLAN             0x0080000000000000ULL
/* Single VLAN tag in the inner L2 header */
#define I40E_REG_INSET_TUNNEL_VLAN               0x0100000000000000ULL
/* Source IPv4 address */
#define I40E_REG_INSET_L3_SRC_IP4                0x0001800000000000ULL
/* Destination IPv4 address */
#define I40E_REG_INSET_L3_DST_IP4                0x0000001800000000ULL
/* Source IPv4 address for X722 (X722 uses different field offsets) */
#define I40E_X722_REG_INSET_L3_SRC_IP4           0x0006000000000000ULL
/* Destination IPv4 address for X722 */
#define I40E_X722_REG_INSET_L3_DST_IP4           0x0000060000000000ULL
/* IPv4 Protocol for X722 */
#define I40E_X722_REG_INSET_L3_IP4_PROTO         0x0010000000000000ULL
/* IPv4 Time to Live for X722.
 * NOTE(review): identical to the PROTO value above — presumably the
 * hardware extracts one combined field; verify against the datasheet.
 */
#define I40E_X722_REG_INSET_L3_IP4_TTL           0x0010000000000000ULL
/* IPv4 Type of Service (TOS) */
#define I40E_REG_INSET_L3_IP4_TOS                0x0040000000000000ULL
/* IPv4 Protocol */
#define I40E_REG_INSET_L3_IP4_PROTO              0x0004000000000000ULL
/* IPv4 Time to Live (same register bits as PROTO — see note above) */
#define I40E_REG_INSET_L3_IP4_TTL                0x0004000000000000ULL
/* Source IPv6 address */
#define I40E_REG_INSET_L3_SRC_IP6                0x0007F80000000000ULL
/* Destination IPv6 address */
#define I40E_REG_INSET_L3_DST_IP6                0x000007F800000000ULL
/* IPv6 Traffic Class (TC) */
#define I40E_REG_INSET_L3_IP6_TC                 0x0040000000000000ULL
/* IPv6 Next Header */
#define I40E_REG_INSET_L3_IP6_NEXT_HDR           0x0008000000000000ULL
/* IPv6 Hop Limit (same register bits as NEXT_HDR) */
#define I40E_REG_INSET_L3_IP6_HOP_LIMIT          0x0008000000000000ULL
/* Source L4 port */
#define I40E_REG_INSET_L4_SRC_PORT               0x0000000400000000ULL
/* Destination L4 port */
#define I40E_REG_INSET_L4_DST_PORT               0x0000000200000000ULL
/* SCTP verification tag */
#define I40E_REG_INSET_L4_SCTP_VERIFICATION_TAG  0x0000000180000000ULL
/* Inner destination MAC address (MAC-in-UDP/MAC-in-GRE) */
#define I40E_REG_INSET_TUNNEL_L2_INNER_DST_MAC   0x0000000001C00000ULL
/* Source port of tunneling UDP */
#define I40E_REG_INSET_TUNNEL_L4_UDP_SRC_PORT    0x0000000000200000ULL
/* Destination port of tunneling UDP */
#define I40E_REG_INSET_TUNNEL_L4_UDP_DST_PORT    0x0000000000100000ULL
/* UDP Tunneling ID, NVGRE/GRE key */
#define I40E_REG_INSET_TUNNEL_ID                 0x00000000000C0000ULL
/* Last ether type */
#define I40E_REG_INSET_LAST_ETHER_TYPE           0x0000000000004000ULL
/* Tunneling outer destination IPv4 address */
#define I40E_REG_INSET_TUNNEL_L3_DST_IP4         0x00000000000000C0ULL
/* Tunneling outer destination IPv6 address */
#define I40E_REG_INSET_TUNNEL_L3_DST_IP6         0x0000000000003FC0ULL
/* 1st word of flex payload */
#define I40E_REG_INSET_FLEX_PAYLOAD_WORD1        0x0000000000002000ULL
/* 2nd word of flex payload */
#define I40E_REG_INSET_FLEX_PAYLOAD_WORD2        0x0000000000001000ULL
/* 3rd word of flex payload */
#define I40E_REG_INSET_FLEX_PAYLOAD_WORD3        0x0000000000000800ULL
/* 4th word of flex payload */
#define I40E_REG_INSET_FLEX_PAYLOAD_WORD4        0x0000000000000400ULL
/* 5th word of flex payload */
#define I40E_REG_INSET_FLEX_PAYLOAD_WORD5        0x0000000000000200ULL
/* 6th word of flex payload */
#define I40E_REG_INSET_FLEX_PAYLOAD_WORD6        0x0000000000000100ULL
/* 7th word of flex payload */
#define I40E_REG_INSET_FLEX_PAYLOAD_WORD7        0x0000000000000080ULL
/* 8th word of flex payload */
#define I40E_REG_INSET_FLEX_PAYLOAD_WORD8        0x0000000000000040ULL
/* all 8 words flex payload */
#define I40E_REG_INSET_FLEX_PAYLOAD_WORDS        0x0000000000003FC0ULL
#define I40E_REG_INSET_MASK_DEFAULT              0x0000000000000000ULL

/* Direction selectors for input-set <-> register translation */
#define I40E_TRANSLATE_INSET 0
#define I40E_TRANSLATE_REG   1

/* Field masks used when programming input-set mask registers */
#define I40E_INSET_IPV4_TOS_MASK        0x0009FF00UL
#define I40E_INSET_IPv4_TTL_MASK        0x000D00FFUL
#define I40E_INSET_IPV4_PROTO_MASK      0x000DFF00UL
#define I40E_INSET_IPV6_TC_MASK         0x0009F00FUL
#define I40E_INSET_IPV6_HOP_LIMIT_MASK  0x000CFF00UL
#define I40E_INSET_IPV6_NEXT_HDR_MASK   0x000C00FFUL

/* Per-index L2 tag control register and its ethertype field */
#define I40E_GL_SWT_L2TAGCTRL(_i)             (0x001C0A70 + ((_i) * 4))
#define I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_SHIFT 16
#define I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_MASK  \
        I40E_MASK(0xFFFF, I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_SHIFT)

/* PCI offset for querying capability */
#define PCI_DEV_CAP_REG            0xA4
/* PCI offset for enabling/disabling Extended Tag */
#define PCI_DEV_CTRL_REG           0xA8
/* Bit mask of Extended Tag capability */
#define PCI_DEV_CAP_EXT_TAG_MASK   0x20
/* Bit shift of Extended Tag enable/disable */
#define PCI_DEV_CTRL_EXT_TAG_SHIFT 8
/* Bit mask of Extended Tag enable/disable */
#define PCI_DEV_CTRL_EXT_TAG_MASK  (1 << PCI_DEV_CTRL_EXT_TAG_SHIFT)
302
/* Forward declarations for all static functions in this file.  Grouped by
 * the area of the driver they implement; definitions follow further down.
 */

/* --- Device lifecycle and basic control --- */
static int eth_i40e_dev_init(struct rte_eth_dev *eth_dev);
static int eth_i40e_dev_uninit(struct rte_eth_dev *eth_dev);
static int i40e_dev_configure(struct rte_eth_dev *dev);
static int i40e_dev_start(struct rte_eth_dev *dev);
static void i40e_dev_stop(struct rte_eth_dev *dev);
static void i40e_dev_close(struct rte_eth_dev *dev);
static void i40e_dev_promiscuous_enable(struct rte_eth_dev *dev);
static void i40e_dev_promiscuous_disable(struct rte_eth_dev *dev);
static void i40e_dev_allmulticast_enable(struct rte_eth_dev *dev);
static void i40e_dev_allmulticast_disable(struct rte_eth_dev *dev);
static int i40e_dev_set_link_up(struct rte_eth_dev *dev);
static int i40e_dev_set_link_down(struct rte_eth_dev *dev);

/* --- Statistics --- */
static void i40e_dev_stats_get(struct rte_eth_dev *dev,
                               struct rte_eth_stats *stats);
static int i40e_dev_xstats_get(struct rte_eth_dev *dev,
                               struct rte_eth_xstat *xstats, unsigned n);
static int i40e_dev_xstats_get_names(struct rte_eth_dev *dev,
                                     struct rte_eth_xstat_name *xstats_names,
                                     unsigned limit);
static void i40e_dev_stats_reset(struct rte_eth_dev *dev);
static int i40e_dev_queue_stats_mapping_set(struct rte_eth_dev *dev,
                                            uint16_t queue_id,
                                            uint8_t stat_idx,
                                            uint8_t is_rx);
static void i40e_dev_info_get(struct rte_eth_dev *dev,
                              struct rte_eth_dev_info *dev_info);

/* --- VLAN handling --- */
static int i40e_vlan_filter_set(struct rte_eth_dev *dev,
                                uint16_t vlan_id,
                                int on);
static int i40e_vlan_tpid_set(struct rte_eth_dev *dev,
                              enum rte_vlan_type vlan_type,
                              uint16_t tpid);
static void i40e_vlan_offload_set(struct rte_eth_dev *dev, int mask);
static void i40e_vlan_strip_queue_set(struct rte_eth_dev *dev,
                                      uint16_t queue,
                                      int on);
static int i40e_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid, int on);

/* --- LED and flow control --- */
static int i40e_dev_led_on(struct rte_eth_dev *dev);
static int i40e_dev_led_off(struct rte_eth_dev *dev);
static int i40e_flow_ctrl_get(struct rte_eth_dev *dev,
                              struct rte_eth_fc_conf *fc_conf);
static int i40e_flow_ctrl_set(struct rte_eth_dev *dev,
                              struct rte_eth_fc_conf *fc_conf);
static int i40e_priority_flow_ctrl_set(struct rte_eth_dev *dev,
                                       struct rte_eth_pfc_conf *pfc_conf);

/* --- MAC address and RSS redirection table --- */
static void i40e_macaddr_add(struct rte_eth_dev *dev,
                          struct ether_addr *mac_addr,
                          uint32_t index,
                          uint32_t pool);
static void i40e_macaddr_remove(struct rte_eth_dev *dev, uint32_t index);
static int i40e_dev_rss_reta_update(struct rte_eth_dev *dev,
                                    struct rte_eth_rss_reta_entry64 *reta_conf,
                                    uint16_t reta_size);
static int i40e_dev_rss_reta_query(struct rte_eth_dev *dev,
                                   struct rte_eth_rss_reta_entry64 *reta_conf,
                                   uint16_t reta_size);

/* --- PF/VSI setup, DCB, statistics registers, interrupts --- */
static int i40e_get_cap(struct i40e_hw *hw);
static int i40e_pf_parameter_init(struct rte_eth_dev *dev);
static int i40e_pf_setup(struct i40e_pf *pf);
static int i40e_dev_rxtx_init(struct i40e_pf *pf);
static int i40e_vmdq_setup(struct rte_eth_dev *dev);
static int i40e_dcb_init_configure(struct rte_eth_dev *dev, bool sw_dcb);
static int i40e_dcb_setup(struct rte_eth_dev *dev);
static void i40e_stat_update_32(struct i40e_hw *hw, uint32_t reg,
                bool offset_loaded, uint64_t *offset, uint64_t *stat);
static void i40e_stat_update_48(struct i40e_hw *hw,
                               uint32_t hireg,
                               uint32_t loreg,
                               bool offset_loaded,
                               uint64_t *offset,
                               uint64_t *stat);
static void i40e_pf_config_irq0(struct i40e_hw *hw, bool no_queue);
static void i40e_dev_interrupt_handler(
                __rte_unused struct rte_intr_handle *handle, void *param);

/* --- Resource pool management --- */
static int i40e_res_pool_init(struct i40e_res_pool_info *pool,
                                uint32_t base, uint32_t num);
static void i40e_res_pool_destroy(struct i40e_res_pool_info *pool);
static int i40e_res_pool_free(struct i40e_res_pool_info *pool,
                        uint32_t base);
static int i40e_res_pool_alloc(struct i40e_res_pool_info *pool,
                        uint16_t num);

/* --- VEB/VSI, MAC/VLAN filter helpers, RSS and tunnels --- */
static int i40e_dev_init_vlan(struct rte_eth_dev *dev);
static int i40e_veb_release(struct i40e_veb *veb);
static struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf,
                                                struct i40e_vsi *vsi);
static int i40e_pf_config_mq_rx(struct i40e_pf *pf);
static int i40e_vsi_config_double_vlan(struct i40e_vsi *vsi, int on);
static inline int i40e_find_all_vlan_for_mac(struct i40e_vsi *vsi,
                                             struct i40e_macvlan_filter *mv_f,
                                             int num,
                                             struct ether_addr *addr);
static inline int i40e_find_all_mac_for_vlan(struct i40e_vsi *vsi,
                                             struct i40e_macvlan_filter *mv_f,
                                             int num,
                                             uint16_t vlan);
static int i40e_vsi_remove_all_macvlan_filter(struct i40e_vsi *vsi);
static int i40e_dev_rss_hash_update(struct rte_eth_dev *dev,
                                    struct rte_eth_rss_conf *rss_conf);
static int i40e_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
                                      struct rte_eth_rss_conf *rss_conf);
static int i40e_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
                                        struct rte_eth_udp_tunnel *udp_tunnel);
static int i40e_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
                                        struct rte_eth_udp_tunnel *udp_tunnel);

/* --- Filtering (input set, ethertype, generic filter_ctrl) --- */
static void i40e_filter_input_set_init(struct i40e_pf *pf);
static int i40e_ethertype_filter_set(struct i40e_pf *pf,
                        struct rte_eth_ethertype_filter *filter,
                        bool add);
static int i40e_ethertype_filter_handle(struct rte_eth_dev *dev,
                                enum rte_filter_op filter_op,
                                void *arg);
static int i40e_dev_filter_ctrl(struct rte_eth_dev *dev,
                                enum rte_filter_type filter_type,
                                enum rte_filter_op filter_op,
                                void *arg);
static int i40e_dev_get_dcb_info(struct rte_eth_dev *dev,
                                  struct rte_eth_dcb_info *dcb_info);
static int i40e_dev_sync_phy_type(struct i40e_hw *hw);
static void i40e_configure_registers(struct i40e_hw *hw);
static void i40e_hw_init(struct rte_eth_dev *dev);
static int i40e_config_qinq(struct i40e_hw *hw, struct i40e_vsi *vsi);

/* --- Port mirroring --- */
static int i40e_mirror_rule_set(struct rte_eth_dev *dev,
                        struct rte_eth_mirror_conf *mirror_conf,
                        uint8_t sw_id, uint8_t on);
static int i40e_mirror_rule_reset(struct rte_eth_dev *dev, uint8_t sw_id);

/* --- IEEE 1588 timesync --- */
static int i40e_timesync_enable(struct rte_eth_dev *dev);
static int i40e_timesync_disable(struct rte_eth_dev *dev);
static int i40e_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
                                           struct timespec *timestamp,
                                           uint32_t flags);
static int i40e_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
                                           struct timespec *timestamp);
static void i40e_read_stats_registers(struct i40e_pf *pf, struct i40e_hw *hw);

static int i40e_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta);

static int i40e_timesync_read_time(struct rte_eth_dev *dev,
                                   struct timespec *timestamp);
static int i40e_timesync_write_time(struct rte_eth_dev *dev,
                                    const struct timespec *timestamp);

/* --- Rx queue interrupt control, register/EEPROM dumps, misc --- */
static int i40e_dev_rx_queue_intr_enable(struct rte_eth_dev *dev,
                                         uint16_t queue_id);
static int i40e_dev_rx_queue_intr_disable(struct rte_eth_dev *dev,
                                          uint16_t queue_id);

static int i40e_get_regs(struct rte_eth_dev *dev,
                         struct rte_dev_reg_info *regs);

static int i40e_get_eeprom_length(struct rte_eth_dev *dev);

static int i40e_get_eeprom(struct rte_eth_dev *dev,
                           struct rte_dev_eeprom_info *eeprom);

static void i40e_set_default_mac_addr(struct rte_eth_dev *dev,
                                      struct ether_addr *mac_addr);

static int i40e_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
static void i40e_notify_all_vfs_link_status(struct rte_eth_dev *dev);
464
/* PCI vendor/device ID pairs this PMD binds to (XL710/X710/X722 family).
 * The zeroed entry terminates the table for the PCI bus scan.
 */
static const struct rte_pci_id pci_id_i40e_map[] = {
        { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_XL710) },
        { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QEMU) },
        { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_B) },
        { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_C) },
        { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_A) },
        { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_B) },
        { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_C) },
        { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T) },
        { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_20G_KR2) },
        { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_20G_KR2_A) },
        { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T4) },
        { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_25G_B) },
        { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_25G_SFP28) },
        { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_X722_A0) },
        { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_X722) },
        { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_X722) },
        { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_X722) },
        { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_1G_BASE_T_X722) },
        { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T_X722) },
        { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_I_X722) },
        { .vendor_id = 0, /* sentinel */ },
};
488
/* ethdev callback table: maps the generic rte_ethdev API onto this PMD's
 * implementations.  Note xstats_reset intentionally reuses
 * i40e_dev_stats_reset — resetting basic stats clears the extended ones too.
 * Rx/Tx queue and info callbacks come from i40e_rxtx.c.
 */
static const struct eth_dev_ops i40e_eth_dev_ops = {
        .dev_configure                = i40e_dev_configure,
        .dev_start                    = i40e_dev_start,
        .dev_stop                     = i40e_dev_stop,
        .dev_close                    = i40e_dev_close,
        .promiscuous_enable           = i40e_dev_promiscuous_enable,
        .promiscuous_disable          = i40e_dev_promiscuous_disable,
        .allmulticast_enable          = i40e_dev_allmulticast_enable,
        .allmulticast_disable         = i40e_dev_allmulticast_disable,
        .dev_set_link_up              = i40e_dev_set_link_up,
        .dev_set_link_down            = i40e_dev_set_link_down,
        .link_update                  = i40e_dev_link_update,
        .stats_get                    = i40e_dev_stats_get,
        .xstats_get                   = i40e_dev_xstats_get,
        .xstats_get_names             = i40e_dev_xstats_get_names,
        .stats_reset                  = i40e_dev_stats_reset,
        .xstats_reset                 = i40e_dev_stats_reset,
        .queue_stats_mapping_set      = i40e_dev_queue_stats_mapping_set,
        .dev_infos_get                = i40e_dev_info_get,
        .dev_supported_ptypes_get     = i40e_dev_supported_ptypes_get,
        .vlan_filter_set              = i40e_vlan_filter_set,
        .vlan_tpid_set                = i40e_vlan_tpid_set,
        .vlan_offload_set             = i40e_vlan_offload_set,
        .vlan_strip_queue_set         = i40e_vlan_strip_queue_set,
        .vlan_pvid_set                = i40e_vlan_pvid_set,
        .rx_queue_start               = i40e_dev_rx_queue_start,
        .rx_queue_stop                = i40e_dev_rx_queue_stop,
        .tx_queue_start               = i40e_dev_tx_queue_start,
        .tx_queue_stop                = i40e_dev_tx_queue_stop,
        .rx_queue_setup               = i40e_dev_rx_queue_setup,
        .rx_queue_intr_enable         = i40e_dev_rx_queue_intr_enable,
        .rx_queue_intr_disable        = i40e_dev_rx_queue_intr_disable,
        .rx_queue_release             = i40e_dev_rx_queue_release,
        .rx_queue_count               = i40e_dev_rx_queue_count,
        .rx_descriptor_done           = i40e_dev_rx_descriptor_done,
        .tx_queue_setup               = i40e_dev_tx_queue_setup,
        .tx_queue_release             = i40e_dev_tx_queue_release,
        .dev_led_on                   = i40e_dev_led_on,
        .dev_led_off                  = i40e_dev_led_off,
        .flow_ctrl_get                = i40e_flow_ctrl_get,
        .flow_ctrl_set                = i40e_flow_ctrl_set,
        .priority_flow_ctrl_set       = i40e_priority_flow_ctrl_set,
        .mac_addr_add                 = i40e_macaddr_add,
        .mac_addr_remove              = i40e_macaddr_remove,
        .reta_update                  = i40e_dev_rss_reta_update,
        .reta_query                   = i40e_dev_rss_reta_query,
        .rss_hash_update              = i40e_dev_rss_hash_update,
        .rss_hash_conf_get            = i40e_dev_rss_hash_conf_get,
        .udp_tunnel_port_add          = i40e_dev_udp_tunnel_port_add,
        .udp_tunnel_port_del          = i40e_dev_udp_tunnel_port_del,
        .filter_ctrl                  = i40e_dev_filter_ctrl,
        .rxq_info_get                 = i40e_rxq_info_get,
        .txq_info_get                 = i40e_txq_info_get,
        .mirror_rule_set              = i40e_mirror_rule_set,
        .mirror_rule_reset            = i40e_mirror_rule_reset,
        .timesync_enable              = i40e_timesync_enable,
        .timesync_disable             = i40e_timesync_disable,
        .timesync_read_rx_timestamp   = i40e_timesync_read_rx_timestamp,
        .timesync_read_tx_timestamp   = i40e_timesync_read_tx_timestamp,
        .get_dcb_info                 = i40e_dev_get_dcb_info,
        .timesync_adjust_time         = i40e_timesync_adjust_time,
        .timesync_read_time           = i40e_timesync_read_time,
        .timesync_write_time          = i40e_timesync_write_time,
        .get_reg                      = i40e_get_regs,
        .get_eeprom_length            = i40e_get_eeprom_length,
        .get_eeprom                   = i40e_get_eeprom,
        .mac_addr_set                 = i40e_set_default_mac_addr,
        .mtu_set                      = i40e_dev_mtu_set,
};
558
/* store statistics names and its offset in stats structure */
struct rte_i40e_xstats_name_off {
        char name[RTE_ETH_XSTATS_NAME_SIZE]; /* xstat name exposed to apps */
        unsigned offset;                     /* byte offset into the stats struct */
};

/* Per-VSI extended statistics: each entry names an xstat and records where
 * its counter lives inside struct i40e_eth_stats.
 */
static const struct rte_i40e_xstats_name_off rte_i40e_stats_strings[] = {
        {"rx_unicast_packets", offsetof(struct i40e_eth_stats, rx_unicast)},
        {"rx_multicast_packets", offsetof(struct i40e_eth_stats, rx_multicast)},
        {"rx_broadcast_packets", offsetof(struct i40e_eth_stats, rx_broadcast)},
        {"rx_dropped", offsetof(struct i40e_eth_stats, rx_discards)},
        {"rx_unknown_protocol_packets", offsetof(struct i40e_eth_stats,
                rx_unknown_protocol)},
        {"tx_unicast_packets", offsetof(struct i40e_eth_stats, tx_unicast)},
        {"tx_multicast_packets", offsetof(struct i40e_eth_stats, tx_multicast)},
        {"tx_broadcast_packets", offsetof(struct i40e_eth_stats, tx_broadcast)},
        {"tx_dropped", offsetof(struct i40e_eth_stats, tx_discards)},
};

/* Number of entries in rte_i40e_stats_strings */
#define I40E_NB_ETH_XSTATS (sizeof(rte_i40e_stats_strings) / \
                sizeof(rte_i40e_stats_strings[0]))
580
/* Port-level extended statistics: xstat name -> offset into struct i40e_hw_port_stats */
static const struct rte_i40e_xstats_name_off rte_i40e_hw_port_strings[] = {
	{"tx_link_down_dropped", offsetof(struct i40e_hw_port_stats,
		tx_dropped_link_down)},
	{"rx_crc_errors", offsetof(struct i40e_hw_port_stats, crc_errors)},
	{"rx_illegal_byte_errors", offsetof(struct i40e_hw_port_stats,
		illegal_bytes)},
	{"rx_error_bytes", offsetof(struct i40e_hw_port_stats, error_bytes)},
	{"mac_local_errors", offsetof(struct i40e_hw_port_stats,
		mac_local_faults)},
	{"mac_remote_errors", offsetof(struct i40e_hw_port_stats,
		mac_remote_faults)},
	{"rx_length_errors", offsetof(struct i40e_hw_port_stats,
		rx_length_errors)},
	{"tx_xon_packets", offsetof(struct i40e_hw_port_stats, link_xon_tx)},
	{"rx_xon_packets", offsetof(struct i40e_hw_port_stats, link_xon_rx)},
	{"tx_xoff_packets", offsetof(struct i40e_hw_port_stats, link_xoff_tx)},
	{"rx_xoff_packets", offsetof(struct i40e_hw_port_stats, link_xoff_rx)},
	{"rx_size_64_packets", offsetof(struct i40e_hw_port_stats, rx_size_64)},
	{"rx_size_65_to_127_packets", offsetof(struct i40e_hw_port_stats,
		rx_size_127)},
	{"rx_size_128_to_255_packets", offsetof(struct i40e_hw_port_stats,
		rx_size_255)},
	{"rx_size_256_to_511_packets", offsetof(struct i40e_hw_port_stats,
		rx_size_511)},
	{"rx_size_512_to_1023_packets", offsetof(struct i40e_hw_port_stats,
		rx_size_1023)},
	{"rx_size_1024_to_1522_packets", offsetof(struct i40e_hw_port_stats,
		rx_size_1522)},
	{"rx_size_1523_to_max_packets", offsetof(struct i40e_hw_port_stats,
		rx_size_big)},
	{"rx_undersized_errors", offsetof(struct i40e_hw_port_stats,
		rx_undersize)},
	{"rx_oversize_errors", offsetof(struct i40e_hw_port_stats,
		rx_oversize)},
	{"rx_mac_short_dropped", offsetof(struct i40e_hw_port_stats,
		mac_short_packet_dropped)},
	{"rx_fragmented_errors", offsetof(struct i40e_hw_port_stats,
		rx_fragments)},
	{"rx_jabber_errors", offsetof(struct i40e_hw_port_stats, rx_jabber)},
	{"tx_size_64_packets", offsetof(struct i40e_hw_port_stats, tx_size_64)},
	{"tx_size_65_to_127_packets", offsetof(struct i40e_hw_port_stats,
		tx_size_127)},
	{"tx_size_128_to_255_packets", offsetof(struct i40e_hw_port_stats,
		tx_size_255)},
	{"tx_size_256_to_511_packets", offsetof(struct i40e_hw_port_stats,
		tx_size_511)},
	{"tx_size_512_to_1023_packets", offsetof(struct i40e_hw_port_stats,
		tx_size_1023)},
	{"tx_size_1024_to_1522_packets", offsetof(struct i40e_hw_port_stats,
		tx_size_1522)},
	{"tx_size_1523_to_max_packets", offsetof(struct i40e_hw_port_stats,
		tx_size_big)},
	{"rx_flow_director_atr_match_packets",
		offsetof(struct i40e_hw_port_stats, fd_atr_match)},
	{"rx_flow_director_sb_match_packets",
		offsetof(struct i40e_hw_port_stats, fd_sb_match)},
	{"tx_low_power_idle_status", offsetof(struct i40e_hw_port_stats,
		tx_lpi_status)},
	{"rx_low_power_idle_status", offsetof(struct i40e_hw_port_stats,
		rx_lpi_status)},
	{"tx_low_power_idle_count", offsetof(struct i40e_hw_port_stats,
		tx_lpi_count)},
	{"rx_low_power_idle_count", offsetof(struct i40e_hw_port_stats,
		rx_lpi_count)},
};

/* Number of entries in rte_i40e_hw_port_strings[] */
#define I40E_NB_HW_PORT_XSTATS (sizeof(rte_i40e_hw_port_strings) / \
		sizeof(rte_i40e_hw_port_strings[0]))
649
/* Per-priority Rx flow-control xstats (offsets into struct i40e_hw_port_stats) */
static const struct rte_i40e_xstats_name_off rte_i40e_rxq_prio_strings[] = {
	{"xon_packets", offsetof(struct i40e_hw_port_stats,
		priority_xon_rx)},
	{"xoff_packets", offsetof(struct i40e_hw_port_stats,
		priority_xoff_rx)},
};

/* Number of entries in rte_i40e_rxq_prio_strings[] */
#define I40E_NB_RXQ_PRIO_XSTATS (sizeof(rte_i40e_rxq_prio_strings) / \
		sizeof(rte_i40e_rxq_prio_strings[0]))

/* Per-priority Tx flow-control xstats (offsets into struct i40e_hw_port_stats) */
static const struct rte_i40e_xstats_name_off rte_i40e_txq_prio_strings[] = {
	{"xon_packets", offsetof(struct i40e_hw_port_stats,
		priority_xon_tx)},
	{"xoff_packets", offsetof(struct i40e_hw_port_stats,
		priority_xoff_tx)},
	{"xon_to_xoff_packets", offsetof(struct i40e_hw_port_stats,
		priority_xon_2_xoff)},
};

/* Number of entries in rte_i40e_txq_prio_strings[] */
#define I40E_NB_TXQ_PRIO_XSTATS (sizeof(rte_i40e_txq_prio_strings) / \
		sizeof(rte_i40e_txq_prio_strings[0]))
671
/*
 * PCI ethernet driver descriptor for the i40e PF PMD: device ID table,
 * init/uninit hooks and the size of the per-port private data area.
 */
static struct eth_driver rte_i40e_pmd = {
	.pci_drv = {
		.id_table = pci_id_i40e_map,
		/* needs BAR mapping, supports link-state interrupts, detachable */
		.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
			RTE_PCI_DRV_DETACHABLE,
		.probe = rte_eth_dev_pci_probe,
		.remove = rte_eth_dev_pci_remove,
	},
	.eth_dev_init = eth_i40e_dev_init,
	.eth_dev_uninit = eth_i40e_dev_uninit,
	.dev_private_size = sizeof(struct i40e_adapter),
};
684
685 static inline int
686 rte_i40e_dev_atomic_read_link_status(struct rte_eth_dev *dev,
687                                      struct rte_eth_link *link)
688 {
689         struct rte_eth_link *dst = link;
690         struct rte_eth_link *src = &(dev->data->dev_link);
691
692         if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
693                                         *(uint64_t *)src) == 0)
694                 return -1;
695
696         return 0;
697 }
698
699 static inline int
700 rte_i40e_dev_atomic_write_link_status(struct rte_eth_dev *dev,
701                                       struct rte_eth_link *link)
702 {
703         struct rte_eth_link *dst = &(dev->data->dev_link);
704         struct rte_eth_link *src = link;
705
706         if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
707                                         *(uint64_t *)src) == 0)
708                 return -1;
709
710         return 0;
711 }
712
/* Register the PMD and its PCI ID table with the EAL driver framework */
RTE_PMD_REGISTER_PCI(net_i40e, rte_i40e_pmd.pci_drv);
RTE_PMD_REGISTER_PCI_TABLE(net_i40e, pci_id_i40e_map);

/* Fallback register-offset macros used by i40e_GLQF_reg_init() below,
 * for base-code headers that do not define them.
 */
#ifndef I40E_GLQF_ORT
#define I40E_GLQF_ORT(_i)    (0x00268900 + ((_i) * 4))
#endif
#ifndef I40E_GLQF_PIT
#define I40E_GLQF_PIT(_i)    (0x00268C80 + ((_i) * 4))
#endif
722
/*
 * Program the GLQF ORT/PIT registers that NVM should have initialized.
 * NOTE(review): the register indices and values are hardware-specific
 * magic constants carried as-is from the workaround; do not change them
 * without the datasheet.
 */
static inline void i40e_GLQF_reg_init(struct i40e_hw *hw)
{
	/*
	 * Initialize registers for flexible payload, which should be set by NVM.
	 * This should be removed from code once it is fixed in NVM.
	 */
	I40E_WRITE_REG(hw, I40E_GLQF_ORT(18), 0x00000030);
	I40E_WRITE_REG(hw, I40E_GLQF_ORT(19), 0x00000030);
	I40E_WRITE_REG(hw, I40E_GLQF_ORT(26), 0x0000002B);
	I40E_WRITE_REG(hw, I40E_GLQF_ORT(30), 0x0000002B);
	I40E_WRITE_REG(hw, I40E_GLQF_ORT(33), 0x000000E0);
	I40E_WRITE_REG(hw, I40E_GLQF_ORT(34), 0x000000E3);
	I40E_WRITE_REG(hw, I40E_GLQF_ORT(35), 0x000000E6);
	I40E_WRITE_REG(hw, I40E_GLQF_ORT(20), 0x00000031);
	I40E_WRITE_REG(hw, I40E_GLQF_ORT(23), 0x00000031);
	I40E_WRITE_REG(hw, I40E_GLQF_ORT(63), 0x0000002D);
	I40E_WRITE_REG(hw, I40E_GLQF_PIT(16), 0x00007480);
	I40E_WRITE_REG(hw, I40E_GLQF_PIT(17), 0x00007440);

	/* Initialize registers for parsing packet type of QinQ */
	I40E_WRITE_REG(hw, I40E_GLQF_ORT(40), 0x00000029);
	I40E_WRITE_REG(hw, I40E_GLQF_PIT(9), 0x00009420);
}
746
747 #define I40E_FLOW_CONTROL_ETHERTYPE  0x8808
748
749 /*
750  * Add a ethertype filter to drop all flow control frames transmitted
751  * from VSIs.
752 */
753 static void
754 i40e_add_tx_flow_control_drop_filter(struct i40e_pf *pf)
755 {
756         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
757         uint16_t flags = I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC |
758                         I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP |
759                         I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TX;
760         int ret;
761
762         ret = i40e_aq_add_rem_control_packet_filter(hw, NULL,
763                                 I40E_FLOW_CONTROL_ETHERTYPE, flags,
764                                 pf->main_vsi_seid, 0,
765                                 TRUE, NULL, NULL);
766         if (ret)
767                 PMD_INIT_LOG(ERR, "Failed to add filter to drop flow control "
768                                   " frames from VSIs.");
769 }
770
771 static int
772 floating_veb_list_handler(__rte_unused const char *key,
773                           const char *floating_veb_value,
774                           void *opaque)
775 {
776         int idx = 0;
777         unsigned int count = 0;
778         char *end = NULL;
779         int min, max;
780         bool *vf_floating_veb = opaque;
781
782         while (isblank(*floating_veb_value))
783                 floating_veb_value++;
784
785         /* Reset floating VEB configuration for VFs */
786         for (idx = 0; idx < I40E_MAX_VF; idx++)
787                 vf_floating_veb[idx] = false;
788
789         min = I40E_MAX_VF;
790         do {
791                 while (isblank(*floating_veb_value))
792                         floating_veb_value++;
793                 if (*floating_veb_value == '\0')
794                         return -1;
795                 errno = 0;
796                 idx = strtoul(floating_veb_value, &end, 10);
797                 if (errno || end == NULL)
798                         return -1;
799                 while (isblank(*end))
800                         end++;
801                 if (*end == '-') {
802                         min = idx;
803                 } else if ((*end == ';') || (*end == '\0')) {
804                         max = idx;
805                         if (min == I40E_MAX_VF)
806                                 min = idx;
807                         if (max >= I40E_MAX_VF)
808                                 max = I40E_MAX_VF - 1;
809                         for (idx = min; idx <= max; idx++) {
810                                 vf_floating_veb[idx] = true;
811                                 count++;
812                         }
813                         min = I40E_MAX_VF;
814                 } else {
815                         return -1;
816                 }
817                 floating_veb_value = end + 1;
818         } while (*end != '\0');
819
820         if (count == 0)
821                 return -1;
822
823         return 0;
824 }
825
/*
 * Configure which VFs attach to the floating VEB, driven by the
 * ETH_I40E_FLOATING_VEB_LIST_ARG devargs key. Does nothing when the
 * floating VEB itself is disabled. On any parse failure the default
 * (all VFs on the floating VEB) is left in place.
 */
static void
config_vf_floating_veb(struct rte_devargs *devargs,
		       uint16_t floating_veb,
		       bool *vf_floating_veb)
{
	struct rte_kvargs *kvlist;
	int i;
	const char *floating_veb_list = ETH_I40E_FLOATING_VEB_LIST_ARG;

	if (!floating_veb)
		return;
	/* All the VFs attach to the floating VEB by default
	 * when the floating VEB is enabled.
	 */
	for (i = 0; i < I40E_MAX_VF; i++)
		vf_floating_veb[i] = true;

	if (devargs == NULL)
		return;

	kvlist = rte_kvargs_parse(devargs->args, NULL);
	if (kvlist == NULL)
		return;

	if (!rte_kvargs_count(kvlist, floating_veb_list)) {
		rte_kvargs_free(kvlist);
		return;
	}
	/* When the floating_veb_list parameter exists, all the VFs
	 * will attach to the legacy VEB firstly, then configure VFs
	 * to the floating VEB according to the floating_veb_list.
	 */
	if (rte_kvargs_process(kvlist, floating_veb_list,
			       floating_veb_list_handler,
			       vf_floating_veb) < 0) {
		rte_kvargs_free(kvlist);
		return;
	}
	rte_kvargs_free(kvlist);
}
866
867 static int
868 i40e_check_floating_handler(__rte_unused const char *key,
869                             const char *value,
870                             __rte_unused void *opaque)
871 {
872         if (strcmp(value, "1"))
873                 return -1;
874
875         return 0;
876 }
877
878 static int
879 is_floating_veb_supported(struct rte_devargs *devargs)
880 {
881         struct rte_kvargs *kvlist;
882         const char *floating_veb_key = ETH_I40E_FLOATING_VEB_ARG;
883
884         if (devargs == NULL)
885                 return 0;
886
887         kvlist = rte_kvargs_parse(devargs->args, NULL);
888         if (kvlist == NULL)
889                 return 0;
890
891         if (!rte_kvargs_count(kvlist, floating_veb_key)) {
892                 rte_kvargs_free(kvlist);
893                 return 0;
894         }
895         /* Floating VEB is enabled when there's key-value:
896          * enable_floating_veb=1
897          */
898         if (rte_kvargs_process(kvlist, floating_veb_key,
899                                i40e_check_floating_handler, NULL) < 0) {
900                 rte_kvargs_free(kvlist);
901                 return 0;
902         }
903         rte_kvargs_free(kvlist);
904
905         return 1;
906 }
907
/*
 * Enable/disable the floating VEB and build the per-VF attach list from
 * devargs. The feature is only honored when the firmware major version is
 * at least FLOATING_VEB_SUPPORTED_FW_MAJ.
 */
static void
config_floating_veb(struct rte_eth_dev *dev)
{
	struct rte_pci_device *pci_dev = dev->pci_dev;
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	memset(pf->floating_veb_list, 0, sizeof(pf->floating_veb_list));

	if (hw->aq.fw_maj_ver >= FLOATING_VEB_SUPPORTED_FW_MAJ) {
		pf->floating_veb =
			is_floating_veb_supported(pci_dev->device.devargs);
		config_vf_floating_veb(pci_dev->device.devargs,
				       pf->floating_veb,
				       pf->floating_veb_list);
	} else {
		pf->floating_veb = false;
	}
}

/* S-TAG enable bit in the I40E_PRT_L2TAGSEN register (see eth_i40e_dev_init) */
#define I40E_L2_TAGS_S_TAG_SHIFT 1
#define I40E_L2_TAGS_S_TAG_MASK I40E_MASK(0x1, I40E_L2_TAGS_S_TAG_SHIFT)
930
931 static int
932 eth_i40e_dev_init(struct rte_eth_dev *dev)
933 {
934         struct rte_pci_device *pci_dev;
935         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
936         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
937         struct i40e_vsi *vsi;
938         int ret;
939         uint32_t len;
940         uint8_t aq_fail = 0;
941
942         PMD_INIT_FUNC_TRACE();
943
944         dev->dev_ops = &i40e_eth_dev_ops;
945         dev->rx_pkt_burst = i40e_recv_pkts;
946         dev->tx_pkt_burst = i40e_xmit_pkts;
947
948         /* for secondary processes, we don't initialise any further as primary
949          * has already done this work. Only check we don't need a different
950          * RX function */
951         if (rte_eal_process_type() != RTE_PROC_PRIMARY){
952                 i40e_set_rx_function(dev);
953                 i40e_set_tx_function(dev);
954                 return 0;
955         }
956         pci_dev = dev->pci_dev;
957
958         rte_eth_copy_pci_info(dev, pci_dev);
959
960         pf->adapter = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
961         pf->adapter->eth_dev = dev;
962         pf->dev_data = dev->data;
963
964         hw->back = I40E_PF_TO_ADAPTER(pf);
965         hw->hw_addr = (uint8_t *)(pci_dev->mem_resource[0].addr);
966         if (!hw->hw_addr) {
967                 PMD_INIT_LOG(ERR, "Hardware is not available, "
968                              "as address is NULL");
969                 return -ENODEV;
970         }
971
972         hw->vendor_id = pci_dev->id.vendor_id;
973         hw->device_id = pci_dev->id.device_id;
974         hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
975         hw->subsystem_device_id = pci_dev->id.subsystem_device_id;
976         hw->bus.device = pci_dev->addr.devid;
977         hw->bus.func = pci_dev->addr.function;
978         hw->adapter_stopped = 0;
979
980         /* Make sure all is clean before doing PF reset */
981         i40e_clear_hw(hw);
982
983         /* Initialize the hardware */
984         i40e_hw_init(dev);
985
986         /* Reset here to make sure all is clean for each PF */
987         ret = i40e_pf_reset(hw);
988         if (ret) {
989                 PMD_INIT_LOG(ERR, "Failed to reset pf: %d", ret);
990                 return ret;
991         }
992
993         /* Initialize the shared code (base driver) */
994         ret = i40e_init_shared_code(hw);
995         if (ret) {
996                 PMD_INIT_LOG(ERR, "Failed to init shared code (base driver): %d", ret);
997                 return ret;
998         }
999
1000         /*
1001          * To work around the NVM issue, initialize registers
1002          * for flexible payload and packet type of QinQ by
1003          * software. It should be removed once issues are fixed
1004          * in NVM.
1005          */
1006         i40e_GLQF_reg_init(hw);
1007
1008         /* Initialize the input set for filters (hash and fd) to default value */
1009         i40e_filter_input_set_init(pf);
1010
1011         /* Initialize the parameters for adminq */
1012         i40e_init_adminq_parameter(hw);
1013         ret = i40e_init_adminq(hw);
1014         if (ret != I40E_SUCCESS) {
1015                 PMD_INIT_LOG(ERR, "Failed to init adminq: %d", ret);
1016                 return -EIO;
1017         }
1018         PMD_INIT_LOG(INFO, "FW %d.%d API %d.%d NVM %02d.%02d.%02d eetrack %04x",
1019                      hw->aq.fw_maj_ver, hw->aq.fw_min_ver,
1020                      hw->aq.api_maj_ver, hw->aq.api_min_ver,
1021                      ((hw->nvm.version >> 12) & 0xf),
1022                      ((hw->nvm.version >> 4) & 0xff),
1023                      (hw->nvm.version & 0xf), hw->nvm.eetrack);
1024
1025         /* Need the special FW version to support floating VEB */
1026         config_floating_veb(dev);
1027         /* Clear PXE mode */
1028         i40e_clear_pxe_mode(hw);
1029         ret = i40e_dev_sync_phy_type(hw);
1030         if (ret) {
1031                 PMD_INIT_LOG(ERR, "Failed to sync phy type: %d", ret);
1032                 goto err_sync_phy_type;
1033         }
1034         /*
1035          * On X710, performance number is far from the expectation on recent
1036          * firmware versions. The fix for this issue may not be integrated in
1037          * the following firmware version. So the workaround in software driver
1038          * is needed. It needs to modify the initial values of 3 internal only
1039          * registers. Note that the workaround can be removed when it is fixed
1040          * in firmware in the future.
1041          */
1042         i40e_configure_registers(hw);
1043
1044         /* Get hw capabilities */
1045         ret = i40e_get_cap(hw);
1046         if (ret != I40E_SUCCESS) {
1047                 PMD_INIT_LOG(ERR, "Failed to get capabilities: %d", ret);
1048                 goto err_get_capabilities;
1049         }
1050
1051         /* Initialize parameters for PF */
1052         ret = i40e_pf_parameter_init(dev);
1053         if (ret != 0) {
1054                 PMD_INIT_LOG(ERR, "Failed to do parameter init: %d", ret);
1055                 goto err_parameter_init;
1056         }
1057
1058         /* Initialize the queue management */
1059         ret = i40e_res_pool_init(&pf->qp_pool, 0, hw->func_caps.num_tx_qp);
1060         if (ret < 0) {
1061                 PMD_INIT_LOG(ERR, "Failed to init queue pool");
1062                 goto err_qp_pool_init;
1063         }
1064         ret = i40e_res_pool_init(&pf->msix_pool, 1,
1065                                 hw->func_caps.num_msix_vectors - 1);
1066         if (ret < 0) {
1067                 PMD_INIT_LOG(ERR, "Failed to init MSIX pool");
1068                 goto err_msix_pool_init;
1069         }
1070
1071         /* Initialize lan hmc */
1072         ret = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
1073                                 hw->func_caps.num_rx_qp, 0, 0);
1074         if (ret != I40E_SUCCESS) {
1075                 PMD_INIT_LOG(ERR, "Failed to init lan hmc: %d", ret);
1076                 goto err_init_lan_hmc;
1077         }
1078
1079         /* Configure lan hmc */
1080         ret = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
1081         if (ret != I40E_SUCCESS) {
1082                 PMD_INIT_LOG(ERR, "Failed to configure lan hmc: %d", ret);
1083                 goto err_configure_lan_hmc;
1084         }
1085
1086         /* Get and check the mac address */
1087         i40e_get_mac_addr(hw, hw->mac.addr);
1088         if (i40e_validate_mac_addr(hw->mac.addr) != I40E_SUCCESS) {
1089                 PMD_INIT_LOG(ERR, "mac address is not valid");
1090                 ret = -EIO;
1091                 goto err_get_mac_addr;
1092         }
1093         /* Copy the permanent MAC address */
1094         ether_addr_copy((struct ether_addr *) hw->mac.addr,
1095                         (struct ether_addr *) hw->mac.perm_addr);
1096
1097         /* Disable flow control */
1098         hw->fc.requested_mode = I40E_FC_NONE;
1099         i40e_set_fc(hw, &aq_fail, TRUE);
1100
1101         /* Set the global registers with default ether type value */
1102         ret = i40e_vlan_tpid_set(dev, ETH_VLAN_TYPE_OUTER, ETHER_TYPE_VLAN);
1103         if (ret != I40E_SUCCESS) {
1104                 PMD_INIT_LOG(ERR, "Failed to set the default outer "
1105                              "VLAN ether type");
1106                 goto err_setup_pf_switch;
1107         }
1108
1109         /* PF setup, which includes VSI setup */
1110         ret = i40e_pf_setup(pf);
1111         if (ret) {
1112                 PMD_INIT_LOG(ERR, "Failed to setup pf switch: %d", ret);
1113                 goto err_setup_pf_switch;
1114         }
1115
1116         /* reset all stats of the device, including pf and main vsi */
1117         i40e_dev_stats_reset(dev);
1118
1119         vsi = pf->main_vsi;
1120
1121         /* Disable double vlan by default */
1122         i40e_vsi_config_double_vlan(vsi, FALSE);
1123
1124         /* Disable S-TAG identification when floating_veb is disabled */
1125         if (!pf->floating_veb) {
1126                 ret = I40E_READ_REG(hw, I40E_PRT_L2TAGSEN);
1127                 if (ret & I40E_L2_TAGS_S_TAG_MASK) {
1128                         ret &= ~I40E_L2_TAGS_S_TAG_MASK;
1129                         I40E_WRITE_REG(hw, I40E_PRT_L2TAGSEN, ret);
1130                 }
1131         }
1132
1133         if (!vsi->max_macaddrs)
1134                 len = ETHER_ADDR_LEN;
1135         else
1136                 len = ETHER_ADDR_LEN * vsi->max_macaddrs;
1137
1138         /* Should be after VSI initialized */
1139         dev->data->mac_addrs = rte_zmalloc("i40e", len, 0);
1140         if (!dev->data->mac_addrs) {
1141                 PMD_INIT_LOG(ERR, "Failed to allocated memory "
1142                                         "for storing mac address");
1143                 goto err_mac_alloc;
1144         }
1145         ether_addr_copy((struct ether_addr *)hw->mac.perm_addr,
1146                                         &dev->data->mac_addrs[0]);
1147
1148         /* initialize pf host driver to setup SRIOV resource if applicable */
1149         i40e_pf_host_init(dev);
1150
1151         /* register callback func to eal lib */
1152         rte_intr_callback_register(&(pci_dev->intr_handle),
1153                 i40e_dev_interrupt_handler, (void *)dev);
1154
1155         /* configure and enable device interrupt */
1156         i40e_pf_config_irq0(hw, TRUE);
1157         i40e_pf_enable_irq0(hw);
1158
1159         /* enable uio intr after callback register */
1160         rte_intr_enable(&(pci_dev->intr_handle));
1161         /*
1162          * Add an ethertype filter to drop all flow control frames transmitted
1163          * from VSIs. By doing so, we stop VF from sending out PAUSE or PFC
1164          * frames to wire.
1165          */
1166         i40e_add_tx_flow_control_drop_filter(pf);
1167
1168         /* Set the max frame size to 0x2600 by default,
1169          * in case other drivers changed the default value.
1170          */
1171         i40e_aq_set_mac_config(hw, I40E_FRAME_SIZE_MAX, TRUE, 0, NULL);
1172
1173         /* initialize mirror rule list */
1174         TAILQ_INIT(&pf->mirror_list);
1175
1176         /* Init dcb to sw mode by default */
1177         ret = i40e_dcb_init_configure(dev, TRUE);
1178         if (ret != I40E_SUCCESS) {
1179                 PMD_INIT_LOG(INFO, "Failed to init dcb.");
1180                 pf->flags &= ~I40E_FLAG_DCB;
1181         }
1182
1183         return 0;
1184
1185 err_mac_alloc:
1186         i40e_vsi_release(pf->main_vsi);
1187 err_setup_pf_switch:
1188 err_get_mac_addr:
1189 err_configure_lan_hmc:
1190         (void)i40e_shutdown_lan_hmc(hw);
1191 err_init_lan_hmc:
1192         i40e_res_pool_destroy(&pf->msix_pool);
1193 err_msix_pool_init:
1194         i40e_res_pool_destroy(&pf->qp_pool);
1195 err_qp_pool_init:
1196 err_parameter_init:
1197 err_get_capabilities:
1198 err_sync_phy_type:
1199         (void)i40e_shutdown_adminq(hw);
1200
1201         return ret;
1202 }
1203
/*
 * Per-port uninit hook: tears down in roughly the reverse order of
 * eth_i40e_dev_init() — close the device if still running, reset filter
 * control, disable flow control, uninit the PF host driver, free the MAC
 * table and finally disable/unregister the interrupt callback.
 * Secondary processes do nothing.
 */
static int
eth_i40e_dev_uninit(struct rte_eth_dev *dev)
{
	struct rte_pci_device *pci_dev;
	struct i40e_hw *hw;
	struct i40e_filter_control_settings settings;
	int ret;
	uint8_t aq_fail = 0;

	PMD_INIT_FUNC_TRACE();

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	pci_dev = dev->pci_dev;

	/* close only if the device was not already stopped */
	if (hw->adapter_stopped == 0)
		i40e_dev_close(dev);

	dev->dev_ops = NULL;
	dev->rx_pkt_burst = NULL;
	dev->tx_pkt_burst = NULL;

	/* Clear PXE mode */
	i40e_clear_pxe_mode(hw);

	/* Unconfigure filter control */
	memset(&settings, 0, sizeof(settings));
	ret = i40e_set_filter_control(hw, &settings);
	if (ret)
		PMD_INIT_LOG(WARNING, "setup_pf_filter_control failed: %d",
					ret);

	/* Disable flow control */
	hw->fc.requested_mode = I40E_FC_NONE;
	i40e_set_fc(hw, &aq_fail, TRUE);

	/* uninitialize pf host driver */
	i40e_pf_host_uninit(dev);

	rte_free(dev->data->mac_addrs);
	dev->data->mac_addrs = NULL;

	/* disable uio intr before callback unregister */
	rte_intr_disable(&(pci_dev->intr_handle));

	/* register callback func to eal lib */
	rte_intr_callback_unregister(&(pci_dev->intr_handle),
		i40e_dev_interrupt_handler, (void *)dev);

	return 0;
}
1257
/*
 * dev_configure hook: set up flow director (perfect mode only), VLAN
 * offloads, and VMDQ/DCB multi-queue modes as requested in dev_conf.
 * Returns 0 on success or a negative value; on failure the VMDQ VSIs and
 * flow-director resources acquired so far are released.
 */
static int
i40e_dev_configure(struct rte_eth_dev *dev)
{
	struct i40e_adapter *ad =
		I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	enum rte_eth_rx_mq_mode mq_mode = dev->data->dev_conf.rxmode.mq_mode;
	int i, ret;

	/* Initialize to TRUE. If any of Rx queues doesn't meet the
	 * bulk allocation or vector Rx preconditions we will reset it.
	 */
	ad->rx_bulk_alloc_allowed = true;
	ad->rx_vec_allowed = true;
	ad->tx_simple_allowed = true;
	ad->tx_vec_allowed = true;

	if (dev->data->dev_conf.fdir_conf.mode == RTE_FDIR_MODE_PERFECT) {
		ret = i40e_fdir_setup(pf);
		if (ret != I40E_SUCCESS) {
			PMD_DRV_LOG(ERR, "Failed to setup flow director.");
			return -ENOTSUP;
		}
		ret = i40e_fdir_configure(dev);
		if (ret < 0) {
			PMD_DRV_LOG(ERR, "failed to configure fdir.");
			goto err;
		}
	} else
		i40e_fdir_teardown(pf);

	ret = i40e_dev_init_vlan(dev);
	if (ret < 0)
		goto err;

	/* VMDQ setup.
	 *  Needs to move VMDQ setting out of i40e_pf_config_mq_rx() as VMDQ and
	 *  RSS setting have different requirements.
	 *  General PMD driver call sequence are NIC init, configure,
	 *  rx/tx_queue_setup and dev_start. In rx/tx_queue_setup() function, it
	 *  will try to lookup the VSI that specific queue belongs to if VMDQ
	 *  applicable. So, VMDQ setting has to be done before
	 *  rx/tx_queue_setup(). This function is good  to place vmdq_setup.
	 *  For RSS setting, it will try to calculate actual configured RX queue
	 *  number, which will be available after rx_queue_setup(). dev_start()
	 *  function is good to place RSS setup.
	 */
	if (mq_mode & ETH_MQ_RX_VMDQ_FLAG) {
		ret = i40e_vmdq_setup(dev);
		if (ret)
			goto err;
	}

	if (mq_mode & ETH_MQ_RX_DCB_FLAG) {
		ret = i40e_dcb_setup(dev);
		if (ret) {
			PMD_DRV_LOG(ERR, "failed to configure DCB.");
			goto err_dcb;
		}
	}

	return 0;

err_dcb:
	/* need to release vmdq resource if exists */
	for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
		i40e_vsi_release(pf->vmdq[i].vsi);
		pf->vmdq[i].vsi = NULL;
	}
	rte_free(pf->vmdq);
	pf->vmdq = NULL;
err:
	/* need to release fdir resource if exists */
	i40e_fdir_teardown(pf);
	return ret;
}
1334
/*
 * Detach all of a VSI's Rx/Tx queues from their MSI-X interrupt vector.
 *
 * Clears every queue's QINT_[TR]QCTL register, then terminates the
 * interrupt linked list: for PF VSIs via PFINT_LNKLST0 (shared/misc
 * vector) or PFINT_LNKLSTN (dedicated vector), for SRIOV VSIs via the
 * VF's VPINT_LNKLSTN entry. The final flush makes the writes visible
 * to hardware.
 */
void
i40e_vsi_queues_unbind_intr(struct i40e_vsi *vsi)
{
	struct rte_eth_dev *dev = vsi->adapter->eth_dev;
	struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
	uint16_t msix_vect = vsi->msix_intr;
	uint16_t i;

	/* disable interrupt cause for every queue of this VSI */
	for (i = 0; i < vsi->nb_qps; i++) {
		I40E_WRITE_REG(hw, I40E_QINT_TQCTL(vsi->base_queue + i), 0);
		I40E_WRITE_REG(hw, I40E_QINT_RQCTL(vsi->base_queue + i), 0);
		rte_wmb();
	}

	if (vsi->type != I40E_VSI_SRIOV) {
		if (!rte_intr_allow_others(intr_handle)) {
			/* queues share the misc vector: reset list head + ITR0 */
			I40E_WRITE_REG(hw, I40E_PFINT_LNKLST0,
				       I40E_PFINT_LNKLST0_FIRSTQ_INDX_MASK);
			I40E_WRITE_REG(hw,
				       I40E_PFINT_ITR0(I40E_ITR_INDEX_DEFAULT),
				       0);
		} else {
			/* dedicated vector: LNKLSTN/ITRN are 0-based (vector - 1) */
			I40E_WRITE_REG(hw, I40E_PFINT_LNKLSTN(msix_vect - 1),
				       I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK);
			I40E_WRITE_REG(hw,
				       I40E_PFINT_ITRN(I40E_ITR_INDEX_DEFAULT,
						       msix_vect - 1), 0);
		}
	} else {
		uint32_t reg;
		/* index of this VF's vector in the global VPINT_LNKLSTN array */
		reg = (hw->func_caps.num_msix_vectors_vf - 1) *
			vsi->user_param + (msix_vect - 1);

		I40E_WRITE_REG(hw, I40E_VPINT_LNKLSTN(reg),
			       I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK);
	}
	I40E_WRITE_FLUSH(hw);
}
1374
/* Bind the RX queues [base_queue, base_queue + nb_queue) of @vsi to MSI-X
 * vector @msix_vect by chaining them into the per-vector interrupt linked
 * list, then program the vector's head-of-list and ITR registers.
 */
static void
__vsi_queues_bind_intr(struct i40e_vsi *vsi, uint16_t msix_vect,
		       int base_queue, int nb_queue)
{
	int i;
	uint32_t val;
	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);

	/* Bind all RX queues to allocated MSIX interrupt */
	for (i = 0; i < nb_queue; i++) {
		/* Each queue's QINT_RQCTL carries the vector index, the ITR
		 * index, cause-enable, and a link to the next queue in the
		 * chain (queue type 0 = RX).
		 */
		val = (msix_vect << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
			I40E_QINT_RQCTL_ITR_INDX_MASK |
			((base_queue + i + 1) <<
			 I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
			(0 << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
			I40E_QINT_RQCTL_CAUSE_ENA_MASK;

		/* The last queue terminates the linked list */
		if (i == nb_queue - 1)
			val |= I40E_QINT_RQCTL_NEXTQ_INDX_MASK;
		I40E_WRITE_REG(hw, I40E_QINT_RQCTL(base_queue + i), val);
	}

	/* Write first RX queue to Link list register as the head element */
	if (vsi->type != I40E_VSI_SRIOV) {
		uint16_t interval =
			i40e_calc_itr_interval(RTE_LIBRTE_I40E_ITR_INTERVAL);

		if (msix_vect == I40E_MISC_VEC_ID) {
			/* Shared with the misc cause: vector 0 registers */
			I40E_WRITE_REG(hw, I40E_PFINT_LNKLST0,
				       (base_queue <<
					I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT) |
				       (0x0 <<
					I40E_PFINT_LNKLST0_FIRSTQ_TYPE_SHIFT));
			I40E_WRITE_REG(hw,
				       I40E_PFINT_ITR0(I40E_ITR_INDEX_DEFAULT),
				       interval);
		} else {
			/* LNKLSTN/ITRN registers are 0-based: vector N uses
			 * index N - 1.
			 */
			I40E_WRITE_REG(hw, I40E_PFINT_LNKLSTN(msix_vect - 1),
				       (base_queue <<
					I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT) |
				       (0x0 <<
					I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT));
			I40E_WRITE_REG(hw,
				       I40E_PFINT_ITRN(I40E_ITR_INDEX_DEFAULT,
						       msix_vect - 1),
				       interval);
		}
	} else {
		uint32_t reg;

		if (msix_vect == I40E_MISC_VEC_ID) {
			/* VF misc vector: per-VF link list register 0,
			 * indexed by the VF id (vsi->user_param).
			 */
			I40E_WRITE_REG(hw,
				       I40E_VPINT_LNKLST0(vsi->user_param),
				       (base_queue <<
					I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT) |
				       (0x0 <<
					I40E_VPINT_LNKLST0_FIRSTQ_TYPE_SHIFT));
		} else {
			/* num_msix_vectors_vf needs to minus irq0 */
			reg = (hw->func_caps.num_msix_vectors_vf - 1) *
				vsi->user_param + (msix_vect - 1);

			I40E_WRITE_REG(hw, I40E_VPINT_LNKLSTN(reg),
				       (base_queue <<
					I40E_VPINT_LNKLSTN_FIRSTQ_INDX_SHIFT) |
				       (0x0 <<
					I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT));
		}
	}

	I40E_WRITE_FLUSH(hw);
}
1447
/* Distribute the VSI's used queues across its allocated MSI-X vectors and
 * program the hardware binding. When there are fewer vectors than queues,
 * the remaining queues are all chained onto the last available vector.
 * Also records the queue->vector mapping in intr_handle->intr_vec for
 * PF/VMDq VSIs when RX interrupt mode is enabled.
 */
void
i40e_vsi_queues_bind_intr(struct i40e_vsi *vsi)
{
	struct rte_eth_dev *dev = vsi->adapter->eth_dev;
	struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
	uint16_t msix_vect = vsi->msix_intr;
	/* Cannot use more vectors than event fds were set up for */
	uint16_t nb_msix = RTE_MIN(vsi->nb_msix, intr_handle->nb_efd);
	uint16_t queue_idx = 0;
	int record = 0;
	uint32_t val;
	int i;

	/* Start from a clean slate: clear any stale queue->cause binding */
	for (i = 0; i < vsi->nb_qps; i++) {
		I40E_WRITE_REG(hw, I40E_QINT_TQCTL(vsi->base_queue + i), 0);
		I40E_WRITE_REG(hw, I40E_QINT_RQCTL(vsi->base_queue + i), 0);
	}

	/* INTENA flag is not auto-cleared for interrupt */
	val = I40E_READ_REG(hw, I40E_GLINT_CTL);
	val |= I40E_GLINT_CTL_DIS_AUTOMASK_PF0_MASK |
		I40E_GLINT_CTL_DIS_AUTOMASK_N_MASK |
		I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK;
	I40E_WRITE_REG(hw, I40E_GLINT_CTL, val);

	/* VF bind interrupt */
	if (vsi->type == I40E_VSI_SRIOV) {
		__vsi_queues_bind_intr(vsi, msix_vect,
				       vsi->base_queue, vsi->nb_qps);
		return;
	}

	/* PF & VMDq bind interrupt */
	if (rte_intr_dp_is_en(intr_handle)) {
		/* Only MAIN and VMDQ2 VSIs record their mapping; queue_idx
		 * is the VSI's first queue relative to the ethdev's RX
		 * queue numbering.
		 */
		if (vsi->type == I40E_VSI_MAIN) {
			queue_idx = 0;
			record = 1;
		} else if (vsi->type == I40E_VSI_VMDQ2) {
			struct i40e_vsi *main_vsi =
				I40E_DEV_PRIVATE_TO_MAIN_VSI(vsi->adapter);
			queue_idx = vsi->base_queue - main_vsi->nb_qps;
			record = 1;
		}
	}

	for (i = 0; i < vsi->nb_used_qps; i++) {
		if (nb_msix <= 1) {
			if (!rte_intr_allow_others(intr_handle))
				/* allow to share MISC_VEC_ID */
				msix_vect = I40E_MISC_VEC_ID;

			/* no enough msix_vect, map all to one */
			__vsi_queues_bind_intr(vsi, msix_vect,
					       vsi->base_queue + i,
					       vsi->nb_used_qps - i);
			for (; !!record && i < vsi->nb_used_qps; i++)
				intr_handle->intr_vec[queue_idx + i] =
					msix_vect;
			break;
		}
		/* 1:1 queue/msix_vect mapping */
		__vsi_queues_bind_intr(vsi, msix_vect,
				       vsi->base_queue + i, 1);
		if (!!record)
			intr_handle->intr_vec[queue_idx + i] = msix_vect;

		msix_vect++;
		nb_msix--;
	}
}
1518
/* Enable the queue interrupts of @vsi by arming its MSI-X vectors
 * (INTENA + CLEARPBA) with the default ITR interval. Falls back to the
 * shared vector-0 register when dedicated vectors are not allowed.
 */
static void
i40e_vsi_enable_queues_intr(struct i40e_vsi *vsi)
{
	struct rte_eth_dev *dev = vsi->adapter->eth_dev;
	struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
	uint16_t interval = i40e_calc_itr_interval(\
		RTE_LIBRTE_I40E_ITR_INTERVAL);
	uint16_t msix_intr, i;

	if (rte_intr_allow_others(intr_handle))
		/* One DYN_CTLN register per dedicated vector (0-based,
		 * hence msix_intr - 1).
		 */
		for (i = 0; i < vsi->nb_msix; i++) {
			msix_intr = vsi->msix_intr + i;
			I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTLN(msix_intr - 1),
				I40E_PFINT_DYN_CTLN_INTENA_MASK |
				I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
				(0 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) |
				(interval <<
				 I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT));
		}
	else
		/* Vector shared with the misc cause: arm DYN_CTL0 */
		I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
			       I40E_PFINT_DYN_CTL0_INTENA_MASK |
			       I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
			       (0 << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT) |
			       (interval <<
				I40E_PFINT_DYN_CTL0_INTERVAL_SHIFT));

	I40E_WRITE_FLUSH(hw);
}
1549
/* Disable the queue interrupts of @vsi by clearing its vectors'
 * DYN_CTL registers (counterpart of i40e_vsi_enable_queues_intr).
 */
static void
i40e_vsi_disable_queues_intr(struct i40e_vsi *vsi)
{
	struct rte_eth_dev *dev = vsi->adapter->eth_dev;
	struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
	uint16_t msix_intr, i;

	if (rte_intr_allow_others(intr_handle))
		/* Dedicated vectors: DYN_CTLN registers are 0-based */
		for (i = 0; i < vsi->nb_msix; i++) {
			msix_intr = vsi->msix_intr + i;
			I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTLN(msix_intr - 1),
				       0);
		}
	else
		/* Shared vector 0 */
		I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0, 0);

	I40E_WRITE_FLUSH(hw);
}
1569
1570 static inline uint8_t
1571 i40e_parse_link_speeds(uint16_t link_speeds)
1572 {
1573         uint8_t link_speed = I40E_LINK_SPEED_UNKNOWN;
1574
1575         if (link_speeds & ETH_LINK_SPEED_40G)
1576                 link_speed |= I40E_LINK_SPEED_40GB;
1577         if (link_speeds & ETH_LINK_SPEED_25G)
1578                 link_speed |= I40E_LINK_SPEED_25GB;
1579         if (link_speeds & ETH_LINK_SPEED_20G)
1580                 link_speed |= I40E_LINK_SPEED_20GB;
1581         if (link_speeds & ETH_LINK_SPEED_10G)
1582                 link_speed |= I40E_LINK_SPEED_10GB;
1583         if (link_speeds & ETH_LINK_SPEED_1G)
1584                 link_speed |= I40E_LINK_SPEED_1GB;
1585         if (link_speeds & ETH_LINK_SPEED_100M)
1586                 link_speed |= I40E_LINK_SPEED_100MB;
1587
1588         return link_speed;
1589 }
1590
1591 static int
1592 i40e_phy_conf_link(struct i40e_hw *hw,
1593                    uint8_t abilities,
1594                    uint8_t force_speed)
1595 {
1596         enum i40e_status_code status;
1597         struct i40e_aq_get_phy_abilities_resp phy_ab;
1598         struct i40e_aq_set_phy_config phy_conf;
1599         const uint8_t mask = I40E_AQ_PHY_FLAG_PAUSE_TX |
1600                         I40E_AQ_PHY_FLAG_PAUSE_RX |
1601                         I40E_AQ_PHY_FLAG_PAUSE_RX |
1602                         I40E_AQ_PHY_FLAG_LOW_POWER;
1603         const uint8_t advt = I40E_LINK_SPEED_40GB |
1604                         I40E_LINK_SPEED_25GB |
1605                         I40E_LINK_SPEED_10GB |
1606                         I40E_LINK_SPEED_1GB |
1607                         I40E_LINK_SPEED_100MB;
1608         int ret = -ENOTSUP;
1609
1610
1611         status = i40e_aq_get_phy_capabilities(hw, false, false, &phy_ab,
1612                                               NULL);
1613         if (status)
1614                 return ret;
1615
1616         memset(&phy_conf, 0, sizeof(phy_conf));
1617
1618         /* bits 0-2 use the values from get_phy_abilities_resp */
1619         abilities &= ~mask;
1620         abilities |= phy_ab.abilities & mask;
1621
1622         /* update ablities and speed */
1623         if (abilities & I40E_AQ_PHY_AN_ENABLED)
1624                 phy_conf.link_speed = advt;
1625         else
1626                 phy_conf.link_speed = force_speed;
1627
1628         phy_conf.abilities = abilities;
1629
1630         /* use get_phy_abilities_resp value for the rest */
1631         phy_conf.phy_type = phy_ab.phy_type;
1632         phy_conf.phy_type_ext = phy_ab.phy_type_ext;
1633         phy_conf.fec_config = phy_ab.mod_type_ext;
1634         phy_conf.eee_capability = phy_ab.eee_capability;
1635         phy_conf.eeer = phy_ab.eeer_val;
1636         phy_conf.low_power_ctrl = phy_ab.d3_lpan;
1637
1638         PMD_DRV_LOG(DEBUG, "\tCurrent: abilities %x, link_speed %x",
1639                     phy_ab.abilities, phy_ab.link_speed);
1640         PMD_DRV_LOG(DEBUG, "\tConfig:  abilities %x, link_speed %x",
1641                     phy_conf.abilities, phy_conf.link_speed);
1642
1643         status = i40e_aq_set_phy_config(hw, &phy_conf, NULL);
1644         if (status)
1645                 return ret;
1646
1647         return I40E_SUCCESS;
1648 }
1649
1650 static int
1651 i40e_apply_link_speed(struct rte_eth_dev *dev)
1652 {
1653         uint8_t speed;
1654         uint8_t abilities = 0;
1655         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1656         struct rte_eth_conf *conf = &dev->data->dev_conf;
1657
1658         speed = i40e_parse_link_speeds(conf->link_speeds);
1659         abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
1660         if (!(conf->link_speeds & ETH_LINK_SPEED_FIXED))
1661                 abilities |= I40E_AQ_PHY_AN_ENABLED;
1662         abilities |= I40E_AQ_PHY_LINK_ENABLED;
1663
1664         /* Skip changing speed on 40G interfaces, FW does not support */
1665         if (I40E_PHY_TYPE_SUPPORT_40G(hw->phy.phy_types)) {
1666                 speed =  I40E_LINK_SPEED_UNKNOWN;
1667                 abilities |= I40E_AQ_PHY_AN_ENABLED;
1668         }
1669
1670         return i40e_phy_conf_link(hw, abilities, speed);
1671 }
1672
/* Start the device: initialize RX/TX rings, bind and enable queue
 * interrupts for the main, VMDq and FDIR VSIs, enable the queues,
 * enable broadcast reception, apply the link speed, and set up the
 * interrupt callback/LSC handling. On failure, queues are switched
 * off and cleared again.
 */
static int
i40e_dev_start(struct rte_eth_dev *dev)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct i40e_vsi *main_vsi = pf->main_vsi;
	int ret, i;
	struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
	uint32_t intr_vector = 0;

	hw->adapter_stopped = 0;

	/* This driver requires autonegotiation; reject fixed-speed config */
	if (dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED) {
		PMD_INIT_LOG(ERR, "Invalid link_speeds for port %hhu; autonegotiation disabled",
			     dev->data->port_id);
		return -EINVAL;
	}

	rte_intr_disable(intr_handle);

	/* Set up one event fd per RX queue when RX interrupt mode is on */
	if ((rte_intr_cap_multiple(intr_handle) ||
	     !RTE_ETH_DEV_SRIOV(dev).active) &&
	    dev->data->dev_conf.intr_conf.rxq != 0) {
		intr_vector = dev->data->nb_rx_queues;
		if (rte_intr_efd_enable(intr_handle, intr_vector))
			return -1;
	}

	/* Allocate the queue->vector map filled by queues_bind_intr below */
	if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
		intr_handle->intr_vec =
			rte_zmalloc("intr_vec",
				    dev->data->nb_rx_queues * sizeof(int),
				    0);
		if (!intr_handle->intr_vec) {
			PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
				     " intr_vec\n", dev->data->nb_rx_queues);
			return -ENOMEM;
		}
	}

	/* Initialize VSI */
	ret = i40e_dev_rxtx_init(pf);
	if (ret != I40E_SUCCESS) {
		PMD_DRV_LOG(ERR, "Failed to init rx/tx queues");
		goto err_up;
	}

	/* Map queues with MSIX interrupt */
	main_vsi->nb_used_qps = dev->data->nb_rx_queues -
		pf->nb_cfg_vmdq_vsi * RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
	i40e_vsi_queues_bind_intr(main_vsi);
	i40e_vsi_enable_queues_intr(main_vsi);

	/* Map VMDQ VSI queues with MSIX interrupt */
	for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
		pf->vmdq[i].vsi->nb_used_qps = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
		i40e_vsi_queues_bind_intr(pf->vmdq[i].vsi);
		i40e_vsi_enable_queues_intr(pf->vmdq[i].vsi);
	}

	/* enable FDIR MSIX interrupt */
	if (pf->fdir.fdir_vsi) {
		i40e_vsi_queues_bind_intr(pf->fdir.fdir_vsi);
		i40e_vsi_enable_queues_intr(pf->fdir.fdir_vsi);
	}

	/* Enable all queues which have been configured */
	ret = i40e_dev_switch_queues(pf, TRUE);
	if (ret != I40E_SUCCESS) {
		PMD_DRV_LOG(ERR, "Failed to enable VSI");
		goto err_up;
	}

	/* Enable receiving broadcast packets */
	ret = i40e_aq_set_vsi_broadcast(hw, main_vsi->seid, true, NULL);
	if (ret != I40E_SUCCESS)
		PMD_DRV_LOG(INFO, "fail to set vsi broadcast");

	for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
		ret = i40e_aq_set_vsi_broadcast(hw, pf->vmdq[i].vsi->seid,
						true, NULL);
		if (ret != I40E_SUCCESS)
			PMD_DRV_LOG(INFO, "fail to set vsi broadcast");
	}

	/* Apply link configure: reject any speed bit we cannot express */
	if (dev->data->dev_conf.link_speeds & ~(ETH_LINK_SPEED_100M |
				ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G |
				ETH_LINK_SPEED_20G | ETH_LINK_SPEED_25G |
				ETH_LINK_SPEED_40G)) {
		PMD_DRV_LOG(ERR, "Invalid link setting");
		goto err_up;
	}
	ret = i40e_apply_link_speed(dev);
	if (I40E_SUCCESS != ret) {
		PMD_DRV_LOG(ERR, "Fail to apply link setting");
		goto err_up;
	}

	if (!rte_intr_allow_others(intr_handle)) {
		/* Single shared vector: take over interrupt handling from
		 * the registered callback while the port runs.
		 */
		rte_intr_callback_unregister(intr_handle,
					     i40e_dev_interrupt_handler,
					     (void *)dev);
		/* configure and enable device interrupt */
		i40e_pf_config_irq0(hw, FALSE);
		i40e_pf_enable_irq0(hw);

		if (dev->data->dev_conf.intr_conf.lsc != 0)
			PMD_INIT_LOG(INFO, "lsc won't enable because of"
				     " no intr multiplex\n");
	} else if (dev->data->dev_conf.intr_conf.lsc != 0) {
		/* Restrict PHY events to the ones LSC cares about */
		ret = i40e_aq_set_phy_int_mask(hw,
					       ~(I40E_AQ_EVENT_LINK_UPDOWN |
					       I40E_AQ_EVENT_MODULE_QUAL_FAIL |
					       I40E_AQ_EVENT_MEDIA_NA), NULL);
		if (ret != I40E_SUCCESS)
			PMD_DRV_LOG(WARNING, "Fail to set phy mask");

		/* Call get_link_info aq commond to enable LSE */
		i40e_dev_link_update(dev, 0);
	}

	/* enable uio intr after callback register */
	rte_intr_enable(intr_handle);

	return I40E_SUCCESS;

err_up:
	i40e_dev_switch_queues(pf, FALSE);
	i40e_dev_clear_queues(dev);

	return ret;
}
1806
/* Stop the device: disable queues, unbind queue interrupts of every VSI,
 * release ring memory, bring the link down, drop mirror rules, and
 * restore the default interrupt callback / vector mapping.
 */
static void
i40e_dev_stop(struct rte_eth_dev *dev)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_vsi *main_vsi = pf->main_vsi;
	struct i40e_mirror_rule *p_mirror;
	struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
	int i;

	/* Disable all queues */
	i40e_dev_switch_queues(pf, FALSE);

	/* un-map queues with interrupt registers */
	i40e_vsi_disable_queues_intr(main_vsi);
	i40e_vsi_queues_unbind_intr(main_vsi);

	for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
		i40e_vsi_disable_queues_intr(pf->vmdq[i].vsi);
		i40e_vsi_queues_unbind_intr(pf->vmdq[i].vsi);
	}

	/* NOTE(review): for the FDIR VSI the unbind happens before the
	 * disable, the reverse of the order used for main/VMDq above --
	 * confirm whether this ordering is intentional.
	 */
	if (pf->fdir.fdir_vsi) {
		i40e_vsi_queues_unbind_intr(pf->fdir.fdir_vsi);
		i40e_vsi_disable_queues_intr(pf->fdir.fdir_vsi);
	}
	/* Clear all queues and release memory */
	i40e_dev_clear_queues(dev);

	/* Set link down */
	i40e_dev_set_link_down(dev);

	/* Remove all mirror rules */
	while ((p_mirror = TAILQ_FIRST(&pf->mirror_list))) {
		TAILQ_REMOVE(&pf->mirror_list, p_mirror, rules);
		rte_free(p_mirror);
	}
	pf->nb_mirror_rule = 0;

	if (!rte_intr_allow_others(intr_handle))
		/* resume to the default handler */
		rte_intr_callback_register(intr_handle,
					   i40e_dev_interrupt_handler,
					   (void *)dev);

	/* Clean datapath event and queue/vec mapping */
	rte_intr_efd_disable(intr_handle);
	if (intr_handle->intr_vec) {
		rte_free(intr_handle->intr_vec);
		intr_handle->intr_vec = NULL;
	}
}
1858
/* Close the device: stop it, free queues, shut down HMC and admin queue,
 * release all VSIs and resource pools, and finally trigger a PF software
 * reset to clean any leftover hardware state.
 */
static void
i40e_dev_close(struct rte_eth_dev *dev)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t reg;
	int i;

	PMD_INIT_FUNC_TRACE();

	i40e_dev_stop(dev);
	hw->adapter_stopped = 1;
	i40e_dev_free_queues(dev);

	/* Disable interrupt */
	i40e_pf_disable_irq0(hw);
	rte_intr_disable(&(dev->pci_dev->intr_handle));

	/* shutdown and destroy the HMC */
	i40e_shutdown_lan_hmc(hw);

	/* Release VMDq VSIs before freeing the array that holds them */
	for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
		i40e_vsi_release(pf->vmdq[i].vsi);
		pf->vmdq[i].vsi = NULL;
	}
	rte_free(pf->vmdq);
	pf->vmdq = NULL;

	/* release all the existing VSIs and VEBs */
	i40e_fdir_teardown(pf);
	i40e_vsi_release(pf->main_vsi);

	/* shutdown the adminq */
	i40e_aq_queue_shutdown(hw, true);
	i40e_shutdown_adminq(hw);

	i40e_res_pool_destroy(&pf->qp_pool);
	i40e_res_pool_destroy(&pf->msix_pool);

	/* force a PF reset to clean anything leftover */
	reg = I40E_READ_REG(hw, I40E_PFGEN_CTRL);
	I40E_WRITE_REG(hw, I40E_PFGEN_CTRL,
			(reg | I40E_PFGEN_CTRL_PFSWR_MASK));
	I40E_WRITE_FLUSH(hw);
}
1904
1905 static void
1906 i40e_dev_promiscuous_enable(struct rte_eth_dev *dev)
1907 {
1908         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1909         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1910         struct i40e_vsi *vsi = pf->main_vsi;
1911         int status;
1912
1913         status = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid,
1914                                                      true, NULL, true);
1915         if (status != I40E_SUCCESS)
1916                 PMD_DRV_LOG(ERR, "Failed to enable unicast promiscuous");
1917
1918         status = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid,
1919                                                         TRUE, NULL);
1920         if (status != I40E_SUCCESS)
1921                 PMD_DRV_LOG(ERR, "Failed to enable multicast promiscuous");
1922
1923 }
1924
1925 static void
1926 i40e_dev_promiscuous_disable(struct rte_eth_dev *dev)
1927 {
1928         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1929         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1930         struct i40e_vsi *vsi = pf->main_vsi;
1931         int status;
1932
1933         status = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid,
1934                                                      false, NULL, true);
1935         if (status != I40E_SUCCESS)
1936                 PMD_DRV_LOG(ERR, "Failed to disable unicast promiscuous");
1937
1938         status = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid,
1939                                                         false, NULL);
1940         if (status != I40E_SUCCESS)
1941                 PMD_DRV_LOG(ERR, "Failed to disable multicast promiscuous");
1942 }
1943
1944 static void
1945 i40e_dev_allmulticast_enable(struct rte_eth_dev *dev)
1946 {
1947         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1948         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1949         struct i40e_vsi *vsi = pf->main_vsi;
1950         int ret;
1951
1952         ret = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid, TRUE, NULL);
1953         if (ret != I40E_SUCCESS)
1954                 PMD_DRV_LOG(ERR, "Failed to enable multicast promiscuous");
1955 }
1956
1957 static void
1958 i40e_dev_allmulticast_disable(struct rte_eth_dev *dev)
1959 {
1960         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1961         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1962         struct i40e_vsi *vsi = pf->main_vsi;
1963         int ret;
1964
1965         if (dev->data->promiscuous == 1)
1966                 return; /* must remain in all_multicast mode */
1967
1968         ret = i40e_aq_set_vsi_multicast_promiscuous(hw,
1969                                 vsi->seid, FALSE, NULL);
1970         if (ret != I40E_SUCCESS)
1971                 PMD_DRV_LOG(ERR, "Failed to disable multicast promiscuous");
1972 }
1973
/*
 * Set device link up: bringing the link up is simply re-applying the
 * link speed configured in dev_conf.
 */
static int
i40e_dev_set_link_up(struct rte_eth_dev *dev)
{
	return i40e_apply_link_speed(dev);
}
1983
1984 /*
1985  * Set device link down.
1986  */
1987 static int
1988 i40e_dev_set_link_down(struct rte_eth_dev *dev)
1989 {
1990         uint8_t speed = I40E_LINK_SPEED_UNKNOWN;
1991         uint8_t abilities = 0;
1992         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1993
1994         abilities = I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
1995         return i40e_phy_conf_link(hw, abilities, speed);
1996 }
1997
/* Query the link status from firmware and publish it into dev->data.
 * When @wait_to_complete is set, polls up to MAX_REPEAT_TIME times at
 * CHECK_INTERVAL ms for the link to come up.
 *
 * Returns 0 when the published status changed, -1 when it is unchanged.
 */
int
i40e_dev_link_update(struct rte_eth_dev *dev,
		     int wait_to_complete)
{
#define CHECK_INTERVAL 100  /* 100ms */
#define MAX_REPEAT_TIME 10  /* 1s (10 * 100ms) in total */
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct i40e_link_status link_status;
	struct rte_eth_link link, old;
	int status;
	unsigned rep_cnt = MAX_REPEAT_TIME;
	/* Passing enable_lse also (re-)arms link status event reporting */
	bool enable_lse = dev->data->dev_conf.intr_conf.lsc ? true : false;

	memset(&link, 0, sizeof(link));
	memset(&old, 0, sizeof(old));
	memset(&link_status, 0, sizeof(link_status));
	/* Snapshot the currently published status to detect a change */
	rte_i40e_dev_atomic_read_link_status(dev, &old);

	do {
		/* Get link status information from hardware */
		status = i40e_aq_get_link_info(hw, enable_lse,
						&link_status, NULL);
		if (status != I40E_SUCCESS) {
			link.link_speed = ETH_SPEED_NUM_100M;
			link.link_duplex = ETH_LINK_FULL_DUPLEX;
			PMD_DRV_LOG(ERR, "Failed to get link info");
			goto out;
		}

		link.link_status = link_status.link_info & I40E_AQ_LINK_UP;
		if (!wait_to_complete || link.link_status)
			break;

		rte_delay_ms(CHECK_INTERVAL);
	} while (--rep_cnt);

	/* Link down: publish the zeroed link struct as-is */
	if (!link.link_status)
		goto out;

	/* i40e uses full duplex only */
	link.link_duplex = ETH_LINK_FULL_DUPLEX;

	/* Parse the link status */
	switch (link_status.link_speed) {
	case I40E_LINK_SPEED_100MB:
		link.link_speed = ETH_SPEED_NUM_100M;
		break;
	case I40E_LINK_SPEED_1GB:
		link.link_speed = ETH_SPEED_NUM_1G;
		break;
	case I40E_LINK_SPEED_10GB:
		link.link_speed = ETH_SPEED_NUM_10G;
		break;
	case I40E_LINK_SPEED_20GB:
		link.link_speed = ETH_SPEED_NUM_20G;
		break;
	case I40E_LINK_SPEED_25GB:
		link.link_speed = ETH_SPEED_NUM_25G;
		break;
	case I40E_LINK_SPEED_40GB:
		link.link_speed = ETH_SPEED_NUM_40G;
		break;
	default:
		link.link_speed = ETH_SPEED_NUM_100M;
		break;
	}

	link.link_autoneg = !(dev->data->dev_conf.link_speeds &
			ETH_LINK_SPEED_FIXED);

out:
	rte_i40e_dev_atomic_write_link_status(dev, &link);
	if (link.link_status == old.link_status)
		return -1;

	/* Status changed: propagate the new link state to all VFs */
	i40e_notify_all_vfs_link_status(dev);

	return 0;
}
2077
/* Get all the statistics of a VSI: harvest the hardware counters indexed
 * by the VSI's stat_counter_idx into vsi->eth_stats. On the first call
 * (offset_loaded == false) the current register values become the
 * baseline offsets; subsequent calls accumulate deltas.
 */
void
i40e_update_vsi_stats(struct i40e_vsi *vsi)
{
	struct i40e_eth_stats *oes = &vsi->eth_stats_offset;
	struct i40e_eth_stats *nes = &vsi->eth_stats;
	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
	int idx = rte_le_to_cpu_16(vsi->info.stat_counter_idx);

	/* 48-bit counters are split across a high and a low register */
	i40e_stat_update_48(hw, I40E_GLV_GORCH(idx), I40E_GLV_GORCL(idx),
			    vsi->offset_loaded, &oes->rx_bytes,
			    &nes->rx_bytes);
	i40e_stat_update_48(hw, I40E_GLV_UPRCH(idx), I40E_GLV_UPRCL(idx),
			    vsi->offset_loaded, &oes->rx_unicast,
			    &nes->rx_unicast);
	i40e_stat_update_48(hw, I40E_GLV_MPRCH(idx), I40E_GLV_MPRCL(idx),
			    vsi->offset_loaded, &oes->rx_multicast,
			    &nes->rx_multicast);
	i40e_stat_update_48(hw, I40E_GLV_BPRCH(idx), I40E_GLV_BPRCL(idx),
			    vsi->offset_loaded, &oes->rx_broadcast,
			    &nes->rx_broadcast);
	i40e_stat_update_32(hw, I40E_GLV_RDPC(idx), vsi->offset_loaded,
			    &oes->rx_discards, &nes->rx_discards);
	/* GLV_REPC not supported */
	/* GLV_RMPC not supported */
	i40e_stat_update_32(hw, I40E_GLV_RUPP(idx), vsi->offset_loaded,
			    &oes->rx_unknown_protocol,
			    &nes->rx_unknown_protocol);
	i40e_stat_update_48(hw, I40E_GLV_GOTCH(idx), I40E_GLV_GOTCL(idx),
			    vsi->offset_loaded, &oes->tx_bytes,
			    &nes->tx_bytes);
	i40e_stat_update_48(hw, I40E_GLV_UPTCH(idx), I40E_GLV_UPTCL(idx),
			    vsi->offset_loaded, &oes->tx_unicast,
			    &nes->tx_unicast);
	i40e_stat_update_48(hw, I40E_GLV_MPTCH(idx), I40E_GLV_MPTCL(idx),
			    vsi->offset_loaded, &oes->tx_multicast,
			    &nes->tx_multicast);
	i40e_stat_update_48(hw, I40E_GLV_BPTCH(idx), I40E_GLV_BPTCL(idx),
			    vsi->offset_loaded,  &oes->tx_broadcast,
			    &nes->tx_broadcast);
	/* GLV_TDPC not supported */
	i40e_stat_update_32(hw, I40E_GLV_TEPC(idx), vsi->offset_loaded,
			    &oes->tx_errors, &nes->tx_errors);
	/* From now on, compute deltas against the captured offsets */
	vsi->offset_loaded = true;

	PMD_DRV_LOG(DEBUG, "***************** VSI[%u] stats start *******************",
		    vsi->vsi_id);
	PMD_DRV_LOG(DEBUG, "rx_bytes:            %"PRIu64"", nes->rx_bytes);
	PMD_DRV_LOG(DEBUG, "rx_unicast:          %"PRIu64"", nes->rx_unicast);
	PMD_DRV_LOG(DEBUG, "rx_multicast:        %"PRIu64"", nes->rx_multicast);
	PMD_DRV_LOG(DEBUG, "rx_broadcast:        %"PRIu64"", nes->rx_broadcast);
	PMD_DRV_LOG(DEBUG, "rx_discards:         %"PRIu64"", nes->rx_discards);
	PMD_DRV_LOG(DEBUG, "rx_unknown_protocol: %"PRIu64"",
		    nes->rx_unknown_protocol);
	PMD_DRV_LOG(DEBUG, "tx_bytes:            %"PRIu64"", nes->tx_bytes);
	PMD_DRV_LOG(DEBUG, "tx_unicast:          %"PRIu64"", nes->tx_unicast);
	PMD_DRV_LOG(DEBUG, "tx_multicast:        %"PRIu64"", nes->tx_multicast);
	PMD_DRV_LOG(DEBUG, "tx_broadcast:        %"PRIu64"", nes->tx_broadcast);
	PMD_DRV_LOG(DEBUG, "tx_discards:         %"PRIu64"", nes->tx_discards);
	PMD_DRV_LOG(DEBUG, "tx_errors:           %"PRIu64"", nes->tx_errors);
	PMD_DRV_LOG(DEBUG, "***************** VSI[%u] stats end *******************",
		    vsi->vsi_id);
}
2141
/*
 * Read every hardware statistics register of the port and fold the raw
 * counter values into pf->stats (cumulative, rollover-corrected).
 *
 * The i40e_stat_update_32/48 helpers read the current register value and
 * accumulate the delta against the saved offset ("os"); on the first call
 * (pf->offset_loaded == false) they only latch the offset.  After the port
 * counters are read, the main VSI statistics are refreshed as well.
 */
static void
i40e_read_stats_registers(struct i40e_pf *pf, struct i40e_hw *hw)
{
        unsigned int i;
        struct i40e_hw_port_stats *ns = &pf->stats; /* new stats */
        struct i40e_hw_port_stats *os = &pf->stats_offset; /* old stats */

        /* Get statistics of struct i40e_eth_stats */
        i40e_stat_update_48(hw, I40E_GLPRT_GORCH(hw->port),
                            I40E_GLPRT_GORCL(hw->port),
                            pf->offset_loaded, &os->eth.rx_bytes,
                            &ns->eth.rx_bytes);
        i40e_stat_update_48(hw, I40E_GLPRT_UPRCH(hw->port),
                            I40E_GLPRT_UPRCL(hw->port),
                            pf->offset_loaded, &os->eth.rx_unicast,
                            &ns->eth.rx_unicast);
        i40e_stat_update_48(hw, I40E_GLPRT_MPRCH(hw->port),
                            I40E_GLPRT_MPRCL(hw->port),
                            pf->offset_loaded, &os->eth.rx_multicast,
                            &ns->eth.rx_multicast);
        i40e_stat_update_48(hw, I40E_GLPRT_BPRCH(hw->port),
                            I40E_GLPRT_BPRCL(hw->port),
                            pf->offset_loaded, &os->eth.rx_broadcast,
                            &ns->eth.rx_broadcast);
        /* Workaround: CRC size should not be included in byte statistics,
         * so subtract ETHER_CRC_LEN from the byte counter for each rx packet.
         */
        ns->eth.rx_bytes -= (ns->eth.rx_unicast + ns->eth.rx_multicast +
                ns->eth.rx_broadcast) * ETHER_CRC_LEN;

        i40e_stat_update_32(hw, I40E_GLPRT_RDPC(hw->port),
                            pf->offset_loaded, &os->eth.rx_discards,
                            &ns->eth.rx_discards);
        /* GLPRT_REPC not supported */
        /* GLPRT_RMPC not supported */
        i40e_stat_update_32(hw, I40E_GLPRT_RUPP(hw->port),
                            pf->offset_loaded,
                            &os->eth.rx_unknown_protocol,
                            &ns->eth.rx_unknown_protocol);
        i40e_stat_update_48(hw, I40E_GLPRT_GOTCH(hw->port),
                            I40E_GLPRT_GOTCL(hw->port),
                            pf->offset_loaded, &os->eth.tx_bytes,
                            &ns->eth.tx_bytes);
        i40e_stat_update_48(hw, I40E_GLPRT_UPTCH(hw->port),
                            I40E_GLPRT_UPTCL(hw->port),
                            pf->offset_loaded, &os->eth.tx_unicast,
                            &ns->eth.tx_unicast);
        i40e_stat_update_48(hw, I40E_GLPRT_MPTCH(hw->port),
                            I40E_GLPRT_MPTCL(hw->port),
                            pf->offset_loaded, &os->eth.tx_multicast,
                            &ns->eth.tx_multicast);
        i40e_stat_update_48(hw, I40E_GLPRT_BPTCH(hw->port),
                            I40E_GLPRT_BPTCL(hw->port),
                            pf->offset_loaded, &os->eth.tx_broadcast,
                            &ns->eth.tx_broadcast);
        /* Same CRC workaround for the Tx byte counter. */
        ns->eth.tx_bytes -= (ns->eth.tx_unicast + ns->eth.tx_multicast +
                ns->eth.tx_broadcast) * ETHER_CRC_LEN;
        /* GLPRT_TEPC not supported */

        /* additional port specific stats */
        i40e_stat_update_32(hw, I40E_GLPRT_TDOLD(hw->port),
                            pf->offset_loaded, &os->tx_dropped_link_down,
                            &ns->tx_dropped_link_down);
        i40e_stat_update_32(hw, I40E_GLPRT_CRCERRS(hw->port),
                            pf->offset_loaded, &os->crc_errors,
                            &ns->crc_errors);
        i40e_stat_update_32(hw, I40E_GLPRT_ILLERRC(hw->port),
                            pf->offset_loaded, &os->illegal_bytes,
                            &ns->illegal_bytes);
        /* GLPRT_ERRBC not supported */
        i40e_stat_update_32(hw, I40E_GLPRT_MLFC(hw->port),
                            pf->offset_loaded, &os->mac_local_faults,
                            &ns->mac_local_faults);
        i40e_stat_update_32(hw, I40E_GLPRT_MRFC(hw->port),
                            pf->offset_loaded, &os->mac_remote_faults,
                            &ns->mac_remote_faults);
        i40e_stat_update_32(hw, I40E_GLPRT_RLEC(hw->port),
                            pf->offset_loaded, &os->rx_length_errors,
                            &ns->rx_length_errors);
        /* Link-level and per-priority (8 TCs) flow control counters. */
        i40e_stat_update_32(hw, I40E_GLPRT_LXONRXC(hw->port),
                            pf->offset_loaded, &os->link_xon_rx,
                            &ns->link_xon_rx);
        i40e_stat_update_32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
                            pf->offset_loaded, &os->link_xoff_rx,
                            &ns->link_xoff_rx);
        for (i = 0; i < 8; i++) {
                i40e_stat_update_32(hw, I40E_GLPRT_PXONRXC(hw->port, i),
                                    pf->offset_loaded,
                                    &os->priority_xon_rx[i],
                                    &ns->priority_xon_rx[i]);
                i40e_stat_update_32(hw, I40E_GLPRT_PXOFFRXC(hw->port, i),
                                    pf->offset_loaded,
                                    &os->priority_xoff_rx[i],
                                    &ns->priority_xoff_rx[i]);
        }
        i40e_stat_update_32(hw, I40E_GLPRT_LXONTXC(hw->port),
                            pf->offset_loaded, &os->link_xon_tx,
                            &ns->link_xon_tx);
        i40e_stat_update_32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
                            pf->offset_loaded, &os->link_xoff_tx,
                            &ns->link_xoff_tx);
        for (i = 0; i < 8; i++) {
                i40e_stat_update_32(hw, I40E_GLPRT_PXONTXC(hw->port, i),
                                    pf->offset_loaded,
                                    &os->priority_xon_tx[i],
                                    &ns->priority_xon_tx[i]);
                i40e_stat_update_32(hw, I40E_GLPRT_PXOFFTXC(hw->port, i),
                                    pf->offset_loaded,
                                    &os->priority_xoff_tx[i],
                                    &ns->priority_xoff_tx[i]);
                i40e_stat_update_32(hw, I40E_GLPRT_RXON2OFFCNT(hw->port, i),
                                    pf->offset_loaded,
                                    &os->priority_xon_2_xoff[i],
                                    &ns->priority_xon_2_xoff[i]);
        }
        /* Rx packet size histogram counters. */
        i40e_stat_update_48(hw, I40E_GLPRT_PRC64H(hw->port),
                            I40E_GLPRT_PRC64L(hw->port),
                            pf->offset_loaded, &os->rx_size_64,
                            &ns->rx_size_64);
        i40e_stat_update_48(hw, I40E_GLPRT_PRC127H(hw->port),
                            I40E_GLPRT_PRC127L(hw->port),
                            pf->offset_loaded, &os->rx_size_127,
                            &ns->rx_size_127);
        i40e_stat_update_48(hw, I40E_GLPRT_PRC255H(hw->port),
                            I40E_GLPRT_PRC255L(hw->port),
                            pf->offset_loaded, &os->rx_size_255,
                            &ns->rx_size_255);
        i40e_stat_update_48(hw, I40E_GLPRT_PRC511H(hw->port),
                            I40E_GLPRT_PRC511L(hw->port),
                            pf->offset_loaded, &os->rx_size_511,
                            &ns->rx_size_511);
        i40e_stat_update_48(hw, I40E_GLPRT_PRC1023H(hw->port),
                            I40E_GLPRT_PRC1023L(hw->port),
                            pf->offset_loaded, &os->rx_size_1023,
                            &ns->rx_size_1023);
        i40e_stat_update_48(hw, I40E_GLPRT_PRC1522H(hw->port),
                            I40E_GLPRT_PRC1522L(hw->port),
                            pf->offset_loaded, &os->rx_size_1522,
                            &ns->rx_size_1522);
        i40e_stat_update_48(hw, I40E_GLPRT_PRC9522H(hw->port),
                            I40E_GLPRT_PRC9522L(hw->port),
                            pf->offset_loaded, &os->rx_size_big,
                            &ns->rx_size_big);
        i40e_stat_update_32(hw, I40E_GLPRT_RUC(hw->port),
                            pf->offset_loaded, &os->rx_undersize,
                            &ns->rx_undersize);
        i40e_stat_update_32(hw, I40E_GLPRT_RFC(hw->port),
                            pf->offset_loaded, &os->rx_fragments,
                            &ns->rx_fragments);
        i40e_stat_update_32(hw, I40E_GLPRT_ROC(hw->port),
                            pf->offset_loaded, &os->rx_oversize,
                            &ns->rx_oversize);
        i40e_stat_update_32(hw, I40E_GLPRT_RJC(hw->port),
                            pf->offset_loaded, &os->rx_jabber,
                            &ns->rx_jabber);
        /* Tx packet size histogram counters. */
        i40e_stat_update_48(hw, I40E_GLPRT_PTC64H(hw->port),
                            I40E_GLPRT_PTC64L(hw->port),
                            pf->offset_loaded, &os->tx_size_64,
                            &ns->tx_size_64);
        i40e_stat_update_48(hw, I40E_GLPRT_PTC127H(hw->port),
                            I40E_GLPRT_PTC127L(hw->port),
                            pf->offset_loaded, &os->tx_size_127,
                            &ns->tx_size_127);
        i40e_stat_update_48(hw, I40E_GLPRT_PTC255H(hw->port),
                            I40E_GLPRT_PTC255L(hw->port),
                            pf->offset_loaded, &os->tx_size_255,
                            &ns->tx_size_255);
        i40e_stat_update_48(hw, I40E_GLPRT_PTC511H(hw->port),
                            I40E_GLPRT_PTC511L(hw->port),
                            pf->offset_loaded, &os->tx_size_511,
                            &ns->tx_size_511);
        i40e_stat_update_48(hw, I40E_GLPRT_PTC1023H(hw->port),
                            I40E_GLPRT_PTC1023L(hw->port),
                            pf->offset_loaded, &os->tx_size_1023,
                            &ns->tx_size_1023);
        i40e_stat_update_48(hw, I40E_GLPRT_PTC1522H(hw->port),
                            I40E_GLPRT_PTC1522L(hw->port),
                            pf->offset_loaded, &os->tx_size_1522,
                            &ns->tx_size_1522);
        i40e_stat_update_48(hw, I40E_GLPRT_PTC9522H(hw->port),
                            I40E_GLPRT_PTC9522L(hw->port),
                            pf->offset_loaded, &os->tx_size_big,
                            &ns->tx_size_big);
        /* Flow director sideband-filter match counter. */
        i40e_stat_update_32(hw, I40E_GLQF_PCNT(pf->fdir.match_counter_index),
                           pf->offset_loaded,
                           &os->fd_sb_match, &ns->fd_sb_match);
        /* GLPRT_MSPDC not supported */
        /* GLPRT_XEC not supported */

        /* Offsets are now valid; subsequent calls accumulate deltas. */
        pf->offset_loaded = true;

        if (pf->main_vsi)
                i40e_update_vsi_stats(pf->main_vsi);
}
2336
2337 /* Get all statistics of a port */
2338 static void
2339 i40e_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
2340 {
2341         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2342         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2343         struct i40e_hw_port_stats *ns = &pf->stats; /* new stats */
2344         unsigned i;
2345
2346         /* call read registers - updates values, now write them to struct */
2347         i40e_read_stats_registers(pf, hw);
2348
2349         stats->ipackets = pf->main_vsi->eth_stats.rx_unicast +
2350                         pf->main_vsi->eth_stats.rx_multicast +
2351                         pf->main_vsi->eth_stats.rx_broadcast -
2352                         pf->main_vsi->eth_stats.rx_discards;
2353         stats->opackets = pf->main_vsi->eth_stats.tx_unicast +
2354                         pf->main_vsi->eth_stats.tx_multicast +
2355                         pf->main_vsi->eth_stats.tx_broadcast;
2356         stats->ibytes   = ns->eth.rx_bytes;
2357         stats->obytes   = ns->eth.tx_bytes;
2358         stats->oerrors  = ns->eth.tx_errors +
2359                         pf->main_vsi->eth_stats.tx_errors;
2360
2361         /* Rx Errors */
2362         stats->imissed  = ns->eth.rx_discards +
2363                         pf->main_vsi->eth_stats.rx_discards;
2364         stats->ierrors  = ns->crc_errors +
2365                         ns->rx_length_errors + ns->rx_undersize +
2366                         ns->rx_oversize + ns->rx_fragments + ns->rx_jabber;
2367
2368         PMD_DRV_LOG(DEBUG, "***************** PF stats start *******************");
2369         PMD_DRV_LOG(DEBUG, "rx_bytes:            %"PRIu64"", ns->eth.rx_bytes);
2370         PMD_DRV_LOG(DEBUG, "rx_unicast:          %"PRIu64"", ns->eth.rx_unicast);
2371         PMD_DRV_LOG(DEBUG, "rx_multicast:        %"PRIu64"", ns->eth.rx_multicast);
2372         PMD_DRV_LOG(DEBUG, "rx_broadcast:        %"PRIu64"", ns->eth.rx_broadcast);
2373         PMD_DRV_LOG(DEBUG, "rx_discards:         %"PRIu64"", ns->eth.rx_discards);
2374         PMD_DRV_LOG(DEBUG, "rx_unknown_protocol: %"PRIu64"",
2375                     ns->eth.rx_unknown_protocol);
2376         PMD_DRV_LOG(DEBUG, "tx_bytes:            %"PRIu64"", ns->eth.tx_bytes);
2377         PMD_DRV_LOG(DEBUG, "tx_unicast:          %"PRIu64"", ns->eth.tx_unicast);
2378         PMD_DRV_LOG(DEBUG, "tx_multicast:        %"PRIu64"", ns->eth.tx_multicast);
2379         PMD_DRV_LOG(DEBUG, "tx_broadcast:        %"PRIu64"", ns->eth.tx_broadcast);
2380         PMD_DRV_LOG(DEBUG, "tx_discards:         %"PRIu64"", ns->eth.tx_discards);
2381         PMD_DRV_LOG(DEBUG, "tx_errors:           %"PRIu64"", ns->eth.tx_errors);
2382
2383         PMD_DRV_LOG(DEBUG, "tx_dropped_link_down:     %"PRIu64"",
2384                     ns->tx_dropped_link_down);
2385         PMD_DRV_LOG(DEBUG, "crc_errors:               %"PRIu64"", ns->crc_errors);
2386         PMD_DRV_LOG(DEBUG, "illegal_bytes:            %"PRIu64"",
2387                     ns->illegal_bytes);
2388         PMD_DRV_LOG(DEBUG, "error_bytes:              %"PRIu64"", ns->error_bytes);
2389         PMD_DRV_LOG(DEBUG, "mac_local_faults:         %"PRIu64"",
2390                     ns->mac_local_faults);
2391         PMD_DRV_LOG(DEBUG, "mac_remote_faults:        %"PRIu64"",
2392                     ns->mac_remote_faults);
2393         PMD_DRV_LOG(DEBUG, "rx_length_errors:         %"PRIu64"",
2394                     ns->rx_length_errors);
2395         PMD_DRV_LOG(DEBUG, "link_xon_rx:              %"PRIu64"", ns->link_xon_rx);
2396         PMD_DRV_LOG(DEBUG, "link_xoff_rx:             %"PRIu64"", ns->link_xoff_rx);
2397         for (i = 0; i < 8; i++) {
2398                 PMD_DRV_LOG(DEBUG, "priority_xon_rx[%d]:      %"PRIu64"",
2399                                 i, ns->priority_xon_rx[i]);
2400                 PMD_DRV_LOG(DEBUG, "priority_xoff_rx[%d]:     %"PRIu64"",
2401                                 i, ns->priority_xoff_rx[i]);
2402         }
2403         PMD_DRV_LOG(DEBUG, "link_xon_tx:              %"PRIu64"", ns->link_xon_tx);
2404         PMD_DRV_LOG(DEBUG, "link_xoff_tx:             %"PRIu64"", ns->link_xoff_tx);
2405         for (i = 0; i < 8; i++) {
2406                 PMD_DRV_LOG(DEBUG, "priority_xon_tx[%d]:      %"PRIu64"",
2407                                 i, ns->priority_xon_tx[i]);
2408                 PMD_DRV_LOG(DEBUG, "priority_xoff_tx[%d]:     %"PRIu64"",
2409                                 i, ns->priority_xoff_tx[i]);
2410                 PMD_DRV_LOG(DEBUG, "priority_xon_2_xoff[%d]:  %"PRIu64"",
2411                                 i, ns->priority_xon_2_xoff[i]);
2412         }
2413         PMD_DRV_LOG(DEBUG, "rx_size_64:               %"PRIu64"", ns->rx_size_64);
2414         PMD_DRV_LOG(DEBUG, "rx_size_127:              %"PRIu64"", ns->rx_size_127);
2415         PMD_DRV_LOG(DEBUG, "rx_size_255:              %"PRIu64"", ns->rx_size_255);
2416         PMD_DRV_LOG(DEBUG, "rx_size_511:              %"PRIu64"", ns->rx_size_511);
2417         PMD_DRV_LOG(DEBUG, "rx_size_1023:             %"PRIu64"", ns->rx_size_1023);
2418         PMD_DRV_LOG(DEBUG, "rx_size_1522:             %"PRIu64"", ns->rx_size_1522);
2419         PMD_DRV_LOG(DEBUG, "rx_size_big:              %"PRIu64"", ns->rx_size_big);
2420         PMD_DRV_LOG(DEBUG, "rx_undersize:             %"PRIu64"", ns->rx_undersize);
2421         PMD_DRV_LOG(DEBUG, "rx_fragments:             %"PRIu64"", ns->rx_fragments);
2422         PMD_DRV_LOG(DEBUG, "rx_oversize:              %"PRIu64"", ns->rx_oversize);
2423         PMD_DRV_LOG(DEBUG, "rx_jabber:                %"PRIu64"", ns->rx_jabber);
2424         PMD_DRV_LOG(DEBUG, "tx_size_64:               %"PRIu64"", ns->tx_size_64);
2425         PMD_DRV_LOG(DEBUG, "tx_size_127:              %"PRIu64"", ns->tx_size_127);
2426         PMD_DRV_LOG(DEBUG, "tx_size_255:              %"PRIu64"", ns->tx_size_255);
2427         PMD_DRV_LOG(DEBUG, "tx_size_511:              %"PRIu64"", ns->tx_size_511);
2428         PMD_DRV_LOG(DEBUG, "tx_size_1023:             %"PRIu64"", ns->tx_size_1023);
2429         PMD_DRV_LOG(DEBUG, "tx_size_1522:             %"PRIu64"", ns->tx_size_1522);
2430         PMD_DRV_LOG(DEBUG, "tx_size_big:              %"PRIu64"", ns->tx_size_big);
2431         PMD_DRV_LOG(DEBUG, "mac_short_packet_dropped: %"PRIu64"",
2432                         ns->mac_short_packet_dropped);
2433         PMD_DRV_LOG(DEBUG, "checksum_error:           %"PRIu64"",
2434                     ns->checksum_error);
2435         PMD_DRV_LOG(DEBUG, "fdir_match:               %"PRIu64"", ns->fd_sb_match);
2436         PMD_DRV_LOG(DEBUG, "***************** PF stats end ********************");
2437 }
2438
2439 /* Reset the statistics */
2440 static void
2441 i40e_dev_stats_reset(struct rte_eth_dev *dev)
2442 {
2443         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2444         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2445
2446         /* Mark PF and VSI stats to update the offset, aka "reset" */
2447         pf->offset_loaded = false;
2448         if (pf->main_vsi)
2449                 pf->main_vsi->offset_loaded = false;
2450
2451         /* read the stats, reading current register values into offset */
2452         i40e_read_stats_registers(pf, hw);
2453 }
2454
2455 static uint32_t
2456 i40e_xstats_calc_num(void)
2457 {
2458         return I40E_NB_ETH_XSTATS + I40E_NB_HW_PORT_XSTATS +
2459                 (I40E_NB_RXQ_PRIO_XSTATS * 8) +
2460                 (I40E_NB_TXQ_PRIO_XSTATS * 8);
2461 }
2462
2463 static int i40e_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
2464                                      struct rte_eth_xstat_name *xstats_names,
2465                                      __rte_unused unsigned limit)
2466 {
2467         unsigned count = 0;
2468         unsigned i, prio;
2469
2470         if (xstats_names == NULL)
2471                 return i40e_xstats_calc_num();
2472
2473         /* Note: limit checked in rte_eth_xstats_names() */
2474
2475         /* Get stats from i40e_eth_stats struct */
2476         for (i = 0; i < I40E_NB_ETH_XSTATS; i++) {
2477                 snprintf(xstats_names[count].name,
2478                          sizeof(xstats_names[count].name),
2479                          "%s", rte_i40e_stats_strings[i].name);
2480                 count++;
2481         }
2482
2483         /* Get individiual stats from i40e_hw_port struct */
2484         for (i = 0; i < I40E_NB_HW_PORT_XSTATS; i++) {
2485                 snprintf(xstats_names[count].name,
2486                         sizeof(xstats_names[count].name),
2487                          "%s", rte_i40e_hw_port_strings[i].name);
2488                 count++;
2489         }
2490
2491         for (i = 0; i < I40E_NB_RXQ_PRIO_XSTATS; i++) {
2492                 for (prio = 0; prio < 8; prio++) {
2493                         snprintf(xstats_names[count].name,
2494                                  sizeof(xstats_names[count].name),
2495                                  "rx_priority%u_%s", prio,
2496                                  rte_i40e_rxq_prio_strings[i].name);
2497                         count++;
2498                 }
2499         }
2500
2501         for (i = 0; i < I40E_NB_TXQ_PRIO_XSTATS; i++) {
2502                 for (prio = 0; prio < 8; prio++) {
2503                         snprintf(xstats_names[count].name,
2504                                  sizeof(xstats_names[count].name),
2505                                  "tx_priority%u_%s", prio,
2506                                  rte_i40e_txq_prio_strings[i].name);
2507                         count++;
2508                 }
2509         }
2510         return count;
2511 }
2512
2513 static int
2514 i40e_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
2515                     unsigned n)
2516 {
2517         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2518         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2519         unsigned i, count, prio;
2520         struct i40e_hw_port_stats *hw_stats = &pf->stats;
2521
2522         count = i40e_xstats_calc_num();
2523         if (n < count)
2524                 return count;
2525
2526         i40e_read_stats_registers(pf, hw);
2527
2528         if (xstats == NULL)
2529                 return 0;
2530
2531         count = 0;
2532
2533         /* Get stats from i40e_eth_stats struct */
2534         for (i = 0; i < I40E_NB_ETH_XSTATS; i++) {
2535                 xstats[count].value = *(uint64_t *)(((char *)&hw_stats->eth) +
2536                         rte_i40e_stats_strings[i].offset);
2537                 xstats[count].id = count;
2538                 count++;
2539         }
2540
2541         /* Get individiual stats from i40e_hw_port struct */
2542         for (i = 0; i < I40E_NB_HW_PORT_XSTATS; i++) {
2543                 xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
2544                         rte_i40e_hw_port_strings[i].offset);
2545                 xstats[count].id = count;
2546                 count++;
2547         }
2548
2549         for (i = 0; i < I40E_NB_RXQ_PRIO_XSTATS; i++) {
2550                 for (prio = 0; prio < 8; prio++) {
2551                         xstats[count].value =
2552                                 *(uint64_t *)(((char *)hw_stats) +
2553                                 rte_i40e_rxq_prio_strings[i].offset +
2554                                 (sizeof(uint64_t) * prio));
2555                         xstats[count].id = count;
2556                         count++;
2557                 }
2558         }
2559
2560         for (i = 0; i < I40E_NB_TXQ_PRIO_XSTATS; i++) {
2561                 for (prio = 0; prio < 8; prio++) {
2562                         xstats[count].value =
2563                                 *(uint64_t *)(((char *)hw_stats) +
2564                                 rte_i40e_txq_prio_strings[i].offset +
2565                                 (sizeof(uint64_t) * prio));
2566                         xstats[count].id = count;
2567                         count++;
2568                 }
2569         }
2570
2571         return count;
2572 }
2573
/* Per-queue statistics register mapping is not supported by this PMD;
 * always returns -ENOSYS.  The callback exists so rte_ethdev reports
 * "not supported" instead of "not implemented" to applications.
 */
static int
i40e_dev_queue_stats_mapping_set(__rte_unused struct rte_eth_dev *dev,
                                 __rte_unused uint16_t queue_id,
                                 __rte_unused uint8_t stat_idx,
                                 __rte_unused uint8_t is_rx)
{
        PMD_INIT_FUNC_TRACE();

        return -ENOSYS;
}
2584
/* Report the device capabilities and default configuration of the port:
 * queue limits, offload capabilities, RSS parameters, default thresholds,
 * descriptor ring limits, optional VMDq resources and link speeds.
 */
static void
i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
        struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct i40e_vsi *vsi = pf->main_vsi;

        /* Queue and address limits come from the main VSI. */
        dev_info->max_rx_queues = vsi->nb_qps;
        dev_info->max_tx_queues = vsi->nb_qps;
        dev_info->min_rx_bufsize = I40E_BUF_SIZE_MIN;
        dev_info->max_rx_pktlen = I40E_FRAME_SIZE_MAX;
        dev_info->max_mac_addrs = vsi->max_macaddrs;
        dev_info->max_vfs = dev->pci_dev->max_vfs;
        dev_info->rx_offload_capa =
                DEV_RX_OFFLOAD_VLAN_STRIP |
                DEV_RX_OFFLOAD_QINQ_STRIP |
                DEV_RX_OFFLOAD_IPV4_CKSUM |
                DEV_RX_OFFLOAD_UDP_CKSUM |
                DEV_RX_OFFLOAD_TCP_CKSUM;
        dev_info->tx_offload_capa =
                DEV_TX_OFFLOAD_VLAN_INSERT |
                DEV_TX_OFFLOAD_QINQ_INSERT |
                DEV_TX_OFFLOAD_IPV4_CKSUM |
                DEV_TX_OFFLOAD_UDP_CKSUM |
                DEV_TX_OFFLOAD_TCP_CKSUM |
                DEV_TX_OFFLOAD_SCTP_CKSUM |
                DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
                DEV_TX_OFFLOAD_TCP_TSO |
                DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
                DEV_TX_OFFLOAD_GRE_TNL_TSO |
                DEV_TX_OFFLOAD_IPIP_TNL_TSO |
                DEV_TX_OFFLOAD_GENEVE_TNL_TSO;
        /* RSS hash key length in bytes (registers are 32-bit words). */
        dev_info->hash_key_size = (I40E_PFQF_HKEY_MAX_INDEX + 1) *
                                                sizeof(uint32_t);
        dev_info->reta_size = pf->hash_lut_size;
        dev_info->flow_type_rss_offloads = I40E_RSS_OFFLOAD_ALL;

        dev_info->default_rxconf = (struct rte_eth_rxconf) {
                .rx_thresh = {
                        .pthresh = I40E_DEFAULT_RX_PTHRESH,
                        .hthresh = I40E_DEFAULT_RX_HTHRESH,
                        .wthresh = I40E_DEFAULT_RX_WTHRESH,
                },
                .rx_free_thresh = I40E_DEFAULT_RX_FREE_THRESH,
                .rx_drop_en = 0,
        };

        dev_info->default_txconf = (struct rte_eth_txconf) {
                .tx_thresh = {
                        .pthresh = I40E_DEFAULT_TX_PTHRESH,
                        .hthresh = I40E_DEFAULT_TX_HTHRESH,
                        .wthresh = I40E_DEFAULT_TX_WTHRESH,
                },
                .tx_free_thresh = I40E_DEFAULT_TX_FREE_THRESH,
                .tx_rs_thresh = I40E_DEFAULT_TX_RSBIT_THRESH,
                .txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
                                ETH_TXQ_FLAGS_NOOFFLOADS,
        };

        dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
                .nb_max = I40E_MAX_RING_DESC,
                .nb_min = I40E_MIN_RING_DESC,
                .nb_align = I40E_ALIGN_RING_DESC,
        };

        dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
                .nb_max = I40E_MAX_RING_DESC,
                .nb_min = I40E_MIN_RING_DESC,
                .nb_align = I40E_ALIGN_RING_DESC,
        };

        /* VMDq queues are appended after the main VSI's queue range. */
        if (pf->flags & I40E_FLAG_VMDQ) {
                dev_info->max_vmdq_pools = pf->max_nb_vmdq_vsi;
                dev_info->vmdq_queue_base = dev_info->max_rx_queues;
                dev_info->vmdq_queue_num = pf->vmdq_nb_qps *
                                                pf->max_nb_vmdq_vsi;
                dev_info->vmdq_pool_base = I40E_VMDQ_POOL_BASE;
                dev_info->max_rx_queues += dev_info->vmdq_queue_num;
                dev_info->max_tx_queues += dev_info->vmdq_queue_num;
        }

        /* Advertised link speed depends on the PHY family. */
        if (I40E_PHY_TYPE_SUPPORT_40G(hw->phy.phy_types))
                /* For XL710 */
                dev_info->speed_capa = ETH_LINK_SPEED_40G;
        else if (I40E_PHY_TYPE_SUPPORT_25G(hw->phy.phy_types))
                /* For XXV710 */
                dev_info->speed_capa = ETH_LINK_SPEED_25G;
        else
                /* For X710 */
                dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G;
}
2676
2677 static int
2678 i40e_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
2679 {
2680         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2681         struct i40e_vsi *vsi = pf->main_vsi;
2682         PMD_INIT_FUNC_TRACE();
2683
2684         if (on)
2685                 return i40e_vsi_add_vlan(vsi, vlan_id);
2686         else
2687                 return i40e_vsi_delete_vlan(vsi, vlan_id);
2688 }
2689
/* Configure the ether type (TPID) used for outer or inner VLAN tags.
 *
 * The TPID lives in one of the global I40E_GL_SWT_L2TAGCTRL registers;
 * which register index applies depends on whether QinQ (double VLAN,
 * rxmode.hw_vlan_extend) is enabled.  Returns 0 on success, -EINVAL for
 * an unsupported vlan_type, or -EIO when the AQ register access fails.
 */
static int
i40e_vlan_tpid_set(struct rte_eth_dev *dev,
		   enum rte_vlan_type vlan_type,
		   uint16_t tpid)
{
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint64_t reg_r = 0, reg_w = 0;
	uint16_t reg_id = 0;
	int ret = 0;
	int qinq = dev->data->dev_conf.rxmode.hw_vlan_extend;

	switch (vlan_type) {
	case ETH_VLAN_TYPE_OUTER:
		/* NOTE(review): index 2 vs 3 presumably selects the HW tag
		 * slot used for the outer/single tag - confirm against the
		 * X710/XL710 datasheet.
		 */
		if (qinq)
			reg_id = 2;
		else
			reg_id = 3;
		break;
	case ETH_VLAN_TYPE_INNER:
		if (qinq)
			reg_id = 3;
		else {
			/* An inner tag only exists in QinQ mode */
			ret = -EINVAL;
			PMD_DRV_LOG(ERR,
				"Unsupported vlan type in single vlan.\n");
			return ret;
		}
		break;
	default:
		ret = -EINVAL;
		PMD_DRV_LOG(ERR, "Unsupported vlan type %d", vlan_type);
		return ret;
	}
	/* Read-modify-write the register through the admin queue */
	ret = i40e_aq_debug_read_register(hw, I40E_GL_SWT_L2TAGCTRL(reg_id),
					  &reg_r, NULL);
	if (ret != I40E_SUCCESS) {
		PMD_DRV_LOG(ERR, "Fail to debug read from "
			    "I40E_GL_SWT_L2TAGCTRL[%d]", reg_id);
		ret = -EIO;
		return ret;
	}
	PMD_DRV_LOG(DEBUG, "Debug read from I40E_GL_SWT_L2TAGCTRL[%d]: "
		    "0x%08"PRIx64"", reg_id, reg_r);

	/* Replace only the ethertype field; keep every other bit intact */
	reg_w = reg_r & (~(I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_MASK));
	reg_w |= ((uint64_t)tpid << I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_SHIFT);
	if (reg_r == reg_w) {
		/* Requested TPID is already programmed; skip the AQ write */
		ret = 0;
		PMD_DRV_LOG(DEBUG, "No need to write");
		return ret;
	}

	ret = i40e_aq_debug_write_register(hw, I40E_GL_SWT_L2TAGCTRL(reg_id),
					   reg_w, NULL);
	if (ret != I40E_SUCCESS) {
		ret = -EIO;
		PMD_DRV_LOG(ERR, "Fail to debug write to "
			    "I40E_GL_SWT_L2TAGCTRL[%d]", reg_id);
		return ret;
	}
	PMD_DRV_LOG(DEBUG, "Debug write 0x%08"PRIx64" to "
		    "I40E_GL_SWT_L2TAGCTRL[%d]", reg_w, reg_id);

	return ret;
}
2755
2756 static void
2757 i40e_vlan_offload_set(struct rte_eth_dev *dev, int mask)
2758 {
2759         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2760         struct i40e_vsi *vsi = pf->main_vsi;
2761
2762         if (mask & ETH_VLAN_FILTER_MASK) {
2763                 if (dev->data->dev_conf.rxmode.hw_vlan_filter)
2764                         i40e_vsi_config_vlan_filter(vsi, TRUE);
2765                 else
2766                         i40e_vsi_config_vlan_filter(vsi, FALSE);
2767         }
2768
2769         if (mask & ETH_VLAN_STRIP_MASK) {
2770                 /* Enable or disable VLAN stripping */
2771                 if (dev->data->dev_conf.rxmode.hw_vlan_strip)
2772                         i40e_vsi_config_vlan_stripping(vsi, TRUE);
2773                 else
2774                         i40e_vsi_config_vlan_stripping(vsi, FALSE);
2775         }
2776
2777         if (mask & ETH_VLAN_EXTEND_MASK) {
2778                 if (dev->data->dev_conf.rxmode.hw_vlan_extend) {
2779                         i40e_vsi_config_double_vlan(vsi, TRUE);
2780                         /* Set global registers with default ether type value */
2781                         i40e_vlan_tpid_set(dev, ETH_VLAN_TYPE_OUTER,
2782                                            ETHER_TYPE_VLAN);
2783                         i40e_vlan_tpid_set(dev, ETH_VLAN_TYPE_INNER,
2784                                            ETHER_TYPE_VLAN);
2785                 }
2786                 else
2787                         i40e_vsi_config_double_vlan(vsi, FALSE);
2788         }
2789 }
2790
/* Per-queue VLAN stripping is not supported by this driver; this stub
 * only exists to populate the ethdev ops table.
 */
static void
i40e_vlan_strip_queue_set(__rte_unused struct rte_eth_dev *dev,
			  __rte_unused uint16_t queue,
			  __rte_unused int on)
{
	PMD_INIT_FUNC_TRACE();
}
2798
2799 static int
2800 i40e_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid, int on)
2801 {
2802         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2803         struct i40e_vsi *vsi = pf->main_vsi;
2804         struct rte_eth_dev_data *data = I40E_VSI_TO_DEV_DATA(vsi);
2805         struct i40e_vsi_vlan_pvid_info info;
2806
2807         memset(&info, 0, sizeof(info));
2808         info.on = on;
2809         if (info.on)
2810                 info.config.pvid = pvid;
2811         else {
2812                 info.config.reject.tagged =
2813                                 data->dev_conf.txmode.hw_vlan_reject_tagged;
2814                 info.config.reject.untagged =
2815                                 data->dev_conf.txmode.hw_vlan_reject_untagged;
2816         }
2817
2818         return i40e_vsi_vlan_pvid_set(vsi, &info);
2819 }
2820
2821 static int
2822 i40e_dev_led_on(struct rte_eth_dev *dev)
2823 {
2824         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2825         uint32_t mode = i40e_led_get(hw);
2826
2827         if (mode == 0)
2828                 i40e_led_set(hw, 0xf, true); /* 0xf means led always true */
2829
2830         return 0;
2831 }
2832
2833 static int
2834 i40e_dev_led_off(struct rte_eth_dev *dev)
2835 {
2836         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2837         uint32_t mode = i40e_led_get(hw);
2838
2839         if (mode != 0)
2840                 i40e_led_set(hw, 0, false);
2841
2842         return 0;
2843 }
2844
2845 static int
2846 i40e_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
2847 {
2848         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2849         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2850
2851         fc_conf->pause_time = pf->fc_conf.pause_time;
2852         fc_conf->high_water =  pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS];
2853         fc_conf->low_water = pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS];
2854
2855          /* Return current mode according to actual setting*/
2856         switch (hw->fc.current_mode) {
2857         case I40E_FC_FULL:
2858                 fc_conf->mode = RTE_FC_FULL;
2859                 break;
2860         case I40E_FC_TX_PAUSE:
2861                 fc_conf->mode = RTE_FC_TX_PAUSE;
2862                 break;
2863         case I40E_FC_RX_PAUSE:
2864                 fc_conf->mode = RTE_FC_RX_PAUSE;
2865                 break;
2866         case I40E_FC_NONE:
2867         default:
2868                 fc_conf->mode = RTE_FC_NONE;
2869         };
2870
2871         return 0;
2872 }
2873
/* Configure link flow control from @fc_conf.
 *
 * Validates the requested high/low water marks (in KB), caches the
 * settings in the PF, asks the firmware to program the mode, and then
 * writes the pause-time and water-mark registers.  40G (XL710) parts use
 * the PRTMAC_HSEC_* register set; other parts use PRTDCB_*.
 *
 * Returns 0 on success, -EINVAL for bad water marks, -ENOSYS when the
 * firmware rejects the flow control setup.
 */
static int
i40e_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
{
	uint32_t mflcn_reg, fctrl_reg, reg;
	uint32_t max_high_water;
	uint8_t i, aq_failure;
	int err;
	struct i40e_hw *hw;
	struct i40e_pf *pf;
	/* Translation table: RTE flow control mode -> i40e FW mode */
	enum i40e_fc_mode rte_fcmode_2_i40e_fcmode[] = {
		[RTE_FC_NONE] = I40E_FC_NONE,
		[RTE_FC_RX_PAUSE] = I40E_FC_RX_PAUSE,
		[RTE_FC_TX_PAUSE] = I40E_FC_TX_PAUSE,
		[RTE_FC_FULL] = I40E_FC_FULL
	};

	/* high_water field in the rte_eth_fc_conf using the kilobytes unit */

	max_high_water = I40E_RXPBSIZE >> I40E_KILOSHIFT;
	if ((fc_conf->high_water > max_high_water) ||
			(fc_conf->high_water < fc_conf->low_water)) {
		PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB, "
			"High_water must <= %d.", max_high_water);
		return -EINVAL;
	}

	hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	hw->fc.requested_mode = rte_fcmode_2_i40e_fcmode[fc_conf->mode];

	/* Cache the requested settings so flow_ctrl_get can report them */
	pf->fc_conf.pause_time = fc_conf->pause_time;
	pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS] = fc_conf->high_water;
	pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS] = fc_conf->low_water;

	PMD_INIT_FUNC_TRACE();

	/* All the link flow control related enable/disable register
	 * configuration is handle by the F/W
	 */
	err = i40e_set_fc(hw, &aq_failure, true);
	if (err < 0)
		return -ENOSYS;

	if (I40E_PHY_TYPE_SUPPORT_40G(hw->phy.phy_types)) {
		/* Configure flow control refresh threshold,
		 * the value for stat_tx_pause_refresh_timer[8]
		 * is used for global pause operation.
		 */

		I40E_WRITE_REG(hw,
			       I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER(8),
			       pf->fc_conf.pause_time);

		/* configure the timer value included in transmitted pause
		 * frame,
		 * the value for stat_tx_pause_quanta[8] is used for global
		 * pause operation
		 */
		I40E_WRITE_REG(hw, I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA(8),
			       pf->fc_conf.pause_time);

		fctrl_reg = I40E_READ_REG(hw,
					  I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL);

		/* Forward MAC control frames to the host when requested */
		if (fc_conf->mac_ctrl_frame_fwd != 0)
			fctrl_reg |= I40E_PRTMAC_FWD_CTRL;
		else
			fctrl_reg &= ~I40E_PRTMAC_FWD_CTRL;

		I40E_WRITE_REG(hw, I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL,
			       fctrl_reg);
	} else {
		/* Configure pause time (2 TCs per register) */
		reg = (uint32_t)pf->fc_conf.pause_time * (uint32_t)0x00010001;
		for (i = 0; i < I40E_MAX_TRAFFIC_CLASS / 2; i++)
			I40E_WRITE_REG(hw, I40E_PRTDCB_FCTTVN(i), reg);

		/* Configure flow control refresh threshold value */
		I40E_WRITE_REG(hw, I40E_PRTDCB_FCRTV,
			       pf->fc_conf.pause_time / 2);

		mflcn_reg = I40E_READ_REG(hw, I40E_PRTDCB_MFLCN);

		/* set or clear MFLCN.PMCF & MFLCN.DPF bits
		 * depending on configuration
		 */
		if (fc_conf->mac_ctrl_frame_fwd != 0) {
			mflcn_reg |= I40E_PRTDCB_MFLCN_PMCF_MASK;
			mflcn_reg &= ~I40E_PRTDCB_MFLCN_DPF_MASK;
		} else {
			mflcn_reg &= ~I40E_PRTDCB_MFLCN_PMCF_MASK;
			mflcn_reg |= I40E_PRTDCB_MFLCN_DPF_MASK;
		}

		I40E_WRITE_REG(hw, I40E_PRTDCB_MFLCN, mflcn_reg);
	}

	/* config the water marker both based on the packets and bytes */
	I40E_WRITE_REG(hw, I40E_GLRPB_PHW,
		       (pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS]
		       << I40E_KILOSHIFT) / I40E_PACKET_AVERAGE_SIZE);
	I40E_WRITE_REG(hw, I40E_GLRPB_PLW,
		       (pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS]
		       << I40E_KILOSHIFT) / I40E_PACKET_AVERAGE_SIZE);
	I40E_WRITE_REG(hw, I40E_GLRPB_GHW,
		       pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS]
		       << I40E_KILOSHIFT);
	I40E_WRITE_REG(hw, I40E_GLRPB_GLW,
		       pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS]
		       << I40E_KILOSHIFT);

	I40E_WRITE_FLUSH(hw);

	return 0;
}
2989
/* Priority (per-TC) flow control is not implemented by this driver;
 * always reports -ENOSYS to the caller.
 */
static int
i40e_priority_flow_ctrl_set(__rte_unused struct rte_eth_dev *dev,
			    __rte_unused struct rte_eth_pfc_conf *pfc_conf)
{
	PMD_INIT_FUNC_TRACE();

	return -ENOSYS;
}
2998
/* Add a MAC address, and update filters.
 *
 * @pool selects the destination VSI: 0 means the main VSI, pool N > 0
 * means VMDq VSI N-1 (only valid when VMDq is enabled and configured).
 * The filter type follows the current VLAN-filter setting: a MAC+VLAN
 * perfect match when HW VLAN filtering is on, otherwise MAC-only.
 * Errors are logged; the function returns void per the ethdev callback.
 */
static void
i40e_macaddr_add(struct rte_eth_dev *dev,
		 struct ether_addr *mac_addr,
		 __rte_unused uint32_t index,
		 uint32_t pool)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_mac_filter_info mac_filter;
	struct i40e_vsi *vsi;
	int ret;

	/* If VMDQ not enabled or configured, return */
	if (pool != 0 && (!(pf->flags & I40E_FLAG_VMDQ) ||
			  !pf->nb_cfg_vmdq_vsi)) {
		PMD_DRV_LOG(ERR, "VMDQ not %s, can't set mac to pool %u",
			pf->flags & I40E_FLAG_VMDQ ? "configured" : "enabled",
			pool);
		return;
	}

	if (pool > pf->nb_cfg_vmdq_vsi) {
		PMD_DRV_LOG(ERR, "Pool number %u invalid. Max pool is %u",
				pool, pf->nb_cfg_vmdq_vsi);
		return;
	}

	(void)rte_memcpy(&mac_filter.mac_addr, mac_addr, ETHER_ADDR_LEN);
	/* Match on MAC+VLAN only when HW VLAN filtering is active */
	if (dev->data->dev_conf.rxmode.hw_vlan_filter)
		mac_filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
	else
		mac_filter.filter_type = RTE_MAC_PERFECT_MATCH;

	if (pool == 0)
		vsi = pf->main_vsi;
	else
		vsi = pf->vmdq[pool - 1].vsi;

	ret = i40e_vsi_add_mac(vsi, &mac_filter);
	if (ret != I40E_SUCCESS) {
		PMD_DRV_LOG(ERR, "Failed to add MACVLAN filter");
		return;
	}
}
3043
/* Remove a MAC address, and update filters.
 *
 * mac_pool_sel[index] is a bitmask of the pools the address was added
 * to: bit 0 is the main VSI and bit N (N > 0) is VMDq VSI N-1.  The
 * address is deleted from every VSI whose bit is set; on the first
 * failure the function logs an error and stops.
 */
static void
i40e_macaddr_remove(struct rte_eth_dev *dev, uint32_t index)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_vsi *vsi;
	struct rte_eth_dev_data *data = dev->data;
	struct ether_addr *macaddr;
	int ret;
	uint32_t i;
	uint64_t pool_sel;

	macaddr = &(data->mac_addrs[index]);

	pool_sel = dev->data->mac_pool_sel[index];

	/* Walk every bit of the pool-selection mask */
	for (i = 0; i < sizeof(pool_sel) * CHAR_BIT; i++) {
		if (pool_sel & (1ULL << i)) {
			if (i == 0)
				vsi = pf->main_vsi;
			else {
				/* No VMDQ pool enabled or configured */
				if (!(pf->flags & I40E_FLAG_VMDQ) ||
					(i > pf->nb_cfg_vmdq_vsi)) {
					PMD_DRV_LOG(ERR, "No VMDQ pool enabled"
							"/configured");
					return;
				}
				vsi = pf->vmdq[i - 1].vsi;
			}
			ret = i40e_vsi_delete_mac(vsi, macaddr);

			if (ret) {
				PMD_DRV_LOG(ERR, "Failed to remove MACVLAN filter");
				return;
			}
		}
	}
}
3083
/* Set perfect match or hash match of MAC and VLAN for a VF.
 *
 * @pf:     the PF owning the VF
 * @filter: MAC filter spec; dst_id selects the target VF
 * @add:    true to install the filter, false to delete it
 *
 * On add, the new MAC also becomes hw->mac.addr and pf->dev_addr; on
 * delete, hw->mac.addr is restored from the permanent address and
 * pf->dev_addr is cleared if it matched the removed MAC.
 *
 * Returns 0 on success, -EINVAL for bad arguments, -1 when the VSI
 * MAC update fails.
 */
static int
i40e_vf_mac_filter_set(struct i40e_pf *pf,
		 struct rte_eth_mac_filter *filter,
		 bool add)
{
	struct i40e_hw *hw;
	struct i40e_mac_filter_info mac_filter;
	struct ether_addr old_mac;
	struct ether_addr *new_mac;
	struct i40e_pf_vf *vf = NULL;
	uint16_t vf_id;
	int ret;

	if (pf == NULL) {
		PMD_DRV_LOG(ERR, "Invalid PF argument.");
		return -EINVAL;
	}
	hw = I40E_PF_TO_HW(pf);

	if (filter == NULL) {
		PMD_DRV_LOG(ERR, "Invalid mac filter argument.");
		return -EINVAL;
	}

	new_mac = &filter->mac_addr;

	if (is_zero_ether_addr(new_mac)) {
		PMD_DRV_LOG(ERR, "Invalid ethernet address.");
		return -EINVAL;
	}

	vf_id = filter->dst_id;

	/* dst_id must index an existing VF */
	if (vf_id > pf->vf_num - 1 || !pf->vfs) {
		PMD_DRV_LOG(ERR, "Invalid argument.");
		return -EINVAL;
	}
	vf = &pf->vfs[vf_id];

	/* The permanent device address cannot be re-added */
	if (add && is_same_ether_addr(new_mac, &(pf->dev_addr))) {
		PMD_DRV_LOG(INFO, "Ignore adding permanent MAC address.");
		return -EINVAL;
	}

	if (add) {
		/* Mirror the new MAC into hw->mac.addr before installing
		 * the VSI filter.
		 */
		(void)rte_memcpy(&old_mac, hw->mac.addr, ETHER_ADDR_LEN);
		(void)rte_memcpy(hw->mac.addr, new_mac->addr_bytes,
				ETHER_ADDR_LEN);
		(void)rte_memcpy(&mac_filter.mac_addr, &filter->mac_addr,
				 ETHER_ADDR_LEN);

		mac_filter.filter_type = filter->filter_type;
		ret = i40e_vsi_add_mac(vf->vsi, &mac_filter);
		if (ret != I40E_SUCCESS) {
			PMD_DRV_LOG(ERR, "Failed to add MAC filter.");
			return -1;
		}
		ether_addr_copy(new_mac, &pf->dev_addr);
	} else {
		/* Restore the permanent MAC before removing the filter */
		(void)rte_memcpy(hw->mac.addr, hw->mac.perm_addr,
				ETHER_ADDR_LEN);
		ret = i40e_vsi_delete_mac(vf->vsi, &filter->mac_addr);
		if (ret != I40E_SUCCESS) {
			PMD_DRV_LOG(ERR, "Failed to delete MAC filter.");
			return -1;
		}

		/* Clear device address as it has been removed */
		if (is_same_ether_addr(&(pf->dev_addr), new_mac))
			memset(&pf->dev_addr, 0, sizeof(struct ether_addr));
	}

	return 0;
}
3159
3160 /* MAC filter handle */
3161 static int
3162 i40e_mac_filter_handle(struct rte_eth_dev *dev, enum rte_filter_op filter_op,
3163                 void *arg)
3164 {
3165         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3166         struct rte_eth_mac_filter *filter;
3167         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
3168         int ret = I40E_NOT_SUPPORTED;
3169
3170         filter = (struct rte_eth_mac_filter *)(arg);
3171
3172         switch (filter_op) {
3173         case RTE_ETH_FILTER_NOP:
3174                 ret = I40E_SUCCESS;
3175                 break;
3176         case RTE_ETH_FILTER_ADD:
3177                 i40e_pf_disable_irq0(hw);
3178                 if (filter->is_vf)
3179                         ret = i40e_vf_mac_filter_set(pf, filter, 1);
3180                 i40e_pf_enable_irq0(hw);
3181                 break;
3182         case RTE_ETH_FILTER_DELETE:
3183                 i40e_pf_disable_irq0(hw);
3184                 if (filter->is_vf)
3185                         ret = i40e_vf_mac_filter_set(pf, filter, 0);
3186                 i40e_pf_enable_irq0(hw);
3187                 break;
3188         default:
3189                 PMD_DRV_LOG(ERR, "unknown operation %u", filter_op);
3190                 ret = I40E_ERR_PARAM;
3191                 break;
3192         }
3193
3194         return ret;
3195 }
3196
3197 static int
3198 i40e_get_rss_lut(struct i40e_vsi *vsi, uint8_t *lut, uint16_t lut_size)
3199 {
3200         struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
3201         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
3202         int ret;
3203
3204         if (!lut)
3205                 return -EINVAL;
3206
3207         if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
3208                 ret = i40e_aq_get_rss_lut(hw, vsi->vsi_id, TRUE,
3209                                           lut, lut_size);
3210                 if (ret) {
3211                         PMD_DRV_LOG(ERR, "Failed to get RSS lookup table");
3212                         return ret;
3213                 }
3214         } else {
3215                 uint32_t *lut_dw = (uint32_t *)lut;
3216                 uint16_t i, lut_size_dw = lut_size / 4;
3217
3218                 for (i = 0; i < lut_size_dw; i++)
3219                         lut_dw[i] = I40E_READ_REG(hw, I40E_PFQF_HLUT(i));
3220         }
3221
3222         return 0;
3223 }
3224
3225 static int
3226 i40e_set_rss_lut(struct i40e_vsi *vsi, uint8_t *lut, uint16_t lut_size)
3227 {
3228         struct i40e_pf *pf;
3229         struct i40e_hw *hw;
3230         int ret;
3231
3232         if (!vsi || !lut)
3233                 return -EINVAL;
3234
3235         pf = I40E_VSI_TO_PF(vsi);
3236         hw = I40E_VSI_TO_HW(vsi);
3237
3238         if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
3239                 ret = i40e_aq_set_rss_lut(hw, vsi->vsi_id, TRUE,
3240                                           lut, lut_size);
3241                 if (ret) {
3242                         PMD_DRV_LOG(ERR, "Failed to set RSS lookup table");
3243                         return ret;
3244                 }
3245         } else {
3246                 uint32_t *lut_dw = (uint32_t *)lut;
3247                 uint16_t i, lut_size_dw = lut_size / 4;
3248
3249                 for (i = 0; i < lut_size_dw; i++)
3250                         I40E_WRITE_REG(hw, I40E_PFQF_HLUT(i), lut_dw[i]);
3251                 I40E_WRITE_FLUSH(hw);
3252         }
3253
3254         return 0;
3255 }
3256
3257 static int
3258 i40e_dev_rss_reta_update(struct rte_eth_dev *dev,
3259                          struct rte_eth_rss_reta_entry64 *reta_conf,
3260                          uint16_t reta_size)
3261 {
3262         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3263         uint16_t i, lut_size = pf->hash_lut_size;
3264         uint16_t idx, shift;
3265         uint8_t *lut;
3266         int ret;
3267
3268         if (reta_size != lut_size ||
3269                 reta_size > ETH_RSS_RETA_SIZE_512) {
3270                 PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
3271                         "(%d) doesn't match the number hardware can supported "
3272                                         "(%d)\n", reta_size, lut_size);
3273                 return -EINVAL;
3274         }
3275
3276         lut = rte_zmalloc("i40e_rss_lut", reta_size, 0);
3277         if (!lut) {
3278                 PMD_DRV_LOG(ERR, "No memory can be allocated");
3279                 return -ENOMEM;
3280         }
3281         ret = i40e_get_rss_lut(pf->main_vsi, lut, reta_size);
3282         if (ret)
3283                 goto out;
3284         for (i = 0; i < reta_size; i++) {
3285                 idx = i / RTE_RETA_GROUP_SIZE;
3286                 shift = i % RTE_RETA_GROUP_SIZE;
3287                 if (reta_conf[idx].mask & (1ULL << shift))
3288                         lut[i] = reta_conf[idx].reta[shift];
3289         }
3290         ret = i40e_set_rss_lut(pf->main_vsi, lut, reta_size);
3291
3292 out:
3293         rte_free(lut);
3294
3295         return ret;
3296 }
3297
3298 static int
3299 i40e_dev_rss_reta_query(struct rte_eth_dev *dev,
3300                         struct rte_eth_rss_reta_entry64 *reta_conf,
3301                         uint16_t reta_size)
3302 {
3303         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3304         uint16_t i, lut_size = pf->hash_lut_size;
3305         uint16_t idx, shift;
3306         uint8_t *lut;
3307         int ret;
3308
3309         if (reta_size != lut_size ||
3310                 reta_size > ETH_RSS_RETA_SIZE_512) {
3311                 PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
3312                         "(%d) doesn't match the number hardware can supported "
3313                                         "(%d)\n", reta_size, lut_size);
3314                 return -EINVAL;
3315         }
3316
3317         lut = rte_zmalloc("i40e_rss_lut", reta_size, 0);
3318         if (!lut) {
3319                 PMD_DRV_LOG(ERR, "No memory can be allocated");
3320                 return -ENOMEM;
3321         }
3322
3323         ret = i40e_get_rss_lut(pf->main_vsi, lut, reta_size);
3324         if (ret)
3325                 goto out;
3326         for (i = 0; i < reta_size; i++) {
3327                 idx = i / RTE_RETA_GROUP_SIZE;
3328                 shift = i % RTE_RETA_GROUP_SIZE;
3329                 if (reta_conf[idx].mask & (1ULL << shift))
3330                         reta_conf[idx].reta[shift] = lut[i];
3331         }
3332
3333 out:
3334         rte_free(lut);
3335
3336         return ret;
3337 }
3338
3339 /**
3340  * i40e_allocate_dma_mem_d - specific memory alloc for shared code (base driver)
3341  * @hw:   pointer to the HW structure
3342  * @mem:  pointer to mem struct to fill out
3343  * @size: size of memory requested
3344  * @alignment: what to align the allocation to
3345  **/
3346 enum i40e_status_code
3347 i40e_allocate_dma_mem_d(__attribute__((unused)) struct i40e_hw *hw,
3348                         struct i40e_dma_mem *mem,
3349                         u64 size,
3350                         u32 alignment)
3351 {
3352         const struct rte_memzone *mz = NULL;
3353         char z_name[RTE_MEMZONE_NAMESIZE];
3354
3355         if (!mem)
3356                 return I40E_ERR_PARAM;
3357
3358         snprintf(z_name, sizeof(z_name), "i40e_dma_%"PRIu64, rte_rand());
3359         mz = rte_memzone_reserve_bounded(z_name, size, SOCKET_ID_ANY, 0,
3360                                          alignment, RTE_PGSIZE_2M);
3361         if (!mz)
3362                 return I40E_ERR_NO_MEMORY;
3363
3364         mem->size = size;
3365         mem->va = mz->addr;
3366         mem->pa = rte_mem_phy2mch(mz->memseg_id, mz->phys_addr);
3367         mem->zone = (const void *)mz;
3368         PMD_DRV_LOG(DEBUG, "memzone %s allocated with physical address: "
3369                     "%"PRIu64, mz->name, mem->pa);
3370
3371         return I40E_SUCCESS;
3372 }
3373
3374 /**
3375  * i40e_free_dma_mem_d - specific memory free for shared code (base driver)
3376  * @hw:   pointer to the HW structure
3377  * @mem:  ptr to mem struct to free
3378  **/
3379 enum i40e_status_code
3380 i40e_free_dma_mem_d(__attribute__((unused)) struct i40e_hw *hw,
3381                     struct i40e_dma_mem *mem)
3382 {
3383         if (!mem)
3384                 return I40E_ERR_PARAM;
3385
3386         PMD_DRV_LOG(DEBUG, "memzone %s to be freed with physical address: "
3387                     "%"PRIu64, ((const struct rte_memzone *)mem->zone)->name,
3388                     mem->pa);
3389         rte_memzone_free((const struct rte_memzone *)mem->zone);
3390         mem->zone = NULL;
3391         mem->va = NULL;
3392         mem->pa = (u64)0;
3393
3394         return I40E_SUCCESS;
3395 }
3396
3397 /**
3398  * i40e_allocate_virt_mem_d - specific memory alloc for shared code (base driver)
3399  * @hw:   pointer to the HW structure
3400  * @mem:  pointer to mem struct to fill out
3401  * @size: size of memory requested
3402  **/
3403 enum i40e_status_code
3404 i40e_allocate_virt_mem_d(__attribute__((unused)) struct i40e_hw *hw,
3405                          struct i40e_virt_mem *mem,
3406                          u32 size)
3407 {
3408         if (!mem)
3409                 return I40E_ERR_PARAM;
3410
3411         mem->size = size;
3412         mem->va = rte_zmalloc("i40e", size, 0);
3413
3414         if (mem->va)
3415                 return I40E_SUCCESS;
3416         else
3417                 return I40E_ERR_NO_MEMORY;
3418 }
3419
3420 /**
3421  * i40e_free_virt_mem_d - specific memory free for shared code (base driver)
3422  * @hw:   pointer to the HW structure
3423  * @mem:  pointer to mem struct to free
3424  **/
3425 enum i40e_status_code
3426 i40e_free_virt_mem_d(__attribute__((unused)) struct i40e_hw *hw,
3427                      struct i40e_virt_mem *mem)
3428 {
3429         if (!mem)
3430                 return I40E_ERR_PARAM;
3431
3432         rte_free(mem->va);
3433         mem->va = NULL;
3434
3435         return I40E_SUCCESS;
3436 }
3437
/* Spinlock-init hook for the shared base-driver code; wraps rte_spinlock. */
void
i40e_init_spinlock_d(struct i40e_spinlock *sp)
{
	rte_spinlock_init(&sp->spinlock);
}
3443
/* Spinlock-acquire hook for the shared base-driver code. */
void
i40e_acquire_spinlock_d(struct i40e_spinlock *sp)
{
	rte_spinlock_lock(&sp->spinlock);
}
3449
/* Spinlock-release hook for the shared base-driver code. */
void
i40e_release_spinlock_d(struct i40e_spinlock *sp)
{
	rte_spinlock_unlock(&sp->spinlock);
}
3455
/* Spinlock-destroy hook for the shared base-driver code.  An
 * rte_spinlock holds no resources, so this is intentionally a no-op.
 *
 * Fix: dropped the redundant bare `return;` at the end of the void body.
 */
void
i40e_destroy_spinlock_d(__attribute__((unused)) struct i40e_spinlock *sp)
{
}
3461
3462 /**
3463  * Get the hardware capabilities, which will be parsed
3464  * and saved into struct i40e_hw.
3465  */
3466 static int
3467 i40e_get_cap(struct i40e_hw *hw)
3468 {
3469         struct i40e_aqc_list_capabilities_element_resp *buf;
3470         uint16_t len, size = 0;
3471         int ret;
3472
3473         /* Calculate a huge enough buff for saving response data temporarily */
3474         len = sizeof(struct i40e_aqc_list_capabilities_element_resp) *
3475                                                 I40E_MAX_CAP_ELE_NUM;
3476         buf = rte_zmalloc("i40e", len, 0);
3477         if (!buf) {
3478                 PMD_DRV_LOG(ERR, "Failed to allocate memory");
3479                 return I40E_ERR_NO_MEMORY;
3480         }
3481
3482         /* Get, parse the capabilities and save it to hw */
3483         ret = i40e_aq_discover_capabilities(hw, buf, len, &size,
3484                         i40e_aqc_opc_list_func_capabilities, NULL);
3485         if (ret != I40E_SUCCESS)
3486                 PMD_DRV_LOG(ERR, "Failed to discover capabilities");
3487
3488         /* Free the temporary buffer after being used */
3489         rte_free(buf);
3490
3491         return ret;
3492 }
3493
/* Partition the device's queue pairs and VSIs among the consumers
 * (flow director, LAN, SR-IOV VFs and VMDq, in that fixed queue-offset
 * order) and set the matching capability flags in pf->flags.
 *
 * @param dev  ethdev being initialized
 * @return 0 on success, -EINVAL if SR-IOV is requested without HW
 *         support or the final budget exceeds the hardware limits
 */
static int
i40e_pf_parameter_init(struct rte_eth_dev *dev)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
	/* Running totals of queue pairs / VSIs claimed so far */
	uint16_t qp_count = 0, vsi_count = 0;

	if (dev->pci_dev->max_vfs && !hw->func_caps.sr_iov_1_1) {
		PMD_INIT_LOG(ERR, "HW configuration doesn't support SRIOV");
		return -EINVAL;
	}
	/* Add the parameter init for LFC (link flow control); the extra
	 * slot [I40E_MAX_TRAFFIC_CLASS] holds the link-level watermarks.
	 */
	pf->fc_conf.pause_time = I40E_DEFAULT_PAUSE_TIME;
	pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS] = I40E_DEFAULT_HIGH_WATER;
	pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS] = I40E_DEFAULT_LOW_WATER;

	pf->flags = I40E_FLAG_HEADER_SPLIT_DISABLED;
	pf->max_num_vsi = hw->func_caps.num_vsis;
	/* Per-consumer queue ceilings come from build-time configuration */
	pf->lan_nb_qp_max = RTE_LIBRTE_I40E_QUEUE_NUM_PER_PF;
	pf->vmdq_nb_qp_max = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
	pf->vf_nb_qp_max = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VF;

	/* FDir queue/VSI allocation */
	pf->fdir_qp_offset = 0;
	if (hw->func_caps.fd) {
		pf->flags |= I40E_FLAG_FDIR;
		pf->fdir_nb_qps = I40E_DEFAULT_QP_NUM_FDIR;
	} else {
		pf->fdir_nb_qps = 0;
	}
	qp_count += pf->fdir_nb_qps;
	vsi_count += 1;

	/* LAN queue/VSI allocation: a single queue without RSS, the
	 * configured maximum with it.
	 */
	pf->lan_qp_offset = pf->fdir_qp_offset + pf->fdir_nb_qps;
	if (!hw->func_caps.rss) {
		pf->lan_nb_qps = 1;
	} else {
		pf->flags |= I40E_FLAG_RSS;
		if (hw->mac.type == I40E_MAC_X722)
			pf->flags |= I40E_FLAG_RSS_AQ_CAPABLE;
		pf->lan_nb_qps = pf->lan_nb_qp_max;
	}
	qp_count += pf->lan_nb_qps;
	vsi_count += 1;

	/* VF queue/VSI allocation: one VSI per requested VF */
	pf->vf_qp_offset = pf->lan_qp_offset + pf->lan_nb_qps;
	if (hw->func_caps.sr_iov_1_1 && dev->pci_dev->max_vfs) {
		pf->flags |= I40E_FLAG_SRIOV;
		pf->vf_nb_qps = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VF;
		pf->vf_num = dev->pci_dev->max_vfs;
		PMD_DRV_LOG(DEBUG, "%u VF VSIs, %u queues per VF VSI, "
			    "in total %u queues", pf->vf_num, pf->vf_nb_qps,
			    pf->vf_nb_qps * pf->vf_num);
	} else {
		pf->vf_nb_qps = 0;
		pf->vf_num = 0;
	}
	qp_count += pf->vf_nb_qps * pf->vf_num;
	vsi_count += pf->vf_num;

	/* VMDq queue/VSI allocation: hand whatever queues/VSIs remain
	 * to VMDq pools, capped at ETH_64_POOLS.
	 */
	pf->vmdq_qp_offset = pf->vf_qp_offset + pf->vf_nb_qps * pf->vf_num;
	pf->vmdq_nb_qps = 0;
	pf->max_nb_vmdq_vsi = 0;
	if (hw->func_caps.vmdq) {
		if (qp_count < hw->func_caps.num_tx_qp &&
			vsi_count < hw->func_caps.num_vsis) {
			pf->max_nb_vmdq_vsi = (hw->func_caps.num_tx_qp -
				qp_count) / pf->vmdq_nb_qp_max;

			/* Limit the maximum number of VMDq vsi to the maximum
			 * ethdev can support
			 */
			pf->max_nb_vmdq_vsi = RTE_MIN(pf->max_nb_vmdq_vsi,
				hw->func_caps.num_vsis - vsi_count);
			pf->max_nb_vmdq_vsi = RTE_MIN(pf->max_nb_vmdq_vsi,
				ETH_64_POOLS);
			if (pf->max_nb_vmdq_vsi) {
				pf->flags |= I40E_FLAG_VMDQ;
				pf->vmdq_nb_qps = pf->vmdq_nb_qp_max;
				PMD_DRV_LOG(DEBUG, "%u VMDQ VSIs, %u queues "
					    "per VMDQ VSI, in total %u queues",
					    pf->max_nb_vmdq_vsi,
					    pf->vmdq_nb_qps, pf->vmdq_nb_qps *
					    pf->max_nb_vmdq_vsi);
			} else {
				PMD_DRV_LOG(INFO, "No enough queues left for "
					    "VMDq");
			}
		} else {
			PMD_DRV_LOG(INFO, "No queue or VSI left for VMDq");
		}
	}
	qp_count += pf->vmdq_nb_qps * pf->max_nb_vmdq_vsi;
	vsi_count += pf->max_nb_vmdq_vsi;

	if (hw->func_caps.dcb)
		pf->flags |= I40E_FLAG_DCB;

	/* Final sanity check of the accumulated budget against the
	 * hardware capabilities.
	 */
	if (qp_count > hw->func_caps.num_tx_qp) {
		PMD_DRV_LOG(ERR, "Failed to allocate %u queues, which exceeds "
			    "the hardware maximum %u", qp_count,
			    hw->func_caps.num_tx_qp);
		return -EINVAL;
	}
	if (vsi_count > hw->func_caps.num_vsis) {
		PMD_DRV_LOG(ERR, "Failed to allocate %u VSIs, which exceeds "
			    "the hardware maximum %u", vsi_count,
			    hw->func_caps.num_vsis);
		return -EINVAL;
	}

	return 0;
}
3610
3611 static int
3612 i40e_pf_get_switch_config(struct i40e_pf *pf)
3613 {
3614         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
3615         struct i40e_aqc_get_switch_config_resp *switch_config;
3616         struct i40e_aqc_switch_config_element_resp *element;
3617         uint16_t start_seid = 0, num_reported;
3618         int ret;
3619
3620         switch_config = (struct i40e_aqc_get_switch_config_resp *)\
3621                         rte_zmalloc("i40e", I40E_AQ_LARGE_BUF, 0);
3622         if (!switch_config) {
3623                 PMD_DRV_LOG(ERR, "Failed to allocated memory");
3624                 return -ENOMEM;
3625         }
3626
3627         /* Get the switch configurations */
3628         ret = i40e_aq_get_switch_config(hw, switch_config,
3629                 I40E_AQ_LARGE_BUF, &start_seid, NULL);
3630         if (ret != I40E_SUCCESS) {
3631                 PMD_DRV_LOG(ERR, "Failed to get switch configurations");
3632                 goto fail;
3633         }
3634         num_reported = rte_le_to_cpu_16(switch_config->header.num_reported);
3635         if (num_reported != 1) { /* The number should be 1 */
3636                 PMD_DRV_LOG(ERR, "Wrong number of switch config reported");
3637                 goto fail;
3638         }
3639
3640         /* Parse the switch configuration elements */
3641         element = &(switch_config->element[0]);
3642         if (element->element_type == I40E_SWITCH_ELEMENT_TYPE_VSI) {
3643                 pf->mac_seid = rte_le_to_cpu_16(element->uplink_seid);
3644                 pf->main_vsi_seid = rte_le_to_cpu_16(element->seid);
3645         } else
3646                 PMD_DRV_LOG(INFO, "Unknown element type");
3647
3648 fail:
3649         rte_free(switch_config);
3650
3651         return ret;
3652 }
3653
3654 static int
3655 i40e_res_pool_init (struct i40e_res_pool_info *pool, uint32_t base,
3656                         uint32_t num)
3657 {
3658         struct pool_entry *entry;
3659
3660         if (pool == NULL || num == 0)
3661                 return -EINVAL;
3662
3663         entry = rte_zmalloc("i40e", sizeof(*entry), 0);
3664         if (entry == NULL) {
3665                 PMD_DRV_LOG(ERR, "Failed to allocate memory for resource pool");
3666                 return -ENOMEM;
3667         }
3668
3669         /* queue heap initialize */
3670         pool->num_free = num;
3671         pool->num_alloc = 0;
3672         pool->base = base;
3673         LIST_INIT(&pool->alloc_list);
3674         LIST_INIT(&pool->free_list);
3675
3676         /* Initialize element  */
3677         entry->base = 0;
3678         entry->len = num;
3679
3680         LIST_INSERT_HEAD(&pool->free_list, entry, next);
3681         return 0;
3682 }
3683
3684 static void
3685 i40e_res_pool_destroy(struct i40e_res_pool_info *pool)
3686 {
3687         struct pool_entry *entry, *next_entry;
3688
3689         if (pool == NULL)
3690                 return;
3691
3692         for (entry = LIST_FIRST(&pool->alloc_list);
3693                         entry && (next_entry = LIST_NEXT(entry, next), 1);
3694                         entry = next_entry) {
3695                 LIST_REMOVE(entry, next);
3696                 rte_free(entry);
3697         }
3698
3699         for (entry = LIST_FIRST(&pool->free_list);
3700                         entry && (next_entry = LIST_NEXT(entry, next), 1);
3701                         entry = next_entry) {
3702                 LIST_REMOVE(entry, next);
3703                 rte_free(entry);
3704         }
3705
3706         pool->num_free = 0;
3707         pool->num_alloc = 0;
3708         pool->base = 0;
3709         LIST_INIT(&pool->alloc_list);
3710         LIST_INIT(&pool->free_list);
3711 }
3712
3713 static int
3714 i40e_res_pool_free(struct i40e_res_pool_info *pool,
3715                        uint32_t base)
3716 {
3717         struct pool_entry *entry, *next, *prev, *valid_entry = NULL;
3718         uint32_t pool_offset;
3719         int insert;
3720
3721         if (pool == NULL) {
3722                 PMD_DRV_LOG(ERR, "Invalid parameter");
3723                 return -EINVAL;
3724         }
3725
3726         pool_offset = base - pool->base;
3727         /* Lookup in alloc list */
3728         LIST_FOREACH(entry, &pool->alloc_list, next) {
3729                 if (entry->base == pool_offset) {
3730                         valid_entry = entry;
3731                         LIST_REMOVE(entry, next);
3732                         break;
3733                 }
3734         }
3735
3736         /* Not find, return */
3737         if (valid_entry == NULL) {
3738                 PMD_DRV_LOG(ERR, "Failed to find entry");
3739                 return -EINVAL;
3740         }
3741
3742         /**
3743          * Found it, move it to free list  and try to merge.
3744          * In order to make merge easier, always sort it by qbase.
3745          * Find adjacent prev and last entries.
3746          */
3747         prev = next = NULL;
3748         LIST_FOREACH(entry, &pool->free_list, next) {
3749                 if (entry->base > valid_entry->base) {
3750                         next = entry;
3751                         break;
3752                 }
3753                 prev = entry;
3754         }
3755
3756         insert = 0;
3757         /* Try to merge with next one*/
3758         if (next != NULL) {
3759                 /* Merge with next one */
3760                 if (valid_entry->base + valid_entry->len == next->base) {
3761                         next->base = valid_entry->base;
3762                         next->len += valid_entry->len;
3763                         rte_free(valid_entry);
3764                         valid_entry = next;
3765                         insert = 1;
3766                 }
3767         }
3768
3769         if (prev != NULL) {
3770                 /* Merge with previous one */
3771                 if (prev->base + prev->len == valid_entry->base) {
3772                         prev->len += valid_entry->len;
3773                         /* If it merge with next one, remove next node */
3774                         if (insert == 1) {
3775                                 LIST_REMOVE(valid_entry, next);
3776                                 rte_free(valid_entry);
3777                         } else {
3778                                 rte_free(valid_entry);
3779                                 insert = 1;
3780                         }
3781                 }
3782         }
3783
3784         /* Not find any entry to merge, insert */
3785         if (insert == 0) {
3786                 if (prev != NULL)
3787                         LIST_INSERT_AFTER(prev, valid_entry, next);
3788                 else if (next != NULL)
3789                         LIST_INSERT_BEFORE(next, valid_entry, next);
3790                 else /* It's empty list, insert to head */
3791                         LIST_INSERT_HEAD(&pool->free_list, valid_entry, next);
3792         }
3793
3794         pool->num_free += valid_entry->len;
3795         pool->num_alloc -= valid_entry->len;
3796
3797         return 0;
3798 }
3799
/* Allocate @num contiguous resources from @pool using a best-fit
 * search over the free list.
 *
 * @return the absolute base index of the allocated range on success
 *         (pool-relative base plus pool->base), -EINVAL on bad
 *         arguments, -ENOMEM when no free range is large enough
 */
static int
i40e_res_pool_alloc(struct i40e_res_pool_info *pool,
		       uint16_t num)
{
	struct pool_entry *entry, *valid_entry;

	if (pool == NULL || num == 0) {
		PMD_DRV_LOG(ERR, "Invalid parameter");
		return -EINVAL;
	}

	if (pool->num_free < num) {
		PMD_DRV_LOG(ERR, "No resource. ask:%u, available:%u",
			    num, pool->num_free);
		return -ENOMEM;
	}

	valid_entry = NULL;
	/* Lookup in free list and find the best-fit (smallest adequate)
	 * entry; an exact-size match ends the search immediately.
	 */
	LIST_FOREACH(entry, &pool->free_list, next) {
		if (entry->len >= num) {
			/* Find best one */
			if (entry->len == num) {
				valid_entry = entry;
				break;
			}
			/* Keep the smallest entry that still fits */
			if (valid_entry == NULL || valid_entry->len > entry->len)
				valid_entry = entry;
		}
	}

	/* Not find one to satisfy the request, return */
	if (valid_entry == NULL) {
		PMD_DRV_LOG(ERR, "No valid entry found");
		return -ENOMEM;
	}
	/**
	 * The entry has exactly the requested length:
	 * move it wholesale from free_list to alloc_list.
	 */
	if (valid_entry->len == num) {
		LIST_REMOVE(valid_entry, next);
	} else {
		/**
		 * The entry has more than requested: carve a new
		 * entry of size num off its front for alloc_list and
		 * shrink the remainder that stays in free_list.
		 */
		entry = rte_zmalloc("res_pool", sizeof(*entry), 0);
		if (entry == NULL) {
			PMD_DRV_LOG(ERR, "Failed to allocate memory for "
				    "resource pool");
			return -ENOMEM;
		}
		entry->base = valid_entry->base;
		entry->len = num;
		valid_entry->base += num;
		valid_entry->len -= num;
		valid_entry = entry;
	}

	/* Insert it into alloc list, not sorted */
	LIST_INSERT_HEAD(&pool->alloc_list, valid_entry, next);

	pool->num_free -= valid_entry->len;
	pool->num_alloc += valid_entry->len;

	return valid_entry->base + pool->base;
}
3869
3870 /**
3871  * bitmap_is_subset - Check whether src2 is subset of src1
3872  **/
3873 static inline int
3874 bitmap_is_subset(uint8_t src1, uint8_t src2)
3875 {
3876         return !((src1 ^ src2) & src2);
3877 }
3878
3879 static enum i40e_status_code
3880 validate_tcmap_parameter(struct i40e_vsi *vsi, uint8_t enabled_tcmap)
3881 {
3882         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
3883
3884         /* If DCB is not supported, only default TC is supported */
3885         if (!hw->func_caps.dcb && enabled_tcmap != I40E_DEFAULT_TCMAP) {
3886                 PMD_DRV_LOG(ERR, "DCB is not enabled, only TC0 is supported");
3887                 return I40E_NOT_SUPPORTED;
3888         }
3889
3890         if (!bitmap_is_subset(hw->func_caps.enabled_tcmap, enabled_tcmap)) {
3891                 PMD_DRV_LOG(ERR, "Enabled TC map 0x%x not applicable to "
3892                             "HW support 0x%x", hw->func_caps.enabled_tcmap,
3893                             enabled_tcmap);
3894                 return I40E_NOT_SUPPORTED;
3895         }
3896         return I40E_SUCCESS;
3897 }
3898
/* Set or clear the port VLAN (PVID) configuration of a VSI and push
 * the updated VSI context to the firmware.
 *
 * @param vsi   VSI to configure
 * @param info  on/off flag plus either the PVID to insert (on) or the
 *              tagged/untagged reject settings (off)
 * @return I40E_SUCCESS on success, I40E_ERR_PARAM on NULL arguments,
 *         or the AQ update status
 */
int
i40e_vsi_vlan_pvid_set(struct i40e_vsi *vsi,
				struct i40e_vsi_vlan_pvid_info *info)
{
	struct i40e_hw *hw;
	struct i40e_vsi_context ctxt;
	uint8_t vlan_flags = 0;
	int ret;

	if (vsi == NULL || info == NULL) {
		PMD_DRV_LOG(ERR, "invalid parameters");
		return I40E_ERR_PARAM;
	}

	if (info->on) {
		vsi->info.pvid = info->config.pvid;
		/**
		 * If insert pvid is enabled, only tagged pkts are
		 * allowed to be sent out.
		 */
		vlan_flags |= I40E_AQ_VSI_PVLAN_INSERT_PVID |
				I40E_AQ_VSI_PVLAN_MODE_TAGGED;
	} else {
		vsi->info.pvid = 0;
		/* PVID off: the accept-mode bits are derived from which
		 * traffic the caller wants rejected.
		 */
		if (info->config.reject.tagged == 0)
			vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_TAGGED;

		if (info->config.reject.untagged == 0)
			vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_UNTAGGED;
	}
	/* Replace only the insert/mode bits with the new settings */
	vsi->info.port_vlan_flags &= ~(I40E_AQ_VSI_PVLAN_INSERT_PVID |
					I40E_AQ_VSI_PVLAN_MODE_MASK);
	vsi->info.port_vlan_flags |= vlan_flags;
	vsi->info.valid_sections =
		rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
	memset(&ctxt, 0, sizeof(ctxt));
	(void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
	ctxt.seid = vsi->seid;

	/* Push the updated VSI context to the firmware */
	hw = I40E_VSI_TO_HW(vsi);
	ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
	if (ret != I40E_SUCCESS)
		PMD_DRV_LOG(ERR, "Failed to update VSI params");

	return ret;
}
3945
3946 static int
3947 i40e_vsi_update_tc_bandwidth(struct i40e_vsi *vsi, uint8_t enabled_tcmap)
3948 {
3949         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
3950         int i, ret;
3951         struct i40e_aqc_configure_vsi_tc_bw_data tc_bw_data;
3952
3953         ret = validate_tcmap_parameter(vsi, enabled_tcmap);
3954         if (ret != I40E_SUCCESS)
3955                 return ret;
3956
3957         if (!vsi->seid) {
3958                 PMD_DRV_LOG(ERR, "seid not valid");
3959                 return -EINVAL;
3960         }
3961
3962         memset(&tc_bw_data, 0, sizeof(tc_bw_data));
3963         tc_bw_data.tc_valid_bits = enabled_tcmap;
3964         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
3965                 tc_bw_data.tc_bw_credits[i] =
3966                         (enabled_tcmap & (1 << i)) ? 1 : 0;
3967
3968         ret = i40e_aq_config_vsi_tc_bw(hw, vsi->seid, &tc_bw_data, NULL);
3969         if (ret != I40E_SUCCESS) {
3970                 PMD_DRV_LOG(ERR, "Failed to configure TC BW");
3971                 return ret;
3972         }
3973
3974         (void)rte_memcpy(vsi->info.qs_handle, tc_bw_data.qs_handles,
3975                                         sizeof(vsi->info.qs_handle));
3976         return I40E_SUCCESS;
3977 }
3978
3979 static enum i40e_status_code
3980 i40e_vsi_config_tc_queue_mapping(struct i40e_vsi *vsi,
3981                                  struct i40e_aqc_vsi_properties_data *info,
3982                                  uint8_t enabled_tcmap)
3983 {
3984         enum i40e_status_code ret;
3985         int i, total_tc = 0;
3986         uint16_t qpnum_per_tc, bsf, qp_idx;
3987
3988         ret = validate_tcmap_parameter(vsi, enabled_tcmap);
3989         if (ret != I40E_SUCCESS)
3990                 return ret;
3991
3992         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
3993                 if (enabled_tcmap & (1 << i))
3994                         total_tc++;
3995         vsi->enabled_tc = enabled_tcmap;
3996
3997         /* Number of queues per enabled TC */
3998         qpnum_per_tc = i40e_align_floor(vsi->nb_qps / total_tc);
3999         qpnum_per_tc = RTE_MIN(qpnum_per_tc, I40E_MAX_Q_PER_TC);
4000         bsf = rte_bsf32(qpnum_per_tc);
4001
4002         /* Adjust the queue number to actual queues that can be applied */
4003         if (!(vsi->type == I40E_VSI_MAIN && total_tc == 1))
4004                 vsi->nb_qps = qpnum_per_tc * total_tc;
4005
4006         /**
4007          * Configure TC and queue mapping parameters, for enabled TC,
4008          * allocate qpnum_per_tc queues to this traffic. For disabled TC,
4009          * default queue will serve it.
4010          */
4011         qp_idx = 0;
4012         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
4013                 if (vsi->enabled_tc & (1 << i)) {
4014                         info->tc_mapping[i] = rte_cpu_to_le_16((qp_idx <<
4015                                         I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
4016                                 (bsf << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT));
4017                         qp_idx += qpnum_per_tc;
4018                 } else
4019                         info->tc_mapping[i] = 0;
4020         }
4021
4022         /* Associate queue number with VSI */
4023         if (vsi->type == I40E_VSI_SRIOV) {
4024                 info->mapping_flags |=
4025                         rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
4026                 for (i = 0; i < vsi->nb_qps; i++)
4027                         info->queue_mapping[i] =
4028                                 rte_cpu_to_le_16(vsi->base_queue + i);
4029         } else {
4030                 info->mapping_flags |=
4031                         rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_CONTIG);
4032                 info->queue_mapping[0] = rte_cpu_to_le_16(vsi->base_queue);
4033         }
4034         info->valid_sections |=
4035                 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_QUEUE_MAP_VALID);
4036
4037         return I40E_SUCCESS;
4038 }
4039
4040 static int
4041 i40e_veb_release(struct i40e_veb *veb)
4042 {
4043         struct i40e_vsi *vsi;
4044         struct i40e_hw *hw;
4045
4046         if (veb == NULL)
4047                 return -EINVAL;
4048
4049         if (!TAILQ_EMPTY(&veb->head)) {
4050                 PMD_DRV_LOG(ERR, "VEB still has VSI attached, can't remove");
4051                 return -EACCES;
4052         }
4053         /* associate_vsi field is NULL for floating VEB */
4054         if (veb->associate_vsi != NULL) {
4055                 vsi = veb->associate_vsi;
4056                 hw = I40E_VSI_TO_HW(vsi);
4057
4058                 vsi->uplink_seid = veb->uplink_seid;
4059                 vsi->veb = NULL;
4060         } else {
4061                 veb->associate_pf->main_vsi->floating_veb = NULL;
4062                 hw = I40E_VSI_TO_HW(veb->associate_pf->main_vsi);
4063         }
4064
4065         i40e_aq_delete_element(hw, veb->seid, NULL);
4066         rte_free(veb);
4067         return I40E_SUCCESS;
4068 }
4069
/* Setup a veb */
/* Create a VEB switch element on the device and attach it either below
 * @vsi, or as a "floating" VEB when @vsi is NULL.
 *
 * @param pf   board private structure (must not be NULL)
 * @param vsi  uplink VSI, or NULL for a floating VEB
 * @return pointer to the new VEB, or NULL on failure
 */
static struct i40e_veb *
i40e_veb_setup(struct i40e_pf *pf, struct i40e_vsi *vsi)
{
	struct i40e_veb *veb;
	int ret;
	struct i40e_hw *hw;

	if (pf == NULL) {
		PMD_DRV_LOG(ERR,
			    "veb setup failed, associated PF shouldn't null");
		return NULL;
	}
	hw = I40E_PF_TO_HW(pf);

	veb = rte_zmalloc("i40e_veb", sizeof(struct i40e_veb), 0);
	if (!veb) {
		PMD_DRV_LOG(ERR, "Failed to allocate memory for veb");
		goto fail;
	}

	veb->associate_vsi = vsi;
	veb->associate_pf = pf;
	TAILQ_INIT(&veb->head);
	veb->uplink_seid = vsi ? vsi->uplink_seid : 0;

	/* create floating veb if vsi is NULL */
	if (vsi != NULL) {
		ret = i40e_aq_add_veb(hw, veb->uplink_seid, vsi->seid,
				      I40E_DEFAULT_TCMAP, false,
				      &veb->seid, false, NULL);
	} else {
		ret = i40e_aq_add_veb(hw, 0, 0, I40E_DEFAULT_TCMAP,
				      true, &veb->seid, false, NULL);
	}

	if (ret != I40E_SUCCESS) {
		PMD_DRV_LOG(ERR, "Add veb failed, aq_err: %d",
			    hw->aq.asq_last_status);
		goto fail;
	}
	veb->enabled_tc = I40E_DEFAULT_TCMAP;

	/* get statistics index */
	ret = i40e_aq_get_veb_parameters(hw, veb->seid, NULL, NULL,
				&veb->stats_idx, NULL, NULL, NULL);
	if (ret != I40E_SUCCESS) {
		PMD_DRV_LOG(ERR, "Get veb statics index failed, aq_err: %d",
			    hw->aq.asq_last_status);
		goto fail;
	}
	/* Get VEB bandwidth, to be implemented */
	/* Now associated vsi binding to the VEB, set uplink to this VEB */
	if (vsi)
		vsi->uplink_seid = veb->seid;

	return veb;
fail:
	/* rte_free(NULL) is a no-op, so this is safe for the
	 * allocation-failure path too.  NOTE(review): the switch element
	 * created by i40e_aq_add_veb is not deleted when fetching the
	 * stats index fails - confirm whether that is intentional.
	 */
	rte_free(veb);
	return NULL;
}
4131
/* Tear down a VSI: recursively release child VSIs attached through its
 * VEB(s), drop all MAC/VLAN filters, unlink it from its parent's
 * sibling list, delete its switch element and return its queue and
 * MSI-X resources to the PF pools.
 *
 * @return I40E_SUCCESS on success (or NULL input), -EFAULT without an
 *         adapter, -1 if a child fails to release, I40E_ERR_PARAM on a
 *         missing parent
 */
int
i40e_vsi_release(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf;
	struct i40e_hw *hw;
	struct i40e_vsi_list *vsi_list;
	void *temp;
	int ret;
	struct i40e_mac_filter *f;
	uint16_t user_param;

	if (!vsi)
		return I40E_SUCCESS;

	if (!vsi->adapter)
		return -EFAULT;

	/* Cached up front; used below to index pf->floating_veb_list */
	user_param = vsi->user_param;

	pf = I40E_VSI_TO_PF(vsi);
	hw = I40E_VSI_TO_HW(vsi);

	/* VSI has child to attach, release child first */
	if (vsi->veb) {
		TAILQ_FOREACH_SAFE(vsi_list, &vsi->veb->head, list, temp) {
			if (i40e_vsi_release(vsi_list->vsi) != I40E_SUCCESS)
				return -1;
		}
		i40e_veb_release(vsi->veb);
	}

	/* Same for children hanging off the floating VEB */
	if (vsi->floating_veb) {
		TAILQ_FOREACH_SAFE(vsi_list, &vsi->floating_veb->head, list, temp) {
			if (i40e_vsi_release(vsi_list->vsi) != I40E_SUCCESS)
				return -1;
		}
	}

	/* Remove all macvlan filters of the VSI */
	i40e_vsi_remove_all_macvlan_filter(vsi);
	TAILQ_FOREACH_SAFE(f, &vsi->mac_list, next, temp)
		rte_free(f);

	/* Non-main VSIs not attached to a floating VEB: unlink from the
	 * parent's VEB and delete the switch element.
	 */
	if (vsi->type != I40E_VSI_MAIN &&
	    ((vsi->type != I40E_VSI_SRIOV) ||
	    !pf->floating_veb_list[user_param])) {
		/* Remove vsi from parent's sibling list */
		if (vsi->parent_vsi == NULL || vsi->parent_vsi->veb == NULL) {
			PMD_DRV_LOG(ERR, "VSI's parent VSI is NULL");
			return I40E_ERR_PARAM;
		}
		TAILQ_REMOVE(&vsi->parent_vsi->veb->head,
				&vsi->sib_vsi_list, list);

		/* Remove all switch element of the VSI */
		ret = i40e_aq_delete_element(hw, vsi->seid, NULL);
		if (ret != I40E_SUCCESS)
			PMD_DRV_LOG(ERR, "Failed to delete element");
	}

	/* SR-IOV VSIs on a floating VEB: same unlink/delete against the
	 * parent's floating VEB.
	 */
	if ((vsi->type == I40E_VSI_SRIOV) &&
	    pf->floating_veb_list[user_param]) {
		/* Remove vsi from parent's sibling list */
		if (vsi->parent_vsi == NULL ||
		    vsi->parent_vsi->floating_veb == NULL) {
			PMD_DRV_LOG(ERR, "VSI's parent VSI is NULL");
			return I40E_ERR_PARAM;
		}
		TAILQ_REMOVE(&vsi->parent_vsi->floating_veb->head,
			     &vsi->sib_vsi_list, list);

		/* Remove all switch element of the VSI */
		ret = i40e_aq_delete_element(hw, vsi->seid, NULL);
		if (ret != I40E_SUCCESS)
			PMD_DRV_LOG(ERR, "Failed to delete element");
	}

	/* Return the queue range (and the MSI-X vector for non-SRIOV
	 * VSIs) to the PF-wide resource pools.
	 */
	i40e_res_pool_free(&pf->qp_pool, vsi->base_queue);

	if (vsi->type != I40E_VSI_SRIOV)
		i40e_res_pool_free(&pf->msix_pool, vsi->msix_intr);
	rte_free(vsi);

	return I40E_SUCCESS;
}
4217
/* Replace the firmware's default MAC filter of the main VSI with one
 * managed through the driver's own MAC filter path, so the permanent
 * MAC address is tracked in vsi->mac_list like any other filter.
 *
 * @return I40E_SUCCESS, I40E_ERR_CONFIG for non-main VSIs,
 *         I40E_ERR_NO_MEMORY, or the failing AQ status
 */
static int
i40e_update_default_filter_setting(struct i40e_vsi *vsi)
{
	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
	struct i40e_aqc_remove_macvlan_element_data def_filter;
	struct i40e_mac_filter_info filter;
	int ret;

	/* Only the main VSI carries the firmware default filter */
	if (vsi->type != I40E_VSI_MAIN)
		return I40E_ERR_CONFIG;
	memset(&def_filter, 0, sizeof(def_filter));
	(void)rte_memcpy(def_filter.mac_addr, hw->mac.perm_addr,
					ETH_ADDR_LEN);
	def_filter.vlan_tag = 0;
	def_filter.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
				I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
	ret = i40e_aq_remove_macvlan(hw, vsi->seid, &def_filter, 1, NULL);
	if (ret != I40E_SUCCESS) {
		struct i40e_mac_filter *f;
		struct ether_addr *mac;

		PMD_DRV_LOG(WARNING, "Cannot remove the default "
			    "macvlan filter");
		/* It needs to add the permanent mac into mac list */
		f = rte_zmalloc("macv_filter", sizeof(*f), 0);
		if (f == NULL) {
			PMD_DRV_LOG(ERR, "failed to allocate memory");
			return I40E_ERR_NO_MEMORY;
		}
		mac = &f->mac_info.mac_addr;
		(void)rte_memcpy(&mac->addr_bytes, hw->mac.perm_addr,
				ETH_ADDR_LEN);
		f->mac_info.filter_type = RTE_MACVLAN_PERFECT_MATCH;
		TAILQ_INSERT_TAIL(&vsi->mac_list, f, next);
		vsi->mac_num++;

		/* Report the original removal failure to the caller even
		 * though the fallback bookkeeping above succeeded.
		 */
		return ret;
	}
	/* Re-add the permanent MAC via the driver's own filter path */
	(void)rte_memcpy(&filter.mac_addr,
		(struct ether_addr *)(hw->mac.perm_addr), ETH_ADDR_LEN);
	filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
	return i40e_vsi_add_mac(vsi, &filter);
}
4261
4262 /*
4263  * i40e_vsi_get_bw_config - Query VSI BW Information
4264  * @vsi: the VSI to be queried
4265  *
4266  * Returns 0 on success, negative value on failure
4267  */
4268 static enum i40e_status_code
4269 i40e_vsi_get_bw_config(struct i40e_vsi *vsi)
4270 {
4271         struct i40e_aqc_query_vsi_bw_config_resp bw_config;
4272         struct i40e_aqc_query_vsi_ets_sla_config_resp ets_sla_config;
4273         struct i40e_hw *hw = &vsi->adapter->hw;
4274         i40e_status ret;
4275         int i;
4276         uint32_t bw_max;
4277
4278         memset(&bw_config, 0, sizeof(bw_config));
4279         ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL);
4280         if (ret != I40E_SUCCESS) {
4281                 PMD_DRV_LOG(ERR, "VSI failed to get bandwidth configuration %u",
4282                             hw->aq.asq_last_status);
4283                 return ret;
4284         }
4285
4286         memset(&ets_sla_config, 0, sizeof(ets_sla_config));
4287         ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid,
4288                                         &ets_sla_config, NULL);
4289         if (ret != I40E_SUCCESS) {
4290                 PMD_DRV_LOG(ERR, "VSI failed to get TC bandwdith "
4291                             "configuration %u", hw->aq.asq_last_status);
4292                 return ret;
4293         }
4294
4295         /* store and print out BW info */
4296         vsi->bw_info.bw_limit = rte_le_to_cpu_16(bw_config.port_bw_limit);
4297         vsi->bw_info.bw_max = bw_config.max_bw;
4298         PMD_DRV_LOG(DEBUG, "VSI bw limit:%u", vsi->bw_info.bw_limit);
4299         PMD_DRV_LOG(DEBUG, "VSI max_bw:%u", vsi->bw_info.bw_max);
4300         bw_max = rte_le_to_cpu_16(ets_sla_config.tc_bw_max[0]) |
4301                     (rte_le_to_cpu_16(ets_sla_config.tc_bw_max[1]) <<
4302                      I40E_16_BIT_WIDTH);
4303         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
4304                 vsi->bw_info.bw_ets_share_credits[i] =
4305                                 ets_sla_config.share_credits[i];
4306                 vsi->bw_info.bw_ets_credits[i] =
4307                                 rte_le_to_cpu_16(ets_sla_config.credits[i]);
4308                 /* 4 bits per TC, 4th bit is reserved */
4309                 vsi->bw_info.bw_ets_max[i] =
4310                         (uint8_t)((bw_max >> (i * I40E_4_BIT_WIDTH)) &
4311                                   RTE_LEN2MASK(3, uint8_t));
4312                 PMD_DRV_LOG(DEBUG, "\tVSI TC%u:share credits %u", i,
4313                             vsi->bw_info.bw_ets_share_credits[i]);
4314                 PMD_DRV_LOG(DEBUG, "\tVSI TC%u:credits %u", i,
4315                             vsi->bw_info.bw_ets_credits[i]);
4316                 PMD_DRV_LOG(DEBUG, "\tVSI TC%u: max credits: %u", i,
4317                             vsi->bw_info.bw_ets_max[i]);
4318         }
4319
4320         return I40E_SUCCESS;
4321 }
4322
4323 /* i40e_enable_pf_lb
4324  * @pf: pointer to the pf structure
4325  *
4326  * allow loopback on pf
4327  */
4328 static inline void
4329 i40e_enable_pf_lb(struct i40e_pf *pf)
4330 {
4331         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
4332         struct i40e_vsi_context ctxt;
4333         int ret;
4334
4335         /* Use the FW API if FW >= v5.0 */
4336         if (hw->aq.fw_maj_ver < 5) {
4337                 PMD_INIT_LOG(ERR, "FW < v5.0, cannot enable loopback");
4338                 return;
4339         }
4340
4341         memset(&ctxt, 0, sizeof(ctxt));
4342         ctxt.seid = pf->main_vsi_seid;
4343         ctxt.pf_num = hw->pf_id;
4344         ret = i40e_aq_get_vsi_params(hw, &ctxt, NULL);
4345         if (ret) {
4346                 PMD_DRV_LOG(ERR, "cannot get pf vsi config, err %d, aq_err %d",
4347                             ret, hw->aq.asq_last_status);
4348                 return;
4349         }
4350         ctxt.flags = I40E_AQ_VSI_TYPE_PF;
4351         ctxt.info.valid_sections =
4352                 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SWITCH_VALID);
4353         ctxt.info.switch_id |=
4354                 rte_cpu_to_le_16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
4355
4356         ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
4357         if (ret)
4358                 PMD_DRV_LOG(ERR, "update vsi switch failed, aq_err=%d\n",
4359                             hw->aq.asq_last_status);
4360 }
4361
4362 /* Setup a VSI */
4363 struct i40e_vsi *
4364 i40e_vsi_setup(struct i40e_pf *pf,
4365                enum i40e_vsi_type type,
4366                struct i40e_vsi *uplink_vsi,
4367                uint16_t user_param)
4368 {
4369         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
4370         struct i40e_vsi *vsi;
4371         struct i40e_mac_filter_info filter;
4372         int ret;
4373         struct i40e_vsi_context ctxt;
4374         struct ether_addr broadcast =
4375                 {.addr_bytes = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}};
4376
4377         if (type != I40E_VSI_MAIN && type != I40E_VSI_SRIOV &&
4378             uplink_vsi == NULL) {
4379                 PMD_DRV_LOG(ERR, "VSI setup failed, "
4380                             "VSI link shouldn't be NULL");
4381                 return NULL;
4382         }
4383
4384         if (type == I40E_VSI_MAIN && uplink_vsi != NULL) {
4385                 PMD_DRV_LOG(ERR, "VSI setup failed, MAIN VSI "
4386                             "uplink VSI should be NULL");
4387                 return NULL;
4388         }
4389
4390         /* two situations
4391          * 1.type is not MAIN and uplink vsi is not NULL
4392          * If uplink vsi didn't setup VEB, create one first under veb field
4393          * 2.type is SRIOV and the uplink is NULL
4394          * If floating VEB is NULL, create one veb under floating veb field
4395          */
4396
4397         if (type != I40E_VSI_MAIN && uplink_vsi != NULL &&
4398             uplink_vsi->veb == NULL) {
4399                 uplink_vsi->veb = i40e_veb_setup(pf, uplink_vsi);
4400
4401                 if (uplink_vsi->veb == NULL) {
4402                         PMD_DRV_LOG(ERR, "VEB setup failed");
4403                         return NULL;
4404                 }
4405                 /* set ALLOWLOOPBACk on pf, when veb is created */
4406                 i40e_enable_pf_lb(pf);
4407         }
4408
4409         if (type == I40E_VSI_SRIOV && uplink_vsi == NULL &&
4410             pf->main_vsi->floating_veb == NULL) {
4411                 pf->main_vsi->floating_veb = i40e_veb_setup(pf, uplink_vsi);
4412
4413                 if (pf->main_vsi->floating_veb == NULL) {
4414                         PMD_DRV_LOG(ERR, "VEB setup failed");
4415                         return NULL;
4416                 }
4417         }
4418
4419         vsi = rte_zmalloc("i40e_vsi", sizeof(struct i40e_vsi), 0);
4420         if (!vsi) {
4421                 PMD_DRV_LOG(ERR, "Failed to allocate memory for vsi");
4422                 return NULL;
4423         }
4424         TAILQ_INIT(&vsi->mac_list);
4425         vsi->type = type;
4426         vsi->adapter = I40E_PF_TO_ADAPTER(pf);
4427         vsi->max_macaddrs = I40E_NUM_MACADDR_MAX;
4428         vsi->parent_vsi = uplink_vsi ? uplink_vsi : pf->main_vsi;
4429         vsi->user_param = user_param;
4430         /* Allocate queues */
4431         switch (vsi->type) {
4432         case I40E_VSI_MAIN  :
4433                 vsi->nb_qps = pf->lan_nb_qps;
4434                 break;
4435         case I40E_VSI_SRIOV :
4436                 vsi->nb_qps = pf->vf_nb_qps;
4437                 break;
4438         case I40E_VSI_VMDQ2:
4439                 vsi->nb_qps = pf->vmdq_nb_qps;
4440                 break;
4441         case I40E_VSI_FDIR:
4442                 vsi->nb_qps = pf->fdir_nb_qps;
4443                 break;
4444         default:
4445                 goto fail_mem;
4446         }
4447         /*
4448          * The filter status descriptor is reported in rx queue 0,
4449          * while the tx queue for fdir filter programming has no
4450          * such constraints, can be non-zero queues.
4451          * To simplify it, choose FDIR vsi use queue 0 pair.
4452          * To make sure it will use queue 0 pair, queue allocation
4453          * need be done before this function is called
4454          */
4455         if (type != I40E_VSI_FDIR) {
4456                 ret = i40e_res_pool_alloc(&pf->qp_pool, vsi->nb_qps);
4457                         if (ret < 0) {
4458                                 PMD_DRV_LOG(ERR, "VSI %d allocate queue failed %d",
4459                                                 vsi->seid, ret);
4460                                 goto fail_mem;
4461                         }
4462                         vsi->base_queue = ret;
4463         } else
4464                 vsi->base_queue = I40E_FDIR_QUEUE_ID;
4465
4466         /* VF has MSIX interrupt in VF range, don't allocate here */
4467         if (type == I40E_VSI_MAIN) {
4468                 ret = i40e_res_pool_alloc(&pf->msix_pool,
4469                                           RTE_MIN(vsi->nb_qps,
4470                                                   RTE_MAX_RXTX_INTR_VEC_ID));
4471                 if (ret < 0) {
4472                         PMD_DRV_LOG(ERR, "VSI MAIN %d get heap failed %d",
4473                                     vsi->seid, ret);
4474                         goto fail_queue_alloc;
4475                 }
4476                 vsi->msix_intr = ret;
4477                 vsi->nb_msix = RTE_MIN(vsi->nb_qps, RTE_MAX_RXTX_INTR_VEC_ID);
4478         } else if (type != I40E_VSI_SRIOV) {
4479                 ret = i40e_res_pool_alloc(&pf->msix_pool, 1);
4480                 if (ret < 0) {
4481                         PMD_DRV_LOG(ERR, "VSI %d get heap failed %d", vsi->seid, ret);
4482                         goto fail_queue_alloc;
4483                 }
4484                 vsi->msix_intr = ret;
4485                 vsi->nb_msix = 1;
4486         } else {
4487                 vsi->msix_intr = 0;
4488                 vsi->nb_msix = 0;
4489         }
4490
4491         /* Add VSI */
4492         if (type == I40E_VSI_MAIN) {
4493                 /* For main VSI, no need to add since it's default one */
4494                 vsi->uplink_seid = pf->mac_seid;
4495                 vsi->seid = pf->main_vsi_seid;
4496                 /* Bind queues with specific MSIX interrupt */
4497                 /**
4498                  * Needs 2 interrupt at least, one for misc cause which will
4499                  * enabled from OS side, Another for queues binding the
4500                  * interrupt from device side only.
4501                  */
4502
4503                 /* Get default VSI parameters from hardware */
4504                 memset(&ctxt, 0, sizeof(ctxt));
4505                 ctxt.seid = vsi->seid;
4506                 ctxt.pf_num = hw->pf_id;
4507                 ctxt.uplink_seid = vsi->uplink_seid;
4508                 ctxt.vf_num = 0;
4509                 ret = i40e_aq_get_vsi_params(hw, &ctxt, NULL);
4510                 if (ret != I40E_SUCCESS) {
4511                         PMD_DRV_LOG(ERR, "Failed to get VSI params");
4512                         goto fail_msix_alloc;
4513                 }
4514                 (void)rte_memcpy(&vsi->info, &ctxt.info,
4515                         sizeof(struct i40e_aqc_vsi_properties_data));
4516                 vsi->vsi_id = ctxt.vsi_number;
4517                 vsi->info.valid_sections = 0;
4518
4519                 /* Configure tc, enabled TC0 only */
4520                 if (i40e_vsi_update_tc_bandwidth(vsi, I40E_DEFAULT_TCMAP) !=
4521                         I40E_SUCCESS) {
4522                         PMD_DRV_LOG(ERR, "Failed to update TC bandwidth");
4523                         goto fail_msix_alloc;
4524                 }
4525
4526                 /* TC, queue mapping */
4527                 memset(&ctxt, 0, sizeof(ctxt));
4528                 vsi->info.valid_sections |=
4529                         rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
4530                 vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
4531                                         I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
4532                 (void)rte_memcpy(&ctxt.info, &vsi->info,
4533                         sizeof(struct i40e_aqc_vsi_properties_data));
4534                 ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
4535                                                 I40E_DEFAULT_TCMAP);
4536                 if (ret != I40E_SUCCESS) {
4537                         PMD_DRV_LOG(ERR, "Failed to configure "
4538                                     "TC queue mapping");
4539                         goto fail_msix_alloc;
4540                 }
4541                 ctxt.seid = vsi->seid;
4542                 ctxt.pf_num = hw->pf_id;
4543                 ctxt.uplink_seid = vsi->uplink_seid;
4544                 ctxt.vf_num = 0;
4545
4546                 /* Update VSI parameters */
4547                 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
4548                 if (ret != I40E_SUCCESS) {
4549                         PMD_DRV_LOG(ERR, "Failed to update VSI params");
4550                         goto fail_msix_alloc;
4551                 }
4552
4553                 (void)rte_memcpy(&vsi->info.tc_mapping, &ctxt.info.tc_mapping,
4554                                                 sizeof(vsi->info.tc_mapping));
4555                 (void)rte_memcpy(&vsi->info.queue_mapping,
4556                                 &ctxt.info.queue_mapping,
4557                         sizeof(vsi->info.queue_mapping));
4558                 vsi->info.mapping_flags = ctxt.info.mapping_flags;
4559                 vsi->info.valid_sections = 0;
4560
4561                 (void)rte_memcpy(pf->dev_addr.addr_bytes, hw->mac.perm_addr,
4562                                 ETH_ADDR_LEN);
4563
4564                 /**
4565                  * Updating default filter settings are necessary to prevent
4566                  * reception of tagged packets.
4567                  * Some old firmware configurations load a default macvlan
4568                  * filter which accepts both tagged and untagged packets.
4569                  * The updating is to use a normal filter instead if needed.
4570                  * For NVM 4.2.2 or after, the updating is not needed anymore.
4571                  * The firmware with correct configurations load the default
4572                  * macvlan filter which is expected and cannot be removed.
4573                  */
4574                 i40e_update_default_filter_setting(vsi);
4575                 i40e_config_qinq(hw, vsi);
4576         } else if (type == I40E_VSI_SRIOV) {
4577                 memset(&ctxt, 0, sizeof(ctxt));
4578                 /**
4579                  * For other VSI, the uplink_seid equals to uplink VSI's
4580                  * uplink_seid since they share same VEB
4581                  */
4582                 if (uplink_vsi == NULL)
4583                         vsi->uplink_seid = pf->main_vsi->floating_veb->seid;
4584                 else
4585                         vsi->uplink_seid = uplink_vsi->uplink_seid;
4586                 ctxt.pf_num = hw->pf_id;
4587                 ctxt.vf_num = hw->func_caps.vf_base_id + user_param;
4588                 ctxt.uplink_seid = vsi->uplink_seid;
4589                 ctxt.connection_type = 0x1;
4590                 ctxt.flags = I40E_AQ_VSI_TYPE_VF;
4591
4592                 /* Use the VEB configuration if FW >= v5.0 */
4593                 if (hw->aq.fw_maj_ver >= 5) {
4594                         /* Configure switch ID */
4595                         ctxt.info.valid_sections |=
4596                         rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SWITCH_VALID);
4597                         ctxt.info.switch_id =
4598                         rte_cpu_to_le_16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
4599                 }
4600
4601                 /* Configure port/vlan */
4602                 ctxt.info.valid_sections |=
4603                         rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
4604                 ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL;
4605                 ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
4606                                                 I40E_DEFAULT_TCMAP);
4607                 if (ret != I40E_SUCCESS) {
4608                         PMD_DRV_LOG(ERR, "Failed to configure "
4609                                     "TC queue mapping");
4610                         goto fail_msix_alloc;
4611                 }
4612                 ctxt.info.up_enable_bits = I40E_DEFAULT_TCMAP;
4613                 ctxt.info.valid_sections |=
4614                         rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SCHED_VALID);
4615                 /**
4616                  * Since VSI is not created yet, only configure parameter,
4617                  * will add vsi below.
4618                  */
4619
4620                 i40e_config_qinq(hw, vsi);
4621         } else if (type == I40E_VSI_VMDQ2) {
4622                 memset(&ctxt, 0, sizeof(ctxt));
4623                 /*
4624                  * For other VSI, the uplink_seid equals to uplink VSI's
4625                  * uplink_seid since they share same VEB
4626                  */
4627                 vsi->uplink_seid = uplink_vsi->uplink_seid;
4628                 ctxt.pf_num = hw->pf_id;
4629                 ctxt.vf_num = 0;
4630                 ctxt.uplink_seid = vsi->uplink_seid;
4631                 ctxt.connection_type = 0x1;
4632                 ctxt.flags = I40E_AQ_VSI_TYPE_VMDQ2;
4633
4634                 ctxt.info.valid_sections |=
4635                                 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SWITCH_VALID);
4636                 /* user_param carries flag to enable loop back */
4637                 if (user_param) {
4638                         ctxt.info.switch_id =
4639                         rte_cpu_to_le_16(I40E_AQ_VSI_SW_ID_FLAG_LOCAL_LB);
4640                         ctxt.info.switch_id |=
4641                         rte_cpu_to_le_16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
4642                 }
4643
4644                 /* Configure port/vlan */
4645                 ctxt.info.valid_sections |=
4646                         rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
4647                 ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL;
4648                 ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
4649                                                 I40E_DEFAULT_TCMAP);
4650                 if (ret != I40E_SUCCESS) {
4651                         PMD_DRV_LOG(ERR, "Failed to configure "
4652                                         "TC queue mapping");
4653                         goto fail_msix_alloc;
4654                 }
4655                 ctxt.info.up_enable_bits = I40E_DEFAULT_TCMAP;
4656                 ctxt.info.valid_sections |=
4657                         rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SCHED_VALID);
4658         } else if (type == I40E_VSI_FDIR) {
4659                 memset(&ctxt, 0, sizeof(ctxt));
4660                 vsi->uplink_seid = uplink_vsi->uplink_seid;
4661                 ctxt.pf_num = hw->pf_id;
4662                 ctxt.vf_num = 0;
4663                 ctxt.uplink_seid = vsi->uplink_seid;
4664                 ctxt.connection_type = 0x1;     /* regular data port */
4665                 ctxt.flags = I40E_AQ_VSI_TYPE_PF;
4666                 ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
4667                                                 I40E_DEFAULT_TCMAP);
4668                 if (ret != I40E_SUCCESS) {
4669                         PMD_DRV_LOG(ERR, "Failed to configure "
4670                                         "TC queue mapping.");
4671                         goto fail_msix_alloc;
4672                 }
4673                 ctxt.info.up_enable_bits = I40E_DEFAULT_TCMAP;
4674                 ctxt.info.valid_sections |=
4675                         rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SCHED_VALID);
4676         } else {
4677                 PMD_DRV_LOG(ERR, "VSI: Not support other type VSI yet");
4678                 goto fail_msix_alloc;
4679         }
4680
4681         if (vsi->type != I40E_VSI_MAIN) {
4682                 ret = i40e_aq_add_vsi(hw, &ctxt, NULL);
4683                 if (ret != I40E_SUCCESS) {
4684                         PMD_DRV_LOG(ERR, "add vsi failed, aq_err=%d",
4685                                     hw->aq.asq_last_status);
4686                         goto fail_msix_alloc;
4687                 }
4688                 memcpy(&vsi->info, &ctxt.info, sizeof(ctxt.info));
4689                 vsi->info.valid_sections = 0;
4690                 vsi->seid = ctxt.seid;
4691                 vsi->vsi_id = ctxt.vsi_number;
4692                 vsi->sib_vsi_list.vsi = vsi;
4693                 if (vsi->type == I40E_VSI_SRIOV && uplink_vsi == NULL) {
4694                         TAILQ_INSERT_TAIL(&pf->main_vsi->floating_veb->head,
4695                                           &vsi->sib_vsi_list, list);
4696                 } else {
4697                         TAILQ_INSERT_TAIL(&uplink_vsi->veb->head,
4698                                           &vsi->sib_vsi_list, list);
4699                 }
4700         }
4701
4702         /* MAC/VLAN configuration */
4703         (void)rte_memcpy(&filter.mac_addr, &broadcast, ETHER_ADDR_LEN);
4704         filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
4705
4706         ret = i40e_vsi_add_mac(vsi, &filter);
4707         if (ret != I40E_SUCCESS) {
4708                 PMD_DRV_LOG(ERR, "Failed to add MACVLAN filter");
4709                 goto fail_msix_alloc;
4710         }
4711
4712         /* Get VSI BW information */
4713         i40e_vsi_get_bw_config(vsi);
4714         return vsi;
4715 fail_msix_alloc:
4716         i40e_res_pool_free(&pf->msix_pool,vsi->msix_intr);
4717 fail_queue_alloc:
4718         i40e_res_pool_free(&pf->qp_pool,vsi->base_queue);
4719 fail_mem:
4720         rte_free(vsi);
4721         return NULL;
4722 }
4723
4724 /* Configure vlan filter on or off */
4725 int
4726 i40e_vsi_config_vlan_filter(struct i40e_vsi *vsi, bool on)
4727 {
4728         int i, num;
4729         struct i40e_mac_filter *f;
4730         void *temp;
4731         struct i40e_mac_filter_info *mac_filter;
4732         enum rte_mac_filter_type desired_filter;
4733         int ret = I40E_SUCCESS;
4734
4735         if (on) {
4736                 /* Filter to match MAC and VLAN */
4737                 desired_filter = RTE_MACVLAN_PERFECT_MATCH;
4738         } else {
4739                 /* Filter to match only MAC */
4740                 desired_filter = RTE_MAC_PERFECT_MATCH;
4741         }
4742
4743         num = vsi->mac_num;
4744
4745         mac_filter = rte_zmalloc("mac_filter_info_data",
4746                                  num * sizeof(*mac_filter), 0);
4747         if (mac_filter == NULL) {
4748                 PMD_DRV_LOG(ERR, "failed to allocate memory");
4749                 return I40E_ERR_NO_MEMORY;
4750         }
4751
4752         i = 0;
4753
4754         /* Remove all existing mac */
4755         TAILQ_FOREACH_SAFE(f, &vsi->mac_list, next, temp) {
4756                 mac_filter[i] = f->mac_info;
4757                 ret = i40e_vsi_delete_mac(vsi, &f->mac_info.mac_addr);
4758                 if (ret) {
4759                         PMD_DRV_LOG(ERR, "Update VSI failed to %s vlan filter",
4760                                     on ? "enable" : "disable");
4761                         goto DONE;
4762                 }
4763                 i++;
4764         }
4765
4766         /* Override with new filter */
4767         for (i = 0; i < num; i++) {
4768                 mac_filter[i].filter_type = desired_filter;
4769                 ret = i40e_vsi_add_mac(vsi, &mac_filter[i]);
4770                 if (ret) {
4771                         PMD_DRV_LOG(ERR, "Update VSI failed to %s vlan filter",
4772                                     on ? "enable" : "disable");
4773                         goto DONE;
4774                 }
4775         }
4776
4777 DONE:
4778         rte_free(mac_filter);
4779         return ret;
4780 }
4781
4782 /* Configure vlan stripping on or off */
4783 int
4784 i40e_vsi_config_vlan_stripping(struct i40e_vsi *vsi, bool on)
4785 {
4786         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
4787         struct i40e_vsi_context ctxt;
4788         uint8_t vlan_flags;
4789         int ret = I40E_SUCCESS;
4790
4791         /* Check if it has been already on or off */
4792         if (vsi->info.valid_sections &
4793                 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID)) {
4794                 if (on) {
4795                         if ((vsi->info.port_vlan_flags &
4796                                 I40E_AQ_VSI_PVLAN_EMOD_MASK) == 0)
4797                                 return 0; /* already on */
4798                 } else {
4799                         if ((vsi->info.port_vlan_flags &
4800                                 I40E_AQ_VSI_PVLAN_EMOD_MASK) ==
4801                                 I40E_AQ_VSI_PVLAN_EMOD_MASK)
4802                                 return 0; /* already off */
4803                 }
4804         }
4805
4806         if (on)
4807                 vlan_flags = I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
4808         else
4809                 vlan_flags = I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
4810         vsi->info.valid_sections =
4811                 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
4812         vsi->info.port_vlan_flags &= ~(I40E_AQ_VSI_PVLAN_EMOD_MASK);
4813         vsi->info.port_vlan_flags |= vlan_flags;
4814         ctxt.seid = vsi->seid;
4815         (void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
4816         ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
4817         if (ret)
4818                 PMD_DRV_LOG(INFO, "Update VSI failed to %s vlan stripping",
4819                             on ? "enable" : "disable");
4820
4821         return ret;
4822 }
4823
4824 static int
4825 i40e_dev_init_vlan(struct rte_eth_dev *dev)
4826 {
4827         struct rte_eth_dev_data *data = dev->data;
4828         int ret;
4829         int mask = 0;
4830
4831         /* Apply vlan offload setting */
4832         mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK;
4833         i40e_vlan_offload_set(dev, mask);
4834
4835         /* Apply double-vlan setting, not implemented yet */
4836
4837         /* Apply pvid setting */
4838         ret = i40e_vlan_pvid_set(dev, data->dev_conf.txmode.pvid,
4839                                 data->dev_conf.txmode.hw_vlan_insert_pvid);
4840         if (ret)
4841                 PMD_DRV_LOG(INFO, "Failed to update VSI params");
4842
4843         return ret;
4844 }
4845
4846 static int
4847 i40e_vsi_config_double_vlan(struct i40e_vsi *vsi, int on)
4848 {
4849         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
4850
4851         return i40e_aq_set_port_parameters(hw, vsi->seid, 0, 1, on, NULL);
4852 }
4853
/* Sync flow-control settings with the link auto-negotiation result.
 *
 * Reads the link status, derives the RX/TX pause enables from the
 * negotiated pause bits, records the mode in hw->fc.current_mode and
 * programs PRTDCB_FCCFG (tx) and PRTDCB_MFLCN (rx).  If the link info
 * cannot be read or auto-negotiation has not completed, control jumps
 * to write_reg with rxfc = txfc = 0, i.e. flow control is disabled.
 *
 * Returns I40E_SUCCESS, an AQ error code, or I40E_ERR_NOT_READY.
 */
static int
i40e_update_flow_control(struct i40e_hw *hw)
{
#define I40E_LINK_PAUSE_RXTX (I40E_AQ_LINK_PAUSE_RX | I40E_AQ_LINK_PAUSE_TX)
	struct i40e_link_status link_status;
	uint32_t rxfc = 0, txfc = 0, reg;
	uint8_t an_info;
	int ret;

	memset(&link_status, 0, sizeof(link_status));
	ret = i40e_aq_get_link_info(hw, FALSE, &link_status, NULL);
	if (ret != I40E_SUCCESS) {
		PMD_DRV_LOG(ERR, "Failed to get link status information");
		goto write_reg; /* Disable flow control */
	}

	an_info = hw->phy.link_info.an_info;
	if (!(an_info & I40E_AQ_AN_COMPLETED)) {
		PMD_DRV_LOG(INFO, "Link auto negotiation not completed");
		ret = I40E_ERR_NOT_READY;
		goto write_reg; /* Disable flow control */
	}
	/**
	 * If link auto negotiation is enabled, flow control needs to
	 * be configured according to it
	 */
	switch (an_info & I40E_LINK_PAUSE_RXTX) {
	case I40E_LINK_PAUSE_RXTX:
		rxfc = 1;
		txfc = 1;
		hw->fc.current_mode = I40E_FC_FULL;
		break;
	case I40E_AQ_LINK_PAUSE_RX:
		rxfc = 1;
		hw->fc.current_mode = I40E_FC_RX_PAUSE;
		break;
	case I40E_AQ_LINK_PAUSE_TX:
		txfc = 1;
		hw->fc.current_mode = I40E_FC_TX_PAUSE;
		break;
	default:
		hw->fc.current_mode = I40E_FC_NONE;
		break;
	}

write_reg:
	/* Program tx pause enable, then read-modify-write the rx enable bit */
	I40E_WRITE_REG(hw, I40E_PRTDCB_FCCFG,
		txfc << I40E_PRTDCB_FCCFG_TFCE_SHIFT);
	reg = I40E_READ_REG(hw, I40E_PRTDCB_MFLCN);
	reg &= ~I40E_PRTDCB_MFLCN_RFCE_MASK;
	reg |= rxfc << I40E_PRTDCB_MFLCN_RFCE_SHIFT;
	I40E_WRITE_REG(hw, I40E_PRTDCB_MFLCN, reg);

	return ret;
}
4909
/* PF setup.
 *
 * One-time PF initialization: clears the stats snapshots, reads the
 * switch configuration, reserves queue pair 0 for flow director (this
 * must happen BEFORE the main VSI allocates its queues, so that the
 * FDIR VSI ends up on queue pair 0), creates the main VSI, programs
 * the filter-control settings (hash LUT size, ethtype/macvlan filters)
 * and finally syncs flow control with the negotiated link state.
 *
 * Returns I40E_SUCCESS, or an error code if the switch config, main
 * VSI setup or hash LUT size validation fails.  A failed FDIR queue
 * reservation is not fatal: the FDIR flag is simply cleared.
 */
static int
i40e_pf_setup(struct i40e_pf *pf)
{
	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
	struct i40e_filter_control_settings settings;
	struct i40e_vsi *vsi;
	int ret;

	/* Clear all stats counters */
	pf->offset_loaded = FALSE;
	memset(&pf->stats, 0, sizeof(struct i40e_hw_port_stats));
	memset(&pf->stats_offset, 0, sizeof(struct i40e_hw_port_stats));

	ret = i40e_pf_get_switch_config(pf);
	if (ret != I40E_SUCCESS) {
		PMD_DRV_LOG(ERR, "Could not get switch config, err %d", ret);
		return ret;
	}
	if (pf->flags & I40E_FLAG_FDIR) {
		/* make queue allocated first, let FDIR use queue pair 0*/
		ret = i40e_res_pool_alloc(&pf->qp_pool, I40E_DEFAULT_QP_NUM_FDIR);
		if (ret != I40E_FDIR_QUEUE_ID) {
			PMD_DRV_LOG(ERR, "queue allocation fails for FDIR :"
				    " ret =%d", ret);
			pf->flags &= ~I40E_FLAG_FDIR;
		}
	}
	/*  main VSI setup */
	vsi = i40e_vsi_setup(pf, I40E_VSI_MAIN, NULL, 0);
	if (!vsi) {
		PMD_DRV_LOG(ERR, "Setup of main vsi failed");
		return I40E_ERR_NOT_READY;
	}
	pf->main_vsi = vsi;

	/* Configure filter control */
	memset(&settings, 0, sizeof(settings));
	if (hw->func_caps.rss_table_size == ETH_RSS_RETA_SIZE_128)
		settings.hash_lut_size = I40E_HASH_LUT_SIZE_128;
	else if (hw->func_caps.rss_table_size == ETH_RSS_RETA_SIZE_512)
		settings.hash_lut_size = I40E_HASH_LUT_SIZE_512;
	else {
		PMD_DRV_LOG(ERR, "Hash lookup table size (%u) not supported\n",
						hw->func_caps.rss_table_size);
		return I40E_ERR_PARAM;
	}
	PMD_DRV_LOG(INFO, "Hardware capability of hash lookup table "
			"size: %u\n", hw->func_caps.rss_table_size);
	pf->hash_lut_size = hw->func_caps.rss_table_size;

	/* Enable ethtype and macvlan filters */
	settings.enable_ethtype = TRUE;
	settings.enable_macvlan = TRUE;
	ret = i40e_set_filter_control(hw, &settings);
	if (ret)
		/* non-fatal: filter control keeps its previous settings */
		PMD_INIT_LOG(WARNING, "setup_pf_filter_control failed: %d",
								ret);

	/* Update flow control according to the auto negotiation */
	i40e_update_flow_control(hw);

	return I40E_SUCCESS;
}
4974
/* Switch a single tx queue on or off.
 * @hw: pointer to the hardware structure
 * @q_idx: index of the tx queue to switch
 * @on: true to enable, false to disable
 *
 * Performs the hardware handshake on the QTX_ENA register: pre-config
 * the queue-disable flags, wait for any in-flight request (QENA_REQ)
 * to match the status bit (QENA_STAT), then set or clear QENA_REQ and
 * poll until the status bit follows.  When enabling, QTX_HEAD is reset
 * to 0 first, before QENA_REQ is raised.
 *
 * Returns I40E_SUCCESS (including when the queue is already in the
 * requested state) or I40E_ERR_TIMEOUT if the status bit never settles.
 */
int
i40e_switch_tx_queue(struct i40e_hw *hw, uint16_t q_idx, bool on)
{
	uint32_t reg;
	uint16_t j;

	/**
	 * Set or clear TX Queue Disable flags,
	 * which is required by hardware.
	 */
	i40e_pre_tx_queue_cfg(hw, q_idx, on);
	rte_delay_us(I40E_PRE_TX_Q_CFG_WAIT_US);

	/* Wait until the request is finished */
	for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
		rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
		reg = I40E_READ_REG(hw, I40E_QTX_ENA(q_idx));
		/* REQ == STAT means no request is pending */
		if (!(((reg >> I40E_QTX_ENA_QENA_REQ_SHIFT) & 0x1) ^
			((reg >> I40E_QTX_ENA_QENA_STAT_SHIFT)
							& 0x1))) {
			break;
		}
	}
	if (on) {
		if (reg & I40E_QTX_ENA_QENA_STAT_MASK)
			return I40E_SUCCESS; /* already on, skip next steps */

		I40E_WRITE_REG(hw, I40E_QTX_HEAD(q_idx), 0);
		reg |= I40E_QTX_ENA_QENA_REQ_MASK;
	} else {
		if (!(reg & I40E_QTX_ENA_QENA_STAT_MASK))
			return I40E_SUCCESS; /* already off, skip next steps */
		reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
	}
	/* Write the register */
	I40E_WRITE_REG(hw, I40E_QTX_ENA(q_idx), reg);
	/* Check the result */
	for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
		rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
		reg = I40E_READ_REG(hw, I40E_QTX_ENA(q_idx));
		if (on) {
			if ((reg & I40E_QTX_ENA_QENA_REQ_MASK) &&
				(reg & I40E_QTX_ENA_QENA_STAT_MASK))
				break;
		} else {
			if (!(reg & I40E_QTX_ENA_QENA_REQ_MASK) &&
				!(reg & I40E_QTX_ENA_QENA_STAT_MASK))
				break;
		}
	}
	/* Check if it is timeout */
	if (j >= I40E_CHK_Q_ENA_COUNT) {
		PMD_DRV_LOG(ERR, "Failed to %s tx queue[%u]",
			    (on ? "enable" : "disable"), q_idx);
		return I40E_ERR_TIMEOUT;
	}

	return I40E_SUCCESS;
}
5034
/* Switch on or off the tx queues */
5036 static int
5037 i40e_dev_switch_tx_queues(struct i40e_pf *pf, bool on)
5038 {
5039         struct rte_eth_dev_data *dev_data = pf->dev_data;
5040         struct i40e_tx_queue *txq;
5041         struct rte_eth_dev *dev = pf->adapter->eth_dev;
5042         uint16_t i;
5043         int ret;
5044
5045         for (i = 0; i < dev_data->nb_tx_queues; i++) {
5046                 txq = dev_data->tx_queues[i];
5047                 /* Don't operate the queue if not configured or
5048                  * if starting only per queue */
5049                 if (!txq || !txq->q_set || (on && txq->tx_deferred_start))
5050                         continue;
5051                 if (on)
5052                         ret = i40e_dev_tx_queue_start(dev, i);
5053                 else
5054                         ret = i40e_dev_tx_queue_stop(dev, i);
5055                 if ( ret != I40E_SUCCESS)
5056                         return ret;
5057         }
5058
5059         return I40E_SUCCESS;
5060 }
5061
5062 int
5063 i40e_switch_rx_queue(struct i40e_hw *hw, uint16_t q_idx, bool on)
5064 {
5065         uint32_t reg;
5066         uint16_t j;
5067
5068         /* Wait until the request is finished */
5069         for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
5070                 rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
5071                 reg = I40E_READ_REG(hw, I40E_QRX_ENA(q_idx));
5072                 if (!((reg >> I40E_QRX_ENA_QENA_REQ_SHIFT) & 0x1) ^
5073                         ((reg >> I40E_QRX_ENA_QENA_STAT_SHIFT) & 0x1))
5074                         break;
5075         }
5076
5077         if (on) {
5078                 if (reg & I40E_QRX_ENA_QENA_STAT_MASK)
5079                         return I40E_SUCCESS; /* Already on, skip next steps */
5080                 reg |= I40E_QRX_ENA_QENA_REQ_MASK;
5081         } else {
5082                 if (!(reg & I40E_QRX_ENA_QENA_STAT_MASK))
5083                         return I40E_SUCCESS; /* Already off, skip next steps */
5084                 reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
5085         }
5086
5087         /* Write the register */
5088         I40E_WRITE_REG(hw, I40E_QRX_ENA(q_idx), reg);
5089         /* Check the result */
5090         for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
5091                 rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
5092                 reg = I40E_READ_REG(hw, I40E_QRX_ENA(q_idx));
5093                 if (on) {
5094                         if ((reg & I40E_QRX_ENA_QENA_REQ_MASK) &&
5095                                 (reg & I40E_QRX_ENA_QENA_STAT_MASK))
5096                                 break;
5097                 } else {
5098                         if (!(reg & I40E_QRX_ENA_QENA_REQ_MASK) &&
5099                                 !(reg & I40E_QRX_ENA_QENA_STAT_MASK))
5100                                 break;
5101                 }
5102         }
5103
5104         /* Check if it is timeout */
5105         if (j >= I40E_CHK_Q_ENA_COUNT) {
5106                 PMD_DRV_LOG(ERR, "Failed to %s rx queue[%u]",
5107                             (on ? "enable" : "disable"), q_idx);
5108                 return I40E_ERR_TIMEOUT;
5109         }
5110
5111         return I40E_SUCCESS;
5112 }
5113 /* Switch on or off the rx queues */
5114 static int
5115 i40e_dev_switch_rx_queues(struct i40e_pf *pf, bool on)
5116 {
5117         struct rte_eth_dev_data *dev_data = pf->dev_data;
5118         struct i40e_rx_queue *rxq;
5119         struct rte_eth_dev *dev = pf->adapter->eth_dev;
5120         uint16_t i;
5121         int ret;
5122
5123         for (i = 0; i < dev_data->nb_rx_queues; i++) {
5124                 rxq = dev_data->rx_queues[i];
5125                 /* Don't operate the queue if not configured or
5126                  * if starting only per queue */
5127                 if (!rxq || !rxq->q_set || (on && rxq->rx_deferred_start))
5128                         continue;
5129                 if (on)
5130                         ret = i40e_dev_rx_queue_start(dev, i);
5131                 else
5132                         ret = i40e_dev_rx_queue_stop(dev, i);
5133                 if (ret != I40E_SUCCESS)
5134                         return ret;
5135         }
5136
5137         return I40E_SUCCESS;
5138 }
5139
5140 /* Switch on or off all the rx/tx queues */
5141 int
5142 i40e_dev_switch_queues(struct i40e_pf *pf, bool on)
5143 {
5144         int ret;
5145
5146         if (on) {
5147                 /* enable rx queues before enabling tx queues */
5148                 ret = i40e_dev_switch_rx_queues(pf, on);
5149                 if (ret) {
5150                         PMD_DRV_LOG(ERR, "Failed to switch rx queues");
5151                         return ret;
5152                 }
5153                 ret = i40e_dev_switch_tx_queues(pf, on);
5154         } else {
5155                 /* Stop tx queues before stopping rx queues */
5156                 ret = i40e_dev_switch_tx_queues(pf, on);
5157                 if (ret) {
5158                         PMD_DRV_LOG(ERR, "Failed to switch tx queues");
5159                         return ret;
5160                 }
5161                 ret = i40e_dev_switch_rx_queues(pf, on);
5162         }
5163
5164         return ret;
5165 }
5166
5167 /* Initialize VSI for TX */
5168 static int
5169 i40e_dev_tx_init(struct i40e_pf *pf)
5170 {
5171         struct rte_eth_dev_data *data = pf->dev_data;
5172         uint16_t i;
5173         uint32_t ret = I40E_SUCCESS;
5174         struct i40e_tx_queue *txq;
5175
5176         for (i = 0; i < data->nb_tx_queues; i++) {
5177                 txq = data->tx_queues[i];
5178                 if (!txq || !txq->q_set)
5179                         continue;
5180                 ret = i40e_tx_queue_init(txq);
5181                 if (ret != I40E_SUCCESS)
5182                         break;
5183         }
5184         if (ret == I40E_SUCCESS)
5185                 i40e_set_tx_function(container_of(pf, struct i40e_adapter, pf)
5186                                      ->eth_dev);
5187
5188         return ret;
5189 }
5190
5191 /* Initialize VSI for RX */
5192 static int
5193 i40e_dev_rx_init(struct i40e_pf *pf)
5194 {
5195         struct rte_eth_dev_data *data = pf->dev_data;
5196         int ret = I40E_SUCCESS;
5197         uint16_t i;
5198         struct i40e_rx_queue *rxq;
5199
5200         i40e_pf_config_mq_rx(pf);
5201         for (i = 0; i < data->nb_rx_queues; i++) {
5202                 rxq = data->rx_queues[i];
5203                 if (!rxq || !rxq->q_set)
5204                         continue;
5205
5206                 ret = i40e_rx_queue_init(rxq);
5207                 if (ret != I40E_SUCCESS) {
5208                         PMD_DRV_LOG(ERR, "Failed to do RX queue "
5209                                     "initialization");
5210                         break;
5211                 }
5212         }
5213         if (ret == I40E_SUCCESS)
5214                 i40e_set_rx_function(container_of(pf, struct i40e_adapter, pf)
5215                                      ->eth_dev);
5216
5217         return ret;
5218 }
5219
5220 static int
5221 i40e_dev_rxtx_init(struct i40e_pf *pf)
5222 {
5223         int err;
5224
5225         err = i40e_dev_tx_init(pf);
5226         if (err) {
5227                 PMD_DRV_LOG(ERR, "Failed to do TX initialization");
5228                 return err;
5229         }
5230         err = i40e_dev_rx_init(pf);
5231         if (err) {
5232                 PMD_DRV_LOG(ERR, "Failed to do RX initialization");
5233                 return err;
5234         }
5235
5236         return err;
5237 }
5238
5239 static int
5240 i40e_vmdq_setup(struct rte_eth_dev *dev)
5241 {
5242         struct rte_eth_conf *conf = &dev->data->dev_conf;
5243         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
5244         int i, err, conf_vsis, j, loop;
5245         struct i40e_vsi *vsi;
5246         struct i40e_vmdq_info *vmdq_info;
5247         struct rte_eth_vmdq_rx_conf *vmdq_conf;
5248         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
5249
5250         /*
5251          * Disable interrupt to avoid message from VF. Furthermore, it will
5252          * avoid race condition in VSI creation/destroy.
5253          */
5254         i40e_pf_disable_irq0(hw);
5255
5256         if ((pf->flags & I40E_FLAG_VMDQ) == 0) {
5257                 PMD_INIT_LOG(ERR, "FW doesn't support VMDQ");
5258                 return -ENOTSUP;
5259         }
5260
5261         conf_vsis = conf->rx_adv_conf.vmdq_rx_conf.nb_queue_pools;
5262         if (conf_vsis > pf->max_nb_vmdq_vsi) {
5263                 PMD_INIT_LOG(ERR, "VMDQ config: %u, max support:%u",
5264                         conf->rx_adv_conf.vmdq_rx_conf.nb_queue_pools,
5265                         pf->max_nb_vmdq_vsi);
5266                 return -ENOTSUP;
5267         }
5268
5269         if (pf->vmdq != NULL) {
5270                 PMD_INIT_LOG(INFO, "VMDQ already configured");
5271                 return 0;
5272         }
5273
5274         pf->vmdq = rte_zmalloc("vmdq_info_struct",
5275                                 sizeof(*vmdq_info) * conf_vsis, 0);
5276
5277         if (pf->vmdq == NULL) {
5278                 PMD_INIT_LOG(ERR, "Failed to allocate memory");
5279                 return -ENOMEM;
5280         }
5281
5282         vmdq_conf = &conf->rx_adv_conf.vmdq_rx_conf;
5283
5284         /* Create VMDQ VSI */
5285         for (i = 0; i < conf_vsis; i++) {
5286                 vsi = i40e_vsi_setup(pf, I40E_VSI_VMDQ2, pf->main_vsi,
5287                                 vmdq_conf->enable_loop_back);
5288                 if (vsi == NULL) {
5289                         PMD_INIT_LOG(ERR, "Failed to create VMDQ VSI");
5290                         err = -1;
5291                         goto err_vsi_setup;
5292                 }
5293                 vmdq_info = &pf->vmdq[i];
5294                 vmdq_info->pf = pf;
5295                 vmdq_info->vsi = vsi;
5296         }
5297         pf->nb_cfg_vmdq_vsi = conf_vsis;
5298
5299         /* Configure Vlan */
5300         loop = sizeof(vmdq_conf->pool_map[0].pools) * CHAR_BIT;
5301         for (i = 0; i < vmdq_conf->nb_pool_maps; i++) {
5302                 for (j = 0; j < loop && j < pf->nb_cfg_vmdq_vsi; j++) {
5303                         if (vmdq_conf->pool_map[i].pools & (1UL << j)) {
5304                                 PMD_INIT_LOG(INFO, "Add vlan %u to vmdq pool %u",
5305                                         vmdq_conf->pool_map[i].vlan_id, j);
5306
5307                                 err = i40e_vsi_add_vlan(pf->vmdq[j].vsi,
5308                                                 vmdq_conf->pool_map[i].vlan_id);
5309                                 if (err) {
5310                                         PMD_INIT_LOG(ERR, "Failed to add vlan");
5311                                         err = -1;
5312                                         goto err_vsi_setup;
5313                                 }
5314                         }
5315                 }
5316         }
5317
5318         i40e_pf_enable_irq0(hw);
5319
5320         return 0;
5321
5322 err_vsi_setup:
5323         for (i = 0; i < conf_vsis; i++)
5324                 if (pf->vmdq[i].vsi == NULL)
5325                         break;
5326                 else
5327                         i40e_vsi_release(pf->vmdq[i].vsi);
5328
5329         rte_free(pf->vmdq);
5330         pf->vmdq = NULL;
5331         i40e_pf_enable_irq0(hw);
5332         return err;
5333 }
5334
5335 static void
5336 i40e_stat_update_32(struct i40e_hw *hw,
5337                    uint32_t reg,
5338                    bool offset_loaded,
5339                    uint64_t *offset,
5340                    uint64_t *stat)
5341 {
5342         uint64_t new_data;
5343
5344         new_data = (uint64_t)I40E_READ_REG(hw, reg);
5345         if (!offset_loaded)
5346                 *offset = new_data;
5347
5348         if (new_data >= *offset)
5349                 *stat = (uint64_t)(new_data - *offset);
5350         else
5351                 *stat = (uint64_t)((new_data +
5352                         ((uint64_t)1 << I40E_32_BIT_WIDTH)) - *offset);
5353 }
5354
5355 static void
5356 i40e_stat_update_48(struct i40e_hw *hw,
5357                    uint32_t hireg,
5358                    uint32_t loreg,
5359                    bool offset_loaded,
5360                    uint64_t *offset,
5361                    uint64_t *stat)
5362 {
5363         uint64_t new_data;
5364
5365         new_data = (uint64_t)I40E_READ_REG(hw, loreg);
5366         new_data |= ((uint64_t)(I40E_READ_REG(hw, hireg) &
5367                         I40E_16_BIT_MASK)) << I40E_32_BIT_WIDTH;
5368
5369         if (!offset_loaded)
5370                 *offset = new_data;
5371
5372         if (new_data >= *offset)
5373                 *stat = new_data - *offset;
5374         else
5375                 *stat = (uint64_t)((new_data +
5376                         ((uint64_t)1 << I40E_48_BIT_WIDTH)) - *offset);
5377
5378         *stat &= I40E_48_BIT_MASK;
5379 }
5380
5381 /* Disable IRQ0 */
void
i40e_pf_disable_irq0(struct i40e_hw *hw)
{
	/* Disable all interrupt types */
	I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0, 0);
	/* Flush the posted write so the disable takes effect before return */
	I40E_WRITE_FLUSH(hw);
}
5389
5390 /* Enable IRQ0 */
void
i40e_pf_enable_irq0(struct i40e_hw *hw)
{
	/* Set interrupt-enable, clear any pending PBA bit, and select
	 * the ITR index in a single DYN_CTL0 write.
	 */
	I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
		I40E_PFINT_DYN_CTL0_INTENA_MASK |
		I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
		I40E_PFINT_DYN_CTL0_ITR_INDX_MASK);
	/* Flush the posted write so the enable takes effect before return */
	I40E_WRITE_FLUSH(hw);
}
5400
/* Configure the "other cause" interrupt (IRQ0) sources.
 *
 * @no_queue: when true, point the IRQ0 queue linked list at "no queue"
 *            so only non-queue causes are reported through this vector.
 */
static void
i40e_pf_config_irq0(struct i40e_hw *hw, bool no_queue)
{
	/* read pending request and disable first */
	i40e_pf_disable_irq0(hw);
	/* Enable all cause bits in ICR0 */
	I40E_WRITE_REG(hw, I40E_PFINT_ICR0_ENA, I40E_PFINT_ICR0_ENA_MASK);
	/* Select the ITR index used for the "other cause" interrupt */
	I40E_WRITE_REG(hw, I40E_PFINT_STAT_CTL0,
		I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_MASK);

	if (no_queue)
		/* Link no queues with irq0 */
		I40E_WRITE_REG(hw, I40E_PFINT_LNKLST0,
			       I40E_PFINT_LNKLST0_FIRSTQ_INDX_MASK);
}
5415
5416 static void
5417 i40e_dev_handle_vfr_event(struct rte_eth_dev *dev)
5418 {
5419         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5420         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
5421         int i;
5422         uint16_t abs_vf_id;
5423         uint32_t index, offset, val;
5424
5425         if (!pf->vfs)
5426                 return;
5427         /**
5428          * Try to find which VF trigger a reset, use absolute VF id to access
5429          * since the reg is global register.
5430          */
5431         for (i = 0; i < pf->vf_num; i++) {
5432                 abs_vf_id = hw->func_caps.vf_base_id + i;
5433                 index = abs_vf_id / I40E_UINT32_BIT_SIZE;
5434                 offset = abs_vf_id % I40E_UINT32_BIT_SIZE;
5435                 val = I40E_READ_REG(hw, I40E_GLGEN_VFLRSTAT(index));
5436                 /* VFR event occured */
5437                 if (val & (0x1 << offset)) {
5438                         int ret;
5439
5440                         /* Clear the event first */
5441                         I40E_WRITE_REG(hw, I40E_GLGEN_VFLRSTAT(index),
5442                                                         (0x1 << offset));
5443                         PMD_DRV_LOG(INFO, "VF %u reset occured", abs_vf_id);
5444                         /**
5445                          * Only notify a VF reset event occured,
5446                          * don't trigger another SW reset
5447                          */
5448                         ret = i40e_pf_host_vf_reset(&pf->vfs[i], 0);
5449                         if (ret != I40E_SUCCESS)
5450                                 PMD_DRV_LOG(ERR, "Failed to do VF reset");
5451                 }
5452         }
5453 }
5454
5455 static void
5456 i40e_notify_all_vfs_link_status(struct rte_eth_dev *dev)
5457 {
5458         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
5459         struct i40e_virtchnl_pf_event event;
5460         int i;
5461
5462         event.event = I40E_VIRTCHNL_EVENT_LINK_CHANGE;
5463         event.event_data.link_event.link_status =
5464                 dev->data->dev_link.link_status;
5465         event.event_data.link_event.link_speed =
5466                 (enum i40e_aq_link_speed)dev->data->dev_link.link_speed;
5467
5468         for (i = 0; i < pf->vf_num; i++)
5469                 i40e_pf_host_send_msg_to_vf(&pf->vfs[i], I40E_VIRTCHNL_OP_EVENT,
5470                                 I40E_SUCCESS, (uint8_t *)&event, sizeof(event));
5471 }
5472
/* Drain pending AdminQ receive (ARQ) events and dispatch them.
 *
 * Handles VF->PF mailbox messages and link status events; any other
 * opcode is logged and dropped.
 */
static void
i40e_dev_handle_aq_msg(struct rte_eth_dev *dev)
{
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct i40e_arq_event_info info;
	uint16_t pending, opcode;
	int ret;

	/* Scratch buffer for the event payload, reused per iteration */
	info.buf_len = I40E_AQ_BUF_SZ;
	info.msg_buf = rte_zmalloc("msg_buffer", info.buf_len, 0);
	if (!info.msg_buf) {
		PMD_DRV_LOG(ERR, "Failed to allocate mem");
		return;
	}

	/* i40e_clean_arq_element() updates 'pending' with the number of
	 * events still queued; loop until the ARQ is empty or a read fails.
	 */
	pending = 1;
	while (pending) {
		ret = i40e_clean_arq_element(hw, &info, &pending);

		if (ret != I40E_SUCCESS) {
			PMD_DRV_LOG(INFO, "Failed to read msg from AdminQ, "
				    "aq_err: %u", hw->aq.asq_last_status);
			break;
		}
		opcode = rte_le_to_cpu_16(info.desc.opcode);

		switch (opcode) {
		case i40e_aqc_opc_send_msg_to_pf:
			/* Refer to i40e_aq_send_msg_to_pf() for argument layout*/
			i40e_pf_host_handle_vf_msg(dev,
					rte_le_to_cpu_16(info.desc.retval),
					rte_le_to_cpu_32(info.desc.cookie_high),
					rte_le_to_cpu_32(info.desc.cookie_low),
					info.msg_buf,
					info.msg_len);
			break;
		case i40e_aqc_opc_get_link_status:
			/* NOTE(review): callback fires only when
			 * i40e_dev_link_update() returns 0 — presumably
			 * "link state changed"; confirm against ethdev
			 * link_update convention.
			 */
			ret = i40e_dev_link_update(dev, 0);
			if (!ret)
				_rte_eth_dev_callback_process(dev,
					RTE_ETH_EVENT_INTR_LSC, NULL);
			break;
		default:
			PMD_DRV_LOG(ERR, "Request %u is not supported yet",
				    opcode);
			break;
		}
	}
	rte_free(info.msg_buf);
}
5523
5524 /**
5525  * Interrupt handler triggered by NIC  for handling
5526  * specific interrupt.
5527  *
5528  * @param handle
5529  *  Pointer to interrupt handle.
5530  * @param param
 *  The address of parameter (struct rte_eth_dev *) registered before.
5532  *
5533  * @return
5534  *  void
5535  */
static void
i40e_dev_interrupt_handler(__rte_unused struct rte_intr_handle *handle,
			   void *param)
{
	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t icr0;

	/* Disable interrupt */
	i40e_pf_disable_irq0(hw);

	/* read out interrupt causes */
	icr0 = I40E_READ_REG(hw, I40E_PFINT_ICR0);

	/* No interrupt event indicated */
	if (!(icr0 & I40E_PFINT_ICR0_INTEVENT_MASK)) {
		PMD_DRV_LOG(INFO, "No interrupt event");
		goto done;
	}
#ifdef RTE_LIBRTE_I40E_DEBUG_DRIVER
	/* Debug builds only: log every error cause bit; no recovery is
	 * attempted for these here.
	 */
	if (icr0 & I40E_PFINT_ICR0_ECC_ERR_MASK)
		PMD_DRV_LOG(ERR, "ICR0: unrecoverable ECC error");
	if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK)
		PMD_DRV_LOG(ERR, "ICR0: malicious programming detected");
	if (icr0 & I40E_PFINT_ICR0_GRST_MASK)
		PMD_DRV_LOG(INFO, "ICR0: global reset requested");
	if (icr0 & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK)
		PMD_DRV_LOG(INFO, "ICR0: PCI exception activated");
	if (icr0 & I40E_PFINT_ICR0_STORM_DETECT_MASK)
		PMD_DRV_LOG(INFO, "ICR0: a change in the storm control state");
	if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK)
		PMD_DRV_LOG(ERR, "ICR0: HMC error");
	if (icr0 & I40E_PFINT_ICR0_PE_CRITERR_MASK)
		PMD_DRV_LOG(ERR, "ICR0: protocol engine critical error");
#endif /* RTE_LIBRTE_I40E_DEBUG_DRIVER */

	/* Causes that are actually serviced: VF resets and AdminQ events */
	if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) {
		PMD_DRV_LOG(INFO, "ICR0: VF reset detected");
		i40e_dev_handle_vfr_event(dev);
	}
	if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
		PMD_DRV_LOG(INFO, "ICR0: adminq event");
		i40e_dev_handle_aq_msg(dev);
	}

done:
	/* Enable interrupt */
	i40e_pf_enable_irq0(hw);
	/* Re-arm the interrupt at the host (PCI/UIO/VFIO) level as well */
	rte_intr_enable(&(dev->pci_dev->intr_handle));
}
5586
5587 static int
5588 i40e_add_macvlan_filters(struct i40e_vsi *vsi,
5589                          struct i40e_macvlan_filter *filter,
5590                          int total)
5591 {
5592         int ele_num, ele_buff_size;
5593         int num, actual_num, i;
5594         uint16_t flags;
5595         int ret = I40E_SUCCESS;
5596         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
5597         struct i40e_aqc_add_macvlan_element_data *req_list;
5598
5599         if (filter == NULL  || total == 0)
5600                 return I40E_ERR_PARAM;
5601         ele_num = hw->aq.asq_buf_size / sizeof(*req_list);
5602         ele_buff_size = hw->aq.asq_buf_size;
5603
5604         req_list = rte_zmalloc("macvlan_add", ele_buff_size, 0);
5605         if (req_list == NULL) {
5606                 PMD_DRV_LOG(ERR, "Fail to allocate memory");
5607                 return I40E_ERR_NO_MEMORY;
5608         }
5609
5610         num = 0;
5611         do {
5612                 actual_num = (num + ele_num > total) ? (total - num) : ele_num;
5613                 memset(req_list, 0, ele_buff_size);
5614
5615                 for (i = 0; i < actual_num; i++) {
5616                         (void)rte_memcpy(req_list[i].mac_addr,
5617                                 &filter[num + i].macaddr, ETH_ADDR_LEN);
5618                         req_list[i].vlan_tag =
5619                                 rte_cpu_to_le_16(filter[num + i].vlan_id);
5620
5621                         switch (filter[num + i].filter_type) {
5622                         case RTE_MAC_PERFECT_MATCH:
5623                                 flags = I40E_AQC_MACVLAN_ADD_PERFECT_MATCH |
5624                                         I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
5625                                 break;
5626                         case RTE_MACVLAN_PERFECT_MATCH:
5627                                 flags = I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
5628                                 break;
5629                         case RTE_MAC_HASH_MATCH:
5630                                 flags = I40E_AQC_MACVLAN_ADD_HASH_MATCH |
5631                                         I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
5632                                 break;
5633                         case RTE_MACVLAN_HASH_MATCH:
5634                                 flags = I40E_AQC_MACVLAN_ADD_HASH_MATCH;
5635                                 break;
5636                         default:
5637                                 PMD_DRV_LOG(ERR, "Invalid MAC match type\n");
5638                                 ret = I40E_ERR_PARAM;
5639                                 goto DONE;
5640                         }
5641
5642                         req_list[i].queue_number = 0;
5643
5644                         req_list[i].flags = rte_cpu_to_le_16(flags);
5645                 }
5646
5647                 ret = i40e_aq_add_macvlan(hw, vsi->seid, req_list,
5648                                                 actual_num, NULL);
5649                 if (ret != I40E_SUCCESS) {
5650                         PMD_DRV_LOG(ERR, "Failed to add macvlan filter");
5651                         goto DONE;
5652                 }
5653                 num += actual_num;
5654         } while (num < total);
5655
5656 DONE:
5657         rte_free(req_list);
5658         return ret;
5659 }
5660
5661 static int
5662 i40e_remove_macvlan_filters(struct i40e_vsi *vsi,
5663                             struct i40e_macvlan_filter *filter,
5664                             int total)
5665 {
5666         int ele_num, ele_buff_size;
5667         int num, actual_num, i;
5668         uint16_t flags;
5669         int ret = I40E_SUCCESS;
5670         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
5671         struct i40e_aqc_remove_macvlan_element_data *req_list;
5672
5673         if (filter == NULL  || total == 0)
5674                 return I40E_ERR_PARAM;
5675
5676         ele_num = hw->aq.asq_buf_size / sizeof(*req_list);
5677         ele_buff_size = hw->aq.asq_buf_size;
5678
5679         req_list = rte_zmalloc("macvlan_remove", ele_buff_size, 0);
5680         if (req_list == NULL) {
5681                 PMD_DRV_LOG(ERR, "Fail to allocate memory");
5682                 return I40E_ERR_NO_MEMORY;
5683         }
5684
5685         num = 0;
5686         do {
5687                 actual_num = (num + ele_num > total) ? (total - num) : ele_num;
5688                 memset(req_list, 0, ele_buff_size);
5689
5690                 for (i = 0; i < actual_num; i++) {
5691                         (void)rte_memcpy(req_list[i].mac_addr,
5692                                 &filter[num + i].macaddr, ETH_ADDR_LEN);
5693                         req_list[i].vlan_tag =
5694                                 rte_cpu_to_le_16(filter[num + i].vlan_id);
5695
5696                         switch (filter[num + i].filter_type) {
5697                         case RTE_MAC_PERFECT_MATCH:
5698                                 flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
5699                                         I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
5700                                 break;
5701                         case RTE_MACVLAN_PERFECT_MATCH:
5702                                 flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
5703                                 break;
5704                         case RTE_MAC_HASH_MATCH:
5705                                 flags = I40E_AQC_MACVLAN_DEL_HASH_MATCH |
5706                                         I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
5707                                 break;
5708                         case RTE_MACVLAN_HASH_MATCH:
5709                                 flags = I40E_AQC_MACVLAN_DEL_HASH_MATCH;
5710                                 break;
5711                         default:
5712                                 PMD_DRV_LOG(ERR, "Invalid MAC filter type\n");
5713                                 ret = I40E_ERR_PARAM;
5714                                 goto DONE;
5715                         }
5716                         req_list[i].flags = rte_cpu_to_le_16(flags);
5717                 }
5718
5719                 ret = i40e_aq_remove_macvlan(hw, vsi->seid, req_list,
5720                                                 actual_num, NULL);
5721                 if (ret != I40E_SUCCESS) {
5722                         PMD_DRV_LOG(ERR, "Failed to remove macvlan filter");
5723                         goto DONE;
5724                 }
5725                 num += actual_num;
5726         } while (num < total);
5727
5728 DONE:
5729         rte_free(req_list);
5730         return ret;
5731 }
5732
5733 /* Find out specific MAC filter */
5734 static struct i40e_mac_filter *
5735 i40e_find_mac_filter(struct i40e_vsi *vsi,
5736                          struct ether_addr *macaddr)
5737 {
5738         struct i40e_mac_filter *f;
5739
5740         TAILQ_FOREACH(f, &vsi->mac_list, next) {
5741                 if (is_same_ether_addr(macaddr, &f->mac_info.mac_addr))
5742                         return f;
5743         }
5744
5745         return NULL;
5746 }
5747
5748 static bool
5749 i40e_find_vlan_filter(struct i40e_vsi *vsi,
5750                          uint16_t vlan_id)
5751 {
5752         uint32_t vid_idx, vid_bit;
5753
5754         if (vlan_id > ETH_VLAN_ID_MAX)
5755                 return 0;
5756
5757         vid_idx = I40E_VFTA_IDX(vlan_id);
5758         vid_bit = I40E_VFTA_BIT(vlan_id);
5759
5760         if (vsi->vfta[vid_idx] & vid_bit)
5761                 return 1;
5762         else
5763                 return 0;
5764 }
5765
5766 static void
5767 i40e_set_vlan_filter(struct i40e_vsi *vsi,
5768                          uint16_t vlan_id, bool on)
5769 {
5770         uint32_t vid_idx, vid_bit;
5771
5772         if (vlan_id > ETH_VLAN_ID_MAX)
5773                 return;
5774
5775         vid_idx = I40E_VFTA_IDX(vlan_id);
5776         vid_bit = I40E_VFTA_BIT(vlan_id);
5777
5778         if (on)
5779                 vsi->vfta[vid_idx] |= vid_bit;
5780         else
5781                 vsi->vfta[vid_idx] &= ~vid_bit;
5782 }
5783
5784 /**
5785  * Find all vlan options for specific mac addr,
5786  * return with actual vlan found.
5787  */
5788 static inline int
5789 i40e_find_all_vlan_for_mac(struct i40e_vsi *vsi,
5790                            struct i40e_macvlan_filter *mv_f,
5791                            int num, struct ether_addr *addr)
5792 {
5793         int i;
5794         uint32_t j, k;
5795
5796         /**
5797          * Not to use i40e_find_vlan_filter to decrease the loop time,
5798          * although the code looks complex.
5799           */
5800         if (num < vsi->vlan_num)
5801                 return I40E_ERR_PARAM;
5802
5803         i = 0;
5804         for (j = 0; j < I40E_VFTA_SIZE; j++) {
5805                 if (vsi->vfta[j]) {
5806                         for (k = 0; k < I40E_UINT32_BIT_SIZE; k++) {
5807                                 if (vsi->vfta[j] & (1 << k)) {
5808                                         if (i > num - 1) {
5809                                                 PMD_DRV_LOG(ERR, "vlan number "
5810                                                             "not match");
5811                                                 return I40E_ERR_PARAM;
5812                                         }
5813                                         (void)rte_memcpy(&mv_f[i].macaddr,
5814                                                         addr, ETH_ADDR_LEN);
5815                                         mv_f[i].vlan_id =
5816                                                 j * I40E_UINT32_BIT_SIZE + k;
5817                                         i++;
5818                                 }
5819                         }
5820                 }
5821         }
5822         return I40E_SUCCESS;
5823 }
5824
5825 static inline int
5826 i40e_find_all_mac_for_vlan(struct i40e_vsi *vsi,
5827                            struct i40e_macvlan_filter *mv_f,
5828                            int num,
5829                            uint16_t vlan)
5830 {
5831         int i = 0;
5832         struct i40e_mac_filter *f;
5833
5834         if (num < vsi->mac_num)
5835                 return I40E_ERR_PARAM;
5836
5837         TAILQ_FOREACH(f, &vsi->mac_list, next) {
5838                 if (i > num - 1) {
5839                         PMD_DRV_LOG(ERR, "buffer number not match");
5840                         return I40E_ERR_PARAM;
5841                 }
5842                 (void)rte_memcpy(&mv_f[i].macaddr, &f->mac_info.mac_addr,
5843                                 ETH_ADDR_LEN);
5844                 mv_f[i].vlan_id = vlan;
5845                 mv_f[i].filter_type = f->mac_info.filter_type;
5846                 i++;
5847         }
5848
5849         return I40E_SUCCESS;
5850 }
5851
5852 static int
5853 i40e_vsi_remove_all_macvlan_filter(struct i40e_vsi *vsi)
5854 {
5855         int i, j, num;
5856         struct i40e_mac_filter *f;
5857         struct i40e_macvlan_filter *mv_f;
5858         int ret = I40E_SUCCESS;
5859
5860         if (vsi == NULL || vsi->mac_num == 0)
5861                 return I40E_ERR_PARAM;
5862
5863         /* Case that no vlan is set */
5864         if (vsi->vlan_num == 0)
5865                 num = vsi->mac_num;
5866         else
5867                 num = vsi->mac_num * vsi->vlan_num;
5868
5869         mv_f = rte_zmalloc("macvlan_data", num * sizeof(*mv_f), 0);
5870         if (mv_f == NULL) {
5871                 PMD_DRV_LOG(ERR, "failed to allocate memory");
5872                 return I40E_ERR_NO_MEMORY;
5873         }
5874
5875         i = 0;
5876         if (vsi->vlan_num == 0) {
5877                 TAILQ_FOREACH(f, &vsi->mac_list, next) {
5878                         (void)rte_memcpy(&mv_f[i].macaddr,
5879                                 &f->mac_info.mac_addr, ETH_ADDR_LEN);
5880                         mv_f[i].filter_type = f->mac_info.filter_type;
5881                         mv_f[i].vlan_id = 0;
5882                         i++;
5883                 }
5884         } else {
5885                 TAILQ_FOREACH(f, &vsi->mac_list, next) {
5886                         ret = i40e_find_all_vlan_for_mac(vsi,&mv_f[i],
5887                                         vsi->vlan_num, &f->mac_info.mac_addr);
5888                         if (ret != I40E_SUCCESS)
5889                                 goto DONE;
5890                         for (j = i; j < i + vsi->vlan_num; j++)
5891                                 mv_f[j].filter_type = f->mac_info.filter_type;
5892                         i += vsi->vlan_num;
5893                 }
5894         }
5895
5896         ret = i40e_remove_macvlan_filters(vsi, mv_f, num);
5897 DONE:
5898         rte_free(mv_f);
5899
5900         return ret;
5901 }
5902
5903 int
5904 i40e_vsi_add_vlan(struct i40e_vsi *vsi, uint16_t vlan)
5905 {
5906         struct i40e_macvlan_filter *mv_f;
5907         int mac_num;
5908         int ret = I40E_SUCCESS;
5909
5910         if (!vsi || vlan > ETHER_MAX_VLAN_ID)
5911                 return I40E_ERR_PARAM;
5912
5913         /* If it's already set, just return */
5914         if (i40e_find_vlan_filter(vsi,vlan))
5915                 return I40E_SUCCESS;
5916
5917         mac_num = vsi->mac_num;
5918
5919         if (mac_num == 0) {
5920                 PMD_DRV_LOG(ERR, "Error! VSI doesn't have a mac addr");
5921                 return I40E_ERR_PARAM;
5922         }
5923
5924         mv_f = rte_zmalloc("macvlan_data", mac_num * sizeof(*mv_f), 0);
5925
5926         if (mv_f == NULL) {
5927                 PMD_DRV_LOG(ERR, "failed to allocate memory");
5928                 return I40E_ERR_NO_MEMORY;
5929         }
5930
5931         ret = i40e_find_all_mac_for_vlan(vsi, mv_f, mac_num, vlan);
5932
5933         if (ret != I40E_SUCCESS)
5934                 goto DONE;
5935
5936         ret = i40e_add_macvlan_filters(vsi, mv_f, mac_num);
5937
5938         if (ret != I40E_SUCCESS)
5939                 goto DONE;
5940
5941         i40e_set_vlan_filter(vsi, vlan, 1);
5942
5943         vsi->vlan_num++;
5944         ret = I40E_SUCCESS;
5945 DONE:
5946         rte_free(mv_f);
5947         return ret;
5948 }
5949
5950 int
5951 i40e_vsi_delete_vlan(struct i40e_vsi *vsi, uint16_t vlan)
5952 {
5953         struct i40e_macvlan_filter *mv_f;
5954         int mac_num;
5955         int ret = I40E_SUCCESS;
5956
5957         /**
5958          * Vlan 0 is the generic filter for untagged packets
5959          * and can't be removed.
5960          */
5961         if (!vsi || vlan == 0 || vlan > ETHER_MAX_VLAN_ID)
5962                 return I40E_ERR_PARAM;
5963
5964         /* If can't find it, just return */
5965         if (!i40e_find_vlan_filter(vsi, vlan))
5966                 return I40E_ERR_PARAM;
5967
5968         mac_num = vsi->mac_num;
5969
5970         if (mac_num == 0) {
5971                 PMD_DRV_LOG(ERR, "Error! VSI doesn't have a mac addr");
5972                 return I40E_ERR_PARAM;
5973         }
5974
5975         mv_f = rte_zmalloc("macvlan_data", mac_num * sizeof(*mv_f), 0);
5976
5977         if (mv_f == NULL) {
5978                 PMD_DRV_LOG(ERR, "failed to allocate memory");
5979                 return I40E_ERR_NO_MEMORY;
5980         }
5981
5982         ret = i40e_find_all_mac_for_vlan(vsi, mv_f, mac_num, vlan);
5983
5984         if (ret != I40E_SUCCESS)
5985                 goto DONE;
5986
5987         ret = i40e_remove_macvlan_filters(vsi, mv_f, mac_num);
5988
5989         if (ret != I40E_SUCCESS)
5990                 goto DONE;
5991
5992         /* This is last vlan to remove, replace all mac filter with vlan 0 */
5993         if (vsi->vlan_num == 1) {
5994                 ret = i40e_find_all_mac_for_vlan(vsi, mv_f, mac_num, 0);
5995                 if (ret != I40E_SUCCESS)
5996                         goto DONE;
5997
5998                 ret = i40e_add_macvlan_filters(vsi, mv_f, mac_num);
5999                 if (ret != I40E_SUCCESS)
6000                         goto DONE;
6001         }
6002
6003         i40e_set_vlan_filter(vsi, vlan, 0);
6004
6005         vsi->vlan_num--;
6006         ret = I40E_SUCCESS;
6007 DONE:
6008         rte_free(mv_f);
6009         return ret;
6010 }
6011
6012 int
6013 i40e_vsi_add_mac(struct i40e_vsi *vsi, struct i40e_mac_filter_info *mac_filter)
6014 {
6015         struct i40e_mac_filter *f;
6016         struct i40e_macvlan_filter *mv_f;
6017         int i, vlan_num = 0;
6018         int ret = I40E_SUCCESS;
6019
6020         /* If it's add and we've config it, return */
6021         f = i40e_find_mac_filter(vsi, &mac_filter->mac_addr);
6022         if (f != NULL)
6023                 return I40E_SUCCESS;
6024         if ((mac_filter->filter_type == RTE_MACVLAN_PERFECT_MATCH) ||
6025                 (mac_filter->filter_type == RTE_MACVLAN_HASH_MATCH)) {
6026
6027                 /**
6028                  * If vlan_num is 0, that's the first time to add mac,
6029                  * set mask for vlan_id 0.
6030                  */
6031                 if (vsi->vlan_num == 0) {
6032                         i40e_set_vlan_filter(vsi, 0, 1);
6033                         vsi->vlan_num = 1;
6034                 }
6035                 vlan_num = vsi->vlan_num;
6036         } else if ((mac_filter->filter_type == RTE_MAC_PERFECT_MATCH) ||
6037                         (mac_filter->filter_type == RTE_MAC_HASH_MATCH))
6038                 vlan_num = 1;
6039
6040         mv_f = rte_zmalloc("macvlan_data", vlan_num * sizeof(*mv_f), 0);
6041         if (mv_f == NULL) {
6042                 PMD_DRV_LOG(ERR, "failed to allocate memory");
6043                 return I40E_ERR_NO_MEMORY;
6044         }
6045
6046         for (i = 0; i < vlan_num; i++) {
6047                 mv_f[i].filter_type = mac_filter->filter_type;
6048                 (void)rte_memcpy(&mv_f[i].macaddr, &mac_filter->mac_addr,
6049                                 ETH_ADDR_LEN);
6050         }
6051
6052         if (mac_filter->filter_type == RTE_MACVLAN_PERFECT_MATCH ||
6053                 mac_filter->filter_type == RTE_MACVLAN_HASH_MATCH) {
6054                 ret = i40e_find_all_vlan_for_mac(vsi, mv_f, vlan_num,
6055                                         &mac_filter->mac_addr);
6056                 if (ret != I40E_SUCCESS)
6057                         goto DONE;
6058         }
6059
6060         ret = i40e_add_macvlan_filters(vsi, mv_f, vlan_num);
6061         if (ret != I40E_SUCCESS)
6062                 goto DONE;
6063
6064         /* Add the mac addr into mac list */
6065         f = rte_zmalloc("macv_filter", sizeof(*f), 0);
6066         if (f == NULL) {
6067                 PMD_DRV_LOG(ERR, "failed to allocate memory");
6068                 ret = I40E_ERR_NO_MEMORY;
6069                 goto DONE;
6070         }
6071         (void)rte_memcpy(&f->mac_info.mac_addr, &mac_filter->mac_addr,
6072                         ETH_ADDR_LEN);
6073         f->mac_info.filter_type = mac_filter->filter_type;
6074         TAILQ_INSERT_TAIL(&vsi->mac_list, f, next);
6075         vsi->mac_num++;
6076
6077         ret = I40E_SUCCESS;
6078 DONE:
6079         rte_free(mv_f);
6080
6081         return ret;
6082 }
6083
6084 int
6085 i40e_vsi_delete_mac(struct i40e_vsi *vsi, struct ether_addr *addr)
6086 {
6087         struct i40e_mac_filter *f;
6088         struct i40e_macvlan_filter *mv_f;
6089         int i, vlan_num;
6090         enum rte_mac_filter_type filter_type;
6091         int ret = I40E_SUCCESS;
6092
6093         /* Can't find it, return an error */
6094         f = i40e_find_mac_filter(vsi, addr);
6095         if (f == NULL)
6096                 return I40E_ERR_PARAM;
6097
6098         vlan_num = vsi->vlan_num;
6099         filter_type = f->mac_info.filter_type;
6100         if (filter_type == RTE_MACVLAN_PERFECT_MATCH ||
6101                 filter_type == RTE_MACVLAN_HASH_MATCH) {
6102                 if (vlan_num == 0) {
6103                         PMD_DRV_LOG(ERR, "VLAN number shouldn't be 0\n");
6104                         return I40E_ERR_PARAM;
6105                 }
6106         } else if (filter_type == RTE_MAC_PERFECT_MATCH ||
6107                         filter_type == RTE_MAC_HASH_MATCH)
6108                 vlan_num = 1;
6109
6110         mv_f = rte_zmalloc("macvlan_data", vlan_num * sizeof(*mv_f), 0);
6111         if (mv_f == NULL) {
6112                 PMD_DRV_LOG(ERR, "failed to allocate memory");
6113                 return I40E_ERR_NO_MEMORY;
6114         }
6115
6116         for (i = 0; i < vlan_num; i++) {
6117                 mv_f[i].filter_type = filter_type;
6118                 (void)rte_memcpy(&mv_f[i].macaddr, &f->mac_info.mac_addr,
6119                                 ETH_ADDR_LEN);
6120         }
6121         if (filter_type == RTE_MACVLAN_PERFECT_MATCH ||
6122                         filter_type == RTE_MACVLAN_HASH_MATCH) {
6123                 ret = i40e_find_all_vlan_for_mac(vsi, mv_f, vlan_num, addr);
6124                 if (ret != I40E_SUCCESS)
6125                         goto DONE;
6126         }
6127
6128         ret = i40e_remove_macvlan_filters(vsi, mv_f, vlan_num);
6129         if (ret != I40E_SUCCESS)
6130                 goto DONE;
6131
6132         /* Remove the mac addr into mac list */
6133         TAILQ_REMOVE(&vsi->mac_list, f, next);
6134         rte_free(f);
6135         vsi->mac_num--;
6136
6137         ret = I40E_SUCCESS;
6138 DONE:
6139         rte_free(mv_f);
6140         return ret;
6141 }
6142
/* Configure hash enable flags for RSS */
/**
 * Translate ETH_RSS_* offload flags into the 64-bit PFQF_HENA bitmap of
 * I40E_FILTER_PCTYPE_* bits understood by the hardware.
 *
 * @flags: requested ETH_RSS_* flag mask.
 * @type:  MAC type; X722 parts expose additional packet-classifier
 *         types (TCP SYN-no-ACK, unicast/multicast UDP) that must be
 *         enabled together with the base PCTYPE.
 *
 * Returns the hash-enable bitmap (0 when @flags is 0).
 */
uint64_t
i40e_config_hena(uint64_t flags, enum i40e_mac_type type)
{
	uint64_t hena = 0;

	if (!flags)
		return hena;

	if (flags & ETH_RSS_FRAG_IPV4)
		hena |= 1ULL << I40E_FILTER_PCTYPE_FRAG_IPV4;
	if (flags & ETH_RSS_NONFRAG_IPV4_TCP) {
		if (type == I40E_MAC_X722) {
			/* X722: also hash TCP SYN packets without ACK. */
			hena |= (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_TCP) |
			 (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK);
		} else
			hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
	}
	if (flags & ETH_RSS_NONFRAG_IPV4_UDP) {
		if (type == I40E_MAC_X722) {
			/* X722: unicast/multicast UDP are separate PCTYPEs. */
			hena |= (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
			 (1ULL << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) |
			 (1ULL << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP);
		} else
			hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
	}
	if (flags & ETH_RSS_NONFRAG_IPV4_SCTP)
		hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP;
	if (flags & ETH_RSS_NONFRAG_IPV4_OTHER)
		hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER;
	if (flags & ETH_RSS_FRAG_IPV6)
		hena |= 1ULL << I40E_FILTER_PCTYPE_FRAG_IPV6;
	if (flags & ETH_RSS_NONFRAG_IPV6_TCP) {
		if (type == I40E_MAC_X722) {
			/* X722: also hash TCP SYN packets without ACK. */
			hena |= (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_TCP) |
			 (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK);
		} else
			hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_TCP;
	}
	if (flags & ETH_RSS_NONFRAG_IPV6_UDP) {
		if (type == I40E_MAC_X722) {
			/* X722: unicast/multicast UDP are separate PCTYPEs. */
			hena |= (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
			 (1ULL << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) |
			 (1ULL << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP);
		} else
			hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_UDP;
	}
	if (flags & ETH_RSS_NONFRAG_IPV6_SCTP)
		hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP;
	if (flags & ETH_RSS_NONFRAG_IPV6_OTHER)
		hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER;
	if (flags & ETH_RSS_L2_PAYLOAD)
		hena |= 1ULL << I40E_FILTER_PCTYPE_L2_PAYLOAD;

	return hena;
}
6199
/* Parse the hash enable flags */
/**
 * Inverse of i40e_config_hena(): translate a PFQF_HENA bitmap of
 * I40E_FILTER_PCTYPE_* bits back into the ETH_RSS_* flag mask.
 * Several X722-only PCTYPEs (guarded by X722_SUPPORT) map onto the
 * same ETH_RSS_* flag as their base PCTYPE.
 *
 * Returns the ETH_RSS_* mask (0 when @flags is 0).
 */
uint64_t
i40e_parse_hena(uint64_t flags)
{
	uint64_t rss_hf = 0;

	if (!flags)
		return rss_hf;
	if (flags & (1ULL << I40E_FILTER_PCTYPE_FRAG_IPV4))
		rss_hf |= ETH_RSS_FRAG_IPV4;
	if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_TCP))
		rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
#ifdef X722_SUPPORT
	/* X722 sub-PCTYPEs fold back into the base TCP/UDP flags. */
	if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK))
		rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
#endif
	if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_UDP))
		rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
#ifdef X722_SUPPORT
	if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP))
		rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
	if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP))
		rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
#endif
	if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP))
		rss_hf |= ETH_RSS_NONFRAG_IPV4_SCTP;
	if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER))
		rss_hf |= ETH_RSS_NONFRAG_IPV4_OTHER;
	if (flags & (1ULL << I40E_FILTER_PCTYPE_FRAG_IPV6))
		rss_hf |= ETH_RSS_FRAG_IPV6;
	if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_TCP))
		rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
#ifdef X722_SUPPORT
	if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK))
		rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
#endif
	if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_UDP))
		rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
#ifdef X722_SUPPORT
	if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP))
		rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
	if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP))
		rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
#endif
	if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP))
		rss_hf |= ETH_RSS_NONFRAG_IPV6_SCTP;
	if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER))
		rss_hf |= ETH_RSS_NONFRAG_IPV6_OTHER;
	if (flags & (1ULL << I40E_FILTER_PCTYPE_L2_PAYLOAD))
		rss_hf |= ETH_RSS_L2_PAYLOAD;

	return rss_hf;
}
6253
6254 /* Disable RSS */
6255 static void
6256 i40e_pf_disable_rss(struct i40e_pf *pf)
6257 {
6258         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
6259         uint64_t hena;
6260
6261         hena = (uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0));
6262         hena |= ((uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1))) << 32;
6263         if (hw->mac.type == I40E_MAC_X722)
6264                 hena &= ~I40E_RSS_HENA_ALL_X722;
6265         else
6266                 hena &= ~I40E_RSS_HENA_ALL;
6267         i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (uint32_t)hena);
6268         i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (uint32_t)(hena >> 32));
6269         I40E_WRITE_FLUSH(hw);
6270 }
6271
6272 static int
6273 i40e_set_rss_key(struct i40e_vsi *vsi, uint8_t *key, uint8_t key_len)
6274 {
6275         struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
6276         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
6277         int ret = 0;
6278
6279         if (!key || key_len == 0) {
6280                 PMD_DRV_LOG(DEBUG, "No key to be configured");
6281                 return 0;
6282         } else if (key_len != (I40E_PFQF_HKEY_MAX_INDEX + 1) *
6283                 sizeof(uint32_t)) {
6284                 PMD_DRV_LOG(ERR, "Invalid key length %u", key_len);
6285                 return -EINVAL;
6286         }
6287
6288         if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
6289                 struct i40e_aqc_get_set_rss_key_data *key_dw =
6290                         (struct i40e_aqc_get_set_rss_key_data *)key;
6291
6292                 ret = i40e_aq_set_rss_key(hw, vsi->vsi_id, key_dw);
6293                 if (ret)
6294                         PMD_INIT_LOG(ERR, "Failed to configure RSS key "
6295                                      "via AQ");
6296         } else {
6297                 uint32_t *hash_key = (uint32_t *)key;
6298                 uint16_t i;
6299
6300                 for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
6301                         i40e_write_rx_ctl(hw, I40E_PFQF_HKEY(i), hash_key[i]);
6302                 I40E_WRITE_FLUSH(hw);
6303         }
6304
6305         return ret;
6306 }
6307
6308 static int
6309 i40e_get_rss_key(struct i40e_vsi *vsi, uint8_t *key, uint8_t *key_len)
6310 {
6311         struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
6312         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
6313         int ret;
6314
6315         if (!key || !key_len)
6316                 return -EINVAL;
6317
6318         if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
6319                 ret = i40e_aq_get_rss_key(hw, vsi->vsi_id,
6320                         (struct i40e_aqc_get_set_rss_key_data *)key);
6321                 if (ret) {
6322                         PMD_INIT_LOG(ERR, "Failed to get RSS key via AQ");
6323                         return ret;
6324                 }
6325         } else {
6326                 uint32_t *key_dw = (uint32_t *)key;
6327                 uint16_t i;
6328
6329                 for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
6330                         key_dw[i] = i40e_read_rx_ctl(hw, I40E_PFQF_HKEY(i));
6331         }
6332         *key_len = (I40E_PFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t);
6333
6334         return 0;
6335 }
6336
6337 static int
6338 i40e_hw_rss_hash_set(struct i40e_pf *pf, struct rte_eth_rss_conf *rss_conf)
6339 {
6340         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
6341         uint64_t rss_hf;
6342         uint64_t hena;
6343         int ret;
6344
6345         ret = i40e_set_rss_key(pf->main_vsi, rss_conf->rss_key,
6346                                rss_conf->rss_key_len);
6347         if (ret)
6348                 return ret;
6349
6350         rss_hf = rss_conf->rss_hf;
6351         hena = (uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0));
6352         hena |= ((uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1))) << 32;
6353         if (hw->mac.type == I40E_MAC_X722)
6354                 hena &= ~I40E_RSS_HENA_ALL_X722;
6355         else
6356                 hena &= ~I40E_RSS_HENA_ALL;
6357         hena |= i40e_config_hena(rss_hf, hw->mac.type);
6358         i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (uint32_t)hena);
6359         i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (uint32_t)(hena >> 32));
6360         I40E_WRITE_FLUSH(hw);
6361
6362         return 0;
6363 }
6364
6365 static int
6366 i40e_dev_rss_hash_update(struct rte_eth_dev *dev,
6367                          struct rte_eth_rss_conf *rss_conf)
6368 {
6369         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
6370         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6371         uint64_t rss_hf = rss_conf->rss_hf & I40E_RSS_OFFLOAD_ALL;
6372         uint64_t hena;
6373
6374         hena = (uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0));
6375         hena |= ((uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1))) << 32;
6376         if (!(hena & ((hw->mac.type == I40E_MAC_X722)
6377                  ? I40E_RSS_HENA_ALL_X722
6378                  : I40E_RSS_HENA_ALL))) { /* RSS disabled */
6379                 if (rss_hf != 0) /* Enable RSS */
6380                         return -EINVAL;
6381                 return 0; /* Nothing to do */
6382         }
6383         /* RSS enabled */
6384         if (rss_hf == 0) /* Disable RSS */
6385                 return -EINVAL;
6386
6387         return i40e_hw_rss_hash_set(pf, rss_conf);
6388 }
6389
6390 static int
6391 i40e_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
6392                            struct rte_eth_rss_conf *rss_conf)
6393 {
6394         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
6395         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6396         uint64_t hena;
6397
6398         i40e_get_rss_key(pf->main_vsi, rss_conf->rss_key,
6399                          &rss_conf->rss_key_len);
6400
6401         hena = (uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0));
6402         hena |= ((uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1))) << 32;
6403         rss_conf->rss_hf = i40e_parse_hena(hena);
6404
6405         return 0;
6406 }
6407
6408 static int
6409 i40e_dev_get_filter_type(uint16_t filter_type, uint16_t *flag)
6410 {
6411         switch (filter_type) {
6412         case RTE_TUNNEL_FILTER_IMAC_IVLAN:
6413                 *flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN;
6414                 break;
6415         case RTE_TUNNEL_FILTER_IMAC_IVLAN_TENID:
6416                 *flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID;
6417                 break;
6418         case RTE_TUNNEL_FILTER_IMAC_TENID:
6419                 *flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC_TEN_ID;
6420                 break;
6421         case RTE_TUNNEL_FILTER_OMAC_TENID_IMAC:
6422                 *flag = I40E_AQC_ADD_CLOUD_FILTER_OMAC_TEN_ID_IMAC;
6423                 break;
6424         case ETH_TUNNEL_FILTER_IMAC:
6425                 *flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC;
6426                 break;
6427         case ETH_TUNNEL_FILTER_OIP:
6428                 *flag = I40E_AQC_ADD_CLOUD_FILTER_OIP;
6429                 break;
6430         case ETH_TUNNEL_FILTER_IIP:
6431                 *flag = I40E_AQC_ADD_CLOUD_FILTER_IIP;
6432                 break;
6433         default:
6434                 PMD_DRV_LOG(ERR, "invalid tunnel filter type");
6435                 return -EINVAL;
6436         }
6437
6438         return 0;
6439 }
6440
/**
 * Add or remove a cloud (tunnel) filter on the main VSI.
 *
 * @pf:            physical function context.
 * @tunnel_filter: rte_eth tunnel filter description (MACs, VLAN,
 *                 inner/outer IP, tenant id, queue, type).
 * @add:           non-zero to add the filter, zero to remove it.
 *
 * Returns 0 on success, -EINVAL on bad parameters, or the AQ status.
 */
static int
i40e_dev_tunnel_filter_set(struct i40e_pf *pf,
			struct rte_eth_tunnel_filter_conf *tunnel_filter,
			uint8_t add)
{
	uint16_t ip_type;
	uint32_t ipv4_addr;
	uint8_t i, tun_type = 0;
	/* internal variable to convert ipv6 byte order */
	uint32_t convert_ipv6[4];
	int val, ret = 0;
	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
	struct i40e_vsi *vsi = pf->main_vsi;
	struct i40e_aqc_add_remove_cloud_filters_element_data  *cld_filter;
	struct i40e_aqc_add_remove_cloud_filters_element_data  *pfilter;

	/* The AQ element is heap-allocated and freed on every exit path. */
	cld_filter = rte_zmalloc("tunnel_filter",
		sizeof(struct i40e_aqc_add_remove_cloud_filters_element_data),
		0);

	if (NULL == cld_filter) {
		PMD_DRV_LOG(ERR, "Failed to alloc memory.");
		return -EINVAL;
	}
	pfilter = cld_filter;

	ether_addr_copy(&tunnel_filter->outer_mac, (struct ether_addr*)&pfilter->outer_mac);
	ether_addr_copy(&tunnel_filter->inner_mac, (struct ether_addr*)&pfilter->inner_mac);

	pfilter->inner_vlan = rte_cpu_to_le_16(tunnel_filter->inner_vlan);
	if (tunnel_filter->ip_type == RTE_TUNNEL_IPTYPE_IPV4) {
		ip_type = I40E_AQC_ADD_CLOUD_FLAGS_IPV4;
		ipv4_addr = rte_be_to_cpu_32(tunnel_filter->ip_addr.ipv4_addr);
		/* NOTE(review): taking the address of rte_cpu_to_le_32()'s
		 * result relies on the macro expanding to an lvalue on this
		 * platform — confirm against rte_byteorder.h.
		 */
		rte_memcpy(&pfilter->ipaddr.v4.data,
				&rte_cpu_to_le_32(ipv4_addr),
				sizeof(pfilter->ipaddr.v4.data));
	} else {
		ip_type = I40E_AQC_ADD_CLOUD_FLAGS_IPV6;
		/* Convert each 32-bit word of the IPv6 address from network
		 * byte order to the little-endian layout the AQ expects.
		 */
		for (i = 0; i < 4; i++) {
			convert_ipv6[i] =
			rte_cpu_to_le_32(rte_be_to_cpu_32(tunnel_filter->ip_addr.ipv6_addr[i]));
		}
		rte_memcpy(&pfilter->ipaddr.v6.data, &convert_ipv6,
				sizeof(pfilter->ipaddr.v6.data));
	}

	/* check tunneled type */
	switch (tunnel_filter->tunnel_type) {
	case RTE_TUNNEL_TYPE_VXLAN:
		tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_VXLAN;
		break;
	case RTE_TUNNEL_TYPE_NVGRE:
		tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_NVGRE_OMAC;
		break;
	case RTE_TUNNEL_TYPE_IP_IN_GRE:
		tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_IP;
		break;
	default:
		/* Other tunnel types are not supported. */
		PMD_DRV_LOG(ERR, "tunnel type is not supported.");
		rte_free(cld_filter);
		return -EINVAL;
	}

	/* Translate the rte filter type into the AQ flag. */
	val = i40e_dev_get_filter_type(tunnel_filter->filter_type,
						&pfilter->flags);
	if (val < 0) {
		rte_free(cld_filter);
		return -EINVAL;
	}

	pfilter->flags |= rte_cpu_to_le_16(
		I40E_AQC_ADD_CLOUD_FLAGS_TO_QUEUE |
		ip_type | (tun_type << I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT));
	pfilter->tenant_id = rte_cpu_to_le_32(tunnel_filter->tenant_id);
	pfilter->queue_number = rte_cpu_to_le_16(tunnel_filter->queue_id);

	if (add)
		ret = i40e_aq_add_cloud_filters(hw, vsi->seid, cld_filter, 1);
	else
		ret = i40e_aq_remove_cloud_filters(hw, vsi->seid,
						cld_filter, 1);

	rte_free(cld_filter);
	return ret;
}
6527
6528 static int
6529 i40e_get_vxlan_port_idx(struct i40e_pf *pf, uint16_t port)
6530 {
6531         uint8_t i;
6532
6533         for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
6534                 if (pf->vxlan_ports[i] == port)
6535                         return i;
6536         }
6537
6538         return -1;
6539 }
6540
6541 static int
6542 i40e_add_vxlan_port(struct i40e_pf *pf, uint16_t port)
6543 {
6544         int  idx, ret;
6545         uint8_t filter_idx;
6546         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
6547
6548         idx = i40e_get_vxlan_port_idx(pf, port);
6549
6550         /* Check if port already exists */
6551         if (idx >= 0) {
6552                 PMD_DRV_LOG(ERR, "Port %d already offloaded", port);
6553                 return -EINVAL;
6554         }
6555
6556         /* Now check if there is space to add the new port */
6557         idx = i40e_get_vxlan_port_idx(pf, 0);
6558         if (idx < 0) {
6559                 PMD_DRV_LOG(ERR, "Maximum number of UDP ports reached,"
6560                         "not adding port %d", port);
6561                 return -ENOSPC;
6562         }
6563
6564         ret =  i40e_aq_add_udp_tunnel(hw, port, I40E_AQC_TUNNEL_TYPE_VXLAN,
6565                                         &filter_idx, NULL);
6566         if (ret < 0) {
6567                 PMD_DRV_LOG(ERR, "Failed to add VXLAN UDP port %d", port);
6568                 return -1;
6569         }
6570
6571         PMD_DRV_LOG(INFO, "Added port %d with AQ command with index %d",
6572                          port,  filter_idx);
6573
6574         /* New port: add it and mark its index in the bitmap */
6575         pf->vxlan_ports[idx] = port;
6576         pf->vxlan_bitmap |= (1 << idx);
6577
6578         if (!(pf->flags & I40E_FLAG_VXLAN))
6579                 pf->flags |= I40E_FLAG_VXLAN;
6580
6581         return 0;
6582 }
6583
6584 static int
6585 i40e_del_vxlan_port(struct i40e_pf *pf, uint16_t port)
6586 {
6587         int idx;
6588         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
6589
6590         if (!(pf->flags & I40E_FLAG_VXLAN)) {
6591                 PMD_DRV_LOG(ERR, "VXLAN UDP port was not configured.");
6592                 return -EINVAL;
6593         }
6594
6595         idx = i40e_get_vxlan_port_idx(pf, port);
6596
6597         if (idx < 0) {
6598                 PMD_DRV_LOG(ERR, "Port %d doesn't exist", port);
6599                 return -EINVAL;
6600         }
6601
6602         if (i40e_aq_del_udp_tunnel(hw, idx, NULL) < 0) {
6603                 PMD_DRV_LOG(ERR, "Failed to delete VXLAN UDP port %d", port);
6604                 return -1;
6605         }
6606
6607         PMD_DRV_LOG(INFO, "Deleted port %d with AQ command with index %d",
6608                         port, idx);
6609
6610         pf->vxlan_ports[idx] = 0;
6611         pf->vxlan_bitmap &= ~(1 << idx);
6612
6613         if (!pf->vxlan_bitmap)
6614                 pf->flags &= ~I40E_FLAG_VXLAN;
6615
6616         return 0;
6617 }
6618
6619 /* Add UDP tunneling port */
6620 static int
6621 i40e_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
6622                              struct rte_eth_udp_tunnel *udp_tunnel)
6623 {
6624         int ret = 0;
6625         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
6626
6627         if (udp_tunnel == NULL)
6628                 return -EINVAL;
6629
6630         switch (udp_tunnel->prot_type) {
6631         case RTE_TUNNEL_TYPE_VXLAN:
6632                 ret = i40e_add_vxlan_port(pf, udp_tunnel->udp_port);
6633                 break;
6634
6635         case RTE_TUNNEL_TYPE_GENEVE:
6636         case RTE_TUNNEL_TYPE_TEREDO:
6637                 PMD_DRV_LOG(ERR, "Tunnel type is not supported now.");
6638                 ret = -1;
6639                 break;
6640
6641         default:
6642                 PMD_DRV_LOG(ERR, "Invalid tunnel type");
6643                 ret = -1;
6644                 break;
6645         }
6646
6647         return ret;
6648 }
6649
6650 /* Remove UDP tunneling port */
6651 static int
6652 i40e_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
6653                              struct rte_eth_udp_tunnel *udp_tunnel)
6654 {
6655         int ret = 0;
6656         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
6657
6658         if (udp_tunnel == NULL)
6659                 return -EINVAL;
6660
6661         switch (udp_tunnel->prot_type) {
6662         case RTE_TUNNEL_TYPE_VXLAN:
6663                 ret = i40e_del_vxlan_port(pf, udp_tunnel->udp_port);
6664                 break;
6665         case RTE_TUNNEL_TYPE_GENEVE:
6666         case RTE_TUNNEL_TYPE_TEREDO:
6667                 PMD_DRV_LOG(ERR, "Tunnel type is not supported now.");
6668                 ret = -1;
6669                 break;
6670         default:
6671                 PMD_DRV_LOG(ERR, "Invalid tunnel type");
6672                 ret = -1;
6673                 break;
6674         }
6675
6676         return ret;
6677 }
6678
6679 /* Calculate the maximum number of contiguous PF queues that are configured */
6680 static int
6681 i40e_pf_calc_configured_queues_num(struct i40e_pf *pf)
6682 {
6683         struct rte_eth_dev_data *data = pf->dev_data;
6684         int i, num;
6685         struct i40e_rx_queue *rxq;
6686
6687         num = 0;
6688         for (i = 0; i < pf->lan_nb_qps; i++) {
6689                 rxq = data->rx_queues[i];
6690                 if (rxq && rxq->q_set)
6691                         num++;
6692                 else
6693                         break;
6694         }
6695
6696         return num;
6697 }
6698
/*
 * Configure RSS for the PF main VSI: program the hash lookup table (LUT)
 * over the configured RX queues and install the hash key/hash-type config.
 * Returns 0 on success, -ENOTSUP when no queues are available, or the
 * result of i40e_hw_rss_hash_set().
 */
static int
i40e_pf_config_rss(struct i40e_pf *pf)
{
	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
	struct rte_eth_rss_conf rss_conf;
	uint32_t i, lut = 0;
	uint16_t j, num;

	/*
	 * If both VMDQ and RSS enabled, not all of PF queues are configured.
	 * It's necessary to calculate the actual PF queues that are configured.
	 */
	if (pf->dev_data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG)
		num = i40e_pf_calc_configured_queues_num(pf);
	else
		num = pf->dev_data->nb_rx_queues;

	/* HW limit: at most I40E_MAX_Q_PER_TC queues can take part in RSS */
	num = RTE_MIN(num, I40E_MAX_Q_PER_TC);
	PMD_INIT_LOG(INFO, "Max of contiguous %u PF queues are configured",
			num);

	if (num == 0) {
		PMD_INIT_LOG(ERR, "No PF queues are configured to enable RSS");
		return -ENOTSUP;
	}

	/*
	 * Fill the LUT with queue indices cycling 0..num-1. Four one-byte
	 * entries are packed into each 32-bit HLUT register, so the register
	 * is flushed on every fourth entry ((i & 3) == 3); each entry is
	 * masked to the HW-reported entry width.
	 */
	for (i = 0, j = 0; i < hw->func_caps.rss_table_size; i++, j++) {
		if (j == num)
			j = 0;
		lut = (lut << 8) | (j & ((0x1 <<
			hw->func_caps.rss_table_entry_width) - 1));
		if ((i & 3) == 3)
			I40E_WRITE_REG(hw, I40E_PFQF_HLUT(i >> 2), lut);
	}

	rss_conf = pf->dev_data->dev_conf.rx_adv_conf.rss_conf;
	/* No supported hash type requested: make sure RSS is off and bail */
	if ((rss_conf.rss_hf & I40E_RSS_OFFLOAD_ALL) == 0) {
		i40e_pf_disable_rss(pf);
		return 0;
	}
	/* Fall back to a built-in key when none (or a too-short one) given */
	if (rss_conf.rss_key == NULL || rss_conf.rss_key_len <
		(I40E_PFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t)) {
		/* Random default keys */
		static uint32_t rss_key_default[] = {0x6b793944,
			0x23504cb5, 0x5bea75b6, 0x309f4f12, 0x3dc0a2b8,
			0x024ddcdf, 0x339b8ca0, 0x4c4af64a, 0x34fac605,
			0x55d85839, 0x3a58997d, 0x2ec938e1, 0x66031581};

		rss_conf.rss_key = (uint8_t *)rss_key_default;
		rss_conf.rss_key_len = (I40E_PFQF_HKEY_MAX_INDEX + 1) *
							sizeof(uint32_t);
	}

	return i40e_hw_rss_hash_set(pf, &rss_conf);
}
6755
6756 static int
6757 i40e_tunnel_filter_param_check(struct i40e_pf *pf,
6758                                struct rte_eth_tunnel_filter_conf *filter)
6759 {
6760         if (pf == NULL || filter == NULL) {
6761                 PMD_DRV_LOG(ERR, "Invalid parameter");
6762                 return -EINVAL;
6763         }
6764
6765         if (filter->queue_id >= pf->dev_data->nb_rx_queues) {
6766                 PMD_DRV_LOG(ERR, "Invalid queue ID");
6767                 return -EINVAL;
6768         }
6769
6770         if (filter->inner_vlan > ETHER_MAX_VLAN_ID) {
6771                 PMD_DRV_LOG(ERR, "Invalid inner VLAN ID");
6772                 return -EINVAL;
6773         }
6774
6775         if ((filter->filter_type & ETH_TUNNEL_FILTER_OMAC) &&
6776                 (is_zero_ether_addr(&filter->outer_mac))) {
6777                 PMD_DRV_LOG(ERR, "Cannot add NULL outer MAC address");
6778                 return -EINVAL;
6779         }
6780
6781         if ((filter->filter_type & ETH_TUNNEL_FILTER_IMAC) &&
6782                 (is_zero_ether_addr(&filter->inner_mac))) {
6783                 PMD_DRV_LOG(ERR, "Cannot add NULL inner MAC address");
6784                 return -EINVAL;
6785         }
6786
6787         return 0;
6788 }
6789
6790 #define I40E_GL_PRS_FVBM_MSK_ENA 0x80000000
6791 #define I40E_GL_PRS_FVBM(_i)     (0x00269760 + ((_i) * 4))
6792 static int
6793 i40e_dev_set_gre_key_len(struct i40e_hw *hw, uint8_t len)
6794 {
6795         uint32_t val, reg;
6796         int ret = -EINVAL;
6797
6798         val = I40E_READ_REG(hw, I40E_GL_PRS_FVBM(2));
6799         PMD_DRV_LOG(DEBUG, "Read original GL_PRS_FVBM with 0x%08x\n", val);
6800
6801         if (len == 3) {
6802                 reg = val | I40E_GL_PRS_FVBM_MSK_ENA;
6803         } else if (len == 4) {
6804                 reg = val & ~I40E_GL_PRS_FVBM_MSK_ENA;
6805         } else {
6806                 PMD_DRV_LOG(ERR, "Unsupported GRE key length of %u", len);
6807                 return ret;
6808         }
6809
6810         if (reg != val) {
6811                 ret = i40e_aq_debug_write_register(hw, I40E_GL_PRS_FVBM(2),
6812                                                    reg, NULL);
6813                 if (ret != 0)
6814                         return ret;
6815         } else {
6816                 ret = 0;
6817         }
6818         PMD_DRV_LOG(DEBUG, "Read modified GL_PRS_FVBM with 0x%08x\n",
6819                     I40E_READ_REG(hw, I40E_GL_PRS_FVBM(2)));
6820
6821         return ret;
6822 }
6823
6824 static int
6825 i40e_dev_global_config_set(struct i40e_hw *hw, struct rte_eth_global_cfg *cfg)
6826 {
6827         int ret = -EINVAL;
6828
6829         if (!hw || !cfg)
6830                 return -EINVAL;
6831
6832         switch (cfg->cfg_type) {
6833         case RTE_ETH_GLOBAL_CFG_TYPE_GRE_KEY_LEN:
6834                 ret = i40e_dev_set_gre_key_len(hw, cfg->cfg.gre_key_len);
6835                 break;
6836         default:
6837                 PMD_DRV_LOG(ERR, "Unknown config type %u", cfg->cfg_type);
6838                 break;
6839         }
6840
6841         return ret;
6842 }
6843
6844 static int
6845 i40e_filter_ctrl_global_config(struct rte_eth_dev *dev,
6846                                enum rte_filter_op filter_op,
6847                                void *arg)
6848 {
6849         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6850         int ret = I40E_ERR_PARAM;
6851
6852         switch (filter_op) {
6853         case RTE_ETH_FILTER_SET:
6854                 ret = i40e_dev_global_config_set(hw,
6855                         (struct rte_eth_global_cfg *)arg);
6856                 break;
6857         default:
6858                 PMD_DRV_LOG(ERR, "unknown operation %u", filter_op);
6859                 break;
6860         }
6861
6862         return ret;
6863 }
6864
6865 static int
6866 i40e_tunnel_filter_handle(struct rte_eth_dev *dev,
6867                           enum rte_filter_op filter_op,
6868                           void *arg)
6869 {
6870         struct rte_eth_tunnel_filter_conf *filter;
6871         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
6872         int ret = I40E_SUCCESS;
6873
6874         filter = (struct rte_eth_tunnel_filter_conf *)(arg);
6875
6876         if (i40e_tunnel_filter_param_check(pf, filter) < 0)
6877                 return I40E_ERR_PARAM;
6878
6879         switch (filter_op) {
6880         case RTE_ETH_FILTER_NOP:
6881                 if (!(pf->flags & I40E_FLAG_VXLAN))
6882                         ret = I40E_NOT_SUPPORTED;
6883                 break;
6884         case RTE_ETH_FILTER_ADD:
6885                 ret = i40e_dev_tunnel_filter_set(pf, filter, 1);
6886                 break;
6887         case RTE_ETH_FILTER_DELETE:
6888                 ret = i40e_dev_tunnel_filter_set(pf, filter, 0);
6889                 break;
6890         default:
6891                 PMD_DRV_LOG(ERR, "unknown operation %u", filter_op);
6892                 ret = I40E_ERR_PARAM;
6893                 break;
6894         }
6895
6896         return ret;
6897 }
6898
6899 static int
6900 i40e_pf_config_mq_rx(struct i40e_pf *pf)
6901 {
6902         int ret = 0;
6903         enum rte_eth_rx_mq_mode mq_mode = pf->dev_data->dev_conf.rxmode.mq_mode;
6904
6905         /* RSS setup */
6906         if (mq_mode & ETH_MQ_RX_RSS_FLAG)
6907                 ret = i40e_pf_config_rss(pf);
6908         else
6909                 i40e_pf_disable_rss(pf);
6910
6911         return ret;
6912 }
6913
6914 /* Get the symmetric hash enable configurations per port */
6915 static void
6916 i40e_get_symmetric_hash_enable_per_port(struct i40e_hw *hw, uint8_t *enable)
6917 {
6918         uint32_t reg = i40e_read_rx_ctl(hw, I40E_PRTQF_CTL_0);
6919
6920         *enable = reg & I40E_PRTQF_CTL_0_HSYM_ENA_MASK ? 1 : 0;
6921 }
6922
6923 /* Set the symmetric hash enable configurations per port */
6924 static void
6925 i40e_set_symmetric_hash_enable_per_port(struct i40e_hw *hw, uint8_t enable)
6926 {
6927         uint32_t reg = i40e_read_rx_ctl(hw, I40E_PRTQF_CTL_0);
6928
6929         if (enable > 0) {
6930                 if (reg & I40E_PRTQF_CTL_0_HSYM_ENA_MASK) {
6931                         PMD_DRV_LOG(INFO, "Symmetric hash has already "
6932                                                         "been enabled");
6933                         return;
6934                 }
6935                 reg |= I40E_PRTQF_CTL_0_HSYM_ENA_MASK;
6936         } else {
6937                 if (!(reg & I40E_PRTQF_CTL_0_HSYM_ENA_MASK)) {
6938                         PMD_DRV_LOG(INFO, "Symmetric hash has already "
6939                                                         "been disabled");
6940                         return;
6941                 }
6942                 reg &= ~I40E_PRTQF_CTL_0_HSYM_ENA_MASK;
6943         }
6944         i40e_write_rx_ctl(hw, I40E_PRTQF_CTL_0, reg);
6945         I40E_WRITE_FLUSH(hw);
6946 }
6947
6948 /*
6949  * Get global configurations of hash function type and symmetric hash enable
6950  * per flow type (pctype). Note that global configuration means it affects all
6951  * the ports on the same NIC.
6952  */
6953 static int
6954 i40e_get_hash_filter_global_config(struct i40e_hw *hw,
6955                                    struct rte_eth_hash_global_conf *g_cfg)
6956 {
6957         uint32_t reg, mask = I40E_FLOW_TYPES;
6958         uint16_t i;
6959         enum i40e_filter_pctype pctype;
6960
6961         memset(g_cfg, 0, sizeof(*g_cfg));
6962         reg = i40e_read_rx_ctl(hw, I40E_GLQF_CTL);
6963         if (reg & I40E_GLQF_CTL_HTOEP_MASK)
6964                 g_cfg->hash_func = RTE_ETH_HASH_FUNCTION_TOEPLITZ;
6965         else
6966                 g_cfg->hash_func = RTE_ETH_HASH_FUNCTION_SIMPLE_XOR;
6967         PMD_DRV_LOG(DEBUG, "Hash function is %s",
6968                 (reg & I40E_GLQF_CTL_HTOEP_MASK) ? "Toeplitz" : "Simple XOR");
6969
6970         for (i = 0; mask && i < RTE_ETH_FLOW_MAX; i++) {
6971                 if (!(mask & (1UL << i)))
6972                         continue;
6973                 mask &= ~(1UL << i);
6974                 /* Bit set indicats the coresponding flow type is supported */
6975                 g_cfg->valid_bit_mask[0] |= (1UL << i);
6976                 /* if flowtype is invalid, continue */
6977                 if (!I40E_VALID_FLOW(i))
6978                         continue;
6979                 pctype = i40e_flowtype_to_pctype(i);
6980                 reg = i40e_read_rx_ctl(hw, I40E_GLQF_HSYM(pctype));
6981                 if (reg & I40E_GLQF_HSYM_SYMH_ENA_MASK)
6982                         g_cfg->sym_hash_enable_mask[0] |= (1UL << i);
6983         }
6984
6985         return 0;
6986 }
6987
6988 static int
6989 i40e_hash_global_config_check(struct rte_eth_hash_global_conf *g_cfg)
6990 {
6991         uint32_t i;
6992         uint32_t mask0, i40e_mask = I40E_FLOW_TYPES;
6993
6994         if (g_cfg->hash_func != RTE_ETH_HASH_FUNCTION_TOEPLITZ &&
6995                 g_cfg->hash_func != RTE_ETH_HASH_FUNCTION_SIMPLE_XOR &&
6996                 g_cfg->hash_func != RTE_ETH_HASH_FUNCTION_DEFAULT) {
6997                 PMD_DRV_LOG(ERR, "Unsupported hash function type %d",
6998                                                 g_cfg->hash_func);
6999                 return -EINVAL;
7000         }
7001
7002         /*
7003          * As i40e supports less than 32 flow types, only first 32 bits need to
7004          * be checked.
7005          */
7006         mask0 = g_cfg->valid_bit_mask[0];
7007         for (i = 0; i < RTE_SYM_HASH_MASK_ARRAY_SIZE; i++) {
7008                 if (i == 0) {
7009                         /* Check if any unsupported flow type configured */
7010                         if ((mask0 | i40e_mask) ^ i40e_mask)
7011                                 goto mask_err;
7012                 } else {
7013                         if (g_cfg->valid_bit_mask[i])
7014                                 goto mask_err;
7015                 }
7016         }
7017
7018         return 0;
7019
7020 mask_err:
7021         PMD_DRV_LOG(ERR, "i40e unsupported flow type bit(s) configured");
7022
7023         return -EINVAL;
7024 }
7025
7026 /*
7027  * Set global configurations of hash function type and symmetric hash enable
7028  * per flow type (pctype). Note any modifying global configuration will affect
7029  * all the ports on the same NIC.
7030  */
7031 static int
7032 i40e_set_hash_filter_global_config(struct i40e_hw *hw,
7033                                    struct rte_eth_hash_global_conf *g_cfg)
7034 {
7035         int ret;
7036         uint16_t i;
7037         uint32_t reg;
7038         uint32_t mask0 = g_cfg->valid_bit_mask[0];
7039         enum i40e_filter_pctype pctype;
7040
7041         /* Check the input parameters */
7042         ret = i40e_hash_global_config_check(g_cfg);
7043         if (ret < 0)
7044                 return ret;
7045
7046         for (i = 0; mask0 && i < UINT32_BIT; i++) {
7047                 if (!(mask0 & (1UL << i)))
7048                         continue;
7049                 mask0 &= ~(1UL << i);
7050                 /* if flowtype is invalid, continue */
7051                 if (!I40E_VALID_FLOW(i))
7052                         continue;
7053                 pctype = i40e_flowtype_to_pctype(i);
7054                 reg = (g_cfg->sym_hash_enable_mask[0] & (1UL << i)) ?
7055                                 I40E_GLQF_HSYM_SYMH_ENA_MASK : 0;
7056                 if (hw->mac.type == I40E_MAC_X722) {
7057                         if (pctype == I40E_FILTER_PCTYPE_NONF_IPV4_UDP) {
7058                                 i40e_write_rx_ctl(hw, I40E_GLQF_HSYM(
7059                                   I40E_FILTER_PCTYPE_NONF_IPV4_UDP), reg);
7060                                 i40e_write_rx_ctl(hw, I40E_GLQF_HSYM(
7061                                   I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP),
7062                                   reg);
7063                                 i40e_write_rx_ctl(hw, I40E_GLQF_HSYM(
7064                                   I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP),
7065                                   reg);
7066                         } else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV4_TCP) {
7067                                 i40e_write_rx_ctl(hw, I40E_GLQF_HSYM(
7068                                   I40E_FILTER_PCTYPE_NONF_IPV4_TCP), reg);
7069                                 i40e_write_rx_ctl(hw, I40E_GLQF_HSYM(
7070                                   I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK),
7071                                   reg);
7072                         } else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV6_UDP) {
7073                                 i40e_write_rx_ctl(hw, I40E_GLQF_HSYM(
7074                                   I40E_FILTER_PCTYPE_NONF_IPV6_UDP), reg);
7075                                 i40e_write_rx_ctl(hw, I40E_GLQF_HSYM(
7076                                   I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP),
7077                                   reg);
7078                                 i40e_write_rx_ctl(hw, I40E_GLQF_HSYM(
7079                                   I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP),
7080                                   reg);
7081                         } else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV6_TCP) {
7082                                 i40e_write_rx_ctl(hw, I40E_GLQF_HSYM(
7083                                   I40E_FILTER_PCTYPE_NONF_IPV6_TCP), reg);
7084                                 i40e_write_rx_ctl(hw, I40E_GLQF_HSYM(
7085                                   I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK),
7086                                   reg);
7087                         } else {
7088                                 i40e_write_rx_ctl(hw, I40E_GLQF_HSYM(pctype),
7089                                   reg);
7090                         }
7091                 } else {
7092                         i40e_write_rx_ctl(hw, I40E_GLQF_HSYM(pctype), reg);
7093                 }
7094         }
7095
7096         reg = i40e_read_rx_ctl(hw, I40E_GLQF_CTL);
7097         if (g_cfg->hash_func == RTE_ETH_HASH_FUNCTION_TOEPLITZ) {
7098                 /* Toeplitz */
7099                 if (reg & I40E_GLQF_CTL_HTOEP_MASK) {
7100                         PMD_DRV_LOG(DEBUG, "Hash function already set to "
7101                                                                 "Toeplitz");
7102                         goto out;
7103                 }
7104                 reg |= I40E_GLQF_CTL_HTOEP_MASK;
7105         } else if (g_cfg->hash_func == RTE_ETH_HASH_FUNCTION_SIMPLE_XOR) {
7106                 /* Simple XOR */
7107                 if (!(reg & I40E_GLQF_CTL_HTOEP_MASK)) {
7108                         PMD_DRV_LOG(DEBUG, "Hash function already set to "
7109                                                         "Simple XOR");
7110                         goto out;
7111                 }
7112                 reg &= ~I40E_GLQF_CTL_HTOEP_MASK;
7113         } else
7114                 /* Use the default, and keep it as it is */
7115                 goto out;
7116
7117         i40e_write_rx_ctl(hw, I40E_GLQF_CTL, reg);
7118
7119 out:
7120         I40E_WRITE_FLUSH(hw);
7121
7122         return 0;
7123 }
7124
7125 /**
7126  * Valid input sets for hash and flow director filters per PCTYPE
7127  */
7128 static uint64_t
7129 i40e_get_valid_input_set(enum i40e_filter_pctype pctype,
7130                 enum rte_filter_type filter)
7131 {
7132         uint64_t valid;
7133
7134         static const uint64_t valid_hash_inset_table[] = {
7135                 [I40E_FILTER_PCTYPE_FRAG_IPV4] =
7136                         I40E_INSET_DMAC | I40E_INSET_SMAC |
7137                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
7138                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_SRC |
7139                         I40E_INSET_IPV4_DST | I40E_INSET_IPV4_TOS |
7140                         I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
7141                         I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
7142                         I40E_INSET_FLEX_PAYLOAD,
7143                 [I40E_FILTER_PCTYPE_NONF_IPV4_UDP] =
7144                         I40E_INSET_DMAC | I40E_INSET_SMAC |
7145                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
7146                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
7147                         I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
7148                         I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
7149                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
7150                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
7151                         I40E_INSET_FLEX_PAYLOAD,
7152 #ifdef X722_SUPPORT
7153                 [I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP] =
7154                         I40E_INSET_DMAC | I40E_INSET_SMAC |
7155                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
7156                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
7157                         I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
7158                         I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
7159                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
7160                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
7161                         I40E_INSET_FLEX_PAYLOAD,
7162                 [I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP] =
7163                         I40E_INSET_DMAC | I40E_INSET_SMAC |
7164                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
7165                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
7166                         I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
7167                         I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
7168                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
7169                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
7170                         I40E_INSET_FLEX_PAYLOAD,
7171 #endif
7172                 [I40E_FILTER_PCTYPE_NONF_IPV4_TCP] =
7173                         I40E_INSET_DMAC | I40E_INSET_SMAC |
7174                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
7175                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
7176                         I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
7177                         I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
7178                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
7179                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
7180                         I40E_INSET_TCP_FLAGS | I40E_INSET_FLEX_PAYLOAD,
7181 #ifdef X722_SUPPORT
7182                 [I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK] =
7183                         I40E_INSET_DMAC | I40E_INSET_SMAC |
7184                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
7185                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
7186                         I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
7187                         I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
7188                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
7189                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
7190                         I40E_INSET_TCP_FLAGS | I40E_INSET_FLEX_PAYLOAD,
7191 #endif
7192                 [I40E_FILTER_PCTYPE_NONF_IPV4_SCTP] =
7193                         I40E_INSET_DMAC | I40E_INSET_SMAC |
7194                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
7195                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
7196                         I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
7197                         I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
7198                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
7199                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
7200                         I40E_INSET_SCTP_VT | I40E_INSET_FLEX_PAYLOAD,
7201                 [I40E_FILTER_PCTYPE_NONF_IPV4_OTHER] =
7202                         I40E_INSET_DMAC | I40E_INSET_SMAC |
7203                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
7204                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
7205                         I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
7206                         I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
7207                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
7208                         I40E_INSET_FLEX_PAYLOAD,
7209                 [I40E_FILTER_PCTYPE_FRAG_IPV6] =
7210                         I40E_INSET_DMAC | I40E_INSET_SMAC |
7211                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
7212                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
7213                         I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
7214                         I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_TUNNEL_DMAC |
7215                         I40E_INSET_TUNNEL_ID | I40E_INSET_IPV6_SRC |
7216                         I40E_INSET_IPV6_DST | I40E_INSET_FLEX_PAYLOAD,
7217                 [I40E_FILTER_PCTYPE_NONF_IPV6_UDP] =
7218                         I40E_INSET_DMAC | I40E_INSET_SMAC |
7219                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
7220                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
7221                         I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
7222                         I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
7223                         I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
7224                         I40E_INSET_DST_PORT | I40E_INSET_FLEX_PAYLOAD,
7225 #ifdef X722_SUPPORT
7226                 [I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP] =
7227                         I40E_INSET_DMAC | I40E_INSET_SMAC |
7228                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
7229                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
7230                         I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
7231                         I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
7232                         I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
7233                         I40E_INSET_DST_PORT | I40E_INSET_TCP_FLAGS |
7234                         I40E_INSET_FLEX_PAYLOAD,
7235                 [I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP] =
7236                         I40E_INSET_DMAC | I40E_INSET_SMAC |
7237                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
7238                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
7239                         I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
7240                         I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
7241                         I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
7242                         I40E_INSET_DST_PORT | I40E_INSET_TCP_FLAGS |
7243                         I40E_INSET_FLEX_PAYLOAD,
7244 #endif
7245                 [I40E_FILTER_PCTYPE_NONF_IPV6_TCP] =
7246                         I40E_INSET_DMAC | I40E_INSET_SMAC |
7247                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
7248                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
7249                         I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
7250                         I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
7251                         I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
7252                         I40E_INSET_DST_PORT | I40E_INSET_TCP_FLAGS |
7253                         I40E_INSET_FLEX_PAYLOAD,
7254 #ifdef X722_SUPPORT
7255                 [I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK] =
7256                         I40E_INSET_DMAC | I40E_INSET_SMAC |
7257                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
7258                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
7259                         I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
7260                         I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
7261                         I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
7262                         I40E_INSET_DST_PORT | I40E_INSET_TCP_FLAGS |
7263                         I40E_INSET_FLEX_PAYLOAD,
7264 #endif
7265                 [I40E_FILTER_PCTYPE_NONF_IPV6_SCTP] =
7266                         I40E_INSET_DMAC | I40E_INSET_SMAC |
7267                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
7268                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
7269                         I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
7270                         I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
7271                         I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
7272                         I40E_INSET_DST_PORT | I40E_INSET_SCTP_VT |
7273                         I40E_INSET_FLEX_PAYLOAD,
7274                 [I40E_FILTER_PCTYPE_NONF_IPV6_OTHER] =
7275                         I40E_INSET_DMAC | I40E_INSET_SMAC |
7276                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
7277                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
7278                         I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
7279                         I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
7280                         I40E_INSET_IPV6_DST | I40E_INSET_TUNNEL_ID |
7281                         I40E_INSET_FLEX_PAYLOAD,
7282                 [I40E_FILTER_PCTYPE_L2_PAYLOAD] =
7283                         I40E_INSET_DMAC | I40E_INSET_SMAC |
7284                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
7285                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_LAST_ETHER_TYPE |
7286                         I40E_INSET_FLEX_PAYLOAD,
7287         };
7288
7289         /**
7290          * Flow director supports only fields defined in
7291          * union rte_eth_fdir_flow.
7292          */
7293         static const uint64_t valid_fdir_inset_table[] = {
7294                 [I40E_FILTER_PCTYPE_FRAG_IPV4] =
7295                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
7296                 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
7297                 I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_PROTO |
7298                 I40E_INSET_IPV4_TTL,
7299                 [I40E_FILTER_PCTYPE_NONF_IPV4_UDP] =
7300                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
7301                 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
7302                 I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
7303                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
7304 #ifdef X722_SUPPORT
7305                 [I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP] =
7306                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
7307                 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
7308                 I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
7309                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
7310                 [I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP] =
7311                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
7312                 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
7313                 I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
7314                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
7315 #endif
7316                 [I40E_FILTER_PCTYPE_NONF_IPV4_TCP] =
7317                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
7318                 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
7319                 I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
7320                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
7321 #ifdef X722_SUPPORT
7322                 [I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK] =
7323                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
7324                 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
7325                 I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
7326                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
7327 #endif
7328                 [I40E_FILTER_PCTYPE_NONF_IPV4_SCTP] =
7329                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
7330                 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
7331                 I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
7332                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
7333                 I40E_INSET_SCTP_VT,
7334                 [I40E_FILTER_PCTYPE_NONF_IPV4_OTHER] =
7335                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
7336                 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
7337                 I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_PROTO |
7338                 I40E_INSET_IPV4_TTL,
7339                 [I40E_FILTER_PCTYPE_FRAG_IPV6] =
7340                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
7341                 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
7342                 I40E_INSET_IPV6_TC | I40E_INSET_IPV6_NEXT_HDR |
7343                 I40E_INSET_IPV6_HOP_LIMIT,
7344                 [I40E_FILTER_PCTYPE_NONF_IPV6_UDP] =
7345                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
7346                 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
7347                 I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
7348                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
7349 #ifdef X722_SUPPORT
7350                 [I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP] =
7351                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
7352                 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
7353                 I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
7354                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
7355                 [I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP] =
7356                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
7357                 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
7358                 I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
7359                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
7360 #endif
7361                 [I40E_FILTER_PCTYPE_NONF_IPV6_TCP] =
7362                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
7363                 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
7364                 I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
7365                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
7366 #ifdef X722_SUPPORT
7367                 [I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK] =
7368                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
7369                 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
7370                 I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
7371                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
7372 #endif
7373                 [I40E_FILTER_PCTYPE_NONF_IPV6_SCTP] =
7374                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
7375                 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
7376                 I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
7377                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
7378                 I40E_INSET_SCTP_VT,
7379                 [I40E_FILTER_PCTYPE_NONF_IPV6_OTHER] =
7380                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
7381                 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
7382                 I40E_INSET_IPV6_TC | I40E_INSET_IPV6_NEXT_HDR |
7383                 I40E_INSET_IPV6_HOP_LIMIT,
7384                 [I40E_FILTER_PCTYPE_L2_PAYLOAD] =
7385                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
7386                 I40E_INSET_LAST_ETHER_TYPE,
7387         };
7388
7389         if (pctype > I40E_FILTER_PCTYPE_L2_PAYLOAD)
7390                 return 0;
7391         if (filter == RTE_ETH_FILTER_HASH)
7392                 valid = valid_hash_inset_table[pctype];
7393         else
7394                 valid = valid_fdir_inset_table[pctype];
7395
7396         return valid;
7397 }
7398
7399 /**
7400  * Validate if the input set is allowed for a specific PCTYPE
7401  */
7402 static int
7403 i40e_validate_input_set(enum i40e_filter_pctype pctype,
7404                 enum rte_filter_type filter, uint64_t inset)
7405 {
7406         uint64_t valid;
7407
7408         valid = i40e_get_valid_input_set(pctype, filter);
7409         if (inset & (~valid))
7410                 return -EINVAL;
7411
7412         return 0;
7413 }
7414
/* Default input set fields combination per pctype.
 * Used when an application passes RTE_ETH_INPUT_SET_DEFAULT instead of
 * enumerating the fields explicitly (see i40e_parse_input_set), and at
 * init time to program the hardware defaults (i40e_filter_input_set_init).
 */
static uint64_t
i40e_get_default_input_set(uint16_t pctype)
{
	/* Indexed directly by hardware PCTYPE value; PCTYPEs without an
	 * explicit entry implicitly default to 0 (no input set).
	 */
	static const uint64_t default_inset_table[] = {
		[I40E_FILTER_PCTYPE_FRAG_IPV4] =
			I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST,
		[I40E_FILTER_PCTYPE_NONF_IPV4_UDP] =
			I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
			I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
#ifdef X722_SUPPORT
		[I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP] =
			I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
			I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
		[I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP] =
			I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
			I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
#endif
		[I40E_FILTER_PCTYPE_NONF_IPV4_TCP] =
			I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
			I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
#ifdef X722_SUPPORT
		[I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK] =
			I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
			I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
#endif
		[I40E_FILTER_PCTYPE_NONF_IPV4_SCTP] =
			I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
			I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
			I40E_INSET_SCTP_VT,
		[I40E_FILTER_PCTYPE_NONF_IPV4_OTHER] =
			I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST,
		[I40E_FILTER_PCTYPE_FRAG_IPV6] =
			I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST,
		[I40E_FILTER_PCTYPE_NONF_IPV6_UDP] =
			I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
			I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
#ifdef X722_SUPPORT
		[I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP] =
			I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
			I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
		[I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP] =
			I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
			I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
#endif
		[I40E_FILTER_PCTYPE_NONF_IPV6_TCP] =
			I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
			I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
#ifdef X722_SUPPORT
		[I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK] =
			I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
			I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
#endif
		[I40E_FILTER_PCTYPE_NONF_IPV6_SCTP] =
			I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
			I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
			I40E_INSET_SCTP_VT,
		[I40E_FILTER_PCTYPE_NONF_IPV6_OTHER] =
			I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST,
		[I40E_FILTER_PCTYPE_L2_PAYLOAD] =
			I40E_INSET_LAST_ETHER_TYPE,
	};

	/* PCTYPE beyond the table bounds: no default input set */
	if (pctype > I40E_FILTER_PCTYPE_L2_PAYLOAD)
		return 0;

	return default_inset_table[pctype];
}
7483
7484 /**
7485  * Parse the input set from index to logical bit masks
7486  */
7487 static int
7488 i40e_parse_input_set(uint64_t *inset,
7489                      enum i40e_filter_pctype pctype,
7490                      enum rte_eth_input_set_field *field,
7491                      uint16_t size)
7492 {
7493         uint16_t i, j;
7494         int ret = -EINVAL;
7495
7496         static const struct {
7497                 enum rte_eth_input_set_field field;
7498                 uint64_t inset;
7499         } inset_convert_table[] = {
7500                 {RTE_ETH_INPUT_SET_NONE, I40E_INSET_NONE},
7501                 {RTE_ETH_INPUT_SET_L2_SRC_MAC, I40E_INSET_SMAC},
7502                 {RTE_ETH_INPUT_SET_L2_DST_MAC, I40E_INSET_DMAC},
7503                 {RTE_ETH_INPUT_SET_L2_OUTER_VLAN, I40E_INSET_VLAN_OUTER},
7504                 {RTE_ETH_INPUT_SET_L2_INNER_VLAN, I40E_INSET_VLAN_INNER},
7505                 {RTE_ETH_INPUT_SET_L2_ETHERTYPE, I40E_INSET_LAST_ETHER_TYPE},
7506                 {RTE_ETH_INPUT_SET_L3_SRC_IP4, I40E_INSET_IPV4_SRC},
7507                 {RTE_ETH_INPUT_SET_L3_DST_IP4, I40E_INSET_IPV4_DST},
7508                 {RTE_ETH_INPUT_SET_L3_IP4_TOS, I40E_INSET_IPV4_TOS},
7509                 {RTE_ETH_INPUT_SET_L3_IP4_PROTO, I40E_INSET_IPV4_PROTO},
7510                 {RTE_ETH_INPUT_SET_L3_IP4_TTL, I40E_INSET_IPV4_TTL},
7511                 {RTE_ETH_INPUT_SET_L3_SRC_IP6, I40E_INSET_IPV6_SRC},
7512                 {RTE_ETH_INPUT_SET_L3_DST_IP6, I40E_INSET_IPV6_DST},
7513                 {RTE_ETH_INPUT_SET_L3_IP6_TC, I40E_INSET_IPV6_TC},
7514                 {RTE_ETH_INPUT_SET_L3_IP6_NEXT_HEADER,
7515                         I40E_INSET_IPV6_NEXT_HDR},
7516                 {RTE_ETH_INPUT_SET_L3_IP6_HOP_LIMITS,
7517                         I40E_INSET_IPV6_HOP_LIMIT},
7518                 {RTE_ETH_INPUT_SET_L4_UDP_SRC_PORT, I40E_INSET_SRC_PORT},
7519                 {RTE_ETH_INPUT_SET_L4_TCP_SRC_PORT, I40E_INSET_SRC_PORT},
7520                 {RTE_ETH_INPUT_SET_L4_SCTP_SRC_PORT, I40E_INSET_SRC_PORT},
7521                 {RTE_ETH_INPUT_SET_L4_UDP_DST_PORT, I40E_INSET_DST_PORT},
7522                 {RTE_ETH_INPUT_SET_L4_TCP_DST_PORT, I40E_INSET_DST_PORT},
7523                 {RTE_ETH_INPUT_SET_L4_SCTP_DST_PORT, I40E_INSET_DST_PORT},
7524                 {RTE_ETH_INPUT_SET_L4_SCTP_VERIFICATION_TAG,
7525                         I40E_INSET_SCTP_VT},
7526                 {RTE_ETH_INPUT_SET_TUNNEL_L2_INNER_DST_MAC,
7527                         I40E_INSET_TUNNEL_DMAC},
7528                 {RTE_ETH_INPUT_SET_TUNNEL_L2_INNER_VLAN,
7529                         I40E_INSET_VLAN_TUNNEL},
7530                 {RTE_ETH_INPUT_SET_TUNNEL_L4_UDP_KEY,
7531                         I40E_INSET_TUNNEL_ID},
7532                 {RTE_ETH_INPUT_SET_TUNNEL_GRE_KEY, I40E_INSET_TUNNEL_ID},
7533                 {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_1ST_WORD,
7534                         I40E_INSET_FLEX_PAYLOAD_W1},
7535                 {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_2ND_WORD,
7536                         I40E_INSET_FLEX_PAYLOAD_W2},
7537                 {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_3RD_WORD,
7538                         I40E_INSET_FLEX_PAYLOAD_W3},
7539                 {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_4TH_WORD,
7540                         I40E_INSET_FLEX_PAYLOAD_W4},
7541                 {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_5TH_WORD,
7542                         I40E_INSET_FLEX_PAYLOAD_W5},
7543                 {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_6TH_WORD,
7544                         I40E_INSET_FLEX_PAYLOAD_W6},
7545                 {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_7TH_WORD,
7546                         I40E_INSET_FLEX_PAYLOAD_W7},
7547                 {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_8TH_WORD,
7548                         I40E_INSET_FLEX_PAYLOAD_W8},
7549         };
7550
7551         if (!inset || !field || size > RTE_ETH_INSET_SIZE_MAX)
7552                 return ret;
7553
7554         /* Only one item allowed for default or all */
7555         if (size == 1) {
7556                 if (field[0] == RTE_ETH_INPUT_SET_DEFAULT) {
7557                         *inset = i40e_get_default_input_set(pctype);
7558                         return 0;
7559                 } else if (field[0] == RTE_ETH_INPUT_SET_NONE) {
7560                         *inset = I40E_INSET_NONE;
7561                         return 0;
7562                 }
7563         }
7564
7565         for (i = 0, *inset = 0; i < size; i++) {
7566                 for (j = 0; j < RTE_DIM(inset_convert_table); j++) {
7567                         if (field[i] == inset_convert_table[j].field) {
7568                                 *inset |= inset_convert_table[j].inset;
7569                                 break;
7570                         }
7571                 }
7572
7573                 /* It contains unsupported input set, return immediately */
7574                 if (j == RTE_DIM(inset_convert_table))
7575                         return ret;
7576         }
7577
7578         return 0;
7579 }
7580
7581 /**
7582  * Translate the input set from bit masks to register aware bit masks
7583  * and vice versa
7584  */
7585 static uint64_t
7586 i40e_translate_input_set_reg(enum i40e_mac_type type, uint64_t input)
7587 {
7588         uint64_t val = 0;
7589         uint16_t i;
7590
7591         struct inset_map {
7592                 uint64_t inset;
7593                 uint64_t inset_reg;
7594         };
7595
7596         static const struct inset_map inset_map_common[] = {
7597                 {I40E_INSET_DMAC, I40E_REG_INSET_L2_DMAC},
7598                 {I40E_INSET_SMAC, I40E_REG_INSET_L2_SMAC},
7599                 {I40E_INSET_VLAN_OUTER, I40E_REG_INSET_L2_OUTER_VLAN},
7600                 {I40E_INSET_VLAN_INNER, I40E_REG_INSET_L2_INNER_VLAN},
7601                 {I40E_INSET_LAST_ETHER_TYPE, I40E_REG_INSET_LAST_ETHER_TYPE},
7602                 {I40E_INSET_IPV4_TOS, I40E_REG_INSET_L3_IP4_TOS},
7603                 {I40E_INSET_IPV6_SRC, I40E_REG_INSET_L3_SRC_IP6},
7604                 {I40E_INSET_IPV6_DST, I40E_REG_INSET_L3_DST_IP6},
7605                 {I40E_INSET_IPV6_TC, I40E_REG_INSET_L3_IP6_TC},
7606                 {I40E_INSET_IPV6_NEXT_HDR, I40E_REG_INSET_L3_IP6_NEXT_HDR},
7607                 {I40E_INSET_IPV6_HOP_LIMIT, I40E_REG_INSET_L3_IP6_HOP_LIMIT},
7608                 {I40E_INSET_SRC_PORT, I40E_REG_INSET_L4_SRC_PORT},
7609                 {I40E_INSET_DST_PORT, I40E_REG_INSET_L4_DST_PORT},
7610                 {I40E_INSET_SCTP_VT, I40E_REG_INSET_L4_SCTP_VERIFICATION_TAG},
7611                 {I40E_INSET_TUNNEL_ID, I40E_REG_INSET_TUNNEL_ID},
7612                 {I40E_INSET_TUNNEL_DMAC,
7613                         I40E_REG_INSET_TUNNEL_L2_INNER_DST_MAC},
7614                 {I40E_INSET_TUNNEL_IPV4_DST, I40E_REG_INSET_TUNNEL_L3_DST_IP4},
7615                 {I40E_INSET_TUNNEL_IPV6_DST, I40E_REG_INSET_TUNNEL_L3_DST_IP6},
7616                 {I40E_INSET_TUNNEL_SRC_PORT,
7617                         I40E_REG_INSET_TUNNEL_L4_UDP_SRC_PORT},
7618                 {I40E_INSET_TUNNEL_DST_PORT,
7619                         I40E_REG_INSET_TUNNEL_L4_UDP_DST_PORT},
7620                 {I40E_INSET_VLAN_TUNNEL, I40E_REG_INSET_TUNNEL_VLAN},
7621                 {I40E_INSET_FLEX_PAYLOAD_W1, I40E_REG_INSET_FLEX_PAYLOAD_WORD1},
7622                 {I40E_INSET_FLEX_PAYLOAD_W2, I40E_REG_INSET_FLEX_PAYLOAD_WORD2},
7623                 {I40E_INSET_FLEX_PAYLOAD_W3, I40E_REG_INSET_FLEX_PAYLOAD_WORD3},
7624                 {I40E_INSET_FLEX_PAYLOAD_W4, I40E_REG_INSET_FLEX_PAYLOAD_WORD4},
7625                 {I40E_INSET_FLEX_PAYLOAD_W5, I40E_REG_INSET_FLEX_PAYLOAD_WORD5},
7626                 {I40E_INSET_FLEX_PAYLOAD_W6, I40E_REG_INSET_FLEX_PAYLOAD_WORD6},
7627                 {I40E_INSET_FLEX_PAYLOAD_W7, I40E_REG_INSET_FLEX_PAYLOAD_WORD7},
7628                 {I40E_INSET_FLEX_PAYLOAD_W8, I40E_REG_INSET_FLEX_PAYLOAD_WORD8},
7629         };
7630
7631     /* some different registers map in x722*/
7632         static const struct inset_map inset_map_diff_x722[] = {
7633                 {I40E_INSET_IPV4_SRC, I40E_X722_REG_INSET_L3_SRC_IP4},
7634                 {I40E_INSET_IPV4_DST, I40E_X722_REG_INSET_L3_DST_IP4},
7635                 {I40E_INSET_IPV4_PROTO, I40E_X722_REG_INSET_L3_IP4_PROTO},
7636                 {I40E_INSET_IPV4_TTL, I40E_X722_REG_INSET_L3_IP4_TTL},
7637         };
7638
7639         static const struct inset_map inset_map_diff_not_x722[] = {
7640                 {I40E_INSET_IPV4_SRC, I40E_REG_INSET_L3_SRC_IP4},
7641                 {I40E_INSET_IPV4_DST, I40E_REG_INSET_L3_DST_IP4},
7642                 {I40E_INSET_IPV4_PROTO, I40E_REG_INSET_L3_IP4_PROTO},
7643                 {I40E_INSET_IPV4_TTL, I40E_REG_INSET_L3_IP4_TTL},
7644         };
7645
7646         if (input == 0)
7647                 return val;
7648
7649         /* Translate input set to register aware inset */
7650         if (type == I40E_MAC_X722) {
7651                 for (i = 0; i < RTE_DIM(inset_map_diff_x722); i++) {
7652                         if (input & inset_map_diff_x722[i].inset)
7653                                 val |= inset_map_diff_x722[i].inset_reg;
7654                 }
7655         } else {
7656                 for (i = 0; i < RTE_DIM(inset_map_diff_not_x722); i++) {
7657                         if (input & inset_map_diff_not_x722[i].inset)
7658                                 val |= inset_map_diff_not_x722[i].inset_reg;
7659                 }
7660         }
7661
7662         for (i = 0; i < RTE_DIM(inset_map_common); i++) {
7663                 if (input & inset_map_common[i].inset)
7664                         val |= inset_map_common[i].inset_reg;
7665         }
7666
7667         return val;
7668 }
7669
7670 static int
7671 i40e_generate_inset_mask_reg(uint64_t inset, uint32_t *mask, uint8_t nb_elem)
7672 {
7673         uint8_t i, idx = 0;
7674         uint64_t inset_need_mask = inset;
7675
7676         static const struct {
7677                 uint64_t inset;
7678                 uint32_t mask;
7679         } inset_mask_map[] = {
7680                 {I40E_INSET_IPV4_TOS, I40E_INSET_IPV4_TOS_MASK},
7681                 {I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL, 0},
7682                 {I40E_INSET_IPV4_PROTO, I40E_INSET_IPV4_PROTO_MASK},
7683                 {I40E_INSET_IPV4_TTL, I40E_INSET_IPv4_TTL_MASK},
7684                 {I40E_INSET_IPV6_TC, I40E_INSET_IPV6_TC_MASK},
7685                 {I40E_INSET_IPV6_NEXT_HDR | I40E_INSET_IPV6_HOP_LIMIT, 0},
7686                 {I40E_INSET_IPV6_NEXT_HDR, I40E_INSET_IPV6_NEXT_HDR_MASK},
7687                 {I40E_INSET_IPV6_HOP_LIMIT, I40E_INSET_IPV6_HOP_LIMIT_MASK},
7688         };
7689
7690         if (!inset || !mask || !nb_elem)
7691                 return 0;
7692
7693         for (i = 0, idx = 0; i < RTE_DIM(inset_mask_map); i++) {
7694                 /* Clear the inset bit, if no MASK is required,
7695                  * for example proto + ttl
7696                  */
7697                 if ((inset & inset_mask_map[i].inset) ==
7698                      inset_mask_map[i].inset && inset_mask_map[i].mask == 0)
7699                         inset_need_mask &= ~inset_mask_map[i].inset;
7700                 if (!inset_need_mask)
7701                         return 0;
7702         }
7703         for (i = 0, idx = 0; i < RTE_DIM(inset_mask_map); i++) {
7704                 if ((inset_need_mask & inset_mask_map[i].inset) ==
7705                     inset_mask_map[i].inset) {
7706                         if (idx >= nb_elem) {
7707                                 PMD_DRV_LOG(ERR, "exceed maximal number of bitmasks");
7708                                 return -EINVAL;
7709                         }
7710                         mask[idx] = inset_mask_map[i].mask;
7711                         idx++;
7712                 }
7713         }
7714
7715         return idx;
7716 }
7717
7718 static void
7719 i40e_check_write_reg(struct i40e_hw *hw, uint32_t addr, uint32_t val)
7720 {
7721         uint32_t reg = i40e_read_rx_ctl(hw, addr);
7722
7723         PMD_DRV_LOG(DEBUG, "[0x%08x] original: 0x%08x\n", addr, reg);
7724         if (reg != val)
7725                 i40e_write_rx_ctl(hw, addr, val);
7726         PMD_DRV_LOG(DEBUG, "[0x%08x] after: 0x%08x\n", addr,
7727                     (uint32_t)i40e_read_rx_ctl(hw, addr));
7728 }
7729
/* Program the default flow-director and hash input sets for every
 * supported PCTYPE at device init, and cache them in the PF so later
 * SELECT/ADD operations know the current state.
 */
static void
i40e_filter_input_set_init(struct i40e_pf *pf)
{
	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
	enum i40e_filter_pctype pctype;
	uint64_t input_set, inset_reg;
	uint32_t mask_reg[I40E_INSET_MASK_NUM_REG] = {0};
	int num, i;

	for (pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
	     pctype <= I40E_FILTER_PCTYPE_L2_PAYLOAD; pctype++) {
		/* X722 supports extra PCTYPEs; skip anything the current
		 * MAC does not implement.
		 */
		if (hw->mac.type == I40E_MAC_X722) {
			if (!I40E_VALID_PCTYPE_X722(pctype))
				continue;
		} else {
			if (!I40E_VALID_PCTYPE(pctype))
				continue;
		}

		input_set = i40e_get_default_input_set(pctype);

		num = i40e_generate_inset_mask_reg(input_set, mask_reg,
						   I40E_INSET_MASK_NUM_REG);
		if (num < 0)
			return;
		inset_reg = i40e_translate_input_set_reg(hw->mac.type,
					input_set);

		/* The 64-bit register image is split across a low/high
		 * 32-bit register pair, for both FD and hash inset.
		 */
		i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 0),
				      (uint32_t)(inset_reg & UINT32_MAX));
		i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 1),
				     (uint32_t)((inset_reg >>
				     I40E_32_BIT_WIDTH) & UINT32_MAX));
		i40e_check_write_reg(hw, I40E_GLQF_HASH_INSET(0, pctype),
				      (uint32_t)(inset_reg & UINT32_MAX));
		i40e_check_write_reg(hw, I40E_GLQF_HASH_INSET(1, pctype),
				     (uint32_t)((inset_reg >>
				     I40E_32_BIT_WIDTH) & UINT32_MAX));

		for (i = 0; i < num; i++) {
			i40e_check_write_reg(hw, I40E_GLQF_FD_MSK(i, pctype),
					     mask_reg[i]);
			i40e_check_write_reg(hw, I40E_GLQF_HASH_MSK(i, pctype),
					     mask_reg[i]);
		}
		/* Clear unused mask registers of the pctype */
		for (i = num; i < I40E_INSET_MASK_NUM_REG; i++) {
			i40e_check_write_reg(hw, I40E_GLQF_FD_MSK(i, pctype),
					     0);
			i40e_check_write_reg(hw, I40E_GLQF_HASH_MSK(i, pctype),
					     0);
		}
		I40E_WRITE_FLUSH(hw);

		/* store the default input set */
		pf->hash_input_set[pctype] = input_set;
		pf->fdir.input_set[pctype] = input_set;
	}
}
7789
/* Select (replace) or add to the RSS hash input set for the PCTYPE
 * corresponding to conf->flow_type, reprogramming the GLQF_HASH_INSET
 * and GLQF_HASH_MSK registers and caching the result in the PF.
 * Returns 0 on success or a negative errno.
 */
int
i40e_hash_filter_inset_select(struct i40e_hw *hw,
			 struct rte_eth_input_set_conf *conf)
{
	struct i40e_pf *pf = &((struct i40e_adapter *)hw->back)->pf;
	enum i40e_filter_pctype pctype;
	uint64_t input_set, inset_reg = 0;
	uint32_t mask_reg[I40E_INSET_MASK_NUM_REG] = {0};
	int ret, i, num;

	if (!conf) {
		PMD_DRV_LOG(ERR, "Invalid pointer");
		return -EFAULT;
	}
	if (conf->op != RTE_ETH_INPUT_SET_SELECT &&
	    conf->op != RTE_ETH_INPUT_SET_ADD) {
		PMD_DRV_LOG(ERR, "Unsupported input set operation");
		return -EINVAL;
	}

	if (!I40E_VALID_FLOW(conf->flow_type)) {
		PMD_DRV_LOG(ERR, "invalid flow_type input.");
		return -EINVAL;
	}

	if (hw->mac.type == I40E_MAC_X722) {
		/* get translated pctype value in fd pctype register */
		pctype = (enum i40e_filter_pctype)i40e_read_rx_ctl(hw,
			I40E_GLQF_FD_PCTYPES((int)i40e_flowtype_to_pctype(
			conf->flow_type)));
	} else
		pctype = i40e_flowtype_to_pctype(conf->flow_type);

	ret = i40e_parse_input_set(&input_set, pctype, conf->field,
				   conf->inset_size);
	if (ret) {
		PMD_DRV_LOG(ERR, "Failed to parse input set");
		return -EINVAL;
	}
	if (i40e_validate_input_set(pctype, RTE_ETH_FILTER_HASH,
				    input_set) != 0) {
		PMD_DRV_LOG(ERR, "Invalid input set");
		return -EINVAL;
	}
	if (conf->op == RTE_ETH_INPUT_SET_ADD) {
		/* ADD: merge with the current 64-bit register image
		 * (high word then low word) and the cached input set.
		 */
		inset_reg = i40e_read_rx_ctl(hw, I40E_GLQF_HASH_INSET(1, pctype));
		inset_reg <<= I40E_32_BIT_WIDTH;
		inset_reg |= i40e_read_rx_ctl(hw, I40E_GLQF_HASH_INSET(0, pctype));
		input_set |= pf->hash_input_set[pctype];
	}
	num = i40e_generate_inset_mask_reg(input_set, mask_reg,
					   I40E_INSET_MASK_NUM_REG);
	if (num < 0)
		return -EINVAL;

	inset_reg |= i40e_translate_input_set_reg(hw->mac.type, input_set);

	/* Write back the 64-bit inset as a low/high register pair */
	i40e_check_write_reg(hw, I40E_GLQF_HASH_INSET(0, pctype),
			      (uint32_t)(inset_reg & UINT32_MAX));
	i40e_check_write_reg(hw, I40E_GLQF_HASH_INSET(1, pctype),
			     (uint32_t)((inset_reg >>
			     I40E_32_BIT_WIDTH) & UINT32_MAX));

	for (i = 0; i < num; i++)
		i40e_check_write_reg(hw, I40E_GLQF_HASH_MSK(i, pctype),
				     mask_reg[i]);
	/* Clear unused mask registers of the pctype */
	for (i = num; i < I40E_INSET_MASK_NUM_REG; i++)
		i40e_check_write_reg(hw, I40E_GLQF_HASH_MSK(i, pctype),
				     0);
	I40E_WRITE_FLUSH(hw);

	/* Cache the new state for subsequent ADD operations */
	pf->hash_input_set[pctype] = input_set;
	return 0;
}
7866
7867 int
7868 i40e_fdir_filter_inset_select(struct i40e_pf *pf,
7869                          struct rte_eth_input_set_conf *conf)
7870 {
7871         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
7872         enum i40e_filter_pctype pctype;
7873         uint64_t input_set, inset_reg = 0;
7874         uint32_t mask_reg[I40E_INSET_MASK_NUM_REG] = {0};
7875         int ret, i, num;
7876
7877         if (!hw || !conf) {
7878                 PMD_DRV_LOG(ERR, "Invalid pointer");
7879                 return -EFAULT;
7880         }
7881         if (conf->op != RTE_ETH_INPUT_SET_SELECT &&
7882             conf->op != RTE_ETH_INPUT_SET_ADD) {
7883                 PMD_DRV_LOG(ERR, "Unsupported input set operation");
7884                 return -EINVAL;
7885         }
7886
7887         if (!I40E_VALID_FLOW(conf->flow_type)) {
7888                 PMD_DRV_LOG(ERR, "invalid flow_type input.");
7889                 return -EINVAL;
7890         }
7891
7892         pctype = i40e_flowtype_to_pctype(conf->flow_type);
7893
7894         ret = i40e_parse_input_set(&input_set, pctype, conf->field,
7895                                    conf->inset_size);
7896         if (ret) {
7897                 PMD_DRV_LOG(ERR, "Failed to parse input set");
7898                 return -EINVAL;
7899         }
7900         if (i40e_validate_input_set(pctype, RTE_ETH_FILTER_FDIR,
7901                                     input_set) != 0) {
7902                 PMD_DRV_LOG(ERR, "Invalid input set");
7903                 return -EINVAL;
7904         }
7905
7906         /* get inset value in register */
7907         inset_reg = i40e_read_rx_ctl(hw, I40E_PRTQF_FD_INSET(pctype, 1));
7908         inset_reg <<= I40E_32_BIT_WIDTH;
7909         inset_reg |= i40e_read_rx_ctl(hw, I40E_PRTQF_FD_INSET(pctype, 0));
7910
7911         /* Can not change the inset reg for flex payload for fdir,
7912          * it is done by writing I40E_PRTQF_FD_FLXINSET
7913          * in i40e_set_flex_mask_on_pctype.
7914          */
7915         if (conf->op == RTE_ETH_INPUT_SET_SELECT)
7916                 inset_reg &= I40E_REG_INSET_FLEX_PAYLOAD_WORDS;
7917         else
7918                 input_set |= pf->fdir.input_set[pctype];
7919         num = i40e_generate_inset_mask_reg(input_set, mask_reg,
7920                                            I40E_INSET_MASK_NUM_REG);
7921         if (num < 0)
7922                 return -EINVAL;
7923
7924         inset_reg |= i40e_translate_input_set_reg(hw->mac.type, input_set);
7925
7926         i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 0),
7927                               (uint32_t)(inset_reg & UINT32_MAX));
7928         i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 1),
7929                              (uint32_t)((inset_reg >>
7930                              I40E_32_BIT_WIDTH) & UINT32_MAX));
7931
7932         for (i = 0; i < num; i++)
7933                 i40e_check_write_reg(hw, I40E_GLQF_FD_MSK(i, pctype),
7934                                      mask_reg[i]);
7935         /*clear unused mask registers of the pctype */
7936         for (i = num; i < I40E_INSET_MASK_NUM_REG; i++)
7937                 i40e_check_write_reg(hw, I40E_GLQF_FD_MSK(i, pctype),
7938                                      0);
7939         I40E_WRITE_FLUSH(hw);
7940
7941         pf->fdir.input_set[pctype] = input_set;
7942         return 0;
7943 }
7944
7945 static int
7946 i40e_hash_filter_get(struct i40e_hw *hw, struct rte_eth_hash_filter_info *info)
7947 {
7948         int ret = 0;
7949
7950         if (!hw || !info) {
7951                 PMD_DRV_LOG(ERR, "Invalid pointer");
7952                 return -EFAULT;
7953         }
7954
7955         switch (info->info_type) {
7956         case RTE_ETH_HASH_FILTER_SYM_HASH_ENA_PER_PORT:
7957                 i40e_get_symmetric_hash_enable_per_port(hw,
7958                                         &(info->info.enable));
7959                 break;
7960         case RTE_ETH_HASH_FILTER_GLOBAL_CONFIG:
7961                 ret = i40e_get_hash_filter_global_config(hw,
7962                                 &(info->info.global_conf));
7963                 break;
7964         default:
7965                 PMD_DRV_LOG(ERR, "Hash filter info type (%d) not supported",
7966                                                         info->info_type);
7967                 ret = -EINVAL;
7968                 break;
7969         }
7970
7971         return ret;
7972 }
7973
7974 static int
7975 i40e_hash_filter_set(struct i40e_hw *hw, struct rte_eth_hash_filter_info *info)
7976 {
7977         int ret = 0;
7978
7979         if (!hw || !info) {
7980                 PMD_DRV_LOG(ERR, "Invalid pointer");
7981                 return -EFAULT;
7982         }
7983
7984         switch (info->info_type) {
7985         case RTE_ETH_HASH_FILTER_SYM_HASH_ENA_PER_PORT:
7986                 i40e_set_symmetric_hash_enable_per_port(hw, info->info.enable);
7987                 break;
7988         case RTE_ETH_HASH_FILTER_GLOBAL_CONFIG:
7989                 ret = i40e_set_hash_filter_global_config(hw,
7990                                 &(info->info.global_conf));
7991                 break;
7992         case RTE_ETH_HASH_FILTER_INPUT_SET_SELECT:
7993                 ret = i40e_hash_filter_inset_select(hw,
7994                                                &(info->info.input_set_conf));
7995                 break;
7996
7997         default:
7998                 PMD_DRV_LOG(ERR, "Hash filter info type (%d) not supported",
7999                                                         info->info_type);
8000                 ret = -EINVAL;
8001                 break;
8002         }
8003
8004         return ret;
8005 }
8006
8007 /* Operations for hash function */
8008 static int
8009 i40e_hash_filter_ctrl(struct rte_eth_dev *dev,
8010                       enum rte_filter_op filter_op,
8011                       void *arg)
8012 {
8013         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
8014         int ret = 0;
8015
8016         switch (filter_op) {
8017         case RTE_ETH_FILTER_NOP:
8018                 break;
8019         case RTE_ETH_FILTER_GET:
8020                 ret = i40e_hash_filter_get(hw,
8021                         (struct rte_eth_hash_filter_info *)arg);
8022                 break;
8023         case RTE_ETH_FILTER_SET:
8024                 ret = i40e_hash_filter_set(hw,
8025                         (struct rte_eth_hash_filter_info *)arg);
8026                 break;
8027         default:
8028                 PMD_DRV_LOG(WARNING, "Filter operation (%d) not supported",
8029                                                                 filter_op);
8030                 ret = -ENOTSUP;
8031                 break;
8032         }
8033
8034         return ret;
8035 }
8036
8037 /*
8038  * Configure ethertype filter, which can director packet by filtering
8039  * with mac address and ether_type or only ether_type
8040  */
8041 static int
8042 i40e_ethertype_filter_set(struct i40e_pf *pf,
8043                         struct rte_eth_ethertype_filter *filter,
8044                         bool add)
8045 {
8046         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
8047         struct i40e_control_filter_stats stats;
8048         uint16_t flags = 0;
8049         int ret;
8050
8051         if (filter->queue >= pf->dev_data->nb_rx_queues) {
8052                 PMD_DRV_LOG(ERR, "Invalid queue ID");
8053                 return -EINVAL;
8054         }
8055         if (filter->ether_type == ETHER_TYPE_IPv4 ||
8056                 filter->ether_type == ETHER_TYPE_IPv6) {
8057                 PMD_DRV_LOG(ERR, "unsupported ether_type(0x%04x) in"
8058                         " control packet filter.", filter->ether_type);
8059                 return -EINVAL;
8060         }
8061         if (filter->ether_type == ETHER_TYPE_VLAN)
8062                 PMD_DRV_LOG(WARNING, "filter vlan ether_type in first tag is"
8063                         " not supported.");
8064
8065         if (!(filter->flags & RTE_ETHTYPE_FLAGS_MAC))
8066                 flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC;
8067         if (filter->flags & RTE_ETHTYPE_FLAGS_DROP)
8068                 flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP;
8069         flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE;
8070
8071         memset(&stats, 0, sizeof(stats));
8072         ret = i40e_aq_add_rem_control_packet_filter(hw,
8073                         filter->mac_addr.addr_bytes,
8074                         filter->ether_type, flags,
8075                         pf->main_vsi->seid,
8076                         filter->queue, add, &stats, NULL);
8077
8078         PMD_DRV_LOG(INFO, "add/rem control packet filter, return %d,"
8079                          " mac_etype_used = %u, etype_used = %u,"
8080                          " mac_etype_free = %u, etype_free = %u\n",
8081                          ret, stats.mac_etype_used, stats.etype_used,
8082                          stats.mac_etype_free, stats.etype_free);
8083         if (ret < 0)
8084                 return -ENOSYS;
8085         return 0;
8086 }
8087
8088 /*
8089  * Handle operations for ethertype filter.
8090  */
8091 static int
8092 i40e_ethertype_filter_handle(struct rte_eth_dev *dev,
8093                                 enum rte_filter_op filter_op,
8094                                 void *arg)
8095 {
8096         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
8097         int ret = 0;
8098
8099         if (filter_op == RTE_ETH_FILTER_NOP)
8100                 return ret;
8101
8102         if (arg == NULL) {
8103                 PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u",
8104                             filter_op);
8105                 return -EINVAL;
8106         }
8107
8108         switch (filter_op) {
8109         case RTE_ETH_FILTER_ADD:
8110                 ret = i40e_ethertype_filter_set(pf,
8111                         (struct rte_eth_ethertype_filter *)arg,
8112                         TRUE);
8113                 break;
8114         case RTE_ETH_FILTER_DELETE:
8115                 ret = i40e_ethertype_filter_set(pf,
8116                         (struct rte_eth_ethertype_filter *)arg,
8117                         FALSE);
8118                 break;
8119         default:
8120                 PMD_DRV_LOG(ERR, "unsupported operation %u\n", filter_op);
8121                 ret = -ENOSYS;
8122                 break;
8123         }
8124         return ret;
8125 }
8126
8127 static int
8128 i40e_dev_filter_ctrl(struct rte_eth_dev *dev,
8129                      enum rte_filter_type filter_type,
8130                      enum rte_filter_op filter_op,
8131                      void *arg)
8132 {
8133         int ret = 0;
8134
8135         if (dev == NULL)
8136                 return -EINVAL;
8137
8138         switch (filter_type) {
8139         case RTE_ETH_FILTER_NONE:
8140                 /* For global configuration */
8141                 ret = i40e_filter_ctrl_global_config(dev, filter_op, arg);
8142                 break;
8143         case RTE_ETH_FILTER_HASH:
8144                 ret = i40e_hash_filter_ctrl(dev, filter_op, arg);
8145                 break;
8146         case RTE_ETH_FILTER_MACVLAN:
8147                 ret = i40e_mac_filter_handle(dev, filter_op, arg);
8148                 break;
8149         case RTE_ETH_FILTER_ETHERTYPE:
8150                 ret = i40e_ethertype_filter_handle(dev, filter_op, arg);
8151                 break;
8152         case RTE_ETH_FILTER_TUNNEL:
8153                 ret = i40e_tunnel_filter_handle(dev, filter_op, arg);
8154                 break;
8155         case RTE_ETH_FILTER_FDIR:
8156                 ret = i40e_fdir_ctrl_func(dev, filter_op, arg);
8157                 break;
8158         default:
8159                 PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
8160                                                         filter_type);
8161                 ret = -EINVAL;
8162                 break;
8163         }
8164
8165         return ret;
8166 }
8167
8168 /*
8169  * Check and enable Extended Tag.
8170  * Enabling Extended Tag is important for 40G performance.
8171  */
8172 static void
8173 i40e_enable_extended_tag(struct rte_eth_dev *dev)
8174 {
8175         uint32_t buf = 0;
8176         int ret;
8177
8178         ret = rte_eal_pci_read_config(dev->pci_dev, &buf, sizeof(buf),
8179                                       PCI_DEV_CAP_REG);
8180         if (ret < 0) {
8181                 PMD_DRV_LOG(ERR, "Failed to read PCI offset 0x%x",
8182                             PCI_DEV_CAP_REG);
8183                 return;
8184         }
8185         if (!(buf & PCI_DEV_CAP_EXT_TAG_MASK)) {
8186                 PMD_DRV_LOG(ERR, "Does not support Extended Tag");
8187                 return;
8188         }
8189
8190         buf = 0;
8191         ret = rte_eal_pci_read_config(dev->pci_dev, &buf, sizeof(buf),
8192                                       PCI_DEV_CTRL_REG);
8193         if (ret < 0) {
8194                 PMD_DRV_LOG(ERR, "Failed to read PCI offset 0x%x",
8195                             PCI_DEV_CTRL_REG);
8196                 return;
8197         }
8198         if (buf & PCI_DEV_CTRL_EXT_TAG_MASK) {
8199                 PMD_DRV_LOG(DEBUG, "Extended Tag has already been enabled");
8200                 return;
8201         }
8202         buf |= PCI_DEV_CTRL_EXT_TAG_MASK;
8203         ret = rte_eal_pci_write_config(dev->pci_dev, &buf, sizeof(buf),
8204                                        PCI_DEV_CTRL_REG);
8205         if (ret < 0) {
8206                 PMD_DRV_LOG(ERR, "Failed to write PCI offset 0x%x",
8207                             PCI_DEV_CTRL_REG);
8208                 return;
8209         }
8210 }
8211
8212 /*
8213  * As some registers wouldn't be reset unless a global hardware reset,
8214  * hardware initialization is needed to put those registers into an
8215  * expected initial state.
8216  */
static void
i40e_hw_init(struct rte_eth_dev *dev)
{
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	/* Make sure PCIe Extended Tag is on (40G performance). */
	i40e_enable_extended_tag(dev);

	/* clear the PF Queue Filter control register */
	i40e_write_rx_ctl(hw, I40E_PFQF_CTL_0, 0);

	/* Disable symmetric hash per port */
	i40e_set_symmetric_hash_enable_per_port(hw, 0);
}
8230
8231 enum i40e_filter_pctype
8232 i40e_flowtype_to_pctype(uint16_t flow_type)
8233 {
8234         static const enum i40e_filter_pctype pctype_table[] = {
8235                 [RTE_ETH_FLOW_FRAG_IPV4] = I40E_FILTER_PCTYPE_FRAG_IPV4,
8236                 [RTE_ETH_FLOW_NONFRAG_IPV4_UDP] =
8237                         I40E_FILTER_PCTYPE_NONF_IPV4_UDP,
8238                 [RTE_ETH_FLOW_NONFRAG_IPV4_TCP] =
8239                         I40E_FILTER_PCTYPE_NONF_IPV4_TCP,
8240                 [RTE_ETH_FLOW_NONFRAG_IPV4_SCTP] =
8241                         I40E_FILTER_PCTYPE_NONF_IPV4_SCTP,
8242                 [RTE_ETH_FLOW_NONFRAG_IPV4_OTHER] =
8243                         I40E_FILTER_PCTYPE_NONF_IPV4_OTHER,
8244                 [RTE_ETH_FLOW_FRAG_IPV6] = I40E_FILTER_PCTYPE_FRAG_IPV6,
8245                 [RTE_ETH_FLOW_NONFRAG_IPV6_UDP] =
8246                         I40E_FILTER_PCTYPE_NONF_IPV6_UDP,
8247                 [RTE_ETH_FLOW_NONFRAG_IPV6_TCP] =
8248                         I40E_FILTER_PCTYPE_NONF_IPV6_TCP,
8249                 [RTE_ETH_FLOW_NONFRAG_IPV6_SCTP] =
8250                         I40E_FILTER_PCTYPE_NONF_IPV6_SCTP,
8251                 [RTE_ETH_FLOW_NONFRAG_IPV6_OTHER] =
8252                         I40E_FILTER_PCTYPE_NONF_IPV6_OTHER,
8253                 [RTE_ETH_FLOW_L2_PAYLOAD] = I40E_FILTER_PCTYPE_L2_PAYLOAD,
8254         };
8255
8256         return pctype_table[flow_type];
8257 }
8258
8259 uint16_t
8260 i40e_pctype_to_flowtype(enum i40e_filter_pctype pctype)
8261 {
8262         static const uint16_t flowtype_table[] = {
8263                 [I40E_FILTER_PCTYPE_FRAG_IPV4] = RTE_ETH_FLOW_FRAG_IPV4,
8264                 [I40E_FILTER_PCTYPE_NONF_IPV4_UDP] =
8265                         RTE_ETH_FLOW_NONFRAG_IPV4_UDP,
8266 #ifdef X722_SUPPORT
8267                 [I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP] =
8268                         RTE_ETH_FLOW_NONFRAG_IPV4_UDP,
8269                 [I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP] =
8270                         RTE_ETH_FLOW_NONFRAG_IPV4_UDP,
8271 #endif
8272                 [I40E_FILTER_PCTYPE_NONF_IPV4_TCP] =
8273                         RTE_ETH_FLOW_NONFRAG_IPV4_TCP,
8274 #ifdef X722_SUPPORT
8275                 [I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK] =
8276                         RTE_ETH_FLOW_NONFRAG_IPV4_TCP,
8277 #endif
8278                 [I40E_FILTER_PCTYPE_NONF_IPV4_SCTP] =
8279                         RTE_ETH_FLOW_NONFRAG_IPV4_SCTP,
8280                 [I40E_FILTER_PCTYPE_NONF_IPV4_OTHER] =
8281                         RTE_ETH_FLOW_NONFRAG_IPV4_OTHER,
8282                 [I40E_FILTER_PCTYPE_FRAG_IPV6] = RTE_ETH_FLOW_FRAG_IPV6,
8283                 [I40E_FILTER_PCTYPE_NONF_IPV6_UDP] =
8284                         RTE_ETH_FLOW_NONFRAG_IPV6_UDP,
8285 #ifdef X722_SUPPORT
8286                 [I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP] =
8287                         RTE_ETH_FLOW_NONFRAG_IPV6_UDP,
8288                 [I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP] =
8289                         RTE_ETH_FLOW_NONFRAG_IPV6_UDP,
8290 #endif
8291                 [I40E_FILTER_PCTYPE_NONF_IPV6_TCP] =
8292                         RTE_ETH_FLOW_NONFRAG_IPV6_TCP,
8293 #ifdef X722_SUPPORT
8294                 [I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK] =
8295                         RTE_ETH_FLOW_NONFRAG_IPV6_TCP,
8296 #endif
8297                 [I40E_FILTER_PCTYPE_NONF_IPV6_SCTP] =
8298                         RTE_ETH_FLOW_NONFRAG_IPV6_SCTP,
8299                 [I40E_FILTER_PCTYPE_NONF_IPV6_OTHER] =
8300                         RTE_ETH_FLOW_NONFRAG_IPV6_OTHER,
8301                 [I40E_FILTER_PCTYPE_L2_PAYLOAD] = RTE_ETH_FLOW_L2_PAYLOAD,
8302         };
8303
8304         return flowtype_table[pctype];
8305 }
8306
8307 /*
8308  * On X710, performance number is far from the expectation on recent firmware
8309  * versions; on XL710, performance number is also far from the expectation on
8310  * recent firmware versions, if promiscuous mode is disabled, or promiscuous
8311  * mode is enabled and port MAC address is equal to the packet destination MAC
8312  * address. The fix for this issue may not be integrated in the following
8313  * firmware version. So the workaround in software driver is needed. It needs
8314  * to modify the initial values of 3 internal only registers for both X710 and
8315  * XL710. Note that the values for X710 or XL710 could be different, and the
8316  * workaround can be removed when it is fixed in firmware in the future.
8317  */
8318
8319 /* For both X710 and XL710 */
8320 #define I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE 0x10000200
8321 #define I40E_GL_SWR_PRI_JOIN_MAP_0       0x26CE00
8322
8323 #define I40E_GL_SWR_PRI_JOIN_MAP_2_VALUE 0x011f0200
8324 #define I40E_GL_SWR_PRI_JOIN_MAP_2       0x26CE08
8325
8326 /* For X722 */
8327 #define I40E_X722_GL_SWR_PRI_JOIN_MAP_0_VALUE 0x20000200
8328 #define I40E_X722_GL_SWR_PRI_JOIN_MAP_2_VALUE 0x013F0200
8329
8330 /* For X710 */
8331 #define I40E_GL_SWR_PM_UP_THR_EF_VALUE   0x03030303
8332 /* For XL710 */
8333 #define I40E_GL_SWR_PM_UP_THR_SF_VALUE   0x06060606
8334 #define I40E_GL_SWR_PM_UP_THR            0x269FBC
8335
8336 static int
8337 i40e_dev_sync_phy_type(struct i40e_hw *hw)
8338 {
8339         enum i40e_status_code status;
8340         struct i40e_aq_get_phy_abilities_resp phy_ab;
8341         int ret = -ENOTSUP;
8342
8343         status = i40e_aq_get_phy_capabilities(hw, false, true, &phy_ab,
8344                                               NULL);
8345
8346         if (status)
8347                 return ret;
8348
8349         return 0;
8350 }
8351
/*
 * Apply the performance-workaround values to the three internal-only
 * registers described in the comment block above the #defines. Each
 * register is read first and only rewritten when its current value
 * differs from the desired one. Errors abort the loop after logging.
 */
static void
i40e_configure_registers(struct i40e_hw *hw)
{
	/* NOTE: the table is static and mutated below; the desired
	 * values are (re)computed on every call from the MAC/PHY type.
	 */
	static struct {
		uint32_t addr;
		uint64_t val;
	} reg_table[] = {
		{I40E_GL_SWR_PRI_JOIN_MAP_0, 0},
		{I40E_GL_SWR_PRI_JOIN_MAP_2, 0},
		{I40E_GL_SWR_PM_UP_THR, 0}, /* Compute value dynamically */
	};
	uint64_t reg;
	uint32_t i;
	int ret;

	for (i = 0; i < RTE_DIM(reg_table); i++) {
		/* JOIN_MAP_0 value depends on the MAC type */
		if (reg_table[i].addr == I40E_GL_SWR_PRI_JOIN_MAP_0) {
			if (hw->mac.type == I40E_MAC_X722) /* For X722 */
				reg_table[i].val =
					I40E_X722_GL_SWR_PRI_JOIN_MAP_0_VALUE;
			else /* For X710/XL710/XXV710 */
				reg_table[i].val =
					I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE;
		}

		/* JOIN_MAP_2 value depends on the MAC type as well */
		if (reg_table[i].addr == I40E_GL_SWR_PRI_JOIN_MAP_2) {
			if (hw->mac.type == I40E_MAC_X722) /* For X722 */
				reg_table[i].val =
					I40E_X722_GL_SWR_PRI_JOIN_MAP_2_VALUE;
			else /* For X710/XL710/XXV710 */
				reg_table[i].val =
					I40E_GL_SWR_PRI_JOIN_MAP_2_VALUE;
		}

		/* PM_UP_THR depends on the link speeds the PHY supports */
		if (reg_table[i].addr == I40E_GL_SWR_PM_UP_THR) {
			if (I40E_PHY_TYPE_SUPPORT_40G(hw->phy.phy_types) || /* For XL710 */
			    I40E_PHY_TYPE_SUPPORT_25G(hw->phy.phy_types)) /* For XXV710 */
				reg_table[i].val =
					I40E_GL_SWR_PM_UP_THR_SF_VALUE;
			else /* For X710 */
				reg_table[i].val =
					I40E_GL_SWR_PM_UP_THR_EF_VALUE;
		}

		ret = i40e_aq_debug_read_register(hw, reg_table[i].addr,
							&reg, NULL);
		if (ret < 0) {
			PMD_DRV_LOG(ERR, "Failed to read from 0x%"PRIx32,
							reg_table[i].addr);
			break;
		}
		PMD_DRV_LOG(DEBUG, "Read from 0x%"PRIx32": 0x%"PRIx64,
						reg_table[i].addr, reg);
		/* Skip the write when the register already holds the value */
		if (reg == reg_table[i].val)
			continue;

		ret = i40e_aq_debug_write_register(hw, reg_table[i].addr,
						reg_table[i].val, NULL);
		if (ret < 0) {
			PMD_DRV_LOG(ERR, "Failed to write 0x%"PRIx64" to the "
				"address of 0x%"PRIx32, reg_table[i].val,
							reg_table[i].addr);
			break;
		}
		PMD_DRV_LOG(DEBUG, "Write 0x%"PRIx64" to the address of "
			"0x%"PRIx32, reg_table[i].val, reg_table[i].addr);
	}
}
8420
8421 #define I40E_VSI_TSR(_i)            (0x00050800 + ((_i) * 4))
8422 #define I40E_VSI_TSR_QINQ_CONFIG    0xc030
8423 #define I40E_VSI_L2TAGSTXVALID(_i)  (0x00042800 + ((_i) * 4))
8424 #define I40E_VSI_L2TAGSTXVALID_QINQ 0xab
8425 static int
8426 i40e_config_qinq(struct i40e_hw *hw, struct i40e_vsi *vsi)
8427 {
8428         uint32_t reg;
8429         int ret;
8430
8431         if (vsi->vsi_id >= I40E_MAX_NUM_VSIS) {
8432                 PMD_DRV_LOG(ERR, "VSI ID exceeds the maximum");
8433                 return -EINVAL;
8434         }
8435
8436         /* Configure for double VLAN RX stripping */
8437         reg = I40E_READ_REG(hw, I40E_VSI_TSR(vsi->vsi_id));
8438         if ((reg & I40E_VSI_TSR_QINQ_CONFIG) != I40E_VSI_TSR_QINQ_CONFIG) {
8439                 reg |= I40E_VSI_TSR_QINQ_CONFIG;
8440                 ret = i40e_aq_debug_write_register(hw,
8441                                                    I40E_VSI_TSR(vsi->vsi_id),
8442                                                    reg, NULL);
8443                 if (ret < 0) {
8444                         PMD_DRV_LOG(ERR, "Failed to update VSI_TSR[%d]",
8445                                     vsi->vsi_id);
8446                         return I40E_ERR_CONFIG;
8447                 }
8448         }
8449
8450         /* Configure for double VLAN TX insertion */
8451         reg = I40E_READ_REG(hw, I40E_VSI_L2TAGSTXVALID(vsi->vsi_id));
8452         if ((reg & 0xff) != I40E_VSI_L2TAGSTXVALID_QINQ) {
8453                 reg = I40E_VSI_L2TAGSTXVALID_QINQ;
8454                 ret = i40e_aq_debug_write_register(hw,
8455                                                    I40E_VSI_L2TAGSTXVALID(
8456                                                    vsi->vsi_id), reg, NULL);
8457                 if (ret < 0) {
8458                         PMD_DRV_LOG(ERR, "Failed to update "
8459                                 "VSI_L2TAGSTXVALID[%d]", vsi->vsi_id);
8460                         return I40E_ERR_CONFIG;
8461                 }
8462         }
8463
8464         return 0;
8465 }
8466
8467 /**
8468  * i40e_aq_add_mirror_rule
8469  * @hw: pointer to the hardware structure
8470  * @seid: VEB seid to add mirror rule to
8471  * @dst_id: destination vsi seid
8472  * @entries: Buffer which contains the entities to be mirrored
8473  * @count: number of entities contained in the buffer
8474  * @rule_id:the rule_id of the rule to be added
8475  *
8476  * Add a mirror rule for a given veb.
8477  *
8478  **/
static enum i40e_status_code
i40e_aq_add_mirror_rule(struct i40e_hw *hw,
			uint16_t seid, uint16_t dst_id,
			uint16_t rule_type, uint16_t *entries,
			uint16_t count, uint16_t *rule_id)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_add_delete_mirror_rule cmd;
	/* The completion overlays the same raw descriptor parameter
	 * bytes that the command is copied into below; after
	 * i40e_asq_send_command() they hold the firmware's response.
	 */
	struct i40e_aqc_add_delete_mirror_rule_completion *resp =
		(struct i40e_aqc_add_delete_mirror_rule_completion *)
		&desc.params.raw;
	uint16_t buff_len;
	enum i40e_status_code status;

	i40e_fill_default_direct_cmd_desc(&desc,
					  i40e_aqc_opc_add_mirror_rule);
	memset(&cmd, 0, sizeof(cmd));

	/* The indirect buffer carries @count 16-bit entity ids
	 * (VLAN ids or VSI seids, depending on rule_type).
	 */
	buff_len = sizeof(uint16_t) * count;
	desc.datalen = rte_cpu_to_le_16(buff_len);
	if (buff_len > 0)
		desc.flags |= rte_cpu_to_le_16(
			(uint16_t)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
	cmd.rule_type = rte_cpu_to_le_16(rule_type <<
				I40E_AQC_MIRROR_RULE_TYPE_SHIFT);
	cmd.num_entries = rte_cpu_to_le_16(count);
	cmd.seid = rte_cpu_to_le_16(seid);
	cmd.destination = rte_cpu_to_le_16(dst_id);

	rte_memcpy(&desc.params.raw, &cmd, sizeof(cmd));
	status = i40e_asq_send_command(hw, &desc, entries, buff_len, NULL);
	/* NOTE(review): resp fields are logged/read even when the AQ
	 * command failed; callers are expected to check @status before
	 * trusting *rule_id.
	 */
	PMD_DRV_LOG(INFO, "i40e_aq_add_mirror_rule, aq_status %d,"
			 "rule_id = %u"
			 " mirror_rules_used = %u, mirror_rules_free = %u,",
			 hw->aq.asq_last_status, resp->rule_id,
			 resp->mirror_rules_used, resp->mirror_rules_free);
	*rule_id = rte_le_to_cpu_16(resp->rule_id);

	return status;
}
8519
8520 /**
8521  * i40e_aq_del_mirror_rule
8522  * @hw: pointer to the hardware structure
8523  * @seid: VEB seid to add mirror rule to
8524  * @entries: Buffer which contains the entities to be mirrored
8525  * @count: number of entities contained in the buffer
8526  * @rule_id:the rule_id of the rule to be delete
8527  *
8528  * Delete a mirror rule for a given veb.
8529  *
8530  **/
8531 static enum i40e_status_code
8532 i40e_aq_del_mirror_rule(struct i40e_hw *hw,
8533                 uint16_t seid, uint16_t rule_type, uint16_t *entries,
8534                 uint16_t count, uint16_t rule_id)
8535 {
8536         struct i40e_aq_desc desc;
8537         struct i40e_aqc_add_delete_mirror_rule cmd;
8538         uint16_t buff_len = 0;
8539         enum i40e_status_code status;
8540         void *buff = NULL;
8541
8542         i40e_fill_default_direct_cmd_desc(&desc,
8543                                           i40e_aqc_opc_delete_mirror_rule);
8544         memset(&cmd, 0, sizeof(cmd));
8545         if (rule_type == I40E_AQC_MIRROR_RULE_TYPE_VLAN) {
8546                 desc.flags |= rte_cpu_to_le_16((uint16_t)(I40E_AQ_FLAG_BUF |
8547                                                           I40E_AQ_FLAG_RD));
8548                 cmd.num_entries = count;
8549                 buff_len = sizeof(uint16_t) * count;
8550                 desc.datalen = rte_cpu_to_le_16(buff_len);
8551                 buff = (void *)entries;
8552         } else
8553                 /* rule id is filled in destination field for deleting mirror rule */
8554                 cmd.destination = rte_cpu_to_le_16(rule_id);
8555
8556         cmd.rule_type = rte_cpu_to_le_16(rule_type <<
8557                                 I40E_AQC_MIRROR_RULE_TYPE_SHIFT);
8558         cmd.seid = rte_cpu_to_le_16(seid);
8559
8560         rte_memcpy(&desc.params.raw, &cmd, sizeof(cmd));
8561         status = i40e_asq_send_command(hw, &desc, buff, buff_len, NULL);
8562
8563         return status;
8564 }
8565
8566 /**
8567  * i40e_mirror_rule_set
8568  * @dev: pointer to the hardware structure
8569  * @mirror_conf: mirror rule info
8570  * @sw_id: mirror rule's sw_id
8571  * @on: enable/disable
8572  *
8573  * set a mirror rule.
8574  *
8575  **/
static int
i40e_mirror_rule_set(struct rte_eth_dev *dev,
			struct rte_eth_mirror_conf *mirror_conf,
			uint8_t sw_id, uint8_t on)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct i40e_mirror_rule *it, *mirr_rule = NULL;
	struct i40e_mirror_rule *parent = NULL;
	uint16_t seid, dst_seid, rule_id;
	uint16_t i, j = 0;	/* j counts entries collected into the rule */
	int ret;

	PMD_DRV_LOG(DEBUG, "i40e_mirror_rule_set: sw_id = %d.", sw_id);

	/* Mirroring needs a VEB (switch element) and at least the VF
	 * array allocated.
	 */
	if (pf->main_vsi->veb == NULL || pf->vfs == NULL) {
		PMD_DRV_LOG(ERR, "mirror rule can not be configured"
			" without veb or vfs.");
		return -ENOSYS;
	}
	if (pf->nb_mirror_rule > I40E_MAX_MIRROR_RULES) {
		PMD_DRV_LOG(ERR, "mirror table is full.");
		return -ENOSPC;
	}
	/* dst_pool == vf_num means the PF itself (handled below) */
	if (mirror_conf->dst_pool > pf->vf_num) {
		PMD_DRV_LOG(ERR, "invalid destination pool %u.",
				 mirror_conf->dst_pool);
		return -EINVAL;
	}

	seid = pf->main_vsi->veb->seid;

	/* The mirror list is kept sorted by sw_id; find the insertion
	 * point (parent) or an existing rule with the same sw_id.
	 */
	TAILQ_FOREACH(it, &pf->mirror_list, rules) {
		if (sw_id <= it->index) {
			mirr_rule = it;
			break;
		}
		parent = it;
	}
	if (mirr_rule && sw_id == mirr_rule->index) {
		if (on) {
			PMD_DRV_LOG(ERR, "mirror rule exists.");
			return -EEXIST;
		} else {
			/* on == 0 on an existing rule: delete it */
			ret = i40e_aq_del_mirror_rule(hw, seid,
					mirr_rule->rule_type,
					mirr_rule->entries,
					mirr_rule->num_entries, mirr_rule->id);
			if (ret < 0) {
				PMD_DRV_LOG(ERR, "failed to remove mirror rule:"
						   " ret = %d, aq_err = %d.",
						   ret, hw->aq.asq_last_status);
				return -ENOSYS;
			}
			TAILQ_REMOVE(&pf->mirror_list, mirr_rule, rules);
			rte_free(mirr_rule);
			pf->nb_mirror_rule--;
			return 0;
		}
	} else if (!on) {
		PMD_DRV_LOG(ERR, "mirror rule doesn't exist.");
		return -ENOENT;
	}

	/* Adding a new rule: build the entry list per rule type */
	mirr_rule = rte_zmalloc("i40e_mirror_rule",
				sizeof(struct i40e_mirror_rule) , 0);
	if (!mirr_rule) {
		PMD_DRV_LOG(ERR, "failed to allocate memory");
		return I40E_ERR_NO_MEMORY;
	}
	switch (mirror_conf->rule_type) {
	case ETH_MIRROR_VLAN:
		/* Collect the VLAN ids selected by vlan_mask */
		for (i = 0, j = 0; i < ETH_MIRROR_MAX_VLANS; i++) {
			if (mirror_conf->vlan.vlan_mask & (1ULL << i)) {
				mirr_rule->entries[j] =
					mirror_conf->vlan.vlan_id[i];
				j++;
			}
		}
		if (j == 0) {
			PMD_DRV_LOG(ERR, "vlan is not specified.");
			rte_free(mirr_rule);
			return -EINVAL;
		}
		mirr_rule->rule_type = I40E_AQC_MIRROR_RULE_TYPE_VLAN;
		break;
	case ETH_MIRROR_VIRTUAL_POOL_UP:
	case ETH_MIRROR_VIRTUAL_POOL_DOWN:
		/* check if the specified pool bit is out of range */
		if (mirror_conf->pool_mask > (uint64_t)(1ULL << (pf->vf_num + 1))) {
			PMD_DRV_LOG(ERR, "pool mask is out of range.");
			rte_free(mirr_rule);
			return -EINVAL;
		}
		/* Collect the VSI seids of the selected VF pools */
		for (i = 0, j = 0; i < pf->vf_num; i++) {
			if (mirror_conf->pool_mask & (1ULL << i)) {
				mirr_rule->entries[j] = pf->vfs[i].vsi->seid;
				j++;
			}
		}
		if (mirror_conf->pool_mask & (1ULL << pf->vf_num)) {
			/* add pf vsi to entries */
			mirr_rule->entries[j] = pf->main_vsi_seid;
			j++;
		}
		if (j == 0) {
			PMD_DRV_LOG(ERR, "pool is not specified.");
			rte_free(mirr_rule);
			return -EINVAL;
		}
		/* egress and ingress in aq commands means from switch but not port */
		mirr_rule->rule_type =
			(mirror_conf->rule_type == ETH_MIRROR_VIRTUAL_POOL_UP) ?
			I40E_AQC_MIRROR_RULE_TYPE_VPORT_EGRESS :
			I40E_AQC_MIRROR_RULE_TYPE_VPORT_INGRESS;
		break;
	case ETH_MIRROR_UPLINK_PORT:
		/* egress and ingress in aq commands means from switch but not port*/
		mirr_rule->rule_type = I40E_AQC_MIRROR_RULE_TYPE_ALL_EGRESS;
		break;
	case ETH_MIRROR_DOWNLINK_PORT:
		mirr_rule->rule_type = I40E_AQC_MIRROR_RULE_TYPE_ALL_INGRESS;
		break;
	default:
		PMD_DRV_LOG(ERR, "unsupported mirror type %d.",
			mirror_conf->rule_type);
		rte_free(mirr_rule);
		return -EINVAL;
	}

	/* If the dst_pool is equal to vf_num, consider it as PF */
	if (mirror_conf->dst_pool == pf->vf_num)
		dst_seid = pf->main_vsi_seid;
	else
		dst_seid = pf->vfs[mirror_conf->dst_pool].vsi->seid;

	ret = i40e_aq_add_mirror_rule(hw, seid, dst_seid,
				      mirr_rule->rule_type, mirr_rule->entries,
				      j, &rule_id);
	if (ret < 0) {
		PMD_DRV_LOG(ERR, "failed to add mirror rule:"
				   " ret = %d, aq_err = %d.",
				   ret, hw->aq.asq_last_status);
		rte_free(mirr_rule);
		return -ENOSYS;
	}

	/* Record the firmware rule id and link the rule into the
	 * sw_id-sorted list (after @parent, or at the head).
	 */
	mirr_rule->index = sw_id;
	mirr_rule->num_entries = j;
	mirr_rule->id = rule_id;
	mirr_rule->dst_vsi_seid = dst_seid;

	if (parent)
		TAILQ_INSERT_AFTER(&pf->mirror_list, parent, mirr_rule, rules);
	else
		TAILQ_INSERT_HEAD(&pf->mirror_list, mirr_rule, rules);

	pf->nb_mirror_rule++;
	return 0;
}
8736
8737 /**
8738  * i40e_mirror_rule_reset
8739  * @dev: pointer to the device
8740  * @sw_id: mirror rule's sw_id
8741  *
8742  * reset a mirror rule.
8743  *
8744  **/
8745 static int
8746 i40e_mirror_rule_reset(struct rte_eth_dev *dev, uint8_t sw_id)
8747 {
8748         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
8749         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
8750         struct i40e_mirror_rule *it, *mirr_rule = NULL;
8751         uint16_t seid;
8752         int ret;
8753
8754         PMD_DRV_LOG(DEBUG, "i40e_mirror_rule_reset: sw_id = %d.", sw_id);
8755
8756         seid = pf->main_vsi->veb->seid;
8757
8758         TAILQ_FOREACH(it, &pf->mirror_list, rules) {
8759                 if (sw_id == it->index) {
8760                         mirr_rule = it;
8761                         break;
8762                 }
8763         }
8764         if (mirr_rule) {
8765                 ret = i40e_aq_del_mirror_rule(hw, seid,
8766                                 mirr_rule->rule_type,
8767                                 mirr_rule->entries,
8768                                 mirr_rule->num_entries, mirr_rule->id);
8769                 if (ret < 0) {
8770                         PMD_DRV_LOG(ERR, "failed to remove mirror rule:"
8771                                            " status = %d, aq_err = %d.",
8772                                            ret, hw->aq.asq_last_status);
8773                         return -ENOSYS;
8774                 }
8775                 TAILQ_REMOVE(&pf->mirror_list, mirr_rule, rules);
8776                 rte_free(mirr_rule);
8777                 pf->nb_mirror_rule--;
8778         } else {
8779                 PMD_DRV_LOG(ERR, "mirror rule doesn't exist.");
8780                 return -ENOENT;
8781         }
8782         return 0;
8783 }
8784
8785 static uint64_t
8786 i40e_read_systime_cyclecounter(struct rte_eth_dev *dev)
8787 {
8788         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
8789         uint64_t systim_cycles;
8790
8791         systim_cycles = (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_TIME_L);
8792         systim_cycles |= (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_TIME_H)
8793                         << 32;
8794
8795         return systim_cycles;
8796 }
8797
8798 static uint64_t
8799 i40e_read_rx_tstamp_cyclecounter(struct rte_eth_dev *dev, uint8_t index)
8800 {
8801         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
8802         uint64_t rx_tstamp;
8803
8804         rx_tstamp = (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_L(index));
8805         rx_tstamp |= (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_H(index))
8806                         << 32;
8807
8808         return rx_tstamp;
8809 }
8810
8811 static uint64_t
8812 i40e_read_tx_tstamp_cyclecounter(struct rte_eth_dev *dev)
8813 {
8814         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
8815         uint64_t tx_tstamp;
8816
8817         tx_tstamp = (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_TXTIME_L);
8818         tx_tstamp |= (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_TXTIME_H)
8819                         << 32;
8820
8821         return tx_tstamp;
8822 }
8823
8824 static void
8825 i40e_start_timecounters(struct rte_eth_dev *dev)
8826 {
8827         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
8828         struct i40e_adapter *adapter =
8829                         (struct i40e_adapter *)dev->data->dev_private;
8830         struct rte_eth_link link;
8831         uint32_t tsync_inc_l;
8832         uint32_t tsync_inc_h;
8833
8834         /* Get current link speed. */
8835         memset(&link, 0, sizeof(link));
8836         i40e_dev_link_update(dev, 1);
8837         rte_i40e_dev_atomic_read_link_status(dev, &link);
8838
8839         switch (link.link_speed) {
8840         case ETH_SPEED_NUM_40G:
8841                 tsync_inc_l = I40E_PTP_40GB_INCVAL & 0xFFFFFFFF;
8842                 tsync_inc_h = I40E_PTP_40GB_INCVAL >> 32;
8843                 break;
8844         case ETH_SPEED_NUM_10G:
8845                 tsync_inc_l = I40E_PTP_10GB_INCVAL & 0xFFFFFFFF;
8846                 tsync_inc_h = I40E_PTP_10GB_INCVAL >> 32;
8847                 break;
8848         case ETH_SPEED_NUM_1G:
8849                 tsync_inc_l = I40E_PTP_1GB_INCVAL & 0xFFFFFFFF;
8850                 tsync_inc_h = I40E_PTP_1GB_INCVAL >> 32;
8851                 break;
8852         default:
8853                 tsync_inc_l = 0x0;
8854                 tsync_inc_h = 0x0;
8855         }
8856
8857         /* Set the timesync increment value. */
8858         I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_L, tsync_inc_l);
8859         I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_H, tsync_inc_h);
8860
8861         memset(&adapter->systime_tc, 0, sizeof(struct rte_timecounter));
8862         memset(&adapter->rx_tstamp_tc, 0, sizeof(struct rte_timecounter));
8863         memset(&adapter->tx_tstamp_tc, 0, sizeof(struct rte_timecounter));
8864
8865         adapter->systime_tc.cc_mask = I40E_CYCLECOUNTER_MASK;
8866         adapter->systime_tc.cc_shift = 0;
8867         adapter->systime_tc.nsec_mask = 0;
8868
8869         adapter->rx_tstamp_tc.cc_mask = I40E_CYCLECOUNTER_MASK;
8870         adapter->rx_tstamp_tc.cc_shift = 0;
8871         adapter->rx_tstamp_tc.nsec_mask = 0;
8872
8873         adapter->tx_tstamp_tc.cc_mask = I40E_CYCLECOUNTER_MASK;
8874         adapter->tx_tstamp_tc.cc_shift = 0;
8875         adapter->tx_tstamp_tc.nsec_mask = 0;
8876 }
8877
8878 static int
8879 i40e_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta)
8880 {
8881         struct i40e_adapter *adapter =
8882                         (struct i40e_adapter *)dev->data->dev_private;
8883
8884         adapter->systime_tc.nsec += delta;
8885         adapter->rx_tstamp_tc.nsec += delta;
8886         adapter->tx_tstamp_tc.nsec += delta;
8887
8888         return 0;
8889 }
8890
8891 static int
8892 i40e_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts)
8893 {
8894         uint64_t ns;
8895         struct i40e_adapter *adapter =
8896                         (struct i40e_adapter *)dev->data->dev_private;
8897
8898         ns = rte_timespec_to_ns(ts);
8899
8900         /* Set the timecounters to a new value. */
8901         adapter->systime_tc.nsec = ns;
8902         adapter->rx_tstamp_tc.nsec = ns;
8903         adapter->tx_tstamp_tc.nsec = ns;
8904
8905         return 0;
8906 }
8907
8908 static int
8909 i40e_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts)
8910 {
8911         uint64_t ns, systime_cycles;
8912         struct i40e_adapter *adapter =
8913                         (struct i40e_adapter *)dev->data->dev_private;
8914
8915         systime_cycles = i40e_read_systime_cyclecounter(dev);
8916         ns = rte_timecounter_update(&adapter->systime_tc, systime_cycles);
8917         *ts = rte_ns_to_timespec(ns);
8918
8919         return 0;
8920 }
8921
8922 static int
8923 i40e_timesync_enable(struct rte_eth_dev *dev)
8924 {
8925         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
8926         uint32_t tsync_ctl_l;
8927         uint32_t tsync_ctl_h;
8928
8929         /* Stop the timesync system time. */
8930         I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_L, 0x0);
8931         I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_H, 0x0);
8932         /* Reset the timesync system time value. */
8933         I40E_WRITE_REG(hw, I40E_PRTTSYN_TIME_L, 0x0);
8934         I40E_WRITE_REG(hw, I40E_PRTTSYN_TIME_H, 0x0);
8935
8936         i40e_start_timecounters(dev);
8937
8938         /* Clear timesync registers. */
8939         I40E_READ_REG(hw, I40E_PRTTSYN_STAT_0);
8940         I40E_READ_REG(hw, I40E_PRTTSYN_TXTIME_H);
8941         I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_H(0));
8942         I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_H(1));
8943         I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_H(2));
8944         I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_H(3));
8945
8946         /* Enable timestamping of PTP packets. */
8947         tsync_ctl_l = I40E_READ_REG(hw, I40E_PRTTSYN_CTL0);
8948         tsync_ctl_l |= I40E_PRTTSYN_TSYNENA;
8949
8950         tsync_ctl_h = I40E_READ_REG(hw, I40E_PRTTSYN_CTL1);
8951         tsync_ctl_h |= I40E_PRTTSYN_TSYNENA;
8952         tsync_ctl_h |= I40E_PRTTSYN_TSYNTYPE;
8953
8954         I40E_WRITE_REG(hw, I40E_PRTTSYN_CTL0, tsync_ctl_l);
8955         I40E_WRITE_REG(hw, I40E_PRTTSYN_CTL1, tsync_ctl_h);
8956
8957         return 0;
8958 }
8959
8960 static int
8961 i40e_timesync_disable(struct rte_eth_dev *dev)
8962 {
8963         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
8964         uint32_t tsync_ctl_l;
8965         uint32_t tsync_ctl_h;
8966
8967         /* Disable timestamping of transmitted PTP packets. */
8968         tsync_ctl_l = I40E_READ_REG(hw, I40E_PRTTSYN_CTL0);
8969         tsync_ctl_l &= ~I40E_PRTTSYN_TSYNENA;
8970
8971         tsync_ctl_h = I40E_READ_REG(hw, I40E_PRTTSYN_CTL1);
8972         tsync_ctl_h &= ~I40E_PRTTSYN_TSYNENA;
8973
8974         I40E_WRITE_REG(hw, I40E_PRTTSYN_CTL0, tsync_ctl_l);
8975         I40E_WRITE_REG(hw, I40E_PRTTSYN_CTL1, tsync_ctl_h);
8976
8977         /* Reset the timesync increment value. */
8978         I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_L, 0x0);
8979         I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_H, 0x0);
8980
8981         return 0;
8982 }
8983
8984 static int
8985 i40e_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
8986                                 struct timespec *timestamp, uint32_t flags)
8987 {
8988         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
8989         struct i40e_adapter *adapter =
8990                 (struct i40e_adapter *)dev->data->dev_private;
8991
8992         uint32_t sync_status;
8993         uint32_t index = flags & 0x03;
8994         uint64_t rx_tstamp_cycles;
8995         uint64_t ns;
8996
8997         sync_status = I40E_READ_REG(hw, I40E_PRTTSYN_STAT_1);
8998         if ((sync_status & (1 << index)) == 0)
8999                 return -EINVAL;
9000
9001         rx_tstamp_cycles = i40e_read_rx_tstamp_cyclecounter(dev, index);
9002         ns = rte_timecounter_update(&adapter->rx_tstamp_tc, rx_tstamp_cycles);
9003         *timestamp = rte_ns_to_timespec(ns);
9004
9005         return 0;
9006 }
9007
9008 static int
9009 i40e_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
9010                                 struct timespec *timestamp)
9011 {
9012         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
9013         struct i40e_adapter *adapter =
9014                 (struct i40e_adapter *)dev->data->dev_private;
9015
9016         uint32_t sync_status;
9017         uint64_t tx_tstamp_cycles;
9018         uint64_t ns;
9019
9020         sync_status = I40E_READ_REG(hw, I40E_PRTTSYN_STAT_0);
9021         if ((sync_status & I40E_PRTTSYN_STAT_0_TXTIME_MASK) == 0)
9022                 return -EINVAL;
9023
9024         tx_tstamp_cycles = i40e_read_tx_tstamp_cyclecounter(dev);
9025         ns = rte_timecounter_update(&adapter->tx_tstamp_tc, tx_tstamp_cycles);
9026         *timestamp = rte_ns_to_timespec(ns);
9027
9028         return 0;
9029 }
9030
9031 /*
9032  * i40e_parse_dcb_configure - parse dcb configure from user
9033  * @dev: the device being configured
9034  * @dcb_cfg: pointer of the result of parse
9035  * @*tc_map: bit map of enabled traffic classes
9036  *
9037  * Returns 0 on success, negative value on failure
9038  */
9039 static int
9040 i40e_parse_dcb_configure(struct rte_eth_dev *dev,
9041                          struct i40e_dcbx_config *dcb_cfg,
9042                          uint8_t *tc_map)
9043 {
9044         struct rte_eth_dcb_rx_conf *dcb_rx_conf;
9045         uint8_t i, tc_bw, bw_lf;
9046
9047         memset(dcb_cfg, 0, sizeof(struct i40e_dcbx_config));
9048
9049         dcb_rx_conf = &dev->data->dev_conf.rx_adv_conf.dcb_rx_conf;
9050         if (dcb_rx_conf->nb_tcs > I40E_MAX_TRAFFIC_CLASS) {
9051                 PMD_INIT_LOG(ERR, "number of tc exceeds max.");
9052                 return -EINVAL;
9053         }
9054
9055         /* assume each tc has the same bw */
9056         tc_bw = I40E_MAX_PERCENT / dcb_rx_conf->nb_tcs;
9057         for (i = 0; i < dcb_rx_conf->nb_tcs; i++)
9058                 dcb_cfg->etscfg.tcbwtable[i] = tc_bw;
9059         /* to ensure the sum of tcbw is equal to 100 */
9060         bw_lf = I40E_MAX_PERCENT % dcb_rx_conf->nb_tcs;
9061         for (i = 0; i < bw_lf; i++)
9062                 dcb_cfg->etscfg.tcbwtable[i]++;
9063
9064         /* assume each tc has the same Transmission Selection Algorithm */
9065         for (i = 0; i < dcb_rx_conf->nb_tcs; i++)
9066                 dcb_cfg->etscfg.tsatable[i] = I40E_IEEE_TSA_ETS;
9067
9068         for (i = 0; i < I40E_MAX_USER_PRIORITY; i++)
9069                 dcb_cfg->etscfg.prioritytable[i] =
9070                                 dcb_rx_conf->dcb_tc[i];
9071
9072         /* FW needs one App to configure HW */
9073         dcb_cfg->numapps = I40E_DEFAULT_DCB_APP_NUM;
9074         dcb_cfg->app[0].selector = I40E_APP_SEL_ETHTYPE;
9075         dcb_cfg->app[0].priority = I40E_DEFAULT_DCB_APP_PRIO;
9076         dcb_cfg->app[0].protocolid = I40E_APP_PROTOID_FCOE;
9077
9078         if (dcb_rx_conf->nb_tcs == 0)
9079                 *tc_map = 1; /* tc0 only */
9080         else
9081                 *tc_map = RTE_LEN2MASK(dcb_rx_conf->nb_tcs, uint8_t);
9082
9083         if (dev->data->dev_conf.dcb_capability_en & ETH_DCB_PFC_SUPPORT) {
9084                 dcb_cfg->pfc.willing = 0;
9085                 dcb_cfg->pfc.pfccap = I40E_MAX_TRAFFIC_CLASS;
9086                 dcb_cfg->pfc.pfcenable = *tc_map;
9087         }
9088         return 0;
9089 }
9090
9091
/*
 * i40e_vsi_update_queue_mapping - rebuild the VSI's TC-to-queue mapping
 * @vsi: VSI whose queue mapping is updated
 * @info: AQ VSI-properties structure to fill (tc_mapping, queue_mapping,
 *        mapping_flags, valid_sections)
 * @enabled_tcmap: bitmap of TCs to enable on this VSI
 *
 * Splits the VSI's usable queues evenly across the enabled TCs (rounded
 * down to a power of two per TC, as required by the HW mapping encoding)
 * and writes the result into @info for a subsequent update-VSI AQ command.
 * Only MAIN and VMDQ2 VSIs are supported here.
 *
 * Returns I40E_SUCCESS, or an error code if the TC map is invalid, the
 * VSI type is unsupported, or there are fewer queues than enabled TCs.
 */
static enum i40e_status_code
i40e_vsi_update_queue_mapping(struct i40e_vsi *vsi,
			      struct i40e_aqc_vsi_properties_data *info,
			      uint8_t enabled_tcmap)
{
	enum i40e_status_code ret;
	int i, total_tc = 0;
	uint16_t qpnum_per_tc, bsf, qp_idx;
	struct rte_eth_dev_data *dev_data = I40E_VSI_TO_DEV_DATA(vsi);
	struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
	uint16_t used_queues;

	ret = validate_tcmap_parameter(vsi, enabled_tcmap);
	if (ret != I40E_SUCCESS)
		return ret;

	/* Count enabled TCs; treat an empty map as a single TC so the
	 * division below is well-defined.
	 */
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		if (enabled_tcmap & (1 << i))
			total_tc++;
	}
	if (total_tc == 0)
		total_tc = 1;
	vsi->enabled_tc = enabled_tcmap;

	/* different VSI has different queues assigned */
	if (vsi->type == I40E_VSI_MAIN)
		used_queues = dev_data->nb_rx_queues -
			pf->nb_cfg_vmdq_vsi * RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
	else if (vsi->type == I40E_VSI_VMDQ2)
		used_queues = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
	else {
		PMD_INIT_LOG(ERR, "unsupported VSI type.");
		return I40E_ERR_NO_AVAILABLE_VSI;
	}

	qpnum_per_tc = used_queues / total_tc;
	/* Number of queues per enabled TC */
	if (qpnum_per_tc == 0) {
		PMD_INIT_LOG(ERR, " number of queues is less that tcs.");
		return I40E_ERR_INVALID_QP_ID;
	}
	/* Round down to a power of two (the HW mapping encodes the queue
	 * count as a bit-size field) and cap at the per-TC maximum.
	 */
	qpnum_per_tc = RTE_MIN(i40e_align_floor(qpnum_per_tc),
				I40E_MAX_Q_PER_TC);
	bsf = rte_bsf32(qpnum_per_tc);

	/**
	 * Configure TC and queue mapping parameters, for enabled TC,
	 * allocate qpnum_per_tc queues to this traffic. For disabled TC,
	 * default queue will serve it.
	 */
	qp_idx = 0;
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		if (vsi->enabled_tc & (1 << i)) {
			/* Each entry packs the first-queue offset and the
			 * log2 queue count into one little-endian word.
			 */
			info->tc_mapping[i] = rte_cpu_to_le_16((qp_idx <<
					I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
				(bsf << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT));
			qp_idx += qpnum_per_tc;
		} else
			info->tc_mapping[i] = 0;
	}

	/* Associate queue number with VSI, Keep vsi->nb_qps unchanged */
	if (vsi->type == I40E_VSI_SRIOV) {
		/* SRIOV uses a non-contiguous map: one explicit entry per
		 * queue pair.
		 */
		info->mapping_flags |=
			rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
		for (i = 0; i < vsi->nb_qps; i++)
			info->queue_mapping[i] =
				rte_cpu_to_le_16(vsi->base_queue + i);
	} else {
		/* Contiguous map: only the base queue is recorded. */
		info->mapping_flags |=
			rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_CONTIG);
		info->queue_mapping[0] = rte_cpu_to_le_16(vsi->base_queue);
	}
	info->valid_sections |=
		rte_cpu_to_le_16(I40E_AQ_VSI_PROP_QUEUE_MAP_VALID);

	return I40E_SUCCESS;
}
9170
9171 /*
9172  * i40e_config_switch_comp_tc - Configure VEB tc setting for given TC map
9173  * @veb: VEB to be configured
9174  * @tc_map: enabled TC bitmap
9175  *
9176  * Returns 0 on success, negative value on failure
9177  */
static enum i40e_status_code
i40e_config_switch_comp_tc(struct i40e_veb *veb, uint8_t tc_map)
{
	struct i40e_aqc_configure_switching_comp_bw_config_data veb_bw;
	struct i40e_aqc_query_switching_comp_bw_config_resp bw_query;
	struct i40e_aqc_query_switching_comp_ets_config_resp ets_query;
	struct i40e_hw *hw = I40E_VSI_TO_HW(veb->associate_vsi);
	enum i40e_status_code ret = I40E_SUCCESS;
	int i;
	uint32_t bw_max;

	/* Check if enabled_tc is same as existing or new TCs */
	if (veb->enabled_tc == tc_map)
		return ret;

	/* configure tc bandwidth */
	memset(&veb_bw, 0, sizeof(veb_bw));
	veb_bw.tc_valid_bits = tc_map;
	/* Enable ETS TCs with equal BW Share for now across all VSIs */
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		if (tc_map & BIT_ULL(i))
			veb_bw.tc_bw_share_credits[i] = 1;
	}
	/* Push the per-TC BW shares to FW via the admin queue. */
	ret = i40e_aq_config_switch_comp_bw_config(hw, veb->seid,
						   &veb_bw, NULL);
	if (ret) {
		PMD_INIT_LOG(ERR, "AQ command Config switch_comp BW allocation"
				  " per TC failed = %d",
				  hw->aq.asq_last_status);
		return ret;
	}

	/* Read back the ETS configuration FW actually applied. */
	memset(&ets_query, 0, sizeof(ets_query));
	ret = i40e_aq_query_switch_comp_ets_config(hw, veb->seid,
						   &ets_query, NULL);
	if (ret != I40E_SUCCESS) {
		PMD_DRV_LOG(ERR, "Failed to get switch_comp ETS"
				 " configuration %u", hw->aq.asq_last_status);
		return ret;
	}
	/* Read back the per-TC bandwidth configuration as well. */
	memset(&bw_query, 0, sizeof(bw_query));
	ret = i40e_aq_query_switch_comp_bw_config(hw, veb->seid,
						  &bw_query, NULL);
	if (ret != I40E_SUCCESS) {
		PMD_DRV_LOG(ERR, "Failed to get switch_comp bandwidth"
				 " configuration %u", hw->aq.asq_last_status);
		return ret;
	}

	/* store and print out BW info */
	veb->bw_info.bw_limit = rte_le_to_cpu_16(ets_query.port_bw_limit);
	veb->bw_info.bw_max = ets_query.tc_bw_max;
	PMD_DRV_LOG(DEBUG, "switch_comp bw limit:%u", veb->bw_info.bw_limit);
	PMD_DRV_LOG(DEBUG, "switch_comp max_bw:%u", veb->bw_info.bw_max);
	/* tc_bw_max is two LE 16-bit words; combine into one 32-bit value. */
	bw_max = rte_le_to_cpu_16(bw_query.tc_bw_max[0]) |
		    (rte_le_to_cpu_16(bw_query.tc_bw_max[1]) <<
		     I40E_16_BIT_WIDTH);
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		veb->bw_info.bw_ets_share_credits[i] =
				bw_query.tc_bw_share_credits[i];
		veb->bw_info.bw_ets_credits[i] =
				rte_le_to_cpu_16(bw_query.tc_bw_limits[i]);
		/* 4 bits per TC, 4th bit is reserved */
		veb->bw_info.bw_ets_max[i] =
			(uint8_t)((bw_max >> (i * I40E_4_BIT_WIDTH)) &
				  RTE_LEN2MASK(3, uint8_t));
		PMD_DRV_LOG(DEBUG, "\tVEB TC%u:share credits %u", i,
			    veb->bw_info.bw_ets_share_credits[i]);
		PMD_DRV_LOG(DEBUG, "\tVEB TC%u:credits %u", i,
			    veb->bw_info.bw_ets_credits[i]);
		PMD_DRV_LOG(DEBUG, "\tVEB TC%u: max credits: %u", i,
			    veb->bw_info.bw_ets_max[i]);
	}

	/* Record the map only after HW has been configured successfully. */
	veb->enabled_tc = tc_map;

	return ret;
}
9256
9257
9258 /*
9259  * i40e_vsi_config_tc - Configure VSI tc setting for given TC map
9260  * @vsi: VSI to be configured
9261  * @tc_map: enabled TC bitmap
9262  *
9263  * Returns 0 on success, negative value on failure
9264  */
9265 static enum i40e_status_code
9266 i40e_vsi_config_tc(struct i40e_vsi *vsi, uint8_t tc_map)
9267 {
9268         struct i40e_aqc_configure_vsi_tc_bw_data bw_data;
9269         struct i40e_vsi_context ctxt;
9270         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
9271         enum i40e_status_code ret = I40E_SUCCESS;
9272         int i;
9273
9274         /* Check if enabled_tc is same as existing or new TCs */
9275         if (vsi->enabled_tc == tc_map)
9276                 return ret;
9277
9278         /* configure tc bandwidth */
9279         memset(&bw_data, 0, sizeof(bw_data));
9280         bw_data.tc_valid_bits = tc_map;
9281         /* Enable ETS TCs with equal BW Share for now across all VSIs */
9282         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
9283                 if (tc_map & BIT_ULL(i))
9284                         bw_data.tc_bw_credits[i] = 1;
9285         }
9286         ret = i40e_aq_config_vsi_tc_bw(hw, vsi->seid, &bw_data, NULL);
9287         if (ret) {
9288                 PMD_INIT_LOG(ERR, "AQ command Config VSI BW allocation"
9289                         " per TC failed = %d",
9290                         hw->aq.asq_last_status);
9291                 goto out;
9292         }
9293         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
9294                 vsi->info.qs_handle[i] = bw_data.qs_handles[i];
9295
9296         /* Update Queue Pairs Mapping for currently enabled UPs */
9297         ctxt.seid = vsi->seid;
9298         ctxt.pf_num = hw->pf_id;
9299         ctxt.vf_num = 0;
9300         ctxt.uplink_seid = vsi->uplink_seid;
9301         ctxt.info = vsi->info;
9302         i40e_get_cap(hw);
9303         ret = i40e_vsi_update_queue_mapping(vsi, &ctxt.info, tc_map);
9304         if (ret)
9305                 goto out;
9306
9307         /* Update the VSI after updating the VSI queue-mapping information */
9308         ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
9309         if (ret) {
9310                 PMD_INIT_LOG(ERR, "Failed to configure "
9311                             "TC queue mapping = %d",
9312                             hw->aq.asq_last_status);
9313                 goto out;
9314         }
9315         /* update the local VSI info with updated queue map */
9316         (void)rte_memcpy(&vsi->info.tc_mapping, &ctxt.info.tc_mapping,
9317                                         sizeof(vsi->info.tc_mapping));
9318         (void)rte_memcpy(&vsi->info.queue_mapping,
9319                         &ctxt.info.queue_mapping,
9320                 sizeof(vsi->info.queue_mapping));
9321         vsi->info.mapping_flags = ctxt.info.mapping_flags;
9322         vsi->info.valid_sections = 0;
9323
9324         /* query and update current VSI BW information */
9325         ret = i40e_vsi_get_bw_config(vsi);
9326         if (ret) {
9327                 PMD_INIT_LOG(ERR,
9328                          "Failed updating vsi bw info, err %s aq_err %s",
9329                          i40e_stat_str(hw, ret),
9330                          i40e_aq_str(hw, hw->aq.asq_last_status));
9331                 goto out;
9332         }
9333
9334         vsi->enabled_tc = tc_map;
9335
9336 out:
9337         return ret;
9338 }
9339
9340 /*
9341  * i40e_dcb_hw_configure - program the dcb setting to hw
9342  * @pf: pf the configuration is taken on
9343  * @new_cfg: new configuration
9344  * @tc_map: enabled TC bitmap
9345  *
9346  * Returns 0 on success, negative value on failure
9347  */
9348 static enum i40e_status_code
9349 i40e_dcb_hw_configure(struct i40e_pf *pf,
9350                       struct i40e_dcbx_config *new_cfg,
9351                       uint8_t tc_map)
9352 {
9353         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
9354         struct i40e_dcbx_config *old_cfg = &hw->local_dcbx_config;
9355         struct i40e_vsi *main_vsi = pf->main_vsi;
9356         struct i40e_vsi_list *vsi_list;
9357         enum i40e_status_code ret;
9358         int i;
9359         uint32_t val;
9360
9361         /* Use the FW API if FW > v4.4*/
9362         if (!(((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver >= 4)) ||
9363               (hw->aq.fw_maj_ver >= 5))) {
9364                 PMD_INIT_LOG(ERR, "FW < v4.4, can not use FW LLDP API"
9365                                   " to configure DCB");
9366                 return I40E_ERR_FIRMWARE_API_VERSION;
9367         }
9368
9369         /* Check if need reconfiguration */
9370         if (!memcmp(new_cfg, old_cfg, sizeof(struct i40e_dcbx_config))) {
9371                 PMD_INIT_LOG(ERR, "No Change in DCB Config required.");
9372                 return I40E_SUCCESS;
9373         }
9374
9375         /* Copy the new config to the current config */
9376         *old_cfg = *new_cfg;
9377         old_cfg->etsrec = old_cfg->etscfg;
9378         ret = i40e_set_dcb_config(hw);
9379         if (ret) {
9380                 PMD_INIT_LOG(ERR,
9381                          "Set DCB Config failed, err %s aq_err %s\n",
9382                          i40e_stat_str(hw, ret),
9383                          i40e_aq_str(hw, hw->aq.asq_last_status));
9384                 return ret;
9385         }
9386         /* set receive Arbiter to RR mode and ETS scheme by default */
9387         for (i = 0; i <= I40E_PRTDCB_RETSTCC_MAX_INDEX; i++) {
9388                 val = I40E_READ_REG(hw, I40E_PRTDCB_RETSTCC(i));
9389                 val &= ~(I40E_PRTDCB_RETSTCC_BWSHARE_MASK     |
9390                          I40E_PRTDCB_RETSTCC_UPINTC_MODE_MASK |
9391                          I40E_PRTDCB_RETSTCC_ETSTC_SHIFT);
9392                 val |= ((uint32_t)old_cfg->etscfg.tcbwtable[i] <<
9393                         I40E_PRTDCB_RETSTCC_BWSHARE_SHIFT) &
9394                          I40E_PRTDCB_RETSTCC_BWSHARE_MASK;
9395                 val |= ((uint32_t)1 << I40E_PRTDCB_RETSTCC_UPINTC_MODE_SHIFT) &
9396                          I40E_PRTDCB_RETSTCC_UPINTC_MODE_MASK;
9397                 val |= ((uint32_t)1 << I40E_PRTDCB_RETSTCC_ETSTC_SHIFT) &
9398                          I40E_PRTDCB_RETSTCC_ETSTC_MASK;
9399                 I40E_WRITE_REG(hw, I40E_PRTDCB_RETSTCC(i), val);
9400         }
9401         /* get local mib to check whether it is configured correctly */
9402         /* IEEE mode */
9403         hw->local_dcbx_config.dcbx_mode = I40E_DCBX_MODE_IEEE;
9404         /* Get Local DCB Config */
9405         i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_LOCAL, 0,
9406                                      &hw->local_dcbx_config);
9407
9408         /* if Veb is created, need to update TC of it at first */
9409         if (main_vsi->veb) {
9410                 ret = i40e_config_switch_comp_tc(main_vsi->veb, tc_map);
9411                 if (ret)
9412                         PMD_INIT_LOG(WARNING,
9413                                  "Failed configuring TC for VEB seid=%d\n",
9414                                  main_vsi->veb->seid);
9415         }
9416         /* Update each VSI */
9417         i40e_vsi_config_tc(main_vsi, tc_map);
9418         if (main_vsi->veb) {
9419                 TAILQ_FOREACH(vsi_list, &main_vsi->veb->head, list) {
9420                         /* Beside main VSI and VMDQ VSIs, only enable default
9421                          * TC for other VSIs
9422                          */
9423                         if (vsi_list->vsi->type == I40E_VSI_VMDQ2)
9424                                 ret = i40e_vsi_config_tc(vsi_list->vsi,
9425                                                          tc_map);
9426                         else
9427                                 ret = i40e_vsi_config_tc(vsi_list->vsi,
9428                                                          I40E_DEFAULT_TCMAP);
9429                         if (ret)
9430                                 PMD_INIT_LOG(WARNING,
9431                                          "Failed configuring TC for VSI seid=%d\n",
9432                                          vsi_list->vsi->seid);
9433                         /* continue */
9434                 }
9435         }
9436         return I40E_SUCCESS;
9437 }
9438
/*
 * i40e_dcb_init_configure - initial dcb config
 * @dev: device being configured
 * @sw_dcb: indicate whether dcb is sw configured (TRUE) or hw offload
 *
 * Returns 0 on success, negative value on failure
 */
static int
i40e_dcb_init_configure(struct rte_eth_dev *dev, bool sw_dcb)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	int ret = 0;

	/* Bail out early when the device has no DCB capability */
	if ((pf->flags & I40E_FLAG_DCB) == 0) {
		PMD_INIT_LOG(ERR, "HW doesn't support DCB");
		return -ENOTSUP;
	}

	/* DCB initialization:
	 * Update DCB configuration from the Firmware and configure
	 * LLDP MIB change event.
	 */
	if (sw_dcb == TRUE) {
		ret = i40e_init_dcb(hw);
		/* If lldp agent is stopped, the return value from
		 * i40e_init_dcb we expect is failure with I40E_AQ_RC_EPERM
		 * adminq status. Otherwise, it should return success.
		 */
		if ((ret == I40E_SUCCESS) || (ret != I40E_SUCCESS &&
		    hw->aq.asq_last_status == I40E_AQ_RC_EPERM)) {
			/* Start from an all-zero config, then fill in the
			 * driver's software DCB defaults below.
			 */
			memset(&hw->local_dcbx_config, 0,
				sizeof(struct i40e_dcbx_config));
			/* set dcb default configuration */
			hw->local_dcbx_config.etscfg.willing = 0;
			hw->local_dcbx_config.etscfg.maxtcs = 0;
			/* All ETS bandwidth assigned to TC0 */
			hw->local_dcbx_config.etscfg.tcbwtable[0] = 100;
			hw->local_dcbx_config.etscfg.tsatable[0] =
						I40E_IEEE_TSA_ETS;
			/* Recommended (etsrec) mirrors the operational config */
			hw->local_dcbx_config.etsrec =
				hw->local_dcbx_config.etscfg;
			hw->local_dcbx_config.pfc.willing = 0;
			hw->local_dcbx_config.pfc.pfccap =
						I40E_MAX_TRAFFIC_CLASS;
			/* FW needs one App to configure HW */
			hw->local_dcbx_config.numapps = 1;
			hw->local_dcbx_config.app[0].selector =
						I40E_APP_SEL_ETHTYPE;
			/* NOTE(review): priority 3 with the FCoE protocol id
			 * looks like the conventional FCoE app-table entry —
			 * confirm against the i40e shared-code documentation.
			 */
			hw->local_dcbx_config.app[0].priority = 3;
			hw->local_dcbx_config.app[0].protocolid =
						I40E_APP_PROTOID_FCOE;
			/* Push the default config down to the firmware */
			ret = i40e_set_dcb_config(hw);
			if (ret) {
				PMD_INIT_LOG(ERR, "default dcb config fails."
					" err = %d, aq_err = %d.", ret,
					  hw->aq.asq_last_status);
				return -ENOSYS;
			}
		} else {
			PMD_INIT_LOG(ERR, "DCB initialization in FW fails,"
					  " err = %d, aq_err = %d.", ret,
					  hw->aq.asq_last_status);
			return -ENOTSUP;
		}
	} else {
		/* HW-offload path: make sure the LLDP agent is running so
		 * firmware can negotiate DCBX; a start failure is only
		 * logged (the agent may already be running).
		 */
		ret = i40e_aq_start_lldp(hw, NULL);
		if (ret != I40E_SUCCESS)
			PMD_INIT_LOG(DEBUG, "Failed to start lldp");

		ret = i40e_init_dcb(hw);
		if (!ret) {
			if (hw->dcbx_status == I40E_DCBX_STATUS_DISABLED) {
				PMD_INIT_LOG(ERR, "HW doesn't support"
						  " DCBX offload.");
				return -ENOTSUP;
			}
		} else {
			PMD_INIT_LOG(ERR, "DCBX configuration failed, err = %d,"
					  " aq_err = %d.", ret,
					  hw->aq.asq_last_status);
			return -ENOTSUP;
		}
	}
	return 0;
}
9524
9525 /*
9526  * i40e_dcb_setup - setup dcb related config
9527  * @dev: device being configured
9528  *
9529  * Returns 0 on success, negative value on failure
9530  */
9531 static int
9532 i40e_dcb_setup(struct rte_eth_dev *dev)
9533 {
9534         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
9535         struct i40e_dcbx_config dcb_cfg;
9536         uint8_t tc_map = 0;
9537         int ret = 0;
9538
9539         if ((pf->flags & I40E_FLAG_DCB) == 0) {
9540                 PMD_INIT_LOG(ERR, "HW doesn't support DCB");
9541                 return -ENOTSUP;
9542         }
9543
9544         if (pf->vf_num != 0)
9545                 PMD_INIT_LOG(DEBUG, " DCB only works on pf and vmdq vsis.");
9546
9547         ret = i40e_parse_dcb_configure(dev, &dcb_cfg, &tc_map);
9548         if (ret) {
9549                 PMD_INIT_LOG(ERR, "invalid dcb config");
9550                 return -EINVAL;
9551         }
9552         ret = i40e_dcb_hw_configure(pf, &dcb_cfg, tc_map);
9553         if (ret) {
9554                 PMD_INIT_LOG(ERR, "dcb sw configure fails");
9555                 return -ENOSYS;
9556         }
9557
9558         return 0;
9559 }
9560
9561 static int
9562 i40e_dev_get_dcb_info(struct rte_eth_dev *dev,
9563                       struct rte_eth_dcb_info *dcb_info)
9564 {
9565         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
9566         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
9567         struct i40e_vsi *vsi = pf->main_vsi;
9568         struct i40e_dcbx_config *dcb_cfg = &hw->local_dcbx_config;
9569         uint16_t bsf, tc_mapping;
9570         int i, j = 0;
9571
9572         if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_DCB_FLAG)
9573                 dcb_info->nb_tcs = rte_bsf32(vsi->enabled_tc + 1);
9574         else
9575                 dcb_info->nb_tcs = 1;
9576         for (i = 0; i < I40E_MAX_USER_PRIORITY; i++)
9577                 dcb_info->prio_tc[i] = dcb_cfg->etscfg.prioritytable[i];
9578         for (i = 0; i < dcb_info->nb_tcs; i++)
9579                 dcb_info->tc_bws[i] = dcb_cfg->etscfg.tcbwtable[i];
9580
9581         /* get queue mapping if vmdq is disabled */
9582         if (!pf->nb_cfg_vmdq_vsi) {
9583                 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
9584                         if (!(vsi->enabled_tc & (1 << i)))
9585                                 continue;
9586                         tc_mapping = rte_le_to_cpu_16(vsi->info.tc_mapping[i]);
9587                         dcb_info->tc_queue.tc_rxq[j][i].base =
9588                                 (tc_mapping & I40E_AQ_VSI_TC_QUE_OFFSET_MASK) >>
9589                                 I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT;
9590                         dcb_info->tc_queue.tc_txq[j][i].base =
9591                                 dcb_info->tc_queue.tc_rxq[j][i].base;
9592                         bsf = (tc_mapping & I40E_AQ_VSI_TC_QUE_NUMBER_MASK) >>
9593                                 I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT;
9594                         dcb_info->tc_queue.tc_rxq[j][i].nb_queue = 1 << bsf;
9595                         dcb_info->tc_queue.tc_txq[j][i].nb_queue =
9596                                 dcb_info->tc_queue.tc_rxq[j][i].nb_queue;
9597                 }
9598                 return 0;
9599         }
9600
9601         /* get queue mapping if vmdq is enabled */
9602         do {
9603                 vsi = pf->vmdq[j].vsi;
9604                 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
9605                         if (!(vsi->enabled_tc & (1 << i)))
9606                                 continue;
9607                         tc_mapping = rte_le_to_cpu_16(vsi->info.tc_mapping[i]);
9608                         dcb_info->tc_queue.tc_rxq[j][i].base =
9609                                 (tc_mapping & I40E_AQ_VSI_TC_QUE_OFFSET_MASK) >>
9610                                 I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT;
9611                         dcb_info->tc_queue.tc_txq[j][i].base =
9612                                 dcb_info->tc_queue.tc_rxq[j][i].base;
9613                         bsf = (tc_mapping & I40E_AQ_VSI_TC_QUE_NUMBER_MASK) >>
9614                                 I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT;
9615                         dcb_info->tc_queue.tc_rxq[j][i].nb_queue = 1 << bsf;
9616                         dcb_info->tc_queue.tc_txq[j][i].nb_queue =
9617                                 dcb_info->tc_queue.tc_rxq[j][i].nb_queue;
9618                 }
9619                 j++;
9620         } while (j < RTE_MIN(pf->nb_cfg_vmdq_vsi, ETH_MAX_VMDQ_POOL));
9621         return 0;
9622 }
9623
9624 static int
9625 i40e_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
9626 {
9627         struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
9628         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
9629         uint16_t interval =
9630                 i40e_calc_itr_interval(RTE_LIBRTE_I40E_ITR_INTERVAL);
9631         uint16_t msix_intr;
9632
9633         msix_intr = intr_handle->intr_vec[queue_id];
9634         if (msix_intr == I40E_MISC_VEC_ID)
9635                 I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
9636                                I40E_PFINT_DYN_CTLN_INTENA_MASK |
9637                                I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
9638                                (0 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) |
9639                                (interval <<
9640                                 I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT));
9641         else
9642                 I40E_WRITE_REG(hw,
9643                                I40E_PFINT_DYN_CTLN(msix_intr -
9644                                                    I40E_RX_VEC_START),
9645                                I40E_PFINT_DYN_CTLN_INTENA_MASK |
9646                                I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
9647                                (0 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) |
9648                                (interval <<
9649                                 I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT));
9650
9651         I40E_WRITE_FLUSH(hw);
9652         rte_intr_enable(&dev->pci_dev->intr_handle);
9653
9654         return 0;
9655 }
9656
9657 static int
9658 i40e_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
9659 {
9660         struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
9661         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
9662         uint16_t msix_intr;
9663
9664         msix_intr = intr_handle->intr_vec[queue_id];
9665         if (msix_intr == I40E_MISC_VEC_ID)
9666                 I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0, 0);
9667         else
9668                 I40E_WRITE_REG(hw,
9669                                I40E_PFINT_DYN_CTLN(msix_intr -
9670                                                    I40E_RX_VEC_START),
9671                                0);
9672         I40E_WRITE_FLUSH(hw);
9673
9674         return 0;
9675 }
9676
9677 static int i40e_get_regs(struct rte_eth_dev *dev,
9678                          struct rte_dev_reg_info *regs)
9679 {
9680         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
9681         uint32_t *ptr_data = regs->data;
9682         uint32_t reg_idx, arr_idx, arr_idx2, reg_offset;
9683         const struct i40e_reg_info *reg_info;
9684
9685         if (ptr_data == NULL) {
9686                 regs->length = I40E_GLGEN_STAT_CLEAR + 4;
9687                 regs->width = sizeof(uint32_t);
9688                 return 0;
9689         }
9690
9691         /* The first few registers have to be read using AQ operations */
9692         reg_idx = 0;
9693         while (i40e_regs_adminq[reg_idx].name) {
9694                 reg_info = &i40e_regs_adminq[reg_idx++];
9695                 for (arr_idx = 0; arr_idx <= reg_info->count1; arr_idx++)
9696                         for (arr_idx2 = 0;
9697                                         arr_idx2 <= reg_info->count2;
9698                                         arr_idx2++) {
9699                                 reg_offset = arr_idx * reg_info->stride1 +
9700                                         arr_idx2 * reg_info->stride2;
9701                                 reg_offset += reg_info->base_addr;
9702                                 ptr_data[reg_offset >> 2] =
9703                                         i40e_read_rx_ctl(hw, reg_offset);
9704                         }
9705         }
9706
9707         /* The remaining registers can be read using primitives */
9708         reg_idx = 0;
9709         while (i40e_regs_others[reg_idx].name) {
9710                 reg_info = &i40e_regs_others[reg_idx++];
9711                 for (arr_idx = 0; arr_idx <= reg_info->count1; arr_idx++)
9712                         for (arr_idx2 = 0;
9713                                         arr_idx2 <= reg_info->count2;
9714                                         arr_idx2++) {
9715                                 reg_offset = arr_idx * reg_info->stride1 +
9716                                         arr_idx2 * reg_info->stride2;
9717                                 reg_offset += reg_info->base_addr;
9718                                 ptr_data[reg_offset >> 2] =
9719                                         I40E_READ_REG(hw, reg_offset);
9720                         }
9721         }
9722
9723         return 0;
9724 }
9725
9726 static int i40e_get_eeprom_length(struct rte_eth_dev *dev)
9727 {
9728         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
9729
9730         /* Convert word count to byte count */
9731         return hw->nvm.sr_size << 1;
9732 }
9733
9734 static int i40e_get_eeprom(struct rte_eth_dev *dev,
9735                            struct rte_dev_eeprom_info *eeprom)
9736 {
9737         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
9738         uint16_t *data = eeprom->data;
9739         uint16_t offset, length, cnt_words;
9740         int ret_code;
9741
9742         offset = eeprom->offset >> 1;
9743         length = eeprom->length >> 1;
9744         cnt_words = length;
9745
9746         if (offset > hw->nvm.sr_size ||
9747                 offset + length > hw->nvm.sr_size) {
9748                 PMD_DRV_LOG(ERR, "Requested EEPROM bytes out of range.");
9749                 return -EINVAL;
9750         }
9751
9752         eeprom->magic = hw->vendor_id | (hw->device_id << 16);
9753
9754         ret_code = i40e_read_nvm_buffer(hw, offset, &cnt_words, data);
9755         if (ret_code != I40E_SUCCESS || cnt_words != length) {
9756                 PMD_DRV_LOG(ERR, "EEPROM read failed.");
9757                 return -EIO;
9758         }
9759
9760         return 0;
9761 }
9762
9763 static void i40e_set_default_mac_addr(struct rte_eth_dev *dev,
9764                                       struct ether_addr *mac_addr)
9765 {
9766         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
9767
9768         if (!is_valid_assigned_ether_addr(mac_addr)) {
9769                 PMD_DRV_LOG(ERR, "Tried to set invalid MAC address.");
9770                 return;
9771         }
9772
9773         /* Flags: 0x3 updates port address */
9774         i40e_aq_mac_address_write(hw, 0x3, mac_addr->addr_bytes, NULL);
9775 }
9776
9777 static int
9778 i40e_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
9779 {
9780         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
9781         struct rte_eth_dev_data *dev_data = pf->dev_data;
9782         uint32_t frame_size = mtu + ETHER_HDR_LEN
9783                               + ETHER_CRC_LEN + I40E_VLAN_TAG_SIZE;
9784         int ret = 0;
9785
9786         /* check if mtu is within the allowed range */
9787         if ((mtu < ETHER_MIN_MTU) || (frame_size > I40E_FRAME_SIZE_MAX))
9788                 return -EINVAL;
9789
9790         /* mtu setting is forbidden if port is start */
9791         if (dev_data->dev_started) {
9792                 PMD_DRV_LOG(ERR,
9793                             "port %d must be stopped before configuration\n",
9794                             dev_data->port_id);
9795                 return -EBUSY;
9796         }
9797
9798         if (frame_size > ETHER_MAX_LEN)
9799                 dev_data->dev_conf.rxmode.jumbo_frame = 1;
9800         else
9801                 dev_data->dev_conf.rxmode.jumbo_frame = 0;
9802
9803         dev_data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
9804
9805         return ret;
9806 }