/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/queue.h>
#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <stdarg.h>
#include <inttypes.h>
#include <netinet/in.h>
#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cycles.h>

#include <rte_interrupts.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_pci.h>
#include <rte_bus_pci.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_memory.h>
#include <rte_eal.h>
#include <rte_alarm.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_ethdev_pci.h>
#include <rte_malloc.h>
#include <rte_random.h>
#include <rte_dev.h>
#include <rte_hash_crc.h>
#ifdef RTE_LIBRTE_SECURITY
#include <rte_security_driver.h>
#endif

#include "ixgbe_logs.h"
#include "base/ixgbe_api.h"
#include "base/ixgbe_vf.h"
#include "base/ixgbe_common.h"
#include "ixgbe_ethdev.h"
#include "ixgbe_bypass.h"
#include "ixgbe_rxtx.h"
#include "base/ixgbe_type.h"
#include "base/ixgbe_phy.h"
#include "ixgbe_regs.h"

/*
 * High threshold controlling when to start sending XOFF frames. Must be at
 * least 8 bytes less than receive packet buffer size. This value is in units
 * of 1024 bytes.
 */
#define IXGBE_FC_HI    0x80

/*
 * Low threshold controlling when to start sending XON frames. This value is
 * in units of 1024 bytes.
 */
#define IXGBE_FC_LO    0x40

/* Default minimum inter-interrupt interval for EITR configuration */
#define IXGBE_MIN_INTER_INTERRUPT_INTERVAL_DEFAULT    0x79E

/* Timer value included in XOFF frames. */
#define IXGBE_FC_PAUSE 0x680

/* Default value of Max Rx Queue */
#define IXGBE_MAX_RX_QUEUE_NUM 128

#define IXGBE_LINK_DOWN_CHECK_TIMEOUT 4000 /* ms */
#define IXGBE_LINK_UP_CHECK_TIMEOUT   1000 /* ms */
#define IXGBE_VMDQ_NUM_UC_MAC         4096 /* Maximum nb. of UC MAC addr. */

#define IXGBE_MMW_SIZE_DEFAULT        0x4
#define IXGBE_MMW_SIZE_JUMBO_FRAME    0x14
#define IXGBE_MAX_RING_DESC           4096 /* replicate define from rxtx */

/*
 *  Default values for RX/TX configuration
 */
#define IXGBE_DEFAULT_RX_FREE_THRESH  32
#define IXGBE_DEFAULT_RX_PTHRESH      8
#define IXGBE_DEFAULT_RX_HTHRESH      8
#define IXGBE_DEFAULT_RX_WTHRESH      0

#define IXGBE_DEFAULT_TX_FREE_THRESH  32
#define IXGBE_DEFAULT_TX_PTHRESH      32
#define IXGBE_DEFAULT_TX_HTHRESH      0
#define IXGBE_DEFAULT_TX_WTHRESH      0
#define IXGBE_DEFAULT_TX_RSBIT_THRESH 32

/* Bit shift and mask */
#define IXGBE_4_BIT_WIDTH  (CHAR_BIT / 2)
#define IXGBE_4_BIT_MASK   RTE_LEN2MASK(IXGBE_4_BIT_WIDTH, uint8_t)
#define IXGBE_8_BIT_WIDTH  CHAR_BIT
#define IXGBE_8_BIT_MASK   UINT8_MAX

#define IXGBEVF_PMD_NAME "rte_ixgbevf_pmd" /* PMD name */

#define IXGBE_QUEUE_STAT_COUNTERS (sizeof(hw_stats->qprc) / sizeof(hw_stats->qprc[0]))

#define IXGBE_HKEY_MAX_INDEX 10

/* Additional timesync values. */
#define NSEC_PER_SEC             1000000000L
#define IXGBE_INCVAL_10GB        0x66666666
#define IXGBE_INCVAL_1GB         0x40000000
#define IXGBE_INCVAL_100         0x50000000
#define IXGBE_INCVAL_SHIFT_10GB  28
#define IXGBE_INCVAL_SHIFT_1GB   24
#define IXGBE_INCVAL_SHIFT_100   21
#define IXGBE_INCVAL_SHIFT_82599 7
#define IXGBE_INCPER_SHIFT_82599 24

#define IXGBE_CYCLECOUNTER_MASK   0xffffffffffffffffULL

#define IXGBE_VT_CTL_POOLING_MODE_MASK         0x00030000
#define IXGBE_VT_CTL_POOLING_MODE_ETAG         0x00010000
#define DEFAULT_ETAG_ETYPE                     0x893f
#define IXGBE_ETAG_ETYPE                       0x00005084
#define IXGBE_ETAG_ETYPE_MASK                  0x0000ffff
#define IXGBE_ETAG_ETYPE_VALID                 0x80000000
#define IXGBE_RAH_ADTYPE                       0x40000000
#define IXGBE_RAL_ETAG_FILTER_MASK             0x00003fff
#define IXGBE_VMVIR_TAGA_MASK                  0x18000000
#define IXGBE_VMVIR_TAGA_ETAG_INSERT           0x08000000
#define IXGBE_VMTIR(_i) (0x00017000 + ((_i) * 4)) /* 64 of these (0-63) */
#define IXGBE_QDE_STRIP_TAG                    0x00000004
#define IXGBE_VTEICR_MASK                      0x07

#define IXGBE_EXVET_VET_EXT_SHIFT              16
#define IXGBE_DMATXCTL_VT_MASK                 0xFFFF0000

static int eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev);
static int eth_ixgbe_dev_uninit(struct rte_eth_dev *eth_dev);
static int ixgbe_fdir_filter_init(struct rte_eth_dev *eth_dev);
static int ixgbe_fdir_filter_uninit(struct rte_eth_dev *eth_dev);
static int ixgbe_l2_tn_filter_init(struct rte_eth_dev *eth_dev);
static int ixgbe_l2_tn_filter_uninit(struct rte_eth_dev *eth_dev);
static int ixgbe_ntuple_filter_uninit(struct rte_eth_dev *eth_dev);
static int  ixgbe_dev_configure(struct rte_eth_dev *dev);
static int  ixgbe_dev_start(struct rte_eth_dev *dev);
static void ixgbe_dev_stop(struct rte_eth_dev *dev);
static int  ixgbe_dev_set_link_up(struct rte_eth_dev *dev);
static int  ixgbe_dev_set_link_down(struct rte_eth_dev *dev);
static void ixgbe_dev_close(struct rte_eth_dev *dev);
static int  ixgbe_dev_reset(struct rte_eth_dev *dev);
static void ixgbe_dev_promiscuous_enable(struct rte_eth_dev *dev);
static void ixgbe_dev_promiscuous_disable(struct rte_eth_dev *dev);
static void ixgbe_dev_allmulticast_enable(struct rte_eth_dev *dev);
static void ixgbe_dev_allmulticast_disable(struct rte_eth_dev *dev);
static int ixgbe_dev_link_update(struct rte_eth_dev *dev,
                                int wait_to_complete);
static int ixgbe_dev_stats_get(struct rte_eth_dev *dev,
                                struct rte_eth_stats *stats);
static int ixgbe_dev_xstats_get(struct rte_eth_dev *dev,
                                struct rte_eth_xstat *xstats, unsigned n);
static int ixgbevf_dev_xstats_get(struct rte_eth_dev *dev,
                                  struct rte_eth_xstat *xstats, unsigned n);
static int
ixgbe_dev_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
                uint64_t *values, unsigned int n);
static void ixgbe_dev_stats_reset(struct rte_eth_dev *dev);
static void ixgbe_dev_xstats_reset(struct rte_eth_dev *dev);
static int ixgbe_dev_xstats_get_names(struct rte_eth_dev *dev,
        struct rte_eth_xstat_name *xstats_names,
        unsigned int size);
static int ixgbevf_dev_xstats_get_names(struct rte_eth_dev *dev,
        struct rte_eth_xstat_name *xstats_names, unsigned limit);
static int ixgbe_dev_xstats_get_names_by_id(
        struct rte_eth_dev *dev,
        struct rte_eth_xstat_name *xstats_names,
        const uint64_t *ids,
        unsigned int limit);
static int ixgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev,
                                             uint16_t queue_id,
                                             uint8_t stat_idx,
                                             uint8_t is_rx);
static int ixgbe_fw_version_get(struct rte_eth_dev *dev, char *fw_version,
                                 size_t fw_size);
static void ixgbe_dev_info_get(struct rte_eth_dev *dev,
                               struct rte_eth_dev_info *dev_info);
static const uint32_t *ixgbe_dev_supported_ptypes_get(struct rte_eth_dev *dev);
static void ixgbevf_dev_info_get(struct rte_eth_dev *dev,
                                 struct rte_eth_dev_info *dev_info);
static int ixgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);

static int ixgbe_vlan_filter_set(struct rte_eth_dev *dev,
                uint16_t vlan_id, int on);
static int ixgbe_vlan_tpid_set(struct rte_eth_dev *dev,
                               enum rte_vlan_type vlan_type,
                               uint16_t tpid_id);
static void ixgbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev,
                uint16_t queue, bool on);
static void ixgbe_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue,
                int on);
static int ixgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask);
static void ixgbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue);
static void ixgbe_vlan_hw_strip_disable(struct rte_eth_dev *dev, uint16_t queue);
static void ixgbe_vlan_hw_extend_enable(struct rte_eth_dev *dev);
static void ixgbe_vlan_hw_extend_disable(struct rte_eth_dev *dev);

static int ixgbe_dev_led_on(struct rte_eth_dev *dev);
static int ixgbe_dev_led_off(struct rte_eth_dev *dev);
static int ixgbe_flow_ctrl_get(struct rte_eth_dev *dev,
                               struct rte_eth_fc_conf *fc_conf);
static int ixgbe_flow_ctrl_set(struct rte_eth_dev *dev,
                               struct rte_eth_fc_conf *fc_conf);
static int ixgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev,
                struct rte_eth_pfc_conf *pfc_conf);
static int ixgbe_dev_rss_reta_update(struct rte_eth_dev *dev,
                        struct rte_eth_rss_reta_entry64 *reta_conf,
                        uint16_t reta_size);
static int ixgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
                        struct rte_eth_rss_reta_entry64 *reta_conf,
                        uint16_t reta_size);
static void ixgbe_dev_link_status_print(struct rte_eth_dev *dev);
static int ixgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on);
static int ixgbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev);
static int ixgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev);
static int ixgbe_dev_interrupt_get_status(struct rte_eth_dev *dev);
static int ixgbe_dev_interrupt_action(struct rte_eth_dev *dev,
                                      struct rte_intr_handle *handle);
static void ixgbe_dev_interrupt_handler(void *param);
static void ixgbe_dev_interrupt_delayed_handler(void *param);
static int ixgbe_add_rar(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
                         uint32_t index, uint32_t pool);
static void ixgbe_remove_rar(struct rte_eth_dev *dev, uint32_t index);
static void ixgbe_set_default_mac_addr(struct rte_eth_dev *dev,
                                           struct ether_addr *mac_addr);
static void ixgbe_dcb_init(struct ixgbe_hw *hw, struct ixgbe_dcb_config *dcb_config);
static bool is_device_supported(struct rte_eth_dev *dev,
                                struct rte_pci_driver *drv);

/* For Virtual Function support */
static int eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev);
static int eth_ixgbevf_dev_uninit(struct rte_eth_dev *eth_dev);
static int  ixgbevf_dev_configure(struct rte_eth_dev *dev);
static int  ixgbevf_dev_start(struct rte_eth_dev *dev);
static int ixgbevf_dev_link_update(struct rte_eth_dev *dev,
                                   int wait_to_complete);
static void ixgbevf_dev_stop(struct rte_eth_dev *dev);
static void ixgbevf_dev_close(struct rte_eth_dev *dev);
static int  ixgbevf_dev_reset(struct rte_eth_dev *dev);
static void ixgbevf_intr_disable(struct ixgbe_hw *hw);
static void ixgbevf_intr_enable(struct ixgbe_hw *hw);
static int ixgbevf_dev_stats_get(struct rte_eth_dev *dev,
                struct rte_eth_stats *stats);
static void ixgbevf_dev_stats_reset(struct rte_eth_dev *dev);
static int ixgbevf_vlan_filter_set(struct rte_eth_dev *dev,
                uint16_t vlan_id, int on);
static void ixgbevf_vlan_strip_queue_set(struct rte_eth_dev *dev,
                uint16_t queue, int on);
static int ixgbevf_vlan_offload_set(struct rte_eth_dev *dev, int mask);
static void ixgbevf_set_vfta_all(struct rte_eth_dev *dev, bool on);
static int ixgbevf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev,
                                            uint16_t queue_id);
static int ixgbevf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev,
                                             uint16_t queue_id);
static void ixgbevf_set_ivar_map(struct ixgbe_hw *hw, int8_t direction,
                                 uint8_t queue, uint8_t msix_vector);
static void ixgbevf_configure_msix(struct rte_eth_dev *dev);
static void ixgbevf_dev_allmulticast_enable(struct rte_eth_dev *dev);
static void ixgbevf_dev_allmulticast_disable(struct rte_eth_dev *dev);

/* For Eth VMDQ API support */
static int ixgbe_uc_hash_table_set(struct rte_eth_dev *dev,
                struct ether_addr *mac_addr, uint8_t on);
static int ixgbe_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t on);
static int ixgbe_mirror_rule_set(struct rte_eth_dev *dev,
                struct rte_eth_mirror_conf *mirror_conf,
                uint8_t rule_id, uint8_t on);
static int ixgbe_mirror_rule_reset(struct rte_eth_dev *dev,
                uint8_t rule_id);
static int ixgbe_dev_rx_queue_intr_enable(struct rte_eth_dev *dev,
                                          uint16_t queue_id);
static int ixgbe_dev_rx_queue_intr_disable(struct rte_eth_dev *dev,
                                           uint16_t queue_id);
static void ixgbe_set_ivar_map(struct ixgbe_hw *hw, int8_t direction,
                               uint8_t queue, uint8_t msix_vector);
static void ixgbe_configure_msix(struct rte_eth_dev *dev);

static int ixgbevf_add_mac_addr(struct rte_eth_dev *dev,
                                struct ether_addr *mac_addr,
                                uint32_t index, uint32_t pool);
static void ixgbevf_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index);
static void ixgbevf_set_default_mac_addr(struct rte_eth_dev *dev,
                                             struct ether_addr *mac_addr);
static int ixgbe_syn_filter_get(struct rte_eth_dev *dev,
                        struct rte_eth_syn_filter *filter);
static int ixgbe_syn_filter_handle(struct rte_eth_dev *dev,
                        enum rte_filter_op filter_op,
                        void *arg);
static int ixgbe_add_5tuple_filter(struct rte_eth_dev *dev,
                        struct ixgbe_5tuple_filter *filter);
static void ixgbe_remove_5tuple_filter(struct rte_eth_dev *dev,
                        struct ixgbe_5tuple_filter *filter);
static int ixgbe_ntuple_filter_handle(struct rte_eth_dev *dev,
                                enum rte_filter_op filter_op,
                                void *arg);
static int ixgbe_get_ntuple_filter(struct rte_eth_dev *dev,
                        struct rte_eth_ntuple_filter *filter);
static int ixgbe_ethertype_filter_handle(struct rte_eth_dev *dev,
                                enum rte_filter_op filter_op,
                                void *arg);
static int ixgbe_get_ethertype_filter(struct rte_eth_dev *dev,
                        struct rte_eth_ethertype_filter *filter);
static int ixgbe_dev_filter_ctrl(struct rte_eth_dev *dev,
                     enum rte_filter_type filter_type,
                     enum rte_filter_op filter_op,
                     void *arg);
static int ixgbevf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu);

static int ixgbe_dev_set_mc_addr_list(struct rte_eth_dev *dev,
                                      struct ether_addr *mc_addr_set,
                                      uint32_t nb_mc_addr);
static int ixgbe_dev_get_dcb_info(struct rte_eth_dev *dev,
                                   struct rte_eth_dcb_info *dcb_info);

static int ixgbe_get_reg_length(struct rte_eth_dev *dev);
static int ixgbe_get_regs(struct rte_eth_dev *dev,
                            struct rte_dev_reg_info *regs);
static int ixgbe_get_eeprom_length(struct rte_eth_dev *dev);
static int ixgbe_get_eeprom(struct rte_eth_dev *dev,
                                struct rte_dev_eeprom_info *eeprom);
static int ixgbe_set_eeprom(struct rte_eth_dev *dev,
                                struct rte_dev_eeprom_info *eeprom);

static int ixgbevf_get_reg_length(struct rte_eth_dev *dev);
static int ixgbevf_get_regs(struct rte_eth_dev *dev,
                                struct rte_dev_reg_info *regs);

static int ixgbe_timesync_enable(struct rte_eth_dev *dev);
static int ixgbe_timesync_disable(struct rte_eth_dev *dev);
static int ixgbe_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
                                            struct timespec *timestamp,
                                            uint32_t flags);
static int ixgbe_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
                                            struct timespec *timestamp);
static int ixgbe_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta);
static int ixgbe_timesync_read_time(struct rte_eth_dev *dev,
                                   struct timespec *timestamp);
static int ixgbe_timesync_write_time(struct rte_eth_dev *dev,
                                   const struct timespec *timestamp);
static void ixgbevf_dev_interrupt_handler(void *param);

static int ixgbe_dev_l2_tunnel_eth_type_conf
        (struct rte_eth_dev *dev, struct rte_eth_l2_tunnel_conf *l2_tunnel);
static int ixgbe_dev_l2_tunnel_offload_set
        (struct rte_eth_dev *dev,
         struct rte_eth_l2_tunnel_conf *l2_tunnel,
         uint32_t mask,
         uint8_t en);
static int ixgbe_dev_l2_tunnel_filter_handle(struct rte_eth_dev *dev,
                                             enum rte_filter_op filter_op,
                                             void *arg);

static int ixgbe_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
                                         struct rte_eth_udp_tunnel *udp_tunnel);
static int ixgbe_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
                                         struct rte_eth_udp_tunnel *udp_tunnel);
static int ixgbe_filter_restore(struct rte_eth_dev *dev);
static void ixgbe_l2_tunnel_conf(struct rte_eth_dev *dev);

/*
 * Define VF stats macro for registers that are not "cleared on read"
 */
#define UPDATE_VF_STAT(reg, last, cur)                          \
{                                                               \
        uint32_t latest = IXGBE_READ_REG(hw, reg);              \
        cur += (latest - last) & UINT_MAX;                      \
        last = latest;                                          \
}

#define UPDATE_VF_STAT_36BIT(lsb, msb, last, cur)                \
{                                                                \
        u64 new_lsb = IXGBE_READ_REG(hw, lsb);                   \
        u64 new_msb = IXGBE_READ_REG(hw, msb);                   \
        u64 latest = ((new_msb << 32) | new_lsb);                \
        cur += (0x1000000000LL + latest - last) & 0xFFFFFFFFFLL; \
        last = latest;                                           \
}
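
/*
 * Illustrative note (not from the original source): both macros rely on
 * unsigned modular arithmetic to survive counter wraparound. In the
 * 32-bit case, last = 0xFFFFFFF0 and latest = 0x00000010 give
 * (latest - last) & UINT_MAX = 0x20, the true delta. The 36-bit variant
 * adds 2^36 (0x1000000000) before masking with the 36-bit mask
 * 0xFFFFFFFFF, so the difference stays non-negative across a wrap of
 * the combined msb/lsb value.
 */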

#define IXGBE_SET_HWSTRIP(h, q) do {\
                uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
                uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
                (h)->bitmap[idx] |= 1 << bit;\
        } while (0)

#define IXGBE_CLEAR_HWSTRIP(h, q) do {\
                uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
                uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
                (h)->bitmap[idx] &= ~(1 << bit);\
        } while (0)

#define IXGBE_GET_HWSTRIP(h, q, r) do {\
                uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
                uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
                (r) = (h)->bitmap[idx] >> bit & 1;\
        } while (0)
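
/*
 * Illustrative note: the HWSTRIP macros treat (h)->bitmap as a flat bit
 * array with one bit per queue; NBBY is the number of bits per byte (8).
 * Assuming 32-bit bitmap words, queue 37 maps to idx = 37 / 32 = 1 and
 * bit = 37 % 32 = 5, i.e. bit 5 of bitmap[1].
 */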

/*
 * The set of PCI devices this driver supports
 */
static const struct rte_pci_id pci_id_ixgbe_map[] = {
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_BX) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KR) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_EM) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_LS) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_SFP) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_1G_T) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T1) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR_L) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP_N) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII_L) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_10G_T) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_QSFP) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_QSFP_N) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T_L) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR) },
#ifdef RTE_LIBRTE_IXGBE_BYPASS
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BYPASS) },
#endif
        { .vendor_id = 0, /* sentinel */ },
};

/*
 * The set of PCI devices this driver supports (for 82599 VF)
 */
static const struct rte_pci_id pci_id_ixgbevf_map[] = {
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF_HV) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF_HV) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550_VF_HV) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550_VF) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_VF) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_VF_HV) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_VF) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_VF_HV) },
        { .vendor_id = 0, /* sentinel */ },
};

static const struct rte_eth_desc_lim rx_desc_lim = {
        .nb_max = IXGBE_MAX_RING_DESC,
        .nb_min = IXGBE_MIN_RING_DESC,
        .nb_align = IXGBE_RXD_ALIGN,
};

static const struct rte_eth_desc_lim tx_desc_lim = {
        .nb_max = IXGBE_MAX_RING_DESC,
        .nb_min = IXGBE_MIN_RING_DESC,
        .nb_align = IXGBE_TXD_ALIGN,
        .nb_seg_max = IXGBE_TX_MAX_SEG,
        .nb_mtu_seg_max = IXGBE_TX_MAX_SEG,
};

static const struct eth_dev_ops ixgbe_eth_dev_ops = {
        .dev_configure        = ixgbe_dev_configure,
        .dev_start            = ixgbe_dev_start,
        .dev_stop             = ixgbe_dev_stop,
        .dev_set_link_up      = ixgbe_dev_set_link_up,
        .dev_set_link_down    = ixgbe_dev_set_link_down,
        .dev_close            = ixgbe_dev_close,
        .dev_reset            = ixgbe_dev_reset,
        .promiscuous_enable   = ixgbe_dev_promiscuous_enable,
        .promiscuous_disable  = ixgbe_dev_promiscuous_disable,
        .allmulticast_enable  = ixgbe_dev_allmulticast_enable,
        .allmulticast_disable = ixgbe_dev_allmulticast_disable,
        .link_update          = ixgbe_dev_link_update,
        .stats_get            = ixgbe_dev_stats_get,
        .xstats_get           = ixgbe_dev_xstats_get,
        .xstats_get_by_id     = ixgbe_dev_xstats_get_by_id,
        .stats_reset          = ixgbe_dev_stats_reset,
        .xstats_reset         = ixgbe_dev_xstats_reset,
        .xstats_get_names     = ixgbe_dev_xstats_get_names,
        .xstats_get_names_by_id = ixgbe_dev_xstats_get_names_by_id,
        .queue_stats_mapping_set = ixgbe_dev_queue_stats_mapping_set,
        .fw_version_get       = ixgbe_fw_version_get,
        .dev_infos_get        = ixgbe_dev_info_get,
        .dev_supported_ptypes_get = ixgbe_dev_supported_ptypes_get,
        .mtu_set              = ixgbe_dev_mtu_set,
        .vlan_filter_set      = ixgbe_vlan_filter_set,
        .vlan_tpid_set        = ixgbe_vlan_tpid_set,
        .vlan_offload_set     = ixgbe_vlan_offload_set,
        .vlan_strip_queue_set = ixgbe_vlan_strip_queue_set,
        .rx_queue_start       = ixgbe_dev_rx_queue_start,
        .rx_queue_stop        = ixgbe_dev_rx_queue_stop,
        .tx_queue_start       = ixgbe_dev_tx_queue_start,
        .tx_queue_stop        = ixgbe_dev_tx_queue_stop,
        .rx_queue_setup       = ixgbe_dev_rx_queue_setup,
        .rx_queue_intr_enable = ixgbe_dev_rx_queue_intr_enable,
        .rx_queue_intr_disable = ixgbe_dev_rx_queue_intr_disable,
        .rx_queue_release     = ixgbe_dev_rx_queue_release,
        .rx_queue_count       = ixgbe_dev_rx_queue_count,
        .rx_descriptor_done   = ixgbe_dev_rx_descriptor_done,
        .rx_descriptor_status = ixgbe_dev_rx_descriptor_status,
        .tx_descriptor_status = ixgbe_dev_tx_descriptor_status,
        .tx_queue_setup       = ixgbe_dev_tx_queue_setup,
        .tx_queue_release     = ixgbe_dev_tx_queue_release,
        .dev_led_on           = ixgbe_dev_led_on,
        .dev_led_off          = ixgbe_dev_led_off,
        .flow_ctrl_get        = ixgbe_flow_ctrl_get,
        .flow_ctrl_set        = ixgbe_flow_ctrl_set,
        .priority_flow_ctrl_set = ixgbe_priority_flow_ctrl_set,
        .mac_addr_add         = ixgbe_add_rar,
        .mac_addr_remove      = ixgbe_remove_rar,
        .mac_addr_set         = ixgbe_set_default_mac_addr,
        .uc_hash_table_set    = ixgbe_uc_hash_table_set,
        .uc_all_hash_table_set  = ixgbe_uc_all_hash_table_set,
        .mirror_rule_set      = ixgbe_mirror_rule_set,
        .mirror_rule_reset    = ixgbe_mirror_rule_reset,
        .set_queue_rate_limit = ixgbe_set_queue_rate_limit,
        .reta_update          = ixgbe_dev_rss_reta_update,
        .reta_query           = ixgbe_dev_rss_reta_query,
        .rss_hash_update      = ixgbe_dev_rss_hash_update,
        .rss_hash_conf_get    = ixgbe_dev_rss_hash_conf_get,
        .filter_ctrl          = ixgbe_dev_filter_ctrl,
        .set_mc_addr_list     = ixgbe_dev_set_mc_addr_list,
        .rxq_info_get         = ixgbe_rxq_info_get,
        .txq_info_get         = ixgbe_txq_info_get,
        .timesync_enable      = ixgbe_timesync_enable,
        .timesync_disable     = ixgbe_timesync_disable,
        .timesync_read_rx_timestamp = ixgbe_timesync_read_rx_timestamp,
        .timesync_read_tx_timestamp = ixgbe_timesync_read_tx_timestamp,
        .get_reg              = ixgbe_get_regs,
        .get_eeprom_length    = ixgbe_get_eeprom_length,
        .get_eeprom           = ixgbe_get_eeprom,
        .set_eeprom           = ixgbe_set_eeprom,
        .get_dcb_info         = ixgbe_dev_get_dcb_info,
        .timesync_adjust_time = ixgbe_timesync_adjust_time,
        .timesync_read_time   = ixgbe_timesync_read_time,
        .timesync_write_time  = ixgbe_timesync_write_time,
        .l2_tunnel_eth_type_conf = ixgbe_dev_l2_tunnel_eth_type_conf,
        .l2_tunnel_offload_set   = ixgbe_dev_l2_tunnel_offload_set,
        .udp_tunnel_port_add  = ixgbe_dev_udp_tunnel_port_add,
        .udp_tunnel_port_del  = ixgbe_dev_udp_tunnel_port_del,
        .tm_ops_get           = ixgbe_tm_ops_get,
};

/*
 * dev_ops for virtual function; only the bare necessities for basic VF
 * operation have been implemented
 */
static const struct eth_dev_ops ixgbevf_eth_dev_ops = {
        .dev_configure        = ixgbevf_dev_configure,
        .dev_start            = ixgbevf_dev_start,
        .dev_stop             = ixgbevf_dev_stop,
        .link_update          = ixgbevf_dev_link_update,
        .stats_get            = ixgbevf_dev_stats_get,
        .xstats_get           = ixgbevf_dev_xstats_get,
        .stats_reset          = ixgbevf_dev_stats_reset,
        .xstats_reset         = ixgbevf_dev_stats_reset,
        .xstats_get_names     = ixgbevf_dev_xstats_get_names,
        .dev_close            = ixgbevf_dev_close,
        .dev_reset            = ixgbevf_dev_reset,
        .allmulticast_enable  = ixgbevf_dev_allmulticast_enable,
        .allmulticast_disable = ixgbevf_dev_allmulticast_disable,
        .dev_infos_get        = ixgbevf_dev_info_get,
        .dev_supported_ptypes_get = ixgbe_dev_supported_ptypes_get,
        .mtu_set              = ixgbevf_dev_set_mtu,
        .vlan_filter_set      = ixgbevf_vlan_filter_set,
        .vlan_strip_queue_set = ixgbevf_vlan_strip_queue_set,
        .vlan_offload_set     = ixgbevf_vlan_offload_set,
        .rx_queue_setup       = ixgbe_dev_rx_queue_setup,
        .rx_queue_release     = ixgbe_dev_rx_queue_release,
        .rx_descriptor_done   = ixgbe_dev_rx_descriptor_done,
        .rx_descriptor_status = ixgbe_dev_rx_descriptor_status,
        .tx_descriptor_status = ixgbe_dev_tx_descriptor_status,
        .tx_queue_setup       = ixgbe_dev_tx_queue_setup,
        .tx_queue_release     = ixgbe_dev_tx_queue_release,
        .rx_queue_intr_enable = ixgbevf_dev_rx_queue_intr_enable,
        .rx_queue_intr_disable = ixgbevf_dev_rx_queue_intr_disable,
        .mac_addr_add         = ixgbevf_add_mac_addr,
        .mac_addr_remove      = ixgbevf_remove_mac_addr,
        .set_mc_addr_list     = ixgbe_dev_set_mc_addr_list,
        .rxq_info_get         = ixgbe_rxq_info_get,
        .txq_info_get         = ixgbe_txq_info_get,
        .mac_addr_set         = ixgbevf_set_default_mac_addr,
        .get_reg              = ixgbevf_get_regs,
        .reta_update          = ixgbe_dev_rss_reta_update,
        .reta_query           = ixgbe_dev_rss_reta_query,
        .rss_hash_update      = ixgbe_dev_rss_hash_update,
        .rss_hash_conf_get    = ixgbe_dev_rss_hash_conf_get,
};

/* store statistics names and their offsets in the stats structure */
struct rte_ixgbe_xstats_name_off {
        char name[RTE_ETH_XSTATS_NAME_SIZE];
        unsigned offset;
};
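
/*
 * Illustrative note: each entry pairs an xstats name with the byte offset
 * of its counter inside struct ixgbe_hw_stats, so values can be fetched
 * generically. A minimal sketch, assuming 64-bit counters:
 *
 *   uint64_t val = *(uint64_t *)(((char *)hw_stats) +
 *                       rte_ixgbe_stats_strings[i].offset);
 */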

static const struct rte_ixgbe_xstats_name_off rte_ixgbe_stats_strings[] = {
        {"rx_crc_errors", offsetof(struct ixgbe_hw_stats, crcerrs)},
        {"rx_illegal_byte_errors", offsetof(struct ixgbe_hw_stats, illerrc)},
        {"rx_error_bytes", offsetof(struct ixgbe_hw_stats, errbc)},
        {"mac_local_errors", offsetof(struct ixgbe_hw_stats, mlfc)},
        {"mac_remote_errors", offsetof(struct ixgbe_hw_stats, mrfc)},
        {"rx_length_errors", offsetof(struct ixgbe_hw_stats, rlec)},
        {"tx_xon_packets", offsetof(struct ixgbe_hw_stats, lxontxc)},
        {"rx_xon_packets", offsetof(struct ixgbe_hw_stats, lxonrxc)},
        {"tx_xoff_packets", offsetof(struct ixgbe_hw_stats, lxofftxc)},
        {"rx_xoff_packets", offsetof(struct ixgbe_hw_stats, lxoffrxc)},
        {"rx_size_64_packets", offsetof(struct ixgbe_hw_stats, prc64)},
        {"rx_size_65_to_127_packets", offsetof(struct ixgbe_hw_stats, prc127)},
        {"rx_size_128_to_255_packets", offsetof(struct ixgbe_hw_stats, prc255)},
        {"rx_size_256_to_511_packets", offsetof(struct ixgbe_hw_stats, prc511)},
        {"rx_size_512_to_1023_packets", offsetof(struct ixgbe_hw_stats,
                prc1023)},
        {"rx_size_1024_to_max_packets", offsetof(struct ixgbe_hw_stats,
                prc1522)},
        {"rx_broadcast_packets", offsetof(struct ixgbe_hw_stats, bprc)},
        {"rx_multicast_packets", offsetof(struct ixgbe_hw_stats, mprc)},
        {"rx_fragment_errors", offsetof(struct ixgbe_hw_stats, rfc)},
        {"rx_undersize_errors", offsetof(struct ixgbe_hw_stats, ruc)},
        {"rx_oversize_errors", offsetof(struct ixgbe_hw_stats, roc)},
        {"rx_jabber_errors", offsetof(struct ixgbe_hw_stats, rjc)},
        {"rx_management_packets", offsetof(struct ixgbe_hw_stats, mngprc)},
        {"rx_management_dropped", offsetof(struct ixgbe_hw_stats, mngpdc)},
        {"tx_management_packets", offsetof(struct ixgbe_hw_stats, mngptc)},
        {"rx_total_packets", offsetof(struct ixgbe_hw_stats, tpr)},
        {"rx_total_bytes", offsetof(struct ixgbe_hw_stats, tor)},
        {"tx_total_packets", offsetof(struct ixgbe_hw_stats, tpt)},
        {"tx_size_64_packets", offsetof(struct ixgbe_hw_stats, ptc64)},
        {"tx_size_65_to_127_packets", offsetof(struct ixgbe_hw_stats, ptc127)},
        {"tx_size_128_to_255_packets", offsetof(struct ixgbe_hw_stats, ptc255)},
        {"tx_size_256_to_511_packets", offsetof(struct ixgbe_hw_stats, ptc511)},
        {"tx_size_512_to_1023_packets", offsetof(struct ixgbe_hw_stats,
                ptc1023)},
        {"tx_size_1024_to_max_packets", offsetof(struct ixgbe_hw_stats,
                ptc1522)},
        {"tx_multicast_packets", offsetof(struct ixgbe_hw_stats, mptc)},
        {"tx_broadcast_packets", offsetof(struct ixgbe_hw_stats, bptc)},
        {"rx_mac_short_packet_dropped", offsetof(struct ixgbe_hw_stats, mspdc)},
        {"rx_l3_l4_xsum_error", offsetof(struct ixgbe_hw_stats, xec)},

        {"flow_director_added_filters", offsetof(struct ixgbe_hw_stats,
                fdirustat_add)},
        {"flow_director_removed_filters", offsetof(struct ixgbe_hw_stats,
                fdirustat_remove)},
        {"flow_director_filter_add_errors", offsetof(struct ixgbe_hw_stats,
                fdirfstat_fadd)},
        {"flow_director_filter_remove_errors", offsetof(struct ixgbe_hw_stats,
                fdirfstat_fremove)},
        {"flow_director_matched_filters", offsetof(struct ixgbe_hw_stats,
                fdirmatch)},
        {"flow_director_missed_filters", offsetof(struct ixgbe_hw_stats,
                fdirmiss)},

        {"rx_fcoe_crc_errors", offsetof(struct ixgbe_hw_stats, fccrc)},
        {"rx_fcoe_dropped", offsetof(struct ixgbe_hw_stats, fcoerpdc)},
        {"rx_fcoe_mbuf_allocation_errors", offsetof(struct ixgbe_hw_stats,
                fclast)},
        {"rx_fcoe_packets", offsetof(struct ixgbe_hw_stats, fcoeprc)},
        {"tx_fcoe_packets", offsetof(struct ixgbe_hw_stats, fcoeptc)},
        {"rx_fcoe_bytes", offsetof(struct ixgbe_hw_stats, fcoedwrc)},
        {"tx_fcoe_bytes", offsetof(struct ixgbe_hw_stats, fcoedwtc)},
        {"rx_fcoe_no_direct_data_placement", offsetof(struct ixgbe_hw_stats,
                fcoe_noddp)},
        {"rx_fcoe_no_direct_data_placement_ext_buff",
                offsetof(struct ixgbe_hw_stats, fcoe_noddp_ext_buff)},

        {"tx_flow_control_xon_packets", offsetof(struct ixgbe_hw_stats,
                lxontxc)},
        {"rx_flow_control_xon_packets", offsetof(struct ixgbe_hw_stats,
                lxonrxc)},
        {"tx_flow_control_xoff_packets", offsetof(struct ixgbe_hw_stats,
                lxofftxc)},
        {"rx_flow_control_xoff_packets", offsetof(struct ixgbe_hw_stats,
                lxoffrxc)},
        {"rx_total_missed_packets", offsetof(struct ixgbe_hw_stats, mpctotal)},
};

#define IXGBE_NB_HW_STATS (sizeof(rte_ixgbe_stats_strings) / \
                           sizeof(rte_ixgbe_stats_strings[0]))

/* MACsec statistics */
static const struct rte_ixgbe_xstats_name_off rte_ixgbe_macsec_strings[] = {
        {"out_pkts_untagged", offsetof(struct ixgbe_macsec_stats,
                out_pkts_untagged)},
        {"out_pkts_encrypted", offsetof(struct ixgbe_macsec_stats,
                out_pkts_encrypted)},
        {"out_pkts_protected", offsetof(struct ixgbe_macsec_stats,
                out_pkts_protected)},
        {"out_octets_encrypted", offsetof(struct ixgbe_macsec_stats,
                out_octets_encrypted)},
        {"out_octets_protected", offsetof(struct ixgbe_macsec_stats,
                out_octets_protected)},
        {"in_pkts_untagged", offsetof(struct ixgbe_macsec_stats,
                in_pkts_untagged)},
        {"in_pkts_badtag", offsetof(struct ixgbe_macsec_stats,
                in_pkts_badtag)},
        {"in_pkts_nosci", offsetof(struct ixgbe_macsec_stats,
                in_pkts_nosci)},
        {"in_pkts_unknownsci", offsetof(struct ixgbe_macsec_stats,
                in_pkts_unknownsci)},
        {"in_octets_decrypted", offsetof(struct ixgbe_macsec_stats,
                in_octets_decrypted)},
        {"in_octets_validated", offsetof(struct ixgbe_macsec_stats,
                in_octets_validated)},
        {"in_pkts_unchecked", offsetof(struct ixgbe_macsec_stats,
                in_pkts_unchecked)},
        {"in_pkts_delayed", offsetof(struct ixgbe_macsec_stats,
                in_pkts_delayed)},
        {"in_pkts_late", offsetof(struct ixgbe_macsec_stats,
                in_pkts_late)},
        {"in_pkts_ok", offsetof(struct ixgbe_macsec_stats,
                in_pkts_ok)},
        {"in_pkts_invalid", offsetof(struct ixgbe_macsec_stats,
                in_pkts_invalid)},
        {"in_pkts_notvalid", offsetof(struct ixgbe_macsec_stats,
                in_pkts_notvalid)},
        {"in_pkts_unusedsa", offsetof(struct ixgbe_macsec_stats,
                in_pkts_unusedsa)},
        {"in_pkts_notusingsa", offsetof(struct ixgbe_macsec_stats,
                in_pkts_notusingsa)},
};

#define IXGBE_NB_MACSEC_STATS (sizeof(rte_ixgbe_macsec_strings) / \
                           sizeof(rte_ixgbe_macsec_strings[0]))

/* Per-queue statistics */
static const struct rte_ixgbe_xstats_name_off rte_ixgbe_rxq_strings[] = {
        {"mbuf_allocation_errors", offsetof(struct ixgbe_hw_stats, rnbc)},
        {"dropped", offsetof(struct ixgbe_hw_stats, mpc)},
        {"xon_packets", offsetof(struct ixgbe_hw_stats, pxonrxc)},
        {"xoff_packets", offsetof(struct ixgbe_hw_stats, pxoffrxc)},
};

#define IXGBE_NB_RXQ_PRIO_STATS (sizeof(rte_ixgbe_rxq_strings) / \
                           sizeof(rte_ixgbe_rxq_strings[0]))
#define IXGBE_NB_RXQ_PRIO_VALUES 8

static const struct rte_ixgbe_xstats_name_off rte_ixgbe_txq_strings[] = {
        {"xon_packets", offsetof(struct ixgbe_hw_stats, pxontxc)},
        {"xoff_packets", offsetof(struct ixgbe_hw_stats, pxofftxc)},
        {"xon_to_xoff_packets", offsetof(struct ixgbe_hw_stats,
                pxon2offc)},
};

#define IXGBE_NB_TXQ_PRIO_STATS (sizeof(rte_ixgbe_txq_strings) / \
                           sizeof(rte_ixgbe_txq_strings[0]))
#define IXGBE_NB_TXQ_PRIO_VALUES 8
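
/*
 * Illustrative note (an assumption about how these tables are consumed):
 * the per-queue strings above are expanded once per priority when xstats
 * are reported, yielding IXGBE_NB_RXQ_PRIO_STATS * IXGBE_NB_RXQ_PRIO_VALUES
 * Rx entries (and likewise for Tx), with names along the lines of
 * "rx_priority0_xon_packets" through "rx_priority7_xon_packets".
 */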

static const struct rte_ixgbe_xstats_name_off rte_ixgbevf_stats_strings[] = {
        {"rx_multicast_packets", offsetof(struct ixgbevf_hw_stats, vfmprc)},
};

#define IXGBEVF_NB_XSTATS (sizeof(rte_ixgbevf_stats_strings) /  \
                sizeof(rte_ixgbevf_stats_strings[0]))

/**
 * Atomically reads the link status information from the global
 * structure rte_eth_dev.
 *
 * @param dev
 *   Pointer to the structure rte_eth_dev to read from.
 * @param link
 *   Pointer to the buffer in which to save the link status.
 *
 * @return
 *   - On success, zero.
 *   - On failure, negative value.
 */
static inline int
rte_ixgbe_dev_atomic_read_link_status(struct rte_eth_dev *dev,
                                struct rte_eth_link *link)
{
        struct rte_eth_link *dst = link;
        struct rte_eth_link *src = &(dev->data->dev_link);

        if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
                                        *(uint64_t *)src) == 0)
                return -1;

        return 0;
}

/**
 * Atomically writes the link status information into the global
 * structure rte_eth_dev.
 *
 * @param dev
 *   Pointer to the structure rte_eth_dev to write to.
 * @param link
 *   Pointer to the buffer holding the link status to write.
 *
 * @return
 *   - On success, zero.
 *   - On failure, negative value.
 */
static inline int
rte_ixgbe_dev_atomic_write_link_status(struct rte_eth_dev *dev,
                                struct rte_eth_link *link)
{
        struct rte_eth_link *dst = &(dev->data->dev_link);
        struct rte_eth_link *src = link;

        if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
                                        *(uint64_t *)src) == 0)
                return -1;

        return 0;
}
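
/*
 * Illustrative note: both helpers copy the 64-bit link word with
 * rte_atomic64_cmpset(dst, *dst, *src); the compare value is the
 * destination's just-read contents, so the swap fails (returning -1 to
 * the caller) only if another thread updates the word between the read
 * of *dst and the cmpset itself.
 */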

/*
 * This function is the same as ixgbe_is_sfp() in base/ixgbe.h.
 */
static inline int
ixgbe_is_sfp(struct ixgbe_hw *hw)
{
        switch (hw->phy.type) {
        case ixgbe_phy_sfp_avago:
        case ixgbe_phy_sfp_ftl:
        case ixgbe_phy_sfp_intel:
        case ixgbe_phy_sfp_unknown:
        case ixgbe_phy_sfp_passive_tyco:
        case ixgbe_phy_sfp_passive_unknown:
                return 1;
        default:
                return 0;
        }
}

static inline int32_t
ixgbe_pf_reset_hw(struct ixgbe_hw *hw)
{
        uint32_t ctrl_ext;
        int32_t status;

        status = ixgbe_reset_hw(hw);

        ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
        /* Set PF Reset Done bit so PF/VF Mail Ops can work */
        ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
        IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
        IXGBE_WRITE_FLUSH(hw);

        if (status == IXGBE_ERR_SFP_NOT_PRESENT)
                status = IXGBE_SUCCESS;
        return status;
}

static inline void
ixgbe_enable_intr(struct rte_eth_dev *dev)
{
        struct ixgbe_interrupt *intr =
                IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
        struct ixgbe_hw *hw =
                IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

        IXGBE_WRITE_REG(hw, IXGBE_EIMS, intr->mask);
        IXGBE_WRITE_FLUSH(hw);
}

/*
 * This function is based on ixgbe_disable_intr() in base/ixgbe.h.
 */
static void
ixgbe_disable_intr(struct ixgbe_hw *hw)
{
        PMD_INIT_FUNC_TRACE();

        if (hw->mac.type == ixgbe_mac_82598EB) {
                IXGBE_WRITE_REG(hw, IXGBE_EIMC, ~0);
        } else {
                IXGBE_WRITE_REG(hw, IXGBE_EIMC, 0xFFFF0000);
                IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), ~0);
                IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), ~0);
        }
        IXGBE_WRITE_FLUSH(hw);
}
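
/*
 * Illustrative note (an assumption about the register layout, not from
 * the original source): 82598 exposes a single EIMC register, so writing
 * ~0 masks every interrupt cause. Later MACs carry additional MSI-X
 * causes in extended registers, so this path masks the upper EICR causes
 * through EIMC and the extended ones through EIMC_EX(0)/EIMC_EX(1).
 */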

/*
 * This function resets queue statistics mapping registers.
 * From Niantic datasheet, Initialization of Statistics section:
 * "...if software requires the queue counters, the RQSMR and TQSM registers
 * must be re-programmed following a device reset."
 */
static void
ixgbe_reset_qstat_mappings(struct ixgbe_hw *hw)
{
        uint32_t i;

        for (i = 0; i != IXGBE_NB_STAT_MAPPING_REGS; i++) {
                IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), 0);
                IXGBE_WRITE_REG(hw, IXGBE_TQSM(i), 0);
        }
}


static int
ixgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev,
                                  uint16_t queue_id,
                                  uint8_t stat_idx,
                                  uint8_t is_rx)
{
#define QSM_REG_NB_BITS_PER_QMAP_FIELD 8
#define NB_QMAP_FIELDS_PER_QSM_REG 4
#define QMAP_FIELD_RESERVED_BITS_MASK 0x0f

        struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
        struct ixgbe_stat_mapping_registers *stat_mappings =
                IXGBE_DEV_PRIVATE_TO_STAT_MAPPINGS(eth_dev->data->dev_private);
        uint32_t qsmr_mask = 0;
        uint32_t clearing_mask = QMAP_FIELD_RESERVED_BITS_MASK;
        uint32_t q_map;
        uint8_t n, offset;

        if ((hw->mac.type != ixgbe_mac_82599EB) &&
                (hw->mac.type != ixgbe_mac_X540) &&
                (hw->mac.type != ixgbe_mac_X550) &&
                (hw->mac.type != ixgbe_mac_X550EM_x) &&
                (hw->mac.type != ixgbe_mac_X550EM_a))
                return -ENOSYS;

        PMD_INIT_LOG(DEBUG, "Setting port %d, %s queue_id %d to stat index %d",
                     (int)(eth_dev->data->port_id), is_rx ? "RX" : "TX",
                     queue_id, stat_idx);

        n = (uint8_t)(queue_id / NB_QMAP_FIELDS_PER_QSM_REG);
        if (n >= IXGBE_NB_STAT_MAPPING_REGS) {
                PMD_INIT_LOG(ERR, "Nb of stat mapping registers exceeded");
                return -EIO;
        }
        offset = (uint8_t)(queue_id % NB_QMAP_FIELDS_PER_QSM_REG);

        /* Now clear any previous stat_idx set */
        clearing_mask <<= (QSM_REG_NB_BITS_PER_QMAP_FIELD * offset);
        if (!is_rx)
                stat_mappings->tqsm[n] &= ~clearing_mask;
        else
                stat_mappings->rqsmr[n] &= ~clearing_mask;

        q_map = (uint32_t)stat_idx;
        q_map &= QMAP_FIELD_RESERVED_BITS_MASK;
        qsmr_mask = q_map << (QSM_REG_NB_BITS_PER_QMAP_FIELD * offset);
        if (!is_rx)
                stat_mappings->tqsm[n] |= qsmr_mask;
        else
                stat_mappings->rqsmr[n] |= qsmr_mask;

        PMD_INIT_LOG(DEBUG, "Set port %d, %s queue_id %d to stat index %d",
                     (int)(eth_dev->data->port_id), is_rx ? "RX" : "TX",
                     queue_id, stat_idx);
        PMD_INIT_LOG(DEBUG, "%s[%d] = 0x%08x", is_rx ? "RQSMR" : "TQSM", n,
                     is_rx ? stat_mappings->rqsmr[n] : stat_mappings->tqsm[n]);

        /* Now write the mapping in the appropriate register */
        if (is_rx) {
                PMD_INIT_LOG(DEBUG, "Write 0x%x to RX IXGBE stat mapping reg:%d",
                             stat_mappings->rqsmr[n], n);
                IXGBE_WRITE_REG(hw, IXGBE_RQSMR(n), stat_mappings->rqsmr[n]);
        } else {
                PMD_INIT_LOG(DEBUG, "Write 0x%x to TX IXGBE stat mapping reg:%d",
                             stat_mappings->tqsm[n], n);
                IXGBE_WRITE_REG(hw, IXGBE_TQSM(n), stat_mappings->tqsm[n]);
        }
        return 0;
}
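
/*
 * Illustrative worked example (not from the original source): each
 * RQSMR/TQSM register holds four 8-bit queue fields, of which the low
 * four bits carry the stat index. Mapping queue_id 5 to stat_idx 3 thus
 * selects register n = 5 / 4 = 1 and field offset 5 % 4 = 1, clears bits
 * 11:8 of that register and ORs in 0x3 << 8.
 */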

static void
ixgbe_restore_statistics_mapping(struct rte_eth_dev *dev)
{
        struct ixgbe_stat_mapping_registers *stat_mappings =
                IXGBE_DEV_PRIVATE_TO_STAT_MAPPINGS(dev->data->dev_private);
        struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        int i;

        /* write whatever was in stat mapping table to the NIC */
        for (i = 0; i < IXGBE_NB_STAT_MAPPING_REGS; i++) {
                /* rx */
                IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), stat_mappings->rqsmr[i]);

                /* tx */
                IXGBE_WRITE_REG(hw, IXGBE_TQSM(i), stat_mappings->tqsm[i]);
        }
}

static void
ixgbe_dcb_init(struct ixgbe_hw *hw, struct ixgbe_dcb_config *dcb_config)
{
        uint8_t i;
        struct ixgbe_dcb_tc_config *tc;
        uint8_t dcb_max_tc = IXGBE_DCB_MAX_TRAFFIC_CLASS;

        dcb_config->num_tcs.pg_tcs = dcb_max_tc;
        dcb_config->num_tcs.pfc_tcs = dcb_max_tc;
        for (i = 0; i < dcb_max_tc; i++) {
                tc = &dcb_config->tc_config[i];
                tc->path[IXGBE_DCB_TX_CONFIG].bwg_id = i;
                tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent =
                                 (uint8_t)(100/dcb_max_tc + (i & 1));
                tc->path[IXGBE_DCB_RX_CONFIG].bwg_id = i;
                tc->path[IXGBE_DCB_RX_CONFIG].bwg_percent =
                                 (uint8_t)(100/dcb_max_tc + (i & 1));
                tc->pfc = ixgbe_dcb_pfc_disabled;
        }

        /* Initialize default user to priority mapping, UPx->TC0 */
        tc = &dcb_config->tc_config[0];
        tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap = 0xFF;
        tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap = 0xFF;
        for (i = 0; i < IXGBE_DCB_MAX_BW_GROUP; i++) {
                dcb_config->bw_percentage[IXGBE_DCB_TX_CONFIG][i] = 100;
                dcb_config->bw_percentage[IXGBE_DCB_RX_CONFIG][i] = 100;
        }
        dcb_config->rx_pba_cfg = ixgbe_dcb_pba_equal;
        dcb_config->pfc_mode_enable = false;
        dcb_config->vt_mode = true;
        dcb_config->round_robin_enable = false;
        /* support all DCB capabilities in 82599 */
        dcb_config->support.capabilities = 0xFF;

        /* we only support 4 TCs for X540, X550 */
        if (hw->mac.type == ixgbe_mac_X540 ||
                hw->mac.type == ixgbe_mac_X550 ||
                hw->mac.type == ixgbe_mac_X550EM_x ||
                hw->mac.type == ixgbe_mac_X550EM_a) {
                dcb_config->num_tcs.pg_tcs = 4;
                dcb_config->num_tcs.pfc_tcs = 4;
        }
}
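
/*
 * Illustrative arithmetic note: with the default 8 traffic classes the
 * bandwidth split above is 100/8 + (i & 1), i.e. 12% for even TCs and
 * 13% for odd ones, which sums to 4*12 + 4*13 = 100 percent.
 */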

/*
 * Ensure that all locks are released before first NVM or PHY access
 */
static void
ixgbe_swfw_lock_reset(struct ixgbe_hw *hw)
{
        uint16_t mask;

        /*
         * Acquiring the PHY lock should not fail at this early stage. If it
         * does, it is due to an improper exit of the application, so force
         * the release of the faulty lock. Release of the common lock is done
         * automatically by the swfw_sync function.
         */
        mask = IXGBE_GSSR_PHY0_SM << hw->bus.func;
        if (ixgbe_acquire_swfw_semaphore(hw, mask) < 0) {
                PMD_DRV_LOG(DEBUG, "SWFW phy%d lock released", hw->bus.func);
        }
        ixgbe_release_swfw_semaphore(hw, mask);

        /*
         * These locks are trickier since they are common to all ports; but
         * the swfw_sync retries last long enough (1s) to make it almost
         * certain that if the lock cannot be taken, it is due to an improper
         * lock of the semaphore.
         */
        mask = IXGBE_GSSR_EEP_SM | IXGBE_GSSR_MAC_CSR_SM | IXGBE_GSSR_SW_MNG_SM;
        if (ixgbe_acquire_swfw_semaphore(hw, mask) < 0) {
                PMD_DRV_LOG(DEBUG, "SWFW common locks released");
        }
        ixgbe_release_swfw_semaphore(hw, mask);
}

/*
 * This function is based on code in ixgbe_attach() in base/ixgbe.c.
 * It returns 0 on success.
 */
static int
eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev)
{
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
        struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
        struct ixgbe_hw *hw =
                IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
        struct ixgbe_vfta *shadow_vfta =
                IXGBE_DEV_PRIVATE_TO_VFTA(eth_dev->data->dev_private);
        struct ixgbe_hwstrip *hwstrip =
                IXGBE_DEV_PRIVATE_TO_HWSTRIP_BITMAP(eth_dev->data->dev_private);
        struct ixgbe_dcb_config *dcb_config =
                IXGBE_DEV_PRIVATE_TO_DCB_CFG(eth_dev->data->dev_private);
        struct ixgbe_filter_info *filter_info =
                IXGBE_DEV_PRIVATE_TO_FILTER_INFO(eth_dev->data->dev_private);
        struct ixgbe_bw_conf *bw_conf =
                IXGBE_DEV_PRIVATE_TO_BW_CONF(eth_dev->data->dev_private);
        uint32_t ctrl_ext;
        uint16_t csum;
        int diag, i;

        PMD_INIT_FUNC_TRACE();

        eth_dev->dev_ops = &ixgbe_eth_dev_ops;
        eth_dev->rx_pkt_burst = &ixgbe_recv_pkts;
        eth_dev->tx_pkt_burst = &ixgbe_xmit_pkts;
        eth_dev->tx_pkt_prepare = &ixgbe_prep_pkts;

1152         /*
1153          * For secondary processes, we don't initialise any further as primary
1154          * has already done this work. Only check we don't need a different
1155          * RX and TX function.
1156          */
1157         if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
1158                 struct ixgbe_tx_queue *txq;
1159                 /* TX queue function in primary was set by the last queue
1160                  * initialized; TX queues may not be initialized by primary yet.
1161                  */
1162                 if (eth_dev->data->tx_queues) {
1163                         txq = eth_dev->data->tx_queues[eth_dev->data->nb_tx_queues-1];
1164                         ixgbe_set_tx_function(eth_dev, txq);
1165                 } else {
1166                         /* Use default TX function if we get here */
1167                         PMD_INIT_LOG(NOTICE, "No TX queues configured yet. "
1168                                      "Using default TX function.");
1169                 }
1170
1171                 ixgbe_set_rx_function(eth_dev);
1172
1173                 return 0;
1174         }
1175
1176 #ifdef RTE_LIBRTE_SECURITY
1177         /* Initialize security_ctx only for the primary process */
1178         eth_dev->security_ctx = ixgbe_ipsec_ctx_create(eth_dev);
1179         if (eth_dev->security_ctx == NULL)
1180                 return -ENOMEM;
1181 #endif
1182
1183         rte_eth_copy_pci_info(eth_dev, pci_dev);
1184
1185         /* Vendor and Device ID need to be set before init of shared code */
1186         hw->device_id = pci_dev->id.device_id;
1187         hw->vendor_id = pci_dev->id.vendor_id;
1188         hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
1189         hw->allow_unsupported_sfp = 1;
1190
1191         /* Initialize the shared code (base driver) */
1192 #ifdef RTE_LIBRTE_IXGBE_BYPASS
1193         diag = ixgbe_bypass_init_shared_code(hw);
1194 #else
1195         diag = ixgbe_init_shared_code(hw);
1196 #endif /* RTE_LIBRTE_IXGBE_BYPASS */
1197
1198         if (diag != IXGBE_SUCCESS) {
1199                 PMD_INIT_LOG(ERR, "Shared code init failed: %d", diag);
1200                 return -EIO;
1201         }
1202
1203         /* pick up the PCI bus settings for reporting later */
1204         ixgbe_get_bus_info(hw);
1205
1206         /* Unlock any pending hardware semaphore */
1207         ixgbe_swfw_lock_reset(hw);
1208
1209         /* Initialize DCB configuration */
1210         memset(dcb_config, 0, sizeof(struct ixgbe_dcb_config));
1211         ixgbe_dcb_init(hw, dcb_config);
1212         /* Get Hardware Flow Control setting */
1213         hw->fc.requested_mode = ixgbe_fc_full;
1214         hw->fc.current_mode = ixgbe_fc_full;
1215         hw->fc.pause_time = IXGBE_FC_PAUSE;
1216         for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
1217                 hw->fc.low_water[i] = IXGBE_FC_LO;
1218                 hw->fc.high_water[i] = IXGBE_FC_HI;
1219         }
1220         hw->fc.send_xon = 1;
1221
1222         /* Make sure we have a good EEPROM before we read from it */
1223         diag = ixgbe_validate_eeprom_checksum(hw, &csum);
1224         if (diag != IXGBE_SUCCESS) {
1225                 PMD_INIT_LOG(ERR, "The EEPROM checksum is not valid: %d", diag);
1226                 return -EIO;
1227         }
1228
1229 #ifdef RTE_LIBRTE_IXGBE_BYPASS
1230         diag = ixgbe_bypass_init_hw(hw);
1231 #else
1232         diag = ixgbe_init_hw(hw);
1233 #endif /* RTE_LIBRTE_IXGBE_BYPASS */
1234
1235         /*
1236          * Devices with copper phys will fail to initialise if ixgbe_init_hw()
1237          * is called too soon after the kernel driver unbinding/binding occurs.
1238          * The failure occurs in ixgbe_identify_phy_generic() for all devices,
1239          * but for non-copper devices, ixgbe_identify_sfp_module_generic() is
1240          * also called. See ixgbe_identify_phy_82599(). The reason for the
1241          * failure is not known, and it only occurs when virtualisation
1242          * features are disabled in the BIOS. A delay of 100ms was found to
1243          * be enough by trial-and-error, and is doubled to be safe.
1244          */
1245         if (diag && (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper)) {
1246                 rte_delay_ms(200);
1247                 diag = ixgbe_init_hw(hw);
1248         }
1249
1250         if (diag == IXGBE_ERR_SFP_NOT_PRESENT)
1251                 diag = IXGBE_SUCCESS;
1252
1253         if (diag == IXGBE_ERR_EEPROM_VERSION) {
1254                 PMD_INIT_LOG(ERR, "This device is a pre-production adapter/"
1255                              "LOM.  Please be aware there may be issues associated "
1256                              "with your hardware.");
1257                 PMD_INIT_LOG(ERR, "If you are experiencing problems "
1258                              "please contact your Intel or hardware representative "
1259                              "who provided you with this hardware.");
1260         } else if (diag == IXGBE_ERR_SFP_NOT_SUPPORTED)
1261                 PMD_INIT_LOG(ERR, "Unsupported SFP+ Module");
1262         if (diag) {
1263                 PMD_INIT_LOG(ERR, "Hardware Initialization Failure: %d", diag);
1264                 return -EIO;
1265         }
1266
1267         /* Reset the hw statistics */
1268         ixgbe_dev_stats_reset(eth_dev);
1269
1270         /* disable interrupt */
1271         ixgbe_disable_intr(hw);
1272
1273         /* reset mappings for queue statistics hw counters */
1274         ixgbe_reset_qstat_mappings(hw);
1275
1276         /* Allocate memory for storing MAC addresses */
1277         eth_dev->data->mac_addrs = rte_zmalloc("ixgbe", ETHER_ADDR_LEN *
1278                                                hw->mac.num_rar_entries, 0);
1279         if (eth_dev->data->mac_addrs == NULL) {
1280                 PMD_INIT_LOG(ERR,
1281                              "Failed to allocate %u bytes needed to store "
1282                              "MAC addresses",
1283                              ETHER_ADDR_LEN * hw->mac.num_rar_entries);
1284                 return -ENOMEM;
1285         }
1286         /* Copy the permanent MAC address */
1287         ether_addr_copy((struct ether_addr *) hw->mac.perm_addr,
1288                         &eth_dev->data->mac_addrs[0]);
1289
1290         /* Allocate memory for storing hash filter MAC addresses */
1291         eth_dev->data->hash_mac_addrs = rte_zmalloc("ixgbe", ETHER_ADDR_LEN *
1292                                                     IXGBE_VMDQ_NUM_UC_MAC, 0);
1293         if (eth_dev->data->hash_mac_addrs == NULL) {
1294                 PMD_INIT_LOG(ERR,
1295                              "Failed to allocate %d bytes needed to store MAC addresses",
1296                              ETHER_ADDR_LEN * IXGBE_VMDQ_NUM_UC_MAC);
1297                 return -ENOMEM;
1298         }
1299
1300         /* initialize the vfta */
1301         memset(shadow_vfta, 0, sizeof(*shadow_vfta));
1302
1303         /* initialize the hw strip bitmap */
1304         memset(hwstrip, 0, sizeof(*hwstrip));
1305
1306         /* initialize PF if max_vfs not zero */
1307         ixgbe_pf_host_init(eth_dev);
1308
1309         ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
1310         /* let hardware know driver is loaded */
1311         ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
1312         /* Set PF Reset Done bit so PF/VF Mail Ops can work */
1313         ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
1314         IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
1315         IXGBE_WRITE_FLUSH(hw);
1316
1317         if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present)
1318                 PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d, SFP+: %d",
1319                              (int) hw->mac.type, (int) hw->phy.type,
1320                              (int) hw->phy.sfp_type);
1321         else
1322                 PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d",
1323                              (int) hw->mac.type, (int) hw->phy.type);
1324
1325         PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x",
1326                      eth_dev->data->port_id, pci_dev->id.vendor_id,
1327                      pci_dev->id.device_id);
1328
1329         rte_intr_callback_register(intr_handle,
1330                                    ixgbe_dev_interrupt_handler, eth_dev);
1331
1332         /* enable uio/vfio intr/eventfd mapping */
1333         rte_intr_enable(intr_handle);
1334
1335         /* enable support intr */
1336         ixgbe_enable_intr(eth_dev);
1337
1338         /* initialize filter info */
1339         memset(filter_info, 0,
1340                sizeof(struct ixgbe_filter_info));
1341
1342         /* initialize 5tuple filter list */
1343         TAILQ_INIT(&filter_info->fivetuple_list);
1344
1345         /* initialize flow director filter list & hash */
1346         ixgbe_fdir_filter_init(eth_dev);
1347
1348         /* initialize l2 tunnel filter list & hash */
1349         ixgbe_l2_tn_filter_init(eth_dev);
1350
1351         /* initialize flow filter lists */
1352         ixgbe_filterlist_init();
1353
1354         /* initialize bandwidth configuration info */
1355         memset(bw_conf, 0, sizeof(struct ixgbe_bw_conf));
1356
1357         /* initialize Traffic Manager configuration */
1358         ixgbe_tm_conf_init(eth_dev);
1359
1360         return 0;
1361 }
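
/*
 * Illustrative sketch (editor's note, not part of the driver):
 * eth_ixgbe_dev_init() is not called by applications directly; it runs from
 * the PCI probe path registered below (eth_ixgbe_pci_probe()). A minimal
 * sequence that exercises the port afterwards, assuming port_id refers to a
 * probed ixgbe port and mp is an existing mbuf mempool, might be:
 *
 *     struct rte_eth_conf conf = { 0 };
 *
 *     rte_eth_dev_configure(port_id, 1, 1, &conf);
 *     rte_eth_rx_queue_setup(port_id, 0, 128,
 *                            rte_eth_dev_socket_id(port_id), NULL, mp);
 *     rte_eth_tx_queue_setup(port_id, 0, 512,
 *                            rte_eth_dev_socket_id(port_id), NULL);
 *     rte_eth_dev_start(port_id);
 */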
1362
1363 static int
1364 eth_ixgbe_dev_uninit(struct rte_eth_dev *eth_dev)
1365 {
1366         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
1367         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
1368         struct ixgbe_hw *hw;
1369
1370         PMD_INIT_FUNC_TRACE();
1371
1372         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1373                 return -EPERM;
1374
1375         hw = IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
1376
1377         if (hw->adapter_stopped == 0)
1378                 ixgbe_dev_close(eth_dev);
1379
1380         eth_dev->dev_ops = NULL;
1381         eth_dev->rx_pkt_burst = NULL;
1382         eth_dev->tx_pkt_burst = NULL;
1383
1384         /* Unlock any pending hardware semaphore */
1385         ixgbe_swfw_lock_reset(hw);
1386
1387         /* disable uio intr before callback unregister */
1388         rte_intr_disable(intr_handle);
1389         rte_intr_callback_unregister(intr_handle,
1390                                      ixgbe_dev_interrupt_handler, eth_dev);
1391
1392         /* uninitialize PF if max_vfs not zero */
1393         ixgbe_pf_host_uninit(eth_dev);
1394
1395         rte_free(eth_dev->data->mac_addrs);
1396         eth_dev->data->mac_addrs = NULL;
1397
1398         rte_free(eth_dev->data->hash_mac_addrs);
1399         eth_dev->data->hash_mac_addrs = NULL;
1400
1401         /* remove all the fdir filters & hash */
1402         ixgbe_fdir_filter_uninit(eth_dev);
1403
1404         /* remove all the L2 tunnel filters & hash */
1405         ixgbe_l2_tn_filter_uninit(eth_dev);
1406
1407         /* Remove all ntuple filters of the device */
1408         ixgbe_ntuple_filter_uninit(eth_dev);
1409
1410         /* clear all the filters list */
1411         ixgbe_filterlist_flush();
1412
1413         /* Remove all Traffic Manager configuration */
1414         ixgbe_tm_conf_uninit(eth_dev);
1415
1416 #ifdef RTE_LIBRTE_SECURITY
1417         rte_free(eth_dev->security_ctx);
1418 #endif
1419
1420         return 0;
1421 }
1422
1423 static int ixgbe_ntuple_filter_uninit(struct rte_eth_dev *eth_dev)
1424 {
1425         struct ixgbe_filter_info *filter_info =
1426                 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(eth_dev->data->dev_private);
1427         struct ixgbe_5tuple_filter *p_5tuple;
1428
1429         while ((p_5tuple = TAILQ_FIRST(&filter_info->fivetuple_list))) {
1430                 TAILQ_REMOVE(&filter_info->fivetuple_list,
1431                              p_5tuple,
1432                              entries);
1433                 rte_free(p_5tuple);
1434         }
1435         memset(filter_info->fivetuple_mask, 0,
1436                sizeof(uint32_t) * IXGBE_5TUPLE_ARRAY_SIZE);
1437
1438         return 0;
1439 }
1440
1441 static int ixgbe_fdir_filter_uninit(struct rte_eth_dev *eth_dev)
1442 {
1443         struct ixgbe_hw_fdir_info *fdir_info =
1444                 IXGBE_DEV_PRIVATE_TO_FDIR_INFO(eth_dev->data->dev_private);
1445         struct ixgbe_fdir_filter *fdir_filter;
1446
1447         if (fdir_info->hash_map)
1448                 rte_free(fdir_info->hash_map);
1449         if (fdir_info->hash_handle)
1450                 rte_hash_free(fdir_info->hash_handle);
1451
1452         while ((fdir_filter = TAILQ_FIRST(&fdir_info->fdir_list))) {
1453                 TAILQ_REMOVE(&fdir_info->fdir_list,
1454                              fdir_filter,
1455                              entries);
1456                 rte_free(fdir_filter);
1457         }
1458
1459         return 0;
1460 }
1461
1462 static int ixgbe_l2_tn_filter_uninit(struct rte_eth_dev *eth_dev)
1463 {
1464         struct ixgbe_l2_tn_info *l2_tn_info =
1465                 IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(eth_dev->data->dev_private);
1466         struct ixgbe_l2_tn_filter *l2_tn_filter;
1467
1468         if (l2_tn_info->hash_map)
1469                 rte_free(l2_tn_info->hash_map);
1470         if (l2_tn_info->hash_handle)
1471                 rte_hash_free(l2_tn_info->hash_handle);
1472
1473         while ((l2_tn_filter = TAILQ_FIRST(&l2_tn_info->l2_tn_list))) {
1474                 TAILQ_REMOVE(&l2_tn_info->l2_tn_list,
1475                              l2_tn_filter,
1476                              entries);
1477                 rte_free(l2_tn_filter);
1478         }
1479
1480         return 0;
1481 }
1482
1483 static int ixgbe_fdir_filter_init(struct rte_eth_dev *eth_dev)
1484 {
1485         struct ixgbe_hw_fdir_info *fdir_info =
1486                 IXGBE_DEV_PRIVATE_TO_FDIR_INFO(eth_dev->data->dev_private);
1487         char fdir_hash_name[RTE_HASH_NAMESIZE];
1488         struct rte_hash_parameters fdir_hash_params = {
1489                 .name = fdir_hash_name,
1490                 .entries = IXGBE_MAX_FDIR_FILTER_NUM,
1491                 .key_len = sizeof(union ixgbe_atr_input),
1492                 .hash_func = rte_hash_crc,
1493                 .hash_func_init_val = 0,
1494                 .socket_id = rte_socket_id(),
1495         };
1496
1497         TAILQ_INIT(&fdir_info->fdir_list);
1498         snprintf(fdir_hash_name, RTE_HASH_NAMESIZE,
1499                  "fdir_%s", eth_dev->device->name);
1500         fdir_info->hash_handle = rte_hash_create(&fdir_hash_params);
1501         if (!fdir_info->hash_handle) {
1502                 PMD_INIT_LOG(ERR, "Failed to create fdir hash table!");
1503                 return -EINVAL;
1504         }
1505         fdir_info->hash_map = rte_zmalloc("ixgbe",
1506                                           sizeof(struct ixgbe_fdir_filter *) *
1507                                           IXGBE_MAX_FDIR_FILTER_NUM,
1508                                           0);
1509         if (!fdir_info->hash_map) {
1510                 PMD_INIT_LOG(ERR,
1511                              "Failed to allocate memory for fdir hash map!");
1512                 return -ENOMEM;
1513         }
1514         fdir_info->mask_added = FALSE;
1515
1516         return 0;
1517 }
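
/*
 * Illustrative sketch (editor's note, not part of the driver): the hash
 * created above is used with the usual rte_hash insert-by-key pattern, where
 * the returned slot index addresses hash_map[]. Roughly, with a hypothetical
 * filter pointer:
 *
 *     union ixgbe_atr_input key = { ... };
 *     int idx = rte_hash_add_key(fdir_info->hash_handle, &key);
 *
 *     if (idx >= 0)
 *             fdir_info->hash_map[idx] = filter;
 *
 * Lookups mirror this: rte_hash_lookup() returns the same index for the
 * same key.
 */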
1518
1519 static int ixgbe_l2_tn_filter_init(struct rte_eth_dev *eth_dev)
1520 {
1521         struct ixgbe_l2_tn_info *l2_tn_info =
1522                 IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(eth_dev->data->dev_private);
1523         char l2_tn_hash_name[RTE_HASH_NAMESIZE];
1524         struct rte_hash_parameters l2_tn_hash_params = {
1525                 .name = l2_tn_hash_name,
1526                 .entries = IXGBE_MAX_L2_TN_FILTER_NUM,
1527                 .key_len = sizeof(struct ixgbe_l2_tn_key),
1528                 .hash_func = rte_hash_crc,
1529                 .hash_func_init_val = 0,
1530                 .socket_id = rte_socket_id(),
1531         };
1532
1533         TAILQ_INIT(&l2_tn_info->l2_tn_list);
1534         snprintf(l2_tn_hash_name, RTE_HASH_NAMESIZE,
1535                  "l2_tn_%s", eth_dev->device->name);
1536         l2_tn_info->hash_handle = rte_hash_create(&l2_tn_hash_params);
1537         if (!l2_tn_info->hash_handle) {
1538                 PMD_INIT_LOG(ERR, "Failed to create L2 TN hash table!");
1539                 return -EINVAL;
1540         }
1541         l2_tn_info->hash_map = rte_zmalloc("ixgbe",
1542                                    sizeof(struct ixgbe_l2_tn_filter *) *
1543                                    IXGBE_MAX_L2_TN_FILTER_NUM,
1544                                    0);
1545         if (!l2_tn_info->hash_map) {
1546                 PMD_INIT_LOG(ERR,
1547                         "Failed to allocate memory for L2 TN hash map!");
1548                 return -ENOMEM;
1549         }
1550         l2_tn_info->e_tag_en = FALSE;
1551         l2_tn_info->e_tag_fwd_en = FALSE;
1552         l2_tn_info->e_tag_ether_type = DEFAULT_ETAG_ETYPE;
1553
1554         return 0;
1555 }
1556 /*
1557  * Negotiate the mailbox API version with the PF.
1558  * After reset, the API version is always set to the basic one
1559  * (ixgbe_mbox_api_10). We then try to negotiate, starting with the most
1560  * recent version. If all negotiation attempts fail, we proceed with the
1561  * default one (ixgbe_mbox_api_10).
1562  */
1563 static void
1564 ixgbevf_negotiate_api(struct ixgbe_hw *hw)
1565 {
1566         int32_t i;
1567
1568         /* start with highest supported, proceed down */
1569         static const enum ixgbe_pfvf_api_rev sup_ver[] = {
1570                 ixgbe_mbox_api_12,
1571                 ixgbe_mbox_api_11,
1572                 ixgbe_mbox_api_10,
1573         };
1574
1575         for (i = 0;
1576                         i != RTE_DIM(sup_ver) &&
1577                         ixgbevf_negotiate_api_version(hw, sup_ver[i]) != 0;
1578                         i++)
1579                 ;
1580 }
1581
1582 static void
1583 generate_random_mac_addr(struct ether_addr *mac_addr)
1584 {
1585         uint64_t random;
1586
1587         /* Set Organizationally Unique Identifier (OUI) prefix. */
1588         mac_addr->addr_bytes[0] = 0x00;
1589         mac_addr->addr_bytes[1] = 0x09;
1590         mac_addr->addr_bytes[2] = 0xC0;
1591         /* Force indication of locally assigned MAC address. */
1592         mac_addr->addr_bytes[0] |= ETHER_LOCAL_ADMIN_ADDR;
1593         /* Generate the last 3 bytes of the MAC address with a random number. */
1594         random = rte_rand();
1595         memcpy(&mac_addr->addr_bytes[3], &random, 3);
1596 }
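
/*
 * Editor's note (illustrative): ETHER_LOCAL_ADMIN_ADDR is 0x02, so the OR
 * above turns the 00:09:C0 OUI prefix into 02:09:C0. Every generated
 * address therefore looks like 02:09:C0:xx:xx:xx, with the
 * locally-administered bit set and the multicast bit clear.
 */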
1597
1598 /*
1599  * Virtual Function device init
1600  */
1601 static int
1602 eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev)
1603 {
1604         int diag;
1605         uint32_t tc, tcs;
1606         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
1607         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
1608         struct ixgbe_hw *hw =
1609                 IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
1610         struct ixgbe_vfta *shadow_vfta =
1611                 IXGBE_DEV_PRIVATE_TO_VFTA(eth_dev->data->dev_private);
1612         struct ixgbe_hwstrip *hwstrip =
1613                 IXGBE_DEV_PRIVATE_TO_HWSTRIP_BITMAP(eth_dev->data->dev_private);
1614         struct ether_addr *perm_addr = (struct ether_addr *) hw->mac.perm_addr;
1615
1616         PMD_INIT_FUNC_TRACE();
1617
1618         eth_dev->dev_ops = &ixgbevf_eth_dev_ops;
1619         eth_dev->rx_pkt_burst = &ixgbe_recv_pkts;
1620         eth_dev->tx_pkt_burst = &ixgbe_xmit_pkts;
1621
1622         /* for secondary processes, we don't initialise any further as primary
1623          * has already done this work. Only check we don't need a different
1624          * RX function
1625          */
1626         if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
1627                 struct ixgbe_tx_queue *txq;
1628                 /* TX queue function in primary was set by the last queue
1629                  * initialized; TX queues may not be initialized by primary yet.
1630                  */
1631                 if (eth_dev->data->tx_queues) {
1632                         txq = eth_dev->data->tx_queues[eth_dev->data->nb_tx_queues - 1];
1633                         ixgbe_set_tx_function(eth_dev, txq);
1634                 } else {
1635                         /* Use default TX function if we get here */
1636                         PMD_INIT_LOG(NOTICE,
1637                                      "No TX queues configured yet. Using default TX function.");
1638                 }
1639
1640                 ixgbe_set_rx_function(eth_dev);
1641
1642                 return 0;
1643         }
1644
1645         rte_eth_copy_pci_info(eth_dev, pci_dev);
1646
1647         hw->device_id = pci_dev->id.device_id;
1648         hw->vendor_id = pci_dev->id.vendor_id;
1649         hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
1650
1651         /* initialize the vfta */
1652         memset(shadow_vfta, 0, sizeof(*shadow_vfta));
1653
1654         /* initialize the hw strip bitmap */
1655         memset(hwstrip, 0, sizeof(*hwstrip));
1656
1657         /* Initialize the shared code (base driver) */
1658         diag = ixgbe_init_shared_code(hw);
1659         if (diag != IXGBE_SUCCESS) {
1660                 PMD_INIT_LOG(ERR, "Shared code init failed for ixgbevf: %d", diag);
1661                 return -EIO;
1662         }
1663
1664         /* init_mailbox_params */
1665         hw->mbx.ops.init_params(hw);
1666
1667         /* Reset the hw statistics */
1668         ixgbevf_dev_stats_reset(eth_dev);
1669
1670         /* Disable the interrupts for VF */
1671         ixgbevf_intr_disable(hw);
1672
1673         hw->mac.num_rar_entries = 128; /* The MAX of the underlying PF */
1674         diag = hw->mac.ops.reset_hw(hw);
1675
1676         /*
1677          * The VF reset operation returns IXGBE_ERR_INVALID_MAC_ADDR when
1678          * the underlying PF driver has not assigned a MAC address to the VF.
1679          * In this case, assign a random MAC address.
1680          */
1681         if ((diag != IXGBE_SUCCESS) && (diag != IXGBE_ERR_INVALID_MAC_ADDR)) {
1682                 PMD_INIT_LOG(ERR, "VF Initialization Failure: %d", diag);
1683                 return diag;
1684         }
1685
1686         /* negotiate mailbox API version to use with the PF. */
1687         ixgbevf_negotiate_api(hw);
1688
1689         /* Get Rx/Tx queue count via mailbox, which is ready after reset_hw */
1690         ixgbevf_get_queues(hw, &tcs, &tc);
1691
1692         /* Allocate memory for storing MAC addresses */
1693         eth_dev->data->mac_addrs = rte_zmalloc("ixgbevf", ETHER_ADDR_LEN *
1694                                                hw->mac.num_rar_entries, 0);
1695         if (eth_dev->data->mac_addrs == NULL) {
1696                 PMD_INIT_LOG(ERR,
1697                              "Failed to allocate %u bytes needed to store "
1698                              "MAC addresses",
1699                              ETHER_ADDR_LEN * hw->mac.num_rar_entries);
1700                 return -ENOMEM;
1701         }
1702
1703         /* Generate a random MAC address, if none was assigned by PF. */
1704         if (is_zero_ether_addr(perm_addr)) {
1705                 generate_random_mac_addr(perm_addr);
1706                 diag = ixgbe_set_rar_vf(hw, 1, perm_addr->addr_bytes, 0, 1);
1707                 if (diag) {
1708                         rte_free(eth_dev->data->mac_addrs);
1709                         eth_dev->data->mac_addrs = NULL;
1710                         return diag;
1711                 }
1712                 PMD_INIT_LOG(INFO, "\tVF MAC address not assigned by Host PF");
1713                 PMD_INIT_LOG(INFO, "\tAssign randomly generated MAC address "
1714                              "%02x:%02x:%02x:%02x:%02x:%02x",
1715                              perm_addr->addr_bytes[0],
1716                              perm_addr->addr_bytes[1],
1717                              perm_addr->addr_bytes[2],
1718                              perm_addr->addr_bytes[3],
1719                              perm_addr->addr_bytes[4],
1720                              perm_addr->addr_bytes[5]);
1721         }
1722
1723         /* Copy the permanent MAC address */
1724         ether_addr_copy(perm_addr, &eth_dev->data->mac_addrs[0]);
1725
1726         /* reset the hardware with the new settings */
1727         diag = hw->mac.ops.start_hw(hw);
1728         switch (diag) {
1729         case  0:
1730                 break;
1731
1732         default:
1733                 PMD_INIT_LOG(ERR, "VF Initialization Failure: %d", diag);
1734                 return -EIO;
1735         }
1736
1737         rte_intr_callback_register(intr_handle,
1738                                    ixgbevf_dev_interrupt_handler, eth_dev);
1739         rte_intr_enable(intr_handle);
1740         ixgbevf_intr_enable(hw);
1741
1742         PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x mac.type=%s",
1743                      eth_dev->data->port_id, pci_dev->id.vendor_id,
1744                      pci_dev->id.device_id, "ixgbe_mac_82599_vf");
1745
1746         return 0;
1747 }
1748
1749 /* Virtual Function device uninit */
1750
1751 static int
1752 eth_ixgbevf_dev_uninit(struct rte_eth_dev *eth_dev)
1753 {
1754         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
1755         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
1756         struct ixgbe_hw *hw;
1757
1758         PMD_INIT_FUNC_TRACE();
1759
1760         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1761                 return -EPERM;
1762
1763         hw = IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
1764
1765         if (hw->adapter_stopped == 0)
1766                 ixgbevf_dev_close(eth_dev);
1767
1768         eth_dev->dev_ops = NULL;
1769         eth_dev->rx_pkt_burst = NULL;
1770         eth_dev->tx_pkt_burst = NULL;
1771
1772         /* Disable the interrupts for VF */
1773         ixgbevf_intr_disable(hw);
1774
1775         rte_free(eth_dev->data->mac_addrs);
1776         eth_dev->data->mac_addrs = NULL;
1777
1778         rte_intr_disable(intr_handle);
1779         rte_intr_callback_unregister(intr_handle,
1780                                      ixgbevf_dev_interrupt_handler, eth_dev);
1781
1782         return 0;
1783 }
1784
1785 static int eth_ixgbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
1786         struct rte_pci_device *pci_dev)
1787 {
1788         return rte_eth_dev_pci_generic_probe(pci_dev,
1789                 sizeof(struct ixgbe_adapter), eth_ixgbe_dev_init);
1790 }
1791
1792 static int eth_ixgbe_pci_remove(struct rte_pci_device *pci_dev)
1793 {
1794         return rte_eth_dev_pci_generic_remove(pci_dev, eth_ixgbe_dev_uninit);
1795 }
1796
1797 static struct rte_pci_driver rte_ixgbe_pmd = {
1798         .id_table = pci_id_ixgbe_map,
1799         .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
1800                      RTE_PCI_DRV_IOVA_AS_VA,
1801         .probe = eth_ixgbe_pci_probe,
1802         .remove = eth_ixgbe_pci_remove,
1803 };
1804
1805 static int eth_ixgbevf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
1806         struct rte_pci_device *pci_dev)
1807 {
1808         return rte_eth_dev_pci_generic_probe(pci_dev,
1809                 sizeof(struct ixgbe_adapter), eth_ixgbevf_dev_init);
1810 }
1811
1812 static int eth_ixgbevf_pci_remove(struct rte_pci_device *pci_dev)
1813 {
1814         return rte_eth_dev_pci_generic_remove(pci_dev, eth_ixgbevf_dev_uninit);
1815 }
1816
1817 /*
1818  * virtual function driver struct
1819  */
1820 static struct rte_pci_driver rte_ixgbevf_pmd = {
1821         .id_table = pci_id_ixgbevf_map,
1822         .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_IOVA_AS_VA,
1823         .probe = eth_ixgbevf_pci_probe,
1824         .remove = eth_ixgbevf_pci_remove,
1825 };
1826
1827 static int
1828 ixgbe_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
1829 {
1830         struct ixgbe_hw *hw =
1831                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1832         struct ixgbe_vfta *shadow_vfta =
1833                 IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
1834         uint32_t vfta;
1835         uint32_t vid_idx;
1836         uint32_t vid_bit;
1837
1838         vid_idx = (uint32_t) ((vlan_id >> 5) & 0x7F);
1839         vid_bit = (uint32_t) (1 << (vlan_id & 0x1F));
1840         vfta = IXGBE_READ_REG(hw, IXGBE_VFTA(vid_idx));
1841         if (on)
1842                 vfta |= vid_bit;
1843         else
1844                 vfta &= ~vid_bit;
1845         IXGBE_WRITE_REG(hw, IXGBE_VFTA(vid_idx), vfta);
1846
1847         /* update local VFTA copy */
1848         shadow_vfta->vfta[vid_idx] = vfta;
1849
1850         return 0;
1851 }
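
/*
 * Worked example (editor's note, not part of the driver): the VFTA is 128
 * 32-bit words covering the 4096 possible VLAN IDs. For vlan_id = 100,
 * vid_idx = (100 >> 5) & 0x7F = 3 and vid_bit = 1 << (100 & 0x1F) = 1 << 4,
 * so bit 4 of VFTA word 3 is set or cleared. Applications reach this
 * handler through the generic API, e.g. to admit VLAN 100:
 *
 *     rte_eth_dev_vlan_filter(port_id, 100, 1);
 */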
1852
1853 static void
1854 ixgbe_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on)
1855 {
1856         if (on)
1857                 ixgbe_vlan_hw_strip_enable(dev, queue);
1858         else
1859                 ixgbe_vlan_hw_strip_disable(dev, queue);
1860 }
1861
1862 static int
1863 ixgbe_vlan_tpid_set(struct rte_eth_dev *dev,
1864                     enum rte_vlan_type vlan_type,
1865                     uint16_t tpid)
1866 {
1867         struct ixgbe_hw *hw =
1868                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1869         int ret = 0;
1870         uint32_t reg;
1871         uint32_t qinq;
1872
1873         qinq = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
1874         qinq &= IXGBE_DMATXCTL_GDV;
1875
1876         switch (vlan_type) {
1877         case ETH_VLAN_TYPE_INNER:
1878                 if (qinq) {
1879                         reg = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
1880                         reg = (reg & (~IXGBE_VLNCTRL_VET)) | (uint32_t)tpid;
1881                         IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, reg);
1882                         reg = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
1883                         reg = (reg & (~IXGBE_DMATXCTL_VT_MASK))
1884                                 | ((uint32_t)tpid << IXGBE_DMATXCTL_VT_SHIFT);
1885                         IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg);
1886                 } else {
1887                         ret = -ENOTSUP;
1888                         PMD_DRV_LOG(ERR, "Inner type is not supported"
1889                                     " by single VLAN");
1890                 }
1891                 break;
1892         case ETH_VLAN_TYPE_OUTER:
1893                 if (qinq) {
1894                         /* Only the high 16 bits are valid */
1895                         IXGBE_WRITE_REG(hw, IXGBE_EXVET, (uint32_t)tpid <<
1896                                         IXGBE_EXVET_VET_EXT_SHIFT);
1897                 } else {
1898                         reg = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
1899                         reg = (reg & (~IXGBE_VLNCTRL_VET)) | (uint32_t)tpid;
1900                         IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, reg);
1901                         reg = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
1902                         reg = (reg & (~IXGBE_DMATXCTL_VT_MASK))
1903                                 | ((uint32_t)tpid << IXGBE_DMATXCTL_VT_SHIFT);
1904                         IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg);
1905                 }
1906
1907                 break;
1908         default:
1909                 ret = -EINVAL;
1910                 PMD_DRV_LOG(ERR, "Unsupported VLAN type %d", vlan_type);
1911                 break;
1912         }
1913
1914         return ret;
1915 }
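
/*
 * Illustrative sketch (editor's note): this handler backs the generic TPID
 * API. For example, setting the outer TPID to 0x88A8 for QinQ:
 *
 *     rte_eth_dev_set_vlan_ether_type(port_id, ETH_VLAN_TYPE_OUTER, 0x88A8);
 *
 * As the code above shows, ETH_VLAN_TYPE_INNER fails with -ENOTSUP unless
 * double VLAN is already enabled in DMATXCTL.
 */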
1916
1917 void
1918 ixgbe_vlan_hw_filter_disable(struct rte_eth_dev *dev)
1919 {
1920         struct ixgbe_hw *hw =
1921                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1922         uint32_t vlnctrl;
1923
1924         PMD_INIT_FUNC_TRACE();
1925
1926         /* Filter Table Disable */
1927         vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
1928         vlnctrl &= ~IXGBE_VLNCTRL_VFE;
1929
1930         IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
1931 }
1932
1933 void
1934 ixgbe_vlan_hw_filter_enable(struct rte_eth_dev *dev)
1935 {
1936         struct ixgbe_hw *hw =
1937                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1938         struct ixgbe_vfta *shadow_vfta =
1939                 IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
1940         uint32_t vlnctrl;
1941         uint16_t i;
1942
1943         PMD_INIT_FUNC_TRACE();
1944
1945         /* Filter Table Enable */
1946         vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
1947         vlnctrl &= ~IXGBE_VLNCTRL_CFIEN;
1948         vlnctrl |= IXGBE_VLNCTRL_VFE;
1949
1950         IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
1951
1952         /* write whatever is in local vfta copy */
1953         for (i = 0; i < IXGBE_VFTA_SIZE; i++)
1954                 IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), shadow_vfta->vfta[i]);
1955 }
1956
1957 static void
1958 ixgbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev, uint16_t queue, bool on)
1959 {
1960         struct ixgbe_hwstrip *hwstrip =
1961                 IXGBE_DEV_PRIVATE_TO_HWSTRIP_BITMAP(dev->data->dev_private);
1962         struct ixgbe_rx_queue *rxq;
1963
1964         if (queue >= IXGBE_MAX_RX_QUEUE_NUM)
1965                 return;
1966
1967         if (on)
1968                 IXGBE_SET_HWSTRIP(hwstrip, queue);
1969         else
1970                 IXGBE_CLEAR_HWSTRIP(hwstrip, queue);
1971
1972         if (queue >= dev->data->nb_rx_queues)
1973                 return;
1974
1975         rxq = dev->data->rx_queues[queue];
1976
1977         if (on)
1978                 rxq->vlan_flags = PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
1979         else
1980                 rxq->vlan_flags = PKT_RX_VLAN;
1981 }
1982
1983 static void
1984 ixgbe_vlan_hw_strip_disable(struct rte_eth_dev *dev, uint16_t queue)
1985 {
1986         struct ixgbe_hw *hw =
1987                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1988         uint32_t ctrl;
1989
1990         PMD_INIT_FUNC_TRACE();
1991
1992         if (hw->mac.type == ixgbe_mac_82598EB) {
1993                 /* No queue level support */
1994                 PMD_INIT_LOG(NOTICE, "82598EB does not support queue-level hw strip");
1995                 return;
1996         }
1997
1998         /* On other 10G NICs, VLAN strip can be set up per queue in RXDCTL */
1999         ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(queue));
2000         ctrl &= ~IXGBE_RXDCTL_VME;
2001         IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(queue), ctrl);
2002
2003         /* record this per-queue HW strip setting */
2004         ixgbe_vlan_hw_strip_bitmap_set(dev, queue, 0);
2005 }
2006
2007 static void
2008 ixgbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue)
2009 {
2010         struct ixgbe_hw *hw =
2011                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2012         uint32_t ctrl;
2013
2014         PMD_INIT_FUNC_TRACE();
2015
2016         if (hw->mac.type == ixgbe_mac_82598EB) {
2017                 /* No queue level support */
2018                 PMD_INIT_LOG(NOTICE, "82598EB does not support queue-level hw strip");
2019                 return;
2020         }
2021
2022         /* On other 10G NICs, VLAN strip can be set up per queue in RXDCTL */
2023         ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(queue));
2024         ctrl |= IXGBE_RXDCTL_VME;
2025         IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(queue), ctrl);
2026
2027         /* record this per-queue HW strip setting */
2028         ixgbe_vlan_hw_strip_bitmap_set(dev, queue, 1);
2029 }
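
/*
 * Illustrative sketch (editor's note): per-queue stripping is normally
 * toggled through the ethdev layer rather than by calling these helpers
 * directly, e.g.:
 *
 *     rte_eth_dev_set_vlan_strip_on_queue(port_id, rx_queue_id, 1);
 *
 * which lands in ixgbe_vlan_strip_queue_set() above.
 */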
2030
2031 void
2032 ixgbe_vlan_hw_strip_disable_all(struct rte_eth_dev *dev)
2033 {
2034         struct ixgbe_hw *hw =
2035                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2036         uint32_t ctrl;
2037         uint16_t i;
2038         struct ixgbe_rx_queue *rxq;
2039
2040         PMD_INIT_FUNC_TRACE();
2041
2042         if (hw->mac.type == ixgbe_mac_82598EB) {
2043                 ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
2044                 ctrl &= ~IXGBE_VLNCTRL_VME;
2045                 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
2046         } else {
2047                 /* On other 10G NICs, VLAN strip can be set up per queue in RXDCTL */
2048                 for (i = 0; i < dev->data->nb_rx_queues; i++) {
2049                         rxq = dev->data->rx_queues[i];
2050                         ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
2051                         ctrl &= ~IXGBE_RXDCTL_VME;
2052                         IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxq->reg_idx), ctrl);
2053
2054                         /* record this per-queue HW strip setting */
2055                         ixgbe_vlan_hw_strip_bitmap_set(dev, i, 0);
2056                 }
2057         }
2058 }
2059
2060 void
2061 ixgbe_vlan_hw_strip_enable_all(struct rte_eth_dev *dev)
2062 {
2063         struct ixgbe_hw *hw =
2064                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2065         uint32_t ctrl;
2066         uint16_t i;
2067         struct ixgbe_rx_queue *rxq;
2068
2069         PMD_INIT_FUNC_TRACE();
2070
2071         if (hw->mac.type == ixgbe_mac_82598EB) {
2072                 ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
2073                 ctrl |= IXGBE_VLNCTRL_VME;
2074                 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
2075         } else {
2076                 /* On other 10G NICs, VLAN strip can be set up per queue in RXDCTL */
2077                 for (i = 0; i < dev->data->nb_rx_queues; i++) {
2078                         rxq = dev->data->rx_queues[i];
2079                         ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
2080                         ctrl |= IXGBE_RXDCTL_VME;
2081                         IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxq->reg_idx), ctrl);
2082
2083                         /* record this per-queue HW strip setting */
2084                         ixgbe_vlan_hw_strip_bitmap_set(dev, i, 1);
2085                 }
2086         }
2087 }
2088
2089 static void
2090 ixgbe_vlan_hw_extend_disable(struct rte_eth_dev *dev)
2091 {
2092         struct ixgbe_hw *hw =
2093                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2094         uint32_t ctrl;
2095
2096         PMD_INIT_FUNC_TRACE();
2097
2098         /* DMATXCTL: Generic Double VLAN Disable */
2099         ctrl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
2100         ctrl &= ~IXGBE_DMATXCTL_GDV;
2101         IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, ctrl);
2102
2103         /* CTRL_EXT: Global Double VLAN Disable */
2104         ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
2105         ctrl &= ~IXGBE_EXTENDED_VLAN;
2106         IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl);
2107
2108 }
2109
2110 static void
2111 ixgbe_vlan_hw_extend_enable(struct rte_eth_dev *dev)
2112 {
2113         struct ixgbe_hw *hw =
2114                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2115         uint32_t ctrl;
2116
2117         PMD_INIT_FUNC_TRACE();
2118
2119         /* DMATXCTL: Generic Double VLAN Enable */
2120         ctrl  = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
2121         ctrl |= IXGBE_DMATXCTL_GDV;
2122         IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, ctrl);
2123
2124         /* CTRL_EXT: Global Double VLAN Enable */
2125         ctrl  = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
2126         ctrl |= IXGBE_EXTENDED_VLAN;
2127         IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl);
2128
2129         /* Clear pooling mode of PFVTCTL. It's required by X550. */
2130         if (hw->mac.type == ixgbe_mac_X550 ||
2131             hw->mac.type == ixgbe_mac_X550EM_x ||
2132             hw->mac.type == ixgbe_mac_X550EM_a) {
2133                 ctrl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
2134                 ctrl &= ~IXGBE_VT_CTL_POOLING_MODE_MASK;
2135                 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, ctrl);
2136         }
2137
2138         /*
2139          * The VET EXT field in the EXVET register is 0x8100 by default, so
2140          * no change is needed. The same applies to the VT field of DMATXCTL.
2141          */
2142 }
2143
2144 static int
2145 ixgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask)
2146 {
2147         if (mask & ETH_VLAN_STRIP_MASK) {
2148                 if (dev->data->dev_conf.rxmode.hw_vlan_strip)
2149                         ixgbe_vlan_hw_strip_enable_all(dev);
2150                 else
2151                         ixgbe_vlan_hw_strip_disable_all(dev);
2152         }
2153
2154         if (mask & ETH_VLAN_FILTER_MASK) {
2155                 if (dev->data->dev_conf.rxmode.hw_vlan_filter)
2156                         ixgbe_vlan_hw_filter_enable(dev);
2157                 else
2158                         ixgbe_vlan_hw_filter_disable(dev);
2159         }
2160
2161         if (mask & ETH_VLAN_EXTEND_MASK) {
2162                 if (dev->data->dev_conf.rxmode.hw_vlan_extend)
2163                         ixgbe_vlan_hw_extend_enable(dev);
2164                 else
2165                         ixgbe_vlan_hw_extend_disable(dev);
2166         }
2167
2168         return 0;
2169 }
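
/*
 * Illustrative sketch (editor's note): the mask bits correspond to the
 * generic offload API, so enabling stripping and filtering from an
 * application could look like:
 *
 *     int cur = rte_eth_dev_get_vlan_offload(port_id);
 *
 *     cur |= ETH_VLAN_STRIP_OFFLOAD | ETH_VLAN_FILTER_OFFLOAD;
 *     rte_eth_dev_set_vlan_offload(port_id, cur);
 *
 * The ethdev layer updates rxmode.hw_vlan_* from that mask before calling
 * down into this function.
 */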
2170
2171 static void
2172 ixgbe_vmdq_vlan_hw_filter_enable(struct rte_eth_dev *dev)
2173 {
2174         struct ixgbe_hw *hw =
2175                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2176         /* VLNCTRL: enable vlan filtering and allow all vlan tags through */
2177         uint32_t vlanctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
2178
2179         vlanctrl |= IXGBE_VLNCTRL_VFE; /* enable vlan filters */
2180         IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlanctrl);
2181 }
2182
2183 static int
2184 ixgbe_check_vf_rss_rxq_num(struct rte_eth_dev *dev, uint16_t nb_rx_q)
2185 {
2186         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2187
2188         switch (nb_rx_q) {
2189         case 1:
2190         case 2:
2191                 RTE_ETH_DEV_SRIOV(dev).active = ETH_64_POOLS;
2192                 break;
2193         case 4:
2194                 RTE_ETH_DEV_SRIOV(dev).active = ETH_32_POOLS;
2195                 break;
2196         default:
2197                 return -EINVAL;
2198         }
2199
2200         RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool =
2201                 IXGBE_MAX_RX_QUEUE_NUM / RTE_ETH_DEV_SRIOV(dev).active;
2202         RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx =
2203                 pci_dev->max_vfs * RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool;
2204         return 0;
2205 }
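
/*
 * Worked example (editor's note): with IXGBE_MAX_RX_QUEUE_NUM == 128,
 * nb_rx_q == 2 selects ETH_64_POOLS and nb_q_per_pool = 128 / 64 = 2, while
 * nb_rx_q == 4 selects ETH_32_POOLS with 4 queues per pool. def_pool_q_idx
 * then places the PF's default queues right after those reserved for the
 * max_vfs VFs.
 */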
2206
2207 static int
2208 ixgbe_check_mq_mode(struct rte_eth_dev *dev)
2209 {
2210         struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
2211         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2212         uint16_t nb_rx_q = dev->data->nb_rx_queues;
2213         uint16_t nb_tx_q = dev->data->nb_tx_queues;
2214
2215         if (RTE_ETH_DEV_SRIOV(dev).active != 0) {
2216                 /* check multi-queue mode */
2217                 switch (dev_conf->rxmode.mq_mode) {
2218                 case ETH_MQ_RX_VMDQ_DCB:
2219                         PMD_INIT_LOG(INFO, "ETH_MQ_RX_VMDQ_DCB mode supported in SRIOV");
2220                         break;
2221                 case ETH_MQ_RX_VMDQ_DCB_RSS:
2222                         /* DCB/RSS VMDQ in SRIOV mode, not implemented yet */
2223                         PMD_INIT_LOG(ERR, "SRIOV active,"
2224                                         " unsupported mq_mode rx %d.",
2225                                         dev_conf->rxmode.mq_mode);
2226                         return -EINVAL;
2227                 case ETH_MQ_RX_RSS:
2228                 case ETH_MQ_RX_VMDQ_RSS:
2229                         dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_RSS;
2230                         if (nb_rx_q <= RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool)
2231                                 if (ixgbe_check_vf_rss_rxq_num(dev, nb_rx_q)) {
2232                                         PMD_INIT_LOG(ERR, "SRIOV is active,"
2233                                                 " invalid queue number"
2234                                                 " for VMDQ RSS, allowed"
2235                                                 " values are 1, 2 or 4.");
2236                                         return -EINVAL;
2237                                 }
2238                         break;
2239                 case ETH_MQ_RX_VMDQ_ONLY:
2240                 case ETH_MQ_RX_NONE:
2241                         /* if no mq mode is configured, use the default scheme */
2242                         dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_ONLY;
2243                         break;
2244                 default: /* ETH_MQ_RX_DCB, ETH_MQ_RX_DCB_RSS or ETH_MQ_TX_DCB*/
2245                         /* SRIOV only works in VMDq enable mode */
2246                         PMD_INIT_LOG(ERR, "SRIOV is active,"
2247                                         " wrong mq_mode rx %d.",
2248                                         dev_conf->rxmode.mq_mode);
2249                         return -EINVAL;
2250                 }
2251
2252                 switch (dev_conf->txmode.mq_mode) {
2253                 case ETH_MQ_TX_VMDQ_DCB:
2254                         PMD_INIT_LOG(INFO, "ETH_MQ_TX_VMDQ_DCB mode supported in SRIOV");
2255                         dev->data->dev_conf.txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
2256                         break;
2257                 default: /* ETH_MQ_TX_VMDQ_ONLY or ETH_MQ_TX_NONE */
2258                         dev->data->dev_conf.txmode.mq_mode = ETH_MQ_TX_VMDQ_ONLY;
2259                         break;
2260                 }
2261
2262                 /* check valid queue number */
2263                 if ((nb_rx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool) ||
2264                     (nb_tx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool)) {
2265                         PMD_INIT_LOG(ERR, "SRIOV is active,"
2266                                         " nb_rx_q=%d nb_tx_q=%d queue number"
2267                                         " must be less than or equal to %d.",
2268                                         nb_rx_q, nb_tx_q,
2269                                         RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool);
2270                         return -EINVAL;
2271                 }
2272         } else {
2273                 if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB_RSS) {
2274                         PMD_INIT_LOG(ERR, "VMDQ+DCB+RSS mq_mode is"
2275                                           " not supported.");
2276                         return -EINVAL;
2277                 }
2278                 /* check configuration for vmdq+dcb mode */
2279                 if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB) {
2280                         const struct rte_eth_vmdq_dcb_conf *conf;
2281
2282                         if (nb_rx_q != IXGBE_VMDQ_DCB_NB_QUEUES) {
2283                                 PMD_INIT_LOG(ERR, "VMDQ+DCB, nb_rx_q != %d.",
2284                                                 IXGBE_VMDQ_DCB_NB_QUEUES);
2285                                 return -EINVAL;
2286                         }
2287                         conf = &dev_conf->rx_adv_conf.vmdq_dcb_conf;
2288                         if (!(conf->nb_queue_pools == ETH_16_POOLS ||
2289                                conf->nb_queue_pools == ETH_32_POOLS)) {
2290                                 PMD_INIT_LOG(ERR, "VMDQ+DCB selected,"
2291                                                 " nb_queue_pools must be %d or %d.",
2292                                                 ETH_16_POOLS, ETH_32_POOLS);
2293                                 return -EINVAL;
2294                         }
2295                 }
2296                 if (dev_conf->txmode.mq_mode == ETH_MQ_TX_VMDQ_DCB) {
2297                         const struct rte_eth_vmdq_dcb_tx_conf *conf;
2298
2299                         if (nb_tx_q != IXGBE_VMDQ_DCB_NB_QUEUES) {
2300                                 PMD_INIT_LOG(ERR, "VMDQ+DCB, nb_tx_q != %d",
2301                                                  IXGBE_VMDQ_DCB_NB_QUEUES);
2302                                 return -EINVAL;
2303                         }
2304                         conf = &dev_conf->tx_adv_conf.vmdq_dcb_tx_conf;
2305                         if (!(conf->nb_queue_pools == ETH_16_POOLS ||
2306                                conf->nb_queue_pools == ETH_32_POOLS)) {
2307                                 PMD_INIT_LOG(ERR, "VMDQ+DCB selected,"
2308                                                 " nb_queue_pools != %d and"
2309                                                 " nb_queue_pools != %d.",
2310                                                 ETH_16_POOLS, ETH_32_POOLS);
2311                                 return -EINVAL;
2312                         }
2313                 }
2314
2315                 /* For DCB mode check our configuration before we go further */
2316                 if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_DCB) {
2317                         const struct rte_eth_dcb_rx_conf *conf;
2318
2319                         if (nb_rx_q != IXGBE_DCB_NB_QUEUES) {
2320                                 PMD_INIT_LOG(ERR, "DCB selected, nb_rx_q != %d.",
2321                                                  IXGBE_DCB_NB_QUEUES);
2322                                 return -EINVAL;
2323                         }
2324                         conf = &dev_conf->rx_adv_conf.dcb_rx_conf;
2325                         if (!(conf->nb_tcs == ETH_4_TCS ||
2326                                conf->nb_tcs == ETH_8_TCS)) {
2327                                 PMD_INIT_LOG(ERR, "DCB selected, nb_tcs != %d"
2328                                                 " and nb_tcs != %d.",
2329                                                 ETH_4_TCS, ETH_8_TCS);
2330                                 return -EINVAL;
2331                         }
2332                 }
2333
2334                 if (dev_conf->txmode.mq_mode == ETH_MQ_TX_DCB) {
2335                         const struct rte_eth_dcb_tx_conf *conf;
2336
2337                         if (nb_tx_q != IXGBE_DCB_NB_QUEUES) {
2338                                 PMD_INIT_LOG(ERR, "DCB, nb_tx_q != %d.",
2339                                                  IXGBE_DCB_NB_QUEUES);
2340                                 return -EINVAL;
2341                         }
2342                         conf = &dev_conf->tx_adv_conf.dcb_tx_conf;
2343                         if (!(conf->nb_tcs == ETH_4_TCS ||
2344                                conf->nb_tcs == ETH_8_TCS)) {
2345                                 PMD_INIT_LOG(ERR, "DCB selected, nb_tcs != %d"
2346                                                 " and nb_tcs != %d.",
2347                                                 ETH_4_TCS, ETH_8_TCS);
2348                                 return -EINVAL;
2349                         }
2350                 }
2351
2352                 /*
2353                  * When DCB/VT is off, maximum number of queues changes,
2354                  * except for 82598EB, which remains constant.
2355                  */
2356                 if (dev_conf->txmode.mq_mode == ETH_MQ_TX_NONE &&
2357                                 hw->mac.type != ixgbe_mac_82598EB) {
2358                         if (nb_tx_q > IXGBE_NONE_MODE_TX_NB_QUEUES) {
2359                                 PMD_INIT_LOG(ERR,
2360                                              "Neither VT nor DCB are enabled, "
2361                                              "nb_tx_q > %d.",
2362                                              IXGBE_NONE_MODE_TX_NB_QUEUES);
2363                                 return -EINVAL;
2364                         }
2365                 }
2366         }
2367         return 0;
2368 }
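
/*
 * Illustrative sketch (editor's note, not part of the driver): a non-SRIOV
 * DCB configuration that passes the checks above could be built like this:
 *
 *     struct rte_eth_conf conf = { 0 };
 *
 *     conf.rxmode.mq_mode = ETH_MQ_RX_DCB;
 *     conf.txmode.mq_mode = ETH_MQ_TX_DCB;
 *     conf.rx_adv_conf.dcb_rx_conf.nb_tcs = ETH_8_TCS;
 *     conf.tx_adv_conf.dcb_tx_conf.nb_tcs = ETH_8_TCS;
 *     rte_eth_dev_configure(port_id, IXGBE_DCB_NB_QUEUES,
 *                           IXGBE_DCB_NB_QUEUES, &conf);
 */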
2369
2370 static int
2371 ixgbe_dev_configure(struct rte_eth_dev *dev)
2372 {
2373         struct ixgbe_interrupt *intr =
2374                 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
2375         struct ixgbe_adapter *adapter =
2376                 (struct ixgbe_adapter *)dev->data->dev_private;
2377         int ret;
2378
2379         PMD_INIT_FUNC_TRACE();
2380         /* multiple queue mode checking */
2381         ret = ixgbe_check_mq_mode(dev);
2382         if (ret != 0) {
2383                 PMD_DRV_LOG(ERR, "ixgbe_check_mq_mode fails with %d.",
2384                             ret);
2385                 return ret;
2386         }
2387
2388         /* set flag to update link status after init */
2389         intr->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
2390
2391         /*
2392          * Initialize to TRUE. If any Rx queue fails to meet the bulk
2393          * allocation or vector Rx preconditions, these flags will be reset.
2394          */
2395         adapter->rx_bulk_alloc_allowed = true;
2396         adapter->rx_vec_allowed = true;
2397
2398         return 0;
2399 }
2400
2401 static void
2402 ixgbe_dev_phy_intr_setup(struct rte_eth_dev *dev)
2403 {
2404         struct ixgbe_hw *hw =
2405                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2406         struct ixgbe_interrupt *intr =
2407                 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
2408         uint32_t gpie;
2409
2410         /* only set it up on X550EM_X */
2411         if (hw->mac.type == ixgbe_mac_X550EM_x) {
2412                 gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
2413                 gpie |= IXGBE_SDP0_GPIEN_X550EM_x;
2414                 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
2415                 if (hw->phy.type == ixgbe_phy_x550em_ext_t)
2416                         intr->mask |= IXGBE_EICR_GPI_SDP0_X550EM_x;
2417         }
2418 }
2419
2420 int
2421 ixgbe_set_vf_rate_limit(struct rte_eth_dev *dev, uint16_t vf,
2422                         uint16_t tx_rate, uint64_t q_msk)
2423 {
2424         struct ixgbe_hw *hw;
2425         struct ixgbe_vf_info *vfinfo;
2426         struct rte_eth_link link;
2427         uint8_t  nb_q_per_pool;
2428         uint32_t queue_stride;
2429         uint32_t queue_idx, idx = 0, vf_idx;
2430         uint32_t queue_end;
2431         uint16_t total_rate = 0;
2432         struct rte_pci_device *pci_dev;
2433
2434         pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2435         rte_eth_link_get_nowait(dev->data->port_id, &link);
2436
2437         if (vf >= pci_dev->max_vfs)
2438                 return -EINVAL;
2439
2440         if (tx_rate > link.link_speed)
2441                 return -EINVAL;
2442
2443         if (q_msk == 0)
2444                 return 0;
2445
2446         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2447         vfinfo = *(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));
2448         nb_q_per_pool = RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool;
2449         queue_stride = IXGBE_MAX_RX_QUEUE_NUM / RTE_ETH_DEV_SRIOV(dev).active;
2450         queue_idx = vf * queue_stride;
2451         queue_end = queue_idx + nb_q_per_pool - 1;
2452         if (queue_end >= hw->mac.max_tx_queues)
2453                 return -EINVAL;
2454
2455         if (vfinfo) {
2456                 for (vf_idx = 0; vf_idx < pci_dev->max_vfs; vf_idx++) {
2457                         if (vf_idx == vf)
2458                                 continue;
2459                         for (idx = 0; idx < RTE_DIM(vfinfo[vf_idx].tx_rate);
2460                                 idx++)
2461                                 total_rate += vfinfo[vf_idx].tx_rate[idx];
2462                 }
2463         } else {
2464                 return -EINVAL;
2465         }
2466
2467         /* Store tx_rate for this vf. */
2468         for (idx = 0; idx < nb_q_per_pool; idx++) {
2469                 if (((uint64_t)0x1 << idx) & q_msk) {
2470                         if (vfinfo[vf].tx_rate[idx] != tx_rate)
2471                                 vfinfo[vf].tx_rate[idx] = tx_rate;
2472                         total_rate += tx_rate;
2473                 }
2474         }
2475
2476         if (total_rate > dev->data->dev_link.link_speed) {
2477                 /* Reset the stored TX rate of the VF if it makes the
2478                  * total exceed the link speed.
2479                  */
2480                 memset(vfinfo[vf].tx_rate, 0, sizeof(vfinfo[vf].tx_rate));
2481                 return -EINVAL;
2482         }
2483
2484         /* Set RTTBCNRC of each queue/pool for this VF */
2485         for (; queue_idx <= queue_end; queue_idx++) {
2486                 if (0x1 & q_msk)
2487                         ixgbe_set_queue_rate_limit(dev, queue_idx, tx_rate);
2488                 q_msk = q_msk >> 1;
2489         }
2490
2491         return 0;
2492 }
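/*
 * Illustrative usage (values are hypothetical): cap the first four queues
 * of VF 0 at 1000 Mbps. Bit i of q_msk selects the i-th queue of the VF's
 * pool; queues whose bit is clear keep their previously stored rate:
 *
 *     ret = ixgbe_set_vf_rate_limit(dev, 0, 1000, 0xFULL);
 *
 * Applications normally reach this through the public
 * rte_pmd_ixgbe_set_vf_rate_limit() wrapper (rte_pmd_ixgbe.h) rather than
 * calling the PMD internals directly.
 */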
2493
2494 /*
2495  * Configure device link speed and setup link.
2496  * It returns 0 on success.
2497  */
2498 static int
2499 ixgbe_dev_start(struct rte_eth_dev *dev)
2500 {
2501         struct ixgbe_hw *hw =
2502                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2503         struct ixgbe_vf_info *vfinfo =
2504                 *IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private);
2505         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2506         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2507         uint32_t intr_vector = 0;
2508         int err, link_up = 0, negotiate = 0;
2509         uint32_t speed = 0;
2510         int mask = 0;
2511         int status;
2512         uint16_t vf, idx;
2513         uint32_t *link_speeds;
2514         struct ixgbe_tm_conf *tm_conf =
2515                 IXGBE_DEV_PRIVATE_TO_TM_CONF(dev->data->dev_private);
2516
2517         PMD_INIT_FUNC_TRACE();
2518
2519         /* IXGBE devices don't support:
2520          *    - half duplex (checked afterwards for valid speeds)
2521          *    - fixed speed: TODO implement
2522          */
2523         if (dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED) {
2524                 PMD_INIT_LOG(ERR,
2525                              "Invalid link_speeds for port %u, fixed speed not supported",
2526                              dev->data->port_id);
2527                 return -EINVAL;
2528         }
2529
2530         /* disable uio/vfio intr/eventfd mapping */
2531         rte_intr_disable(intr_handle);
2532
2533         /* stop adapter */
2534         hw->adapter_stopped = 0;
2535         ixgbe_stop_adapter(hw);
2536
2537         /* reinitialize adapter
2538          * this calls reset and start
2539          */
2540         status = ixgbe_pf_reset_hw(hw);
2541         if (status != 0)
2542                 return -1;
2543         hw->mac.ops.start_hw(hw);
2544         hw->mac.get_link_status = true;
2545
2546         /* configure PF module if SRIOV enabled */
2547         ixgbe_pf_host_configure(dev);
2548
2549         ixgbe_dev_phy_intr_setup(dev);
2550
2551         /* check and configure queue intr-vector mapping */
2552         if ((rte_intr_cap_multiple(intr_handle) ||
2553              !RTE_ETH_DEV_SRIOV(dev).active) &&
2554             dev->data->dev_conf.intr_conf.rxq != 0) {
2555                 intr_vector = dev->data->nb_rx_queues;
2556                 if (intr_vector > IXGBE_MAX_INTR_QUEUE_NUM) {
2557                         PMD_INIT_LOG(ERR, "At most %d intr queues supported",
2558                                         IXGBE_MAX_INTR_QUEUE_NUM);
2559                         return -ENOTSUP;
2560                 }
2561                 if (rte_intr_efd_enable(intr_handle, intr_vector))
2562                         return -1;
2563         }
2564
2565         if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
2566                 intr_handle->intr_vec =
2567                         rte_zmalloc("intr_vec",
2568                                     dev->data->nb_rx_queues * sizeof(int), 0);
2569                 if (intr_handle->intr_vec == NULL) {
2570                         PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
2571                                      " intr_vec", dev->data->nb_rx_queues);
2572                         return -ENOMEM;
2573                 }
2574         }
2575
2576         /* configure MSI-X for sleeping until Rx interrupt */
2577         ixgbe_configure_msix(dev);
2578
2579         /* initialize transmission unit */
2580         ixgbe_dev_tx_init(dev);
2581
2582         /* This can fail when allocating mbufs for descriptor rings */
2583         err = ixgbe_dev_rx_init(dev);
2584         if (err) {
2585                 PMD_INIT_LOG(ERR, "Unable to initialize RX hardware");
2586                 goto error;
2587         }
2588
2589         mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
2590                 ETH_VLAN_EXTEND_MASK;
2591         err = ixgbe_vlan_offload_set(dev, mask);
2592         if (err) {
2593                 PMD_INIT_LOG(ERR, "Unable to set VLAN offload");
2594                 goto error;
2595         }
2596
2597         if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_VMDQ_ONLY) {
2598                 /* Enable vlan filtering for VMDq */
2599                 ixgbe_vmdq_vlan_hw_filter_enable(dev);
2600         }
2601
2602         /* Configure DCB hw */
2603         ixgbe_configure_dcb(dev);
2604
2605         if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_NONE) {
2606                 err = ixgbe_fdir_configure(dev);
2607                 if (err)
2608                         goto error;
2609         }
2610
2611         /* Restore vf rate limit */
2612         if (vfinfo != NULL) {
2613                 for (vf = 0; vf < pci_dev->max_vfs; vf++)
2614                         for (idx = 0; idx < IXGBE_MAX_QUEUE_NUM_PER_VF; idx++)
2615                                 if (vfinfo[vf].tx_rate[idx] != 0)
2616                                         ixgbe_set_vf_rate_limit(
2617                                                 dev, vf,
2618                                                 vfinfo[vf].tx_rate[idx],
2619                                                 1 << idx);
2620         }
2621
2622         ixgbe_restore_statistics_mapping(dev);
2623
2624         err = ixgbe_dev_rxtx_start(dev);
2625         if (err < 0) {
2626                 PMD_INIT_LOG(ERR, "Unable to start rxtx queues");
2627                 goto error;
2628         }
2629
2630         /* Skip link setup if loopback mode is enabled for 82599. */
2631         if (hw->mac.type == ixgbe_mac_82599EB &&
2632                         dev->data->dev_conf.lpbk_mode == IXGBE_LPBK_82599_TX_RX)
2633                 goto skip_link_setup;
2634
2635         if (ixgbe_is_sfp(hw) && hw->phy.multispeed_fiber) {
2636                 err = hw->mac.ops.setup_sfp(hw);
2637                 if (err)
2638                         goto error;
2639         }
2640
2641         if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper) {
2642                 /* Turn on the copper */
2643                 ixgbe_set_phy_power(hw, true);
2644         } else {
2645                 /* Turn on the laser */
2646                 ixgbe_enable_tx_laser(hw);
2647         }
2648
2649         err = ixgbe_check_link(hw, &speed, &link_up, 0);
2650         if (err)
2651                 goto error;
2652         dev->data->dev_link.link_status = link_up;
2653
2654         err = ixgbe_get_link_capabilities(hw, &speed, &negotiate);
2655         if (err)
2656                 goto error;
2657
2658         link_speeds = &dev->data->dev_conf.link_speeds;
2659         if (*link_speeds & ~(ETH_LINK_SPEED_100M | ETH_LINK_SPEED_1G |
2660                         ETH_LINK_SPEED_10G)) {
2661                 PMD_INIT_LOG(ERR, "Invalid link setting");
2662                 goto error;
2663         }
2664
2665         speed = 0x0;
2666         if (*link_speeds == ETH_LINK_SPEED_AUTONEG) {
2667                 switch (hw->mac.type) {
2668                 case ixgbe_mac_82598EB:
2669                         speed = IXGBE_LINK_SPEED_82598_AUTONEG;
2670                         break;
2671                 case ixgbe_mac_82599EB:
2672                 case ixgbe_mac_X540:
2673                         speed = IXGBE_LINK_SPEED_82599_AUTONEG;
2674                         break;
2675                 case ixgbe_mac_X550:
2676                 case ixgbe_mac_X550EM_x:
2677                 case ixgbe_mac_X550EM_a:
2678                         speed = IXGBE_LINK_SPEED_X550_AUTONEG;
2679                         break;
2680                 default:
2681                         speed = IXGBE_LINK_SPEED_82599_AUTONEG;
2682                 }
2683         } else {
2684                 if (*link_speeds & ETH_LINK_SPEED_10G)
2685                         speed |= IXGBE_LINK_SPEED_10GB_FULL;
2686                 if (*link_speeds & ETH_LINK_SPEED_1G)
2687                         speed |= IXGBE_LINK_SPEED_1GB_FULL;
2688                 if (*link_speeds & ETH_LINK_SPEED_100M)
2689                         speed |= IXGBE_LINK_SPEED_100_FULL;
2690         }
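        /*
         * Worked example: with link_speeds = ETH_LINK_SPEED_1G |
         * ETH_LINK_SPEED_10G, the checks above yield
         * speed = IXGBE_LINK_SPEED_1GB_FULL | IXGBE_LINK_SPEED_10GB_FULL,
         * letting the link negotiate either rate.
         */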
2691
2692         err = ixgbe_setup_link(hw, speed, link_up);
2693         if (err)
2694                 goto error;
2695
2696 skip_link_setup:
2697
2698         if (rte_intr_allow_others(intr_handle)) {
2699                 /* check if lsc interrupt is enabled */
2700                 if (dev->data->dev_conf.intr_conf.lsc != 0)
2701                         ixgbe_dev_lsc_interrupt_setup(dev, TRUE);
2702                 else
2703                         ixgbe_dev_lsc_interrupt_setup(dev, FALSE);
2704                 ixgbe_dev_macsec_interrupt_setup(dev);
2705         } else {
2706                 rte_intr_callback_unregister(intr_handle,
2707                                              ixgbe_dev_interrupt_handler, dev);
2708                 if (dev->data->dev_conf.intr_conf.lsc != 0)
2709                         PMD_INIT_LOG(INFO, "lsc won't be enabled because"
2710                                      " of no intr multiplex");
2711         }
2712
2713         /* check if rxq interrupt is enabled */
2714         if (dev->data->dev_conf.intr_conf.rxq != 0 &&
2715             rte_intr_dp_is_en(intr_handle))
2716                 ixgbe_dev_rxq_interrupt_setup(dev);
2717
2718         /* enable uio/vfio intr/eventfd mapping */
2719         rte_intr_enable(intr_handle);
2720
2721         /* resume enabled intr since hw reset */
2722         ixgbe_enable_intr(dev);
2723         ixgbe_l2_tunnel_conf(dev);
2724         ixgbe_filter_restore(dev);
2725
2726         if (tm_conf->root && !tm_conf->committed)
2727                 PMD_DRV_LOG(WARNING,
2728                             "please call hierarchy_commit() "
2729                             "before starting the port");
2730
2731         return 0;
2732
2733 error:
2734         PMD_INIT_LOG(ERR, "failure in ixgbe_dev_start(): %d", err);
2735         ixgbe_dev_clear_queues(dev);
2736         return -EIO;
2737 }
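/*
 * Sketch of the standard rte_ethdev call sequence that ends up in
 * ixgbe_dev_start() (port_id, queue counts and conf structs are
 * placeholders):
 *
 *     rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &port_conf);
 *     rte_eth_rx_queue_setup(port_id, 0, nb_rxd, socket_id, &rxconf, mb_pool);
 *     rte_eth_tx_queue_setup(port_id, 0, nb_txd, socket_id, &txconf);
 *     rte_eth_dev_start(port_id);   <-- dispatches here via dev_ops->dev_start
 */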
2738
2739 /*
2740  * Stop device: disable rx and tx functions to allow for reconfiguring.
2741  */
2742 static void
2743 ixgbe_dev_stop(struct rte_eth_dev *dev)
2744 {
2745         struct rte_eth_link link;
2746         struct ixgbe_hw *hw =
2747                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2748         struct ixgbe_vf_info *vfinfo =
2749                 *IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private);
2750         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2751         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2752         int vf;
2753         struct ixgbe_tm_conf *tm_conf =
2754                 IXGBE_DEV_PRIVATE_TO_TM_CONF(dev->data->dev_private);
2755
2756         PMD_INIT_FUNC_TRACE();
2757
2758         /* disable interrupts */
2759         ixgbe_disable_intr(hw);
2760
2761         /* reset the NIC */
2762         ixgbe_pf_reset_hw(hw);
2763         hw->adapter_stopped = 0;
2764
2765         /* stop adapter */
2766         ixgbe_stop_adapter(hw);
2767
2768         for (vf = 0; vfinfo != NULL && vf < pci_dev->max_vfs; vf++)
2769                 vfinfo[vf].clear_to_send = false;
2770
2771         if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper) {
2772                 /* Turn off the copper */
2773                 ixgbe_set_phy_power(hw, false);
2774         } else {
2775                 /* Turn off the laser */
2776                 ixgbe_disable_tx_laser(hw);
2777         }
2778
2779         ixgbe_dev_clear_queues(dev);
2780
2781         /* Clear stored conf */
2782         dev->data->scattered_rx = 0;
2783         dev->data->lro = 0;
2784
2785         /* Clear recorded link status */
2786         memset(&link, 0, sizeof(link));
2787         rte_ixgbe_dev_atomic_write_link_status(dev, &link);
2788
2789         if (!rte_intr_allow_others(intr_handle))
2790                 /* resume to the default handler */
2791                 rte_intr_callback_register(intr_handle,
2792                                            ixgbe_dev_interrupt_handler,
2793                                            (void *)dev);
2794
2795         /* Clean datapath event and queue/vec mapping */
2796         rte_intr_efd_disable(intr_handle);
2797         if (intr_handle->intr_vec != NULL) {
2798                 rte_free(intr_handle->intr_vec);
2799                 intr_handle->intr_vec = NULL;
2800         }
2801
2802         /* reset hierarchy commit */
2803         tm_conf->committed = false;
2804 }
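/*
 * Counterpart of rte_eth_dev_stop(): the port keeps its configuration, so
 * it can be restarted later with rte_eth_dev_start() without setting the
 * queues up again.
 */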
2805
2806 /*
2807  * Set device link up: enable tx.
2808  */
2809 static int
2810 ixgbe_dev_set_link_up(struct rte_eth_dev *dev)
2811 {
2812         struct ixgbe_hw *hw =
2813                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2814         if (hw->mac.type == ixgbe_mac_82599EB) {
2815 #ifdef RTE_LIBRTE_IXGBE_BYPASS
2816                 if (hw->device_id == IXGBE_DEV_ID_82599_BYPASS) {
2817                         /* Not supported in bypass mode */
2818                         PMD_INIT_LOG(ERR, "Set link up is not supported "
2819                                      "by device id 0x%x", hw->device_id);
2820                         return -ENOTSUP;
2821                 }
2822 #endif
2823         }
2824
2825         if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper) {
2826                 /* Turn on the copper */
2827                 ixgbe_set_phy_power(hw, true);
2828         } else {
2829                 /* Turn on the laser */
2830                 ixgbe_enable_tx_laser(hw);
2831         }
2832
2833         return 0;
2834 }
2835
2836 /*
2837  * Set device link down: disable tx.
2838  */
2839 static int
2840 ixgbe_dev_set_link_down(struct rte_eth_dev *dev)
2841 {
2842         struct ixgbe_hw *hw =
2843                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2844         if (hw->mac.type == ixgbe_mac_82599EB) {
2845 #ifdef RTE_LIBRTE_IXGBE_BYPASS
2846                 if (hw->device_id == IXGBE_DEV_ID_82599_BYPASS) {
2847                         /* Not supported in bypass mode */
2848                         PMD_INIT_LOG(ERR, "Set link down is not supported "
2849                                      "by device id 0x%x", hw->device_id);
2850                         return -ENOTSUP;
2851                 }
2852 #endif
2853         }
2854
2855         if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper) {
2856                 /* Turn off the copper */
2857                 ixgbe_set_phy_power(hw, false);
2858         } else {
2859                 /* Turn off the laser */
2860                 ixgbe_disable_tx_laser(hw);
2861         }
2862
2863         return 0;
2864 }
2865
2866 /*
2867  * Reset and stop device.
2868  */
2869 static void
2870 ixgbe_dev_close(struct rte_eth_dev *dev)
2871 {
2872         struct ixgbe_hw *hw =
2873                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2874
2875         PMD_INIT_FUNC_TRACE();
2876
2877         ixgbe_pf_reset_hw(hw);
2878
2879         ixgbe_dev_stop(dev);
2880         hw->adapter_stopped = 1;
2881
2882         ixgbe_dev_free_queues(dev);
2883
2884         ixgbe_disable_pcie_master(hw);
2885
2886         /* reprogram the RAR[0] in case user changed it. */
2887         ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
2888 }
2889
2890 /*
2891  * Reset PF device.
2892  */
2893 static int
2894 ixgbe_dev_reset(struct rte_eth_dev *dev)
2895 {
2896         int ret;
2897
2898         /* When a DPDK PMD PF begins to reset its port, it should notify all
2899          * of its VFs so they stay in sync with it. The detailed notification
2900          * mechanism is PMD specific. For the ixgbe PF it is rather complex, so
2901          * to avoid unexpected behavior in the VFs, reset of the PF with
2902          * SR-IOV active is currently not supported. It might be supported later.
2903          */
2904         if (dev->data->sriov.active)
2905                 return -ENOTSUP;
2906
2907         ret = eth_ixgbe_dev_uninit(dev);
2908         if (ret)
2909                 return ret;
2910
2911         ret = eth_ixgbe_dev_init(dev);
2912
2913         return ret;
2914 }
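/*
 * Application-side sketch: rte_eth_dev_reset() dispatches here. After a
 * successful reset the port is uninitialized again, so the application is
 * expected to reconfigure and restart it, e.g.:
 *
 *     if (rte_eth_dev_reset(port_id) == 0) {
 *             rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &port_conf);
 *             ... set up Rx/Tx queues ...
 *             rte_eth_dev_start(port_id);
 *     }
 */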
2915
2916 static void
2917 ixgbe_read_stats_registers(struct ixgbe_hw *hw,
2918                            struct ixgbe_hw_stats *hw_stats,
2919                            struct ixgbe_macsec_stats *macsec_stats,
2920                            uint64_t *total_missed_rx, uint64_t *total_qbrc,
2921                            uint64_t *total_qprc, uint64_t *total_qprdc)
2922 {
2923         uint32_t bprc, lxon, lxoff, total;
2924         uint32_t delta_gprc = 0;
2925         unsigned i;
2926         /* Workaround: the RX byte counters do not include CRC bytes when
2927          * CRC stripping is enabled, so when stripping is disabled the CRC
2928          * bytes are subtracted from the counters below.
2929          */
2930         int crc_strip = (IXGBE_READ_REG(hw, IXGBE_HLREG0) &
2931                         IXGBE_HLREG0_RXCRCSTRP);
2932
2933         hw_stats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
2934         hw_stats->illerrc += IXGBE_READ_REG(hw, IXGBE_ILLERRC);
2935         hw_stats->errbc += IXGBE_READ_REG(hw, IXGBE_ERRBC);
2936         hw_stats->mspdc += IXGBE_READ_REG(hw, IXGBE_MSPDC);
2937
2938         for (i = 0; i < 8; i++) {
2939                 uint32_t mp = IXGBE_READ_REG(hw, IXGBE_MPC(i));
2940
2941                 /* global total per queue */
2942                 hw_stats->mpc[i] += mp;
2943                 /* Running comprehensive total for stats display */
2944                 *total_missed_rx += hw_stats->mpc[i];
2945                 if (hw->mac.type == ixgbe_mac_82598EB) {
2946                         hw_stats->rnbc[i] +=
2947                             IXGBE_READ_REG(hw, IXGBE_RNBC(i));
2948                         hw_stats->pxonrxc[i] +=
2949                                 IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
2950                         hw_stats->pxoffrxc[i] +=
2951                                 IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
2952                 } else {
2953                         hw_stats->pxonrxc[i] +=
2954                                 IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
2955                         hw_stats->pxoffrxc[i] +=
2956                                 IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
2957                         hw_stats->pxon2offc[i] +=
2958                                 IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i));
2959                 }
2960                 hw_stats->pxontxc[i] +=
2961                     IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
2962                 hw_stats->pxofftxc[i] +=
2963                     IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
2964         }
2965         for (i = 0; i < IXGBE_QUEUE_STAT_COUNTERS; i++) {
2966                 uint32_t delta_qprc = IXGBE_READ_REG(hw, IXGBE_QPRC(i));
2967                 uint32_t delta_qptc = IXGBE_READ_REG(hw, IXGBE_QPTC(i));
2968                 uint32_t delta_qprdc = IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
2969
2970                 delta_gprc += delta_qprc;
2971
2972                 hw_stats->qprc[i] += delta_qprc;
2973                 hw_stats->qptc[i] += delta_qptc;
2974
2975                 hw_stats->qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC_L(i));
2976                 hw_stats->qbrc[i] +=
2977                     ((uint64_t)IXGBE_READ_REG(hw, IXGBE_QBRC_H(i)) << 32);
2978                 if (crc_strip == 0)
2979                         hw_stats->qbrc[i] -= delta_qprc * ETHER_CRC_LEN;
2980
2981                 hw_stats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC_L(i));
2982                 hw_stats->qbtc[i] +=
2983                     ((uint64_t)IXGBE_READ_REG(hw, IXGBE_QBTC_H(i)) << 32);
2984
2985                 hw_stats->qprdc[i] += delta_qprdc;
2986                 *total_qprdc += hw_stats->qprdc[i];
2987
2988                 *total_qprc += hw_stats->qprc[i];
2989                 *total_qbrc += hw_stats->qbrc[i];
2990         }
2991         hw_stats->mlfc += IXGBE_READ_REG(hw, IXGBE_MLFC);
2992         hw_stats->mrfc += IXGBE_READ_REG(hw, IXGBE_MRFC);
2993         hw_stats->rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);
2994
2995         /*
2996          * An errata states that gprc actually counts good + missed packets:
2997          * Workaround: set gprc to the sum of the per-queue packet receives
2998          */
2999         hw_stats->gprc = *total_qprc;
3000
3001         if (hw->mac.type != ixgbe_mac_82598EB) {
3002                 hw_stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL);
3003                 hw_stats->gorc += ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32);
3004                 hw_stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL);
3005                 hw_stats->gotc += ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32);
3006                 hw_stats->tor += IXGBE_READ_REG(hw, IXGBE_TORL);
3007                 hw_stats->tor += ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32);
3008                 hw_stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
3009                 hw_stats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
3010         } else {
3011                 hw_stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
3012                 hw_stats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
3013                 /* 82598 only has a counter in the high register */
3014                 hw_stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
3015                 hw_stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
3016                 hw_stats->tor += IXGBE_READ_REG(hw, IXGBE_TORH);
3017         }
3018         uint64_t old_tpr = hw_stats->tpr;
3019
3020         hw_stats->tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
3021         hw_stats->tpt += IXGBE_READ_REG(hw, IXGBE_TPT);
3022
3023         if (crc_strip == 0)
3024                 hw_stats->gorc -= delta_gprc * ETHER_CRC_LEN;
3025
3026         uint64_t delta_gptc = IXGBE_READ_REG(hw, IXGBE_GPTC);
3027         hw_stats->gptc += delta_gptc;
3028         hw_stats->gotc -= delta_gptc * ETHER_CRC_LEN;
3029         hw_stats->tor -= (hw_stats->tpr - old_tpr) * ETHER_CRC_LEN;
3030
3031         /*
3032          * Workaround: mprc hardware is incorrectly counting
3033          * broadcasts, so for now we subtract those.
3034          */
3035         bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
3036         hw_stats->bprc += bprc;
3037         hw_stats->mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
3038         if (hw->mac.type == ixgbe_mac_82598EB)
3039                 hw_stats->mprc -= bprc;
3040
3041         hw_stats->prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
3042         hw_stats->prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
3043         hw_stats->prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
3044         hw_stats->prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
3045         hw_stats->prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
3046         hw_stats->prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);
3047
3048         lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
3049         hw_stats->lxontxc += lxon;
3050         lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
3051         hw_stats->lxofftxc += lxoff;
3052         total = lxon + lxoff;
3053
3054         hw_stats->mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
3055         hw_stats->ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
3056         hw_stats->gptc -= total;
3057         hw_stats->mptc -= total;
3058         hw_stats->ptc64 -= total;
3059         hw_stats->gotc -= total * ETHER_MIN_LEN;
3060
3061         hw_stats->ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
3062         hw_stats->rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
3063         hw_stats->roc += IXGBE_READ_REG(hw, IXGBE_ROC);
3064         hw_stats->rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
3065         hw_stats->mngprc += IXGBE_READ_REG(hw, IXGBE_MNGPRC);
3066         hw_stats->mngpdc += IXGBE_READ_REG(hw, IXGBE_MNGPDC);
3067         hw_stats->mngptc += IXGBE_READ_REG(hw, IXGBE_MNGPTC);
3068         hw_stats->ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
3069         hw_stats->ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
3070         hw_stats->ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
3071         hw_stats->ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
3072         hw_stats->ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
3073         hw_stats->bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
3074         hw_stats->xec += IXGBE_READ_REG(hw, IXGBE_XEC);
3075         hw_stats->fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC);
3076         hw_stats->fclast += IXGBE_READ_REG(hw, IXGBE_FCLAST);
3077         /* Only read FCOE on 82599 */
3078         if (hw->mac.type != ixgbe_mac_82598EB) {
3079                 hw_stats->fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
3080                 hw_stats->fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
3081                 hw_stats->fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
3082                 hw_stats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
3083                 hw_stats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
3084         }
3085
3086         /* Flow Director Stats registers */
3087         hw_stats->fdirmatch += IXGBE_READ_REG(hw, IXGBE_FDIRMATCH);
3088         hw_stats->fdirmiss += IXGBE_READ_REG(hw, IXGBE_FDIRMISS);
3089
3090         /* MACsec Stats registers */
3091         macsec_stats->out_pkts_untagged += IXGBE_READ_REG(hw, IXGBE_LSECTXUT);
3092         macsec_stats->out_pkts_encrypted +=
3093                 IXGBE_READ_REG(hw, IXGBE_LSECTXPKTE);
3094         macsec_stats->out_pkts_protected +=
3095                 IXGBE_READ_REG(hw, IXGBE_LSECTXPKTP);
3096         macsec_stats->out_octets_encrypted +=
3097                 IXGBE_READ_REG(hw, IXGBE_LSECTXOCTE);
3098         macsec_stats->out_octets_protected +=
3099                 IXGBE_READ_REG(hw, IXGBE_LSECTXOCTP);
3100         macsec_stats->in_pkts_untagged += IXGBE_READ_REG(hw, IXGBE_LSECRXUT);
3101         macsec_stats->in_pkts_badtag += IXGBE_READ_REG(hw, IXGBE_LSECRXBAD);
3102         macsec_stats->in_pkts_nosci += IXGBE_READ_REG(hw, IXGBE_LSECRXNOSCI);
3103         macsec_stats->in_pkts_unknownsci +=
3104                 IXGBE_READ_REG(hw, IXGBE_LSECRXUNSCI);
3105         macsec_stats->in_octets_decrypted +=
3106                 IXGBE_READ_REG(hw, IXGBE_LSECRXOCTD);
3107         macsec_stats->in_octets_validated +=
3108                 IXGBE_READ_REG(hw, IXGBE_LSECRXOCTV);
3109         macsec_stats->in_pkts_unchecked += IXGBE_READ_REG(hw, IXGBE_LSECRXUNCH);
3110         macsec_stats->in_pkts_delayed += IXGBE_READ_REG(hw, IXGBE_LSECRXDELAY);
3111         macsec_stats->in_pkts_late += IXGBE_READ_REG(hw, IXGBE_LSECRXLATE);
3112         for (i = 0; i < 2; i++) {
3113                 macsec_stats->in_pkts_ok +=
3114                         IXGBE_READ_REG(hw, IXGBE_LSECRXOK(i));
3115                 macsec_stats->in_pkts_invalid +=
3116                         IXGBE_READ_REG(hw, IXGBE_LSECRXINV(i));
3117                 macsec_stats->in_pkts_notvalid +=
3118                         IXGBE_READ_REG(hw, IXGBE_LSECRXNV(i));
3119         }
3120         macsec_stats->in_pkts_unusedsa += IXGBE_READ_REG(hw, IXGBE_LSECRXUNSA);
3121         macsec_stats->in_pkts_notusingsa +=
3122                 IXGBE_READ_REG(hw, IXGBE_LSECRXNUSA);
3123 }
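/*
 * Most of the statistics registers read above are clear-on-read, so each
 * invocation accumulates only the delta since the previous read into
 * hw_stats. This is why a stats "reset" is implemented below as a read
 * (which clears the hardware counters) followed by a memset of the
 * software totals.
 */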
3124
3125 /*
3126  * This function is based on ixgbe_update_stats_counters() in ixgbe/ixgbe.c
3127  */
3128 static int
3129 ixgbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
3130 {
3131         struct ixgbe_hw *hw =
3132                         IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3133         struct ixgbe_hw_stats *hw_stats =
3134                         IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
3135         struct ixgbe_macsec_stats *macsec_stats =
3136                         IXGBE_DEV_PRIVATE_TO_MACSEC_STATS(
3137                                 dev->data->dev_private);
3138         uint64_t total_missed_rx, total_qbrc, total_qprc, total_qprdc;
3139         unsigned i;
3140
3141         total_missed_rx = 0;
3142         total_qbrc = 0;
3143         total_qprc = 0;
3144         total_qprdc = 0;
3145
3146         ixgbe_read_stats_registers(hw, hw_stats, macsec_stats, &total_missed_rx,
3147                         &total_qbrc, &total_qprc, &total_qprdc);
3148
3149         if (stats == NULL)
3150                 return -EINVAL;
3151
3152         /* Fill out the rte_eth_stats statistics structure */
3153         stats->ipackets = total_qprc;
3154         stats->ibytes = total_qbrc;
3155         stats->opackets = hw_stats->gptc;
3156         stats->obytes = hw_stats->gotc;
3157
3158         for (i = 0; i < IXGBE_QUEUE_STAT_COUNTERS; i++) {
3159                 stats->q_ipackets[i] = hw_stats->qprc[i];
3160                 stats->q_opackets[i] = hw_stats->qptc[i];
3161                 stats->q_ibytes[i] = hw_stats->qbrc[i];
3162                 stats->q_obytes[i] = hw_stats->qbtc[i];
3163                 stats->q_errors[i] = hw_stats->qprdc[i];
3164         }
3165
3166         /* Rx Errors */
3167         stats->imissed  = total_missed_rx;
3168         stats->ierrors  = hw_stats->crcerrs +
3169                           hw_stats->mspdc +
3170                           hw_stats->rlec +
3171                           hw_stats->ruc +
3172                           hw_stats->roc +
3173                           hw_stats->illerrc +
3174                           hw_stats->errbc +
3175                           hw_stats->rfc +
3176                           hw_stats->fccrc +
3177                           hw_stats->fclast;
3178
3179         /* Tx Errors */
3180         stats->oerrors  = 0;
3181         return 0;
3182 }
3183
3184 static void
3185 ixgbe_dev_stats_reset(struct rte_eth_dev *dev)
3186 {
3187         struct ixgbe_hw_stats *stats =
3188                         IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
3189
3190         /* HW registers are cleared on read */
3191         ixgbe_dev_stats_get(dev, NULL);
3192
3193         /* Reset software totals */
3194         memset(stats, 0, sizeof(*stats));
3195 }
3196
3197 /* This function calculates the number of xstats based on the current config */
3198 static unsigned
3199 ixgbe_xstats_calc_num(void) {
3200         return IXGBE_NB_HW_STATS + IXGBE_NB_MACSEC_STATS +
3201                 (IXGBE_NB_RXQ_PRIO_STATS * IXGBE_NB_RXQ_PRIO_VALUES) +
3202                 (IXGBE_NB_TXQ_PRIO_STATS * IXGBE_NB_TXQ_PRIO_VALUES);
3203 }
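/*
 * For illustration, assuming IXGBE_NB_RXQ_PRIO_VALUES and
 * IXGBE_NB_TXQ_PRIO_VALUES are both 8 (one slot per 802.1p priority),
 * every per-priority counter contributes eight xstats entries on top of
 * the flat HW and MACsec statistics.
 */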
3204
3205 static int ixgbe_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
3206         struct rte_eth_xstat_name *xstats_names, __rte_unused unsigned int size)
3207 {
3208         const unsigned cnt_stats = ixgbe_xstats_calc_num();
3209         unsigned stat, i, count;
3210
3211         if (xstats_names != NULL) {
3212                 count = 0;
3213
3214                 /* Note: limit >= cnt_stats checked upstream
3215                  * in rte_eth_xstats_get_names()
3216                  */
3217
3218                 /* Extended stats from ixgbe_hw_stats */
3219                 for (i = 0; i < IXGBE_NB_HW_STATS; i++) {
3220                         snprintf(xstats_names[count].name,
3221                                 sizeof(xstats_names[count].name),
3222                                 "%s",
3223                                 rte_ixgbe_stats_strings[i].name);
3224                         count++;
3225                 }
3226
3227                 /* MACsec Stats */
3228                 for (i = 0; i < IXGBE_NB_MACSEC_STATS; i++) {
3229                         snprintf(xstats_names[count].name,
3230                                 sizeof(xstats_names[count].name),
3231                                 "%s",
3232                                 rte_ixgbe_macsec_strings[i].name);
3233                         count++;
3234                 }
3235
3236                 /* RX Priority Stats */
3237                 for (stat = 0; stat < IXGBE_NB_RXQ_PRIO_STATS; stat++) {
3238                         for (i = 0; i < IXGBE_NB_RXQ_PRIO_VALUES; i++) {
3239                                 snprintf(xstats_names[count].name,
3240                                         sizeof(xstats_names[count].name),
3241                                         "rx_priority%u_%s", i,
3242                                         rte_ixgbe_rxq_strings[stat].name);
3243                                 count++;
3244                         }
3245                 }
3246
3247                 /* TX Priority Stats */
3248                 for (stat = 0; stat < IXGBE_NB_TXQ_PRIO_STATS; stat++) {
3249                         for (i = 0; i < IXGBE_NB_TXQ_PRIO_VALUES; i++) {
3250                                 snprintf(xstats_names[count].name,
3251                                         sizeof(xstats_names[count].name),
3252                                         "tx_priority%u_%s", i,
3253                                         rte_ixgbe_txq_strings[stat].name);
3254                                 count++;
3255                         }
3256                 }
3257         }
3258         return cnt_stats;
3259 }
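/*
 * Typical caller pattern via the generic rte_ethdev API (allocation and
 * error handling elided): query the required count first, then fetch the
 * names:
 *
 *     int n = rte_eth_xstats_get_names(port_id, NULL, 0);
 *     struct rte_eth_xstat_name *names = malloc(n * sizeof(*names));
 *     rte_eth_xstats_get_names(port_id, names, n);
 */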
3260
3261 static int ixgbe_dev_xstats_get_names_by_id(
3262         struct rte_eth_dev *dev,
3263         struct rte_eth_xstat_name *xstats_names,
3264         const uint64_t *ids,
3265         unsigned int limit)
3266 {
3267         if (!ids) {
3268                 const unsigned int cnt_stats = ixgbe_xstats_calc_num();
3269                 unsigned int stat, i, count;
3270
3271                 if (xstats_names != NULL) {
3272                         count = 0;
3273
3274                         /* Note: limit >= cnt_stats checked upstream
3275                          * in rte_eth_xstats_get_names()
3276                          */
3277
3278                         /* Extended stats from ixgbe_hw_stats */
3279                         for (i = 0; i < IXGBE_NB_HW_STATS; i++) {
3280                                 snprintf(xstats_names[count].name,
3281                                         sizeof(xstats_names[count].name),
3282                                         "%s",
3283                                         rte_ixgbe_stats_strings[i].name);
3284                                 count++;
3285                         }
3286
3287                         /* MACsec Stats */
3288                         for (i = 0; i < IXGBE_NB_MACSEC_STATS; i++) {
3289                                 snprintf(xstats_names[count].name,
3290                                         sizeof(xstats_names[count].name),
3291                                         "%s",
3292                                         rte_ixgbe_macsec_strings[i].name);
3293                                 count++;
3294                         }
3295
3296                         /* RX Priority Stats */
3297                         for (stat = 0; stat < IXGBE_NB_RXQ_PRIO_STATS; stat++) {
3298                                 for (i = 0; i < IXGBE_NB_RXQ_PRIO_VALUES; i++) {
3299                                         snprintf(xstats_names[count].name,
3300                                             sizeof(xstats_names[count].name),
3301                                             "rx_priority%u_%s", i,
3302                                             rte_ixgbe_rxq_strings[stat].name);
3303                                         count++;
3304                                 }
3305                         }
3306
3307                         /* TX Priority Stats */
3308                         for (stat = 0; stat < IXGBE_NB_TXQ_PRIO_STATS; stat++) {
3309                                 for (i = 0; i < IXGBE_NB_TXQ_PRIO_VALUES; i++) {
3310                                         snprintf(xstats_names[count].name,
3311                                             sizeof(xstats_names[count].name),
3312                                             "tx_priority%u_%s", i,
3313                                             rte_ixgbe_txq_strings[stat].name);
3314                                         count++;
3315                                 }
3316                         }
3317                 }
3318                 return cnt_stats;
3319         }
3320
3321         uint16_t i;
3322         uint16_t size = ixgbe_xstats_calc_num();
3323         struct rte_eth_xstat_name xstats_names_copy[size];
3324
3325         ixgbe_dev_xstats_get_names_by_id(dev, xstats_names_copy, NULL,
3326                         size);
3327
3328         for (i = 0; i < limit; i++) {
3329                 if (ids[i] >= size) {
3330                         PMD_INIT_LOG(ERR, "id value isn't valid");
3331                         return -1;
3332                 }
3333                 strcpy(xstats_names[i].name,
3334                                 xstats_names_copy[ids[i]].name);
3335         }
3336         return limit;
3337 }
3338
3339 static int ixgbevf_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
3340         struct rte_eth_xstat_name *xstats_names, unsigned limit)
3341 {
3342         unsigned i;
3343
3344         if (limit < IXGBEVF_NB_XSTATS && xstats_names != NULL)
3345                 return -ENOMEM;
3346
3347         if (xstats_names != NULL)
3348                 for (i = 0; i < IXGBEVF_NB_XSTATS; i++)
3349                         snprintf(xstats_names[i].name,
3350                                 sizeof(xstats_names[i].name),
3351                                 "%s", rte_ixgbevf_stats_strings[i].name);
3352         return IXGBEVF_NB_XSTATS;
3353 }
3354
3355 static int
3356 ixgbe_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
3357                                          unsigned n)
3358 {
3359         struct ixgbe_hw *hw =
3360                         IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3361         struct ixgbe_hw_stats *hw_stats =
3362                         IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
3363         struct ixgbe_macsec_stats *macsec_stats =
3364                         IXGBE_DEV_PRIVATE_TO_MACSEC_STATS(
3365                                 dev->data->dev_private);
3366         uint64_t total_missed_rx, total_qbrc, total_qprc, total_qprdc;
3367         unsigned i, stat, count = 0;
3368
3369         count = ixgbe_xstats_calc_num();
3370
3371         if (n < count)
3372                 return count;
3373
3374         total_missed_rx = 0;
3375         total_qbrc = 0;
3376         total_qprc = 0;
3377         total_qprdc = 0;
3378
3379         ixgbe_read_stats_registers(hw, hw_stats, macsec_stats, &total_missed_rx,
3380                         &total_qbrc, &total_qprc, &total_qprdc);
3381
3382         /* If this is a reset, xstats is NULL, and we have cleared the
3383          * registers by reading them.
3384          */
3385         if (!xstats)
3386                 return 0;
3387
3388         /* Extended stats from ixgbe_hw_stats */
3389         count = 0;
3390         for (i = 0; i < IXGBE_NB_HW_STATS; i++) {
3391                 xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
3392                                 rte_ixgbe_stats_strings[i].offset);
3393                 xstats[count].id = count;
3394                 count++;
3395         }
3396
3397         /* MACsec Stats */
3398         for (i = 0; i < IXGBE_NB_MACSEC_STATS; i++) {
3399                 xstats[count].value = *(uint64_t *)(((char *)macsec_stats) +
3400                                 rte_ixgbe_macsec_strings[i].offset);
3401                 xstats[count].id = count;
3402                 count++;
3403         }
3404
3405         /* RX Priority Stats */
3406         for (stat = 0; stat < IXGBE_NB_RXQ_PRIO_STATS; stat++) {
3407                 for (i = 0; i < IXGBE_NB_RXQ_PRIO_VALUES; i++) {
3408                         xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
3409                                         rte_ixgbe_rxq_strings[stat].offset +
3410                                         (sizeof(uint64_t) * i));
3411                         xstats[count].id = count;
3412                         count++;
3413                 }
3414         }
3415
3416         /* TX Priority Stats */
3417         for (stat = 0; stat < IXGBE_NB_TXQ_PRIO_STATS; stat++) {
3418                 for (i = 0; i < IXGBE_NB_TXQ_PRIO_VALUES; i++) {
3419                         xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
3420                                         rte_ixgbe_txq_strings[stat].offset +
3421                                         (sizeof(uint64_t) * i));
3422                         xstats[count].id = count;
3423                         count++;
3424                 }
3425         }
3426         return count;
3427 }
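/*
 * The values above are written in the same order in which
 * ixgbe_dev_xstats_get_names() reports the names (HW stats, MACsec,
 * per-priority Rx, per-priority Tx), and xstats[i].id == i, so names and
 * values can be matched by index.
 */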
3428
3429 static int
3430 ixgbe_dev_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
3431                 uint64_t *values, unsigned int n)
3432 {
3433         if (!ids) {
3434                 struct ixgbe_hw *hw =
3435                                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3436                 struct ixgbe_hw_stats *hw_stats =
3437                                 IXGBE_DEV_PRIVATE_TO_STATS(
3438                                                 dev->data->dev_private);
3439                 struct ixgbe_macsec_stats *macsec_stats =
3440                                 IXGBE_DEV_PRIVATE_TO_MACSEC_STATS(
3441                                         dev->data->dev_private);
3442                 uint64_t total_missed_rx, total_qbrc, total_qprc, total_qprdc;
3443                 unsigned int i, stat, count = 0;
3444
3445                 count = ixgbe_xstats_calc_num();
3446
3447                 if (!ids && n < count)
3448                         return count;
3449
3450                 total_missed_rx = 0;
3451                 total_qbrc = 0;
3452                 total_qprc = 0;
3453                 total_qprdc = 0;
3454
3455                 ixgbe_read_stats_registers(hw, hw_stats, macsec_stats,
3456                                 &total_missed_rx, &total_qbrc, &total_qprc,
3457                                 &total_qprdc);
3458
3459                 /* If this is a reset, values is NULL, and we have cleared the
3460                  * registers by reading them.
3461                  */
3462                 if (!ids && !values)
3463                         return 0;
3464
3465                 /* Extended stats from ixgbe_hw_stats */
3466                 count = 0;
3467                 for (i = 0; i < IXGBE_NB_HW_STATS; i++) {
3468                         values[count] = *(uint64_t *)(((char *)hw_stats) +
3469                                         rte_ixgbe_stats_strings[i].offset);
3470                         count++;
3471                 }
3472
3473                 /* MACsec Stats */
3474                 for (i = 0; i < IXGBE_NB_MACSEC_STATS; i++) {
3475                         values[count] = *(uint64_t *)(((char *)macsec_stats) +
3476                                         rte_ixgbe_macsec_strings[i].offset);
3477                         count++;
3478                 }
3479
3480                 /* RX Priority Stats */
3481                 for (stat = 0; stat < IXGBE_NB_RXQ_PRIO_STATS; stat++) {
3482                         for (i = 0; i < IXGBE_NB_RXQ_PRIO_VALUES; i++) {
3483                                 values[count] =
3484                                         *(uint64_t *)(((char *)hw_stats) +
3485                                         rte_ixgbe_rxq_strings[stat].offset +
3486                                         (sizeof(uint64_t) * i));
3487                                 count++;
3488                         }
3489                 }
3490
3491                 /* TX Priority Stats */
3492                 for (stat = 0; stat < IXGBE_NB_TXQ_PRIO_STATS; stat++) {
3493                         for (i = 0; i < IXGBE_NB_TXQ_PRIO_VALUES; i++) {
3494                                 values[count] =
3495                                         *(uint64_t *)(((char *)hw_stats) +
3496                                         rte_ixgbe_txq_strings[stat].offset +
3497                                         (sizeof(uint64_t) * i));
3498                                 count++;
3499                         }
3500                 }
3501                 return count;
3502         }
3503
3504         uint16_t i;
3505         uint16_t size = ixgbe_xstats_calc_num();
3506         uint64_t values_copy[size];
3507
3508         ixgbe_dev_xstats_get_by_id(dev, NULL, values_copy, size);
3509
3510         for (i = 0; i < n; i++) {
3511                 if (ids[i] >= size) {
3512                         PMD_INIT_LOG(ERR, "id value isn't valid");
3513                         return -1;
3514                 }
3515                 values[i] = values_copy[ids[i]];
3516         }
3517         return n;
3518 }
3519
3520 static void
3521 ixgbe_dev_xstats_reset(struct rte_eth_dev *dev)
3522 {
3523         struct ixgbe_hw_stats *stats =
3524                         IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
3525         struct ixgbe_macsec_stats *macsec_stats =
3526                         IXGBE_DEV_PRIVATE_TO_MACSEC_STATS(
3527                                 dev->data->dev_private);
3528
3529         unsigned count = ixgbe_xstats_calc_num();
3530
3531         /* HW registers are cleared on read */
3532         ixgbe_dev_xstats_get(dev, NULL, count);
3533
3534         /* Reset software totals */
3535         memset(stats, 0, sizeof(*stats));
3536         memset(macsec_stats, 0, sizeof(*macsec_stats));
3537 }
3538
3539 static void
3540 ixgbevf_update_stats(struct rte_eth_dev *dev)
3541 {
3542         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3543         struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats *)
3544                           IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
3545
3546         /* Good Rx packets, including VF loopback */
3547         UPDATE_VF_STAT(IXGBE_VFGPRC,
3548             hw_stats->last_vfgprc, hw_stats->vfgprc);
3549
3550         /* Good Rx octets, including VF loopback */
3551         UPDATE_VF_STAT_36BIT(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB,
3552             hw_stats->last_vfgorc, hw_stats->vfgorc);
3553
3554         /* Good Tx packets, including VF loopback */
3555         UPDATE_VF_STAT(IXGBE_VFGPTC,
3556             hw_stats->last_vfgptc, hw_stats->vfgptc);
3557
3558         /* Good Tx octets, including VF loopback */
3559         UPDATE_VF_STAT_36BIT(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB,
3560             hw_stats->last_vfgotc, hw_stats->vfgotc);
3561
3562         /* Rx Multicast packets */
3563         UPDATE_VF_STAT(IXGBE_VFMPRC,
3564             hw_stats->last_vfmprc, hw_stats->vfmprc);
3565 }
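/*
 * The UPDATE_VF_STAT and UPDATE_VF_STAT_36BIT macros (see ixgbe_ethdev.h)
 * accumulate the delta between the current register value and the stored
 * last_* snapshot, so the narrow 32-bit (and split 36-bit) VF counters
 * keep counting correctly across hardware wraparound.
 */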
3566
3567 static int
3568 ixgbevf_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
3569                        unsigned n)
3570 {
3571         struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats *)
3572                         IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
3573         unsigned i;
3574
3575         if (n < IXGBEVF_NB_XSTATS)
3576                 return IXGBEVF_NB_XSTATS;
3577
3578         ixgbevf_update_stats(dev);
3579
3580         if (!xstats)
3581                 return 0;
3582
3583         /* Extended stats */
3584         for (i = 0; i < IXGBEVF_NB_XSTATS; i++) {
3585                 xstats[i].id = i;
3586                 xstats[i].value = *(uint64_t *)(((char *)hw_stats) +
3587                         rte_ixgbevf_stats_strings[i].offset);
3588         }
3589
3590         return IXGBEVF_NB_XSTATS;
3591 }
3592
3593 static int
3594 ixgbevf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
3595 {
3596         struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats *)
3597                           IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
3598
3599         ixgbevf_update_stats(dev);
3600
3601         if (stats == NULL)
3602                 return -EINVAL;
3603
3604         stats->ipackets = hw_stats->vfgprc;
3605         stats->ibytes = hw_stats->vfgorc;
3606         stats->opackets = hw_stats->vfgptc;
3607         stats->obytes = hw_stats->vfgotc;
3608         return 0;
3609 }
3610
3611 static void
3612 ixgbevf_dev_stats_reset(struct rte_eth_dev *dev)
3613 {
3614         struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats *)
3615                         IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
3616
3617         /* Sync HW register to the last stats */
3618         ixgbevf_dev_stats_get(dev, NULL);
3619
3620         /* reset HW current stats */
3621         hw_stats->vfgprc = 0;
3622         hw_stats->vfgorc = 0;
3623         hw_stats->vfgptc = 0;
3624         hw_stats->vfgotc = 0;
3625 }
3626
3627 static int
3628 ixgbe_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
3629 {
3630         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3631         u16 eeprom_verh, eeprom_verl;
3632         u32 etrack_id;
3633         int ret;
3634
3635         ixgbe_read_eeprom(hw, 0x2e, &eeprom_verh);
3636         ixgbe_read_eeprom(hw, 0x2d, &eeprom_verl);
3637
3638         etrack_id = (eeprom_verh << 16) | eeprom_verl;
3639         ret = snprintf(fw_version, fw_size, "0x%08x", etrack_id);
3640
3641         ret += 1; /* add the size of '\0' */
3642         if (fw_size < (u32)ret)
3643                 return ret;
3644         else
3645                 return 0;
3646 }
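/*
 * Worked example (hypothetical EEPROM words): eeprom_verh = 0x8000 and
 * eeprom_verl = 0x0001 yield etrack_id = 0x80000001, formatted as the
 * 10-character string "0x80000001". Including the terminating '\0' the
 * caller must supply fw_size >= 11; otherwise the required size is
 * returned instead of 0.
 */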
3647
3648 static void
3649 ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
3650 {
3651         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
3652         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3653         struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
3654
3655         dev_info->pci_dev = pci_dev;
3656         dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues;
3657         dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues;
3658         if (RTE_ETH_DEV_SRIOV(dev).active == 0) {
3659                 /*
3660                  * When DCB/VT is off, maximum number of queues changes,
3661                  * except for 82598EB, which remains constant.
3662                  */
3663                 if (dev_conf->txmode.mq_mode == ETH_MQ_TX_NONE &&
3664                                 hw->mac.type != ixgbe_mac_82598EB)
3665                         dev_info->max_tx_queues = IXGBE_NONE_MODE_TX_NB_QUEUES;
3666         }
3667         dev_info->min_rx_bufsize = 1024; /* cf BSIZEPACKET in SRRCTL register */
3668         dev_info->max_rx_pktlen = 15872; /* includes CRC, cf MAXFRS register */
3669         dev_info->max_mac_addrs = hw->mac.num_rar_entries;
3670         dev_info->max_hash_mac_addrs = IXGBE_VMDQ_NUM_UC_MAC;
3671         dev_info->max_vfs = pci_dev->max_vfs;
3672         if (hw->mac.type == ixgbe_mac_82598EB)
3673                 dev_info->max_vmdq_pools = ETH_16_POOLS;
3674         else
3675                 dev_info->max_vmdq_pools = ETH_64_POOLS;
3676         dev_info->vmdq_queue_num = dev_info->max_rx_queues;
3677         dev_info->rx_offload_capa =
3678                 DEV_RX_OFFLOAD_VLAN_STRIP |
3679                 DEV_RX_OFFLOAD_IPV4_CKSUM |
3680                 DEV_RX_OFFLOAD_UDP_CKSUM  |
3681                 DEV_RX_OFFLOAD_TCP_CKSUM;
3682
3683         /*
3684          * RSC is only supported by 82599 and x540 PF devices in a non-SR-IOV
3685          * mode.
3686          */
3687         if ((hw->mac.type == ixgbe_mac_82599EB ||
3688              hw->mac.type == ixgbe_mac_X540) &&
3689             !RTE_ETH_DEV_SRIOV(dev).active)
3690                 dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_TCP_LRO;
3691
3692         if (hw->mac.type == ixgbe_mac_82599EB ||
3693             hw->mac.type == ixgbe_mac_X540)
3694                 dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_MACSEC_STRIP;
3695
3696         if (hw->mac.type == ixgbe_mac_X550 ||
3697             hw->mac.type == ixgbe_mac_X550EM_x ||
3698             hw->mac.type == ixgbe_mac_X550EM_a)
3699                 dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM;
3700
3701         dev_info->tx_offload_capa =
3702                 DEV_TX_OFFLOAD_VLAN_INSERT |
3703                 DEV_TX_OFFLOAD_IPV4_CKSUM  |
3704                 DEV_TX_OFFLOAD_UDP_CKSUM   |
3705                 DEV_TX_OFFLOAD_TCP_CKSUM   |
3706                 DEV_TX_OFFLOAD_SCTP_CKSUM  |
3707                 DEV_TX_OFFLOAD_TCP_TSO;
3708
3709         if (hw->mac.type == ixgbe_mac_82599EB ||
3710             hw->mac.type == ixgbe_mac_X540)
3711                 dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_MACSEC_INSERT;
3712
3713         if (hw->mac.type == ixgbe_mac_X550 ||
3714             hw->mac.type == ixgbe_mac_X550EM_x ||
3715             hw->mac.type == ixgbe_mac_X550EM_a)
3716                 dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
3717
3718 #ifdef RTE_LIBRTE_SECURITY
3719         dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_SECURITY;
3720         dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_SECURITY;
3721 #endif
3722
3723         dev_info->default_rxconf = (struct rte_eth_rxconf) {
3724                 .rx_thresh = {
3725                         .pthresh = IXGBE_DEFAULT_RX_PTHRESH,
3726                         .hthresh = IXGBE_DEFAULT_RX_HTHRESH,
3727                         .wthresh = IXGBE_DEFAULT_RX_WTHRESH,
3728                 },
3729                 .rx_free_thresh = IXGBE_DEFAULT_RX_FREE_THRESH,
3730                 .rx_drop_en = 0,
3731         };
3732
3733         dev_info->default_txconf = (struct rte_eth_txconf) {
3734                 .tx_thresh = {
3735                         .pthresh = IXGBE_DEFAULT_TX_PTHRESH,
3736                         .hthresh = IXGBE_DEFAULT_TX_HTHRESH,
3737                         .wthresh = IXGBE_DEFAULT_TX_WTHRESH,
3738                 },
3739                 .tx_free_thresh = IXGBE_DEFAULT_TX_FREE_THRESH,
3740                 .tx_rs_thresh = IXGBE_DEFAULT_TX_RSBIT_THRESH,
3741                 .txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
3742                                 ETH_TXQ_FLAGS_NOOFFLOADS,
3743         };
3744
3745         dev_info->rx_desc_lim = rx_desc_lim;
3746         dev_info->tx_desc_lim = tx_desc_lim;
3747
3748         dev_info->hash_key_size = IXGBE_HKEY_MAX_INDEX * sizeof(uint32_t);
3749         dev_info->reta_size = ixgbe_reta_size_get(hw->mac.type);
3750         dev_info->flow_type_rss_offloads = IXGBE_RSS_OFFLOAD_ALL;
3751
3752         dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G;
3753         if (hw->mac.type == ixgbe_mac_X540 ||
3754             hw->mac.type == ixgbe_mac_X540_vf ||
3755             hw->mac.type == ixgbe_mac_X550 ||
3756             hw->mac.type == ixgbe_mac_X550_vf) {
3757                 dev_info->speed_capa |= ETH_LINK_SPEED_100M;
3758         }
3759         if (hw->mac.type == ixgbe_mac_X550) {
3760                 dev_info->speed_capa |= ETH_LINK_SPEED_2_5G;
3761                 dev_info->speed_capa |= ETH_LINK_SPEED_5G;
3762         }
3763 }
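/*
 * Applications retrieve this information through the generic API, e.g.:
 *
 *     struct rte_eth_dev_info dev_info;
 *     rte_eth_dev_info_get(port_id, &dev_info);
 *
 * and can then size queue setup against max_rx_queues, rx_offload_capa,
 * speed_capa, etc.
 */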
3764
3765 static const uint32_t *
3766 ixgbe_dev_supported_ptypes_get(struct rte_eth_dev *dev)
3767 {
3768         static const uint32_t ptypes[] = {
3769                 /* For non-vec functions,
3770                  * refers to ixgbe_rxd_pkt_info_to_pkt_type();
3771                  * for vec functions,
3772                  * refers to _recv_raw_pkts_vec().
3773                  */
3774                 RTE_PTYPE_L2_ETHER,
3775                 RTE_PTYPE_L3_IPV4,
3776                 RTE_PTYPE_L3_IPV4_EXT,
3777                 RTE_PTYPE_L3_IPV6,
3778                 RTE_PTYPE_L3_IPV6_EXT,
3779                 RTE_PTYPE_L4_SCTP,
3780                 RTE_PTYPE_L4_TCP,
3781                 RTE_PTYPE_L4_UDP,
3782                 RTE_PTYPE_TUNNEL_IP,
3783                 RTE_PTYPE_INNER_L3_IPV6,
3784                 RTE_PTYPE_INNER_L3_IPV6_EXT,
3785                 RTE_PTYPE_INNER_L4_TCP,
3786                 RTE_PTYPE_INNER_L4_UDP,
3787                 RTE_PTYPE_UNKNOWN
3788         };
3789
3790         if (dev->rx_pkt_burst == ixgbe_recv_pkts ||
3791             dev->rx_pkt_burst == ixgbe_recv_pkts_lro_single_alloc ||
3792             dev->rx_pkt_burst == ixgbe_recv_pkts_lro_bulk_alloc ||
3793             dev->rx_pkt_burst == ixgbe_recv_pkts_bulk_alloc)
3794                 return ptypes;
3795
3796 #if defined(RTE_ARCH_X86)
3797         if (dev->rx_pkt_burst == ixgbe_recv_pkts_vec ||
3798             dev->rx_pkt_burst == ixgbe_recv_scattered_pkts_vec)
3799                 return ptypes;
3800 #endif
3801         return NULL;
3802 }
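
/*
 * A minimal sketch of how an application could query the list returned
 * above (illustrative only; `port_id` is assumed to be a configured port):
 *
 *     uint32_t ptypes[16];
 *     int i, n;
 *
 *     n = rte_eth_dev_get_supported_ptypes(port_id, RTE_PTYPE_ALL_MASK,
 *                                          ptypes, 16);
 *     for (i = 0; i < n && i < 16; i++)
 *             if (ptypes[i] == RTE_PTYPE_L4_SCTP)
 *                     printf("port %u recognizes SCTP\n", port_id);
 */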
3803
3804 static void
3805 ixgbevf_dev_info_get(struct rte_eth_dev *dev,
3806                      struct rte_eth_dev_info *dev_info)
3807 {
3808         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
3809         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3810
3811         dev_info->pci_dev = pci_dev;
3812         dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues;
3813         dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues;
3814         dev_info->min_rx_bufsize = 1024; /* cf BSIZEPACKET in SRRCTL reg */
3815         dev_info->max_rx_pktlen = 9728; /* includes CRC, cf MAXFRS reg */
3816         dev_info->max_mac_addrs = hw->mac.num_rar_entries;
3817         dev_info->max_hash_mac_addrs = IXGBE_VMDQ_NUM_UC_MAC;
3818         dev_info->max_vfs = pci_dev->max_vfs;
3819         if (hw->mac.type == ixgbe_mac_82598EB)
3820                 dev_info->max_vmdq_pools = ETH_16_POOLS;
3821         else
3822                 dev_info->max_vmdq_pools = ETH_64_POOLS;
3823         dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP |
3824                                 DEV_RX_OFFLOAD_IPV4_CKSUM |
3825                                 DEV_RX_OFFLOAD_UDP_CKSUM  |
3826                                 DEV_RX_OFFLOAD_TCP_CKSUM;
3827         dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT |
3828                                 DEV_TX_OFFLOAD_IPV4_CKSUM  |
3829                                 DEV_TX_OFFLOAD_UDP_CKSUM   |
3830                                 DEV_TX_OFFLOAD_TCP_CKSUM   |
3831                                 DEV_TX_OFFLOAD_SCTP_CKSUM  |
3832                                 DEV_TX_OFFLOAD_TCP_TSO;
3833
3834         dev_info->default_rxconf = (struct rte_eth_rxconf) {
3835                 .rx_thresh = {
3836                         .pthresh = IXGBE_DEFAULT_RX_PTHRESH,
3837                         .hthresh = IXGBE_DEFAULT_RX_HTHRESH,
3838                         .wthresh = IXGBE_DEFAULT_RX_WTHRESH,
3839                 },
3840                 .rx_free_thresh = IXGBE_DEFAULT_RX_FREE_THRESH,
3841                 .rx_drop_en = 0,
3842         };
3843
3844         dev_info->default_txconf = (struct rte_eth_txconf) {
3845                 .tx_thresh = {
3846                         .pthresh = IXGBE_DEFAULT_TX_PTHRESH,
3847                         .hthresh = IXGBE_DEFAULT_TX_HTHRESH,
3848                         .wthresh = IXGBE_DEFAULT_TX_WTHRESH,
3849                 },
3850                 .tx_free_thresh = IXGBE_DEFAULT_TX_FREE_THRESH,
3851                 .tx_rs_thresh = IXGBE_DEFAULT_TX_RSBIT_THRESH,
3852                 .txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
3853                                 ETH_TXQ_FLAGS_NOOFFLOADS,
3854         };
3855
3856         dev_info->rx_desc_lim = rx_desc_lim;
3857         dev_info->tx_desc_lim = tx_desc_lim;
3858 }
3859
3860 static int
3861 ixgbevf_check_link(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
3862                    int *link_up, int wait_to_complete)
3863 {
3864         /**
3865          * For a quick link status check (wait_to_complete == 0),
3866          * skip the PF link status check.
3867          */
3868         bool no_pflink_check = wait_to_complete == 0;
3869         struct ixgbe_mbx_info *mbx = &hw->mbx;
3870         struct ixgbe_mac_info *mac = &hw->mac;
3871         uint32_t links_reg, in_msg;
3872         int ret_val = 0;
3873
3874         /* If we were hit with a reset drop the link */
3875         if (!mbx->ops.check_for_rst(hw, 0) || !mbx->timeout)
3876                 mac->get_link_status = true;
3877
3878         if (!mac->get_link_status)
3879                 goto out;
3880
3881         /* if link status is down, there is no point in checking whether the PF is up */
3882         links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
3883         if (!(links_reg & IXGBE_LINKS_UP))
3884                 goto out;
3885
3886         /* for SFP+ modules and DA cables on 82599 it can take up to 500usecs
3887          * before the link status is correct
3888          */
3889         if (mac->type == ixgbe_mac_82599_vf) {
3890                 int i;
3891
3892                 for (i = 0; i < 5; i++) {
3893                         rte_delay_us(100);
3894                         links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
3895
3896                         if (!(links_reg & IXGBE_LINKS_UP))
3897                                 goto out;
3898                 }
3899         }
3900
3901         switch (links_reg & IXGBE_LINKS_SPEED_82599) {
3902         case IXGBE_LINKS_SPEED_10G_82599:
3903                 *speed = IXGBE_LINK_SPEED_10GB_FULL;
3904                 if (hw->mac.type >= ixgbe_mac_X550) {
3905                         if (links_reg & IXGBE_LINKS_SPEED_NON_STD)
3906                                 *speed = IXGBE_LINK_SPEED_2_5GB_FULL;
3907                 }
3908                 break;
3909         case IXGBE_LINKS_SPEED_1G_82599:
3910                 *speed = IXGBE_LINK_SPEED_1GB_FULL;
3911                 break;
3912         case IXGBE_LINKS_SPEED_100_82599:
3913                 *speed = IXGBE_LINK_SPEED_100_FULL;
3914                 if (hw->mac.type == ixgbe_mac_X550) {
3915                         if (links_reg & IXGBE_LINKS_SPEED_NON_STD)
3916                                 *speed = IXGBE_LINK_SPEED_5GB_FULL;
3917                 }
3918                 break;
3919         case IXGBE_LINKS_SPEED_10_X550EM_A:
3920                 *speed = IXGBE_LINK_SPEED_UNKNOWN;
3921                 /* This speed value is reserved on older MACs */
3922                 if (hw->mac.type >= ixgbe_mac_X550)
3923                         *speed = IXGBE_LINK_SPEED_10_FULL;
3924                 break;
3925         default:
3926                 *speed = IXGBE_LINK_SPEED_UNKNOWN;
3927         }
3928
3929         if (no_pflink_check) {
3930                 if (*speed == IXGBE_LINK_SPEED_UNKNOWN)
3931                         mac->get_link_status = true;
3932                 else
3933                         mac->get_link_status = false;
3934
3935                 goto out;
3936         }
3937         /* if the read failed it could just be a mailbox collision, best wait
3938          * until we are called again and don't report an error
3939          */
3940         if (mbx->ops.read(hw, &in_msg, 1, 0))
3941                 goto out;
3942
3943         if (!(in_msg & IXGBE_VT_MSGTYPE_CTS)) {
3944                 /* msg is not CTS; if it is a NACK we must have lost CTS status */
3945                 if (in_msg & IXGBE_VT_MSGTYPE_NACK)
3946                         ret_val = -1;
3947                 goto out;
3948         }
3949
3950         /* the pf is talking, if we timed out in the past we reinit */
3951         if (!mbx->timeout) {
3952                 ret_val = -1;
3953                 goto out;
3954         }
3955
3956         /* if we passed all the tests above then the link is up and we no
3957          * longer need to check for link
3958          */
3959         mac->get_link_status = false;
3960
3961 out:
3962         *link_up = !mac->get_link_status;
3963         return ret_val;
3964 }
3965
3966 /* return 0 if the link status changed, -1 if it did not change */
3967 static int
3968 ixgbe_dev_link_update_share(struct rte_eth_dev *dev,
3969                             int wait_to_complete, int vf)
3970 {
3971         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3972         struct rte_eth_link link, old;
3973         ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
3974         struct ixgbe_interrupt *intr =
3975                 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
3976         int link_up;
3977         int diag;
3978         u32 speed = 0;
3979         int wait = 1;
3980         bool autoneg = false;
3981
3982         link.link_status = ETH_LINK_DOWN;
3983         link.link_speed = 0;
3984         link.link_duplex = ETH_LINK_HALF_DUPLEX;
3985         link.link_autoneg = ETH_LINK_AUTONEG;
3986         memset(&old, 0, sizeof(old));
3987         rte_ixgbe_dev_atomic_read_link_status(dev, &old);
3988
3989         hw->mac.get_link_status = true;
3990
3991         if ((intr->flags & IXGBE_FLAG_NEED_LINK_CONFIG) &&
3992                 ixgbe_get_media_type(hw) == ixgbe_media_type_fiber) {
3993                 speed = hw->phy.autoneg_advertised;
3994                 if (!speed)
3995                         ixgbe_get_link_capabilities(hw, &speed, &autoneg);
3996                 ixgbe_setup_link(hw, speed, true);
3997         }
3998
3999         /* don't wait to complete if the caller did not ask for it or the LSC interrupt is enabled */
4000         if (wait_to_complete == 0 || dev->data->dev_conf.intr_conf.lsc != 0)
4001                 wait = 0;
4002
4003         if (vf)
4004                 diag = ixgbevf_check_link(hw, &link_speed, &link_up, wait);
4005         else
4006                 diag = ixgbe_check_link(hw, &link_speed, &link_up, wait);
4007
4008         if (diag != 0) {
4009                 link.link_speed = ETH_SPEED_NUM_100M;
4010                 link.link_duplex = ETH_LINK_FULL_DUPLEX;
4011                 rte_ixgbe_dev_atomic_write_link_status(dev, &link);
4012                 if (link.link_status == old.link_status)
4013                         return -1;
4014                 return 0;
4015         }
4016
4017         if (link_up == 0) {
4018                 rte_ixgbe_dev_atomic_write_link_status(dev, &link);
4019                 intr->flags |= IXGBE_FLAG_NEED_LINK_CONFIG;
4020                 if (link.link_status == old.link_status)
4021                         return -1;
4022                 return 0;
4023         }
4024         intr->flags &= ~IXGBE_FLAG_NEED_LINK_CONFIG;
4025         link.link_status = ETH_LINK_UP;
4026         link.link_duplex = ETH_LINK_FULL_DUPLEX;
4027
4028         switch (link_speed) {
4029         default:
4030         case IXGBE_LINK_SPEED_UNKNOWN:
4031                 link.link_duplex = ETH_LINK_FULL_DUPLEX;
4032                 link.link_speed = ETH_SPEED_NUM_100M;
4033                 break;
4034
4035         case IXGBE_LINK_SPEED_100_FULL:
4036                 link.link_speed = ETH_SPEED_NUM_100M;
4037                 break;
4038
4039         case IXGBE_LINK_SPEED_1GB_FULL:
4040                 link.link_speed = ETH_SPEED_NUM_1G;
4041                 break;
4042
4043         case IXGBE_LINK_SPEED_2_5GB_FULL:
4044                 link.link_speed = ETH_SPEED_NUM_2_5G;
4045                 break;
4046
4047         case IXGBE_LINK_SPEED_5GB_FULL:
4048                 link.link_speed = ETH_SPEED_NUM_5G;
4049                 break;
4050
4051         case IXGBE_LINK_SPEED_10GB_FULL:
4052                 link.link_speed = ETH_SPEED_NUM_10G;
4053                 break;
4054         }
4055         rte_ixgbe_dev_atomic_write_link_status(dev, &link);
4056
4057         if (link.link_status == old.link_status)
4058                 return -1;
4059
4060         return 0;
4061 }
4062
4063 static int
4064 ixgbe_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
4065 {
4066         return ixgbe_dev_link_update_share(dev, wait_to_complete, 0);
4067 }
4068
4069 static int
4070 ixgbevf_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
4071 {
4072         return ixgbe_dev_link_update_share(dev, wait_to_complete, 1);
4073 }
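
/*
 * From the application side both update paths are reached through the
 * generic link API; a minimal polling sketch (illustrative only):
 *
 *     struct rte_eth_link link;
 *
 *     rte_eth_link_get_nowait(port_id, &link);   // wait_to_complete == 0
 *     if (link.link_status == ETH_LINK_UP)
 *             printf("port %u: %u Mbps, %s\n", port_id, link.link_speed,
 *                    link.link_duplex == ETH_LINK_FULL_DUPLEX ?
 *                    "full-duplex" : "half-duplex");
 */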
4074
4075 static void
4076 ixgbe_dev_promiscuous_enable(struct rte_eth_dev *dev)
4077 {
4078         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4079         uint32_t fctrl;
4080
4081         fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
4082         fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
4083         IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
4084 }
4085
4086 static void
4087 ixgbe_dev_promiscuous_disable(struct rte_eth_dev *dev)
4088 {
4089         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4090         uint32_t fctrl;
4091
4092         fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
4093         fctrl &= (~IXGBE_FCTRL_UPE);
4094         if (dev->data->all_multicast == 1)
4095                 fctrl |= IXGBE_FCTRL_MPE;
4096         else
4097                 fctrl &= (~IXGBE_FCTRL_MPE);
4098         IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
4099 }
4100
4101 static void
4102 ixgbe_dev_allmulticast_enable(struct rte_eth_dev *dev)
4103 {
4104         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4105         uint32_t fctrl;
4106
4107         fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
4108         fctrl |= IXGBE_FCTRL_MPE;
4109         IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
4110 }
4111
4112 static void
4113 ixgbe_dev_allmulticast_disable(struct rte_eth_dev *dev)
4114 {
4115         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4116         uint32_t fctrl;
4117
4118         if (dev->data->promiscuous == 1)
4119                 return; /* must remain in all_multicast mode */
4120
4121         fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
4122         fctrl &= (~IXGBE_FCTRL_MPE);
4123         IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
4124 }
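
/*
 * These four handlers back the generic filtering controls; a usage sketch
 * (illustrative only, assuming a started port `port_id`):
 *
 *     rte_eth_promiscuous_enable(port_id);    // sets FCTRL.UPE and .MPE
 *     rte_eth_allmulticast_enable(port_id);   // sets FCTRL.MPE only
 *     rte_eth_promiscuous_disable(port_id);   // port stays all-multicast
 */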
4125
4126 /**
4127  * It clears the interrupt causes and enables the interrupt.
4128  * It is called only once, during NIC initialization.
4129  *
4130  * @param dev
4131  *  Pointer to struct rte_eth_dev.
4132  * @param on
4133  *  Enable or Disable.
4134  *
4135  * @return
4136  *  - On success, zero.
4137  *  - On failure, a negative value.
4138  */
4139 static int
4140 ixgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on)
4141 {
4142         struct ixgbe_interrupt *intr =
4143                 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
4144
4145         ixgbe_dev_link_status_print(dev);
4146         if (on)
4147                 intr->mask |= IXGBE_EICR_LSC;
4148         else
4149                 intr->mask &= ~IXGBE_EICR_LSC;
4150
4151         return 0;
4152 }
4153
4154 /**
4155  * It clears the interrupt causes and enables the interrupt.
4156  * It is called only once, during NIC initialization.
4157  *
4158  * @param dev
4159  *  Pointer to struct rte_eth_dev.
4160  *
4161  * @return
4162  *  - On success, zero.
4163  *  - On failure, a negative value.
4164  */
4165 static int
4166 ixgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev)
4167 {
4168         struct ixgbe_interrupt *intr =
4169                 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
4170
4171         intr->mask |= IXGBE_EICR_RTX_QUEUE;
4172
4173         return 0;
4174 }
4175
4176 /**
4177  * It clears the interrupt causes and enables the interrupt.
4178  * It is called only once, during NIC initialization.
4179  *
4180  * @param dev
4181  *  Pointer to struct rte_eth_dev.
4182  *
4183  * @return
4184  *  - On success, zero.
4185  *  - On failure, a negative value.
4186  */
4187 static int
4188 ixgbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev)
4189 {
4190         struct ixgbe_interrupt *intr =
4191                 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
4192
4193         intr->mask |= IXGBE_EICR_LINKSEC;
4194
4195         return 0;
4196 }
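
/*
 * The LSC mask set up above is consumed through the ethdev event API; a
 * hedged sketch of how an application might subscribe (`lsc_cb` and
 * `port_id` are hypothetical names):
 *
 *     static int
 *     lsc_cb(uint16_t port_id, enum rte_eth_event_type event,
 *            void *cb_arg, void *ret_param)
 *     {
 *             struct rte_eth_link link;
 *
 *             RTE_SET_USED(event);
 *             RTE_SET_USED(cb_arg);
 *             RTE_SET_USED(ret_param);
 *             rte_eth_link_get_nowait(port_id, &link);
 *             printf("port %u link %s\n", port_id,
 *                    link.link_status ? "up" : "down");
 *             return 0;
 *     }
 *
 *     // with dev_conf.intr_conf.lsc = 1 at configure time:
 *     rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_INTR_LSC,
 *                                   lsc_cb, NULL);
 */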
4197
4198 /*
4199  * It reads ICR and sets flag (IXGBE_EICR_LSC) for the link_update.
4200  *
4201  * @param dev
4202  *  Pointer to struct rte_eth_dev.
4203  *
4204  * @return
4205  *  - On success, zero.
4206  *  - On failure, a negative value.
4207  */
4208 static int
4209 ixgbe_dev_interrupt_get_status(struct rte_eth_dev *dev)
4210 {
4211         uint32_t eicr;
4212         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4213         struct ixgbe_interrupt *intr =
4214                 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
4215
4216         /* clear all cause mask */
4217         ixgbe_disable_intr(hw);
4218
4219         /* read-on-clear nic registers here */
4220         eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
4221         PMD_DRV_LOG(DEBUG, "eicr %x", eicr);
4222
4223         intr->flags = 0;
4224
4225         /* set flag for async link update */
4226         if (eicr & IXGBE_EICR_LSC)
4227                 intr->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
4228
4229         if (eicr & IXGBE_EICR_MAILBOX)
4230                 intr->flags |= IXGBE_FLAG_MAILBOX;
4231
4232         if (eicr & IXGBE_EICR_LINKSEC)
4233                 intr->flags |= IXGBE_FLAG_MACSEC;
4234
4235         if (hw->mac.type ==  ixgbe_mac_X550EM_x &&
4236             hw->phy.type == ixgbe_phy_x550em_ext_t &&
4237             (eicr & IXGBE_EICR_GPI_SDP0_X550EM_x))
4238                 intr->flags |= IXGBE_FLAG_PHY_INTERRUPT;
4239
4240         return 0;
4241 }
4242
4243 /**
4244  * It gets and then prints the link status.
4245  *
4246  * @param dev
4247  *  Pointer to struct rte_eth_dev.
4248  *
4249  * @return
4250  *  void
4252  */
4253 static void
4254 ixgbe_dev_link_status_print(struct rte_eth_dev *dev)
4255 {
4256         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
4257         struct rte_eth_link link;
4258
4259         memset(&link, 0, sizeof(link));
4260         rte_ixgbe_dev_atomic_read_link_status(dev, &link);
4261         if (link.link_status) {
4262                 PMD_INIT_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
4263                                         (int)(dev->data->port_id),
4264                                         (unsigned)link.link_speed,
4265                         link.link_duplex == ETH_LINK_FULL_DUPLEX ?
4266                                         "full-duplex" : "half-duplex");
4267         } else {
4268                 PMD_INIT_LOG(INFO, " Port %d: Link Down",
4269                                 (int)(dev->data->port_id));
4270         }
4271         PMD_INIT_LOG(DEBUG, "PCI Address: " PCI_PRI_FMT,
4272                                 pci_dev->addr.domain,
4273                                 pci_dev->addr.bus,
4274                                 pci_dev->addr.devid,
4275                                 pci_dev->addr.function);
4276 }
4277
4278 /*
4279  * It executes link_update after knowing an interrupt occurred.
4280  *
4281  * @param dev
4282  *  Pointer to struct rte_eth_dev.
4283  *
4284  * @return
4285  *  - On success, zero.
4286  *  - On failure, a negative value.
4287  */
4288 static int
4289 ixgbe_dev_interrupt_action(struct rte_eth_dev *dev,
4290                            struct rte_intr_handle *intr_handle)
4291 {
4292         struct ixgbe_interrupt *intr =
4293                 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
4294         int64_t timeout;
4295         struct rte_eth_link link;
4296         struct ixgbe_hw *hw =
4297                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4298
4299         PMD_DRV_LOG(DEBUG, "intr action type %d", intr->flags);
4300
4301         if (intr->flags & IXGBE_FLAG_MAILBOX) {
4302                 ixgbe_pf_mbx_process(dev);
4303                 intr->flags &= ~IXGBE_FLAG_MAILBOX;
4304         }
4305
4306         if (intr->flags & IXGBE_FLAG_PHY_INTERRUPT) {
4307                 ixgbe_handle_lasi(hw);
4308                 intr->flags &= ~IXGBE_FLAG_PHY_INTERRUPT;
4309         }
4310
4311         if (intr->flags & IXGBE_FLAG_NEED_LINK_UPDATE) {
4312                 /* get the link status before the update, to predict the wait time below */
4313                 memset(&link, 0, sizeof(link));
4314                 rte_ixgbe_dev_atomic_read_link_status(dev, &link);
4315
4316                 ixgbe_dev_link_update(dev, 0);
4317
4318                 /* link is likely to come up */
4319                 if (!link.link_status)
4320                         /* handle it 1 sec later, waiting for it to become stable */
4321                         timeout = IXGBE_LINK_UP_CHECK_TIMEOUT;
4322                 /* link is likely to go down */
4323                 else
4324                         /* handle it 4 sec later, waiting for it to become stable */
4325                         timeout = IXGBE_LINK_DOWN_CHECK_TIMEOUT;
4326
4327                 ixgbe_dev_link_status_print(dev);
4328                 if (rte_eal_alarm_set(timeout * 1000,
4329                                       ixgbe_dev_interrupt_delayed_handler, (void *)dev) < 0)
4330                         PMD_DRV_LOG(ERR, "Error setting alarm");
4331                 else {
4332                         /* remember original mask */
4333                         intr->mask_original = intr->mask;
4334                         /* only disable lsc interrupt */
4335                         intr->mask &= ~IXGBE_EIMS_LSC;
4336                 }
4337         }
4338
4339         PMD_DRV_LOG(DEBUG, "enable intr immediately");
4340         ixgbe_enable_intr(dev);
4341         rte_intr_enable(intr_handle);
4342
4343         return 0;
4344 }
4345
4346 /**
4347  * Interrupt handler registered as an alarm callback for delayed handling
4348  * of a specific interrupt, waiting for the NIC state to become stable. As
4349  * the ixgbe interrupt state is not stable right after the link goes down,
4350  * it needs to wait 4 seconds to get a stable status.
4351  *
4352  * @param handle
4353  *  Pointer to interrupt handle.
4354  * @param param
4355  *  The address of the parameter (struct rte_eth_dev *) registered before.
4356  *
4357  * @return
4358  *  void
4359  */
4360 static void
4361 ixgbe_dev_interrupt_delayed_handler(void *param)
4362 {
4363         struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
4364         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
4365         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
4366         struct ixgbe_interrupt *intr =
4367                 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
4368         struct ixgbe_hw *hw =
4369                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4370         uint32_t eicr;
4371
4372         ixgbe_disable_intr(hw);
4373
4374         eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
4375         if (eicr & IXGBE_EICR_MAILBOX)
4376                 ixgbe_pf_mbx_process(dev);
4377
4378         if (intr->flags & IXGBE_FLAG_PHY_INTERRUPT) {
4379                 ixgbe_handle_lasi(hw);
4380                 intr->flags &= ~IXGBE_FLAG_PHY_INTERRUPT;
4381         }
4382
4383         if (intr->flags & IXGBE_FLAG_NEED_LINK_UPDATE) {
4384                 ixgbe_dev_link_update(dev, 0);
4385                 intr->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE;
4386                 ixgbe_dev_link_status_print(dev);
4387                 _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC,
4388                                               NULL, NULL);
4389         }
4390
4391         if (intr->flags & IXGBE_FLAG_MACSEC) {
4392                 _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_MACSEC,
4393                                               NULL, NULL);
4394                 intr->flags &= ~IXGBE_FLAG_MACSEC;
4395         }
4396
4397         /* restore original mask */
4398         intr->mask = intr->mask_original;
4399         intr->mask_original = 0;
4400
4401         PMD_DRV_LOG(DEBUG, "enable intr in delayed handler S[%08x]", eicr);
4402         ixgbe_enable_intr(dev);
4403         rte_intr_enable(intr_handle);
4404 }
4405
4406 /**
4407  * Interrupt handler triggered by the NIC for handling a
4408  * specific interrupt.
4409  *
4410  * @param handle
4411  *  Pointer to interrupt handle.
4412  * @param param
4413  *  The address of the parameter (struct rte_eth_dev *) registered before.
4414  *
4415  * @return
4416  *  void
4417  */
4418 static void
4419 ixgbe_dev_interrupt_handler(void *param)
4420 {
4421         struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
4422
4423         ixgbe_dev_interrupt_get_status(dev);
4424         ixgbe_dev_interrupt_action(dev, dev->intr_handle);
4425 }
4426
4427 static int
4428 ixgbe_dev_led_on(struct rte_eth_dev *dev)
4429 {
4430         struct ixgbe_hw *hw;
4431
4432         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4433         return ixgbe_led_on(hw, 0) == IXGBE_SUCCESS ? 0 : -ENOTSUP;
4434 }
4435
4436 static int
4437 ixgbe_dev_led_off(struct rte_eth_dev *dev)
4438 {
4439         struct ixgbe_hw *hw;
4440
4441         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4442         return ixgbe_led_off(hw, 0) == IXGBE_SUCCESS ? 0 : -ENOTSUP;
4443 }
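
/*
 * Illustrative usage (a sketch): blink the port LED for identification.
 * `port_id` is assumed valid; rte_delay_ms() comes from rte_cycles.h.
 *
 *     if (rte_eth_led_on(port_id) == 0) {
 *             rte_delay_ms(500);
 *             rte_eth_led_off(port_id);
 *     }                       // -ENOTSUP if the MAC has no LED control
 */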
4444
4445 static int
4446 ixgbe_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
4447 {
4448         struct ixgbe_hw *hw;
4449         uint32_t mflcn_reg;
4450         uint32_t fccfg_reg;
4451         int rx_pause;
4452         int tx_pause;
4453
4454         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4455
4456         fc_conf->pause_time = hw->fc.pause_time;
4457         fc_conf->high_water = hw->fc.high_water[0];
4458         fc_conf->low_water = hw->fc.low_water[0];
4459         fc_conf->send_xon = hw->fc.send_xon;
4460         fc_conf->autoneg = !hw->fc.disable_fc_autoneg;
4461
4462         /*
4463          * Return rx_pause status according to actual setting of
4464          * MFLCN register.
4465          */
4466         mflcn_reg = IXGBE_READ_REG(hw, IXGBE_MFLCN);
4467         if (mflcn_reg & (IXGBE_MFLCN_RPFCE | IXGBE_MFLCN_RFCE))
4468                 rx_pause = 1;
4469         else
4470                 rx_pause = 0;
4471
4472         /*
4473          * Return tx_pause status according to actual setting of
4474          * FCCFG register.
4475          */
4476         fccfg_reg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
4477         if (fccfg_reg & (IXGBE_FCCFG_TFCE_802_3X | IXGBE_FCCFG_TFCE_PRIORITY))
4478                 tx_pause = 1;
4479         else
4480                 tx_pause = 0;
4481
4482         if (rx_pause && tx_pause)
4483                 fc_conf->mode = RTE_FC_FULL;
4484         else if (rx_pause)
4485                 fc_conf->mode = RTE_FC_RX_PAUSE;
4486         else if (tx_pause)
4487                 fc_conf->mode = RTE_FC_TX_PAUSE;
4488         else
4489                 fc_conf->mode = RTE_FC_NONE;
4490
4491         return 0;
4492 }
4493
4494 static int
4495 ixgbe_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
4496 {
4497         struct ixgbe_hw *hw;
4498         int err;
4499         uint32_t rx_buf_size;
4500         uint32_t max_high_water;
4501         uint32_t mflcn;
4502         enum ixgbe_fc_mode rte_fcmode_2_ixgbe_fcmode[] = {
4503                 ixgbe_fc_none,
4504                 ixgbe_fc_rx_pause,
4505                 ixgbe_fc_tx_pause,
4506                 ixgbe_fc_full
4507         };
4508
4509         PMD_INIT_FUNC_TRACE();
4510
4511         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4512         rx_buf_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0));
4513         PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size);
4514
4515         /*
4516          * Reserve at least one Ethernet frame for the high_water/low_water
4517          * watermarks, in kilobytes, for ixgbe
4518          */
4519         max_high_water = (rx_buf_size - ETHER_MAX_LEN) >> IXGBE_RXPBSIZE_SHIFT;
4520         if ((fc_conf->high_water > max_high_water) ||
4521                 (fc_conf->high_water < fc_conf->low_water)) {
4522                 PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB");
4523                 PMD_INIT_LOG(ERR, "High_water must be <= 0x%x", max_high_water);
4524                 return -EINVAL;
4525         }
4526
4527         hw->fc.requested_mode = rte_fcmode_2_ixgbe_fcmode[fc_conf->mode];
4528         hw->fc.pause_time     = fc_conf->pause_time;
4529         hw->fc.high_water[0]  = fc_conf->high_water;
4530         hw->fc.low_water[0]   = fc_conf->low_water;
4531         hw->fc.send_xon       = fc_conf->send_xon;
4532         hw->fc.disable_fc_autoneg = !fc_conf->autoneg;
4533
4534         err = ixgbe_fc_enable(hw);
4535
4536         /* Not negotiated is not an error case */
4537         if ((err == IXGBE_SUCCESS) || (err == IXGBE_ERR_FC_NOT_NEGOTIATED)) {
4538
4539                 /* check if we want to forward MAC frames - driver doesn't have native
4540                  * capability to do that, so we'll write the registers ourselves */
4541
4542                 mflcn = IXGBE_READ_REG(hw, IXGBE_MFLCN);
4543
4544                 /* set or clear MFLCN.PMCF bit depending on configuration */
4545                 if (fc_conf->mac_ctrl_frame_fwd != 0)
4546                         mflcn |= IXGBE_MFLCN_PMCF;
4547                 else
4548                         mflcn &= ~IXGBE_MFLCN_PMCF;
4549
4550                 IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn);
4551                 IXGBE_WRITE_FLUSH(hw);
4552
4553                 return 0;
4554         }
4555
4556         PMD_INIT_LOG(ERR, "ixgbe_fc_enable = 0x%x", err);
4557         return -EIO;
4558 }
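
/*
 * A hedged configuration sketch pairing the two handlers above
 * (illustrative values only; the watermarks are in KB and must respect
 * the rx_buf_size bound checked above):
 *
 *     struct rte_eth_fc_conf fc_conf;
 *
 *     memset(&fc_conf, 0, sizeof(fc_conf));
 *     rte_eth_dev_flow_ctrl_get(port_id, &fc_conf);
 *     fc_conf.mode = RTE_FC_FULL;
 *     fc_conf.high_water = 0x80;
 *     fc_conf.low_water = 0x40;
 *     if (rte_eth_dev_flow_ctrl_set(port_id, &fc_conf) != 0)
 *             printf("flow control settings rejected\n");
 */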
4559
4560 /**
4561  *  ixgbe_pfc_enable_generic - Enable flow control
4562  *  @hw: pointer to hardware structure
4563  *  @tc_num: traffic class number
4564  *  Enable flow control according to the current settings.
4565  */
4566 static int
4567 ixgbe_dcb_pfc_enable_generic(struct ixgbe_hw *hw, uint8_t tc_num)
4568 {
4569         int ret_val = 0;
4570         uint32_t mflcn_reg, fccfg_reg;
4571         uint32_t reg;
4572         uint32_t fcrtl, fcrth;
4573         uint8_t i;
4574         uint8_t nb_rx_en;
4575
4576         /* Validate the water mark configuration */
4577         if (!hw->fc.pause_time) {
4578                 ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
4579                 goto out;
4580         }
4581
4582         /* Low water mark of zero causes XOFF floods */
4583         if (hw->fc.current_mode & ixgbe_fc_tx_pause) {
4584                 /* High/Low water marks cannot be 0 */
4585                 if ((!hw->fc.high_water[tc_num]) || (!hw->fc.low_water[tc_num])) {
4586                         PMD_INIT_LOG(ERR, "Invalid water mark configuration");
4587                         ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
4588                         goto out;
4589                 }
4590
4591                 if (hw->fc.low_water[tc_num] >= hw->fc.high_water[tc_num]) {
4592                         PMD_INIT_LOG(ERR, "Invalid water mark configuration");
4593                         ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
4594                         goto out;
4595                 }
4596         }
4597         /* Negotiate the fc mode to use */
4598         ixgbe_fc_autoneg(hw);
4599
4600         /* Disable any previous flow control settings */
4601         mflcn_reg = IXGBE_READ_REG(hw, IXGBE_MFLCN);
4602         mflcn_reg &= ~(IXGBE_MFLCN_RPFCE_SHIFT | IXGBE_MFLCN_RFCE|IXGBE_MFLCN_RPFCE);
4603
4604         fccfg_reg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
4605         fccfg_reg &= ~(IXGBE_FCCFG_TFCE_802_3X | IXGBE_FCCFG_TFCE_PRIORITY);
4606
4607         switch (hw->fc.current_mode) {
4608         case ixgbe_fc_none:
4609                 /*
4610                  * If more than one RX priority flow control is enabled,
4611                  * TX pause cannot be disabled.
4612                  */
4613                 nb_rx_en = 0;
4614                 for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
4615                         reg = IXGBE_READ_REG(hw, IXGBE_FCRTH_82599(i));
4616                         if (reg & IXGBE_FCRTH_FCEN)
4617                                 nb_rx_en++;
4618                 }
4619                 if (nb_rx_en > 1)
4620                         fccfg_reg |= IXGBE_FCCFG_TFCE_PRIORITY;
4621                 break;
4622         case ixgbe_fc_rx_pause:
4623                 /*
4624                  * Rx Flow control is enabled and Tx Flow control is
4625                  * disabled by software override. Since there really
4626                  * isn't a way to advertise that we are capable of RX
4627                  * Pause ONLY, we will advertise that we support both
4628                  * symmetric and asymmetric Rx PAUSE.  Later, we will
4629                  * disable the adapter's ability to send PAUSE frames.
4630                  */
4631                 mflcn_reg |= IXGBE_MFLCN_RPFCE;
4632                 /*
4633                  * If more than one RX priority flow control is enabled,
4634                  * TX pause cannot be disabled.
4635                  */
4636                 nb_rx_en = 0;
4637                 for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
4638                         reg = IXGBE_READ_REG(hw, IXGBE_FCRTH_82599(i));
4639                         if (reg & IXGBE_FCRTH_FCEN)
4640                                 nb_rx_en++;
4641                 }
4642                 if (nb_rx_en > 1)
4643                         fccfg_reg |= IXGBE_FCCFG_TFCE_PRIORITY;
4644                 break;
4645         case ixgbe_fc_tx_pause:
4646                 /*
4647                  * Tx Flow control is enabled, and Rx Flow control is
4648                  * disabled by software override.
4649                  */
4650                 fccfg_reg |= IXGBE_FCCFG_TFCE_PRIORITY;
4651                 break;
4652         case ixgbe_fc_full:
4653                 /* Flow control (both Rx and Tx) is enabled by SW override. */
4654                 mflcn_reg |= IXGBE_MFLCN_RPFCE;
4655                 fccfg_reg |= IXGBE_FCCFG_TFCE_PRIORITY;
4656                 break;
4657         default:
4658                 PMD_DRV_LOG(DEBUG, "Flow control param set incorrectly");
4659                 ret_val = IXGBE_ERR_CONFIG;
4660                 goto out;
4661         }
4662
4663         /* Set 802.3x based flow control settings. */
4664         mflcn_reg |= IXGBE_MFLCN_DPF;
4665         IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn_reg);
4666         IXGBE_WRITE_REG(hw, IXGBE_FCCFG, fccfg_reg);
4667
4668         /* Set up and enable Rx high/low water mark thresholds, enable XON. */
4669         if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
4670                 hw->fc.high_water[tc_num]) {
4671                 fcrtl = (hw->fc.low_water[tc_num] << 10) | IXGBE_FCRTL_XONE;
4672                 IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(tc_num), fcrtl);
4673                 fcrth = (hw->fc.high_water[tc_num] << 10) | IXGBE_FCRTH_FCEN;
4674         } else {
4675                 IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(tc_num), 0);
4676                 /*
4677                  * In order to prevent Tx hangs when the internal Tx
4678                  * switch is enabled we must set the high water mark
4679                  * to the maximum FCRTH value.  This allows the Tx
4680                  * switch to function even under heavy Rx workloads.
4681                  */
4682                 fcrth = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(tc_num)) - 32;
4683         }
4684         IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(tc_num), fcrth);
4685
4686         /* Configure pause time (2 TCs per register) */
4687         reg = hw->fc.pause_time * 0x00010001;
4688         for (i = 0; i < (IXGBE_DCB_MAX_TRAFFIC_CLASS / 2); i++)
4689                 IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg);
4690
4691         /* Configure flow control refresh threshold value */
4692         IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2);
4693
4694 out:
4695         return ret_val;
4696 }
4697
4698 static int
4699 ixgbe_dcb_pfc_enable(struct rte_eth_dev *dev, uint8_t tc_num)
4700 {
4701         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4702         int32_t ret_val = IXGBE_NOT_IMPLEMENTED;
4703
4704         if (hw->mac.type != ixgbe_mac_82598EB) {
4705                 ret_val = ixgbe_dcb_pfc_enable_generic(hw, tc_num);
4706         }
4707         return ret_val;
4708 }
4709
4710 static int
4711 ixgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_pfc_conf *pfc_conf)
4712 {
4713         int err;
4714         uint32_t rx_buf_size;
4715         uint32_t max_high_water;
4716         uint8_t tc_num;
4717         uint8_t  map[IXGBE_DCB_MAX_USER_PRIORITY] = { 0 };
4718         struct ixgbe_hw *hw =
4719                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4720         struct ixgbe_dcb_config *dcb_config =
4721                 IXGBE_DEV_PRIVATE_TO_DCB_CFG(dev->data->dev_private);
4722
4723         enum ixgbe_fc_mode rte_fcmode_2_ixgbe_fcmode[] = {
4724                 ixgbe_fc_none,
4725                 ixgbe_fc_rx_pause,
4726                 ixgbe_fc_tx_pause,
4727                 ixgbe_fc_full
4728         };
4729
4730         PMD_INIT_FUNC_TRACE();
4731
4732         ixgbe_dcb_unpack_map_cee(dcb_config, IXGBE_DCB_RX_CONFIG, map);
4733         tc_num = map[pfc_conf->priority];
4734         rx_buf_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(tc_num));
4735         PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size);
4736         /*
4737          * Reserve at least one Ethernet frame for the high_water/low_water
4738          * watermarks, in kilobytes, for ixgbe
4739          */
4740         max_high_water = (rx_buf_size - ETHER_MAX_LEN) >> IXGBE_RXPBSIZE_SHIFT;
4741         if ((pfc_conf->fc.high_water > max_high_water) ||
4742             (pfc_conf->fc.high_water <= pfc_conf->fc.low_water)) {
4743                 PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB");
4744                 PMD_INIT_LOG(ERR, "High_water must be <= 0x%x", max_high_water);
4745                 return -EINVAL;
4746         }
4747
4748         hw->fc.requested_mode = rte_fcmode_2_ixgbe_fcmode[pfc_conf->fc.mode];
4749         hw->fc.pause_time = pfc_conf->fc.pause_time;
4750         hw->fc.send_xon = pfc_conf->fc.send_xon;
4751         hw->fc.low_water[tc_num] =  pfc_conf->fc.low_water;
4752         hw->fc.high_water[tc_num] = pfc_conf->fc.high_water;
4753
4754         err = ixgbe_dcb_pfc_enable(dev, tc_num);
4755
4756         /* Not negotiated is not an error case */
4757         if ((err == IXGBE_SUCCESS) || (err == IXGBE_ERR_FC_NOT_NEGOTIATED))
4758                 return 0;
4759
4760         PMD_INIT_LOG(ERR, "ixgbe_dcb_pfc_enable = 0x%x", err);
4761         return -EIO;
4762 }
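
/*
 * Sketch of the per-priority variant (illustrative values only): configure
 * PFC for one user priority; the priority is mapped to a traffic class via
 * the DCB map, as done above.
 *
 *     struct rte_eth_pfc_conf pfc_conf;
 *
 *     memset(&pfc_conf, 0, sizeof(pfc_conf));
 *     pfc_conf.priority = 0;
 *     pfc_conf.fc.mode = RTE_FC_FULL;
 *     pfc_conf.fc.pause_time = 0x680;
 *     pfc_conf.fc.high_water = 0x80;
 *     pfc_conf.fc.low_water = 0x40;
 *     rte_eth_dev_priority_flow_ctrl_set(port_id, &pfc_conf);
 */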
4763
4764 static int
4765 ixgbe_dev_rss_reta_update(struct rte_eth_dev *dev,
4766                           struct rte_eth_rss_reta_entry64 *reta_conf,
4767                           uint16_t reta_size)
4768 {
4769         uint16_t i, sp_reta_size;
4770         uint8_t j, mask;
4771         uint32_t reta, r;
4772         uint16_t idx, shift;
4773         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4774         uint32_t reta_reg;
4775
4776         PMD_INIT_FUNC_TRACE();
4777
4778         if (!ixgbe_rss_update_sp(hw->mac.type)) {
4779                 PMD_DRV_LOG(ERR, "RSS reta update is not supported on this "
4780                         "NIC.");
4781                 return -ENOTSUP;
4782         }
4783
4784         sp_reta_size = ixgbe_reta_size_get(hw->mac.type);
4785         if (reta_size != sp_reta_size) {
4786                 PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
4787                         "(%d) doesn't match the number the hardware can support "
4788                         "(%d)", reta_size, sp_reta_size);
4789                 return -EINVAL;
4790         }
4791
4792         for (i = 0; i < reta_size; i += IXGBE_4_BIT_WIDTH) {
4793                 idx = i / RTE_RETA_GROUP_SIZE;
4794                 shift = i % RTE_RETA_GROUP_SIZE;
4795                 mask = (uint8_t)((reta_conf[idx].mask >> shift) &
4796                                                 IXGBE_4_BIT_MASK);
4797                 if (!mask)
4798                         continue;
4799                 reta_reg = ixgbe_reta_reg_get(hw->mac.type, i);
4800                 if (mask == IXGBE_4_BIT_MASK)
4801                         r = 0;
4802                 else
4803                         r = IXGBE_READ_REG(hw, reta_reg);
4804                 for (j = 0, reta = 0; j < IXGBE_4_BIT_WIDTH; j++) {
4805                         if (mask & (0x1 << j))
4806                                 reta |= reta_conf[idx].reta[shift + j] <<
4807                                                         (CHAR_BIT * j);
4808                         else
4809                                 reta |= r & (IXGBE_8_BIT_MASK <<
4810                                                 (CHAR_BIT * j));
4811                 }
4812                 IXGBE_WRITE_REG(hw, reta_reg, reta);
4813         }
4814
4815         return 0;
4816 }
4817
4818 static int
4819 ixgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
4820                          struct rte_eth_rss_reta_entry64 *reta_conf,
4821                          uint16_t reta_size)
4822 {
4823         uint16_t i, sp_reta_size;
4824         uint8_t j, mask;
4825         uint32_t reta;
4826         uint16_t idx, shift;
4827         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4828         uint32_t reta_reg;
4829
4830         PMD_INIT_FUNC_TRACE();
4831         sp_reta_size = ixgbe_reta_size_get(hw->mac.type);
4832         if (reta_size != sp_reta_size) {
4833                 PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
4834                         "(%d) doesn't match the number the hardware can support "
4835                         "(%d)", reta_size, sp_reta_size);
4836                 return -EINVAL;
4837         }
4838
4839         for (i = 0; i < reta_size; i += IXGBE_4_BIT_WIDTH) {
4840                 idx = i / RTE_RETA_GROUP_SIZE;
4841                 shift = i % RTE_RETA_GROUP_SIZE;
4842                 mask = (uint8_t)((reta_conf[idx].mask >> shift) &
4843                                                 IXGBE_4_BIT_MASK);
4844                 if (!mask)
4845                         continue;
4846
4847                 reta_reg = ixgbe_reta_reg_get(hw->mac.type, i);
4848                 reta = IXGBE_READ_REG(hw, reta_reg);
4849                 for (j = 0; j < IXGBE_4_BIT_WIDTH; j++) {
4850                         if (mask & (0x1 << j))
4851                                 reta_conf[idx].reta[shift + j] =
4852                                         ((reta >> (CHAR_BIT * j)) &
4853                                                 IXGBE_8_BIT_MASK);
4854                 }
4855         }
4856
4857         return 0;
4858 }
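
/*
 * RETA programming sketch (illustrative): spread a 128-entry table (the
 * 82599-class size; query dev_info.reta_size first) round-robin over the
 * Rx queues. Both `port_id` and `nb_rx_queues` are assumed variables.
 *
 *     struct rte_eth_rss_reta_entry64 reta_conf[2];
 *     uint16_t i;
 *
 *     memset(reta_conf, 0, sizeof(reta_conf));
 *     for (i = 0; i < 128; i++) {
 *             reta_conf[i / RTE_RETA_GROUP_SIZE].mask |=
 *                     1ULL << (i % RTE_RETA_GROUP_SIZE);
 *             reta_conf[i / RTE_RETA_GROUP_SIZE].reta[i % RTE_RETA_GROUP_SIZE] =
 *                     i % nb_rx_queues;
 *     }
 *     rte_eth_dev_rss_reta_update(port_id, reta_conf, 128);
 */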
4859
4860 static int
4861 ixgbe_add_rar(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
4862                                 uint32_t index, uint32_t pool)
4863 {
4864         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4865         uint32_t enable_addr = 1;
4866
4867         return ixgbe_set_rar(hw, index, mac_addr->addr_bytes,
4868                              pool, enable_addr);
4869 }
4870
4871 static void
4872 ixgbe_remove_rar(struct rte_eth_dev *dev, uint32_t index)
4873 {
4874         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4875
4876         ixgbe_clear_rar(hw, index);
4877 }
4878
4879 static void
4880 ixgbe_set_default_mac_addr(struct rte_eth_dev *dev, struct ether_addr *addr)
4881 {
4882         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
4883
4884         ixgbe_remove_rar(dev, 0);
4885
4886         ixgbe_add_rar(dev, addr, 0, pci_dev->max_vfs);
4887 }
4888
4889 static bool
4890 is_device_supported(struct rte_eth_dev *dev, struct rte_pci_driver *drv)
4891 {
4892         if (strcmp(dev->device->driver->name, drv->driver.name))
4893                 return false;
4894
4895         return true;
4896 }
4897
4898 bool
4899 is_ixgbe_supported(struct rte_eth_dev *dev)
4900 {
4901         return is_device_supported(dev, &rte_ixgbe_pmd);
4902 }
4903
4904 static int
4905 ixgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
4906 {
4907         uint32_t hlreg0;
4908         uint32_t maxfrs;
4909         struct ixgbe_hw *hw;
4910         struct rte_eth_dev_info dev_info;
4911         uint32_t frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
4912         struct rte_eth_dev_data *dev_data = dev->data;
4913
4914         ixgbe_dev_info_get(dev, &dev_info);
4915
4916         /* check that mtu is within the allowed range */
4917         if ((mtu < ETHER_MIN_MTU) || (frame_size > dev_info.max_rx_pktlen))
4918                 return -EINVAL;
4919
4920         /* If the device is started, refuse an MTU that requires scattered
4921          * packet support when this feature has not been enabled before.
4922          */
4923         if (dev_data->dev_started && !dev_data->scattered_rx &&
4924             (frame_size + 2 * IXGBE_VLAN_TAG_SIZE >
4925              dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)) {
4926                 PMD_INIT_LOG(ERR, "Stop port first.");
4927                 return -EINVAL;
4928         }
4929
4930         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4931         hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
4932
4933         /* switch to jumbo mode if needed */
4934         if (frame_size > ETHER_MAX_LEN) {
4935                 dev->data->dev_conf.rxmode.jumbo_frame = 1;
4936                 hlreg0 |= IXGBE_HLREG0_JUMBOEN;
4937         } else {
4938                 dev->data->dev_conf.rxmode.jumbo_frame = 0;
4939                 hlreg0 &= ~IXGBE_HLREG0_JUMBOEN;
4940         }
4941         IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
4942
4943         /* update max frame size */
4944         dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
4945
4946         maxfrs = IXGBE_READ_REG(hw, IXGBE_MAXFRS);
4947         maxfrs &= 0x0000FFFF;
4948         maxfrs |= (dev->data->dev_conf.rxmode.max_rx_pkt_len << 16);
4949         IXGBE_WRITE_REG(hw, IXGBE_MAXFRS, maxfrs);
4950
4951         return 0;
4952 }
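
/*
 * Application-side sketch (illustrative): jumbo mode is toggled
 * automatically by the handler above when the frame size crosses
 * ETHER_MAX_LEN.
 *
 *     if (rte_eth_dev_set_mtu(port_id, 9000) != 0)
 *             printf("MTU rejected; stop the port or enable scatter\n");
 */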
4953
4954 /*
4955  * Virtual Function operations
4956  */
4957 static void
4958 ixgbevf_intr_disable(struct ixgbe_hw *hw)
4959 {
4960         PMD_INIT_FUNC_TRACE();
4961
4962         /* Clear interrupt mask to stop interrupts from being generated */
4963         IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, IXGBE_VF_IRQ_CLEAR_MASK);
4964
4965         IXGBE_WRITE_FLUSH(hw);
4966 }
4967
4968 static void
4969 ixgbevf_intr_enable(struct ixgbe_hw *hw)
4970 {
4971         PMD_INIT_FUNC_TRACE();
4972
4973         /* VF enable interrupt autoclean */
4974         IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, IXGBE_VF_IRQ_ENABLE_MASK);
4975         IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, IXGBE_VF_IRQ_ENABLE_MASK);
4976         IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, IXGBE_VF_IRQ_ENABLE_MASK);
4977
4978         IXGBE_WRITE_FLUSH(hw);
4979 }
4980
4981 static int
4982 ixgbevf_dev_configure(struct rte_eth_dev *dev)
4983 {
4984         struct rte_eth_conf *conf = &dev->data->dev_conf;
4985         struct ixgbe_adapter *adapter =
4986                         (struct ixgbe_adapter *)dev->data->dev_private;
4987
4988         PMD_INIT_LOG(DEBUG, "Configured Virtual Function port id: %d",
4989                      dev->data->port_id);
4990
4991         /*
4992          * The VF has no ability to enable/disable HW CRC stripping;
4993          * keep the behavior consistent with the host PF
4994          */
4995 #ifndef RTE_LIBRTE_IXGBE_PF_DISABLE_STRIP_CRC
4996         if (!conf->rxmode.hw_strip_crc) {
4997                 PMD_INIT_LOG(NOTICE, "VF can't disable HW CRC Strip");
4998                 conf->rxmode.hw_strip_crc = 1;
4999         }
5000 #else
5001         if (conf->rxmode.hw_strip_crc) {
5002                 PMD_INIT_LOG(NOTICE, "VF can't enable HW CRC Strip");
5003                 conf->rxmode.hw_strip_crc = 0;
5004         }
5005 #endif
5006
5007         /*
5008          * Initialize to TRUE. If any Rx queue doesn't meet the bulk
5009          * allocation or vector Rx preconditions, we will reset it.
5010          */
5011         adapter->rx_bulk_alloc_allowed = true;
5012         adapter->rx_vec_allowed = true;
5013
5014         return 0;
5015 }
5016
5017 static int
5018 ixgbevf_dev_start(struct rte_eth_dev *dev)
5019 {
5020         struct ixgbe_hw *hw =
5021                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5022         uint32_t intr_vector = 0;
5023         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
5024         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
5025
5026         int err, mask = 0;
5027
5028         PMD_INIT_FUNC_TRACE();
5029
5030         err = hw->mac.ops.reset_hw(hw);
5031         if (err) {
5032                 PMD_INIT_LOG(ERR, "Unable to reset vf hardware (%d)", err);
5033                 return err;
5034         }
5035         hw->mac.get_link_status = true;
5036
5037         /* negotiate mailbox API version to use with the PF. */
5038         ixgbevf_negotiate_api(hw);
5039
5040         ixgbevf_dev_tx_init(dev);
5041
5042         /* This can fail when allocating mbufs for descriptor rings */
5043         err = ixgbevf_dev_rx_init(dev);
5044         if (err) {
5045                 PMD_INIT_LOG(ERR, "Unable to initialize RX hardware (%d)", err);
5046                 ixgbe_dev_clear_queues(dev);
5047                 return err;
5048         }
5049
5050         /* Set vfta */
5051         ixgbevf_set_vfta_all(dev, 1);
5052
5053         /* Set HW strip */
5054         mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
5055                 ETH_VLAN_EXTEND_MASK;
5056         err = ixgbevf_vlan_offload_set(dev, mask);
5057         if (err) {
5058                 PMD_INIT_LOG(ERR, "Unable to set VLAN offload (%d)", err);
5059                 ixgbe_dev_clear_queues(dev);
5060                 return err;
5061         }
5062
5063         ixgbevf_dev_rxtx_start(dev);
5064
5065         /* check and configure queue intr-vector mapping */
5066         if (rte_intr_cap_multiple(intr_handle) &&
5067             dev->data->dev_conf.intr_conf.rxq) {
5068                 /* According to the datasheet, only vectors 0/1/2 can be used;
5069                  * for now only one vector is used for the Rx queues
5070                  */
5071                 intr_vector = 1;
5072                 if (rte_intr_efd_enable(intr_handle, intr_vector))
5073                         return -1;
5074         }
5075
5076         if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
5077                 intr_handle->intr_vec =
5078                         rte_zmalloc("intr_vec",
5079                                     dev->data->nb_rx_queues * sizeof(int), 0);
5080                 if (intr_handle->intr_vec == NULL) {
5081                         PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
5082                                      " intr_vec", dev->data->nb_rx_queues);
5083                         return -ENOMEM;
5084                 }
5085         }
5086         ixgbevf_configure_msix(dev);
5087
5088         /* When a VF port is bound to VFIO-PCI, only the miscellaneous interrupt
5089          * is mapped to VFIO vector 0 in eth_ixgbevf_dev_init().
5090          * If the previous VFIO interrupt mapping set in eth_ixgbevf_dev_init()
5091          * is not cleared, it will fail when the following rte_intr_enable() tries
5092          * to map Rx queue interrupts to other VFIO vectors.
5093          * So clear the uio/vfio intr/eventfd first to avoid failure.
5094          */
5095         rte_intr_disable(intr_handle);
5096
5097         rte_intr_enable(intr_handle);
5098
5099         /* Re-enable interrupt for VF */
5100         ixgbevf_intr_enable(hw);
5101
5102         return 0;
5103 }
5104
5105 static void
5106 ixgbevf_dev_stop(struct rte_eth_dev *dev)
5107 {
5108         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5109         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
5110         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
5111
5112         PMD_INIT_FUNC_TRACE();
5113
5114         ixgbevf_intr_disable(hw);
5115
5116         hw->adapter_stopped = 1;
5117         ixgbe_stop_adapter(hw);
5118
5119         /*
5120          * Clear what we set, but keep shadow_vfta so it can be
5121          * restored after the device starts
5122          */
5123         ixgbevf_set_vfta_all(dev, 0);
5124
5125         /* Clear stored conf */
5126         dev->data->scattered_rx = 0;
5127
5128         ixgbe_dev_clear_queues(dev);
5129
5130         /* Clean datapath event and queue/vec mapping */
5131         rte_intr_efd_disable(intr_handle);
5132         if (intr_handle->intr_vec != NULL) {
5133                 rte_free(intr_handle->intr_vec);
5134                 intr_handle->intr_vec = NULL;
5135         }
5136 }
5137
5138 static void
5139 ixgbevf_dev_close(struct rte_eth_dev *dev)
5140 {
5141         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5142
5143         PMD_INIT_FUNC_TRACE();
5144
5145         ixgbe_reset_hw(hw);
5146
5147         ixgbevf_dev_stop(dev);
5148
5149         ixgbe_dev_free_queues(dev);
5150
5151         /**
5152          * Remove the VF MAC address to ensure
5153          * that the VF traffic goes to the PF
5154          * after stop, close and detach of the VF
5155          **/
5156         ixgbevf_remove_mac_addr(dev, 0);
5157 }
5158
5159 /*
5160  * Reset VF device
5161  */
5162 static int
5163 ixgbevf_dev_reset(struct rte_eth_dev *dev)
5164 {
5165         int ret;
5166
5167         ret = eth_ixgbevf_dev_uninit(dev);
5168         if (ret)
5169                 return ret;
5170
5171         ret = eth_ixgbevf_dev_init(dev);
5172
5173         return ret;
5174 }
5175
5176 static void ixgbevf_set_vfta_all(struct rte_eth_dev *dev, bool on)
5177 {
5178         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5179         struct ixgbe_vfta *shadow_vfta =
5180                 IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
5181         int i = 0, j = 0, vfta = 0, mask = 1;
5182
5183         for (i = 0; i < IXGBE_VFTA_SIZE; i++) {
5184                 vfta = shadow_vfta->vfta[i];
5185                 if (vfta) {
5186                         mask = 1;
5187                         for (j = 0; j < 32; j++) {
5188                                 if (vfta & mask)
5189                                         ixgbe_set_vfta(hw, (i<<5)+j, 0,
5190                                                        on, false);
5191                                 mask <<= 1;
5192                         }
5193                 }
5194         }
5195
5196 }
5197
5198 static int
5199 ixgbevf_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
5200 {
5201         struct ixgbe_hw *hw =
5202                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5203         struct ixgbe_vfta *shadow_vfta =
5204                 IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
5205         uint32_t vid_idx = 0;
5206         uint32_t vid_bit = 0;
5207         int ret = 0;
5208
5209         PMD_INIT_FUNC_TRACE();
5210
5211         /* vind is not used in the VF driver; set it to 0, see ixgbe_set_vfta_vf */
5212         ret = ixgbe_set_vfta(hw, vlan_id, 0, !!on, false);
5213         if (ret) {
5214                 PMD_INIT_LOG(ERR, "Unable to set VF vlan");
5215                 return ret;
5216         }
5217         vid_idx = (uint32_t) ((vlan_id >> 5) & 0x7F);
5218         vid_bit = (uint32_t) (1 << (vlan_id & 0x1F));
5219
5220         /* Save what we set and restore it after device reset */
5221         if (on)
5222                 shadow_vfta->vfta[vid_idx] |= vid_bit;
5223         else
5224                 shadow_vfta->vfta[vid_idx] &= ~vid_bit;
5225
5226         return 0;
5227 }
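
/*
 * Illustrative usage (a sketch): admit VLAN 100 on a VF port. The shadow
 * VFTA kept above lets ixgbevf_set_vfta_all() replay the filter after a
 * reset.
 *
 *     if (rte_eth_dev_vlan_filter(port_id, 100, 1) != 0)
 *             printf("PF refused the VLAN filter request\n");
 */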
5228
5229 static void
5230 ixgbevf_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on)
5231 {
5232         struct ixgbe_hw *hw =
5233                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5234         uint32_t ctrl;
5235
5236         PMD_INIT_FUNC_TRACE();
5237
5238         if (queue >= hw->mac.max_rx_queues)
5239                 return;
5240
5241         ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(queue));
5242         if (on)
5243                 ctrl |= IXGBE_RXDCTL_VME;
5244         else
5245                 ctrl &= ~IXGBE_RXDCTL_VME;
5246         IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(queue), ctrl);
5247
5248         ixgbe_vlan_hw_strip_bitmap_set(dev, queue, on);
5249 }
5250
5251 static int
5252 ixgbevf_vlan_offload_set(struct rte_eth_dev *dev, int mask)
5253 {
5254         struct ixgbe_hw *hw =
5255                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5256         uint16_t i;
5257         int on = 0;
5258
5259         /* The VF only supports the HW VLAN strip feature; other offloads are not supported */
5260         if (mask & ETH_VLAN_STRIP_MASK) {
5261                 on = !!(dev->data->dev_conf.rxmode.hw_vlan_strip);
5262
5263                 for (i = 0; i < hw->mac.max_rx_queues; i++)
5264                         ixgbevf_vlan_strip_queue_set(dev, i, on);
5265         }
5266
5267         return 0;
5268 }
5269
int
ixgbe_vt_check(struct ixgbe_hw *hw)
{
        uint32_t reg_val;

        /* if Virtualization Technology is enabled */
        reg_val = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
        if (!(reg_val & IXGBE_VT_CTL_VT_ENABLE)) {
                PMD_INIT_LOG(ERR, "VT must be enabled for this setting");
                return -1;
        }

        return 0;
}

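/*
 * Compute the 12-bit hash used to index the unicast table array (UTA).
 * The bit window taken from the address depends on mc_filter_type; as a
 * worked example with filter type 0 and a MAC ending in ...:44:55,
 * vector = (0x44 >> 4) | (0x55 << 4) = 0x554.
 */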
static uint32_t
ixgbe_uta_vector(struct ixgbe_hw *hw, struct ether_addr *uc_addr)
{
        uint32_t vector = 0;

        switch (hw->mac.mc_filter_type) {
        case 0:   /* use bits [47:36] of the address */
                vector = ((uc_addr->addr_bytes[4] >> 4) |
                        (((uint16_t)uc_addr->addr_bytes[5]) << 4));
                break;
        case 1:   /* use bits [46:35] of the address */
                vector = ((uc_addr->addr_bytes[4] >> 3) |
                        (((uint16_t)uc_addr->addr_bytes[5]) << 5));
                break;
        case 2:   /* use bits [45:34] of the address */
                vector = ((uc_addr->addr_bytes[4] >> 2) |
                        (((uint16_t)uc_addr->addr_bytes[5]) << 6));
                break;
        case 3:   /* use bits [43:32] of the address */
                vector = ((uc_addr->addr_bytes[4]) |
                        (((uint16_t)uc_addr->addr_bytes[5]) << 8));
                break;
        default:  /* Invalid mc_filter_type */
                break;
        }

        /* vector can only be 12 bits or the boundary will be exceeded */
        vector &= 0xFFF;
        return vector;
}

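/*
 * Set or clear one unicast hash filter.  The 12-bit vector is split into
 * a register index (upper 7 bits) and a bit position (lower 5 bits):
 * vector 0x554, for instance, maps to UTA[0x2A], bit 20.  A shadow copy
 * avoids redundant register writes, and MCSTCTRL.MFE is kept enabled
 * while any UTA entry is in use.
 */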
static int
ixgbe_uc_hash_table_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
                        uint8_t on)
{
        uint32_t vector;
        uint32_t uta_idx;
        uint32_t reg_val;
        uint32_t uta_shift;
        uint32_t rc;
        const uint32_t ixgbe_uta_idx_mask = 0x7F;
        const uint32_t ixgbe_uta_bit_shift = 5;
        const uint32_t ixgbe_uta_bit_mask = (0x1 << ixgbe_uta_bit_shift) - 1;
        const uint32_t bit1 = 0x1;

        struct ixgbe_hw *hw =
                IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct ixgbe_uta_info *uta_info =
                IXGBE_DEV_PRIVATE_TO_UTA(dev->data->dev_private);

        /* The UTA table only exists on 82599 hardware and newer */
        if (hw->mac.type < ixgbe_mac_82599EB)
                return -ENOTSUP;

        vector = ixgbe_uta_vector(hw, mac_addr);
        uta_idx = (vector >> ixgbe_uta_bit_shift) & ixgbe_uta_idx_mask;
        uta_shift = vector & ixgbe_uta_bit_mask;

        rc = ((uta_info->uta_shadow[uta_idx] >> uta_shift & bit1) != 0);
        if (rc == on)
                return 0;

        reg_val = IXGBE_READ_REG(hw, IXGBE_UTA(uta_idx));
        if (on) {
                uta_info->uta_in_use++;
                reg_val |= (bit1 << uta_shift);
                uta_info->uta_shadow[uta_idx] |= (bit1 << uta_shift);
        } else {
                uta_info->uta_in_use--;
                reg_val &= ~(bit1 << uta_shift);
                uta_info->uta_shadow[uta_idx] &= ~(bit1 << uta_shift);
        }

        IXGBE_WRITE_REG(hw, IXGBE_UTA(uta_idx), reg_val);

        if (uta_info->uta_in_use > 0)
                IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL,
                                IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type);
        else
                IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type);

        return 0;
}

static int
ixgbe_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t on)
{
        int i;
        struct ixgbe_hw *hw =
                IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct ixgbe_uta_info *uta_info =
                IXGBE_DEV_PRIVATE_TO_UTA(dev->data->dev_private);

        /* The UTA table only exists on 82599 hardware and newer */
        if (hw->mac.type < ixgbe_mac_82599EB)
                return -ENOTSUP;

        if (on) {
                for (i = 0; i < ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
                        uta_info->uta_shadow[i] = ~0;
                        IXGBE_WRITE_REG(hw, IXGBE_UTA(i), ~0);
                }
        } else {
                for (i = 0; i < ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
                        uta_info->uta_shadow[i] = 0;
                        IXGBE_WRITE_REG(hw, IXGBE_UTA(i), 0);
                }
        }
        return 0;
}

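/*
 * Translate ETH_VMDQ_ACCEPT_* bits from rx_mask into the matching VMOLR
 * bits, preserving whatever else is already set in orig_val.
 */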
uint32_t
ixgbe_convert_vm_rx_mask_to_val(uint16_t rx_mask, uint32_t orig_val)
{
        uint32_t new_val = orig_val;

        if (rx_mask & ETH_VMDQ_ACCEPT_UNTAG)
                new_val |= IXGBE_VMOLR_AUPE;
        if (rx_mask & ETH_VMDQ_ACCEPT_HASH_MC)
                new_val |= IXGBE_VMOLR_ROMPE;
        if (rx_mask & ETH_VMDQ_ACCEPT_HASH_UC)
                new_val |= IXGBE_VMOLR_ROPE;
        if (rx_mask & ETH_VMDQ_ACCEPT_BROADCAST)
                new_val |= IXGBE_VMOLR_BAM;
        if (rx_mask & ETH_VMDQ_ACCEPT_MULTICAST)
                new_val |= IXGBE_VMOLR_MPE;

        return new_val;
}

#define IXGBE_MRCTL_VPME  0x01 /* Virtual Pool Mirroring. */
#define IXGBE_MRCTL_UPME  0x02 /* Uplink Port Mirroring. */
#define IXGBE_MRCTL_DPME  0x04 /* Downlink Port Mirroring. */
#define IXGBE_MRCTL_VLME  0x08 /* VLAN Mirroring. */
#define IXGBE_INVALID_MIRROR_TYPE(mirror_type) \
        ((mirror_type) & ~(uint8_t)(ETH_MIRROR_VIRTUAL_POOL_UP | \
        ETH_MIRROR_UPLINK_PORT | ETH_MIRROR_DOWNLINK_PORT | ETH_MIRROR_VLAN))

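/*
 * Program one mirror rule.  MRCTL(rule_id) holds the rule type and the
 * destination pool; the 64-bit pool and VLAN masks are each split into
 * two 32-bit registers, with the upper halves living at
 * rule_id + rule_mr_offset (VMRVM/VMRVLAN).  For VLAN mirroring every
 * requested VLAN id must already have a matching, enabled VLVF entry.
 */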
static int
ixgbe_mirror_rule_set(struct rte_eth_dev *dev,
                      struct rte_eth_mirror_conf *mirror_conf,
                      uint8_t rule_id, uint8_t on)
{
        uint32_t mr_ctl, vlvf;
        uint32_t mp_lsb = 0;
        uint32_t mv_msb = 0;
        uint32_t mv_lsb = 0;
        uint32_t mp_msb = 0;
        uint8_t i = 0;
        int reg_index = 0;
        uint64_t vlan_mask = 0;

        const uint8_t pool_mask_offset = 32;
        const uint8_t vlan_mask_offset = 32;
        const uint8_t dst_pool_offset = 8;
        const uint8_t rule_mr_offset  = 4;
        const uint8_t mirror_rule_mask = 0x0F;

        struct ixgbe_mirror_info *mr_info =
                        (IXGBE_DEV_PRIVATE_TO_PFDATA(dev->data->dev_private));
        struct ixgbe_hw *hw =
                IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        uint8_t mirror_type = 0;

        if (ixgbe_vt_check(hw) < 0)
                return -ENOTSUP;

        if (rule_id >= IXGBE_MAX_MIRROR_RULES)
                return -EINVAL;

        if (IXGBE_INVALID_MIRROR_TYPE(mirror_conf->rule_type)) {
                PMD_DRV_LOG(ERR, "unsupported mirror type 0x%x.",
                            mirror_conf->rule_type);
                return -EINVAL;
        }

        if (mirror_conf->rule_type & ETH_MIRROR_VLAN) {
                mirror_type |= IXGBE_MRCTL_VLME;
                /* Check if vlan id is valid and find the corresponding
                 * VLAN ID index in VLVF
                 */
                for (i = 0; i < IXGBE_VLVF_ENTRIES; i++) {
                        if (mirror_conf->vlan.vlan_mask & (1ULL << i)) {
                                /* search vlan id related pool vlan filter
                                 * index
                                 */
                                reg_index = ixgbe_find_vlvf_slot(
                                                hw,
                                                mirror_conf->vlan.vlan_id[i],
                                                false);
                                if (reg_index < 0)
                                        return -EINVAL;
                                vlvf = IXGBE_READ_REG(hw,
                                                      IXGBE_VLVF(reg_index));
                                if ((vlvf & IXGBE_VLVF_VIEN) &&
                                    ((vlvf & IXGBE_VLVF_VLANID_MASK) ==
                                      mirror_conf->vlan.vlan_id[i]))
                                        vlan_mask |= (1ULL << reg_index);
                                else
                                        return -EINVAL;
                        }
                }

                if (on) {
                        mv_lsb = vlan_mask & 0xFFFFFFFF;
                        mv_msb = vlan_mask >> vlan_mask_offset;

                        mr_info->mr_conf[rule_id].vlan.vlan_mask =
                                                mirror_conf->vlan.vlan_mask;
                        for (i = 0; i < ETH_VMDQ_MAX_VLAN_FILTERS; i++) {
                                if (mirror_conf->vlan.vlan_mask & (1ULL << i))
                                        mr_info->mr_conf[rule_id].vlan.vlan_id[i] =
                                                mirror_conf->vlan.vlan_id[i];
                        }
                } else {
                        mv_lsb = 0;
                        mv_msb = 0;
                        mr_info->mr_conf[rule_id].vlan.vlan_mask = 0;
                        for (i = 0; i < ETH_VMDQ_MAX_VLAN_FILTERS; i++)
                                mr_info->mr_conf[rule_id].vlan.vlan_id[i] = 0;
                }
        }

        /*
         * If pool mirroring is enabled, write the related pool mask
         * register; if it is disabled, clear the PFMRVM register.
         */
        if (mirror_conf->rule_type & ETH_MIRROR_VIRTUAL_POOL_UP) {
                mirror_type |= IXGBE_MRCTL_VPME;
                if (on) {
                        mp_lsb = mirror_conf->pool_mask & 0xFFFFFFFF;
                        mp_msb = mirror_conf->pool_mask >> pool_mask_offset;
                        mr_info->mr_conf[rule_id].pool_mask =
                                        mirror_conf->pool_mask;

                } else {
                        mp_lsb = 0;
                        mp_msb = 0;
                        mr_info->mr_conf[rule_id].pool_mask = 0;
                }
        }
        if (mirror_conf->rule_type & ETH_MIRROR_UPLINK_PORT)
                mirror_type |= IXGBE_MRCTL_UPME;
        if (mirror_conf->rule_type & ETH_MIRROR_DOWNLINK_PORT)
                mirror_type |= IXGBE_MRCTL_DPME;

        /* read mirror control register and recalculate it */
        mr_ctl = IXGBE_READ_REG(hw, IXGBE_MRCTL(rule_id));

        if (on) {
                mr_ctl |= mirror_type;
                mr_ctl &= mirror_rule_mask;
                mr_ctl |= mirror_conf->dst_pool << dst_pool_offset;
        } else {
                mr_ctl &= ~(mirror_conf->rule_type & mirror_rule_mask);
        }

        mr_info->mr_conf[rule_id].rule_type = mirror_conf->rule_type;
        mr_info->mr_conf[rule_id].dst_pool = mirror_conf->dst_pool;

        /* write mirror control register */
        IXGBE_WRITE_REG(hw, IXGBE_MRCTL(rule_id), mr_ctl);

        /* write pool mirror control register */
        if (mirror_conf->rule_type & ETH_MIRROR_VIRTUAL_POOL_UP) {
                IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id), mp_lsb);
                IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id + rule_mr_offset),
                                mp_msb);
        }
        /* write VLAN mirror control register */
        if (mirror_conf->rule_type & ETH_MIRROR_VLAN) {
                IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id), mv_lsb);
                IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id + rule_mr_offset),
                                mv_msb);
        }

        return 0;
}

static int
ixgbe_mirror_rule_reset(struct rte_eth_dev *dev, uint8_t rule_id)
{
        int mr_ctl = 0;
        uint32_t lsb_val = 0;
        uint32_t msb_val = 0;
        const uint8_t rule_mr_offset = 4;

        struct ixgbe_hw *hw =
                IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct ixgbe_mirror_info *mr_info =
                (IXGBE_DEV_PRIVATE_TO_PFDATA(dev->data->dev_private));

        if (ixgbe_vt_check(hw) < 0)
                return -ENOTSUP;

        if (rule_id >= IXGBE_MAX_MIRROR_RULES)
                return -EINVAL;

        memset(&mr_info->mr_conf[rule_id], 0,
               sizeof(struct rte_eth_mirror_conf));

        /* clear PFVMCTL register */
        IXGBE_WRITE_REG(hw, IXGBE_MRCTL(rule_id), mr_ctl);

        /* clear pool mask register */
        IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id), lsb_val);
        IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id + rule_mr_offset), msb_val);

        /* clear vlan mask register */
        IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id), lsb_val);
        IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id + rule_mr_offset), msb_val);

        return 0;
}

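/*
 * The VF exposes a single interrupt mask register, VTEIMS.  Enabling or
 * disabling RX interrupts therefore just sets or clears the bit of the
 * (single) RX vector; queue_id is unused.
 */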
static int
ixgbevf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
{
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
        struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
        uint32_t mask;
        struct ixgbe_hw *hw =
                IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        uint32_t vec = IXGBE_MISC_VEC_ID;

        mask = IXGBE_READ_REG(hw, IXGBE_VTEIMS);
        if (rte_intr_allow_others(intr_handle))
                vec = IXGBE_RX_VEC_START;
        mask |= (1 << vec);
        RTE_SET_USED(queue_id);
        IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);

        rte_intr_enable(intr_handle);

        return 0;
}

static int
ixgbevf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
{
        uint32_t mask;
        struct ixgbe_hw *hw =
                IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
        struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
        uint32_t vec = IXGBE_MISC_VEC_ID;

        mask = IXGBE_READ_REG(hw, IXGBE_VTEIMS);
        if (rte_intr_allow_others(intr_handle))
                vec = IXGBE_RX_VEC_START;
        mask &= ~(1 << vec);
        RTE_SET_USED(queue_id);
        IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);

        return 0;
}

static int
ixgbe_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
{
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
        struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
        uint32_t mask;
        struct ixgbe_hw *hw =
                IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct ixgbe_interrupt *intr =
                IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);

        if (queue_id < 16) {
                ixgbe_disable_intr(hw);
                intr->mask |= (1 << queue_id);
                ixgbe_enable_intr(dev);
        } else if (queue_id < 32) {
                mask = IXGBE_READ_REG(hw, IXGBE_EIMS_EX(0));
                mask |= (1 << queue_id);
                IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
        } else if (queue_id < 64) {
                mask = IXGBE_READ_REG(hw, IXGBE_EIMS_EX(1));
                mask |= (1 << (queue_id - 32));
                IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
        }
        rte_intr_enable(intr_handle);

        return 0;
}

static int
ixgbe_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
{
        uint32_t mask;
        struct ixgbe_hw *hw =
                IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct ixgbe_interrupt *intr =
                IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);

        if (queue_id < 16) {
                ixgbe_disable_intr(hw);
                intr->mask &= ~(1 << queue_id);
                ixgbe_enable_intr(dev);
        } else if (queue_id < 32) {
                mask = IXGBE_READ_REG(hw, IXGBE_EIMS_EX(0));
                mask &= ~(1 << queue_id);
                IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
        } else if (queue_id < 64) {
                mask = IXGBE_READ_REG(hw, IXGBE_EIMS_EX(1));
                mask &= ~(1 << (queue_id - 32));
                IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
        }

        return 0;
}

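/*
 * Each 32-bit VTIVAR register packs four 8-bit vector entries covering
 * the RX and TX causes of two queues: entry offset
 * idx = 16 * (queue & 1) + 8 * direction within VTIVAR(queue >> 1).
 * E.g. the RX cause of queue 3 lands at bits 23:16 of VTIVAR(1).
 * direction == -1 selects the separate "other causes" VTIVAR_MISC.
 */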
static void
ixgbevf_set_ivar_map(struct ixgbe_hw *hw, int8_t direction,
                     uint8_t queue, uint8_t msix_vector)
{
        uint32_t tmp, idx;

        if (direction == -1) {
                /* other causes */
                msix_vector |= IXGBE_IVAR_ALLOC_VAL;
                tmp = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
                tmp &= ~0xFF;
                tmp |= msix_vector;
                IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, tmp);
        } else {
                /* rx or tx cause */
                msix_vector |= IXGBE_IVAR_ALLOC_VAL;
                idx = ((16 * (queue & 1)) + (8 * direction));
                tmp = IXGBE_READ_REG(hw, IXGBE_VTIVAR(queue >> 1));
                tmp &= ~(0xFF << idx);
                tmp |= (msix_vector << idx);
                IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(queue >> 1), tmp);
        }
}

/**
 * set the IVAR registers, mapping interrupt causes to vectors
 * @param hw
 *  pointer to ixgbe_hw struct
 * @direction
 *  0 for Rx, 1 for Tx, -1 for other causes
 * @queue
 *  queue to map the corresponding interrupt to
 * @msix_vector
 *  the vector to map to the corresponding queue
 */
static void
ixgbe_set_ivar_map(struct ixgbe_hw *hw, int8_t direction,
                   uint8_t queue, uint8_t msix_vector)
{
        uint32_t tmp, idx;

        msix_vector |= IXGBE_IVAR_ALLOC_VAL;
        if (hw->mac.type == ixgbe_mac_82598EB) {
                if (direction == -1)
                        direction = 0;
                idx = (((direction * 64) + queue) >> 2) & 0x1F;
                tmp = IXGBE_READ_REG(hw, IXGBE_IVAR(idx));
                tmp &= ~(0xFF << (8 * (queue & 0x3)));
                tmp |= (msix_vector << (8 * (queue & 0x3)));
                IXGBE_WRITE_REG(hw, IXGBE_IVAR(idx), tmp);
        } else if ((hw->mac.type == ixgbe_mac_82599EB) ||
                        (hw->mac.type == ixgbe_mac_X540) ||
                        (hw->mac.type == ixgbe_mac_X550)) {
                if (direction == -1) {
                        /* other causes */
                        idx = ((queue & 1) * 8);
                        tmp = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
                        tmp &= ~(0xFF << idx);
                        tmp |= (msix_vector << idx);
                        IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, tmp);
                } else {
                        /* rx or tx causes */
                        idx = ((16 * (queue & 1)) + (8 * direction));
                        tmp = IXGBE_READ_REG(hw, IXGBE_IVAR(queue >> 1));
                        tmp &= ~(0xFF << idx);
                        tmp |= (msix_vector << idx);
                        IXGBE_WRITE_REG(hw, IXGBE_IVAR(queue >> 1), tmp);
                }
        }
}

static void
ixgbevf_configure_msix(struct rte_eth_dev *dev)
{
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
        struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
        struct ixgbe_hw *hw =
                IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        uint32_t q_idx;
        uint32_t vector_idx = IXGBE_MISC_VEC_ID;
        uint32_t base = IXGBE_MISC_VEC_ID;

        /* Configure VF other cause ivar */
        ixgbevf_set_ivar_map(hw, -1, 1, vector_idx);

        /* Don't configure the MSI-X register if no mapping is done
         * between intr vector and event fd.
         */
        if (!rte_intr_dp_is_en(intr_handle))
                return;

        if (rte_intr_allow_others(intr_handle)) {
                base = IXGBE_RX_VEC_START;
                vector_idx = IXGBE_RX_VEC_START;
        }

        /* Configure all RX queues of VF */
        for (q_idx = 0; q_idx < dev->data->nb_rx_queues; q_idx++) {
                /* Force all queues to use vector 0,
                 * as IXGBE_VF_MAXMSIVECOTR = 1
                 */
                ixgbevf_set_ivar_map(hw, 0, q_idx, vector_idx);
                intr_handle->intr_vec[q_idx] = vector_idx;
                if (vector_idx < base + intr_handle->nb_efd - 1)
                        vector_idx++;
        }
}

/**
 * Sets up the hardware to properly generate MSI-X interrupts
 * @hw
 *  board private structure
 */
static void
ixgbe_configure_msix(struct rte_eth_dev *dev)
{
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
        struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
        struct ixgbe_hw *hw =
                IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        uint32_t queue_id, base = IXGBE_MISC_VEC_ID;
        uint32_t vec = IXGBE_MISC_VEC_ID;
        uint32_t mask;
        uint32_t gpie;

        /* Don't configure the MSI-X register if no mapping is done
         * between intr vector and event fd.
         */
        if (!rte_intr_dp_is_en(intr_handle))
                return;

        if (rte_intr_allow_others(intr_handle))
                vec = base = IXGBE_RX_VEC_START;

        /* setup GPIE for MSI-X mode */
        gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
        gpie |= IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_PBA_SUPPORT |
                IXGBE_GPIE_OCD | IXGBE_GPIE_EIAME;
        /* auto clearing and auto setting corresponding bits in EIMS
         * when MSI-X interrupt is triggered
         */
        if (hw->mac.type == ixgbe_mac_82598EB) {
                IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
        } else {
                IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
                IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
        }
        IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);

        /* Populate the IVAR table and set the ITR values to the
         * corresponding register.
         */
        for (queue_id = 0; queue_id < dev->data->nb_rx_queues;
             queue_id++) {
                /* by default, 1:1 mapping */
                ixgbe_set_ivar_map(hw, 0, queue_id, vec);
                intr_handle->intr_vec[queue_id] = vec;
                if (vec < base + intr_handle->nb_efd - 1)
                        vec++;
        }

        switch (hw->mac.type) {
        case ixgbe_mac_82598EB:
                ixgbe_set_ivar_map(hw, -1, IXGBE_IVAR_OTHER_CAUSES_INDEX,
                                   IXGBE_MISC_VEC_ID);
                break;
        case ixgbe_mac_82599EB:
        case ixgbe_mac_X540:
        case ixgbe_mac_X550:
                ixgbe_set_ivar_map(hw, -1, 1, IXGBE_MISC_VEC_ID);
                break;
        default:
                break;
        }
        IXGBE_WRITE_REG(hw, IXGBE_EITR(IXGBE_MISC_VEC_ID),
                        IXGBE_MIN_INTER_INTERRUPT_INTERVAL_DEFAULT & 0xFFF);

        /* set up to autoclear timer, and the vectors */
        mask = IXGBE_EIMS_ENABLE_MASK;
        mask &= ~(IXGBE_EIMS_OTHER |
                  IXGBE_EIMS_MAILBOX |
                  IXGBE_EIMS_LSC);

        IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
}

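/*
 * Program a per-queue TX rate limit via RTTBCNRC.  The limit is stored
 * as a rate factor relative to the link speed, split into an integer
 * and a fixed-point fractional part: e.g. link_speed = 10000 Mb/s and
 * tx_rate = 3000 Mb/s gives rf_int = 3, with rf_dec encoding the
 * remaining 1000/3000.  tx_rate == 0 disables the limiter.
 */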
int
ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev,
                           uint16_t queue_idx, uint16_t tx_rate)
{
        struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        uint32_t rf_dec, rf_int;
        uint32_t bcnrc_val;
        uint16_t link_speed = dev->data->dev_link.link_speed;

        if (queue_idx >= hw->mac.max_tx_queues)
                return -EINVAL;

        if (tx_rate != 0) {
                /* Calculate the rate factor values to set */
                rf_int = (uint32_t)link_speed / (uint32_t)tx_rate;
                rf_dec = (uint32_t)link_speed % (uint32_t)tx_rate;
                rf_dec = (rf_dec << IXGBE_RTTBCNRC_RF_INT_SHIFT) / tx_rate;

                bcnrc_val = IXGBE_RTTBCNRC_RS_ENA;
                bcnrc_val |= ((rf_int << IXGBE_RTTBCNRC_RF_INT_SHIFT) &
                                IXGBE_RTTBCNRC_RF_INT_MASK_M);
                bcnrc_val |= (rf_dec & IXGBE_RTTBCNRC_RF_DEC_MASK);
        } else {
                bcnrc_val = 0;
        }

        /*
         * Set global transmit compensation time to the MMW_SIZE in RTTBCNRM
         * register. MMW_SIZE=0x014 if 9728-byte jumbo is supported, otherwise
         * set as 0x4.
         */
        if ((dev->data->dev_conf.rxmode.jumbo_frame == 1) &&
                (dev->data->dev_conf.rxmode.max_rx_pkt_len >=
                                IXGBE_MAX_JUMBO_FRAME_SIZE))
                IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM,
                        IXGBE_MMW_SIZE_JUMBO_FRAME);
        else
                IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM,
                        IXGBE_MMW_SIZE_DEFAULT);

        /* Set RTTBCNRC of queue X */
        IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, queue_idx);
        IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, bcnrc_val);
        IXGBE_WRITE_FLUSH(hw);

        return 0;
}

static int
ixgbevf_add_mac_addr(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
                     __attribute__((unused)) uint32_t index,
                     __attribute__((unused)) uint32_t pool)
{
        struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        int diag;

        /*
         * On an 82599 VF, adding the same MAC address again is not an
         * idempotent operation. Trap this case to avoid exhausting the
         * [very limited] set of PF resources used to store VF MAC
         * addresses.
         */
        if (memcmp(hw->mac.perm_addr, mac_addr, sizeof(struct ether_addr)) == 0)
                return -1;
        diag = ixgbevf_set_uc_addr_vf(hw, 2, mac_addr->addr_bytes);
        if (diag != 0)
                PMD_DRV_LOG(ERR, "Unable to add MAC address "
                            "%02x:%02x:%02x:%02x:%02x:%02x - diag=%d",
                            mac_addr->addr_bytes[0],
                            mac_addr->addr_bytes[1],
                            mac_addr->addr_bytes[2],
                            mac_addr->addr_bytes[3],
                            mac_addr->addr_bytes[4],
                            mac_addr->addr_bytes[5],
                            diag);
        return diag;
}

static void
ixgbevf_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index)
{
        struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct ether_addr *perm_addr = (struct ether_addr *) hw->mac.perm_addr;
        struct ether_addr *mac_addr;
        uint32_t i;
        int diag;

        /*
         * The IXGBE_VF_SET_MACVLAN command of the ixgbe-pf driver does
         * not support the deletion of a given MAC address.
         * Instead, it requires deleting all MAC addresses and then adding
         * back all MAC addresses except the one to be deleted.
         */
        (void) ixgbevf_set_uc_addr_vf(hw, 0, NULL);

        /*
         * Add back all MAC addresses, except the deleted one and the
         * permanent MAC address.
         */
        for (i = 0, mac_addr = dev->data->mac_addrs;
             i < hw->mac.num_rar_entries; i++, mac_addr++) {
                /* Skip the deleted MAC address */
                if (i == index)
                        continue;
                /* Skip NULL MAC addresses */
                if (is_zero_ether_addr(mac_addr))
                        continue;
                /* Skip the permanent MAC address */
                if (memcmp(perm_addr, mac_addr, sizeof(struct ether_addr)) == 0)
                        continue;
                diag = ixgbevf_set_uc_addr_vf(hw, 2, mac_addr->addr_bytes);
                if (diag != 0)
                        PMD_DRV_LOG(ERR,
                                    "Adding again MAC address "
                                    "%02x:%02x:%02x:%02x:%02x:%02x failed "
                                    "diag=%d",
                                    mac_addr->addr_bytes[0],
                                    mac_addr->addr_bytes[1],
                                    mac_addr->addr_bytes[2],
                                    mac_addr->addr_bytes[3],
                                    mac_addr->addr_bytes[4],
                                    mac_addr->addr_bytes[5],
                                    diag);
        }
}

static void
ixgbevf_set_default_mac_addr(struct rte_eth_dev *dev, struct ether_addr *addr)
{
        struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

        hw->mac.ops.set_rar(hw, 0, (void *)addr, 0, 0);
}

int
ixgbe_syn_filter_set(struct rte_eth_dev *dev,
                        struct rte_eth_syn_filter *filter,
                        bool add)
{
        struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct ixgbe_filter_info *filter_info =
                IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
        uint32_t syn_info;
        uint32_t synqf;

        if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM)
                return -EINVAL;

        syn_info = filter_info->syn_info;

        if (add) {
                if (syn_info & IXGBE_SYN_FILTER_ENABLE)
                        return -EINVAL;
                synqf = (uint32_t)(((filter->queue << IXGBE_SYN_FILTER_QUEUE_SHIFT) &
                        IXGBE_SYN_FILTER_QUEUE) | IXGBE_SYN_FILTER_ENABLE);

                if (filter->hig_pri)
                        synqf |= IXGBE_SYN_FILTER_SYNQFP;
                else
                        synqf &= ~IXGBE_SYN_FILTER_SYNQFP;
        } else {
                synqf = IXGBE_READ_REG(hw, IXGBE_SYNQF);
                if (!(syn_info & IXGBE_SYN_FILTER_ENABLE))
                        return -ENOENT;
                synqf &= ~(IXGBE_SYN_FILTER_QUEUE | IXGBE_SYN_FILTER_ENABLE);
        }

        filter_info->syn_info = synqf;
        IXGBE_WRITE_REG(hw, IXGBE_SYNQF, synqf);
        IXGBE_WRITE_FLUSH(hw);
        return 0;
}

static int
ixgbe_syn_filter_get(struct rte_eth_dev *dev,
                        struct rte_eth_syn_filter *filter)
{
        struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        uint32_t synqf = IXGBE_READ_REG(hw, IXGBE_SYNQF);

        if (synqf & IXGBE_SYN_FILTER_ENABLE) {
                filter->hig_pri = (synqf & IXGBE_SYN_FILTER_SYNQFP) ? 1 : 0;
                filter->queue = (uint16_t)((synqf & IXGBE_SYN_FILTER_QUEUE) >> 1);
                return 0;
        }
        return -ENOENT;
}

static int
ixgbe_syn_filter_handle(struct rte_eth_dev *dev,
                        enum rte_filter_op filter_op,
                        void *arg)
{
        struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        int ret;

        MAC_TYPE_FILTER_SUP(hw->mac.type);

        if (filter_op == RTE_ETH_FILTER_NOP)
                return 0;

        if (arg == NULL) {
                PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u",
                            filter_op);
                return -EINVAL;
        }

        switch (filter_op) {
        case RTE_ETH_FILTER_ADD:
                ret = ixgbe_syn_filter_set(dev,
                                (struct rte_eth_syn_filter *)arg,
                                TRUE);
                break;
        case RTE_ETH_FILTER_DELETE:
                ret = ixgbe_syn_filter_set(dev,
                                (struct rte_eth_syn_filter *)arg,
                                FALSE);
                break;
        case RTE_ETH_FILTER_GET:
                ret = ixgbe_syn_filter_get(dev,
                                (struct rte_eth_syn_filter *)arg);
                break;
        default:
                PMD_DRV_LOG(ERR, "unsupported operation %u", filter_op);
                ret = -EINVAL;
                break;
        }

        return ret;
}

static inline enum ixgbe_5tuple_protocol
convert_protocol_type(uint8_t protocol_value)
{
        if (protocol_value == IPPROTO_TCP)
                return IXGBE_FILTER_PROTOCOL_TCP;
        else if (protocol_value == IPPROTO_UDP)
                return IXGBE_FILTER_PROTOCOL_UDP;
        else if (protocol_value == IPPROTO_SCTP)
                return IXGBE_FILTER_PROTOCOL_SCTP;
        else
                return IXGBE_FILTER_PROTOCOL_NONE;
}

/* inject a 5-tuple filter into HW */
static inline void
ixgbe_inject_5tuple_filter(struct rte_eth_dev *dev,
                           struct ixgbe_5tuple_filter *filter)
{
        struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        int i;
        uint32_t ftqf, sdpqf;
        uint32_t l34timir = 0;
        uint8_t mask = 0xff;

        i = filter->index;

        sdpqf = (uint32_t)(filter->filter_info.dst_port <<
                                IXGBE_SDPQF_DSTPORT_SHIFT);
        sdpqf = sdpqf | (filter->filter_info.src_port & IXGBE_SDPQF_SRCPORT);

        ftqf = (uint32_t)(filter->filter_info.proto &
                IXGBE_FTQF_PROTOCOL_MASK);
        ftqf |= (uint32_t)((filter->filter_info.priority &
                IXGBE_FTQF_PRIORITY_MASK) << IXGBE_FTQF_PRIORITY_SHIFT);
        if (filter->filter_info.src_ip_mask == 0) /* 0 means compare. */
                mask &= IXGBE_FTQF_SOURCE_ADDR_MASK;
        if (filter->filter_info.dst_ip_mask == 0)
                mask &= IXGBE_FTQF_DEST_ADDR_MASK;
        if (filter->filter_info.src_port_mask == 0)
                mask &= IXGBE_FTQF_SOURCE_PORT_MASK;
        if (filter->filter_info.dst_port_mask == 0)
                mask &= IXGBE_FTQF_DEST_PORT_MASK;
        if (filter->filter_info.proto_mask == 0)
                mask &= IXGBE_FTQF_PROTOCOL_COMP_MASK;
        ftqf |= mask << IXGBE_FTQF_5TUPLE_MASK_SHIFT;
        ftqf |= IXGBE_FTQF_POOL_MASK_EN;
        ftqf |= IXGBE_FTQF_QUEUE_ENABLE;

        IXGBE_WRITE_REG(hw, IXGBE_DAQF(i), filter->filter_info.dst_ip);
        IXGBE_WRITE_REG(hw, IXGBE_SAQF(i), filter->filter_info.src_ip);
        IXGBE_WRITE_REG(hw, IXGBE_SDPQF(i), sdpqf);
        IXGBE_WRITE_REG(hw, IXGBE_FTQF(i), ftqf);

        l34timir |= IXGBE_L34T_IMIR_RESERVE;
        l34timir |= (uint32_t)(filter->queue <<
                                IXGBE_L34T_IMIR_QUEUE_SHIFT);
        IXGBE_WRITE_REG(hw, IXGBE_L34T_IMIR(i), l34timir);
}

/*
 * add a 5-tuple filter
 *
 * @param
 * dev: Pointer to struct rte_eth_dev.
 * filter: pointer to the filter that will be added; on success its
 *         index field is set to the allocated filter index.
 *
 * @return
 *    - On success, zero.
 *    - On failure, a negative value.
 */
static int
ixgbe_add_5tuple_filter(struct rte_eth_dev *dev,
                        struct ixgbe_5tuple_filter *filter)
{
        struct ixgbe_filter_info *filter_info =
                IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
        int i, idx, shift;

        /*
         * Look for an unused 5-tuple filter index,
         * and insert the filter into the list.
         */
        for (i = 0; i < IXGBE_MAX_FTQF_FILTERS; i++) {
                idx = i / (sizeof(uint32_t) * NBBY);
                shift = i % (sizeof(uint32_t) * NBBY);
                if (!(filter_info->fivetuple_mask[idx] & (1 << shift))) {
                        filter_info->fivetuple_mask[idx] |= 1 << shift;
                        filter->index = i;
                        TAILQ_INSERT_TAIL(&filter_info->fivetuple_list,
                                          filter,
                                          entries);
                        break;
                }
        }
        if (i >= IXGBE_MAX_FTQF_FILTERS) {
                PMD_DRV_LOG(ERR, "5tuple filters are full.");
                return -ENOSYS;
        }

        ixgbe_inject_5tuple_filter(dev, filter);

        return 0;
}

/*
 * remove a 5-tuple filter
 *
 * @param
 * dev: Pointer to struct rte_eth_dev.
 * filter: pointer to the filter to be removed.
 */
static void
ixgbe_remove_5tuple_filter(struct rte_eth_dev *dev,
                        struct ixgbe_5tuple_filter *filter)
{
        struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct ixgbe_filter_info *filter_info =
                IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
        uint16_t index = filter->index;

        filter_info->fivetuple_mask[index / (sizeof(uint32_t) * NBBY)] &=
                                ~(1 << (index % (sizeof(uint32_t) * NBBY)));
        TAILQ_REMOVE(&filter_info->fivetuple_list, filter, entries);
        rte_free(filter);

        IXGBE_WRITE_REG(hw, IXGBE_DAQF(index), 0);
        IXGBE_WRITE_REG(hw, IXGBE_SAQF(index), 0);
        IXGBE_WRITE_REG(hw, IXGBE_SDPQF(index), 0);
        IXGBE_WRITE_REG(hw, IXGBE_FTQF(index), 0);
        IXGBE_WRITE_REG(hw, IXGBE_L34T_IMIR(index), 0);
}

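/*
 * VF MTU update.  max_frame adds the Ethernet header and CRC to the
 * requested MTU (e.g. MTU 1500 -> 1518 bytes) and is pushed to the PF
 * through the IXGBE_VF_SET_LPE mailbox request described below.
 */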
static int
ixgbevf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
{
        struct ixgbe_hw *hw;
        uint32_t max_frame = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
        struct rte_eth_rxmode *rx_conf = &dev->data->dev_conf.rxmode;

        hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

        if ((mtu < ETHER_MIN_MTU) || (max_frame > ETHER_MAX_JUMBO_FRAME_LEN))
                return -EINVAL;

        /* Refuse an MTU that requires scattered-packet support when that
         * feature has not been enabled beforehand.
         */
        if (!rx_conf->enable_scatter &&
            (max_frame + 2 * IXGBE_VLAN_TAG_SIZE >
             dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM))
                return -EINVAL;

        /*
         * When supported by the underlying PF driver, use the IXGBE_VF_SET_MTU
         * request of the version 2.0 of the mailbox API.
         * For now, use the IXGBE_VF_SET_LPE request of the version 1.0
         * of the mailbox API.
         * This call to IXGBE_SET_LPE action won't work with ixgbe pf drivers
         * prior to 3.11.33 which contains the following change:
         * "ixgbe: Enable jumbo frames support w/ SR-IOV"
         */
        ixgbevf_rlpml_set_vf(hw, max_frame);

        /* update max frame size */
        dev->data->dev_conf.rxmode.max_rx_pkt_len = max_frame;
        return 0;
}

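/*
 * Linear search of the 5-tuple filter list for an entry whose
 * filter_info matches the given key byte-for-byte.
 */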
static inline struct ixgbe_5tuple_filter *
ixgbe_5tuple_filter_lookup(struct ixgbe_5tuple_filter_list *filter_list,
                        struct ixgbe_5tuple_filter_info *key)
{
        struct ixgbe_5tuple_filter *it;

        TAILQ_FOREACH(it, filter_list, entries) {
                if (memcmp(key, &it->filter_info,
                        sizeof(struct ixgbe_5tuple_filter_info)) == 0) {
                        return it;
                }
        }
        return NULL;
}

/* translate elements in struct rte_eth_ntuple_filter to struct ixgbe_5tuple_filter_info */
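/*
 * Each field follows the same convention: an all-ones mask means the
 * field is compared (ixgbe mask bit = 0), a zero mask means the field
 * is ignored (ixgbe mask bit = 1), and partial masks are rejected.
 */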
static inline int
ntuple_filter_to_5tuple(struct rte_eth_ntuple_filter *filter,
                        struct ixgbe_5tuple_filter_info *filter_info)
{
        if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM ||
                filter->priority > IXGBE_5TUPLE_MAX_PRI ||
                filter->priority < IXGBE_5TUPLE_MIN_PRI)
                return -EINVAL;

        switch (filter->dst_ip_mask) {
        case UINT32_MAX:
                filter_info->dst_ip_mask = 0;
                filter_info->dst_ip = filter->dst_ip;
                break;
        case 0:
                filter_info->dst_ip_mask = 1;
                break;
        default:
                PMD_DRV_LOG(ERR, "invalid dst_ip mask.");
                return -EINVAL;
        }

        switch (filter->src_ip_mask) {
        case UINT32_MAX:
                filter_info->src_ip_mask = 0;
                filter_info->src_ip = filter->src_ip;
                break;
        case 0:
                filter_info->src_ip_mask = 1;
                break;
        default:
                PMD_DRV_LOG(ERR, "invalid src_ip mask.");
                return -EINVAL;
        }

        switch (filter->dst_port_mask) {
        case UINT16_MAX:
                filter_info->dst_port_mask = 0;
                filter_info->dst_port = filter->dst_port;
                break;
        case 0:
                filter_info->dst_port_mask = 1;
                break;
        default:
                PMD_DRV_LOG(ERR, "invalid dst_port mask.");
                return -EINVAL;
        }

        switch (filter->src_port_mask) {
        case UINT16_MAX:
                filter_info->src_port_mask = 0;
                filter_info->src_port = filter->src_port;
                break;
        case 0:
                filter_info->src_port_mask = 1;
                break;
        default:
                PMD_DRV_LOG(ERR, "invalid src_port mask.");
                return -EINVAL;
        }

        switch (filter->proto_mask) {
        case UINT8_MAX:
                filter_info->proto_mask = 0;
                filter_info->proto =
                        convert_protocol_type(filter->proto);
                break;
        case 0:
                filter_info->proto_mask = 1;
                break;
        default:
                PMD_DRV_LOG(ERR, "invalid protocol mask.");
                return -EINVAL;
        }

        filter_info->priority = (uint8_t)filter->priority;
        return 0;
}

/*
 * add or delete an ntuple filter
 *
 * @param
 * dev: Pointer to struct rte_eth_dev.
 * ntuple_filter: Pointer to struct rte_eth_ntuple_filter
 * add: if true, add the filter; if false, remove the filter
 *
 * @return
 *    - On success, zero.
 *    - On failure, a negative value.
 */
int
ixgbe_add_del_ntuple_filter(struct rte_eth_dev *dev,
                        struct rte_eth_ntuple_filter *ntuple_filter,
                        bool add)
{
        struct ixgbe_filter_info *filter_info =
                IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
        struct ixgbe_5tuple_filter_info filter_5tuple;
        struct ixgbe_5tuple_filter *filter;
        int ret;

        if (ntuple_filter->flags != RTE_5TUPLE_FLAGS) {
                PMD_DRV_LOG(ERR, "only 5tuple is supported.");
                return -EINVAL;
        }

        memset(&filter_5tuple, 0, sizeof(struct ixgbe_5tuple_filter_info));
        ret = ntuple_filter_to_5tuple(ntuple_filter, &filter_5tuple);
        if (ret < 0)
                return ret;

        filter = ixgbe_5tuple_filter_lookup(&filter_info->fivetuple_list,
                                         &filter_5tuple);
        if (filter != NULL && add) {
                PMD_DRV_LOG(ERR, "filter exists.");
                return -EEXIST;
        }
        if (filter == NULL && !add) {
                PMD_DRV_LOG(ERR, "filter doesn't exist.");
                return -ENOENT;
        }

        if (add) {
                filter = rte_zmalloc("ixgbe_5tuple_filter",
                                sizeof(struct ixgbe_5tuple_filter), 0);
                if (filter == NULL)
                        return -ENOMEM;
                rte_memcpy(&filter->filter_info,
                                 &filter_5tuple,
                                 sizeof(struct ixgbe_5tuple_filter_info));
                filter->queue = ntuple_filter->queue;
                ret = ixgbe_add_5tuple_filter(dev, filter);
                if (ret < 0) {
                        rte_free(filter);
                        return ret;
                }
        } else
                ixgbe_remove_5tuple_filter(dev, filter);

        return 0;
}

/*
 * get an ntuple filter
 *
 * @param
 * dev: Pointer to struct rte_eth_dev.
 * ntuple_filter: Pointer to struct rte_eth_ntuple_filter
 *
 * @return
 *    - On success, zero.
 *    - On failure, a negative value.
 */
static int
ixgbe_get_ntuple_filter(struct rte_eth_dev *dev,
                        struct rte_eth_ntuple_filter *ntuple_filter)
{
        struct ixgbe_filter_info *filter_info =
                IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
        struct ixgbe_5tuple_filter_info filter_5tuple;
        struct ixgbe_5tuple_filter *filter;
        int ret;

        if (ntuple_filter->flags != RTE_5TUPLE_FLAGS) {
                PMD_DRV_LOG(ERR, "only 5tuple is supported.");
                return -EINVAL;
        }

        memset(&filter_5tuple, 0, sizeof(struct ixgbe_5tuple_filter_info));
        ret = ntuple_filter_to_5tuple(ntuple_filter, &filter_5tuple);
        if (ret < 0)
                return ret;

        filter = ixgbe_5tuple_filter_lookup(&filter_info->fivetuple_list,
                                         &filter_5tuple);
        if (filter == NULL) {
                PMD_DRV_LOG(ERR, "filter doesn't exist.");
                return -ENOENT;
        }
        ntuple_filter->queue = filter->queue;
        return 0;
}

/*
 * ixgbe_ntuple_filter_handle - Handle operations for ntuple filter.
 * @dev: pointer to rte_eth_dev structure
 * @filter_op: operation to be taken.
 * @arg: a pointer to specific structure corresponding to the filter_op
 *
 * @return
 *    - On success, zero.
 *    - On failure, a negative value.
 */
static int
ixgbe_ntuple_filter_handle(struct rte_eth_dev *dev,
                                enum rte_filter_op filter_op,
                                void *arg)
{
        struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        int ret;

        MAC_TYPE_FILTER_SUP_EXT(hw->mac.type);

        if (filter_op == RTE_ETH_FILTER_NOP)
                return 0;

        if (arg == NULL) {
                PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u.",
                            filter_op);
                return -EINVAL;
        }

        switch (filter_op) {
        case RTE_ETH_FILTER_ADD:
                ret = ixgbe_add_del_ntuple_filter(dev,
                        (struct rte_eth_ntuple_filter *)arg,
                        TRUE);
                break;
        case RTE_ETH_FILTER_DELETE:
                ret = ixgbe_add_del_ntuple_filter(dev,
                        (struct rte_eth_ntuple_filter *)arg,
                        FALSE);
                break;
        case RTE_ETH_FILTER_GET:
                ret = ixgbe_get_ntuple_filter(dev,
                        (struct rte_eth_ntuple_filter *)arg);
                break;
        default:
                PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op);
                ret = -EINVAL;
                break;
        }
        return ret;
}

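/*
 * Add or delete an ethertype filter.  ETQF(n) holds the ethertype and
 * the enable bit, ETQS(n) the destination queue.  IPv4/IPv6 ethertypes
 * and the MAC-compare/drop flags are rejected as unsupported.
 */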
6537 int
6538 ixgbe_add_del_ethertype_filter(struct rte_eth_dev *dev,
6539                         struct rte_eth_ethertype_filter *filter,
6540                         bool add)
6541 {
6542         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6543         struct ixgbe_filter_info *filter_info =
6544                 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
6545         uint32_t etqf = 0;
6546         uint32_t etqs = 0;
6547         int ret;
6548         struct ixgbe_ethertype_filter ethertype_filter;
6549
6550         if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM)
6551                 return -EINVAL;
6552
6553         if (filter->ether_type == ETHER_TYPE_IPv4 ||
6554                 filter->ether_type == ETHER_TYPE_IPv6) {
6555                 PMD_DRV_LOG(ERR, "unsupported ether_type(0x%04x) in"
6556                         " ethertype filter.", filter->ether_type);
6557                 return -EINVAL;
6558         }
6559
6560         if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
6561                 PMD_DRV_LOG(ERR, "mac compare is unsupported.");
6562                 return -EINVAL;
6563         }
6564         if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) {
6565                 PMD_DRV_LOG(ERR, "drop option is unsupported.");
6566                 return -EINVAL;
6567         }
6568
6569         ret = ixgbe_ethertype_filter_lookup(filter_info, filter->ether_type);
6570         if (ret >= 0 && add) {
6571                 PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter exists.",
6572                             filter->ether_type);
6573                 return -EEXIST;
6574         }
6575         if (ret < 0 && !add) {
6576                 PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter doesn't exist.",
6577                             filter->ether_type);
6578                 return -ENOENT;
6579         }
6580
6581         if (add) {
6582                 etqf = IXGBE_ETQF_FILTER_EN;
6583                 etqf |= (uint32_t)filter->ether_type;
6584                 etqs |= (uint32_t)((filter->queue <<
6585                                     IXGBE_ETQS_RX_QUEUE_SHIFT) &
6586                                     IXGBE_ETQS_RX_QUEUE);
6587                 etqs |= IXGBE_ETQS_QUEUE_EN;
6588
6589                 ethertype_filter.ethertype = filter->ether_type;
6590                 ethertype_filter.etqf = etqf;
6591                 ethertype_filter.etqs = etqs;
6592                 ethertype_filter.conf = FALSE;
6593                 ret = ixgbe_ethertype_filter_insert(filter_info,
6594                                                     &ethertype_filter);
6595                 if (ret < 0) {
6596                         PMD_DRV_LOG(ERR, "ethertype filters are full.");
6597                         return -ENOSPC;
6598                 }
6599         } else {
6600                 ret = ixgbe_ethertype_filter_remove(filter_info, (uint8_t)ret);
6601                 if (ret < 0)
6602                         return -ENOSYS;
6603         }
6604         IXGBE_WRITE_REG(hw, IXGBE_ETQF(ret), etqf);
6605         IXGBE_WRITE_REG(hw, IXGBE_ETQS(ret), etqs);
6606         IXGBE_WRITE_FLUSH(hw);
6607
6608         return 0;
6609 }
6610
6611 static int
6612 ixgbe_get_ethertype_filter(struct rte_eth_dev *dev,
6613                         struct rte_eth_ethertype_filter *filter)
6614 {
6615         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6616         struct ixgbe_filter_info *filter_info =
6617                 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
6618         uint32_t etqf, etqs;
6619         int ret;
6620
6621         ret = ixgbe_ethertype_filter_lookup(filter_info, filter->ether_type);
6622         if (ret < 0) {
6623                 PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter doesn't exist.",
6624                             filter->ether_type);
6625                 return -ENOENT;
6626         }
6627
6628         etqf = IXGBE_READ_REG(hw, IXGBE_ETQF(ret));
6629         if (etqf & IXGBE_ETQF_FILTER_EN) {
6630                 etqs = IXGBE_READ_REG(hw, IXGBE_ETQS(ret));
6631                 filter->ether_type = etqf & IXGBE_ETQF_ETHERTYPE;
6632                 filter->flags = 0;
6633                 filter->queue = (etqs & IXGBE_ETQS_RX_QUEUE) >>
6634                                IXGBE_ETQS_RX_QUEUE_SHIFT;
6635                 return 0;
6636         }
6637         return -ENOENT;
6638 }
6639
6640 /*
6641  * ixgbe_ethertype_filter_handle - Handle operations for ethertype filter.
6642  * @dev: pointer to rte_eth_dev structure
6643  * @filter_op:operation will be taken.
6644  * @arg: a pointer to specific structure corresponding to the filter_op
6645  */
static int
ixgbe_ethertype_filter_handle(struct rte_eth_dev *dev,
                                enum rte_filter_op filter_op,
                                void *arg)
{
        struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        int ret;

        MAC_TYPE_FILTER_SUP(hw->mac.type);

        if (filter_op == RTE_ETH_FILTER_NOP)
                return 0;

        if (arg == NULL) {
                PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u.",
                            filter_op);
                return -EINVAL;
        }

        switch (filter_op) {
        case RTE_ETH_FILTER_ADD:
                ret = ixgbe_add_del_ethertype_filter(dev,
                        (struct rte_eth_ethertype_filter *)arg,
                        TRUE);
                break;
        case RTE_ETH_FILTER_DELETE:
                ret = ixgbe_add_del_ethertype_filter(dev,
                        (struct rte_eth_ethertype_filter *)arg,
                        FALSE);
                break;
        case RTE_ETH_FILTER_GET:
                ret = ixgbe_get_ethertype_filter(dev,
                        (struct rte_eth_ethertype_filter *)arg);
                break;
        default:
                PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op);
                ret = -EINVAL;
                break;
        }
        return ret;
}

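/*
 * A minimal application-side sketch of driving this handler through the
 * legacy filter API; the port id, EtherType 0x88F7 and queue 1 below are
 * placeholder values.  rte_eth_dev_filter_ctrl() dispatches into
 * ixgbe_dev_filter_ctrl() below:
 *
 *     struct rte_eth_ethertype_filter filter = {
 *             .ether_type = 0x88F7,
 *             .flags = 0,
 *             .queue = 1,
 *     };
 *     ret = rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_ETHERTYPE,
 *                                   RTE_ETH_FILTER_ADD, &filter);
 */
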
static int
ixgbe_dev_filter_ctrl(struct rte_eth_dev *dev,
                     enum rte_filter_type filter_type,
                     enum rte_filter_op filter_op,
                     void *arg)
{
        int ret = 0;

        switch (filter_type) {
        case RTE_ETH_FILTER_NTUPLE:
                ret = ixgbe_ntuple_filter_handle(dev, filter_op, arg);
                break;
        case RTE_ETH_FILTER_ETHERTYPE:
                ret = ixgbe_ethertype_filter_handle(dev, filter_op, arg);
                break;
        case RTE_ETH_FILTER_SYN:
                ret = ixgbe_syn_filter_handle(dev, filter_op, arg);
                break;
        case RTE_ETH_FILTER_FDIR:
                ret = ixgbe_fdir_ctrl_func(dev, filter_op, arg);
                break;
        case RTE_ETH_FILTER_L2_TUNNEL:
                ret = ixgbe_dev_l2_tunnel_filter_handle(dev, filter_op, arg);
                break;
        case RTE_ETH_FILTER_GENERIC:
                if (filter_op != RTE_ETH_FILTER_GET)
                        return -EINVAL;
                *(const void **)arg = &ixgbe_flow_ops;
                break;
        default:
                PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
                                                        filter_type);
                ret = -EINVAL;
                break;
        }

        return ret;
}

static u8 *
ixgbe_dev_addr_list_itr(__attribute__((unused)) struct ixgbe_hw *hw,
                        u8 **mc_addr_ptr, u32 *vmdq)
{
        u8 *mc_addr;

        *vmdq = 0;
        mc_addr = *mc_addr_ptr;
        *mc_addr_ptr = (mc_addr + sizeof(struct ether_addr));
        return mc_addr;
}

static int
ixgbe_dev_set_mc_addr_list(struct rte_eth_dev *dev,
                          struct ether_addr *mc_addr_set,
                          uint32_t nb_mc_addr)
{
        struct ixgbe_hw *hw;
        u8 *mc_addr_list;

        hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        mc_addr_list = (u8 *)mc_addr_set;
        return ixgbe_update_mc_addr_list(hw, mc_addr_list, nb_mc_addr,
                                         ixgbe_dev_addr_list_itr, TRUE);
}

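/*
 * The three cyclecounter readers below cope with two hardware encodings.
 * On X550-class MACs the low register of each SYSTIME/timestamp pair holds
 * nanoseconds and the high register holds seconds, so the 64-bit value is
 * low + high * NSEC_PER_SEC.  On older MACs the pair is a plain 64-bit
 * free-running counter, so the value is low | (high << 32) and is converted
 * to nanoseconds later via the timecounter shift configured in
 * ixgbe_start_timecounters().
 */
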
static uint64_t
ixgbe_read_systime_cyclecounter(struct rte_eth_dev *dev)
{
        struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        uint64_t systime_cycles;

        switch (hw->mac.type) {
        case ixgbe_mac_X550:
        case ixgbe_mac_X550EM_x:
        case ixgbe_mac_X550EM_a:
                /* SYSTIMEL stores ns and SYSTIMEH stores seconds. */
                systime_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_SYSTIML);
                systime_cycles += (uint64_t)IXGBE_READ_REG(hw, IXGBE_SYSTIMH)
                                * NSEC_PER_SEC;
                break;
        default:
                systime_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_SYSTIML);
                systime_cycles |= (uint64_t)IXGBE_READ_REG(hw, IXGBE_SYSTIMH)
                                << 32;
        }

        return systime_cycles;
}

static uint64_t
ixgbe_read_rx_tstamp_cyclecounter(struct rte_eth_dev *dev)
{
        struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        uint64_t rx_tstamp_cycles;

        switch (hw->mac.type) {
        case ixgbe_mac_X550:
        case ixgbe_mac_X550EM_x:
        case ixgbe_mac_X550EM_a:
                /* RXSTMPL stores ns and RXSTMPH stores seconds. */
                rx_tstamp_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_RXSTMPL);
                rx_tstamp_cycles += (uint64_t)IXGBE_READ_REG(hw, IXGBE_RXSTMPH)
                                * NSEC_PER_SEC;
                break;
        default:
                /* RXSTMPL/RXSTMPH form a 64-bit cycle counter. */
                rx_tstamp_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_RXSTMPL);
                rx_tstamp_cycles |= (uint64_t)IXGBE_READ_REG(hw, IXGBE_RXSTMPH)
                                << 32;
        }

        return rx_tstamp_cycles;
}

static uint64_t
ixgbe_read_tx_tstamp_cyclecounter(struct rte_eth_dev *dev)
{
        struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        uint64_t tx_tstamp_cycles;

        switch (hw->mac.type) {
        case ixgbe_mac_X550:
        case ixgbe_mac_X550EM_x:
        case ixgbe_mac_X550EM_a:
                /* TXSTMPL stores ns and TXSTMPH stores seconds. */
                tx_tstamp_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_TXSTMPL);
                tx_tstamp_cycles += (uint64_t)IXGBE_READ_REG(hw, IXGBE_TXSTMPH)
                                * NSEC_PER_SEC;
                break;
        default:
                /* TXSTMPL/TXSTMPH form a 64-bit cycle counter. */
                tx_tstamp_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_TXSTMPL);
                tx_tstamp_cycles |= (uint64_t)IXGBE_READ_REG(hw, IXGBE_TXSTMPH)
                                << 32;
        }

        return tx_tstamp_cycles;
}

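/*
 * ixgbe_start_timecounters() below picks an (incval, shift) pair per link
 * speed so that a timestamp delta shifted right by cc_shift comes out in
 * nanoseconds.  On X550 the counter already ticks in nanoseconds
 * (incval = 1, shift = 0); on 82599 the pair is additionally scaled by
 * IXGBE_INCVAL_SHIFT_82599 to fit the TIMINCA register layout.
 */
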
static void
ixgbe_start_timecounters(struct rte_eth_dev *dev)
{
        struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct ixgbe_adapter *adapter =
                (struct ixgbe_adapter *)dev->data->dev_private;
        struct rte_eth_link link;
        uint32_t incval = 0;
        uint32_t shift = 0;

        /* Get current link speed. */
        memset(&link, 0, sizeof(link));
        ixgbe_dev_link_update(dev, 1);
        rte_ixgbe_dev_atomic_read_link_status(dev, &link);

        switch (link.link_speed) {
        case ETH_SPEED_NUM_100M:
                incval = IXGBE_INCVAL_100;
                shift = IXGBE_INCVAL_SHIFT_100;
                break;
        case ETH_SPEED_NUM_1G:
                incval = IXGBE_INCVAL_1GB;
                shift = IXGBE_INCVAL_SHIFT_1GB;
                break;
        case ETH_SPEED_NUM_10G:
        default:
                incval = IXGBE_INCVAL_10GB;
                shift = IXGBE_INCVAL_SHIFT_10GB;
                break;
        }

        switch (hw->mac.type) {
        case ixgbe_mac_X550:
        case ixgbe_mac_X550EM_x:
        case ixgbe_mac_X550EM_a:
                /* Independent of link speed. */
                incval = 1;
                /* Cycles read will be interpreted as ns. */
                shift = 0;
                /* Fall-through */
        case ixgbe_mac_X540:
                IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, incval);
                break;
        case ixgbe_mac_82599EB:
                incval >>= IXGBE_INCVAL_SHIFT_82599;
                shift -= IXGBE_INCVAL_SHIFT_82599;
                IXGBE_WRITE_REG(hw, IXGBE_TIMINCA,
                                (1 << IXGBE_INCPER_SHIFT_82599) | incval);
                break;
        default:
                /* Not supported. */
                return;
        }

        memset(&adapter->systime_tc, 0, sizeof(struct rte_timecounter));
        memset(&adapter->rx_tstamp_tc, 0, sizeof(struct rte_timecounter));
        memset(&adapter->tx_tstamp_tc, 0, sizeof(struct rte_timecounter));

        adapter->systime_tc.cc_mask = IXGBE_CYCLECOUNTER_MASK;
        adapter->systime_tc.cc_shift = shift;
        adapter->systime_tc.nsec_mask = (1ULL << shift) - 1;

        adapter->rx_tstamp_tc.cc_mask = IXGBE_CYCLECOUNTER_MASK;
        adapter->rx_tstamp_tc.cc_shift = shift;
        adapter->rx_tstamp_tc.nsec_mask = (1ULL << shift) - 1;

        adapter->tx_tstamp_tc.cc_mask = IXGBE_CYCLECOUNTER_MASK;
        adapter->tx_tstamp_tc.cc_shift = shift;
        adapter->tx_tstamp_tc.nsec_mask = (1ULL << shift) - 1;
}

static int
ixgbe_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta)
{
        struct ixgbe_adapter *adapter =
                        (struct ixgbe_adapter *)dev->data->dev_private;

        adapter->systime_tc.nsec += delta;
        adapter->rx_tstamp_tc.nsec += delta;
        adapter->tx_tstamp_tc.nsec += delta;

        return 0;
}

static int
ixgbe_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts)
{
        uint64_t ns;
        struct ixgbe_adapter *adapter =
                        (struct ixgbe_adapter *)dev->data->dev_private;

        ns = rte_timespec_to_ns(ts);
        /* Set the timecounters to a new value. */
        adapter->systime_tc.nsec = ns;
        adapter->rx_tstamp_tc.nsec = ns;
        adapter->tx_tstamp_tc.nsec = ns;

        return 0;
}

static int
ixgbe_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts)
{
        uint64_t ns, systime_cycles;
        struct ixgbe_adapter *adapter =
                        (struct ixgbe_adapter *)dev->data->dev_private;

        systime_cycles = ixgbe_read_systime_cyclecounter(dev);
        ns = rte_timecounter_update(&adapter->systime_tc, systime_cycles);
        *ts = rte_ns_to_timespec(ns);

        return 0;
}

static int
ixgbe_timesync_enable(struct rte_eth_dev *dev)
{
        struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        uint32_t tsync_ctl;
        uint32_t tsauxc;

        /* Stop the timesync system time. */
        IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, 0x0);
        /* Reset the timesync system time value. */
        IXGBE_WRITE_REG(hw, IXGBE_SYSTIML, 0x0);
        IXGBE_WRITE_REG(hw, IXGBE_SYSTIMH, 0x0);

        /* Enable system time for platforms where it isn't on by default. */
        tsauxc = IXGBE_READ_REG(hw, IXGBE_TSAUXC);
        tsauxc &= ~IXGBE_TSAUXC_DISABLE_SYSTIME;
        IXGBE_WRITE_REG(hw, IXGBE_TSAUXC, tsauxc);

        ixgbe_start_timecounters(dev);

        /* Enable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */
        IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_1588),
                        (ETHER_TYPE_1588 |
                         IXGBE_ETQF_FILTER_EN |
                         IXGBE_ETQF_1588));

        /* Enable timestamping of received PTP packets. */
        tsync_ctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL);
        tsync_ctl |= IXGBE_TSYNCRXCTL_ENABLED;
        IXGBE_WRITE_REG(hw, IXGBE_TSYNCRXCTL, tsync_ctl);

        /* Enable timestamping of transmitted PTP packets. */
        tsync_ctl = IXGBE_READ_REG(hw, IXGBE_TSYNCTXCTL);
        tsync_ctl |= IXGBE_TSYNCTXCTL_ENABLED;
        IXGBE_WRITE_REG(hw, IXGBE_TSYNCTXCTL, tsync_ctl);

        IXGBE_WRITE_FLUSH(hw);

        return 0;
}

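/*
 * A minimal sketch of the ethdev timesync flow implemented by the
 * surrounding callbacks (port_id and the 1000 ns delta are placeholders):
 *
 *     struct timespec ts;
 *
 *     rte_eth_timesync_enable(port_id);
 *     rte_eth_timesync_read_time(port_id, &ts);
 *     rte_eth_timesync_adjust_time(port_id, 1000);
 *     rte_eth_timesync_read_rx_timestamp(port_id, &ts, 0);
 */
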
static int
ixgbe_timesync_disable(struct rte_eth_dev *dev)
{
        struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        uint32_t tsync_ctl;

        /* Disable timestamping of transmitted PTP packets. */
        tsync_ctl = IXGBE_READ_REG(hw, IXGBE_TSYNCTXCTL);
        tsync_ctl &= ~IXGBE_TSYNCTXCTL_ENABLED;
        IXGBE_WRITE_REG(hw, IXGBE_TSYNCTXCTL, tsync_ctl);

        /* Disable timestamping of received PTP packets. */
        tsync_ctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL);
        tsync_ctl &= ~IXGBE_TSYNCRXCTL_ENABLED;
        IXGBE_WRITE_REG(hw, IXGBE_TSYNCRXCTL, tsync_ctl);

        /* Disable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */
        IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_1588), 0);

        /* Stop incrementing the System Time registers. */
        IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, 0);

        return 0;
}

static int
ixgbe_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
                                 struct timespec *timestamp,
                                 uint32_t flags __rte_unused)
{
        struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct ixgbe_adapter *adapter =
                (struct ixgbe_adapter *)dev->data->dev_private;
        uint32_t tsync_rxctl;
        uint64_t rx_tstamp_cycles;
        uint64_t ns;

        tsync_rxctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL);
        if ((tsync_rxctl & IXGBE_TSYNCRXCTL_VALID) == 0)
                return -EINVAL;

        rx_tstamp_cycles = ixgbe_read_rx_tstamp_cyclecounter(dev);
        ns = rte_timecounter_update(&adapter->rx_tstamp_tc, rx_tstamp_cycles);
        *timestamp = rte_ns_to_timespec(ns);

        return 0;
}

static int
ixgbe_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
                                 struct timespec *timestamp)
{
        struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct ixgbe_adapter *adapter =
                (struct ixgbe_adapter *)dev->data->dev_private;
        uint32_t tsync_txctl;
        uint64_t tx_tstamp_cycles;
        uint64_t ns;

        tsync_txctl = IXGBE_READ_REG(hw, IXGBE_TSYNCTXCTL);
        if ((tsync_txctl & IXGBE_TSYNCTXCTL_VALID) == 0)
                return -EINVAL;

        tx_tstamp_cycles = ixgbe_read_tx_tstamp_cyclecounter(dev);
        ns = rte_timecounter_update(&adapter->tx_tstamp_tc, tx_tstamp_cycles);
        *timestamp = rte_ns_to_timespec(ns);

        return 0;
}

static int
ixgbe_get_reg_length(struct rte_eth_dev *dev)
{
        struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        int count = 0;
        int g_ind = 0;
        const struct reg_info *reg_group;
        const struct reg_info **reg_set = (hw->mac.type == ixgbe_mac_82598EB) ?
                                    ixgbe_regs_mac_82598EB : ixgbe_regs_others;

        while ((reg_group = reg_set[g_ind++]))
                count += ixgbe_regs_group_count(reg_group);

        return count;
}

static int
ixgbevf_get_reg_length(struct rte_eth_dev *dev __rte_unused)
{
        int count = 0;
        int g_ind = 0;
        const struct reg_info *reg_group;

        while ((reg_group = ixgbevf_regs[g_ind++]))
                count += ixgbe_regs_group_count(reg_group);

        return count;
}

static int
ixgbe_get_regs(struct rte_eth_dev *dev,
              struct rte_dev_reg_info *regs)
{
        struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        uint32_t *data = regs->data;
        int g_ind = 0;
        int count = 0;
        const struct reg_info *reg_group;
        const struct reg_info **reg_set = (hw->mac.type == ixgbe_mac_82598EB) ?
                                    ixgbe_regs_mac_82598EB : ixgbe_regs_others;

        if (data == NULL) {
                regs->length = ixgbe_get_reg_length(dev);
                regs->width = sizeof(uint32_t);
                return 0;
        }

        /* Support only full register dump */
        if ((regs->length == 0) ||
            (regs->length == (uint32_t)ixgbe_get_reg_length(dev))) {
                regs->version = hw->mac.type << 24 | hw->revision_id << 16 |
                        hw->device_id;
                while ((reg_group = reg_set[g_ind++]))
                        count += ixgbe_read_regs_group(dev, &data[count],
                                reg_group);
                return 0;
        }

        return -ENOTSUP;
}

static int
ixgbevf_get_regs(struct rte_eth_dev *dev,
                struct rte_dev_reg_info *regs)
{
        struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        uint32_t *data = regs->data;
        int g_ind = 0;
        int count = 0;
        const struct reg_info *reg_group;

        if (data == NULL) {
                regs->length = ixgbevf_get_reg_length(dev);
                regs->width = sizeof(uint32_t);
                return 0;
        }

        /* Support only full register dump */
        if ((regs->length == 0) ||
            (regs->length == (uint32_t)ixgbevf_get_reg_length(dev))) {
                regs->version = hw->mac.type << 24 | hw->revision_id << 16 |
                        hw->device_id;
                while ((reg_group = ixgbevf_regs[g_ind++]))
                        count += ixgbe_read_regs_group(dev, &data[count],
                                                      reg_group);
                return 0;
        }

        return -ENOTSUP;
}

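/*
 * Both register-dump callbacks above follow the ethdev two-call convention:
 * a first call with data == NULL only reports the length and width, and a
 * second call performs the full dump.  A sketch of the application side
 * (port_id is a placeholder; error checks omitted):
 *
 *     struct rte_dev_reg_info info = { .data = NULL };
 *
 *     rte_eth_dev_get_reg_info(port_id, &info);
 *     info.data = calloc(info.length, info.width);
 *     rte_eth_dev_get_reg_info(port_id, &info);
 */
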
static int
ixgbe_get_eeprom_length(struct rte_eth_dev *dev)
{
        struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

        /* The returned length is in bytes. */
        return hw->eeprom.word_size * 2;
}

static int
ixgbe_get_eeprom(struct rte_eth_dev *dev,
                struct rte_dev_eeprom_info *in_eeprom)
{
        struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
        uint16_t *data = in_eeprom->data;
        int first, length;

        first = in_eeprom->offset >> 1;
        length = in_eeprom->length >> 1;
        if ((first > hw->eeprom.word_size) ||
            ((first + length) > hw->eeprom.word_size))
                return -EINVAL;

        in_eeprom->magic = hw->vendor_id | (hw->device_id << 16);

        return eeprom->ops.read_buffer(hw, first, length, data);
}

static int
ixgbe_set_eeprom(struct rte_eth_dev *dev,
                struct rte_dev_eeprom_info *in_eeprom)
{
        struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
        uint16_t *data = in_eeprom->data;
        int first, length;

        first = in_eeprom->offset >> 1;
        length = in_eeprom->length >> 1;
        if ((first > hw->eeprom.word_size) ||
            ((first + length) > hw->eeprom.word_size))
                return -EINVAL;

        in_eeprom->magic = hw->vendor_id | (hw->device_id << 16);

        return eeprom->ops.write_buffer(hw, first, length, data);
}

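/*
 * The EEPROM callbacks above work in 16-bit words internally (offset and
 * length are halved), while the ethdev API is byte based.  A sketch of
 * reading the first 32 bytes (port_id is a placeholder):
 *
 *     uint16_t buf[16];
 *     struct rte_dev_eeprom_info info = {
 *             .data = buf,
 *             .offset = 0,
 *             .length = sizeof(buf),
 *     };
 *
 *     ret = rte_eth_dev_get_eeprom(port_id, &info);
 */
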
uint16_t
ixgbe_reta_size_get(enum ixgbe_mac_type mac_type) {
        switch (mac_type) {
        case ixgbe_mac_X550:
        case ixgbe_mac_X550EM_x:
        case ixgbe_mac_X550EM_a:
                return ETH_RSS_RETA_SIZE_512;
        case ixgbe_mac_X550_vf:
        case ixgbe_mac_X550EM_x_vf:
        case ixgbe_mac_X550EM_a_vf:
                return ETH_RSS_RETA_SIZE_64;
        default:
                return ETH_RSS_RETA_SIZE_128;
        }
}

uint32_t
ixgbe_reta_reg_get(enum ixgbe_mac_type mac_type, uint16_t reta_idx) {
        switch (mac_type) {
        case ixgbe_mac_X550:
        case ixgbe_mac_X550EM_x:
        case ixgbe_mac_X550EM_a:
                if (reta_idx < ETH_RSS_RETA_SIZE_128)
                        return IXGBE_RETA(reta_idx >> 2);
                else
                        return IXGBE_ERETA((reta_idx - ETH_RSS_RETA_SIZE_128) >> 2);
        case ixgbe_mac_X550_vf:
        case ixgbe_mac_X550EM_x_vf:
        case ixgbe_mac_X550EM_a_vf:
                return IXGBE_VFRETA(reta_idx >> 2);
        default:
                return IXGBE_RETA(reta_idx >> 2);
        }
}

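/*
 * Each 32-bit RETA register packs four one-byte table entries, hence the
 * "reta_idx >> 2" register index above.  For example, on an X550 entry 131
 * lives in IXGBE_ERETA((131 - 128) >> 2), i.e. IXGBE_ERETA(0), since only
 * the first 128 entries sit in the IXGBE_RETA() range.
 */
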
uint32_t
ixgbe_mrqc_reg_get(enum ixgbe_mac_type mac_type) {
        switch (mac_type) {
        case ixgbe_mac_X550_vf:
        case ixgbe_mac_X550EM_x_vf:
        case ixgbe_mac_X550EM_a_vf:
                return IXGBE_VFMRQC;
        default:
                return IXGBE_MRQC;
        }
}

uint32_t
ixgbe_rssrk_reg_get(enum ixgbe_mac_type mac_type, uint8_t i) {
        switch (mac_type) {
        case ixgbe_mac_X550_vf:
        case ixgbe_mac_X550EM_x_vf:
        case ixgbe_mac_X550EM_a_vf:
                return IXGBE_VFRSSRK(i);
        default:
                return IXGBE_RSSRK(i);
        }
}

bool
ixgbe_rss_update_sp(enum ixgbe_mac_type mac_type) {
        switch (mac_type) {
        case ixgbe_mac_82599_vf:
        case ixgbe_mac_X540_vf:
                return 0;
        default:
                return 1;
        }
}

static int
ixgbe_dev_get_dcb_info(struct rte_eth_dev *dev,
                        struct rte_eth_dcb_info *dcb_info)
{
        struct ixgbe_dcb_config *dcb_config =
                        IXGBE_DEV_PRIVATE_TO_DCB_CFG(dev->data->dev_private);
        struct ixgbe_dcb_tc_config *tc;
        struct rte_eth_dcb_tc_queue_mapping *tc_queue;
        uint8_t nb_tcs;
        uint8_t i, j;

        if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_DCB_FLAG)
                dcb_info->nb_tcs = dcb_config->num_tcs.pg_tcs;
        else
                dcb_info->nb_tcs = 1;

        tc_queue = &dcb_info->tc_queue;
        nb_tcs = dcb_info->nb_tcs;

        if (dcb_config->vt_mode) { /* vt is enabled */
                struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
                                &dev->data->dev_conf.rx_adv_conf.vmdq_dcb_conf;
                for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
                        dcb_info->prio_tc[i] = vmdq_rx_conf->dcb_tc[i];
                if (RTE_ETH_DEV_SRIOV(dev).active > 0) {
                        for (j = 0; j < nb_tcs; j++) {
                                tc_queue->tc_rxq[0][j].base = j;
                                tc_queue->tc_rxq[0][j].nb_queue = 1;
                                tc_queue->tc_txq[0][j].base = j;
                                tc_queue->tc_txq[0][j].nb_queue = 1;
                        }
                } else {
                        for (i = 0; i < vmdq_rx_conf->nb_queue_pools; i++) {
                                for (j = 0; j < nb_tcs; j++) {
                                        tc_queue->tc_rxq[i][j].base =
                                                i * nb_tcs + j;
                                        tc_queue->tc_rxq[i][j].nb_queue = 1;
                                        tc_queue->tc_txq[i][j].base =
                                                i * nb_tcs + j;
                                        tc_queue->tc_txq[i][j].nb_queue = 1;
                                }
                        }
                }
        } else { /* vt is disabled */
                struct rte_eth_dcb_rx_conf *rx_conf =
                                &dev->data->dev_conf.rx_adv_conf.dcb_rx_conf;
                for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
                        dcb_info->prio_tc[i] = rx_conf->dcb_tc[i];
                if (dcb_info->nb_tcs == ETH_4_TCS) {
                        for (i = 0; i < dcb_info->nb_tcs; i++) {
                                dcb_info->tc_queue.tc_rxq[0][i].base = i * 32;
                                dcb_info->tc_queue.tc_rxq[0][i].nb_queue = 16;
                        }
                        dcb_info->tc_queue.tc_txq[0][0].base = 0;
                        dcb_info->tc_queue.tc_txq[0][1].base = 64;
                        dcb_info->tc_queue.tc_txq[0][2].base = 96;
                        dcb_info->tc_queue.tc_txq[0][3].base = 112;
                        dcb_info->tc_queue.tc_txq[0][0].nb_queue = 64;
                        dcb_info->tc_queue.tc_txq[0][1].nb_queue = 32;
                        dcb_info->tc_queue.tc_txq[0][2].nb_queue = 16;
                        dcb_info->tc_queue.tc_txq[0][3].nb_queue = 16;
                } else if (dcb_info->nb_tcs == ETH_8_TCS) {
                        for (i = 0; i < dcb_info->nb_tcs; i++) {
                                dcb_info->tc_queue.tc_rxq[0][i].base = i * 16;
                                dcb_info->tc_queue.tc_rxq[0][i].nb_queue = 16;
                        }
                        dcb_info->tc_queue.tc_txq[0][0].base = 0;
                        dcb_info->tc_queue.tc_txq[0][1].base = 32;
                        dcb_info->tc_queue.tc_txq[0][2].base = 64;
                        dcb_info->tc_queue.tc_txq[0][3].base = 80;
                        dcb_info->tc_queue.tc_txq[0][4].base = 96;
                        dcb_info->tc_queue.tc_txq[0][5].base = 104;
                        dcb_info->tc_queue.tc_txq[0][6].base = 112;
                        dcb_info->tc_queue.tc_txq[0][7].base = 120;
                        dcb_info->tc_queue.tc_txq[0][0].nb_queue = 32;
                        dcb_info->tc_queue.tc_txq[0][1].nb_queue = 32;
                        dcb_info->tc_queue.tc_txq[0][2].nb_queue = 16;
                        dcb_info->tc_queue.tc_txq[0][3].nb_queue = 16;
                        dcb_info->tc_queue.tc_txq[0][4].nb_queue = 8;
                        dcb_info->tc_queue.tc_txq[0][5].nb_queue = 8;
                        dcb_info->tc_queue.tc_txq[0][6].nb_queue = 8;
                        dcb_info->tc_queue.tc_txq[0][7].nb_queue = 8;
                }
        }
        for (i = 0; i < dcb_info->nb_tcs; i++) {
                tc = &dcb_config->tc_config[i];
                dcb_info->tc_bws[i] = tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent;
        }
        return 0;
}

/* Update e-tag ether type */
static int
ixgbe_update_e_tag_eth_type(struct ixgbe_hw *hw,
                            uint16_t ether_type)
{
        uint32_t etag_etype;

        if (hw->mac.type != ixgbe_mac_X550 &&
            hw->mac.type != ixgbe_mac_X550EM_x &&
            hw->mac.type != ixgbe_mac_X550EM_a) {
                return -ENOTSUP;
        }

        etag_etype = IXGBE_READ_REG(hw, IXGBE_ETAG_ETYPE);
        etag_etype &= ~IXGBE_ETAG_ETYPE_MASK;
        etag_etype |= ether_type;
        IXGBE_WRITE_REG(hw, IXGBE_ETAG_ETYPE, etag_etype);
        IXGBE_WRITE_FLUSH(hw);

        return 0;
}

/* Config l2 tunnel ether type */
static int
ixgbe_dev_l2_tunnel_eth_type_conf(struct rte_eth_dev *dev,
                                  struct rte_eth_l2_tunnel_conf *l2_tunnel)
{
        int ret = 0;
        struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct ixgbe_l2_tn_info *l2_tn_info =
                IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private);

        if (l2_tunnel == NULL)
                return -EINVAL;

        switch (l2_tunnel->l2_tunnel_type) {
        case RTE_L2_TUNNEL_TYPE_E_TAG:
                l2_tn_info->e_tag_ether_type = l2_tunnel->ether_type;
                ret = ixgbe_update_e_tag_eth_type(hw, l2_tunnel->ether_type);
                break;
        default:
                PMD_DRV_LOG(ERR, "Invalid tunnel type");
                ret = -EINVAL;
                break;
        }

        return ret;
}

/* Enable e-tag tunnel */
static int
ixgbe_e_tag_enable(struct ixgbe_hw *hw)
{
        uint32_t etag_etype;

        if (hw->mac.type != ixgbe_mac_X550 &&
            hw->mac.type != ixgbe_mac_X550EM_x &&
            hw->mac.type != ixgbe_mac_X550EM_a) {
                return -ENOTSUP;
        }

        etag_etype = IXGBE_READ_REG(hw, IXGBE_ETAG_ETYPE);
        etag_etype |= IXGBE_ETAG_ETYPE_VALID;
        IXGBE_WRITE_REG(hw, IXGBE_ETAG_ETYPE, etag_etype);
        IXGBE_WRITE_FLUSH(hw);

        return 0;
}

/* Enable l2 tunnel */
static int
ixgbe_dev_l2_tunnel_enable(struct rte_eth_dev *dev,
                           enum rte_eth_tunnel_type l2_tunnel_type)
{
        int ret = 0;
        struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct ixgbe_l2_tn_info *l2_tn_info =
                IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private);

        switch (l2_tunnel_type) {
        case RTE_L2_TUNNEL_TYPE_E_TAG:
                l2_tn_info->e_tag_en = TRUE;
                ret = ixgbe_e_tag_enable(hw);
                break;
        default:
                PMD_DRV_LOG(ERR, "Invalid tunnel type");
                ret = -EINVAL;
                break;
        }

        return ret;
}

/* Disable e-tag tunnel */
static int
ixgbe_e_tag_disable(struct ixgbe_hw *hw)
{
        uint32_t etag_etype;

        if (hw->mac.type != ixgbe_mac_X550 &&
            hw->mac.type != ixgbe_mac_X550EM_x &&
            hw->mac.type != ixgbe_mac_X550EM_a) {
                return -ENOTSUP;
        }

        etag_etype = IXGBE_READ_REG(hw, IXGBE_ETAG_ETYPE);
        etag_etype &= ~IXGBE_ETAG_ETYPE_VALID;
        IXGBE_WRITE_REG(hw, IXGBE_ETAG_ETYPE, etag_etype);
        IXGBE_WRITE_FLUSH(hw);

        return 0;
}

/* Disable l2 tunnel */
static int
ixgbe_dev_l2_tunnel_disable(struct rte_eth_dev *dev,
                            enum rte_eth_tunnel_type l2_tunnel_type)
{
        int ret = 0;
        struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct ixgbe_l2_tn_info *l2_tn_info =
                IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private);

        switch (l2_tunnel_type) {
        case RTE_L2_TUNNEL_TYPE_E_TAG:
                l2_tn_info->e_tag_en = FALSE;
                ret = ixgbe_e_tag_disable(hw);
                break;
        default:
                PMD_DRV_LOG(ERR, "Invalid tunnel type");
                ret = -EINVAL;
                break;
        }

        return ret;
}

static int
ixgbe_e_tag_filter_del(struct rte_eth_dev *dev,
                       struct rte_eth_l2_tunnel_conf *l2_tunnel)
{
        int ret = 0;
        struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        uint32_t i, rar_entries;
        uint32_t rar_low, rar_high;

        if (hw->mac.type != ixgbe_mac_X550 &&
            hw->mac.type != ixgbe_mac_X550EM_x &&
            hw->mac.type != ixgbe_mac_X550EM_a) {
                return -ENOTSUP;
        }

        rar_entries = ixgbe_get_num_rx_addrs(hw);

        for (i = 1; i < rar_entries; i++) {
                rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(i));
                rar_low  = IXGBE_READ_REG(hw, IXGBE_RAL(i));
                if ((rar_high & IXGBE_RAH_AV) &&
                    (rar_high & IXGBE_RAH_ADTYPE) &&
                    ((rar_low & IXGBE_RAL_ETAG_FILTER_MASK) ==
                     l2_tunnel->tunnel_id)) {
                        IXGBE_WRITE_REG(hw, IXGBE_RAL(i), 0);
                        IXGBE_WRITE_REG(hw, IXGBE_RAH(i), 0);

                        ixgbe_clear_vmdq(hw, i, IXGBE_CLEAR_VMDQ_ALL);

                        return ret;
                }
        }

        return ret;
}

static int
ixgbe_e_tag_filter_add(struct rte_eth_dev *dev,
                       struct rte_eth_l2_tunnel_conf *l2_tunnel)
{
        int ret = 0;
        struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        uint32_t i, rar_entries;
        uint32_t rar_low, rar_high;

        if (hw->mac.type != ixgbe_mac_X550 &&
            hw->mac.type != ixgbe_mac_X550EM_x &&
            hw->mac.type != ixgbe_mac_X550EM_a) {
                return -ENOTSUP;
        }

        /* One entry per tunnel; remove any existing entry first. */
        ixgbe_e_tag_filter_del(dev, l2_tunnel);

        rar_entries = ixgbe_get_num_rx_addrs(hw);

        for (i = 1; i < rar_entries; i++) {
                rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(i));
                if (rar_high & IXGBE_RAH_AV) {
                        continue;
                } else {
                        ixgbe_set_vmdq(hw, i, l2_tunnel->pool);
                        rar_high = IXGBE_RAH_AV | IXGBE_RAH_ADTYPE;
                        rar_low = l2_tunnel->tunnel_id;

                        IXGBE_WRITE_REG(hw, IXGBE_RAL(i), rar_low);
                        IXGBE_WRITE_REG(hw, IXGBE_RAH(i), rar_high);

                        return ret;
                }
        }

        PMD_INIT_LOG(NOTICE, "The E-tag forwarding rule table is full."
                     " Please remove a rule before adding a new one.");
        return -EINVAL;
}

static inline struct ixgbe_l2_tn_filter *
ixgbe_l2_tn_filter_lookup(struct ixgbe_l2_tn_info *l2_tn_info,
                          struct ixgbe_l2_tn_key *key)
{
        int ret;

        ret = rte_hash_lookup(l2_tn_info->hash_handle, (const void *)key);
        if (ret < 0)
                return NULL;

        return l2_tn_info->hash_map[ret];
}

static inline int
ixgbe_insert_l2_tn_filter(struct ixgbe_l2_tn_info *l2_tn_info,
                          struct ixgbe_l2_tn_filter *l2_tn_filter)
{
        int ret;

        ret = rte_hash_add_key(l2_tn_info->hash_handle,
                               &l2_tn_filter->key);

        if (ret < 0) {
                PMD_DRV_LOG(ERR,
                            "Failed to insert L2 tunnel filter"
                            " to hash table %d!",
                            ret);
                return ret;
        }

        l2_tn_info->hash_map[ret] = l2_tn_filter;

        TAILQ_INSERT_TAIL(&l2_tn_info->l2_tn_list, l2_tn_filter, entries);

        return 0;
}

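/*
 * L2 tunnel filters are deliberately tracked twice: the rte_hash gives
 * constant-time lookup by (type, tunnel id) key and its returned slot index
 * doubles as the hash_map[] index, while the TAILQ keeps all filters in an
 * iterable list (e.g. for restoring them after a device reset, see the
 * restore flag in ixgbe_dev_l2_tunnel_filter_add() below).
 */
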
static inline int
ixgbe_remove_l2_tn_filter(struct ixgbe_l2_tn_info *l2_tn_info,
                          struct ixgbe_l2_tn_key *key)
{
        int ret;
        struct ixgbe_l2_tn_filter *l2_tn_filter;

        ret = rte_hash_del_key(l2_tn_info->hash_handle, key);

        if (ret < 0) {
                PMD_DRV_LOG(ERR,
                            "No such L2 tunnel filter to delete %d!",
                            ret);
                return ret;
        }

        l2_tn_filter = l2_tn_info->hash_map[ret];
        l2_tn_info->hash_map[ret] = NULL;

        TAILQ_REMOVE(&l2_tn_info->l2_tn_list, l2_tn_filter, entries);
        rte_free(l2_tn_filter);

        return 0;
}

/* Add l2 tunnel filter */
int
ixgbe_dev_l2_tunnel_filter_add(struct rte_eth_dev *dev,
                               struct rte_eth_l2_tunnel_conf *l2_tunnel,
                               bool restore)
{
        int ret;
        struct ixgbe_l2_tn_info *l2_tn_info =
                IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private);
        struct ixgbe_l2_tn_key key;
        struct ixgbe_l2_tn_filter *node;

        if (!restore) {
                key.l2_tn_type = l2_tunnel->l2_tunnel_type;
                key.tn_id = l2_tunnel->tunnel_id;

                node = ixgbe_l2_tn_filter_lookup(l2_tn_info, &key);

                if (node) {
                        PMD_DRV_LOG(ERR,
                                    "The L2 tunnel filter already exists!");
                        return -EINVAL;
                }

                node = rte_zmalloc("ixgbe_l2_tn",
                                   sizeof(struct ixgbe_l2_tn_filter),
                                   0);
                if (!node)
                        return -ENOMEM;

                rte_memcpy(&node->key,
                                 &key,
                                 sizeof(struct ixgbe_l2_tn_key));
                node->pool = l2_tunnel->pool;
                ret = ixgbe_insert_l2_tn_filter(l2_tn_info, node);
                if (ret < 0) {
                        rte_free(node);
                        return ret;
                }
        }

        switch (l2_tunnel->l2_tunnel_type) {
        case RTE_L2_TUNNEL_TYPE_E_TAG:
                ret = ixgbe_e_tag_filter_add(dev, l2_tunnel);
                break;
        default:
                PMD_DRV_LOG(ERR, "Invalid tunnel type");
                ret = -EINVAL;
                break;
        }

        if ((!restore) && (ret < 0))
                (void)ixgbe_remove_l2_tn_filter(l2_tn_info, &key);

        return ret;
}

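/*
 * A sketch of exercising the add path above through the legacy filter API
 * (port_id, the tunnel id 0x123 and pool 1 are placeholder values):
 *
 *     struct rte_eth_l2_tunnel_conf conf = {
 *             .l2_tunnel_type = RTE_L2_TUNNEL_TYPE_E_TAG,
 *             .tunnel_id = 0x123,
 *             .pool = 1,
 *     };
 *     ret = rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_L2_TUNNEL,
 *                                   RTE_ETH_FILTER_ADD, &conf);
 */
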
/* Delete l2 tunnel filter */
int
ixgbe_dev_l2_tunnel_filter_del(struct rte_eth_dev *dev,
                               struct rte_eth_l2_tunnel_conf *l2_tunnel)
{
        int ret;
        struct ixgbe_l2_tn_info *l2_tn_info =
                IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private);
        struct ixgbe_l2_tn_key key;

        key.l2_tn_type = l2_tunnel->l2_tunnel_type;
        key.tn_id = l2_tunnel->tunnel_id;
        ret = ixgbe_remove_l2_tn_filter(l2_tn_info, &key);
        if (ret < 0)
                return ret;

        switch (l2_tunnel->l2_tunnel_type) {
        case RTE_L2_TUNNEL_TYPE_E_TAG:
                ret = ixgbe_e_tag_filter_del(dev, l2_tunnel);
                break;
        default:
                PMD_DRV_LOG(ERR, "Invalid tunnel type");
                ret = -EINVAL;
                break;
        }

        return ret;
}

/**
 * ixgbe_dev_l2_tunnel_filter_handle - Handle operations for l2 tunnel filter.
 * @dev: pointer to rte_eth_dev structure
 * @filter_op: operation to be taken
 * @arg: a pointer to the specific structure corresponding to filter_op
 */
static int
ixgbe_dev_l2_tunnel_filter_handle(struct rte_eth_dev *dev,
                                  enum rte_filter_op filter_op,
                                  void *arg)
{
        int ret;

        if (filter_op == RTE_ETH_FILTER_NOP)
                return 0;

        if (arg == NULL) {
                PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u.",
                            filter_op);
                return -EINVAL;
        }

        switch (filter_op) {
        case RTE_ETH_FILTER_ADD:
                ret = ixgbe_dev_l2_tunnel_filter_add
                        (dev,
                         (struct rte_eth_l2_tunnel_conf *)arg,
                         FALSE);
                break;
        case RTE_ETH_FILTER_DELETE:
                ret = ixgbe_dev_l2_tunnel_filter_del
                        (dev,
                         (struct rte_eth_l2_tunnel_conf *)arg);
                break;
        default:
                PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op);
                ret = -EINVAL;
                break;
        }
        return ret;
}

static int
ixgbe_e_tag_forwarding_en_dis(struct rte_eth_dev *dev, bool en)
{
        int ret = 0;
        uint32_t ctrl;
        struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

        if (hw->mac.type != ixgbe_mac_X550 &&
            hw->mac.type != ixgbe_mac_X550EM_x &&
            hw->mac.type != ixgbe_mac_X550EM_a) {
                return -ENOTSUP;
        }

        ctrl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
        ctrl &= ~IXGBE_VT_CTL_POOLING_MODE_MASK;
        if (en)
                ctrl |= IXGBE_VT_CTL_POOLING_MODE_ETAG;
        IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, ctrl);

        return ret;
}

/* Enable l2 tunnel forwarding */
static int
ixgbe_dev_l2_tunnel_forwarding_enable
        (struct rte_eth_dev *dev,
         enum rte_eth_tunnel_type l2_tunnel_type)
{
        struct ixgbe_l2_tn_info *l2_tn_info =
                IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private);
        int ret = 0;

        switch (l2_tunnel_type) {
        case RTE_L2_TUNNEL_TYPE_E_TAG:
                l2_tn_info->e_tag_fwd_en = TRUE;
                ret = ixgbe_e_tag_forwarding_en_dis(dev, 1);
                break;
        default:
                PMD_DRV_LOG(ERR, "Invalid tunnel type");
                ret = -EINVAL;
                break;
        }

        return ret;
}

/* Disable l2 tunnel forwarding */
static int
ixgbe_dev_l2_tunnel_forwarding_disable
        (struct rte_eth_dev *dev,
         enum rte_eth_tunnel_type l2_tunnel_type)
{
        struct ixgbe_l2_tn_info *l2_tn_info =
                IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private);
        int ret = 0;

        switch (l2_tunnel_type) {
        case RTE_L2_TUNNEL_TYPE_E_TAG:
                l2_tn_info->e_tag_fwd_en = FALSE;
                ret = ixgbe_e_tag_forwarding_en_dis(dev, 0);
                break;
        default:
                PMD_DRV_LOG(ERR, "Invalid tunnel type");
                ret = -EINVAL;
                break;
        }

        return ret;
}

static int
ixgbe_e_tag_insertion_en_dis(struct rte_eth_dev *dev,
                             struct rte_eth_l2_tunnel_conf *l2_tunnel,
                             bool en)
{
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
        int ret = 0;
        uint32_t vmtir, vmvir;
        struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

        if (l2_tunnel->vf_id >= pci_dev->max_vfs) {
                PMD_DRV_LOG(ERR,
                            "VF id %u should be less than %u",
                            l2_tunnel->vf_id,
                            pci_dev->max_vfs);
                return -EINVAL;
        }

        if (hw->mac.type != ixgbe_mac_X550 &&
            hw->mac.type != ixgbe_mac_X550EM_x &&
            hw->mac.type != ixgbe_mac_X550EM_a) {
                return -ENOTSUP;
        }

        if (en)
                vmtir = l2_tunnel->tunnel_id;
        else
                vmtir = 0;

        IXGBE_WRITE_REG(hw, IXGBE_VMTIR(l2_tunnel->vf_id), vmtir);

        vmvir = IXGBE_READ_REG(hw, IXGBE_VMVIR(l2_tunnel->vf_id));
        vmvir &= ~IXGBE_VMVIR_TAGA_MASK;
        if (en)
                vmvir |= IXGBE_VMVIR_TAGA_ETAG_INSERT;
        IXGBE_WRITE_REG(hw, IXGBE_VMVIR(l2_tunnel->vf_id), vmvir);

        return ret;
}

/* Enable l2 tunnel tag insertion */
static int
ixgbe_dev_l2_tunnel_insertion_enable(struct rte_eth_dev *dev,
                                     struct rte_eth_l2_tunnel_conf *l2_tunnel)
{
        int ret = 0;

        switch (l2_tunnel->l2_tunnel_type) {
        case RTE_L2_TUNNEL_TYPE_E_TAG:
                ret = ixgbe_e_tag_insertion_en_dis(dev, l2_tunnel, 1);
                break;
        default:
                PMD_DRV_LOG(ERR, "Invalid tunnel type");
                ret = -EINVAL;
                break;
        }

        return ret;
}

/* Disable l2 tunnel tag insertion */
static int
ixgbe_dev_l2_tunnel_insertion_disable
        (struct rte_eth_dev *dev,
         struct rte_eth_l2_tunnel_conf *l2_tunnel)
{
        int ret = 0;

        switch (l2_tunnel->l2_tunnel_type) {
        case RTE_L2_TUNNEL_TYPE_E_TAG:
                ret = ixgbe_e_tag_insertion_en_dis(dev, l2_tunnel, 0);
                break;
        default:
                PMD_DRV_LOG(ERR, "Invalid tunnel type");
                ret = -EINVAL;
                break;
        }

        return ret;
}

static int
ixgbe_e_tag_stripping_en_dis(struct rte_eth_dev *dev,
                             bool en)
{
        int ret = 0;
        uint32_t qde;
        struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

        if (hw->mac.type != ixgbe_mac_X550 &&
            hw->mac.type != ixgbe_mac_X550EM_x &&
            hw->mac.type != ixgbe_mac_X550EM_a) {
                return -ENOTSUP;
        }

        qde = IXGBE_READ_REG(hw, IXGBE_QDE);
        if (en)
                qde |= IXGBE_QDE_STRIP_TAG;
        else
                qde &= ~IXGBE_QDE_STRIP_TAG;
        qde &= ~IXGBE_QDE_READ;
        qde |= IXGBE_QDE_WRITE;
        IXGBE_WRITE_REG(hw, IXGBE_QDE, qde);

        return ret;
}

/* Enable l2 tunnel tag stripping */
static int
ixgbe_dev_l2_tunnel_stripping_enable
        (struct rte_eth_dev *dev,
         enum rte_eth_tunnel_type l2_tunnel_type)
{
        int ret = 0;

        switch (l2_tunnel_type) {
        case RTE_L2_TUNNEL_TYPE_E_TAG:
                ret = ixgbe_e_tag_stripping_en_dis(dev, 1);
                break;
        default:
                PMD_DRV_LOG(ERR, "Invalid tunnel type");
                ret = -EINVAL;
                break;
        }

        return ret;
}

/* Disable l2 tunnel tag stripping */
static int
ixgbe_dev_l2_tunnel_stripping_disable
        (struct rte_eth_dev *dev,
         enum rte_eth_tunnel_type l2_tunnel_type)
{
        int ret = 0;

        switch (l2_tunnel_type) {
        case RTE_L2_TUNNEL_TYPE_E_TAG:
                ret = ixgbe_e_tag_stripping_en_dis(dev, 0);
                break;
        default:
                PMD_DRV_LOG(ERR, "Invalid tunnel type");
                ret = -EINVAL;
                break;
        }

        return ret;
}

/* Enable/disable l2 tunnel offload functions */
static int
ixgbe_dev_l2_tunnel_offload_set
        (struct rte_eth_dev *dev,
         struct rte_eth_l2_tunnel_conf *l2_tunnel,
         uint32_t mask,
         uint8_t en)
{
        int ret = 0;

        if (l2_tunnel == NULL)
                return -EINVAL;

        ret = -EINVAL;
        if (mask & ETH_L2_TUNNEL_ENABLE_MASK) {
                if (en)
                        ret = ixgbe_dev_l2_tunnel_enable(
                                dev,
                                l2_tunnel->l2_tunnel_type);
                else
                        ret = ixgbe_dev_l2_tunnel_disable(
                                dev,
                                l2_tunnel->l2_tunnel_type);
        }

        if (mask & ETH_L2_TUNNEL_INSERTION_MASK) {
                if (en)
                        ret = ixgbe_dev_l2_tunnel_insertion_enable(
                                dev,
                                l2_tunnel);
                else
                        ret = ixgbe_dev_l2_tunnel_insertion_disable(
                                dev,
                                l2_tunnel);
        }

        if (mask & ETH_L2_TUNNEL_STRIPPING_MASK) {
                if (en)
                        ret = ixgbe_dev_l2_tunnel_stripping_enable(
                                dev,
                                l2_tunnel->l2_tunnel_type);
                else
                        ret = ixgbe_dev_l2_tunnel_stripping_disable(
                                dev,
                                l2_tunnel->l2_tunnel_type);
        }

        if (mask & ETH_L2_TUNNEL_FORWARDING_MASK) {
                if (en)
                        ret = ixgbe_dev_l2_tunnel_forwarding_enable(
                                dev,
                                l2_tunnel->l2_tunnel_type);
                else
                        ret = ixgbe_dev_l2_tunnel_forwarding_disable(
                                dev,
                                l2_tunnel->l2_tunnel_type);
        }

        return ret;
}

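/*
 * A sketch of toggling E-tag offloads through the ethdev wrapper for the
 * handler above; port_id is a placeholder, and the mask combines the
 * ETH_L2_TUNNEL_* bits that ixgbe_dev_l2_tunnel_offload_set() checks one
 * by one:
 *
 *     struct rte_eth_l2_tunnel_conf conf = {
 *             .l2_tunnel_type = RTE_L2_TUNNEL_TYPE_E_TAG,
 *     };
 *     uint32_t mask = ETH_L2_TUNNEL_ENABLE_MASK |
 *                     ETH_L2_TUNNEL_STRIPPING_MASK;
 *
 *     ret = rte_eth_dev_l2_tunnel_offload_set(port_id, &conf, mask, 1);
 */
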
static int
ixgbe_update_vxlan_port(struct ixgbe_hw *hw,
                        uint16_t port)
{
        IXGBE_WRITE_REG(hw, IXGBE_VXLANCTRL, port);
        IXGBE_WRITE_FLUSH(hw);

        return 0;
}

/* There is only one register for the VxLAN UDP port, so several ports
 * cannot coexist; adding a port simply overwrites the current value.
 */
static int
ixgbe_add_vxlan_port(struct ixgbe_hw *hw,
                     uint16_t port)
{
        if (port == 0) {
                PMD_DRV_LOG(ERR, "Add VxLAN port 0 is not allowed.");
                return -EINVAL;
        }

        return ixgbe_update_vxlan_port(hw, port);
}

/* The VxLAN UDP port register always holds some value, so the port cannot
 * truly be deleted; deletion resets the register to its original value 0.
 */
static int
ixgbe_del_vxlan_port(struct ixgbe_hw *hw,
                     uint16_t port)
{
        uint16_t cur_port;

        cur_port = (uint16_t)IXGBE_READ_REG(hw, IXGBE_VXLANCTRL);

        if (cur_port != port) {
                PMD_DRV_LOG(ERR, "Port %u does not exist.", port);
                return -EINVAL;
        }

        return ixgbe_update_vxlan_port(hw, 0);
}

/* Add UDP tunneling port */
static int
ixgbe_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
                              struct rte_eth_udp_tunnel *udp_tunnel)
{
        int ret = 0;
        struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

        if (hw->mac.type != ixgbe_mac_X550 &&
            hw->mac.type != ixgbe_mac_X550EM_x &&
            hw->mac.type != ixgbe_mac_X550EM_a) {
                return -ENOTSUP;
        }

        if (udp_tunnel == NULL)
                return -EINVAL;

        switch (udp_tunnel->prot_type) {
        case RTE_TUNNEL_TYPE_VXLAN:
                ret = ixgbe_add_vxlan_port(hw, udp_tunnel->udp_port);
                break;

        case RTE_TUNNEL_TYPE_GENEVE:
        case RTE_TUNNEL_TYPE_TEREDO:
                PMD_DRV_LOG(ERR, "Tunnel type is not supported yet.");
                ret = -EINVAL;
                break;

        default:
                PMD_DRV_LOG(ERR, "Invalid tunnel type");
                ret = -EINVAL;
                break;
        }

        return ret;
}

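/*
 * A sketch of programming the single VxLAN UDP port register from an
 * application (port_id is a placeholder; 4789 is the IANA-assigned VxLAN
 * port):
 *
 *     struct rte_eth_udp_tunnel tunnel = {
 *             .udp_port = 4789,
 *             .prot_type = RTE_TUNNEL_TYPE_VXLAN,
 *     };
 *
 *     ret = rte_eth_dev_udp_tunnel_port_add(port_id, &tunnel);
 */
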
/* Remove UDP tunneling port */
static int
ixgbe_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
                              struct rte_eth_udp_tunnel *udp_tunnel)
{
        int ret = 0;
        struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

        if (hw->mac.type != ixgbe_mac_X550 &&
            hw->mac.type != ixgbe_mac_X550EM_x &&
            hw->mac.type != ixgbe_mac_X550EM_a) {
                return -ENOTSUP;
        }

        if (udp_tunnel == NULL)
                return -EINVAL;

        switch (udp_tunnel->prot_type) {
        case RTE_TUNNEL_TYPE_VXLAN:
                ret = ixgbe_del_vxlan_port(hw, udp_tunnel->udp_port);
                break;
        case RTE_TUNNEL_TYPE_GENEVE:
        case RTE_TUNNEL_TYPE_TEREDO:
                PMD_DRV_LOG(ERR, "Tunnel type is not supported yet.");
                ret = -EINVAL;
                break;
        default:
                PMD_DRV_LOG(ERR, "Invalid tunnel type");
                ret = -EINVAL;
                break;
        }

        return ret;
}

8154 static void
8155 ixgbevf_dev_allmulticast_enable(struct rte_eth_dev *dev)
8156 {
8157         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
8158
8159         hw->mac.ops.update_xcast_mode(hw, IXGBEVF_XCAST_MODE_ALLMULTI);
8160 }
8161
8162 static void
8163 ixgbevf_dev_allmulticast_disable(struct rte_eth_dev *dev)
8164 {
8165         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
8166
8167         hw->mac.ops.update_xcast_mode(hw, IXGBEVF_XCAST_MODE_MULTI);
8168 }
8169
8170 static void ixgbevf_mbx_process(struct rte_eth_dev *dev)
8171 {
8172         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
8173         u32 in_msg = 0;
8174
	if (ixgbe_read_mbx(hw, &in_msg, 1, 0))
		return;

	/* PF reset VF event */
	if (in_msg == IXGBE_PF_CONTROL_MSG)
		_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RESET,
					      NULL, NULL);
}

static int
ixgbevf_dev_interrupt_get_status(struct rte_eth_dev *dev)
{
	uint32_t eicr;
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ixgbe_interrupt *intr =
		IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);

	ixgbevf_intr_disable(hw);

	/* read-on-clear nic registers here */
	eicr = IXGBE_READ_REG(hw, IXGBE_VTEICR);
	intr->flags = 0;

	/* only one misc vector supported - mailbox */
	eicr &= IXGBE_VTEICR_MASK;
	if (eicr == IXGBE_MISC_VEC_ID)
		intr->flags |= IXGBE_FLAG_MAILBOX;

	return 0;
}

static int
ixgbevf_dev_interrupt_action(struct rte_eth_dev *dev)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ixgbe_interrupt *intr =
		IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);

	if (intr->flags & IXGBE_FLAG_MAILBOX) {
		ixgbevf_mbx_process(dev);
		intr->flags &= ~IXGBE_FLAG_MAILBOX;
	}

	ixgbevf_intr_enable(hw);

	return 0;
}

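/*
 * Top half of the VF interrupt: mirrors the PF handler's
 * get_status/action split, but the only misc event a VF sees is the
 * PF-to-VF mailbox. Registered with the interrupt subsystem via
 * rte_intr_callback_register() during device init.
 */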
static void
ixgbevf_dev_interrupt_handler(void *param)
{
	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;

	ixgbevf_dev_interrupt_get_status(dev);
	ixgbevf_dev_interrupt_action(dev);
}

/**
 *  ixgbe_disable_sec_tx_path_generic - Stops the transmit data path
 *  @hw: pointer to hardware structure
 *
 *  Stops the transmit data path and waits for the HW to internally empty
 *  the Tx security block
 **/
int ixgbe_disable_sec_tx_path_generic(struct ixgbe_hw *hw)
{
#define IXGBE_MAX_SECTX_POLL 40

	int i;
	uint32_t sectxreg;

	sectxreg = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL);
	sectxreg |= IXGBE_SECTXCTRL_TX_DIS;
	IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, sectxreg);
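	/* Poll SECTXSTAT until the security block reports ready (i.e. it
	 * has drained), giving up after IXGBE_MAX_SECTX_POLL * 1 ms.
	 */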
	for (i = 0; i < IXGBE_MAX_SECTX_POLL; i++) {
		sectxreg = IXGBE_READ_REG(hw, IXGBE_SECTXSTAT);
		if (sectxreg & IXGBE_SECTXSTAT_SECTX_RDY)
			break;
		/* Use interrupt-safe sleep just in case */
		usec_delay(1000);
	}

	/* For informational purposes only */
	if (i >= IXGBE_MAX_SECTX_POLL)
		PMD_DRV_LOG(DEBUG, "Tx unit being enabled before security "
			    "path is fully disabled. Continuing with init.");

	return IXGBE_SUCCESS;
}

/**
 *  ixgbe_enable_sec_tx_path_generic - Enables the transmit data path
 *  @hw: pointer to hardware structure
 *
 *  Enables the transmit data path.
 **/
int ixgbe_enable_sec_tx_path_generic(struct ixgbe_hw *hw)
{
	uint32_t sectxreg;

	sectxreg = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL);
	sectxreg &= ~IXGBE_SECTXCTRL_TX_DIS;
	IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, sectxreg);
	IXGBE_WRITE_FLUSH(hw);

	return IXGBE_SUCCESS;
}

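/*
 * Filter restore helpers: each one replays the software-cached copy of a
 * filter type back into the hardware registers. They are gathered by
 * ixgbe_filter_restore() below, so filter state survives a device
 * stop/start cycle (which resets the hardware).
 */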
/* restore n-tuple filter */
static inline void
ixgbe_ntuple_filter_restore(struct rte_eth_dev *dev)
{
	struct ixgbe_filter_info *filter_info =
		IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
	struct ixgbe_5tuple_filter *node;

	TAILQ_FOREACH(node, &filter_info->fivetuple_list, entries) {
		ixgbe_inject_5tuple_filter(dev, node);
	}
}

/* restore ethernet type filter */
static inline void
ixgbe_ethertype_filter_restore(struct rte_eth_dev *dev)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ixgbe_filter_info *filter_info =
		IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
	int i;

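	/* ETQF(i) holds the EtherType match and filter-enable bits;
	 * ETQS(i) holds the queue the matching packets are steered to.
	 */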
	for (i = 0; i < IXGBE_MAX_ETQF_FILTERS; i++) {
		if (filter_info->ethertype_mask & (1 << i)) {
			IXGBE_WRITE_REG(hw, IXGBE_ETQF(i),
					filter_info->ethertype_filters[i].etqf);
			IXGBE_WRITE_REG(hw, IXGBE_ETQS(i),
					filter_info->ethertype_filters[i].etqs);
			IXGBE_WRITE_FLUSH(hw);
		}
	}
}

/* restore SYN filter */
static inline void
ixgbe_syn_filter_restore(struct rte_eth_dev *dev)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ixgbe_filter_info *filter_info =
		IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
	uint32_t synqf;

	synqf = filter_info->syn_info;

	if (synqf & IXGBE_SYN_FILTER_ENABLE) {
		IXGBE_WRITE_REG(hw, IXGBE_SYNQF, synqf);
		IXGBE_WRITE_FLUSH(hw);
	}
}

/* restore L2 tunnel filter */
static inline void
ixgbe_l2_tn_filter_restore(struct rte_eth_dev *dev)
{
	struct ixgbe_l2_tn_info *l2_tn_info =
		IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private);
	struct ixgbe_l2_tn_filter *node;
	struct rte_eth_l2_tunnel_conf l2_tn_conf;

	TAILQ_FOREACH(node, &l2_tn_info->l2_tn_list, entries) {
		l2_tn_conf.l2_tunnel_type = node->key.l2_tn_type;
		l2_tn_conf.tunnel_id      = node->key.tn_id;
		l2_tn_conf.pool           = node->pool;
		(void)ixgbe_dev_l2_tunnel_filter_add(dev, &l2_tn_conf, TRUE);
	}
}

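/*
 * Called at the end of ixgbe_dev_start() to push every cached filter
 * back into the freshly reset hardware.
 */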
static int
ixgbe_filter_restore(struct rte_eth_dev *dev)
{
	ixgbe_ntuple_filter_restore(dev);
	ixgbe_ethertype_filter_restore(dev);
	ixgbe_syn_filter_restore(dev);
	ixgbe_fdir_filter_restore(dev);
	ixgbe_l2_tn_filter_restore(dev);

	return 0;
}

static void
ixgbe_l2_tunnel_conf(struct rte_eth_dev *dev)
{
	struct ixgbe_l2_tn_info *l2_tn_info =
		IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private);
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	if (l2_tn_info->e_tag_en)
		(void)ixgbe_e_tag_enable(hw);

	if (l2_tn_info->e_tag_fwd_en)
		(void)ixgbe_e_tag_forwarding_en_dis(dev, 1);

	(void)ixgbe_update_e_tag_eth_type(hw, l2_tn_info->e_tag_ether_type);
}

/* remove all the n-tuple filters */
void
ixgbe_clear_all_ntuple_filter(struct rte_eth_dev *dev)
{
	struct ixgbe_filter_info *filter_info =
		IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
	struct ixgbe_5tuple_filter *p_5tuple;

	while ((p_5tuple = TAILQ_FIRST(&filter_info->fivetuple_list)))
		ixgbe_remove_5tuple_filter(dev, p_5tuple);
}

/* remove all the ether type filters */
void
ixgbe_clear_all_ethertype_filter(struct rte_eth_dev *dev)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ixgbe_filter_info *filter_info =
		IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
	int i;

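	/* Entries whose 'conf' flag is set were installed internally (e.g.
	 * by the E-tag/L2 tunnel code) rather than by the user, so they
	 * are deliberately left in place here.
	 */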
	for (i = 0; i < IXGBE_MAX_ETQF_FILTERS; i++) {
		if ((filter_info->ethertype_mask & (1 << i)) &&
		    !filter_info->ethertype_filters[i].conf) {
			(void)ixgbe_ethertype_filter_remove(filter_info,
							    (uint8_t)i);
			IXGBE_WRITE_REG(hw, IXGBE_ETQF(i), 0);
			IXGBE_WRITE_REG(hw, IXGBE_ETQS(i), 0);
			IXGBE_WRITE_FLUSH(hw);
		}
	}
}

/* remove the SYN filter */
void
ixgbe_clear_syn_filter(struct rte_eth_dev *dev)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ixgbe_filter_info *filter_info =
		IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);

	if (filter_info->syn_info & IXGBE_SYN_FILTER_ENABLE) {
		filter_info->syn_info = 0;

		IXGBE_WRITE_REG(hw, IXGBE_SYNQF, 0);
		IXGBE_WRITE_FLUSH(hw);
	}
}

/* remove all the L2 tunnel filters */
int
ixgbe_clear_all_l2_tn_filter(struct rte_eth_dev *dev)
{
	struct ixgbe_l2_tn_info *l2_tn_info =
		IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private);
	struct ixgbe_l2_tn_filter *l2_tn_filter;
	struct rte_eth_l2_tunnel_conf l2_tn_conf;
	int ret = 0;

	while ((l2_tn_filter = TAILQ_FIRST(&l2_tn_info->l2_tn_list))) {
		l2_tn_conf.l2_tunnel_type = l2_tn_filter->key.l2_tn_type;
		l2_tn_conf.tunnel_id      = l2_tn_filter->key.tn_id;
		l2_tn_conf.pool           = l2_tn_filter->pool;
		ret = ixgbe_dev_l2_tunnel_filter_del(dev, &l2_tn_conf);
		if (ret < 0)
			return ret;
	}

	return 0;
}

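/*
 * Register the PF and VF drivers with the PCI bus, export their PCI
 * device ID tables, and list the kernel modules each driver can bind
 * through.
 */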
RTE_PMD_REGISTER_PCI(net_ixgbe, rte_ixgbe_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_ixgbe, pci_id_ixgbe_map);
RTE_PMD_REGISTER_KMOD_DEP(net_ixgbe, "* igb_uio | uio_pci_generic | vfio-pci");
RTE_PMD_REGISTER_PCI(net_ixgbe_vf, rte_ixgbevf_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_ixgbe_vf, pci_id_ixgbevf_map);
RTE_PMD_REGISTER_KMOD_DEP(net_ixgbe_vf, "* igb_uio | vfio-pci");