New upstream version 17.11.1
[deb_dpdk.git] / drivers / net / ixgbe / ixgbe_fdir.c
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33
34 #include <stdio.h>
35 #include <stdint.h>
36 #include <stdarg.h>
37 #include <errno.h>
38 #include <sys/queue.h>
39
40 #include <rte_interrupts.h>
41 #include <rte_log.h>
42 #include <rte_debug.h>
43 #include <rte_pci.h>
44 #include <rte_ether.h>
45 #include <rte_ethdev.h>
46 #include <rte_malloc.h>
47
48 #include "ixgbe_logs.h"
49 #include "base/ixgbe_api.h"
50 #include "base/ixgbe_common.h"
51 #include "ixgbe_ethdev.h"
52
53 /* To get PBALLOC (Packet Buffer Allocation) bits from FDIRCTRL value */
54 #define FDIRCTRL_PBALLOC_MASK           0x03
55
56 /* For calculating memory required for FDIR filters */
57 #define PBALLOC_SIZE_SHIFT              15
58
59 /* Number of bits used to mask bucket hash for different pballoc sizes */
60 #define PERFECT_BUCKET_64KB_HASH_MASK   0x07FF  /* 11 bits */
61 #define PERFECT_BUCKET_128KB_HASH_MASK  0x0FFF  /* 12 bits */
62 #define PERFECT_BUCKET_256KB_HASH_MASK  0x1FFF  /* 13 bits */
63 #define SIG_BUCKET_64KB_HASH_MASK       0x1FFF  /* 13 bits */
64 #define SIG_BUCKET_128KB_HASH_MASK      0x3FFF  /* 14 bits */
65 #define SIG_BUCKET_256KB_HASH_MASK      0x7FFF  /* 15 bits */
66 #define IXGBE_DEFAULT_FLEXBYTES_OFFSET  12 /* default flexbytes offset in bytes */
67 #define IXGBE_FDIR_MAX_FLEX_LEN         2 /* len in bytes of flexbytes */
68 #define IXGBE_MAX_FLX_SOURCE_OFF        62
69 #define IXGBE_FDIRCTRL_FLEX_MASK        (0x1F << IXGBE_FDIRCTRL_FLEX_SHIFT)
70 #define IXGBE_FDIRCMD_CMD_INTERVAL_US   10
71
72 #define IXGBE_FDIR_FLOW_TYPES ( \
73         (1 << RTE_ETH_FLOW_NONFRAG_IPV4_UDP) | \
74         (1 << RTE_ETH_FLOW_NONFRAG_IPV4_TCP) | \
75         (1 << RTE_ETH_FLOW_NONFRAG_IPV4_SCTP) | \
76         (1 << RTE_ETH_FLOW_NONFRAG_IPV4_OTHER) | \
77         (1 << RTE_ETH_FLOW_NONFRAG_IPV6_UDP) | \
78         (1 << RTE_ETH_FLOW_NONFRAG_IPV6_TCP) | \
79         (1 << RTE_ETH_FLOW_NONFRAG_IPV6_SCTP) | \
80         (1 << RTE_ETH_FLOW_NONFRAG_IPV6_OTHER))
81
/*
 * Convert a 16-byte IPv6 address mask into a 16-bit per-byte bitmask:
 * bit i of (ipv6m) is set when byte i of (ipaddr) is 0xFF.  Each byte
 * must be all-ones or all-zeros; anything else is rejected.
 * NOTE: this macro expands a `return -EINVAL` on invalid input, so it
 * may only be used inside a function that returns int.
 */
#define IPV6_ADDR_TO_MASK(ipaddr, ipv6m) do { \
	uint8_t ipv6_addr[16]; \
	uint8_t i; \
	rte_memcpy(ipv6_addr, (ipaddr), sizeof(ipv6_addr));\
	(ipv6m) = 0; \
	for (i = 0; i < sizeof(ipv6_addr); i++) { \
		if (ipv6_addr[i] == UINT8_MAX) \
			(ipv6m) |= 1 << i; \
		else if (ipv6_addr[i] != 0) { \
			PMD_DRV_LOG(ERR, " invalid IPv6 address mask."); \
			return -EINVAL; \
		} \
	} \
} while (0)
96
/*
 * Expand a 16-bit per-byte bitmask into a 16-byte IPv6 address mask:
 * byte i of (ipaddr) becomes 0xFF when bit i of (ipv6m) is set,
 * otherwise 0x00.  Inverse of IPV6_ADDR_TO_MASK for valid masks.
 */
#define IPV6_MASK_TO_ADDR(ipv6m, ipaddr) do { \
	uint8_t ipv6_addr[16]; \
	uint8_t i; \
	for (i = 0; i < sizeof(ipv6_addr); i++) { \
		if ((ipv6m) & (1 << i)) \
			ipv6_addr[i] = UINT8_MAX; \
		else \
			ipv6_addr[i] = 0; \
	} \
	rte_memcpy((ipaddr), ipv6_addr, sizeof(ipv6_addr));\
} while (0)
108
109 #define DEFAULT_VXLAN_PORT 4789
110 #define IXGBE_FDIRIP6M_INNER_MAC_SHIFT 4
111
112 static int fdir_erase_filter_82599(struct ixgbe_hw *hw, uint32_t fdirhash);
113 static int fdir_set_input_mask(struct rte_eth_dev *dev,
114                                const struct rte_eth_fdir_masks *input_mask);
115 static int fdir_set_input_mask_82599(struct rte_eth_dev *dev);
116 static int fdir_set_input_mask_x550(struct rte_eth_dev *dev);
117 static int ixgbe_set_fdir_flex_conf(struct rte_eth_dev *dev,
118                 const struct rte_eth_fdir_flex_conf *conf, uint32_t *fdirctrl);
119 static int fdir_enable_82599(struct ixgbe_hw *hw, uint32_t fdirctrl);
120 static int ixgbe_fdir_filter_to_atr_input(
121                 const struct rte_eth_fdir_filter *fdir_filter,
122                 union ixgbe_atr_input *input,
123                 enum rte_fdir_mode mode);
124 static uint32_t ixgbe_atr_compute_hash_82599(union ixgbe_atr_input *atr_input,
125                                  uint32_t key);
126 static uint32_t atr_compute_sig_hash_82599(union ixgbe_atr_input *input,
127                 enum rte_fdir_pballoc_type pballoc);
128 static uint32_t atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
129                 enum rte_fdir_pballoc_type pballoc);
130 static int fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
131                         union ixgbe_atr_input *input, uint8_t queue,
132                         uint32_t fdircmd, uint32_t fdirhash,
133                         enum rte_fdir_mode mode);
134 static int fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
135                 union ixgbe_atr_input *input, u8 queue, uint32_t fdircmd,
136                 uint32_t fdirhash);
137 static int ixgbe_add_del_fdir_filter(struct rte_eth_dev *dev,
138                               const struct rte_eth_fdir_filter *fdir_filter,
139                               bool del,
140                               bool update);
141 static int ixgbe_fdir_flush(struct rte_eth_dev *dev);
142 static void ixgbe_fdir_info_get(struct rte_eth_dev *dev,
143                         struct rte_eth_fdir_info *fdir_info);
144 static void ixgbe_fdir_stats_get(struct rte_eth_dev *dev,
145                         struct rte_eth_fdir_stats *fdir_stats);
146
147 /**
148  * This function is based on ixgbe_fdir_enable_82599() in base/ixgbe_82599.c.
149  * It adds extra configuration of fdirctrl that is common for all filter types.
150  *
151  *  Initialize Flow Director control registers
152  *  @hw: pointer to hardware structure
153  *  @fdirctrl: value to write to flow director control register
154  **/
155 static int
156 fdir_enable_82599(struct ixgbe_hw *hw, uint32_t fdirctrl)
157 {
158         int i;
159
160         PMD_INIT_FUNC_TRACE();
161
162         /* Prime the keys for hashing */
163         IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY, IXGBE_ATR_BUCKET_HASH_KEY);
164         IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY, IXGBE_ATR_SIGNATURE_HASH_KEY);
165
166         /*
167          * Continue setup of fdirctrl register bits:
168          *  Set the maximum length per hash bucket to 0xA filters
169          *  Send interrupt when 64 filters are left
170          */
171         fdirctrl |= (0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT) |
172                     (4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT);
173
174         /*
175          * Poll init-done after we write the register.  Estimated times:
176          *      10G: PBALLOC = 11b, timing is 60us
177          *       1G: PBALLOC = 11b, timing is 600us
178          *     100M: PBALLOC = 11b, timing is 6ms
179          *
180          *     Multiple these timings by 4 if under full Rx load
181          *
182          * So we'll poll for IXGBE_FDIR_INIT_DONE_POLL times, sleeping for
183          * 1 msec per poll time.  If we're at line rate and drop to 100M, then
184          * this might not finish in our poll time, but we can live with that
185          * for now.
186          */
187         IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl);
188         IXGBE_WRITE_FLUSH(hw);
189         for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) {
190                 if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
191                                    IXGBE_FDIRCTRL_INIT_DONE)
192                         break;
193                 msec_delay(1);
194         }
195
196         if (i >= IXGBE_FDIR_INIT_DONE_POLL) {
197                 PMD_INIT_LOG(ERR, "Flow Director poll time exceeded during enabling!");
198                 return -ETIMEDOUT;
199         }
200         return 0;
201 }
202
203 /*
204  * Set appropriate bits in fdirctrl for: variable reporting levels, moving
205  * flexbytes matching field, and drop queue (only for perfect matching mode).
206  */
207 static inline int
208 configure_fdir_flags(const struct rte_fdir_conf *conf, uint32_t *fdirctrl)
209 {
210         *fdirctrl = 0;
211
212         switch (conf->pballoc) {
213         case RTE_FDIR_PBALLOC_64K:
214                 /* 8k - 1 signature filters */
215                 *fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_64K;
216                 break;
217         case RTE_FDIR_PBALLOC_128K:
218                 /* 16k - 1 signature filters */
219                 *fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_128K;
220                 break;
221         case RTE_FDIR_PBALLOC_256K:
222                 /* 32k - 1 signature filters */
223                 *fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_256K;
224                 break;
225         default:
226                 /* bad value */
227                 PMD_INIT_LOG(ERR, "Invalid fdir_conf->pballoc value");
228                 return -EINVAL;
229         };
230
231         /* status flags: write hash & swindex in the rx descriptor */
232         switch (conf->status) {
233         case RTE_FDIR_NO_REPORT_STATUS:
234                 /* do nothing, default mode */
235                 break;
236         case RTE_FDIR_REPORT_STATUS:
237                 /* report status when the packet matches a fdir rule */
238                 *fdirctrl |= IXGBE_FDIRCTRL_REPORT_STATUS;
239                 break;
240         case RTE_FDIR_REPORT_STATUS_ALWAYS:
241                 /* always report status */
242                 *fdirctrl |= IXGBE_FDIRCTRL_REPORT_STATUS_ALWAYS;
243                 break;
244         default:
245                 /* bad value */
246                 PMD_INIT_LOG(ERR, "Invalid fdir_conf->status value");
247                 return -EINVAL;
248         };
249
250         *fdirctrl |= (IXGBE_DEFAULT_FLEXBYTES_OFFSET / sizeof(uint16_t)) <<
251                      IXGBE_FDIRCTRL_FLEX_SHIFT;
252
253         if (conf->mode >= RTE_FDIR_MODE_PERFECT &&
254             conf->mode <= RTE_FDIR_MODE_PERFECT_TUNNEL) {
255                 *fdirctrl |= IXGBE_FDIRCTRL_PERFECT_MATCH;
256                 *fdirctrl |= (conf->drop_queue << IXGBE_FDIRCTRL_DROP_Q_SHIFT);
257                 if (conf->mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN)
258                         *fdirctrl |= (IXGBE_FDIRCTRL_FILTERMODE_MACVLAN
259                                         << IXGBE_FDIRCTRL_FILTERMODE_SHIFT);
260                 else if (conf->mode == RTE_FDIR_MODE_PERFECT_TUNNEL)
261                         *fdirctrl |= (IXGBE_FDIRCTRL_FILTERMODE_CLOUD
262                                         << IXGBE_FDIRCTRL_FILTERMODE_SHIFT);
263         }
264
265         return 0;
266 }
267
268 /**
269  * Reverse the bits in FDIR registers that store 2 x 16 bit masks.
270  *
271  *  @hi_dword: Bits 31:16 mask to be bit swapped.
272  *  @lo_dword: Bits 15:0  mask to be bit swapped.
273  *
274  *  Flow director uses several registers to store 2 x 16 bit masks with the
275  *  bits reversed such as FDIRTCPM, FDIRUDPM. The LS bit of the
276  *  mask affects the MS bit/byte of the target. This function reverses the
277  *  bits in these masks.
278  *  **/
279 static inline uint32_t
280 reverse_fdir_bitmasks(uint16_t hi_dword, uint16_t lo_dword)
281 {
282         uint32_t mask = hi_dword << 16;
283
284         mask |= lo_dword;
285         mask = ((mask & 0x55555555) << 1) | ((mask & 0xAAAAAAAA) >> 1);
286         mask = ((mask & 0x33333333) << 2) | ((mask & 0xCCCCCCCC) >> 2);
287         mask = ((mask & 0x0F0F0F0F) << 4) | ((mask & 0xF0F0F0F0) >> 4);
288         return ((mask & 0x00FF00FF) << 8) | ((mask & 0xFF00FF00) >> 8);
289 }
290
/*
 * This references ixgbe_fdir_set_input_mask_82599() in base/ixgbe_82599.c,
 * but makes use of the rte_fdir_masks structure to see which bits to set.
 *
 * Programs the FDIR mask registers (FDIRM, FDIRTCPM/UDPM/SCTPM,
 * FDIRSIP4M/DIP4M and, in signature mode, FDIRIP6M) from the masks
 * previously stored in the per-device fdir info.  A set bit in FDIRM
 * means "ignore this field" on the hardware side.
 *
 * Returns IXGBE_SUCCESS, or -EINVAL for an unsupported vlan_tci_mask.
 */
static int
fdir_set_input_mask_82599(struct rte_eth_dev *dev)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ixgbe_hw_fdir_info *info =
			IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
	/*
	 * mask VM pool and DIPv6 since there are currently not supported
	 * mask FLEX byte, it will be set in flex_conf
	 */
	uint32_t fdirm = IXGBE_FDIRM_POOL | IXGBE_FDIRM_DIPv6;
	uint32_t fdirtcpm;  /* TCP source and destination port masks. */
	uint32_t fdiripv6m; /* IPv6 source and destination masks. */
	volatile uint32_t *reg;

	PMD_INIT_FUNC_TRACE();

	/*
	 * Program the relevant mask registers.  If src/dst_port or src/dst_addr
	 * are zero, then assume a full mask for that field. Also assume that
	 * a VLAN of 0 is unspecified, so mask that out as well.  L4type
	 * cannot be masked out in this implementation.
	 */
	if (info->mask.dst_port_mask == 0 && info->mask.src_port_mask == 0)
		/* use the L4 protocol mask for raw IPv4/IPv6 traffic */
		fdirm |= IXGBE_FDIRM_L4P;

	/* Only the four canonical TCI masks are representable in hardware. */
	if (info->mask.vlan_tci_mask == rte_cpu_to_be_16(0x0FFF))
		/* mask VLAN Priority */
		fdirm |= IXGBE_FDIRM_VLANP;
	else if (info->mask.vlan_tci_mask == rte_cpu_to_be_16(0xE000))
		/* mask VLAN ID */
		fdirm |= IXGBE_FDIRM_VLANID;
	else if (info->mask.vlan_tci_mask == 0)
		/* mask VLAN ID and Priority */
		fdirm |= IXGBE_FDIRM_VLANID | IXGBE_FDIRM_VLANP;
	else if (info->mask.vlan_tci_mask != rte_cpu_to_be_16(0xEFFF)) {
		PMD_INIT_LOG(ERR, "invalid vlan_tci_mask");
		return -EINVAL;
	}

	/* flex byte mask */
	if (info->mask.flex_bytes_mask == 0)
		fdirm |= IXGBE_FDIRM_FLEX;

	IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm);

	/* store the TCP/UDP port masks, bit reversed from port layout */
	fdirtcpm = reverse_fdir_bitmasks(
			rte_be_to_cpu_16(info->mask.dst_port_mask),
			rte_be_to_cpu_16(info->mask.src_port_mask));

	/* write all the same so that UDP, TCP and SCTP use the same mask
	 * (little-endian)
	 */
	IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, ~fdirtcpm);
	IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, ~fdirtcpm);
	IXGBE_WRITE_REG(hw, IXGBE_FDIRSCTPM, ~fdirtcpm);

	/* Store source and destination IPv4 masks (big-endian),
	 * can not use IXGBE_WRITE_REG (it would byte-swap the value).
	 */
	reg = IXGBE_PCI_REG_ADDR(hw, IXGBE_FDIRSIP4M);
	*reg = ~(info->mask.src_ipv4_mask);
	reg = IXGBE_PCI_REG_ADDR(hw, IXGBE_FDIRDIP4M);
	*reg = ~(info->mask.dst_ipv4_mask);

	if (dev->data->dev_conf.fdir_conf.mode == RTE_FDIR_MODE_SIGNATURE) {
		/*
		 * Store source and destination IPv6 masks (bit reversed)
		 */
		fdiripv6m = (info->mask.dst_ipv6_mask << 16) |
			    info->mask.src_ipv6_mask;

		IXGBE_WRITE_REG(hw, IXGBE_FDIRIP6M, ~fdiripv6m);
	}

	return IXGBE_SUCCESS;
}
374
375 /*
376  * This references ixgbe_fdir_set_input_mask_82599() in base/ixgbe_82599.c,
377  * but makes use of the rte_fdir_masks structure to see which bits to set.
378  */
379 static int
380 fdir_set_input_mask_x550(struct rte_eth_dev *dev)
381 {
382         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
383         struct ixgbe_hw_fdir_info *info =
384                         IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
385         /* mask VM pool and DIPv6 since there are currently not supported
386          * mask FLEX byte, it will be set in flex_conf
387          */
388         uint32_t fdirm = IXGBE_FDIRM_POOL | IXGBE_FDIRM_DIPv6 |
389                          IXGBE_FDIRM_FLEX;
390         uint32_t fdiripv6m;
391         enum rte_fdir_mode mode = dev->data->dev_conf.fdir_conf.mode;
392         uint16_t mac_mask;
393
394         PMD_INIT_FUNC_TRACE();
395
396         /* set the default UDP port for VxLAN */
397         if (mode == RTE_FDIR_MODE_PERFECT_TUNNEL)
398                 IXGBE_WRITE_REG(hw, IXGBE_VXLANCTRL, DEFAULT_VXLAN_PORT);
399
400         /* some bits must be set for mac vlan or tunnel mode */
401         fdirm |= IXGBE_FDIRM_L4P | IXGBE_FDIRM_L3P;
402
403         if (info->mask.vlan_tci_mask == rte_cpu_to_be_16(0x0FFF))
404                 /* mask VLAN Priority */
405                 fdirm |= IXGBE_FDIRM_VLANP;
406         else if (info->mask.vlan_tci_mask == rte_cpu_to_be_16(0xE000))
407                 /* mask VLAN ID */
408                 fdirm |= IXGBE_FDIRM_VLANID;
409         else if (info->mask.vlan_tci_mask == 0)
410                 /* mask VLAN ID and Priority */
411                 fdirm |= IXGBE_FDIRM_VLANID | IXGBE_FDIRM_VLANP;
412         else if (info->mask.vlan_tci_mask != rte_cpu_to_be_16(0xEFFF)) {
413                 PMD_INIT_LOG(ERR, "invalid vlan_tci_mask");
414                 return -EINVAL;
415         }
416
417         IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm);
418
419         fdiripv6m = ((u32)0xFFFFU << IXGBE_FDIRIP6M_DIPM_SHIFT);
420         fdiripv6m |= IXGBE_FDIRIP6M_ALWAYS_MASK;
421         if (mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN)
422                 fdiripv6m |= IXGBE_FDIRIP6M_TUNNEL_TYPE |
423                                 IXGBE_FDIRIP6M_TNI_VNI;
424
425         if (mode == RTE_FDIR_MODE_PERFECT_TUNNEL) {
426                 mac_mask = info->mask.mac_addr_byte_mask;
427                 fdiripv6m |= (mac_mask << IXGBE_FDIRIP6M_INNER_MAC_SHIFT)
428                                 & IXGBE_FDIRIP6M_INNER_MAC;
429
430                 switch (info->mask.tunnel_type_mask) {
431                 case 0:
432                         /* Mask turnnel type */
433                         fdiripv6m |= IXGBE_FDIRIP6M_TUNNEL_TYPE;
434                         break;
435                 case 1:
436                         break;
437                 default:
438                         PMD_INIT_LOG(ERR, "invalid tunnel_type_mask");
439                         return -EINVAL;
440                 }
441
442                 switch (rte_be_to_cpu_32(info->mask.tunnel_id_mask)) {
443                 case 0x0:
444                         /* Mask vxlan id */
445                         fdiripv6m |= IXGBE_FDIRIP6M_TNI_VNI;
446                         break;
447                 case 0x00FFFFFF:
448                         fdiripv6m |= IXGBE_FDIRIP6M_TNI_VNI_24;
449                         break;
450                 case 0xFFFFFFFF:
451                         break;
452                 default:
453                         PMD_INIT_LOG(ERR, "invalid tunnel_id_mask");
454                         return -EINVAL;
455                 }
456         }
457
458         IXGBE_WRITE_REG(hw, IXGBE_FDIRIP6M, fdiripv6m);
459         IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, 0xFFFFFFFF);
460         IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, 0xFFFFFFFF);
461         IXGBE_WRITE_REG(hw, IXGBE_FDIRSCTPM, 0xFFFFFFFF);
462         IXGBE_WRITE_REG(hw, IXGBE_FDIRDIP4M, 0xFFFFFFFF);
463         IXGBE_WRITE_REG(hw, IXGBE_FDIRSIP4M, 0xFFFFFFFF);
464
465         return IXGBE_SUCCESS;
466 }
467
468 static int
469 ixgbe_fdir_store_input_mask_82599(struct rte_eth_dev *dev,
470                                   const struct rte_eth_fdir_masks *input_mask)
471 {
472         struct ixgbe_hw_fdir_info *info =
473                 IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
474         uint16_t dst_ipv6m = 0;
475         uint16_t src_ipv6m = 0;
476
477         memset(&info->mask, 0, sizeof(struct ixgbe_hw_fdir_mask));
478         info->mask.vlan_tci_mask = input_mask->vlan_tci_mask;
479         info->mask.src_port_mask = input_mask->src_port_mask;
480         info->mask.dst_port_mask = input_mask->dst_port_mask;
481         info->mask.src_ipv4_mask = input_mask->ipv4_mask.src_ip;
482         info->mask.dst_ipv4_mask = input_mask->ipv4_mask.dst_ip;
483         IPV6_ADDR_TO_MASK(input_mask->ipv6_mask.src_ip, src_ipv6m);
484         IPV6_ADDR_TO_MASK(input_mask->ipv6_mask.dst_ip, dst_ipv6m);
485         info->mask.src_ipv6_mask = src_ipv6m;
486         info->mask.dst_ipv6_mask = dst_ipv6m;
487
488         return IXGBE_SUCCESS;
489 }
490
491 static int
492 ixgbe_fdir_store_input_mask_x550(struct rte_eth_dev *dev,
493                                  const struct rte_eth_fdir_masks *input_mask)
494 {
495         struct ixgbe_hw_fdir_info *info =
496                 IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
497
498         memset(&info->mask, 0, sizeof(struct ixgbe_hw_fdir_mask));
499         info->mask.vlan_tci_mask = input_mask->vlan_tci_mask;
500         info->mask.mac_addr_byte_mask = input_mask->mac_addr_byte_mask;
501         info->mask.tunnel_type_mask = input_mask->tunnel_type_mask;
502         info->mask.tunnel_id_mask = input_mask->tunnel_id_mask;
503
504         return IXGBE_SUCCESS;
505 }
506
507 static int
508 ixgbe_fdir_store_input_mask(struct rte_eth_dev *dev,
509                             const struct rte_eth_fdir_masks *input_mask)
510 {
511         enum rte_fdir_mode mode = dev->data->dev_conf.fdir_conf.mode;
512
513         if (mode >= RTE_FDIR_MODE_SIGNATURE &&
514             mode <= RTE_FDIR_MODE_PERFECT)
515                 return ixgbe_fdir_store_input_mask_82599(dev, input_mask);
516         else if (mode >= RTE_FDIR_MODE_PERFECT_MAC_VLAN &&
517                  mode <= RTE_FDIR_MODE_PERFECT_TUNNEL)
518                 return ixgbe_fdir_store_input_mask_x550(dev, input_mask);
519
520         PMD_DRV_LOG(ERR, "Not supported fdir mode - %d!", mode);
521         return -ENOTSUP;
522 }
523
524 int
525 ixgbe_fdir_set_input_mask(struct rte_eth_dev *dev)
526 {
527         enum rte_fdir_mode mode = dev->data->dev_conf.fdir_conf.mode;
528
529         if (mode >= RTE_FDIR_MODE_SIGNATURE &&
530             mode <= RTE_FDIR_MODE_PERFECT)
531                 return fdir_set_input_mask_82599(dev);
532         else if (mode >= RTE_FDIR_MODE_PERFECT_MAC_VLAN &&
533                  mode <= RTE_FDIR_MODE_PERFECT_TUNNEL)
534                 return fdir_set_input_mask_x550(dev);
535
536         PMD_DRV_LOG(ERR, "Not supported fdir mode - %d!", mode);
537         return -ENOTSUP;
538 }
539
540 int
541 ixgbe_fdir_set_flexbytes_offset(struct rte_eth_dev *dev,
542                                 uint16_t offset)
543 {
544         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
545         uint32_t fdirctrl;
546         int i;
547
548         fdirctrl = IXGBE_READ_REG(hw, IXGBE_FDIRCTRL);
549
550         fdirctrl &= ~IXGBE_FDIRCTRL_FLEX_MASK;
551         fdirctrl |= ((offset >> 1) /* convert to word offset */
552                 << IXGBE_FDIRCTRL_FLEX_SHIFT);
553
554         IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl);
555         IXGBE_WRITE_FLUSH(hw);
556         for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) {
557                 if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
558                         IXGBE_FDIRCTRL_INIT_DONE)
559                         break;
560                 msec_delay(1);
561         }
562         return 0;
563 }
564
/*
 * Store the supplied masks and, if that succeeds, program them into the
 * hardware.  Returns 0 on success or the first negative errno hit.
 */
static int
fdir_set_input_mask(struct rte_eth_dev *dev,
		    const struct rte_eth_fdir_masks *input_mask)
{
	int ret = ixgbe_fdir_store_input_mask(dev, input_mask);

	return ret ? ret : ixgbe_fdir_set_input_mask(dev);
}
577
578 /*
579  * ixgbe_check_fdir_flex_conf -check if the flex payload and mask configuration
580  * arguments are valid
581  */
582 static int
583 ixgbe_set_fdir_flex_conf(struct rte_eth_dev *dev,
584                 const struct rte_eth_fdir_flex_conf *conf, uint32_t *fdirctrl)
585 {
586         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
587         struct ixgbe_hw_fdir_info *info =
588                         IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
589         const struct rte_eth_flex_payload_cfg *flex_cfg;
590         const struct rte_eth_fdir_flex_mask *flex_mask;
591         uint32_t fdirm;
592         uint16_t flexbytes = 0;
593         uint16_t i;
594
595         fdirm = IXGBE_READ_REG(hw, IXGBE_FDIRM);
596
597         if (conf == NULL) {
598                 PMD_DRV_LOG(ERR, "NULL pointer.");
599                 return -EINVAL;
600         }
601
602         for (i = 0; i < conf->nb_payloads; i++) {
603                 flex_cfg = &conf->flex_set[i];
604                 if (flex_cfg->type != RTE_ETH_RAW_PAYLOAD) {
605                         PMD_DRV_LOG(ERR, "unsupported payload type.");
606                         return -EINVAL;
607                 }
608                 if (((flex_cfg->src_offset[0] & 0x1) == 0) &&
609                     (flex_cfg->src_offset[1] == flex_cfg->src_offset[0] + 1) &&
610                     (flex_cfg->src_offset[0] <= IXGBE_MAX_FLX_SOURCE_OFF)) {
611                         *fdirctrl &= ~IXGBE_FDIRCTRL_FLEX_MASK;
612                         *fdirctrl |=
613                                 (flex_cfg->src_offset[0] / sizeof(uint16_t)) <<
614                                         IXGBE_FDIRCTRL_FLEX_SHIFT;
615                 } else {
616                         PMD_DRV_LOG(ERR, "invalid flexbytes arguments.");
617                         return -EINVAL;
618                 }
619         }
620
621         for (i = 0; i < conf->nb_flexmasks; i++) {
622                 flex_mask = &conf->flex_mask[i];
623                 if (flex_mask->flow_type != RTE_ETH_FLOW_UNKNOWN) {
624                         PMD_DRV_LOG(ERR, "flexmask should be set globally.");
625                         return -EINVAL;
626                 }
627                 flexbytes = (uint16_t)(((flex_mask->mask[0] << 8) & 0xFF00) |
628                                         ((flex_mask->mask[1]) & 0xFF));
629                 if (flexbytes == UINT16_MAX)
630                         fdirm &= ~IXGBE_FDIRM_FLEX;
631                 else if (flexbytes != 0) {
632                         /* IXGBE_FDIRM_FLEX is set by default when set mask */
633                         PMD_DRV_LOG(ERR, " invalid flexbytes mask arguments.");
634                         return -EINVAL;
635                 }
636         }
637         IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm);
638         info->mask.flex_bytes_mask = flexbytes ? UINT16_MAX : 0;
639         info->flex_bytes_offset = (uint8_t)((*fdirctrl &
640                                             IXGBE_FDIRCTRL_FLEX_MASK) >>
641                                             IXGBE_FDIRCTRL_FLEX_SHIFT);
642         return 0;
643 }
644
/*
 * Full flow-director bring-up for a device: validates the MAC type and
 * mode combination, computes FDIRCTRL, shrinks the Rx packet buffer to
 * make room for the filter table, programs the input masks and flex
 * configuration, then enables the engine.
 *
 * Returns 0 on success, -ENOSYS for an unsupported MAC/mode combination,
 * or a negative errno from any configuration step.
 */
int
ixgbe_fdir_configure(struct rte_eth_dev *dev)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	int err;
	uint32_t fdirctrl, pbsize;
	int i;
	enum rte_fdir_mode mode = dev->data->dev_conf.fdir_conf.mode;

	PMD_INIT_FUNC_TRACE();

	/* Flow director is only implemented on these MAC generations. */
	if (hw->mac.type != ixgbe_mac_82599EB &&
		hw->mac.type != ixgbe_mac_X540 &&
		hw->mac.type != ixgbe_mac_X550 &&
		hw->mac.type != ixgbe_mac_X550EM_x &&
		hw->mac.type != ixgbe_mac_X550EM_a)
		return -ENOSYS;

	/* x550 supports mac-vlan and tunnel mode but other NICs not */
	if (hw->mac.type != ixgbe_mac_X550 &&
	    hw->mac.type != ixgbe_mac_X550EM_x &&
	    hw->mac.type != ixgbe_mac_X550EM_a &&
	    mode != RTE_FDIR_MODE_SIGNATURE &&
	    mode != RTE_FDIR_MODE_PERFECT)
		return -ENOSYS;

	err = configure_fdir_flags(&dev->data->dev_conf.fdir_conf, &fdirctrl);
	if (err)
		return err;

	/*
	 * Before enabling Flow Director, the Rx Packet Buffer size
	 * must be reduced.  The new value is the current size minus
	 * flow director memory usage size.
	 */
	pbsize = (1 << (PBALLOC_SIZE_SHIFT + (fdirctrl & FDIRCTRL_PBALLOC_MASK)));
	IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(0),
	    (IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) - pbsize));

	/*
	 * The defaults in the HW for RX PB 1-7 are not zero and so should be
	 * initialized to zero for non DCB mode otherwise actual total RX PB
	 * would be bigger than programmed and filter space would run into
	 * the PB 0 region.
	 */
	for (i = 1; i < 8; i++)
		IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0);

	err = fdir_set_input_mask(dev, &dev->data->dev_conf.fdir_conf.mask);
	if (err < 0) {
		PMD_INIT_LOG(ERR, " Error on setting FD mask");
		return err;
	}
	err = ixgbe_set_fdir_flex_conf(dev,
		&dev->data->dev_conf.fdir_conf.flex_conf, &fdirctrl);
	if (err < 0) {
		PMD_INIT_LOG(ERR, " Error on setting FD flexible arguments.");
		return err;
	}

	/* Finally write FDIRCTRL and wait for the engine to come up. */
	err = fdir_enable_82599(hw, fdirctrl);
	if (err < 0) {
		PMD_INIT_LOG(ERR, " Error on enabling FD.");
		return err;
	}
	return 0;
}
712
713 /*
714  * Convert DPDK rte_eth_fdir_filter struct to ixgbe_atr_input union that is used
715  * by the IXGBE driver code.
716  */
static int
ixgbe_fdir_filter_to_atr_input(const struct rte_eth_fdir_filter *fdir_filter,
		union ixgbe_atr_input *input, enum rte_fdir_mode mode)
{
	/* fields common to all flow types: VLAN tag and the two flex bytes */
	input->formatted.vlan_id = fdir_filter->input.flow_ext.vlan_tci;
	/* pack flexbytes[1]:flexbytes[0] into one 16-bit word (byte 1 high) */
	input->formatted.flex_bytes = (uint16_t)(
		(fdir_filter->input.flow_ext.flexbytes[1] << 8 & 0xFF00) |
		(fdir_filter->input.flow_ext.flexbytes[0] & 0xFF));

	/* translate the DPDK flow type into the ixgbe ATR encoding;
	 * unrecognized flow types are not rejected here — flow_type is
	 * left untouched (callers zero *input beforehand, see
	 * ixgbe_interpret_fdir_filter())
	 */
	switch (fdir_filter->input.flow_type) {
	case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
		input->formatted.flow_type = IXGBE_ATR_FLOW_TYPE_UDPV4;
		break;
	case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
		input->formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
		break;
	case RTE_ETH_FLOW_NONFRAG_IPV4_SCTP:
		input->formatted.flow_type = IXGBE_ATR_FLOW_TYPE_SCTPV4;
		break;
	case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER:
		input->formatted.flow_type = IXGBE_ATR_FLOW_TYPE_IPV4;
		break;
	case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
		input->formatted.flow_type = IXGBE_ATR_FLOW_TYPE_UDPV6;
		break;
	case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
		input->formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV6;
		break;
	case RTE_ETH_FLOW_NONFRAG_IPV6_SCTP:
		input->formatted.flow_type = IXGBE_ATR_FLOW_TYPE_SCTPV6;
		break;
	case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER:
		input->formatted.flow_type = IXGBE_ATR_FLOW_TYPE_IPV6;
		break;
	default:
		break;
	}

	/* copy the address/port fields relevant to each flow type;
	 * UDP/TCP cases also copy ports, then fall through to the IP copy
	 */
	switch (fdir_filter->input.flow_type) {
	case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
	case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
		input->formatted.src_port =
			fdir_filter->input.flow.udp4_flow.src_port;
		input->formatted.dst_port =
			fdir_filter->input.flow.udp4_flow.dst_port;
		/* fall-through */
	/*for SCTP flow type, port and verify_tag are meaningless in ixgbe.*/
	case RTE_ETH_FLOW_NONFRAG_IPV4_SCTP:
	case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER:
		input->formatted.src_ip[0] =
			fdir_filter->input.flow.ip4_flow.src_ip;
		input->formatted.dst_ip[0] =
			fdir_filter->input.flow.ip4_flow.dst_ip;
		break;

	case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
	case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
		input->formatted.src_port =
			fdir_filter->input.flow.udp6_flow.src_port;
		input->formatted.dst_port =
			fdir_filter->input.flow.udp6_flow.dst_port;
		/* fall-through */
	/*for SCTP flow type, port and verify_tag are meaningless in ixgbe.*/
	case RTE_ETH_FLOW_NONFRAG_IPV6_SCTP:
	case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER:
		rte_memcpy(input->formatted.src_ip,
			   fdir_filter->input.flow.ipv6_flow.src_ip,
			   sizeof(input->formatted.src_ip));
		rte_memcpy(input->formatted.dst_ip,
			   fdir_filter->input.flow.ipv6_flow.dst_ip,
			   sizeof(input->formatted.dst_ip));
		break;
	default:
		break;
	}

	/* MAC-VLAN and tunnel perfect modes additionally match on the
	 * inner MAC address (and, for tunnel mode, the tunnel type/id)
	 */
	if (mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
		rte_memcpy(
			input->formatted.inner_mac,
			fdir_filter->input.flow.mac_vlan_flow.mac_addr.addr_bytes,
			sizeof(input->formatted.inner_mac));
	} else if (mode == RTE_FDIR_MODE_PERFECT_TUNNEL) {
		rte_memcpy(
			input->formatted.inner_mac,
			fdir_filter->input.flow.tunnel_flow.mac_addr.addr_bytes,
			sizeof(input->formatted.inner_mac));
		input->formatted.tunnel_type =
			fdir_filter->input.flow.tunnel_flow.tunnel_type;
		input->formatted.tni_vni =
			fdir_filter->input.flow.tunnel_flow.tunnel_id;
	}

	return 0;
}
811
812 /*
813  * The below function is taken from the FreeBSD IXGBE drivers release
814  * 2.3.8. The only change is not to mask hash_result with IXGBE_ATR_HASH_MASK
815  * before returning, as the signature hash can use 16bits.
816  *
817  * The newer driver has optimised functions for calculating bucket and
818  * signature hashes. However they don't support IPv6 type packets for signature
819  * filters so are not used here.
820  *
821  * Note that the bkt_hash field in the ixgbe_atr_input structure is also never
822  * set.
823  *
824  * Compute the hashes for SW ATR
825  *  @stream: input bitstream to compute the hash on
826  *  @key: 32-bit hash key
827  **/
static uint32_t
ixgbe_atr_compute_hash_82599(union ixgbe_atr_input *atr_input,
				 uint32_t key)
{
	/*
	 * The algorithm is as follows:
	 *    Hash[15:0] = Sum { S[n] x K[n+16] }, n = 0...350
	 *    where Sum {A[n]}, n = 0...n is bitwise XOR of A[0], A[1]...A[n]
	 *    and A[n] x B[n] is bitwise AND between same length strings
	 *
	 *    K[n] is 16 bits, defined as:
	 *       for n modulo 32 >= 15, K[n] = K[n % 32 : (n % 32) - 15]
	 *       for n modulo 32 < 15, K[n] =
	 *             K[(n % 32:0) | (31:31 - (14 - (n % 32)))]
	 *
	 *    S[n] is 16 bits, defined as:
	 *       for n >= 15, S[n] = S[n:n - 15]
	 *       for n < 15, S[n] = S[(n:0) | (350:350 - (14 - n))]
	 *
	 *    To simplify for programming, the algorithm is implemented
	 *    in software this way:
	 *
	 *    key[31:0], hi_hash_dword[31:0], lo_hash_dword[31:0], hash[15:0]
	 *
	 *    for (i = 0; i < 352; i+=32)
	 *        hi_hash_dword[31:0] ^= Stream[(i+31):i];
	 *
	 *    lo_hash_dword[15:0]  ^= Stream[15:0];
	 *    lo_hash_dword[15:0]  ^= hi_hash_dword[31:16];
	 *    lo_hash_dword[31:16] ^= hi_hash_dword[15:0];
	 *
	 *    hi_hash_dword[31:0]  ^= Stream[351:320];
	 *
	 *    if (key[0])
	 *        hash[15:0] ^= Stream[15:0];
	 *
	 *    for (i = 0; i < 16; i++) {
	 *        if (key[i])
	 *            hash[15:0] ^= lo_hash_dword[(i+15):i];
	 *        if (key[i + 16])
	 *            hash[15:0] ^= hi_hash_dword[(i+15):i];
	 *    }
	 *
	 */
	__be32 common_hash_dword = 0;
	u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan;
	u32 hash_result = 0;
	u8 i;

	/* record the flow_vm_vlan bits as they are a key part to the hash */
	flow_vm_vlan = IXGBE_NTOHL(atr_input->dword_stream[0]);

	/* generate common hash dword */
	/* XOR is endian-neutral, so the big-endian dwords 1..13 can be
	 * folded together first and byte-swapped once afterwards
	 */
	for (i = 1; i <= 13; i++)
		common_hash_dword ^= atr_input->dword_stream[i];

	hi_hash_dword = IXGBE_NTOHL(common_hash_dword);

	/* low dword is word swapped version of common */
	lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16);

	/* apply flow ID/VM pool/VLAN ID bits to hash words */
	hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16);

	/* Process bits 0 and 16 */
	if (key & 0x0001)
		hash_result ^= lo_hash_dword;
	if (key & 0x00010000)
		hash_result ^= hi_hash_dword;

	/*
	 * apply flow ID/VM pool/VLAN ID bits to lo hash dword, we had to
	 * delay this because bit 0 of the stream should not be processed
	 * so we do not add the vlan until after bit 0 was processed
	 */
	lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16);


	/* process the remaining 30 bits in the key 2 bits at a time */
	/* for each set key bit i, XOR in the hash dword shifted right by i;
	 * bits above 15 of hash_result are the caller's responsibility to
	 * mask (see function header comment about IXGBE_ATR_HASH_MASK)
	 */
	for (i = 15; i; i--) {
		if (key & (0x0001 << i))
			hash_result ^= lo_hash_dword >> i;
		if (key & (0x00010000 << i))
			hash_result ^= hi_hash_dword >> i;
	}

	return hash_result;
}
916
917 static uint32_t
918 atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
919                 enum rte_fdir_pballoc_type pballoc)
920 {
921         if (pballoc == RTE_FDIR_PBALLOC_256K)
922                 return ixgbe_atr_compute_hash_82599(input,
923                                 IXGBE_ATR_BUCKET_HASH_KEY) &
924                                 PERFECT_BUCKET_256KB_HASH_MASK;
925         else if (pballoc == RTE_FDIR_PBALLOC_128K)
926                 return ixgbe_atr_compute_hash_82599(input,
927                                 IXGBE_ATR_BUCKET_HASH_KEY) &
928                                 PERFECT_BUCKET_128KB_HASH_MASK;
929         else
930                 return ixgbe_atr_compute_hash_82599(input,
931                                 IXGBE_ATR_BUCKET_HASH_KEY) &
932                                 PERFECT_BUCKET_64KB_HASH_MASK;
933 }
934
935 /**
936  * ixgbe_fdir_check_cmd_complete - poll to check whether FDIRCMD is complete
937  * @hw: pointer to hardware structure
938  */
939 static inline int
940 ixgbe_fdir_check_cmd_complete(struct ixgbe_hw *hw, uint32_t *fdircmd)
941 {
942         int i;
943
944         for (i = 0; i < IXGBE_FDIRCMD_CMD_POLL; i++) {
945                 *fdircmd = IXGBE_READ_REG(hw, IXGBE_FDIRCMD);
946                 if (!(*fdircmd & IXGBE_FDIRCMD_CMD_MASK))
947                         return 0;
948                 rte_delay_us(IXGBE_FDIRCMD_CMD_INTERVAL_US);
949         }
950
951         return -ETIMEDOUT;
952 }
953
954 /*
955  * Calculate the hash value needed for signature-match filters. In the FreeBSD
956  * driver, this is done by the optimised function
957  * ixgbe_atr_compute_sig_hash_82599(). However that can't be used here as it
958  * doesn't support calculating a hash for an IPv6 filter.
959  */
960 static uint32_t
961 atr_compute_sig_hash_82599(union ixgbe_atr_input *input,
962                 enum rte_fdir_pballoc_type pballoc)
963 {
964         uint32_t bucket_hash, sig_hash;
965
966         if (pballoc == RTE_FDIR_PBALLOC_256K)
967                 bucket_hash = ixgbe_atr_compute_hash_82599(input,
968                                 IXGBE_ATR_BUCKET_HASH_KEY) &
969                                 SIG_BUCKET_256KB_HASH_MASK;
970         else if (pballoc == RTE_FDIR_PBALLOC_128K)
971                 bucket_hash = ixgbe_atr_compute_hash_82599(input,
972                                 IXGBE_ATR_BUCKET_HASH_KEY) &
973                                 SIG_BUCKET_128KB_HASH_MASK;
974         else
975                 bucket_hash = ixgbe_atr_compute_hash_82599(input,
976                                 IXGBE_ATR_BUCKET_HASH_KEY) &
977                                 SIG_BUCKET_64KB_HASH_MASK;
978
979         sig_hash = ixgbe_atr_compute_hash_82599(input,
980                         IXGBE_ATR_SIGNATURE_HASH_KEY);
981
982         return (sig_hash << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT) | bucket_hash;
983 }
984
985 /*
986  * This is based on ixgbe_fdir_write_perfect_filter_82599() in
987  * base/ixgbe_82599.c, with the ability to set extra flags in FDIRCMD register
988  * added, and IPv6 support also added. The hash value is also pre-calculated
989  * as the pballoc value is needed to do it.
990  */
static int
fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
			union ixgbe_atr_input *input, uint8_t queue,
			uint32_t fdircmd, uint32_t fdirhash,
			enum rte_fdir_mode mode)
{
	uint32_t fdirport, fdirvlan;
	u32 addr_low, addr_high;
	u32 tunnel_type = 0;
	int err = 0;
	volatile uint32_t *reg;

	if (mode == RTE_FDIR_MODE_PERFECT) {
		/* record the IPv4 address (big-endian)
		 * can not use IXGBE_WRITE_REG.
		 */
		reg = IXGBE_PCI_REG_ADDR(hw, IXGBE_FDIRIPSA);
		*reg = input->formatted.src_ip[0];
		reg = IXGBE_PCI_REG_ADDR(hw, IXGBE_FDIRIPDA);
		*reg = input->formatted.dst_ip[0];

		/* record source and destination port (little-endian)*/
		fdirport = IXGBE_NTOHS(input->formatted.dst_port);
		fdirport <<= IXGBE_FDIRPORT_DESTINATION_SHIFT;
		fdirport |= IXGBE_NTOHS(input->formatted.src_port);
		IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, fdirport);
	} else if (mode >= RTE_FDIR_MODE_PERFECT_MAC_VLAN &&
		   mode <= RTE_FDIR_MODE_PERFECT_TUNNEL) {
		/* for mac vlan and tunnel modes */
		/* inner MAC bytes 0-3 packed into the low dword (byte 0 in
		 * the least significant position), bytes 4-5 into the high
		 */
		addr_low = ((u32)input->formatted.inner_mac[0] |
			    ((u32)input->formatted.inner_mac[1] << 8) |
			    ((u32)input->formatted.inner_mac[2] << 16) |
			    ((u32)input->formatted.inner_mac[3] << 24));
		addr_high = ((u32)input->formatted.inner_mac[4] |
			     ((u32)input->formatted.inner_mac[5] << 8));

		if (mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
			IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(0), addr_low);
			IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(1), addr_high);
			IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(2), 0);
		} else {
			/* tunnel mode */
			/* bit 31 of FDIRSIPv6(1) is set for any tunnel type
			 * other than NVGRE
			 */
			if (input->formatted.tunnel_type !=
				RTE_FDIR_TUNNEL_TYPE_NVGRE)
				tunnel_type = 0x80000000;
			tunnel_type |= addr_high;
			IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(0), addr_low);
			IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(1), tunnel_type);
			IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(2),
					input->formatted.tni_vni);
		}
	}

	/* record vlan (little-endian) and flex_bytes(big-endian) */
	fdirvlan = input->formatted.flex_bytes;
	fdirvlan <<= IXGBE_FDIRVLAN_FLEX_SHIFT;
	fdirvlan |= IXGBE_NTOHS(input->formatted.vlan_id);
	IXGBE_WRITE_REG(hw, IXGBE_FDIRVLAN, fdirvlan);

	/* configure FDIRHASH register */
	IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);

	/*
	 * flush all previous writes to make certain registers are
	 * programmed prior to issuing the command
	 */
	IXGBE_WRITE_FLUSH(hw);

	/* configure FDIRCMD register */
	fdircmd |= IXGBE_FDIRCMD_CMD_ADD_FLOW |
		  IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
	fdircmd |= input->formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT;
	fdircmd |= (uint32_t)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;
	fdircmd |= (uint32_t)input->formatted.vm_pool << IXGBE_FDIRCMD_VT_POOL_SHIFT;

	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, fdircmd);

	PMD_DRV_LOG(DEBUG, "Rx Queue=%x hash=%x", queue, fdirhash);

	/* poll until the hardware clears the command field */
	err = ixgbe_fdir_check_cmd_complete(hw, &fdircmd);
	if (err < 0)
		PMD_DRV_LOG(ERR, "Timeout writing flow director filter.");

	return err;
}
1076
1077 /**
1078  * This function is based on ixgbe_atr_add_signature_filter_82599() in
1079  * base/ixgbe_82599.c, but uses a pre-calculated hash value. It also supports
1080  * setting extra fields in the FDIRCMD register, and removes the code that was
1081  * verifying the flow_type field. According to the documentation, a flow type of
1082  * 00 (i.e. not TCP, UDP, or SCTP) is not supported, however it appears to
1083  * work ok...
1084  *
1085  *  Adds a signature hash filter
1086  *  @hw: pointer to hardware structure
1087  *  @input: unique input dword
1088  *  @queue: queue index to direct traffic to
1089  *  @fdircmd: any extra flags to set in fdircmd register
1090  *  @fdirhash: pre-calculated hash value for the filter
1091  **/
static int
fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
		union ixgbe_atr_input *input, u8 queue, uint32_t fdircmd,
		uint32_t fdirhash)
{
	int err = 0;

	PMD_INIT_FUNC_TRACE();

	/* configure FDIRCMD register */
	fdircmd |= IXGBE_FDIRCMD_CMD_ADD_FLOW |
		  IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
	fdircmd |= input->formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT;
	fdircmd |= (uint32_t)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;

	/* program the hash before FDIRCMD; the FDIRCMD write issues the
	 * command (same ordering as fdir_write_perfect_filter_82599())
	 */
	IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, fdircmd);

	PMD_DRV_LOG(DEBUG, "Rx Queue=%x hash=%x", queue, fdirhash);

	/* poll until the hardware clears the command field */
	err = ixgbe_fdir_check_cmd_complete(hw, &fdircmd);
	if (err < 0)
		PMD_DRV_LOG(ERR, "Timeout writing flow director filter.");

	return err;
}
1118
1119 /*
1120  * This is based on ixgbe_fdir_erase_perfect_filter_82599() in
1121  * base/ixgbe_82599.c. It is modified to take in the hash as a parameter so
1122  * that it can be used for removing signature and perfect filters.
1123  */
static int
fdir_erase_filter_82599(struct ixgbe_hw *hw, uint32_t fdirhash)
{
	uint32_t fdircmd = 0;
	int err = 0;

	IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);

	/* flush hash to HW */
	IXGBE_WRITE_FLUSH(hw);

	/* Query if filter is present */
	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, IXGBE_FDIRCMD_CMD_QUERY_REM_FILT);

	/* the poll helper leaves the last FDIRCMD value in fdircmd, which
	 * carries the FILTER_VALID flag checked below
	 */
	err = ixgbe_fdir_check_cmd_complete(hw, &fdircmd);
	if (err < 0) {
		PMD_INIT_LOG(ERR, "Timeout querying for flow director filter.");
		return err;
	}

	/* if filter exists in hardware then remove it */
	if (fdircmd & IXGBE_FDIRCMD_FILTER_VALID) {
		IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
		IXGBE_WRITE_FLUSH(hw);
		IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
				IXGBE_FDIRCMD_CMD_REMOVE_FLOW);
	}
	/* wait for the remove command (if any was issued) to complete */
	err = ixgbe_fdir_check_cmd_complete(hw, &fdircmd);
	if (err < 0)
		PMD_INIT_LOG(ERR, "Timeout erasing flow director filter.");
	return err;

}
1157
1158 static inline struct ixgbe_fdir_filter *
1159 ixgbe_fdir_filter_lookup(struct ixgbe_hw_fdir_info *fdir_info,
1160                          union ixgbe_atr_input *key)
1161 {
1162         int ret;
1163
1164         ret = rte_hash_lookup(fdir_info->hash_handle, (const void *)key);
1165         if (ret < 0)
1166                 return NULL;
1167
1168         return fdir_info->hash_map[ret];
1169 }
1170
1171 static inline int
1172 ixgbe_insert_fdir_filter(struct ixgbe_hw_fdir_info *fdir_info,
1173                          struct ixgbe_fdir_filter *fdir_filter)
1174 {
1175         int ret;
1176
1177         ret = rte_hash_add_key(fdir_info->hash_handle,
1178                                &fdir_filter->ixgbe_fdir);
1179
1180         if (ret < 0) {
1181                 PMD_DRV_LOG(ERR,
1182                             "Failed to insert fdir filter to hash table %d!",
1183                             ret);
1184                 return ret;
1185         }
1186
1187         fdir_info->hash_map[ret] = fdir_filter;
1188
1189         TAILQ_INSERT_TAIL(&fdir_info->fdir_list, fdir_filter, entries);
1190
1191         return 0;
1192 }
1193
1194 static inline int
1195 ixgbe_remove_fdir_filter(struct ixgbe_hw_fdir_info *fdir_info,
1196                          union ixgbe_atr_input *key)
1197 {
1198         int ret;
1199         struct ixgbe_fdir_filter *fdir_filter;
1200
1201         ret = rte_hash_del_key(fdir_info->hash_handle, key);
1202
1203         if (ret < 0) {
1204                 PMD_DRV_LOG(ERR, "No such fdir filter to delete %d!", ret);
1205                 return ret;
1206         }
1207
1208         fdir_filter = fdir_info->hash_map[ret];
1209         fdir_info->hash_map[ret] = NULL;
1210
1211         TAILQ_REMOVE(&fdir_info->fdir_list, fdir_filter, entries);
1212         rte_free(fdir_filter);
1213
1214         return 0;
1215 }
1216
1217 static int
1218 ixgbe_interpret_fdir_filter(struct rte_eth_dev *dev,
1219                             const struct rte_eth_fdir_filter *fdir_filter,
1220                             struct ixgbe_fdir_rule *rule)
1221 {
1222         enum rte_fdir_mode fdir_mode = dev->data->dev_conf.fdir_conf.mode;
1223         int err;
1224
1225         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1226
1227         err = ixgbe_fdir_filter_to_atr_input(fdir_filter,
1228                                              &rule->ixgbe_fdir,
1229                                              fdir_mode);
1230         if (err)
1231                 return err;
1232
1233         rule->mode = fdir_mode;
1234         if (fdir_filter->action.behavior == RTE_ETH_FDIR_REJECT)
1235                 rule->fdirflags = IXGBE_FDIRCMD_DROP;
1236         rule->queue = fdir_filter->action.rx_queue;
1237         rule->soft_id = fdir_filter->soft_id;
1238
1239         return 0;
1240 }
1241
/*
 * Program (add/update) or delete a flow director filter in both the
 * software shadow table and the hardware.
 *
 * @dev: pointer to the ethdev structure
 * @rule: pre-parsed rule (see ixgbe_interpret_fdir_filter())
 * @del: true to delete the filter, false to add/update
 * @update: true to allow updating an existing entry
 *
 * Returns 0 on success, a negative errno-style value on failure.
 */
int
ixgbe_fdir_filter_program(struct rte_eth_dev *dev,
			  struct ixgbe_fdir_rule *rule,
			  bool del,
			  bool update)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t fdircmd_flags;
	uint32_t fdirhash;
	uint8_t queue;
	bool is_perfect = FALSE;
	int err;
	struct ixgbe_hw_fdir_info *info =
		IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
	enum rte_fdir_mode fdir_mode = dev->data->dev_conf.fdir_conf.mode;
	struct ixgbe_fdir_filter *node;
	bool add_node = FALSE;

	/* the rule must match the mode the port was configured with */
	if (fdir_mode == RTE_FDIR_MODE_NONE ||
	    fdir_mode != rule->mode)
		return -ENOTSUP;

	/*
	 * Sanity check for x550.
	 * When adding a new filter with flow type set to IPv4,
	 * the flow director mask should be configed before,
	 * and the L4 protocol and ports are masked.
	 */
	if ((!del) &&
	    (hw->mac.type == ixgbe_mac_X550 ||
	     hw->mac.type == ixgbe_mac_X550EM_x ||
	     hw->mac.type == ixgbe_mac_X550EM_a) &&
	    (rule->ixgbe_fdir.formatted.flow_type ==
	     IXGBE_ATR_FLOW_TYPE_IPV4 ||
	     rule->ixgbe_fdir.formatted.flow_type ==
	     IXGBE_ATR_FLOW_TYPE_IPV6) &&
	    (info->mask.src_port_mask != 0 ||
	     info->mask.dst_port_mask != 0) &&
	    (rule->mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN &&
	     rule->mode != RTE_FDIR_MODE_PERFECT_TUNNEL)) {
		PMD_DRV_LOG(ERR, "By this device,"
			    " IPv4 is not supported without"
			    " L4 protocol and ports masked!");
		return -ENOTSUP;
	}

	if (fdir_mode >= RTE_FDIR_MODE_PERFECT &&
	    fdir_mode <= RTE_FDIR_MODE_PERFECT_TUNNEL)
		is_perfect = TRUE;

	if (is_perfect) {
		/* perfect mode rejects IPv6 flow types outright */
		if (rule->ixgbe_fdir.formatted.flow_type &
		    IXGBE_ATR_L4TYPE_IPV6_MASK) {
			PMD_DRV_LOG(ERR, "IPv6 is not supported in"
				    " perfect mode!");
			return -ENOTSUP;
		}
		/* bucket hash in the low bits, soft_id in the upper bits */
		fdirhash = atr_compute_perfect_hash_82599(&rule->ixgbe_fdir,
							  dev->data->dev_conf.fdir_conf.pballoc);
		fdirhash |= rule->soft_id <<
			IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT;
	} else
		fdirhash = atr_compute_sig_hash_82599(&rule->ixgbe_fdir,
						      dev->data->dev_conf.fdir_conf.pballoc);

	if (del) {
		/* remove the SW table entry first, then erase from HW */
		err = ixgbe_remove_fdir_filter(info, &rule->ixgbe_fdir);
		if (err < 0)
			return err;

		err = fdir_erase_filter_82599(hw, fdirhash);
		if (err < 0)
			PMD_DRV_LOG(ERR, "Fail to delete FDIR filter!");
		else
			PMD_DRV_LOG(DEBUG, "Success to delete FDIR filter!");
		return err;
	}
	/* add or update an fdir filter*/
	fdircmd_flags = (update) ? IXGBE_FDIRCMD_FILTER_UPDATE : 0;
	if (rule->fdirflags & IXGBE_FDIRCMD_DROP) {
		/* drop action is only valid for perfect filters */
		if (is_perfect) {
			queue = dev->data->dev_conf.fdir_conf.drop_queue;
			fdircmd_flags |= IXGBE_FDIRCMD_DROP;
		} else {
			PMD_DRV_LOG(ERR, "Drop option is not supported in"
				    " signature mode.");
			return -EINVAL;
		}
	} else if (rule->queue < IXGBE_MAX_RX_QUEUE_NUM)
		queue = (uint8_t)rule->queue;
	else
		return -EINVAL;

	/* update the SW shadow table before touching hardware */
	node = ixgbe_fdir_filter_lookup(info, &rule->ixgbe_fdir);
	if (node) {
		if (update) {
			node->fdirflags = fdircmd_flags;
			node->fdirhash = fdirhash;
			node->queue = queue;
		} else {
			PMD_DRV_LOG(ERR, "Conflict with existing fdir filter!");
			return -EINVAL;
		}
	} else {
		add_node = TRUE;
		node = rte_zmalloc("ixgbe_fdir",
				   sizeof(struct ixgbe_fdir_filter),
				   0);
		if (!node)
			return -ENOMEM;
		rte_memcpy(&node->ixgbe_fdir,
				 &rule->ixgbe_fdir,
				 sizeof(union ixgbe_atr_input));
		node->fdirflags = fdircmd_flags;
		node->fdirhash = fdirhash;
		node->queue = queue;

		err = ixgbe_insert_fdir_filter(info, node);
		if (err < 0) {
			rte_free(node);
			return err;
		}
	}

	/* finally program the filter into the hardware */
	if (is_perfect) {
		err = fdir_write_perfect_filter_82599(hw, &rule->ixgbe_fdir,
						      queue, fdircmd_flags,
						      fdirhash, fdir_mode);
	} else {
		err = fdir_add_signature_filter_82599(hw, &rule->ixgbe_fdir,
						      queue, fdircmd_flags,
						      fdirhash);
	}
	if (err < 0) {
		PMD_DRV_LOG(ERR, "Fail to add FDIR filter!");

		/* roll back the SW table entry we just inserted */
		if (add_node)
			(void)ixgbe_remove_fdir_filter(info, &rule->ixgbe_fdir);
	} else {
		PMD_DRV_LOG(DEBUG, "Success to add FDIR filter");
	}

	return err;
}
1386
1387 /* ixgbe_add_del_fdir_filter - add or remove a flow diretor filter.
1388  * @dev: pointer to the structure rte_eth_dev
1389  * @fdir_filter: fdir filter entry
1390  * @del: 1 - delete, 0 - add
1391  * @update: 1 - update
1392  */
1393 static int
1394 ixgbe_add_del_fdir_filter(struct rte_eth_dev *dev,
1395                           const struct rte_eth_fdir_filter *fdir_filter,
1396                           bool del,
1397                           bool update)
1398 {
1399         struct ixgbe_fdir_rule rule;
1400         int err;
1401
1402         err = ixgbe_interpret_fdir_filter(dev, fdir_filter, &rule);
1403
1404         if (err)
1405                 return err;
1406
1407         return ixgbe_fdir_filter_program(dev, &rule, del, update);
1408 }
1409
1410 static int
1411 ixgbe_fdir_flush(struct rte_eth_dev *dev)
1412 {
1413         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1414         struct ixgbe_hw_fdir_info *info =
1415                         IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
1416         int ret;
1417
1418         ret = ixgbe_reinit_fdir_tables_82599(hw);
1419         if (ret < 0) {
1420                 PMD_INIT_LOG(ERR, "Failed to re-initialize FD table.");
1421                 return ret;
1422         }
1423
1424         info->f_add = 0;
1425         info->f_remove = 0;
1426         info->add = 0;
1427         info->remove = 0;
1428
1429         return ret;
1430 }
1431
1432 #define FDIRENTRIES_NUM_SHIFT 10
static void
ixgbe_fdir_info_get(struct rte_eth_dev *dev, struct rte_eth_fdir_info *fdir_info)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ixgbe_hw_fdir_info *info =
			IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
	uint32_t fdirctrl, max_num;
	uint8_t offset;

	/* flex byte offset is stored in FDIRCTRL in 2-byte units */
	fdirctrl = IXGBE_READ_REG(hw, IXGBE_FDIRCTRL);
	offset = ((fdirctrl & IXGBE_FDIRCTRL_FLEX_MASK) >>
			IXGBE_FDIRCTRL_FLEX_SHIFT) * sizeof(uint16_t);

	fdir_info->mode = dev->data->dev_conf.fdir_conf.mode;
	/* entry count scales with the packet-buffer allocation field */
	max_num = (1 << (FDIRENTRIES_NUM_SHIFT +
			(fdirctrl & FDIRCTRL_PBALLOC_MASK)));
	if (fdir_info->mode >= RTE_FDIR_MODE_PERFECT &&
	    fdir_info->mode <= RTE_FDIR_MODE_PERFECT_TUNNEL)
		fdir_info->guarant_spc = max_num;
	else if (fdir_info->mode == RTE_FDIR_MODE_SIGNATURE)
		/* signature mode offers four times the perfect-mode space */
		fdir_info->guarant_spc = max_num * 4;

	/* report the masks cached by the driver at configuration time */
	fdir_info->mask.vlan_tci_mask = info->mask.vlan_tci_mask;
	fdir_info->mask.ipv4_mask.src_ip = info->mask.src_ipv4_mask;
	fdir_info->mask.ipv4_mask.dst_ip = info->mask.dst_ipv4_mask;
	IPV6_MASK_TO_ADDR(info->mask.src_ipv6_mask,
			fdir_info->mask.ipv6_mask.src_ip);
	IPV6_MASK_TO_ADDR(info->mask.dst_ipv6_mask,
			fdir_info->mask.ipv6_mask.dst_ip);
	fdir_info->mask.src_port_mask = info->mask.src_port_mask;
	fdir_info->mask.dst_port_mask = info->mask.dst_port_mask;
	fdir_info->mask.mac_addr_byte_mask = info->mask.mac_addr_byte_mask;
	fdir_info->mask.tunnel_id_mask = info->mask.tunnel_id_mask;
	fdir_info->mask.tunnel_type_mask = info->mask.tunnel_type_mask;
	fdir_info->max_flexpayload = IXGBE_FDIR_MAX_FLEX_LEN;

	/* MAC-VLAN/tunnel perfect modes do not use the flow-type mask */
	if (fdir_info->mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN ||
	    fdir_info->mode == RTE_FDIR_MODE_PERFECT_TUNNEL)
		fdir_info->flow_types_mask[0] = 0;
	else
		fdir_info->flow_types_mask[0] = IXGBE_FDIR_FLOW_TYPES;

	/* one 2-byte flex payload segment at the offset read above */
	fdir_info->flex_payload_unit = sizeof(uint16_t);
	fdir_info->max_flex_payload_segment_num = 1;
	fdir_info->flex_payload_limit = IXGBE_MAX_FLX_SOURCE_OFF;
	fdir_info->flex_conf.nb_payloads = 1;
	fdir_info->flex_conf.flex_set[0].type = RTE_ETH_RAW_PAYLOAD;
	fdir_info->flex_conf.flex_set[0].src_offset[0] = offset;
	fdir_info->flex_conf.flex_set[0].src_offset[1] = offset + 1;
	fdir_info->flex_conf.nb_flexmasks = 1;
	fdir_info->flex_conf.flex_mask[0].flow_type = RTE_ETH_FLOW_UNKNOWN;
	fdir_info->flex_conf.flex_mask[0].mask[0] =
			(uint8_t)(info->mask.flex_bytes_mask & 0x00FF);
	fdir_info->flex_conf.flex_mask[0].mask[1] =
			(uint8_t)((info->mask.flex_bytes_mask & 0xFF00) >> 8);
}
1489
1490 static void
1491 ixgbe_fdir_stats_get(struct rte_eth_dev *dev, struct rte_eth_fdir_stats *fdir_stats)
1492 {
1493         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1494         struct ixgbe_hw_fdir_info *info =
1495                 IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
1496         uint32_t reg, max_num;
1497         enum rte_fdir_mode fdir_mode = dev->data->dev_conf.fdir_conf.mode;
1498
1499         /* Get the information from registers */
1500         reg = IXGBE_READ_REG(hw, IXGBE_FDIRFREE);
1501         info->collision = (uint16_t)((reg & IXGBE_FDIRFREE_COLL_MASK) >>
1502                                      IXGBE_FDIRFREE_COLL_SHIFT);
1503         info->free = (uint16_t)((reg & IXGBE_FDIRFREE_FREE_MASK) >>
1504                                 IXGBE_FDIRFREE_FREE_SHIFT);
1505
1506         reg = IXGBE_READ_REG(hw, IXGBE_FDIRLEN);
1507         info->maxhash = (uint16_t)((reg & IXGBE_FDIRLEN_MAXHASH_MASK) >>
1508                                    IXGBE_FDIRLEN_MAXHASH_SHIFT);
1509         info->maxlen  = (uint8_t)((reg & IXGBE_FDIRLEN_MAXLEN_MASK) >>
1510                                   IXGBE_FDIRLEN_MAXLEN_SHIFT);
1511
1512         reg = IXGBE_READ_REG(hw, IXGBE_FDIRUSTAT);
1513         info->remove += (reg & IXGBE_FDIRUSTAT_REMOVE_MASK) >>
1514                 IXGBE_FDIRUSTAT_REMOVE_SHIFT;
1515         info->add += (reg & IXGBE_FDIRUSTAT_ADD_MASK) >>
1516                 IXGBE_FDIRUSTAT_ADD_SHIFT;
1517
1518         reg = IXGBE_READ_REG(hw, IXGBE_FDIRFSTAT) & 0xFFFF;
1519         info->f_remove += (reg & IXGBE_FDIRFSTAT_FREMOVE_MASK) >>
1520                 IXGBE_FDIRFSTAT_FREMOVE_SHIFT;
1521         info->f_add += (reg & IXGBE_FDIRFSTAT_FADD_MASK) >>
1522                 IXGBE_FDIRFSTAT_FADD_SHIFT;
1523
1524         /*  Copy the new information in the fdir parameter */
1525         fdir_stats->collision = info->collision;
1526         fdir_stats->free = info->free;
1527         fdir_stats->maxhash = info->maxhash;
1528         fdir_stats->maxlen = info->maxlen;
1529         fdir_stats->remove = info->remove;
1530         fdir_stats->add = info->add;
1531         fdir_stats->f_remove = info->f_remove;
1532         fdir_stats->f_add = info->f_add;
1533
1534         reg = IXGBE_READ_REG(hw, IXGBE_FDIRCTRL);
1535         max_num = (1 << (FDIRENTRIES_NUM_SHIFT +
1536                          (reg & FDIRCTRL_PBALLOC_MASK)));
1537         if (fdir_mode >= RTE_FDIR_MODE_PERFECT &&
1538             fdir_mode <= RTE_FDIR_MODE_PERFECT_TUNNEL)
1539                 fdir_stats->guarant_cnt = max_num - fdir_stats->free;
1540         else if (fdir_mode == RTE_FDIR_MODE_SIGNATURE)
1541                 fdir_stats->guarant_cnt = max_num * 4 - fdir_stats->free;
1542
1543 }
1544
1545 /*
1546  * ixgbe_fdir_ctrl_func - deal with all operations on flow director.
1547  * @dev: pointer to the structure rte_eth_dev
1548  * @filter_op:operation will be taken
1549  * @arg: a pointer to specific structure corresponding to the filter_op
1550  */
1551 int
1552 ixgbe_fdir_ctrl_func(struct rte_eth_dev *dev,
1553                         enum rte_filter_op filter_op, void *arg)
1554 {
1555         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1556         int ret = 0;
1557
1558         if (hw->mac.type != ixgbe_mac_82599EB &&
1559                 hw->mac.type != ixgbe_mac_X540 &&
1560                 hw->mac.type != ixgbe_mac_X550 &&
1561                 hw->mac.type != ixgbe_mac_X550EM_x &&
1562                 hw->mac.type != ixgbe_mac_X550EM_a)
1563                 return -ENOTSUP;
1564
1565         if (filter_op == RTE_ETH_FILTER_NOP)
1566                 return 0;
1567
1568         if (arg == NULL && filter_op != RTE_ETH_FILTER_FLUSH)
1569                 return -EINVAL;
1570
1571         switch (filter_op) {
1572         case RTE_ETH_FILTER_ADD:
1573                 ret = ixgbe_add_del_fdir_filter(dev,
1574                         (struct rte_eth_fdir_filter *)arg, FALSE, FALSE);
1575                 break;
1576         case RTE_ETH_FILTER_UPDATE:
1577                 ret = ixgbe_add_del_fdir_filter(dev,
1578                         (struct rte_eth_fdir_filter *)arg, FALSE, TRUE);
1579                 break;
1580         case RTE_ETH_FILTER_DELETE:
1581                 ret = ixgbe_add_del_fdir_filter(dev,
1582                         (struct rte_eth_fdir_filter *)arg, TRUE, FALSE);
1583                 break;
1584         case RTE_ETH_FILTER_FLUSH:
1585                 ret = ixgbe_fdir_flush(dev);
1586                 break;
1587         case RTE_ETH_FILTER_INFO:
1588                 ixgbe_fdir_info_get(dev, (struct rte_eth_fdir_info *)arg);
1589                 break;
1590         case RTE_ETH_FILTER_STATS:
1591                 ixgbe_fdir_stats_get(dev, (struct rte_eth_fdir_stats *)arg);
1592                 break;
1593         default:
1594                 PMD_DRV_LOG(ERR, "unknown operation %u", filter_op);
1595                 ret = -EINVAL;
1596                 break;
1597         }
1598         return ret;
1599 }
1600
1601 /* restore flow director filter */
1602 void
1603 ixgbe_fdir_filter_restore(struct rte_eth_dev *dev)
1604 {
1605         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1606         struct ixgbe_hw_fdir_info *fdir_info =
1607                 IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
1608         struct ixgbe_fdir_filter *node;
1609         bool is_perfect = FALSE;
1610         enum rte_fdir_mode fdir_mode = dev->data->dev_conf.fdir_conf.mode;
1611
1612         if (fdir_mode >= RTE_FDIR_MODE_PERFECT &&
1613             fdir_mode <= RTE_FDIR_MODE_PERFECT_TUNNEL)
1614                 is_perfect = TRUE;
1615
1616         if (is_perfect) {
1617                 TAILQ_FOREACH(node, &fdir_info->fdir_list, entries) {
1618                         (void)fdir_write_perfect_filter_82599(hw,
1619                                                               &node->ixgbe_fdir,
1620                                                               node->queue,
1621                                                               node->fdirflags,
1622                                                               node->fdirhash,
1623                                                               fdir_mode);
1624                 }
1625         } else {
1626                 TAILQ_FOREACH(node, &fdir_info->fdir_list, entries) {
1627                         (void)fdir_add_signature_filter_82599(hw,
1628                                                               &node->ixgbe_fdir,
1629                                                               node->queue,
1630                                                               node->fdirflags,
1631                                                               node->fdirhash);
1632                 }
1633         }
1634 }
1635
1636 /* remove all the flow director filters */
1637 int
1638 ixgbe_clear_all_fdir_filter(struct rte_eth_dev *dev)
1639 {
1640         struct ixgbe_hw_fdir_info *fdir_info =
1641                 IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
1642         struct ixgbe_fdir_filter *fdir_filter;
1643         struct ixgbe_fdir_filter *filter_flag;
1644         int ret = 0;
1645
1646         /* flush flow director */
1647         rte_hash_reset(fdir_info->hash_handle);
1648         memset(fdir_info->hash_map, 0,
1649                sizeof(struct ixgbe_fdir_filter *) * IXGBE_MAX_FDIR_FILTER_NUM);
1650         filter_flag = TAILQ_FIRST(&fdir_info->fdir_list);
1651         while ((fdir_filter = TAILQ_FIRST(&fdir_info->fdir_list))) {
1652                 TAILQ_REMOVE(&fdir_info->fdir_list,
1653                              fdir_filter,
1654                              entries);
1655                 rte_free(fdir_filter);
1656         }
1657
1658         if (filter_flag != NULL)
1659                 ret = ixgbe_fdir_flush(dev);
1660
1661         return ret;
1662 }