New upstream version 17.11.1
[deb_dpdk.git] / drivers / net / i40e / i40e_flow.c
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright (c) 2016-2017 Intel Corporation. All rights reserved.
5  *
6  *   Redistribution and use in source and binary forms, with or without
7  *   modification, are permitted provided that the following conditions
8  *   are met:
9  *
10  *     * Redistributions of source code must retain the above copyright
11  *       notice, this list of conditions and the following disclaimer.
12  *     * Redistributions in binary form must reproduce the above copyright
13  *       notice, this list of conditions and the following disclaimer in
14  *       the documentation and/or other materials provided with the
15  *       distribution.
16  *     * Neither the name of Intel Corporation nor the names of its
17  *       contributors may be used to endorse or promote products derived
18  *       from this software without specific prior written permission.
19  *
20  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
23  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
24  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
25  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
26  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
27  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
28  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
29  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
30  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31  */
32
33 #include <sys/queue.h>
34 #include <stdio.h>
35 #include <errno.h>
36 #include <stdint.h>
37 #include <string.h>
38 #include <unistd.h>
39 #include <stdarg.h>
40
41 #include <rte_ether.h>
42 #include <rte_ethdev.h>
43 #include <rte_log.h>
44 #include <rte_malloc.h>
45 #include <rte_eth_ctrl.h>
46 #include <rte_tailq.h>
47 #include <rte_flow_driver.h>
48
49 #include "i40e_logs.h"
50 #include "base/i40e_type.h"
51 #include "base/i40e_prototype.h"
52 #include "i40e_ethdev.h"
53
/* Mask selecting the 8-bit Traffic Class field inside the IPv6
 * Version/TC/Flow-Label word; the shift offset comes from the i40e
 * FDIR definitions.
 */
#define I40E_IPV6_TC_MASK       (0xFF << I40E_FDIR_IPv6_TC_OFFSET)
/* IPv6 Next Header value of the Fragment extension header (RFC 8200). */
#define I40E_IPV6_FRAG_HEADER   44
/* NOTE(review): tenant-id byte-group count used by tunnel filters —
 * confirm against the i40e_tunnel_filter_conf users.
 */
#define I40E_TENANT_ARRAY_NUM   3
/* Match the whole 802.1Q TCI (PCP + DEI + VID). */
#define I40E_TCI_MASK           0xFFFF
58
/*
 * Forward declarations.  The first four implement the generic rte_flow
 * ops (validate/create/destroy/flush) registered in i40e_flow_ops
 * below.  The i40e_flow_parse_* helpers translate an rte_flow triple
 * (attr, pattern, actions) into the corresponding i40e filter
 * representation; the destroy/flush helpers remove filters that were
 * previously programmed by the PF driver.
 */
static int i40e_flow_validate(struct rte_eth_dev *dev,
			      const struct rte_flow_attr *attr,
			      const struct rte_flow_item pattern[],
			      const struct rte_flow_action actions[],
			      struct rte_flow_error *error);
static struct rte_flow *i40e_flow_create(struct rte_eth_dev *dev,
					 const struct rte_flow_attr *attr,
					 const struct rte_flow_item pattern[],
					 const struct rte_flow_action actions[],
					 struct rte_flow_error *error);
static int i40e_flow_destroy(struct rte_eth_dev *dev,
			     struct rte_flow *flow,
			     struct rte_flow_error *error);
static int i40e_flow_flush(struct rte_eth_dev *dev,
			   struct rte_flow_error *error);
/* Parsers for the ethertype filter type (pattern and actions). */
static int
i40e_flow_parse_ethertype_pattern(struct rte_eth_dev *dev,
				  const struct rte_flow_item *pattern,
				  struct rte_flow_error *error,
				  struct rte_eth_ethertype_filter *filter);
static int i40e_flow_parse_ethertype_action(struct rte_eth_dev *dev,
				    const struct rte_flow_action *actions,
				    struct rte_flow_error *error,
				    struct rte_eth_ethertype_filter *filter);
/* Parsers for the flow director filter type (pattern and actions). */
static int i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
					const struct rte_flow_item *pattern,
					struct rte_flow_error *error,
					struct i40e_fdir_filter_conf *filter);
static int i40e_flow_parse_fdir_action(struct rte_eth_dev *dev,
				       const struct rte_flow_action *actions,
				       struct rte_flow_error *error,
				       struct i40e_fdir_filter_conf *filter);
/* Action parser shared by the tunnel (cloud) filter types. */
static int i40e_flow_parse_tunnel_action(struct rte_eth_dev *dev,
				 const struct rte_flow_action *actions,
				 struct rte_flow_error *error,
				 struct i40e_tunnel_filter_conf *filter);
static int i40e_flow_parse_attr(const struct rte_flow_attr *attr,
				struct rte_flow_error *error);
/* Top-level per-filter-type entry points; each fills one member of the
 * union i40e_filter_t scratch area.
 */
static int i40e_flow_parse_ethertype_filter(struct rte_eth_dev *dev,
				    const struct rte_flow_attr *attr,
				    const struct rte_flow_item pattern[],
				    const struct rte_flow_action actions[],
				    struct rte_flow_error *error,
				    union i40e_filter_t *filter);
static int i40e_flow_parse_fdir_filter(struct rte_eth_dev *dev,
				       const struct rte_flow_attr *attr,
				       const struct rte_flow_item pattern[],
				       const struct rte_flow_action actions[],
				       struct rte_flow_error *error,
				       union i40e_filter_t *filter);
static int i40e_flow_parse_vxlan_filter(struct rte_eth_dev *dev,
					const struct rte_flow_attr *attr,
					const struct rte_flow_item pattern[],
					const struct rte_flow_action actions[],
					struct rte_flow_error *error,
					union i40e_filter_t *filter);
static int i40e_flow_parse_nvgre_filter(struct rte_eth_dev *dev,
					const struct rte_flow_attr *attr,
					const struct rte_flow_item pattern[],
					const struct rte_flow_action actions[],
					struct rte_flow_error *error,
					union i40e_filter_t *filter);
static int i40e_flow_parse_mpls_filter(struct rte_eth_dev *dev,
				       const struct rte_flow_attr *attr,
				       const struct rte_flow_item pattern[],
				       const struct rte_flow_action actions[],
				       struct rte_flow_error *error,
				       union i40e_filter_t *filter);
static int i40e_flow_parse_gtp_filter(struct rte_eth_dev *dev,
				      const struct rte_flow_attr *attr,
				      const struct rte_flow_item pattern[],
				      const struct rte_flow_action actions[],
				      struct rte_flow_error *error,
				      union i40e_filter_t *filter);
/* Teardown helpers: remove a single installed filter ... */
static int i40e_flow_destroy_ethertype_filter(struct i40e_pf *pf,
				      struct i40e_ethertype_filter *filter);
static int i40e_flow_destroy_tunnel_filter(struct i40e_pf *pf,
					   struct i40e_tunnel_filter *filter);
/* ... or flush every filter of one type. */
static int i40e_flow_flush_fdir_filter(struct i40e_pf *pf);
static int i40e_flow_flush_ethertype_filter(struct i40e_pf *pf);
static int i40e_flow_flush_tunnel_filter(struct i40e_pf *pf);
static int
i40e_flow_parse_qinq_filter(struct rte_eth_dev *dev,
			      const struct rte_flow_attr *attr,
			      const struct rte_flow_item pattern[],
			      const struct rte_flow_action actions[],
			      struct rte_flow_error *error,
			      union i40e_filter_t *filter);
static int
i40e_flow_parse_qinq_pattern(struct rte_eth_dev *dev,
			      const struct rte_flow_item *pattern,
			      struct rte_flow_error *error,
			      struct i40e_tunnel_filter_conf *filter);
152
/* Generic rte_flow ops table exported to the ethdev layer; entries are
 * the static functions declared above.
 */
const struct rte_flow_ops i40e_flow_ops = {
	.validate = i40e_flow_validate,
	.create = i40e_flow_create,
	.destroy = i40e_flow_destroy,
	.flush = i40e_flow_flush,
};
159
/* Scratch filter produced by the parse stage and later consumed when the
 * flow is actually created; cons_filter_type records which union member
 * is currently valid (RTE_ETH_FILTER_NONE when empty).
 * NOTE(review): file-scope mutable state with external linkage — looks
 * like it could be 'static'; confirm no other translation unit uses it.
 */
union i40e_filter_t cons_filter;
enum rte_filter_type cons_filter_type = RTE_ETH_FILTER_NONE;
162
/* Pattern matched ethertype filter: a bare ETH item, no L3/L4 items. */
static enum rte_flow_item_type pattern_ethertype[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_END,
};
168
/* Pattern matched flow director filter.
 * This group: plain IPv4/IPv6 with an optional L4 header (UDP, TCP,
 * SCTP) or a GTP-C/GTP-U tunnel, optionally followed by an inner IP
 * item for GTP-U.
 */
static enum rte_flow_item_type pattern_fdir_ipv4[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_udp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_tcp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_sctp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_SCTP,
	RTE_FLOW_ITEM_TYPE_END,
};

/* GTP control/user tunnels are carried over UDP. */
static enum rte_flow_item_type pattern_fdir_ipv4_gtpc[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_GTPC,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_gtpu[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_GTPU,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_gtpu_ipv4[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_GTPU,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_gtpu_ipv6[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_GTPU,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_udp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_tcp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_sctp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_SCTP,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_gtpc[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_GTPC,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_gtpu[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_GTPU,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_gtpu_ipv4[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_GTPU,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_gtpu_ipv6[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_GTPU,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_END,
};
291
/* Flow director patterns with trailing RAW items: each header layout
 * (bare ethertype, IPv4/IPv6, with or without UDP/TCP/SCTP) is
 * replicated with one, two or three RAW items appended (_raw_1/2/3).
 */
static enum rte_flow_item_type pattern_fdir_ethertype_raw_1[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ethertype_raw_2[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ethertype_raw_3[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_raw_1[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_raw_2[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_raw_3[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_udp_raw_1[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_udp_raw_2[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_udp_raw_3[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_tcp_raw_1[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_tcp_raw_2[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_tcp_raw_3[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_sctp_raw_1[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_SCTP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_sctp_raw_2[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_SCTP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_sctp_raw_3[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_SCTP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_raw_1[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_raw_2[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_raw_3[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_udp_raw_1[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_udp_raw_2[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_udp_raw_3[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_tcp_raw_1[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_tcp_raw_2[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_tcp_raw_3[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_sctp_raw_1[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_SCTP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_sctp_raw_2[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_SCTP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_sctp_raw_3[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_SCTP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};
522
/* Flow director patterns carrying a single VLAN tag: ETH + VLAN,
 * optionally followed by IPv4/IPv6 and an L4 header.
 */
static enum rte_flow_item_type pattern_fdir_ethertype_vlan[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_udp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_tcp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_sctp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_SCTP,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_udp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_tcp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_sctp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_SCTP,
	RTE_FLOW_ITEM_TYPE_END,
};
590
/* VLAN-tagged flow director patterns with trailing RAW items: every
 * VLAN layout above is replicated with one, two or three RAW items
 * appended (_raw_1/2/3).
 */
static enum rte_flow_item_type pattern_fdir_ethertype_vlan_raw_1[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ethertype_vlan_raw_2[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ethertype_vlan_raw_3[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_raw_1[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_raw_2[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_raw_3[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_udp_raw_1[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_udp_raw_2[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_udp_raw_3[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_tcp_raw_1[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_tcp_raw_2[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_tcp_raw_3[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_sctp_raw_1[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_SCTP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_sctp_raw_2[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_SCTP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_sctp_raw_3[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_SCTP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_raw_1[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_raw_2[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_raw_3[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_udp_raw_1[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_udp_raw_2[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_udp_raw_3[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_tcp_raw_1[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_tcp_raw_2[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_tcp_raw_3[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_sctp_raw_1[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_SCTP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_sctp_raw_2[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_SCTP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_sctp_raw_3[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_SCTP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};
848
/*
 * Flow director patterns with a trailing VF item: the matched traffic is
 * redirected to the virtual function given by the VF item's id.
 */
static enum rte_flow_item_type pattern_fdir_ipv4_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_udp_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_tcp_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_TCP,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_sctp_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_SCTP,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

/* IPv6 variants of the VF-redirect patterns above. */
static enum rte_flow_item_type pattern_fdir_ipv6_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_udp_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_tcp_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_TCP,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_sctp_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_SCTP,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};
910
/*
 * Ethertype + one/two/three RAW (flexible payload) items, redirected to a VF.
 */
static enum rte_flow_item_type pattern_fdir_ethertype_raw_1_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ethertype_raw_2_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ethertype_raw_3_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};
934
/*
 * IPv4 (optionally + UDP/TCP/SCTP) with one to three RAW flexible payload
 * items, redirected to a VF.
 */
static enum rte_flow_item_type pattern_fdir_ipv4_raw_1_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_raw_2_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_raw_3_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_udp_raw_1_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_udp_raw_2_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_udp_raw_3_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_tcp_raw_1_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_TCP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_tcp_raw_2_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_TCP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_tcp_raw_3_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_TCP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_sctp_raw_1_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_SCTP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_sctp_raw_2_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_SCTP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_sctp_raw_3_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_SCTP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};
1051
/*
 * IPv6 (optionally + UDP/TCP/SCTP) with one to three RAW flexible payload
 * items, redirected to a VF.
 */
static enum rte_flow_item_type pattern_fdir_ipv6_raw_1_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_raw_2_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_raw_3_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_udp_raw_1_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_udp_raw_2_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_udp_raw_3_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_tcp_raw_1_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_TCP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_tcp_raw_2_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_TCP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_tcp_raw_3_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_TCP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_sctp_raw_1_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_SCTP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_sctp_raw_2_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_SCTP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_sctp_raw_3_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_SCTP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};
1168
/*
 * Single-VLAN patterns (ethertype and VLAN + IPv4/IPv6 + L4 protocol),
 * redirected to a VF.
 */
static enum rte_flow_item_type pattern_fdir_ethertype_vlan_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_udp_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_tcp_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_TCP,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_sctp_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_SCTP,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_udp_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_tcp_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_TCP,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_sctp_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_SCTP,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};
1245
/*
 * Ethertype + VLAN with one to three RAW flexible payload items,
 * redirected to a VF.
 */
static enum rte_flow_item_type pattern_fdir_ethertype_vlan_raw_1_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ethertype_vlan_raw_2_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ethertype_vlan_raw_3_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};
1272
/*
 * VLAN + IPv4 (optionally + UDP/TCP/SCTP) with one to three RAW flexible
 * payload items, redirected to a VF.
 */
static enum rte_flow_item_type pattern_fdir_vlan_ipv4_raw_1_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_raw_2_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_raw_3_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_udp_raw_1_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_udp_raw_2_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_udp_raw_3_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_tcp_raw_1_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_TCP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_tcp_raw_2_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_TCP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_tcp_raw_3_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_TCP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_sctp_raw_1_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_SCTP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_sctp_raw_2_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_SCTP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_sctp_raw_3_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_SCTP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};
1401
/*
 * VLAN + IPv6 (optionally + UDP/TCP/SCTP) with one to three RAW flexible
 * payload items, redirected to a VF.
 */
static enum rte_flow_item_type pattern_fdir_vlan_ipv6_raw_1_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_raw_2_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_raw_3_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_udp_raw_1_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_udp_raw_2_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_udp_raw_3_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_tcp_raw_1_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_TCP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_tcp_raw_2_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_TCP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_tcp_raw_3_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_TCP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_sctp_raw_1_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_SCTP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_sctp_raw_2_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_SCTP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_sctp_raw_3_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_SCTP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};
1530
/* Pattern matched tunnel filter */

/* VXLAN over IPv4, matching on the inner Ethernet header. */
static enum rte_flow_item_type pattern_vxlan_1[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_VXLAN,
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_END,
};

/* VXLAN over IPv6, matching on the inner Ethernet header. */
static enum rte_flow_item_type pattern_vxlan_2[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_VXLAN,
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_END,
};

/* VXLAN over IPv4 with an inner VLAN tag. */
static enum rte_flow_item_type pattern_vxlan_3[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_VXLAN,
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_END,
};

/* VXLAN over IPv6 with an inner VLAN tag. */
static enum rte_flow_item_type pattern_vxlan_4[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_VXLAN,
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_END,
};
1569
/* NVGRE over IPv4, matching on the inner Ethernet header. */
static enum rte_flow_item_type pattern_nvgre_1[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_NVGRE,
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_END,
};

/* NVGRE over IPv6, matching on the inner Ethernet header. */
static enum rte_flow_item_type pattern_nvgre_2[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_NVGRE,
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_END,
};

/* NVGRE over IPv4 with an inner VLAN tag. */
static enum rte_flow_item_type pattern_nvgre_3[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_NVGRE,
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_END,
};

/* NVGRE over IPv6 with an inner VLAN tag. */
static enum rte_flow_item_type pattern_nvgre_4[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_NVGRE,
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_END,
};
1603
/* MPLS-in-UDP over IPv4. */
static enum rte_flow_item_type pattern_mpls_1[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_MPLS,
        RTE_FLOW_ITEM_TYPE_END,
};

/* MPLS-in-UDP over IPv6. */
static enum rte_flow_item_type pattern_mpls_2[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_MPLS,
        RTE_FLOW_ITEM_TYPE_END,
};

/* MPLS-in-GRE over IPv4. */
static enum rte_flow_item_type pattern_mpls_3[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_GRE,
        RTE_FLOW_ITEM_TYPE_MPLS,
        RTE_FLOW_ITEM_TYPE_END,
};

/* MPLS-in-GRE over IPv6. */
static enum rte_flow_item_type pattern_mpls_4[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_GRE,
        RTE_FLOW_ITEM_TYPE_MPLS,
        RTE_FLOW_ITEM_TYPE_END,
};
1635
/* QinQ: double VLAN tags (outer + inner) on Ethernet. */
static enum rte_flow_item_type pattern_qinq_1[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_END,
};
1642
1643 static struct i40e_valid_pattern i40e_supported_patterns[] = {
1644         /* Ethertype */
1645         { pattern_ethertype, i40e_flow_parse_ethertype_filter },
1646         /* FDIR - support default flow type without flexible payload*/
1647         { pattern_ethertype, i40e_flow_parse_fdir_filter },
1648         { pattern_fdir_ipv4, i40e_flow_parse_fdir_filter },
1649         { pattern_fdir_ipv4_udp, i40e_flow_parse_fdir_filter },
1650         { pattern_fdir_ipv4_tcp, i40e_flow_parse_fdir_filter },
1651         { pattern_fdir_ipv4_sctp, i40e_flow_parse_fdir_filter },
1652         { pattern_fdir_ipv4_gtpc, i40e_flow_parse_fdir_filter },
1653         { pattern_fdir_ipv4_gtpu, i40e_flow_parse_fdir_filter },
1654         { pattern_fdir_ipv4_gtpu_ipv4, i40e_flow_parse_fdir_filter },
1655         { pattern_fdir_ipv4_gtpu_ipv6, i40e_flow_parse_fdir_filter },
1656         { pattern_fdir_ipv6, i40e_flow_parse_fdir_filter },
1657         { pattern_fdir_ipv6_udp, i40e_flow_parse_fdir_filter },
1658         { pattern_fdir_ipv6_tcp, i40e_flow_parse_fdir_filter },
1659         { pattern_fdir_ipv6_sctp, i40e_flow_parse_fdir_filter },
1660         { pattern_fdir_ipv6_gtpc, i40e_flow_parse_fdir_filter },
1661         { pattern_fdir_ipv6_gtpu, i40e_flow_parse_fdir_filter },
1662         { pattern_fdir_ipv6_gtpu_ipv4, i40e_flow_parse_fdir_filter },
1663         { pattern_fdir_ipv6_gtpu_ipv6, i40e_flow_parse_fdir_filter },
1664         /* FDIR - support default flow type with flexible payload */
1665         { pattern_fdir_ethertype_raw_1, i40e_flow_parse_fdir_filter },
1666         { pattern_fdir_ethertype_raw_2, i40e_flow_parse_fdir_filter },
1667         { pattern_fdir_ethertype_raw_3, i40e_flow_parse_fdir_filter },
1668         { pattern_fdir_ipv4_raw_1, i40e_flow_parse_fdir_filter },
1669         { pattern_fdir_ipv4_raw_2, i40e_flow_parse_fdir_filter },
1670         { pattern_fdir_ipv4_raw_3, i40e_flow_parse_fdir_filter },
1671         { pattern_fdir_ipv4_udp_raw_1, i40e_flow_parse_fdir_filter },
1672         { pattern_fdir_ipv4_udp_raw_2, i40e_flow_parse_fdir_filter },
1673         { pattern_fdir_ipv4_udp_raw_3, i40e_flow_parse_fdir_filter },
1674         { pattern_fdir_ipv4_tcp_raw_1, i40e_flow_parse_fdir_filter },
1675         { pattern_fdir_ipv4_tcp_raw_2, i40e_flow_parse_fdir_filter },
1676         { pattern_fdir_ipv4_tcp_raw_3, i40e_flow_parse_fdir_filter },
1677         { pattern_fdir_ipv4_sctp_raw_1, i40e_flow_parse_fdir_filter },
1678         { pattern_fdir_ipv4_sctp_raw_2, i40e_flow_parse_fdir_filter },
1679         { pattern_fdir_ipv4_sctp_raw_3, i40e_flow_parse_fdir_filter },
1680         { pattern_fdir_ipv6_raw_1, i40e_flow_parse_fdir_filter },
1681         { pattern_fdir_ipv6_raw_2, i40e_flow_parse_fdir_filter },
1682         { pattern_fdir_ipv6_raw_3, i40e_flow_parse_fdir_filter },
1683         { pattern_fdir_ipv6_udp_raw_1, i40e_flow_parse_fdir_filter },
1684         { pattern_fdir_ipv6_udp_raw_2, i40e_flow_parse_fdir_filter },
1685         { pattern_fdir_ipv6_udp_raw_3, i40e_flow_parse_fdir_filter },
1686         { pattern_fdir_ipv6_tcp_raw_1, i40e_flow_parse_fdir_filter },
1687         { pattern_fdir_ipv6_tcp_raw_2, i40e_flow_parse_fdir_filter },
1688         { pattern_fdir_ipv6_tcp_raw_3, i40e_flow_parse_fdir_filter },
1689         { pattern_fdir_ipv6_sctp_raw_1, i40e_flow_parse_fdir_filter },
1690         { pattern_fdir_ipv6_sctp_raw_2, i40e_flow_parse_fdir_filter },
1691         { pattern_fdir_ipv6_sctp_raw_3, i40e_flow_parse_fdir_filter },
1692         /* FDIR - support single vlan input set */
1693         { pattern_fdir_ethertype_vlan, i40e_flow_parse_fdir_filter },
1694         { pattern_fdir_vlan_ipv4, i40e_flow_parse_fdir_filter },
1695         { pattern_fdir_vlan_ipv4_udp, i40e_flow_parse_fdir_filter },
1696         { pattern_fdir_vlan_ipv4_tcp, i40e_flow_parse_fdir_filter },
1697         { pattern_fdir_vlan_ipv4_sctp, i40e_flow_parse_fdir_filter },
1698         { pattern_fdir_vlan_ipv6, i40e_flow_parse_fdir_filter },
1699         { pattern_fdir_vlan_ipv6_udp, i40e_flow_parse_fdir_filter },
1700         { pattern_fdir_vlan_ipv6_tcp, i40e_flow_parse_fdir_filter },
1701         { pattern_fdir_vlan_ipv6_sctp, i40e_flow_parse_fdir_filter },
1702         { pattern_fdir_ethertype_vlan_raw_1, i40e_flow_parse_fdir_filter },
1703         { pattern_fdir_ethertype_vlan_raw_2, i40e_flow_parse_fdir_filter },
1704         { pattern_fdir_ethertype_vlan_raw_3, i40e_flow_parse_fdir_filter },
1705         { pattern_fdir_vlan_ipv4_raw_1, i40e_flow_parse_fdir_filter },
1706         { pattern_fdir_vlan_ipv4_raw_2, i40e_flow_parse_fdir_filter },
1707         { pattern_fdir_vlan_ipv4_raw_3, i40e_flow_parse_fdir_filter },
1708         { pattern_fdir_vlan_ipv4_udp_raw_1, i40e_flow_parse_fdir_filter },
1709         { pattern_fdir_vlan_ipv4_udp_raw_2, i40e_flow_parse_fdir_filter },
1710         { pattern_fdir_vlan_ipv4_udp_raw_3, i40e_flow_parse_fdir_filter },
1711         { pattern_fdir_vlan_ipv4_tcp_raw_1, i40e_flow_parse_fdir_filter },
1712         { pattern_fdir_vlan_ipv4_tcp_raw_2, i40e_flow_parse_fdir_filter },
1713         { pattern_fdir_vlan_ipv4_tcp_raw_3, i40e_flow_parse_fdir_filter },
1714         { pattern_fdir_vlan_ipv4_sctp_raw_1, i40e_flow_parse_fdir_filter },
1715         { pattern_fdir_vlan_ipv4_sctp_raw_2, i40e_flow_parse_fdir_filter },
1716         { pattern_fdir_vlan_ipv4_sctp_raw_3, i40e_flow_parse_fdir_filter },
1717         { pattern_fdir_vlan_ipv6_raw_1, i40e_flow_parse_fdir_filter },
1718         { pattern_fdir_vlan_ipv6_raw_2, i40e_flow_parse_fdir_filter },
1719         { pattern_fdir_vlan_ipv6_raw_3, i40e_flow_parse_fdir_filter },
1720         { pattern_fdir_vlan_ipv6_udp_raw_1, i40e_flow_parse_fdir_filter },
1721         { pattern_fdir_vlan_ipv6_udp_raw_2, i40e_flow_parse_fdir_filter },
1722         { pattern_fdir_vlan_ipv6_udp_raw_3, i40e_flow_parse_fdir_filter },
1723         { pattern_fdir_vlan_ipv6_tcp_raw_1, i40e_flow_parse_fdir_filter },
1724         { pattern_fdir_vlan_ipv6_tcp_raw_2, i40e_flow_parse_fdir_filter },
1725         { pattern_fdir_vlan_ipv6_tcp_raw_3, i40e_flow_parse_fdir_filter },
1726         { pattern_fdir_vlan_ipv6_sctp_raw_1, i40e_flow_parse_fdir_filter },
1727         { pattern_fdir_vlan_ipv6_sctp_raw_2, i40e_flow_parse_fdir_filter },
1728         { pattern_fdir_vlan_ipv6_sctp_raw_3, i40e_flow_parse_fdir_filter },
1729         /* FDIR - support VF item */
1730         { pattern_fdir_ipv4_vf, i40e_flow_parse_fdir_filter },
1731         { pattern_fdir_ipv4_udp_vf, i40e_flow_parse_fdir_filter },
1732         { pattern_fdir_ipv4_tcp_vf, i40e_flow_parse_fdir_filter },
1733         { pattern_fdir_ipv4_sctp_vf, i40e_flow_parse_fdir_filter },
1734         { pattern_fdir_ipv6_vf, i40e_flow_parse_fdir_filter },
1735         { pattern_fdir_ipv6_udp_vf, i40e_flow_parse_fdir_filter },
1736         { pattern_fdir_ipv6_tcp_vf, i40e_flow_parse_fdir_filter },
1737         { pattern_fdir_ipv6_sctp_vf, i40e_flow_parse_fdir_filter },
1738         { pattern_fdir_ethertype_raw_1_vf, i40e_flow_parse_fdir_filter },
1739         { pattern_fdir_ethertype_raw_2_vf, i40e_flow_parse_fdir_filter },
1740         { pattern_fdir_ethertype_raw_3_vf, i40e_flow_parse_fdir_filter },
1741         { pattern_fdir_ipv4_raw_1_vf, i40e_flow_parse_fdir_filter },
1742         { pattern_fdir_ipv4_raw_2_vf, i40e_flow_parse_fdir_filter },
1743         { pattern_fdir_ipv4_raw_3_vf, i40e_flow_parse_fdir_filter },
1744         { pattern_fdir_ipv4_udp_raw_1_vf, i40e_flow_parse_fdir_filter },
1745         { pattern_fdir_ipv4_udp_raw_2_vf, i40e_flow_parse_fdir_filter },
1746         { pattern_fdir_ipv4_udp_raw_3_vf, i40e_flow_parse_fdir_filter },
1747         { pattern_fdir_ipv4_tcp_raw_1_vf, i40e_flow_parse_fdir_filter },
1748         { pattern_fdir_ipv4_tcp_raw_2_vf, i40e_flow_parse_fdir_filter },
1749         { pattern_fdir_ipv4_tcp_raw_3_vf, i40e_flow_parse_fdir_filter },
1750         { pattern_fdir_ipv4_sctp_raw_1_vf, i40e_flow_parse_fdir_filter },
1751         { pattern_fdir_ipv4_sctp_raw_2_vf, i40e_flow_parse_fdir_filter },
1752         { pattern_fdir_ipv4_sctp_raw_3_vf, i40e_flow_parse_fdir_filter },
1753         { pattern_fdir_ipv6_raw_1_vf, i40e_flow_parse_fdir_filter },
1754         { pattern_fdir_ipv6_raw_2_vf, i40e_flow_parse_fdir_filter },
1755         { pattern_fdir_ipv6_raw_3_vf, i40e_flow_parse_fdir_filter },
1756         { pattern_fdir_ipv6_udp_raw_1_vf, i40e_flow_parse_fdir_filter },
1757         { pattern_fdir_ipv6_udp_raw_2_vf, i40e_flow_parse_fdir_filter },
1758         { pattern_fdir_ipv6_udp_raw_3_vf, i40e_flow_parse_fdir_filter },
1759         { pattern_fdir_ipv6_tcp_raw_1_vf, i40e_flow_parse_fdir_filter },
1760         { pattern_fdir_ipv6_tcp_raw_2_vf, i40e_flow_parse_fdir_filter },
1761         { pattern_fdir_ipv6_tcp_raw_3_vf, i40e_flow_parse_fdir_filter },
1762         { pattern_fdir_ipv6_sctp_raw_1_vf, i40e_flow_parse_fdir_filter },
1763         { pattern_fdir_ipv6_sctp_raw_2_vf, i40e_flow_parse_fdir_filter },
1764         { pattern_fdir_ipv6_sctp_raw_3_vf, i40e_flow_parse_fdir_filter },
1765         { pattern_fdir_ethertype_vlan_vf, i40e_flow_parse_fdir_filter },
1766         { pattern_fdir_vlan_ipv4_vf, i40e_flow_parse_fdir_filter },
1767         { pattern_fdir_vlan_ipv4_udp_vf, i40e_flow_parse_fdir_filter },
1768         { pattern_fdir_vlan_ipv4_tcp_vf, i40e_flow_parse_fdir_filter },
1769         { pattern_fdir_vlan_ipv4_sctp_vf, i40e_flow_parse_fdir_filter },
1770         { pattern_fdir_vlan_ipv6_vf, i40e_flow_parse_fdir_filter },
1771         { pattern_fdir_vlan_ipv6_udp_vf, i40e_flow_parse_fdir_filter },
1772         { pattern_fdir_vlan_ipv6_tcp_vf, i40e_flow_parse_fdir_filter },
1773         { pattern_fdir_vlan_ipv6_sctp_vf, i40e_flow_parse_fdir_filter },
1774         { pattern_fdir_ethertype_vlan_raw_1_vf, i40e_flow_parse_fdir_filter },
1775         { pattern_fdir_ethertype_vlan_raw_2_vf, i40e_flow_parse_fdir_filter },
1776         { pattern_fdir_ethertype_vlan_raw_3_vf, i40e_flow_parse_fdir_filter },
1777         { pattern_fdir_vlan_ipv4_raw_1_vf, i40e_flow_parse_fdir_filter },
1778         { pattern_fdir_vlan_ipv4_raw_2_vf, i40e_flow_parse_fdir_filter },
1779         { pattern_fdir_vlan_ipv4_raw_3_vf, i40e_flow_parse_fdir_filter },
1780         { pattern_fdir_vlan_ipv4_udp_raw_1_vf, i40e_flow_parse_fdir_filter },
1781         { pattern_fdir_vlan_ipv4_udp_raw_2_vf, i40e_flow_parse_fdir_filter },
1782         { pattern_fdir_vlan_ipv4_udp_raw_3_vf, i40e_flow_parse_fdir_filter },
1783         { pattern_fdir_vlan_ipv4_tcp_raw_1_vf, i40e_flow_parse_fdir_filter },
1784         { pattern_fdir_vlan_ipv4_tcp_raw_2_vf, i40e_flow_parse_fdir_filter },
1785         { pattern_fdir_vlan_ipv4_tcp_raw_3_vf, i40e_flow_parse_fdir_filter },
1786         { pattern_fdir_vlan_ipv4_sctp_raw_1_vf, i40e_flow_parse_fdir_filter },
1787         { pattern_fdir_vlan_ipv4_sctp_raw_2_vf, i40e_flow_parse_fdir_filter },
1788         { pattern_fdir_vlan_ipv4_sctp_raw_3_vf, i40e_flow_parse_fdir_filter },
1789         { pattern_fdir_vlan_ipv6_raw_1_vf, i40e_flow_parse_fdir_filter },
1790         { pattern_fdir_vlan_ipv6_raw_2_vf, i40e_flow_parse_fdir_filter },
1791         { pattern_fdir_vlan_ipv6_raw_3_vf, i40e_flow_parse_fdir_filter },
1792         { pattern_fdir_vlan_ipv6_udp_raw_1_vf, i40e_flow_parse_fdir_filter },
1793         { pattern_fdir_vlan_ipv6_udp_raw_2_vf, i40e_flow_parse_fdir_filter },
1794         { pattern_fdir_vlan_ipv6_udp_raw_3_vf, i40e_flow_parse_fdir_filter },
1795         { pattern_fdir_vlan_ipv6_tcp_raw_1_vf, i40e_flow_parse_fdir_filter },
1796         { pattern_fdir_vlan_ipv6_tcp_raw_2_vf, i40e_flow_parse_fdir_filter },
1797         { pattern_fdir_vlan_ipv6_tcp_raw_3_vf, i40e_flow_parse_fdir_filter },
1798         { pattern_fdir_vlan_ipv6_sctp_raw_1_vf, i40e_flow_parse_fdir_filter },
1799         { pattern_fdir_vlan_ipv6_sctp_raw_2_vf, i40e_flow_parse_fdir_filter },
1800         { pattern_fdir_vlan_ipv6_sctp_raw_3_vf, i40e_flow_parse_fdir_filter },
1801         /* VXLAN */
1802         { pattern_vxlan_1, i40e_flow_parse_vxlan_filter },
1803         { pattern_vxlan_2, i40e_flow_parse_vxlan_filter },
1804         { pattern_vxlan_3, i40e_flow_parse_vxlan_filter },
1805         { pattern_vxlan_4, i40e_flow_parse_vxlan_filter },
1806         /* NVGRE */
1807         { pattern_nvgre_1, i40e_flow_parse_nvgre_filter },
1808         { pattern_nvgre_2, i40e_flow_parse_nvgre_filter },
1809         { pattern_nvgre_3, i40e_flow_parse_nvgre_filter },
1810         { pattern_nvgre_4, i40e_flow_parse_nvgre_filter },
1811         /* MPLSoUDP & MPLSoGRE */
1812         { pattern_mpls_1, i40e_flow_parse_mpls_filter },
1813         { pattern_mpls_2, i40e_flow_parse_mpls_filter },
1814         { pattern_mpls_3, i40e_flow_parse_mpls_filter },
1815         { pattern_mpls_4, i40e_flow_parse_mpls_filter },
1816         /* GTP-C & GTP-U */
1817         { pattern_fdir_ipv4_gtpc, i40e_flow_parse_gtp_filter },
1818         { pattern_fdir_ipv4_gtpu, i40e_flow_parse_gtp_filter },
1819         { pattern_fdir_ipv6_gtpc, i40e_flow_parse_gtp_filter },
1820         { pattern_fdir_ipv6_gtpu, i40e_flow_parse_gtp_filter },
1821         /* QINQ */
1822         { pattern_qinq_1, i40e_flow_parse_qinq_filter },
1823 };
1824
/* Advance 'act' to the first non-VOID action at or after actions[index].
 * On exit 'act' points to that action and 'index' is its position.
 * The caller must pass an action list terminated by
 * RTE_FLOW_ACTION_TYPE_END, which is never VOID and thus stops the scan.
 */
#define NEXT_ITEM_OF_ACTION(act, actions, index)                        \
	do {                                                            \
		act = actions + index;                                  \
		while (act->type == RTE_FLOW_ACTION_TYPE_VOID) {        \
			index++;                                        \
			act = actions + index;                          \
		}                                                       \
	} while (0)
1833
1834 /* Find the first VOID or non-VOID item pointer */
1835 static const struct rte_flow_item *
1836 i40e_find_first_item(const struct rte_flow_item *item, bool is_void)
1837 {
1838         bool is_find;
1839
1840         while (item->type != RTE_FLOW_ITEM_TYPE_END) {
1841                 if (is_void)
1842                         is_find = item->type == RTE_FLOW_ITEM_TYPE_VOID;
1843                 else
1844                         is_find = item->type != RTE_FLOW_ITEM_TYPE_VOID;
1845                 if (is_find)
1846                         break;
1847                 item++;
1848         }
1849         return item;
1850 }
1851
1852 /* Skip all VOID items of the pattern */
1853 static void
1854 i40e_pattern_skip_void_item(struct rte_flow_item *items,
1855                             const struct rte_flow_item *pattern)
1856 {
1857         uint32_t cpy_count = 0;
1858         const struct rte_flow_item *pb = pattern, *pe = pattern;
1859
1860         for (;;) {
1861                 /* Find a non-void item first */
1862                 pb = i40e_find_first_item(pb, false);
1863                 if (pb->type == RTE_FLOW_ITEM_TYPE_END) {
1864                         pe = pb;
1865                         break;
1866                 }
1867
1868                 /* Find a void item */
1869                 pe = i40e_find_first_item(pb + 1, true);
1870
1871                 cpy_count = pe - pb;
1872                 rte_memcpy(items, pb, sizeof(struct rte_flow_item) * cpy_count);
1873
1874                 items += cpy_count;
1875
1876                 if (pe->type == RTE_FLOW_ITEM_TYPE_END) {
1877                         pb = pe;
1878                         break;
1879                 }
1880
1881                 pb = pe + 1;
1882         }
1883         /* Copy the END item. */
1884         rte_memcpy(items, pe, sizeof(struct rte_flow_item));
1885 }
1886
1887 /* Check if the pattern matches a supported item type array */
1888 static bool
1889 i40e_match_pattern(enum rte_flow_item_type *item_array,
1890                    struct rte_flow_item *pattern)
1891 {
1892         struct rte_flow_item *item = pattern;
1893
1894         while ((*item_array == item->type) &&
1895                (*item_array != RTE_FLOW_ITEM_TYPE_END)) {
1896                 item_array++;
1897                 item++;
1898         }
1899
1900         return (*item_array == RTE_FLOW_ITEM_TYPE_END &&
1901                 item->type == RTE_FLOW_ITEM_TYPE_END);
1902 }
1903
1904 /* Find if there's parse filter function matched */
1905 static parse_filter_t
1906 i40e_find_parse_filter_func(struct rte_flow_item *pattern, uint32_t *idx)
1907 {
1908         parse_filter_t parse_filter = NULL;
1909         uint8_t i = *idx;
1910
1911         for (; i < RTE_DIM(i40e_supported_patterns); i++) {
1912                 if (i40e_match_pattern(i40e_supported_patterns[i].items,
1913                                         pattern)) {
1914                         parse_filter = i40e_supported_patterns[i].parse_filter;
1915                         break;
1916                 }
1917         }
1918
1919         *idx = ++i;
1920
1921         return parse_filter;
1922 }
1923
/* Parse attributes.
 * Only the default group (0), priority 0 and the ingress direction are
 * supported; any other attribute fails with EINVAL and a specific error
 * type so the application can tell which attribute was rejected.
 * Returns 0 on success, -rte_errno otherwise.
 */
static int
i40e_flow_parse_attr(const struct rte_flow_attr *attr,
		     struct rte_flow_error *error)
{
	/* Must be input direction */
	if (!attr->ingress) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
				   attr, "Only support ingress.");
		return -rte_errno;
	}

	/* Not supported */
	if (attr->egress) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
				   attr, "Not support egress.");
		return -rte_errno;
	}

	/* Not supported */
	if (attr->priority) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
				   attr, "Not support priority.");
		return -rte_errno;
	}

	/* Not supported */
	if (attr->group) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
				   attr, "Not support group.");
		return -rte_errno;
	}

	return 0;
}
1963
1964 static uint16_t
1965 i40e_get_outer_vlan(struct rte_eth_dev *dev)
1966 {
1967         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1968         int qinq = dev->data->dev_conf.rxmode.hw_vlan_extend;
1969         uint64_t reg_r = 0;
1970         uint16_t reg_id;
1971         uint16_t tpid;
1972
1973         if (qinq)
1974                 reg_id = 2;
1975         else
1976                 reg_id = 3;
1977
1978         i40e_aq_debug_read_register(hw, I40E_GL_SWT_L2TAGCTRL(reg_id),
1979                                     &reg_r, NULL);
1980
1981         tpid = (reg_r >> I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_SHIFT) & 0xFFFF;
1982
1983         return tpid;
1984 }
1985
/* 1. Last in item should be NULL as range is not supported.
 * 2. Supported filter types: MAC_ETHTYPE and ETHTYPE.
 * 3. SRC mac_addr mask should be 00:00:00:00:00:00.
 * 4. DST mac_addr mask should be 00:00:00:00:00:00 or
 *    FF:FF:FF:FF:FF:FF
 * 5. Ether_type mask should be 0xFFFF.
 */
static int
i40e_flow_parse_ethertype_pattern(struct rte_eth_dev *dev,
				  const struct rte_flow_item *pattern,
				  struct rte_flow_error *error,
				  struct rte_eth_ethertype_filter *filter)
{
	const struct rte_flow_item *item = pattern;
	const struct rte_flow_item_eth *eth_spec;
	const struct rte_flow_item_eth *eth_mask;
	enum rte_flow_item_type item_type;
	uint16_t outer_tpid;

	/* TPID of the outer VLAN tag; packets carrying it must go through
	 * the VLAN path, so it is rejected as an ethertype filter below.
	 */
	outer_tpid = i40e_get_outer_vlan(dev);

	for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
		/* A non-NULL 'last' requests a range match (rule 1). */
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   item,
					   "Not support range");
			return -rte_errno;
		}
		item_type = item->type;
		switch (item_type) {
		case RTE_FLOW_ITEM_TYPE_ETH:
			eth_spec = (const struct rte_flow_item_eth *)item->spec;
			eth_mask = (const struct rte_flow_item_eth *)item->mask;
			/* Get the MAC info. */
			if (!eth_spec || !eth_mask) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "NULL ETH spec/mask");
				return -rte_errno;
			}

			/* Mask bits of source MAC address must be full of 0.
			 * Mask bits of destination MAC address must be full
			 * of 1 or full of 0.
			 */
			if (!is_zero_ether_addr(&eth_mask->src) ||
			    (!is_zero_ether_addr(&eth_mask->dst) &&
			     !is_broadcast_ether_addr(&eth_mask->dst))) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid MAC_addr mask");
				return -rte_errno;
			}

			/* Ethertype must be matched exactly (rule 5). */
			if ((eth_mask->type & UINT16_MAX) != UINT16_MAX) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid ethertype mask");
				return -rte_errno;
			}

			/* If mask bits of destination MAC address
			 * are full of 1, set RTE_ETHTYPE_FLAGS_MAC.
			 */
			if (is_broadcast_ether_addr(&eth_mask->dst)) {
				filter->mac_addr = eth_spec->dst;
				filter->flags |= RTE_ETHTYPE_FLAGS_MAC;
			} else {
				filter->flags &= ~RTE_ETHTYPE_FLAGS_MAC;
			}
			/* spec->type is big-endian on the wire. */
			filter->ether_type = rte_be_to_cpu_16(eth_spec->type);

			/* IPv4/IPv6/LLDP/outer-TPID traffic is handled by
			 * other filter paths and cannot be captured here.
			 */
			if (filter->ether_type == ETHER_TYPE_IPv4 ||
			    filter->ether_type == ETHER_TYPE_IPv6 ||
			    filter->ether_type == ETHER_TYPE_LLDP ||
			    filter->ether_type == outer_tpid) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Unsupported ether_type in"
						   " control packet filter.");
				return -rte_errno;
			}
			break;
		default:
			break;
		}
	}

	return 0;
}
2081
2082 /* Ethertype action only supports QUEUE or DROP. */
2083 static int
2084 i40e_flow_parse_ethertype_action(struct rte_eth_dev *dev,
2085                                  const struct rte_flow_action *actions,
2086                                  struct rte_flow_error *error,
2087                                  struct rte_eth_ethertype_filter *filter)
2088 {
2089         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2090         const struct rte_flow_action *act;
2091         const struct rte_flow_action_queue *act_q;
2092         uint32_t index = 0;
2093
2094         /* Check if the first non-void action is QUEUE or DROP. */
2095         NEXT_ITEM_OF_ACTION(act, actions, index);
2096         if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
2097             act->type != RTE_FLOW_ACTION_TYPE_DROP) {
2098                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
2099                                    act, "Not supported action.");
2100                 return -rte_errno;
2101         }
2102
2103         if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
2104                 act_q = (const struct rte_flow_action_queue *)act->conf;
2105                 filter->queue = act_q->index;
2106                 if (filter->queue >= pf->dev_data->nb_rx_queues) {
2107                         rte_flow_error_set(error, EINVAL,
2108                                            RTE_FLOW_ERROR_TYPE_ACTION,
2109                                            act, "Invalid queue ID for"
2110                                            " ethertype_filter.");
2111                         return -rte_errno;
2112                 }
2113         } else {
2114                 filter->flags |= RTE_ETHTYPE_FLAGS_DROP;
2115         }
2116
2117         /* Check if the next non-void item is END */
2118         index++;
2119         NEXT_ITEM_OF_ACTION(act, actions, index);
2120         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
2121                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
2122                                    act, "Not supported action.");
2123                 return -rte_errno;
2124         }
2125
2126         return 0;
2127 }
2128
2129 static int
2130 i40e_flow_parse_ethertype_filter(struct rte_eth_dev *dev,
2131                                  const struct rte_flow_attr *attr,
2132                                  const struct rte_flow_item pattern[],
2133                                  const struct rte_flow_action actions[],
2134                                  struct rte_flow_error *error,
2135                                  union i40e_filter_t *filter)
2136 {
2137         struct rte_eth_ethertype_filter *ethertype_filter =
2138                 &filter->ethertype_filter;
2139         int ret;
2140
2141         ret = i40e_flow_parse_ethertype_pattern(dev, pattern, error,
2142                                                 ethertype_filter);
2143         if (ret)
2144                 return ret;
2145
2146         ret = i40e_flow_parse_ethertype_action(dev, actions, error,
2147                                                ethertype_filter);
2148         if (ret)
2149                 return ret;
2150
2151         ret = i40e_flow_parse_attr(attr, error);
2152         if (ret)
2153                 return ret;
2154
2155         cons_filter_type = RTE_ETH_FILTER_ETHERTYPE;
2156
2157         return ret;
2158 }
2159
/* Validate a RAW pattern item against the FDIR flexible-payload
 * constraints: the item must be relative, its offset even and
 * non-negative, and the unsupported search/limit features unset.
 * Returns 0 if valid, -rte_errno otherwise.  The checks run in a fixed
 * order, so the first violated constraint determines the error message.
 */
static int
i40e_flow_check_raw_item(const struct rte_flow_item *item,
			 const struct rte_flow_item_raw *raw_spec,
			 struct rte_flow_error *error)
{
	if (!raw_spec->relative) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM,
				   item,
				   "Relative should be 1.");
		return -rte_errno;
	}

	/* Flexible payload is extracted in 16-bit words. */
	if (raw_spec->offset % sizeof(uint16_t)) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM,
				   item,
				   "Offset should be even.");
		return -rte_errno;
	}

	if (raw_spec->search || raw_spec->limit) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM,
				   item,
				   "search or limit is not supported.");
		return -rte_errno;
	}

	if (raw_spec->offset < 0) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM,
				   item,
				   "Offset should be non-negative.");
		return -rte_errno;
	}
	return 0;
}
2198
2199 static int
2200 i40e_flow_store_flex_pit(struct i40e_pf *pf,
2201                          struct i40e_fdir_flex_pit *flex_pit,
2202                          enum i40e_flxpld_layer_idx layer_idx,
2203                          uint8_t raw_id)
2204 {
2205         uint8_t field_idx;
2206
2207         field_idx = layer_idx * I40E_MAX_FLXPLD_FIED + raw_id;
2208         /* Check if the configuration is conflicted */
2209         if (pf->fdir.flex_pit_flag[layer_idx] &&
2210             (pf->fdir.flex_set[field_idx].src_offset != flex_pit->src_offset ||
2211              pf->fdir.flex_set[field_idx].size != flex_pit->size ||
2212              pf->fdir.flex_set[field_idx].dst_offset != flex_pit->dst_offset))
2213                 return -1;
2214
2215         /* Check if the configuration exists. */
2216         if (pf->fdir.flex_pit_flag[layer_idx] &&
2217             (pf->fdir.flex_set[field_idx].src_offset == flex_pit->src_offset &&
2218              pf->fdir.flex_set[field_idx].size == flex_pit->size &&
2219              pf->fdir.flex_set[field_idx].dst_offset == flex_pit->dst_offset))
2220                 return 1;
2221
2222         pf->fdir.flex_set[field_idx].src_offset =
2223                 flex_pit->src_offset;
2224         pf->fdir.flex_set[field_idx].size =
2225                 flex_pit->size;
2226         pf->fdir.flex_set[field_idx].dst_offset =
2227                 flex_pit->dst_offset;
2228
2229         return 0;
2230 }
2231
2232 static int
2233 i40e_flow_store_flex_mask(struct i40e_pf *pf,
2234                           enum i40e_filter_pctype pctype,
2235                           uint8_t *mask)
2236 {
2237         struct i40e_fdir_flex_mask flex_mask;
2238         uint16_t mask_tmp;
2239         uint8_t i, nb_bitmask = 0;
2240
2241         memset(&flex_mask, 0, sizeof(struct i40e_fdir_flex_mask));
2242         for (i = 0; i < I40E_FDIR_MAX_FLEX_LEN; i += sizeof(uint16_t)) {
2243                 mask_tmp = I40E_WORD(mask[i], mask[i + 1]);
2244                 if (mask_tmp) {
2245                         flex_mask.word_mask |=
2246                                 I40E_FLEX_WORD_MASK(i / sizeof(uint16_t));
2247                         if (mask_tmp != UINT16_MAX) {
2248                                 flex_mask.bitmask[nb_bitmask].mask = ~mask_tmp;
2249                                 flex_mask.bitmask[nb_bitmask].offset =
2250                                         i / sizeof(uint16_t);
2251                                 nb_bitmask++;
2252                                 if (nb_bitmask > I40E_FDIR_BITMASK_NUM_WORD)
2253                                         return -1;
2254                         }
2255                 }
2256         }
2257         flex_mask.nb_bitmask = nb_bitmask;
2258
2259         if (pf->fdir.flex_mask_flag[pctype] &&
2260             (memcmp(&flex_mask, &pf->fdir.flex_mask[pctype],
2261                     sizeof(struct i40e_fdir_flex_mask))))
2262                 return -2;
2263         else if (pf->fdir.flex_mask_flag[pctype] &&
2264                  !(memcmp(&flex_mask, &pf->fdir.flex_mask[pctype],
2265                           sizeof(struct i40e_fdir_flex_mask))))
2266                 return 1;
2267
2268         memcpy(&pf->fdir.flex_mask[pctype], &flex_mask,
2269                sizeof(struct i40e_fdir_flex_mask));
2270         return 0;
2271 }
2272
/* Program the PRTQF_FLX_PIT registers of one flexible payload layer
 * from the entries stored in pf->fdir.flex_set by
 * i40e_flow_store_flex_pit(), then mark the layer as programmed.
 * Entries [0, raw_id) are written as configured; the remaining
 * registers of the layer are filled with non-used values.
 */
static void
i40e_flow_set_fdir_flex_pit(struct i40e_pf *pf,
			    enum i40e_flxpld_layer_idx layer_idx,
			    uint8_t raw_id)
{
	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
	uint32_t flx_pit;
	uint8_t field_idx;
	uint16_t min_next_off = 0;  /* in words */
	uint8_t i;

	/* Set flex pit */
	for (i = 0; i < raw_id; i++) {
		field_idx = layer_idx * I40E_MAX_FLXPLD_FIED + i;
		flx_pit = MK_FLX_PIT(pf->fdir.flex_set[field_idx].src_offset,
				     pf->fdir.flex_set[field_idx].size,
				     pf->fdir.flex_set[field_idx].dst_offset);

		I40E_WRITE_REG(hw, I40E_PRTQF_FLX_PIT(field_idx), flx_pit);
		/* Track the first word after the last configured entry;
		 * unused registers below must start at or after it.
		 */
		min_next_off = pf->fdir.flex_set[field_idx].src_offset +
			pf->fdir.flex_set[field_idx].size;
	}

	for (; i < I40E_MAX_FLXPLD_FIED; i++) {
		/* set the non-used register obeying register's constrain */
		field_idx = layer_idx * I40E_MAX_FLXPLD_FIED + i;
		flx_pit = MK_FLX_PIT(min_next_off, NONUSE_FLX_PIT_FSIZE,
				     NONUSE_FLX_PIT_DEST_OFF);
		I40E_WRITE_REG(hw, I40E_PRTQF_FLX_PIT(field_idx), flx_pit);
		min_next_off++;
	}

	pf->fdir.flex_pit_flag[layer_idx] = 1;
}
2307
/* Program the flexible payload mask registers for 'pctype' from the
 * mask stored in pf->fdir.flex_mask by i40e_flow_store_flex_mask():
 * the word-level inset register first, then one PRTQF_FD_MSK register
 * per partially-masked word.  Marks the pctype as programmed.
 */
static void
i40e_flow_set_fdir_flex_msk(struct i40e_pf *pf,
			    enum i40e_filter_pctype pctype)
{
	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
	struct i40e_fdir_flex_mask *flex_mask;
	uint32_t flxinset, fd_mask;
	uint8_t i;

	/* Set flex mask */
	flex_mask = &pf->fdir.flex_mask[pctype];
	flxinset = (flex_mask->word_mask <<
		    I40E_PRTQF_FD_FLXINSET_INSET_SHIFT) &
		I40E_PRTQF_FD_FLXINSET_INSET_MASK;
	i40e_write_rx_ctl(hw, I40E_PRTQF_FD_FLXINSET(pctype), flxinset);

	/* One mask register per partially-masked flexible word. */
	for (i = 0; i < flex_mask->nb_bitmask; i++) {
		fd_mask = (flex_mask->bitmask[i].mask <<
			   I40E_PRTQF_FD_MSK_MASK_SHIFT) &
			I40E_PRTQF_FD_MSK_MASK_MASK;
		/* Offsets are relative to the flex area of the field
		 * vector, hence the I40E_FLX_OFFSET_IN_FIELD_VECTOR bias.
		 */
		fd_mask |= ((flex_mask->bitmask[i].offset +
			     I40E_FLX_OFFSET_IN_FIELD_VECTOR) <<
			    I40E_PRTQF_FD_MSK_OFFSET_SHIFT) &
			I40E_PRTQF_FD_MSK_OFFSET_MASK;
		i40e_write_rx_ctl(hw, I40E_PRTQF_FD_MSK(pctype, i), fd_mask);
	}

	pf->fdir.flex_mask_flag[pctype] = 1;
}
2337
/* Validate and program the FDIR input set for 'pctype', then record it
 * in the PF state.
 * Returns 0 on success (including when the identical input set is
 * already programmed), -EINVAL for an invalid input set, -1 when a
 * different input set is already programmed for this pctype.
 */
static int
i40e_flow_set_fdir_inset(struct i40e_pf *pf,
			 enum i40e_filter_pctype pctype,
			 uint64_t input_set)
{
	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
	uint64_t inset_reg = 0;
	uint32_t mask_reg[I40E_INSET_MASK_NUM_REG] = {0};
	int i, num;

	/* Check if the input set is valid */
	if (i40e_validate_input_set(pctype, RTE_ETH_FILTER_FDIR,
				    input_set) != 0) {
		PMD_DRV_LOG(ERR, "Invalid input set");
		return -EINVAL;
	}

	/* Check if the configuration is conflicted */
	if (pf->fdir.inset_flag[pctype] &&
	    memcmp(&pf->fdir.input_set[pctype], &input_set, sizeof(uint64_t)))
		return -1;

	/* Same input set already programmed: nothing to do. */
	if (pf->fdir.inset_flag[pctype] &&
	    !memcmp(&pf->fdir.input_set[pctype], &input_set, sizeof(uint64_t)))
		return 0;

	num = i40e_generate_inset_mask_reg(input_set, mask_reg,
					   I40E_INSET_MASK_NUM_REG);
	if (num < 0)
		return -EINVAL;

	inset_reg |= i40e_translate_input_set_reg(hw->mac.type, input_set);

	/* The 64-bit inset value spans two 32-bit registers. */
	i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 0),
			     (uint32_t)(inset_reg & UINT32_MAX));
	i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 1),
			     (uint32_t)((inset_reg >>
					 I40E_32_BIT_WIDTH) & UINT32_MAX));

	for (i = 0; i < num; i++)
		i40e_check_write_reg(hw, I40E_GLQF_FD_MSK(i, pctype),
				     mask_reg[i]);

	/*clear unused mask registers of the pctype */
	for (i = num; i < I40E_INSET_MASK_NUM_REG; i++)
		i40e_check_write_reg(hw, I40E_GLQF_FD_MSK(i, pctype), 0);
	I40E_WRITE_FLUSH(hw);

	pf->fdir.input_set[pctype] = input_set;
	pf->fdir.inset_flag[pctype] = 1;
	return 0;
}
2390
2391 static uint8_t
2392 i40e_flow_fdir_get_pctype_value(struct i40e_pf *pf,
2393                                 enum rte_flow_item_type item_type,
2394                                 struct i40e_fdir_filter_conf *filter)
2395 {
2396         struct i40e_customized_pctype *cus_pctype = NULL;
2397
2398         switch (item_type) {
2399         case RTE_FLOW_ITEM_TYPE_GTPC:
2400                 cus_pctype = i40e_find_customized_pctype(pf,
2401                                                          I40E_CUSTOMIZED_GTPC);
2402                 break;
2403         case RTE_FLOW_ITEM_TYPE_GTPU:
2404                 if (!filter->input.flow_ext.inner_ip)
2405                         cus_pctype = i40e_find_customized_pctype(pf,
2406                                                          I40E_CUSTOMIZED_GTPU);
2407                 else if (filter->input.flow_ext.iip_type ==
2408                          I40E_FDIR_IPTYPE_IPV4)
2409                         cus_pctype = i40e_find_customized_pctype(pf,
2410                                                  I40E_CUSTOMIZED_GTPU_IPV4);
2411                 else if (filter->input.flow_ext.iip_type ==
2412                          I40E_FDIR_IPTYPE_IPV6)
2413                         cus_pctype = i40e_find_customized_pctype(pf,
2414                                                  I40E_CUSTOMIZED_GTPU_IPV6);
2415                 break;
2416         default:
2417                 PMD_DRV_LOG(ERR, "Unsupported item type");
2418                 break;
2419         }
2420
2421         if (cus_pctype)
2422                 return cus_pctype->pctype;
2423
2424         return I40E_FILTER_PCTYPE_INVALID;
2425 }
2426
2427 /* 1. Last in item should be NULL as range is not supported.
2428  * 2. Supported patterns: refer to array i40e_supported_patterns.
2429  * 3. Default supported flow type and input set: refer to array
2430  *    valid_fdir_inset_table in i40e_ethdev.c.
 * 4. Mask of fields which need to be matched should be
 *    filled with 1.
 * 5. Mask of fields which need not be matched should be
 *    filled with 0.
2435  * 6. GTP profile supports GTPv1 only.
2436  * 7. GTP-C response message ('source_port' = 2123) is not supported.
2437  */
2438 static int
2439 i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
2440                              const struct rte_flow_item *pattern,
2441                              struct rte_flow_error *error,
2442                              struct i40e_fdir_filter_conf *filter)
2443 {
2444         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2445         const struct rte_flow_item *item = pattern;
2446         const struct rte_flow_item_eth *eth_spec, *eth_mask;
2447         const struct rte_flow_item_vlan *vlan_spec, *vlan_mask;
2448         const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
2449         const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
2450         const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
2451         const struct rte_flow_item_udp *udp_spec, *udp_mask;
2452         const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
2453         const struct rte_flow_item_gtp *gtp_spec, *gtp_mask;
2454         const struct rte_flow_item_raw *raw_spec, *raw_mask;
2455         const struct rte_flow_item_vf *vf_spec;
2456
2457         uint8_t pctype = 0;
2458         uint64_t input_set = I40E_INSET_NONE;
2459         uint16_t frag_off;
2460         enum rte_flow_item_type item_type;
2461         enum rte_flow_item_type l3 = RTE_FLOW_ITEM_TYPE_END;
2462         enum rte_flow_item_type cus_proto = RTE_FLOW_ITEM_TYPE_END;
2463         uint32_t i, j;
2464         uint8_t  ipv6_addr_mask[16] = {
2465                 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
2466                 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
2467         enum i40e_flxpld_layer_idx layer_idx = I40E_FLXPLD_L2_IDX;
2468         uint8_t raw_id = 0;
2469         int32_t off_arr[I40E_MAX_FLXPLD_FIED];
2470         uint16_t len_arr[I40E_MAX_FLXPLD_FIED];
2471         struct i40e_fdir_flex_pit flex_pit;
2472         uint8_t next_dst_off = 0;
2473         uint8_t flex_mask[I40E_FDIR_MAX_FLEX_LEN];
2474         uint16_t flex_size;
2475         bool cfg_flex_pit = true;
2476         bool cfg_flex_msk = true;
2477         uint16_t outer_tpid;
2478         uint16_t ether_type;
2479         uint32_t vtc_flow_cpu;
2480         bool outer_ip = true;
2481         int ret;
2482
2483         memset(off_arr, 0, sizeof(off_arr));
2484         memset(len_arr, 0, sizeof(len_arr));
2485         memset(flex_mask, 0, I40E_FDIR_MAX_FLEX_LEN);
2486         outer_tpid = i40e_get_outer_vlan(dev);
2487         filter->input.flow_ext.customized_pctype = false;
2488         for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
2489                 if (item->last) {
2490                         rte_flow_error_set(error, EINVAL,
2491                                            RTE_FLOW_ERROR_TYPE_ITEM,
2492                                            item,
2493                                            "Not support range");
2494                         return -rte_errno;
2495                 }
2496                 item_type = item->type;
2497                 switch (item_type) {
2498                 case RTE_FLOW_ITEM_TYPE_ETH:
2499                         eth_spec = (const struct rte_flow_item_eth *)item->spec;
2500                         eth_mask = (const struct rte_flow_item_eth *)item->mask;
2501
2502                         if (eth_spec && eth_mask) {
2503                                 if (!is_zero_ether_addr(&eth_mask->src) ||
2504                                     !is_zero_ether_addr(&eth_mask->dst)) {
2505                                         rte_flow_error_set(error, EINVAL,
2506                                                       RTE_FLOW_ERROR_TYPE_ITEM,
2507                                                       item,
2508                                                       "Invalid MAC_addr mask.");
2509                                         return -rte_errno;
2510                                 }
2511
2512                                 if ((eth_mask->type & UINT16_MAX) ==
2513                                     UINT16_MAX) {
2514                                         input_set |= I40E_INSET_LAST_ETHER_TYPE;
2515                                         filter->input.flow.l2_flow.ether_type =
2516                                                 eth_spec->type;
2517                                 }
2518
2519                                 ether_type = rte_be_to_cpu_16(eth_spec->type);
2520                                 if (ether_type == ETHER_TYPE_IPv4 ||
2521                                     ether_type == ETHER_TYPE_IPv6 ||
2522                                     ether_type == ETHER_TYPE_ARP ||
2523                                     ether_type == outer_tpid) {
2524                                         rte_flow_error_set(error, EINVAL,
2525                                                      RTE_FLOW_ERROR_TYPE_ITEM,
2526                                                      item,
2527                                                      "Unsupported ether_type.");
2528                                         return -rte_errno;
2529                                 }
2530                         }
2531
2532                         pctype = I40E_FILTER_PCTYPE_L2_PAYLOAD;
2533                         layer_idx = I40E_FLXPLD_L2_IDX;
2534
2535                         break;
2536                 case RTE_FLOW_ITEM_TYPE_VLAN:
2537                         vlan_spec =
2538                                 (const struct rte_flow_item_vlan *)item->spec;
2539                         vlan_mask =
2540                                 (const struct rte_flow_item_vlan *)item->mask;
2541                         if (vlan_spec && vlan_mask) {
2542                                 if (vlan_mask->tci ==
2543                                     rte_cpu_to_be_16(I40E_TCI_MASK)) {
2544                                         input_set |= I40E_INSET_VLAN_INNER;
2545                                         filter->input.flow_ext.vlan_tci =
2546                                                 vlan_spec->tci;
2547                                 }
2548                         }
2549
2550                         pctype = I40E_FILTER_PCTYPE_L2_PAYLOAD;
2551                         layer_idx = I40E_FLXPLD_L2_IDX;
2552
2553                         break;
2554                 case RTE_FLOW_ITEM_TYPE_IPV4:
2555                         l3 = RTE_FLOW_ITEM_TYPE_IPV4;
2556                         ipv4_spec =
2557                                 (const struct rte_flow_item_ipv4 *)item->spec;
2558                         ipv4_mask =
2559                                 (const struct rte_flow_item_ipv4 *)item->mask;
2560                         pctype = I40E_FILTER_PCTYPE_NONF_IPV4_OTHER;
2561                         layer_idx = I40E_FLXPLD_L3_IDX;
2562
2563                         if (ipv4_spec && ipv4_mask && outer_ip) {
2564                                 /* Check IPv4 mask and update input set */
2565                                 if (ipv4_mask->hdr.version_ihl ||
2566                                     ipv4_mask->hdr.total_length ||
2567                                     ipv4_mask->hdr.packet_id ||
2568                                     ipv4_mask->hdr.fragment_offset ||
2569                                     ipv4_mask->hdr.hdr_checksum) {
2570                                         rte_flow_error_set(error, EINVAL,
2571                                                    RTE_FLOW_ERROR_TYPE_ITEM,
2572                                                    item,
2573                                                    "Invalid IPv4 mask.");
2574                                         return -rte_errno;
2575                                 }
2576
2577                                 if (ipv4_mask->hdr.src_addr == UINT32_MAX)
2578                                         input_set |= I40E_INSET_IPV4_SRC;
2579                                 if (ipv4_mask->hdr.dst_addr == UINT32_MAX)
2580                                         input_set |= I40E_INSET_IPV4_DST;
2581                                 if (ipv4_mask->hdr.type_of_service == UINT8_MAX)
2582                                         input_set |= I40E_INSET_IPV4_TOS;
2583                                 if (ipv4_mask->hdr.time_to_live == UINT8_MAX)
2584                                         input_set |= I40E_INSET_IPV4_TTL;
2585                                 if (ipv4_mask->hdr.next_proto_id == UINT8_MAX)
2586                                         input_set |= I40E_INSET_IPV4_PROTO;
2587
2588                                 /* Check if it is fragment. */
2589                                 frag_off = ipv4_spec->hdr.fragment_offset;
2590                                 frag_off = rte_be_to_cpu_16(frag_off);
2591                                 if (frag_off & IPV4_HDR_OFFSET_MASK ||
2592                                     frag_off & IPV4_HDR_MF_FLAG)
2593                                         pctype = I40E_FILTER_PCTYPE_FRAG_IPV4;
2594
2595                                 /* Get the filter info */
2596                                 filter->input.flow.ip4_flow.proto =
2597                                         ipv4_spec->hdr.next_proto_id;
2598                                 filter->input.flow.ip4_flow.tos =
2599                                         ipv4_spec->hdr.type_of_service;
2600                                 filter->input.flow.ip4_flow.ttl =
2601                                         ipv4_spec->hdr.time_to_live;
2602                                 filter->input.flow.ip4_flow.src_ip =
2603                                         ipv4_spec->hdr.src_addr;
2604                                 filter->input.flow.ip4_flow.dst_ip =
2605                                         ipv4_spec->hdr.dst_addr;
2606                         } else if (!ipv4_spec && !ipv4_mask && !outer_ip) {
2607                                 filter->input.flow_ext.inner_ip = true;
2608                                 filter->input.flow_ext.iip_type =
2609                                         I40E_FDIR_IPTYPE_IPV4;
2610                         } else if ((ipv4_spec || ipv4_mask) && !outer_ip) {
2611                                 rte_flow_error_set(error, EINVAL,
2612                                                    RTE_FLOW_ERROR_TYPE_ITEM,
2613                                                    item,
2614                                                    "Invalid inner IPv4 mask.");
2615                                 return -rte_errno;
2616                         }
2617
2618                         if (outer_ip)
2619                                 outer_ip = false;
2620
2621                         break;
2622                 case RTE_FLOW_ITEM_TYPE_IPV6:
2623                         l3 = RTE_FLOW_ITEM_TYPE_IPV6;
2624                         ipv6_spec =
2625                                 (const struct rte_flow_item_ipv6 *)item->spec;
2626                         ipv6_mask =
2627                                 (const struct rte_flow_item_ipv6 *)item->mask;
2628                         pctype = I40E_FILTER_PCTYPE_NONF_IPV6_OTHER;
2629                         layer_idx = I40E_FLXPLD_L3_IDX;
2630
2631                         if (ipv6_spec && ipv6_mask && outer_ip) {
2632                                 /* Check IPv6 mask and update input set */
2633                                 if (ipv6_mask->hdr.payload_len) {
2634                                         rte_flow_error_set(error, EINVAL,
2635                                                    RTE_FLOW_ERROR_TYPE_ITEM,
2636                                                    item,
2637                                                    "Invalid IPv6 mask");
2638                                         return -rte_errno;
2639                                 }
2640
2641                                 if (!memcmp(ipv6_mask->hdr.src_addr,
2642                                             ipv6_addr_mask,
2643                                             RTE_DIM(ipv6_mask->hdr.src_addr)))
2644                                         input_set |= I40E_INSET_IPV6_SRC;
2645                                 if (!memcmp(ipv6_mask->hdr.dst_addr,
2646                                             ipv6_addr_mask,
2647                                             RTE_DIM(ipv6_mask->hdr.dst_addr)))
2648                                         input_set |= I40E_INSET_IPV6_DST;
2649
2650                                 if ((ipv6_mask->hdr.vtc_flow &
2651                                      rte_cpu_to_be_32(I40E_IPV6_TC_MASK))
2652                                     == rte_cpu_to_be_32(I40E_IPV6_TC_MASK))
2653                                         input_set |= I40E_INSET_IPV6_TC;
2654                                 if (ipv6_mask->hdr.proto == UINT8_MAX)
2655                                         input_set |= I40E_INSET_IPV6_NEXT_HDR;
2656                                 if (ipv6_mask->hdr.hop_limits == UINT8_MAX)
2657                                         input_set |= I40E_INSET_IPV6_HOP_LIMIT;
2658
2659                                 /* Get filter info */
2660                                 vtc_flow_cpu =
2661                                       rte_be_to_cpu_32(ipv6_spec->hdr.vtc_flow);
2662                                 filter->input.flow.ipv6_flow.tc =
2663                                         (uint8_t)(vtc_flow_cpu >>
2664                                                   I40E_FDIR_IPv6_TC_OFFSET);
2665                                 filter->input.flow.ipv6_flow.proto =
2666                                         ipv6_spec->hdr.proto;
2667                                 filter->input.flow.ipv6_flow.hop_limits =
2668                                         ipv6_spec->hdr.hop_limits;
2669
2670                                 rte_memcpy(filter->input.flow.ipv6_flow.src_ip,
2671                                            ipv6_spec->hdr.src_addr, 16);
2672                                 rte_memcpy(filter->input.flow.ipv6_flow.dst_ip,
2673                                            ipv6_spec->hdr.dst_addr, 16);
2674
2675                                 /* Check if it is fragment. */
2676                                 if (ipv6_spec->hdr.proto ==
2677                                     I40E_IPV6_FRAG_HEADER)
2678                                         pctype = I40E_FILTER_PCTYPE_FRAG_IPV6;
2679                         } else if (!ipv6_spec && !ipv6_mask && !outer_ip) {
2680                                 filter->input.flow_ext.inner_ip = true;
2681                                 filter->input.flow_ext.iip_type =
2682                                         I40E_FDIR_IPTYPE_IPV6;
2683                         } else if ((ipv6_spec || ipv6_mask) && !outer_ip) {
2684                                 rte_flow_error_set(error, EINVAL,
2685                                                    RTE_FLOW_ERROR_TYPE_ITEM,
2686                                                    item,
2687                                                    "Invalid inner IPv6 mask");
2688                                 return -rte_errno;
2689                         }
2690
2691                         if (outer_ip)
2692                                 outer_ip = false;
2693                         break;
2694                 case RTE_FLOW_ITEM_TYPE_TCP:
2695                         tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
2696                         tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
2697
2698                         if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
2699                                 pctype =
2700                                         I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
2701                         else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
2702                                 pctype =
2703                                         I40E_FILTER_PCTYPE_NONF_IPV6_TCP;
2704                         if (tcp_spec && tcp_mask) {
2705                                 /* Check TCP mask and update input set */
2706                                 if (tcp_mask->hdr.sent_seq ||
2707                                     tcp_mask->hdr.recv_ack ||
2708                                     tcp_mask->hdr.data_off ||
2709                                     tcp_mask->hdr.tcp_flags ||
2710                                     tcp_mask->hdr.rx_win ||
2711                                     tcp_mask->hdr.cksum ||
2712                                     tcp_mask->hdr.tcp_urp) {
2713                                         rte_flow_error_set(error, EINVAL,
2714                                                    RTE_FLOW_ERROR_TYPE_ITEM,
2715                                                    item,
2716                                                    "Invalid TCP mask");
2717                                         return -rte_errno;
2718                                 }
2719
2720                                 if (tcp_mask->hdr.src_port == UINT16_MAX)
2721                                         input_set |= I40E_INSET_SRC_PORT;
2722                                 if (tcp_mask->hdr.dst_port == UINT16_MAX)
2723                                         input_set |= I40E_INSET_DST_PORT;
2724
2725                                 /* Get filter info */
2726                                 if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
2727                                         filter->input.flow.tcp4_flow.src_port =
2728                                                 tcp_spec->hdr.src_port;
2729                                         filter->input.flow.tcp4_flow.dst_port =
2730                                                 tcp_spec->hdr.dst_port;
2731                                 } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
2732                                         filter->input.flow.tcp6_flow.src_port =
2733                                                 tcp_spec->hdr.src_port;
2734                                         filter->input.flow.tcp6_flow.dst_port =
2735                                                 tcp_spec->hdr.dst_port;
2736                                 }
2737                         }
2738
2739                         layer_idx = I40E_FLXPLD_L4_IDX;
2740
2741                         break;
2742                 case RTE_FLOW_ITEM_TYPE_UDP:
2743                         udp_spec = (const struct rte_flow_item_udp *)item->spec;
2744                         udp_mask = (const struct rte_flow_item_udp *)item->mask;
2745
2746                         if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
2747                                 pctype =
2748                                         I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
2749                         else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
2750                                 pctype =
2751                                         I40E_FILTER_PCTYPE_NONF_IPV6_UDP;
2752
2753                         if (udp_spec && udp_mask) {
2754                                 /* Check UDP mask and update input set*/
2755                                 if (udp_mask->hdr.dgram_len ||
2756                                     udp_mask->hdr.dgram_cksum) {
2757                                         rte_flow_error_set(error, EINVAL,
2758                                                    RTE_FLOW_ERROR_TYPE_ITEM,
2759                                                    item,
2760                                                    "Invalid UDP mask");
2761                                         return -rte_errno;
2762                                 }
2763
2764                                 if (udp_mask->hdr.src_port == UINT16_MAX)
2765                                         input_set |= I40E_INSET_SRC_PORT;
2766                                 if (udp_mask->hdr.dst_port == UINT16_MAX)
2767                                         input_set |= I40E_INSET_DST_PORT;
2768
2769                                 /* Get filter info */
2770                                 if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
2771                                         filter->input.flow.udp4_flow.src_port =
2772                                                 udp_spec->hdr.src_port;
2773                                         filter->input.flow.udp4_flow.dst_port =
2774                                                 udp_spec->hdr.dst_port;
2775                                 } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
2776                                         filter->input.flow.udp6_flow.src_port =
2777                                                 udp_spec->hdr.src_port;
2778                                         filter->input.flow.udp6_flow.dst_port =
2779                                                 udp_spec->hdr.dst_port;
2780                                 }
2781                         }
2782
2783                         layer_idx = I40E_FLXPLD_L4_IDX;
2784
2785                         break;
2786                 case RTE_FLOW_ITEM_TYPE_GTPC:
2787                 case RTE_FLOW_ITEM_TYPE_GTPU:
2788                         if (!pf->gtp_support) {
2789                                 rte_flow_error_set(error, EINVAL,
2790                                                    RTE_FLOW_ERROR_TYPE_ITEM,
2791                                                    item,
2792                                                    "Unsupported protocol");
2793                                 return -rte_errno;
2794                         }
2795
2796                         gtp_spec = (const struct rte_flow_item_gtp *)item->spec;
2797                         gtp_mask = (const struct rte_flow_item_gtp *)item->mask;
2798
2799                         if (gtp_spec && gtp_mask) {
2800                                 if (gtp_mask->v_pt_rsv_flags ||
2801                                     gtp_mask->msg_type ||
2802                                     gtp_mask->msg_len ||
2803                                     gtp_mask->teid != UINT32_MAX) {
2804                                         rte_flow_error_set(error, EINVAL,
2805                                                    RTE_FLOW_ERROR_TYPE_ITEM,
2806                                                    item,
2807                                                    "Invalid GTP mask");
2808                                         return -rte_errno;
2809                                 }
2810
2811                                 filter->input.flow.gtp_flow.teid =
2812                                         gtp_spec->teid;
2813                                 filter->input.flow_ext.customized_pctype = true;
2814                                 cus_proto = item_type;
2815                         }
2816                         break;
2817                 case RTE_FLOW_ITEM_TYPE_SCTP:
2818                         sctp_spec =
2819                                 (const struct rte_flow_item_sctp *)item->spec;
2820                         sctp_mask =
2821                                 (const struct rte_flow_item_sctp *)item->mask;
2822
2823                         if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
2824                                 pctype =
2825                                         I40E_FILTER_PCTYPE_NONF_IPV4_SCTP;
2826                         else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
2827                                 pctype =
2828                                         I40E_FILTER_PCTYPE_NONF_IPV6_SCTP;
2829
2830                         if (sctp_spec && sctp_mask) {
2831                                 /* Check SCTP mask and update input set */
2832                                 if (sctp_mask->hdr.cksum) {
2833                                         rte_flow_error_set(error, EINVAL,
2834                                                    RTE_FLOW_ERROR_TYPE_ITEM,
2835                                                    item,
2836                                                    "Invalid UDP mask");
2837                                         return -rte_errno;
2838                                 }
2839
2840                                 if (sctp_mask->hdr.src_port == UINT16_MAX)
2841                                         input_set |= I40E_INSET_SRC_PORT;
2842                                 if (sctp_mask->hdr.dst_port == UINT16_MAX)
2843                                         input_set |= I40E_INSET_DST_PORT;
2844                                 if (sctp_mask->hdr.tag == UINT32_MAX)
2845                                         input_set |= I40E_INSET_SCTP_VT;
2846
2847                                 /* Get filter info */
2848                                 if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
2849                                         filter->input.flow.sctp4_flow.src_port =
2850                                                 sctp_spec->hdr.src_port;
2851                                         filter->input.flow.sctp4_flow.dst_port =
2852                                                 sctp_spec->hdr.dst_port;
2853                                         filter->input.flow.sctp4_flow.verify_tag
2854                                                 = sctp_spec->hdr.tag;
2855                                 } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
2856                                         filter->input.flow.sctp6_flow.src_port =
2857                                                 sctp_spec->hdr.src_port;
2858                                         filter->input.flow.sctp6_flow.dst_port =
2859                                                 sctp_spec->hdr.dst_port;
2860                                         filter->input.flow.sctp6_flow.verify_tag
2861                                                 = sctp_spec->hdr.tag;
2862                                 }
2863                         }
2864
2865                         layer_idx = I40E_FLXPLD_L4_IDX;
2866
2867                         break;
2868                 case RTE_FLOW_ITEM_TYPE_RAW:
2869                         raw_spec = (const struct rte_flow_item_raw *)item->spec;
2870                         raw_mask = (const struct rte_flow_item_raw *)item->mask;
2871
2872                         if (!raw_spec || !raw_mask) {
2873                                 rte_flow_error_set(error, EINVAL,
2874                                                    RTE_FLOW_ERROR_TYPE_ITEM,
2875                                                    item,
2876                                                    "NULL RAW spec/mask");
2877                                 return -rte_errno;
2878                         }
2879
2880                         if (pf->support_multi_driver) {
2881                                 rte_flow_error_set(error, ENOTSUP,
2882                                                    RTE_FLOW_ERROR_TYPE_ITEM,
2883                                                    item,
2884                                                    "Unsupported flexible payload.");
2885                                 return -rte_errno;
2886                         }
2887
2888                         ret = i40e_flow_check_raw_item(item, raw_spec, error);
2889                         if (ret < 0)
2890                                 return ret;
2891
2892                         off_arr[raw_id] = raw_spec->offset;
2893                         len_arr[raw_id] = raw_spec->length;
2894
2895                         flex_size = 0;
2896                         memset(&flex_pit, 0, sizeof(struct i40e_fdir_flex_pit));
2897                         flex_pit.size =
2898                                 raw_spec->length / sizeof(uint16_t);
2899                         flex_pit.dst_offset =
2900                                 next_dst_off / sizeof(uint16_t);
2901
2902                         for (i = 0; i <= raw_id; i++) {
2903                                 if (i == raw_id)
2904                                         flex_pit.src_offset +=
2905                                                 raw_spec->offset /
2906                                                 sizeof(uint16_t);
2907                                 else
2908                                         flex_pit.src_offset +=
2909                                                 (off_arr[i] + len_arr[i]) /
2910                                                 sizeof(uint16_t);
2911                                 flex_size += len_arr[i];
2912                         }
2913                         if (((flex_pit.src_offset + flex_pit.size) >=
2914                              I40E_MAX_FLX_SOURCE_OFF / sizeof(uint16_t)) ||
2915                                 flex_size > I40E_FDIR_MAX_FLEXLEN) {
2916                                 rte_flow_error_set(error, EINVAL,
2917                                            RTE_FLOW_ERROR_TYPE_ITEM,
2918                                            item,
2919                                            "Exceeds maxmial payload limit.");
2920                                 return -rte_errno;
2921                         }
2922
2923                         /* Store flex pit to SW */
2924                         ret = i40e_flow_store_flex_pit(pf, &flex_pit,
2925                                                        layer_idx, raw_id);
2926                         if (ret < 0) {
2927                                 rte_flow_error_set(error, EINVAL,
2928                                    RTE_FLOW_ERROR_TYPE_ITEM,
2929                                    item,
2930                                    "Conflict with the first flexible rule.");
2931                                 return -rte_errno;
2932                         } else if (ret > 0)
2933                                 cfg_flex_pit = false;
2934
2935                         for (i = 0; i < raw_spec->length; i++) {
2936                                 j = i + next_dst_off;
2937                                 filter->input.flow_ext.flexbytes[j] =
2938                                         raw_spec->pattern[i];
2939                                 flex_mask[j] = raw_mask->pattern[i];
2940                         }
2941
2942                         next_dst_off += raw_spec->length;
2943                         raw_id++;
2944                         break;
2945                 case RTE_FLOW_ITEM_TYPE_VF:
2946                         vf_spec = (const struct rte_flow_item_vf *)item->spec;
2947                         filter->input.flow_ext.is_vf = 1;
2948                         filter->input.flow_ext.dst_id = vf_spec->id;
2949                         if (filter->input.flow_ext.is_vf &&
2950                             filter->input.flow_ext.dst_id >= pf->vf_num) {
2951                                 rte_flow_error_set(error, EINVAL,
2952                                                    RTE_FLOW_ERROR_TYPE_ITEM,
2953                                                    item,
2954                                                    "Invalid VF ID for FDIR.");
2955                                 return -rte_errno;
2956                         }
2957                         break;
2958                 default:
2959                         break;
2960                 }
2961         }
2962
2963         /* Get customized pctype value */
2964         if (filter->input.flow_ext.customized_pctype) {
2965                 pctype = i40e_flow_fdir_get_pctype_value(pf, cus_proto, filter);
2966                 if (pctype == I40E_FILTER_PCTYPE_INVALID) {
2967                         rte_flow_error_set(error, EINVAL,
2968                                            RTE_FLOW_ERROR_TYPE_ITEM,
2969                                            item,
2970                                            "Unsupported pctype");
2971                         return -rte_errno;
2972                 }
2973         }
2974
2975         /* If customized pctype is not used, set fdir configuration.*/
2976         if (!filter->input.flow_ext.customized_pctype) {
2977                 ret = i40e_flow_set_fdir_inset(pf, pctype, input_set);
2978                 if (ret == -1) {
2979                         rte_flow_error_set(error, EINVAL,
2980                                            RTE_FLOW_ERROR_TYPE_ITEM, item,
2981                                            "Conflict with the first rule's input set.");
2982                         return -rte_errno;
2983                 } else if (ret == -EINVAL) {
2984                         rte_flow_error_set(error, EINVAL,
2985                                            RTE_FLOW_ERROR_TYPE_ITEM, item,
2986                                            "Invalid pattern mask.");
2987                         return -rte_errno;
2988                 }
2989
2990                 /* Store flex mask to SW */
2991                 ret = i40e_flow_store_flex_mask(pf, pctype, flex_mask);
2992                 if (ret == -1) {
2993                         rte_flow_error_set(error, EINVAL,
2994                                            RTE_FLOW_ERROR_TYPE_ITEM,
2995                                            item,
2996                                            "Exceed maximal number of bitmasks");
2997                         return -rte_errno;
2998                 } else if (ret == -2) {
2999                         rte_flow_error_set(error, EINVAL,
3000                                            RTE_FLOW_ERROR_TYPE_ITEM,
3001                                            item,
3002                                            "Conflict with the first flexible rule");
3003                         return -rte_errno;
3004                 } else if (ret > 0)
3005                         cfg_flex_msk = false;
3006
3007                 if (cfg_flex_pit)
3008                         i40e_flow_set_fdir_flex_pit(pf, layer_idx, raw_id);
3009
3010                 if (cfg_flex_msk)
3011                         i40e_flow_set_fdir_flex_msk(pf, pctype);
3012         }
3013
3014         filter->input.pctype = pctype;
3015
3016         return 0;
3017 }
3018
/* Parse to get the action info of a FDIR filter.
 * FDIR action supports QUEUE, DROP or PASSTHRU, optionally
 * followed by MARK or FLAG.
 */
3022 static int
3023 i40e_flow_parse_fdir_action(struct rte_eth_dev *dev,
3024                             const struct rte_flow_action *actions,
3025                             struct rte_flow_error *error,
3026                             struct i40e_fdir_filter_conf *filter)
3027 {
3028         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3029         const struct rte_flow_action *act;
3030         const struct rte_flow_action_queue *act_q;
3031         const struct rte_flow_action_mark *mark_spec;
3032         uint32_t index = 0;
3033
3034         /* Check if the first non-void action is QUEUE or DROP or PASSTHRU. */
3035         NEXT_ITEM_OF_ACTION(act, actions, index);
3036         switch (act->type) {
3037         case RTE_FLOW_ACTION_TYPE_QUEUE:
3038                 act_q = (const struct rte_flow_action_queue *)act->conf;
3039                 filter->action.rx_queue = act_q->index;
3040                 if ((!filter->input.flow_ext.is_vf &&
3041                      filter->action.rx_queue >= pf->dev_data->nb_rx_queues) ||
3042                     (filter->input.flow_ext.is_vf &&
3043                      filter->action.rx_queue >= pf->vf_nb_qps)) {
3044                         rte_flow_error_set(error, EINVAL,
3045                                            RTE_FLOW_ERROR_TYPE_ACTION, act,
3046                                            "Invalid queue ID for FDIR.");
3047                         return -rte_errno;
3048                 }
3049                 filter->action.behavior = I40E_FDIR_ACCEPT;
3050                 break;
3051         case RTE_FLOW_ACTION_TYPE_DROP:
3052                 filter->action.behavior = I40E_FDIR_REJECT;
3053                 break;
3054         case RTE_FLOW_ACTION_TYPE_PASSTHRU:
3055                 filter->action.behavior = I40E_FDIR_PASSTHRU;
3056                 break;
3057         default:
3058                 rte_flow_error_set(error, EINVAL,
3059                                    RTE_FLOW_ERROR_TYPE_ACTION, act,
3060                                    "Invalid action.");
3061                 return -rte_errno;
3062         }
3063
3064         /* Check if the next non-void item is MARK or FLAG or END. */
3065         index++;
3066         NEXT_ITEM_OF_ACTION(act, actions, index);
3067         switch (act->type) {
3068         case RTE_FLOW_ACTION_TYPE_MARK:
3069                 mark_spec = (const struct rte_flow_action_mark *)act->conf;
3070                 filter->action.report_status = I40E_FDIR_REPORT_ID;
3071                 filter->soft_id = mark_spec->id;
3072                 break;
3073         case RTE_FLOW_ACTION_TYPE_FLAG:
3074                 filter->action.report_status = I40E_FDIR_NO_REPORT_STATUS;
3075                 break;
3076         case RTE_FLOW_ACTION_TYPE_END:
3077                 return 0;
3078         default:
3079                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
3080                                    act, "Invalid action.");
3081                 return -rte_errno;
3082         }
3083
3084         /* Check if the next non-void item is END */
3085         index++;
3086         NEXT_ITEM_OF_ACTION(act, actions, index);
3087         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
3088                 rte_flow_error_set(error, EINVAL,
3089                                    RTE_FLOW_ERROR_TYPE_ACTION,
3090                                    act, "Invalid action.");
3091                 return -rte_errno;
3092         }
3093
3094         return 0;
3095 }
3096
3097 static int
3098 i40e_flow_parse_fdir_filter(struct rte_eth_dev *dev,
3099                             const struct rte_flow_attr *attr,
3100                             const struct rte_flow_item pattern[],
3101                             const struct rte_flow_action actions[],
3102                             struct rte_flow_error *error,
3103                             union i40e_filter_t *filter)
3104 {
3105         struct i40e_fdir_filter_conf *fdir_filter =
3106                 &filter->fdir_filter;
3107         int ret;
3108
3109         ret = i40e_flow_parse_fdir_pattern(dev, pattern, error, fdir_filter);
3110         if (ret)
3111                 return ret;
3112
3113         ret = i40e_flow_parse_fdir_action(dev, actions, error, fdir_filter);
3114         if (ret)
3115                 return ret;
3116
3117         ret = i40e_flow_parse_attr(attr, error);
3118         if (ret)
3119                 return ret;
3120
3121         cons_filter_type = RTE_ETH_FILTER_FDIR;
3122
3123         if (dev->data->dev_conf.fdir_conf.mode !=
3124             RTE_FDIR_MODE_PERFECT) {
3125                 rte_flow_error_set(error, ENOTSUP,
3126                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3127                                    NULL,
3128                                    "Check the mode in fdir_conf.");
3129                 return -rte_errno;
3130         }
3131
3132         return 0;
3133 }
3134
3135 /* Parse to get the action info of a tunnel filter
3136  * Tunnel action only supports PF, VF and QUEUE.
3137  */
3138 static int
3139 i40e_flow_parse_tunnel_action(struct rte_eth_dev *dev,
3140                               const struct rte_flow_action *actions,
3141                               struct rte_flow_error *error,
3142                               struct i40e_tunnel_filter_conf *filter)
3143 {
3144         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3145         const struct rte_flow_action *act;
3146         const struct rte_flow_action_queue *act_q;
3147         const struct rte_flow_action_vf *act_vf;
3148         uint32_t index = 0;
3149
3150         /* Check if the first non-void action is PF or VF. */
3151         NEXT_ITEM_OF_ACTION(act, actions, index);
3152         if (act->type != RTE_FLOW_ACTION_TYPE_PF &&
3153             act->type != RTE_FLOW_ACTION_TYPE_VF) {
3154                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
3155                                    act, "Not supported action.");
3156                 return -rte_errno;
3157         }
3158
3159         if (act->type == RTE_FLOW_ACTION_TYPE_VF) {
3160                 act_vf = (const struct rte_flow_action_vf *)act->conf;
3161                 filter->vf_id = act_vf->id;
3162                 filter->is_to_vf = 1;
3163                 if (filter->vf_id >= pf->vf_num) {
3164                         rte_flow_error_set(error, EINVAL,
3165                                    RTE_FLOW_ERROR_TYPE_ACTION,
3166                                    act, "Invalid VF ID for tunnel filter");
3167                         return -rte_errno;
3168                 }
3169         }
3170
3171         /* Check if the next non-void item is QUEUE */
3172         index++;
3173         NEXT_ITEM_OF_ACTION(act, actions, index);
3174         if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
3175                 act_q = (const struct rte_flow_action_queue *)act->conf;
3176                 filter->queue_id = act_q->index;
3177                 if ((!filter->is_to_vf) &&
3178                     (filter->queue_id >= pf->dev_data->nb_rx_queues)) {
3179                         rte_flow_error_set(error, EINVAL,
3180                                    RTE_FLOW_ERROR_TYPE_ACTION,
3181                                    act, "Invalid queue ID for tunnel filter");
3182                         return -rte_errno;
3183                 } else if (filter->is_to_vf &&
3184                            (filter->queue_id >= pf->vf_nb_qps)) {
3185                         rte_flow_error_set(error, EINVAL,
3186                                    RTE_FLOW_ERROR_TYPE_ACTION,
3187                                    act, "Invalid queue ID for tunnel filter");
3188                         return -rte_errno;
3189                 }
3190         }
3191
3192         /* Check if the next non-void item is END */
3193         index++;
3194         NEXT_ITEM_OF_ACTION(act, actions, index);
3195         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
3196                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
3197                                    act, "Not supported action.");
3198                 return -rte_errno;
3199         }
3200
3201         return 0;
3202 }
3203
/* Tunnel filter field combinations accepted by the driver. Each entry
 * is a bitwise OR of ETH_TUNNEL_FILTER_* flags; a parsed filter type is
 * validated against this table by i40e_check_tunnel_filter_type().
 */
static uint16_t i40e_supported_tunnel_filter_types[] = {
	/* inner MAC + tenant ID + inner VLAN */
	ETH_TUNNEL_FILTER_IMAC | ETH_TUNNEL_FILTER_TENID |
	ETH_TUNNEL_FILTER_IVLAN,
	/* inner MAC + inner VLAN */
	ETH_TUNNEL_FILTER_IMAC | ETH_TUNNEL_FILTER_IVLAN,
	/* inner MAC + tenant ID */
	ETH_TUNNEL_FILTER_IMAC | ETH_TUNNEL_FILTER_TENID,
	/* outer MAC + tenant ID + inner MAC */
	ETH_TUNNEL_FILTER_OMAC | ETH_TUNNEL_FILTER_TENID |
	ETH_TUNNEL_FILTER_IMAC,
	/* inner MAC only */
	ETH_TUNNEL_FILTER_IMAC,
};
3213
3214 static int
3215 i40e_check_tunnel_filter_type(uint8_t filter_type)
3216 {
3217         uint8_t i;
3218
3219         for (i = 0; i < RTE_DIM(i40e_supported_tunnel_filter_types); i++) {
3220                 if (filter_type == i40e_supported_tunnel_filter_types[i])
3221                         return 0;
3222         }
3223
3224         return -1;
3225 }
3226
/* 1. The "last" field of an item must be NULL, as ranges are not supported.
 * 2. Supported filter types: IMAC_IVLAN_TENID, IMAC_IVLAN,
 *    IMAC_TENID, OMAC_TENID_IMAC and IMAC.
 * 3. The mask of a field that needs to be matched must be
 *    filled with 1s.
 * 4. The mask of a field that need not be matched must be
 *    filled with 0s.
 */
static int
i40e_flow_parse_vxlan_pattern(__rte_unused struct rte_eth_dev *dev,
			      const struct rte_flow_item *pattern,
			      struct rte_flow_error *error,
			      struct i40e_tunnel_filter_conf *filter)
{
	const struct rte_flow_item *item = pattern;
	const struct rte_flow_item_eth *eth_spec;
	const struct rte_flow_item_eth *eth_mask;
	const struct rte_flow_item_vxlan *vxlan_spec;
	const struct rte_flow_item_vxlan *vxlan_mask;
	const struct rte_flow_item_vlan *vlan_spec;
	const struct rte_flow_item_vlan *vlan_mask;
	uint8_t filter_type = 0;	/* OR of ETH_TUNNEL_FILTER_* flags */
	bool is_vni_masked = 0;
	uint8_t vni_mask[] = {0xFF, 0xFF, 0xFF};
	enum rte_flow_item_type item_type;
	bool vxlan_flag = 0;	/* set once the VXLAN item has been seen */
	uint32_t tenant_id_be = 0;
	int ret;

	for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
		/* Ranges (item->last) are not supported for tunnel rules. */
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   item,
					   "Not support range");
			return -rte_errno;
		}
		item_type = item->type;
		switch (item_type) {
		case RTE_FLOW_ITEM_TYPE_ETH:
			eth_spec = (const struct rte_flow_item_eth *)item->spec;
			eth_mask = (const struct rte_flow_item_eth *)item->mask;

			/* Check if ETH item is used for place holder.
			 * If yes, both spec and mask should be NULL.
			 * If no, both spec and mask shouldn't be NULL.
			 */
			if ((!eth_spec && eth_mask) ||
			    (eth_spec && !eth_mask)) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid ether spec/mask");
				return -rte_errno;
			}

			if (eth_spec && eth_mask) {
				/* DST address of inner MAC shouldn't be masked.
				 * SRC address of Inner MAC should be masked.
				 */
				if (!is_broadcast_ether_addr(&eth_mask->dst) ||
				    !is_zero_ether_addr(&eth_mask->src) ||
				    eth_mask->type) {
					rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid ether spec/mask");
					return -rte_errno;
				}

				/* An ETH item seen before the VXLAN item
				 * is the outer MAC; one seen after it is
				 * the inner MAC.
				 */
				if (!vxlan_flag) {
					rte_memcpy(&filter->outer_mac,
						   &eth_spec->dst,
						   ETHER_ADDR_LEN);
					filter_type |= ETH_TUNNEL_FILTER_OMAC;
				} else {
					rte_memcpy(&filter->inner_mac,
						   &eth_spec->dst,
						   ETHER_ADDR_LEN);
					filter_type |= ETH_TUNNEL_FILTER_IMAC;
				}
			}
			break;
		case RTE_FLOW_ITEM_TYPE_VLAN:
			vlan_spec =
				(const struct rte_flow_item_vlan *)item->spec;
			vlan_mask =
				(const struct rte_flow_item_vlan *)item->mask;
			if (!(vlan_spec && vlan_mask)) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid vlan item");
				return -rte_errno;
			}

			if (vlan_spec && vlan_mask) {
				/* Take the VLAN ID only when the whole TCI
				 * field is matched.
				 */
				if (vlan_mask->tci ==
				    rte_cpu_to_be_16(I40E_TCI_MASK))
					filter->inner_vlan =
					      rte_be_to_cpu_16(vlan_spec->tci) &
					      I40E_TCI_MASK;
				filter_type |= ETH_TUNNEL_FILTER_IVLAN;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_IPV4:
			filter->ip_type = I40E_TUNNEL_IPTYPE_IPV4;
			/* IPv4 is used to describe protocol,
			 * spec and mask should be NULL.
			 */
			if (item->spec || item->mask) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid IPv4 item");
				return -rte_errno;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_IPV6:
			filter->ip_type = I40E_TUNNEL_IPTYPE_IPV6;
			/* IPv6 is used to describe protocol,
			 * spec and mask should be NULL.
			 */
			if (item->spec || item->mask) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid IPv6 item");
				return -rte_errno;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_UDP:
			/* UDP is used to describe protocol,
			 * spec and mask should be NULL.
			 */
			if (item->spec || item->mask) {
				rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   item,
					   "Invalid UDP item");
				return -rte_errno;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_VXLAN:
			vxlan_spec =
				(const struct rte_flow_item_vxlan *)item->spec;
			vxlan_mask =
				(const struct rte_flow_item_vxlan *)item->mask;
			/* Check if VXLAN item is used to describe protocol.
			 * If yes, both spec and mask should be NULL.
			 * If no, both spec and mask shouldn't be NULL.
			 */
			if ((!vxlan_spec && vxlan_mask) ||
			    (vxlan_spec && !vxlan_mask)) {
				rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   item,
					   "Invalid VXLAN item");
				return -rte_errno;
			}

			/* Check if VNI is masked: the full 3-byte VNI must
			 * be matched or the item is rejected.
			 */
			if (vxlan_spec && vxlan_mask) {
				is_vni_masked =
					!!memcmp(vxlan_mask->vni, vni_mask,
						 RTE_DIM(vni_mask));
				if (is_vni_masked) {
					rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid VNI mask");
					return -rte_errno;
				}

				/* The VNI is 24 bits: copy it into the low
				 * three bytes of a big-endian u32, then
				 * convert to CPU order.
				 */
				rte_memcpy(((uint8_t *)&tenant_id_be + 1),
					   vxlan_spec->vni, 3);
				filter->tenant_id =
					rte_be_to_cpu_32(tenant_id_be);
				filter_type |= ETH_TUNNEL_FILTER_TENID;
			}

			vxlan_flag = 1;
			break;
		default:
			break;
		}
	}

	/* Reject field combinations the hardware cannot match on. */
	ret = i40e_check_tunnel_filter_type(filter_type);
	if (ret < 0) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM,
				   NULL,
				   "Invalid filter type");
		return -rte_errno;
	}
	filter->filter_type = filter_type;

	filter->tunnel_type = I40E_TUNNEL_TYPE_VXLAN;

	return 0;
}
3429
3430 static int
3431 i40e_flow_parse_vxlan_filter(struct rte_eth_dev *dev,
3432                              const struct rte_flow_attr *attr,
3433                              const struct rte_flow_item pattern[],
3434                              const struct rte_flow_action actions[],
3435                              struct rte_flow_error *error,
3436                              union i40e_filter_t *filter)
3437 {
3438         struct i40e_tunnel_filter_conf *tunnel_filter =
3439                 &filter->consistent_tunnel_filter;
3440         int ret;
3441
3442         ret = i40e_flow_parse_vxlan_pattern(dev, pattern,
3443                                             error, tunnel_filter);
3444         if (ret)
3445                 return ret;
3446
3447         ret = i40e_flow_parse_tunnel_action(dev, actions, error, tunnel_filter);
3448         if (ret)
3449                 return ret;
3450
3451         ret = i40e_flow_parse_attr(attr, error);
3452         if (ret)
3453                 return ret;
3454
3455         cons_filter_type = RTE_ETH_FILTER_TUNNEL;
3456
3457         return ret;
3458 }
3459
/* 1. The "last" field of an item must be NULL, as ranges are not supported.
 * 2. Supported filter types: IMAC_IVLAN_TENID, IMAC_IVLAN,
 *    IMAC_TENID, OMAC_TENID_IMAC and IMAC.
 * 3. The mask of a field that needs to be matched must be
 *    filled with 1s.
 * 4. The mask of a field that need not be matched must be
 *    filled with 0s.
 */
3468 static int
3469 i40e_flow_parse_nvgre_pattern(__rte_unused struct rte_eth_dev *dev,
3470                               const struct rte_flow_item *pattern,
3471                               struct rte_flow_error *error,
3472                               struct i40e_tunnel_filter_conf *filter)
3473 {
3474         const struct rte_flow_item *item = pattern;
3475         const struct rte_flow_item_eth *eth_spec;
3476         const struct rte_flow_item_eth *eth_mask;
3477         const struct rte_flow_item_nvgre *nvgre_spec;
3478         const struct rte_flow_item_nvgre *nvgre_mask;
3479         const struct rte_flow_item_vlan *vlan_spec;
3480         const struct rte_flow_item_vlan *vlan_mask;
3481         enum rte_flow_item_type item_type;
3482         uint8_t filter_type = 0;
3483         bool is_tni_masked = 0;
3484         uint8_t tni_mask[] = {0xFF, 0xFF, 0xFF};
3485         bool nvgre_flag = 0;
3486         uint32_t tenant_id_be = 0;
3487         int ret;
3488
3489         for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
3490                 if (item->last) {
3491                         rte_flow_error_set(error, EINVAL,
3492                                            RTE_FLOW_ERROR_TYPE_ITEM,
3493                                            item,
3494                                            "Not support range");
3495                         return -rte_errno;
3496                 }
3497                 item_type = item->type;
3498                 switch (item_type) {
3499                 case RTE_FLOW_ITEM_TYPE_ETH:
3500                         eth_spec = (const struct rte_flow_item_eth *)item->spec;
3501                         eth_mask = (const struct rte_flow_item_eth *)item->mask;
3502
3503                         /* Check if ETH item is used for place holder.
3504                          * If yes, both spec and mask should be NULL.
3505                          * If no, both spec and mask shouldn't be NULL.
3506                          */
3507                         if ((!eth_spec && eth_mask) ||
3508                             (eth_spec && !eth_mask)) {
3509                                 rte_flow_error_set(error, EINVAL,
3510                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3511                                                    item,
3512                                                    "Invalid ether spec/mask");
3513                                 return -rte_errno;
3514                         }
3515
3516                         if (eth_spec && eth_mask) {
3517                                 /* DST address of inner MAC shouldn't be masked.
3518                                  * SRC address of Inner MAC should be masked.
3519                                  */
3520                                 if (!is_broadcast_ether_addr(&eth_mask->dst) ||
3521                                     !is_zero_ether_addr(&eth_mask->src) ||
3522                                     eth_mask->type) {
3523                                         rte_flow_error_set(error, EINVAL,
3524                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3525                                                    item,
3526                                                    "Invalid ether spec/mask");
3527                                         return -rte_errno;
3528                                 }
3529
3530                                 if (!nvgre_flag) {
3531                                         rte_memcpy(&filter->outer_mac,
3532                                                    &eth_spec->dst,
3533                                                    ETHER_ADDR_LEN);
3534                                         filter_type |= ETH_TUNNEL_FILTER_OMAC;
3535                                 } else {
3536                                         rte_memcpy(&filter->inner_mac,
3537                                                    &eth_spec->dst,
3538                                                    ETHER_ADDR_LEN);
3539                                         filter_type |= ETH_TUNNEL_FILTER_IMAC;
3540                                 }
3541                         }
3542
3543                         break;
3544                 case RTE_FLOW_ITEM_TYPE_VLAN:
3545                         vlan_spec =
3546                                 (const struct rte_flow_item_vlan *)item->spec;
3547                         vlan_mask =
3548                                 (const struct rte_flow_item_vlan *)item->mask;
3549                         if (!(vlan_spec && vlan_mask)) {
3550                                 rte_flow_error_set(error, EINVAL,
3551                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3552                                                    item,
3553                                                    "Invalid vlan item");
3554                                 return -rte_errno;
3555                         }
3556
3557                         if (vlan_spec && vlan_mask) {
3558                                 if (vlan_mask->tci ==
3559                                     rte_cpu_to_be_16(I40E_TCI_MASK))
3560                                         filter->inner_vlan =
3561                                               rte_be_to_cpu_16(vlan_spec->tci) &
3562                                               I40E_TCI_MASK;
3563                                 filter_type |= ETH_TUNNEL_FILTER_IVLAN;
3564                         }
3565                         break;
3566                 case RTE_FLOW_ITEM_TYPE_IPV4:
3567                         filter->ip_type = I40E_TUNNEL_IPTYPE_IPV4;
3568                         /* IPv4 is used to describe protocol,
3569                          * spec and mask should be NULL.
3570                          */
3571                         if (item->spec || item->mask) {
3572                                 rte_flow_error_set(error, EINVAL,
3573                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3574                                                    item,
3575                                                    "Invalid IPv4 item");
3576                                 return -rte_errno;
3577                         }
3578                         break;
3579                 case RTE_FLOW_ITEM_TYPE_IPV6:
3580                         filter->ip_type = I40E_TUNNEL_IPTYPE_IPV6;
3581                         /* IPv6 is used to describe protocol,
3582                          * spec and mask should be NULL.
3583                          */
3584                         if (item->spec || item->mask) {
3585                                 rte_flow_error_set(error, EINVAL,
3586                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3587                                                    item,
3588                                                    "Invalid IPv6 item");
3589                                 return -rte_errno;
3590                         }
3591                         break;
3592                 case RTE_FLOW_ITEM_TYPE_NVGRE:
3593                         nvgre_spec =
3594                                 (const struct rte_flow_item_nvgre *)item->spec;
3595                         nvgre_mask =
3596                                 (const struct rte_flow_item_nvgre *)item->mask;
3597                         /* Check if NVGRE item is used to describe protocol.
3598                          * If yes, both spec and mask should be NULL.
3599                          * If no, both spec and mask shouldn't be NULL.
3600                          */
3601                         if ((!nvgre_spec && nvgre_mask) ||
3602                             (nvgre_spec && !nvgre_mask)) {
3603                                 rte_flow_error_set(error, EINVAL,
3604                                            RTE_FLOW_ERROR_TYPE_ITEM,
3605                                            item,
3606                                            "Invalid NVGRE item");
3607                                 return -rte_errno;
3608                         }
3609
3610                         if (nvgre_spec && nvgre_mask) {
3611                                 is_tni_masked =
3612                                         !!memcmp(nvgre_mask->tni, tni_mask,
3613                                                  RTE_DIM(tni_mask));
3614                                 if (is_tni_masked) {
3615                                         rte_flow_error_set(error, EINVAL,
3616                                                        RTE_FLOW_ERROR_TYPE_ITEM,
3617                                                        item,
3618                                                        "Invalid TNI mask");
3619                                         return -rte_errno;
3620                                 }
3621                                 if (nvgre_mask->protocol &&
3622                                         nvgre_mask->protocol != 0xFFFF) {
3623                                         rte_flow_error_set(error, EINVAL,
3624                                                 RTE_FLOW_ERROR_TYPE_ITEM,
3625                                                 item,
3626                                                 "Invalid NVGRE item");
3627                                         return -rte_errno;
3628                                 }
3629                                 if (nvgre_mask->c_k_s_rsvd0_ver &&
3630                                         nvgre_mask->c_k_s_rsvd0_ver !=
3631                                         rte_cpu_to_be_16(0xFFFF)) {
3632                                         rte_flow_error_set(error, EINVAL,
3633                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3634                                                    item,
3635                                                    "Invalid NVGRE item");
3636                                         return -rte_errno;
3637                                 }
3638                                 if (nvgre_spec->c_k_s_rsvd0_ver !=
3639                                         rte_cpu_to_be_16(0x2000) &&
3640                                         nvgre_mask->c_k_s_rsvd0_ver) {
3641                                         rte_flow_error_set(error, EINVAL,
3642                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3643                                                    item,
3644                                                    "Invalid NVGRE item");
3645                                         return -rte_errno;
3646                                 }
3647                                 if (nvgre_mask->protocol &&
3648                                         nvgre_spec->protocol !=
3649                                         rte_cpu_to_be_16(0x6558)) {
3650                                         rte_flow_error_set(error, EINVAL,
3651                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3652                                                    item,
3653                                                    "Invalid NVGRE item");
3654                                         return -rte_errno;
3655                                 }
3656                                 rte_memcpy(((uint8_t *)&tenant_id_be + 1),
3657                                            nvgre_spec->tni, 3);
3658                                 filter->tenant_id =
3659                                         rte_be_to_cpu_32(tenant_id_be);
3660                                 filter_type |= ETH_TUNNEL_FILTER_TENID;
3661                         }
3662
3663                         nvgre_flag = 1;
3664                         break;
3665                 default:
3666                         break;
3667                 }
3668         }
3669
3670         ret = i40e_check_tunnel_filter_type(filter_type);
3671         if (ret < 0) {
3672                 rte_flow_error_set(error, EINVAL,
3673                                    RTE_FLOW_ERROR_TYPE_ITEM,
3674                                    NULL,
3675                                    "Invalid filter type");
3676                 return -rte_errno;
3677         }
3678         filter->filter_type = filter_type;
3679
3680         filter->tunnel_type = I40E_TUNNEL_TYPE_NVGRE;
3681
3682         return 0;
3683 }
3684
3685 static int
3686 i40e_flow_parse_nvgre_filter(struct rte_eth_dev *dev,
3687                              const struct rte_flow_attr *attr,
3688                              const struct rte_flow_item pattern[],
3689                              const struct rte_flow_action actions[],
3690                              struct rte_flow_error *error,
3691                              union i40e_filter_t *filter)
3692 {
3693         struct i40e_tunnel_filter_conf *tunnel_filter =
3694                 &filter->consistent_tunnel_filter;
3695         int ret;
3696
3697         ret = i40e_flow_parse_nvgre_pattern(dev, pattern,
3698                                             error, tunnel_filter);
3699         if (ret)
3700                 return ret;
3701
3702         ret = i40e_flow_parse_tunnel_action(dev, actions, error, tunnel_filter);
3703         if (ret)
3704                 return ret;
3705
3706         ret = i40e_flow_parse_attr(attr, error);
3707         if (ret)
3708                 return ret;
3709
3710         cons_filter_type = RTE_ETH_FILTER_TUNNEL;
3711
3712         return ret;
3713 }
3714
3715 /* 1. Last in item should be NULL as range is not supported.
3716  * 2. Supported filter types: MPLS label.
3717  * 3. Mask of fields which need to be matched should be
3718  *    filled with 1.
3719  * 4. Mask of fields which needn't to be matched should be
3720  *    filled with 0.
3721  */
/* Parse an MPLSoUDP/MPLSoGRE pattern (ETH / IPv4|IPv6 / UDP|GRE / MPLS)
 * into a cloud tunnel filter.  Only the MPLS label is matched; every
 * other item is a protocol placeholder and must have NULL spec/mask.
 * Returns 0 on success, -rte_errno (with @error filled) on failure.
 */
static int
i40e_flow_parse_mpls_pattern(__rte_unused struct rte_eth_dev *dev,
			     const struct rte_flow_item *pattern,
			     struct rte_flow_error *error,
			     struct i40e_tunnel_filter_conf *filter)
{
	const struct rte_flow_item *item = pattern;
	const struct rte_flow_item_mpls *mpls_spec;
	const struct rte_flow_item_mpls *mpls_mask;
	enum rte_flow_item_type item_type;
	bool is_mplsoudp = 0; /* 1 - MPLSoUDP, 0 - MPLSoGRE */
	/* The 20-bit label occupies the first two bytes plus the high
	 * nibble of the third byte of label_tc_s; the mask must match
	 * exactly this layout.
	 */
	const uint8_t label_mask[3] = {0xFF, 0xFF, 0xF0};
	uint32_t label_be = 0;

	for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
		/* Ranges (item->last) are not supported for any item. */
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   item,
					   "Not support range");
			return -rte_errno;
		}
		item_type = item->type;
		switch (item_type) {
		case RTE_FLOW_ITEM_TYPE_ETH:
			/* ETH is a protocol placeholder only. */
			if (item->spec || item->mask) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid ETH item");
				return -rte_errno;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_IPV4:
			filter->ip_type = I40E_TUNNEL_IPTYPE_IPV4;
			/* IPv4 is used to describe protocol,
			 * spec and mask should be NULL.
			 */
			if (item->spec || item->mask) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid IPv4 item");
				return -rte_errno;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_IPV6:
			filter->ip_type = I40E_TUNNEL_IPTYPE_IPV6;
			/* IPv6 is used to describe protocol,
			 * spec and mask should be NULL.
			 */
			if (item->spec || item->mask) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid IPv6 item");
				return -rte_errno;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_UDP:
			/* UDP is used to describe protocol,
			 * spec and mask should be NULL.
			 */
			if (item->spec || item->mask) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid UDP item");
				return -rte_errno;
			}
			/* A UDP item selects the MPLSoUDP tunnel type. */
			is_mplsoudp = 1;
			break;
		case RTE_FLOW_ITEM_TYPE_GRE:
			/* GRE is used to describe protocol,
			 * spec and mask should be NULL.
			 */
			if (item->spec || item->mask) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid GRE item");
				return -rte_errno;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_MPLS:
			mpls_spec =
				(const struct rte_flow_item_mpls *)item->spec;
			mpls_mask =
				(const struct rte_flow_item_mpls *)item->mask;

			/* MPLS carries the matched field, so both spec
			 * and mask are mandatory here.
			 */
			if (!mpls_spec || !mpls_mask) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid MPLS item");
				return -rte_errno;
			}

			/* The full 20-bit label must be matched. */
			if (memcmp(mpls_mask->label_tc_s, label_mask, 3)) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid MPLS label mask");
				return -rte_errno;
			}
			/* Place the 3 label bytes in the low 24 bits of a
			 * big-endian word, then drop the TC/S nibble so
			 * tenant_id holds the plain 20-bit label value.
			 */
			rte_memcpy(((uint8_t *)&label_be + 1),
				   mpls_spec->label_tc_s, 3);
			filter->tenant_id = rte_be_to_cpu_32(label_be) >> 4;
			break;
		default:
			break;
		}
	}

	if (is_mplsoudp)
		filter->tunnel_type = I40E_TUNNEL_TYPE_MPLSoUDP;
	else
		filter->tunnel_type = I40E_TUNNEL_TYPE_MPLSoGRE;

	return 0;
}
3843
3844 static int
3845 i40e_flow_parse_mpls_filter(struct rte_eth_dev *dev,
3846                             const struct rte_flow_attr *attr,
3847                             const struct rte_flow_item pattern[],
3848                             const struct rte_flow_action actions[],
3849                             struct rte_flow_error *error,
3850                             union i40e_filter_t *filter)
3851 {
3852         struct i40e_tunnel_filter_conf *tunnel_filter =
3853                 &filter->consistent_tunnel_filter;
3854         int ret;
3855
3856         ret = i40e_flow_parse_mpls_pattern(dev, pattern,
3857                                            error, tunnel_filter);
3858         if (ret)
3859                 return ret;
3860
3861         ret = i40e_flow_parse_tunnel_action(dev, actions, error, tunnel_filter);
3862         if (ret)
3863                 return ret;
3864
3865         ret = i40e_flow_parse_attr(attr, error);
3866         if (ret)
3867                 return ret;
3868
3869         cons_filter_type = RTE_ETH_FILTER_TUNNEL;
3870
3871         return ret;
3872 }
3873
3874 /* 1. Last in item should be NULL as range is not supported.
3875  * 2. Supported filter types: GTP TEID.
3876  * 3. Mask of fields which need to be matched should be
3877  *    filled with 1.
3878  * 4. Mask of fields which needn't to be matched should be
3879  *    filled with 0.
3880  * 5. GTP profile supports GTPv1 only.
3881  * 6. GTP-C response message ('source_port' = 2123) is not supported.
3882  */
/* Parse a GTP-C/GTP-U pattern (ETH / IPv4 / UDP / GTPC|GTPU) into a
 * cloud tunnel filter.  Only the 32-bit TEID may be matched, and it
 * must be fully masked; ETH, IPv4 and UDP items are protocol
 * placeholders with NULL spec/mask.
 * Returns 0 on success, -rte_errno (with @error filled) on failure.
 */
static int
i40e_flow_parse_gtp_pattern(struct rte_eth_dev *dev,
			    const struct rte_flow_item *pattern,
			    struct rte_flow_error *error,
			    struct i40e_tunnel_filter_conf *filter)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	const struct rte_flow_item *item = pattern;
	const struct rte_flow_item_gtp *gtp_spec;
	const struct rte_flow_item_gtp *gtp_mask;
	enum rte_flow_item_type item_type;

	/* GTP classification is gated on pf->gtp_support, which is set
	 * elsewhere in the driver (presumably when a GTP-aware DDP
	 * profile has been loaded -- verify against the rest of the
	 * driver).
	 */
	if (!pf->gtp_support) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM,
				   item,
				   "GTP is not supported by default.");
		return -rte_errno;
	}

	for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
		/* Ranges (item->last) are not supported for any item. */
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   item,
					   "Not support range");
			return -rte_errno;
		}
		item_type = item->type;
		switch (item_type) {
		case RTE_FLOW_ITEM_TYPE_ETH:
			/* ETH is a protocol placeholder only. */
			if (item->spec || item->mask) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid ETH item");
				return -rte_errno;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_IPV4:
			filter->ip_type = I40E_TUNNEL_IPTYPE_IPV4;
			/* IPv4 is used to describe protocol,
			 * spec and mask should be NULL.
			 */
			if (item->spec || item->mask) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid IPv4 item");
				return -rte_errno;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_UDP:
			/* UDP is a protocol placeholder only. */
			if (item->spec || item->mask) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid UDP item");
				return -rte_errno;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_GTPC:
		case RTE_FLOW_ITEM_TYPE_GTPU:
			gtp_spec =
				(const struct rte_flow_item_gtp *)item->spec;
			gtp_mask =
				(const struct rte_flow_item_gtp *)item->mask;

			/* GTP carries the matched field, so both spec and
			 * mask are mandatory here.
			 */
			if (!gtp_spec || !gtp_mask) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid GTP item");
				return -rte_errno;
			}

			/* Only the TEID may be matched, and it must be
			 * matched in full (all-ones mask).
			 */
			if (gtp_mask->v_pt_rsv_flags ||
			    gtp_mask->msg_type ||
			    gtp_mask->msg_len ||
			    gtp_mask->teid != UINT32_MAX) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid GTP mask");
				return -rte_errno;
			}

			if (item_type == RTE_FLOW_ITEM_TYPE_GTPC)
				filter->tunnel_type = I40E_TUNNEL_TYPE_GTPC;
			else if (item_type == RTE_FLOW_ITEM_TYPE_GTPU)
				filter->tunnel_type = I40E_TUNNEL_TYPE_GTPU;

			/* The TEID is carried in the generic tenant_id
			 * field of the tunnel filter.
			 */
			filter->tenant_id = rte_be_to_cpu_32(gtp_spec->teid);

			break;
		default:
			break;
		}
	}

	return 0;
}
3985
3986 static int
3987 i40e_flow_parse_gtp_filter(struct rte_eth_dev *dev,
3988                            const struct rte_flow_attr *attr,
3989                            const struct rte_flow_item pattern[],
3990                            const struct rte_flow_action actions[],
3991                            struct rte_flow_error *error,
3992                            union i40e_filter_t *filter)
3993 {
3994         struct i40e_tunnel_filter_conf *tunnel_filter =
3995                 &filter->consistent_tunnel_filter;
3996         int ret;
3997
3998         ret = i40e_flow_parse_gtp_pattern(dev, pattern,
3999                                           error, tunnel_filter);
4000         if (ret)
4001                 return ret;
4002
4003         ret = i40e_flow_parse_tunnel_action(dev, actions, error, tunnel_filter);
4004         if (ret)
4005                 return ret;
4006
4007         ret = i40e_flow_parse_attr(attr, error);
4008         if (ret)
4009                 return ret;
4010
4011         cons_filter_type = RTE_ETH_FILTER_TUNNEL;
4012
4013         return ret;
4014 }
4015
4016 /* 1. Last in item should be NULL as range is not supported.
4017  * 2. Supported filter types: QINQ.
4018  * 3. Mask of fields which need to be matched should be
4019  *    filled with 1.
4020  * 4. Mask of fields which needn't to be matched should be
4021  *    filled with 0.
4022  */
/* Parse a QinQ pattern (ETH / VLAN / VLAN) into a cloud tunnel filter.
 * Both the outer and the inner VLAN TCI must be fully masked with
 * I40E_TCI_MASK; the ETH item is a placeholder with NULL spec/mask.
 * Returns 0 on success, -rte_errno (with @error filled) on failure.
 */
static int
i40e_flow_parse_qinq_pattern(__rte_unused struct rte_eth_dev *dev,
			      const struct rte_flow_item *pattern,
			      struct rte_flow_error *error,
			      struct i40e_tunnel_filter_conf *filter)
{
	const struct rte_flow_item *item = pattern;
	const struct rte_flow_item_vlan *vlan_spec = NULL;
	const struct rte_flow_item_vlan *vlan_mask = NULL;
	const struct rte_flow_item_vlan *i_vlan_spec = NULL;
	const struct rte_flow_item_vlan *i_vlan_mask = NULL;
	const struct rte_flow_item_vlan *o_vlan_spec = NULL;
	const struct rte_flow_item_vlan *o_vlan_mask = NULL;

	enum rte_flow_item_type item_type;
	bool vlan_flag = 0; /* 0 - next VLAN is outer, 1 - next is inner */

	for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
		/* Ranges (item->last) are not supported for any item. */
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   item,
					   "Not support range");
			return -rte_errno;
		}
		item_type = item->type;
		switch (item_type) {
		case RTE_FLOW_ITEM_TYPE_ETH:
			/* ETH is a protocol placeholder only. */
			if (item->spec || item->mask) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid ETH item");
				return -rte_errno;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_VLAN:
			vlan_spec =
				(const struct rte_flow_item_vlan *)item->spec;
			vlan_mask =
				(const struct rte_flow_item_vlan *)item->mask;

			/* VLAN carries a matched field, so both spec and
			 * mask are mandatory.
			 */
			if (!(vlan_spec && vlan_mask)) {
				rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   item,
					   "Invalid vlan item");
				return -rte_errno;
			}

			/* First VLAN item is the outer tag, second the
			 * inner one.  NOTE(review): the flag toggles back
			 * to 0 here, so a third VLAN item would silently
			 * overwrite the outer tag rather than being
			 * rejected -- confirm patterns are pre-validated
			 * upstream.
			 */
			if (!vlan_flag) {
				o_vlan_spec = vlan_spec;
				o_vlan_mask = vlan_mask;
				vlan_flag = 1;
			} else {
				i_vlan_spec = vlan_spec;
				i_vlan_mask = vlan_mask;
				vlan_flag = 0;
			}
			break;

		default:
			break;
		}
	}

	/* Get filter specification: both TCIs must be fully masked. */
	if ((o_vlan_mask != NULL) && (o_vlan_mask->tci ==
			rte_cpu_to_be_16(I40E_TCI_MASK)) &&
			(i_vlan_mask != NULL) &&
			(i_vlan_mask->tci == rte_cpu_to_be_16(I40E_TCI_MASK))) {
		filter->outer_vlan = rte_be_to_cpu_16(o_vlan_spec->tci)
			& I40E_TCI_MASK;
		filter->inner_vlan = rte_be_to_cpu_16(i_vlan_spec->tci)
			& I40E_TCI_MASK;
	} else {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   NULL,
					   "Invalid filter type");
			return -rte_errno;
	}

	filter->tunnel_type = I40E_TUNNEL_TYPE_QINQ;
	return 0;
}
4109
4110 static int
4111 i40e_flow_parse_qinq_filter(struct rte_eth_dev *dev,
4112                               const struct rte_flow_attr *attr,
4113                               const struct rte_flow_item pattern[],
4114                               const struct rte_flow_action actions[],
4115                               struct rte_flow_error *error,
4116                               union i40e_filter_t *filter)
4117 {
4118         struct i40e_tunnel_filter_conf *tunnel_filter =
4119                 &filter->consistent_tunnel_filter;
4120         int ret;
4121
4122         ret = i40e_flow_parse_qinq_pattern(dev, pattern,
4123                                              error, tunnel_filter);
4124         if (ret)
4125                 return ret;
4126
4127         ret = i40e_flow_parse_tunnel_action(dev, actions, error, tunnel_filter);
4128         if (ret)
4129                 return ret;
4130
4131         ret = i40e_flow_parse_attr(attr, error);
4132         if (ret)
4133                 return ret;
4134
4135         cons_filter_type = RTE_ETH_FILTER_TUNNEL;
4136
4137         return ret;
4138 }
4139
4140 static int
4141 i40e_flow_validate(struct rte_eth_dev *dev,
4142                    const struct rte_flow_attr *attr,
4143                    const struct rte_flow_item pattern[],
4144                    const struct rte_flow_action actions[],
4145                    struct rte_flow_error *error)
4146 {
4147         struct rte_flow_item *items; /* internal pattern w/o VOID items */
4148         parse_filter_t parse_filter;
4149         uint32_t item_num = 0; /* non-void item number of pattern*/
4150         uint32_t i = 0;
4151         bool flag = false;
4152         int ret = I40E_NOT_SUPPORTED;
4153
4154         if (!pattern) {
4155                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
4156                                    NULL, "NULL pattern.");
4157                 return -rte_errno;
4158         }
4159
4160         if (!actions) {
4161                 rte_flow_error_set(error, EINVAL,
4162                                    RTE_FLOW_ERROR_TYPE_ACTION_NUM,
4163                                    NULL, "NULL action.");
4164                 return -rte_errno;
4165         }
4166
4167         if (!attr) {
4168                 rte_flow_error_set(error, EINVAL,
4169                                    RTE_FLOW_ERROR_TYPE_ATTR,
4170                                    NULL, "NULL attribute.");
4171                 return -rte_errno;
4172         }
4173
4174         memset(&cons_filter, 0, sizeof(cons_filter));
4175
4176         /* Get the non-void item number of pattern */
4177         while ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_END) {
4178                 if ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_VOID)
4179                         item_num++;
4180                 i++;
4181         }
4182         item_num++;
4183
4184         items = rte_zmalloc("i40e_pattern",
4185                             item_num * sizeof(struct rte_flow_item), 0);
4186         if (!items) {
4187                 rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
4188                                    NULL, "No memory for PMD internal items.");
4189                 return -ENOMEM;
4190         }
4191
4192         i40e_pattern_skip_void_item(items, pattern);
4193
4194         i = 0;
4195         do {
4196                 parse_filter = i40e_find_parse_filter_func(items, &i);
4197                 if (!parse_filter && !flag) {
4198                         rte_flow_error_set(error, EINVAL,
4199                                            RTE_FLOW_ERROR_TYPE_ITEM,
4200                                            pattern, "Unsupported pattern");
4201                         rte_free(items);
4202                         return -rte_errno;
4203                 }
4204                 if (parse_filter)
4205                         ret = parse_filter(dev, attr, items, actions,
4206                                            error, &cons_filter);
4207                 flag = true;
4208         } while ((ret < 0) && (i < RTE_DIM(i40e_supported_patterns)));
4209
4210         rte_free(items);
4211
4212         return ret;
4213 }
4214
4215 static struct rte_flow *
4216 i40e_flow_create(struct rte_eth_dev *dev,
4217                  const struct rte_flow_attr *attr,
4218                  const struct rte_flow_item pattern[],
4219                  const struct rte_flow_action actions[],
4220                  struct rte_flow_error *error)
4221 {
4222         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4223         struct rte_flow *flow;
4224         int ret;
4225
4226         flow = rte_zmalloc("i40e_flow", sizeof(struct rte_flow), 0);
4227         if (!flow) {
4228                 rte_flow_error_set(error, ENOMEM,
4229                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
4230                                    "Failed to allocate memory");
4231                 return flow;
4232         }
4233
4234         ret = i40e_flow_validate(dev, attr, pattern, actions, error);
4235         if (ret < 0)
4236                 return NULL;
4237
4238         switch (cons_filter_type) {
4239         case RTE_ETH_FILTER_ETHERTYPE:
4240                 ret = i40e_ethertype_filter_set(pf,
4241                                         &cons_filter.ethertype_filter, 1);
4242                 if (ret)
4243                         goto free_flow;
4244                 flow->rule = TAILQ_LAST(&pf->ethertype.ethertype_list,
4245                                         i40e_ethertype_filter_list);
4246                 break;
4247         case RTE_ETH_FILTER_FDIR:
4248                 ret = i40e_flow_add_del_fdir_filter(dev,
4249                                        &cons_filter.fdir_filter, 1);
4250                 if (ret)
4251                         goto free_flow;
4252                 flow->rule = TAILQ_LAST(&pf->fdir.fdir_list,
4253                                         i40e_fdir_filter_list);
4254                 break;
4255         case RTE_ETH_FILTER_TUNNEL:
4256                 ret = i40e_dev_consistent_tunnel_filter_set(pf,
4257                             &cons_filter.consistent_tunnel_filter, 1);
4258                 if (ret)
4259                         goto free_flow;
4260                 flow->rule = TAILQ_LAST(&pf->tunnel.tunnel_list,
4261                                         i40e_tunnel_filter_list);
4262                 break;
4263         default:
4264                 goto free_flow;
4265         }
4266
4267         flow->filter_type = cons_filter_type;
4268         TAILQ_INSERT_TAIL(&pf->flow_list, flow, node);
4269         return flow;
4270
4271 free_flow:
4272         rte_flow_error_set(error, -ret,
4273                            RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
4274                            "Failed to create flow.");
4275         rte_free(flow);
4276         return NULL;
4277 }
4278
4279 static int
4280 i40e_flow_destroy(struct rte_eth_dev *dev,
4281                   struct rte_flow *flow,
4282                   struct rte_flow_error *error)
4283 {
4284         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4285         enum rte_filter_type filter_type = flow->filter_type;
4286         int ret = 0;
4287
4288         switch (filter_type) {
4289         case RTE_ETH_FILTER_ETHERTYPE:
4290                 ret = i40e_flow_destroy_ethertype_filter(pf,
4291                          (struct i40e_ethertype_filter *)flow->rule);
4292                 break;
4293         case RTE_ETH_FILTER_TUNNEL:
4294                 ret = i40e_flow_destroy_tunnel_filter(pf,
4295                               (struct i40e_tunnel_filter *)flow->rule);
4296                 break;
4297         case RTE_ETH_FILTER_FDIR:
4298                 ret = i40e_flow_add_del_fdir_filter(dev,
4299                        &((struct i40e_fdir_filter *)flow->rule)->fdir, 0);
4300                 break;
4301         default:
4302                 PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
4303                             filter_type);
4304                 ret = -EINVAL;
4305                 break;
4306         }
4307
4308         if (!ret) {
4309                 TAILQ_REMOVE(&pf->flow_list, flow, node);
4310                 rte_free(flow);
4311         } else
4312                 rte_flow_error_set(error, -ret,
4313                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
4314                                    "Failed to destroy flow.");
4315
4316         return ret;
4317 }
4318
4319 static int
4320 i40e_flow_destroy_ethertype_filter(struct i40e_pf *pf,
4321                                    struct i40e_ethertype_filter *filter)
4322 {
4323         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
4324         struct i40e_ethertype_rule *ethertype_rule = &pf->ethertype;
4325         struct i40e_ethertype_filter *node;
4326         struct i40e_control_filter_stats stats;
4327         uint16_t flags = 0;
4328         int ret = 0;
4329
4330         if (!(filter->flags & RTE_ETHTYPE_FLAGS_MAC))
4331                 flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC;
4332         if (filter->flags & RTE_ETHTYPE_FLAGS_DROP)
4333                 flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP;
4334         flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE;
4335
4336         memset(&stats, 0, sizeof(stats));
4337         ret = i40e_aq_add_rem_control_packet_filter(hw,
4338                                     filter->input.mac_addr.addr_bytes,
4339                                     filter->input.ether_type,
4340                                     flags, pf->main_vsi->seid,
4341                                     filter->queue, 0, &stats, NULL);
4342         if (ret < 0)
4343                 return ret;
4344
4345         node = i40e_sw_ethertype_filter_lookup(ethertype_rule, &filter->input);
4346         if (!node)
4347                 return -EINVAL;
4348
4349         ret = i40e_sw_ethertype_filter_del(pf, &node->input);
4350
4351         return ret;
4352 }
4353
/* Remove a cloud (tunnel) filter from hardware and delete the matching
 * node from the PMD's SW tunnel rule list.
 *
 * @pf:     physical function context
 * @filter: SW representation of the tunnel filter to remove
 *
 * Returns 0 on success, -ENOTSUP if the AQ remove command fails,
 * -EINVAL if no matching SW node is found, or the SW-delete result.
 */
static int
i40e_flow_destroy_tunnel_filter(struct i40e_pf *pf,
				struct i40e_tunnel_filter *filter)
{
	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
	struct i40e_vsi *vsi;
	struct i40e_pf_vf *vf;
	struct i40e_aqc_add_rm_cloud_filt_elem_ext cld_filter;
	struct i40e_tunnel_rule *tunnel_rule = &pf->tunnel;
	struct i40e_tunnel_filter *node;
	bool big_buffer = 0;
	int ret = 0;

	/* Rebuild the AQ element from the SW filter so the remove command
	 * matches the entry that was originally programmed.
	 */
	memset(&cld_filter, 0, sizeof(cld_filter));
	ether_addr_copy((struct ether_addr *)&filter->input.outer_mac,
			(struct ether_addr *)&cld_filter.element.outer_mac);
	ether_addr_copy((struct ether_addr *)&filter->input.inner_mac,
			(struct ether_addr *)&cld_filter.element.inner_mac);
	cld_filter.element.inner_vlan = filter->input.inner_vlan;
	cld_filter.element.flags = filter->input.flags;
	cld_filter.element.tenant_id = filter->input.tenant_id;
	cld_filter.element.queue_number = filter->queue;
	/* general_fields carry the extended (big-buffer) match data. */
	rte_memcpy(cld_filter.general_fields,
		   filter->input.general_fields,
		   sizeof(cld_filter.general_fields));

	/* Filters destined to a VF use that VF's VSI; otherwise the main
	 * PF VSI. NOTE(review): vf_id is assumed already validated at
	 * creation time — no range check here.
	 */
	if (!filter->is_to_vf)
		vsi = pf->main_vsi;
	else {
		vf = &pf->vfs[filter->vf_id];
		vsi = vf->vsi;
	}

	/* The 0X10/0X11/0X12 customized filter types require the
	 * big-buffer variant of the cloud-filter AQ command; exact-value
	 * comparison (not just bit test) because the flag values overlap.
	 */
	if (((filter->input.flags & I40E_AQC_ADD_CLOUD_FILTER_0X11) ==
	    I40E_AQC_ADD_CLOUD_FILTER_0X11) ||
	    ((filter->input.flags & I40E_AQC_ADD_CLOUD_FILTER_0X12) ==
	    I40E_AQC_ADD_CLOUD_FILTER_0X12) ||
	    ((filter->input.flags & I40E_AQC_ADD_CLOUD_FILTER_0X10) ==
	    I40E_AQC_ADD_CLOUD_FILTER_0X10))
		big_buffer = 1;

	if (big_buffer)
		ret = i40e_aq_remove_cloud_filters_big_buffer(hw, vsi->seid,
							      &cld_filter, 1);
	else
		ret = i40e_aq_remove_cloud_filters(hw, vsi->seid,
						   &cld_filter.element, 1);
	if (ret < 0)
		return -ENOTSUP;

	/* HW removal done; retire the SW bookkeeping node. */
	node = i40e_sw_tunnel_filter_lookup(tunnel_rule, &filter->input);
	if (!node)
		return -EINVAL;

	ret = i40e_sw_tunnel_filter_del(pf, &node->input);

	return ret;
}
4412
4413 static int
4414 i40e_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
4415 {
4416         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4417         int ret;
4418
4419         ret = i40e_flow_flush_fdir_filter(pf);
4420         if (ret) {
4421                 rte_flow_error_set(error, -ret,
4422                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
4423                                    "Failed to flush FDIR flows.");
4424                 return -rte_errno;
4425         }
4426
4427         ret = i40e_flow_flush_ethertype_filter(pf);
4428         if (ret) {
4429                 rte_flow_error_set(error, -ret,
4430                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
4431                                    "Failed to ethertype flush flows.");
4432                 return -rte_errno;
4433         }
4434
4435         ret = i40e_flow_flush_tunnel_filter(pf);
4436         if (ret) {
4437                 rte_flow_error_set(error, -ret,
4438                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
4439                                    "Failed to flush tunnel flows.");
4440                 return -rte_errno;
4441         }
4442
4443         return ret;
4444 }
4445
/* Flush all flow director filters: clear the HW FDIR table, then drop
 * every FDIR node from the SW filter list and the flow list, and reset
 * the per-pctype input-set bookkeeping.
 *
 * Returns 0 on success, or the first failing helper's negative code.
 */
static int
i40e_flow_flush_fdir_filter(struct i40e_pf *pf)
{
	struct rte_eth_dev *dev = pf->adapter->eth_dev;
	struct i40e_fdir_info *fdir_info = &pf->fdir;
	struct i40e_fdir_filter *fdir_filter;
	enum i40e_filter_pctype pctype;
	struct rte_flow *flow;
	void *temp;
	int ret;

	/* One HW-level flush removes all FDIR entries at once; the SW
	 * lists are only cleaned up if that succeeds.
	 */
	ret = i40e_fdir_flush(dev);
	if (!ret) {
		/* Delete FDIR filters in FDIR list. */
		while ((fdir_filter = TAILQ_FIRST(&fdir_info->fdir_list))) {
			ret = i40e_sw_fdir_filter_del(pf,
						      &fdir_filter->fdir.input);
			if (ret < 0)
				return ret;
		}

		/* Delete FDIR flows in flow list. */
		TAILQ_FOREACH_SAFE(flow, &pf->flow_list, node, temp) {
			if (flow->filter_type == RTE_ETH_FILTER_FDIR) {
				TAILQ_REMOVE(&pf->flow_list, flow, node);
				rte_free(flow);
			}
		}

		/* Clear the configured-input-set flag for every flow-type
		 * pctype so a future rule may program a new input set.
		 * NOTE(review): assumes NONF_IPV4_UDP..L2_PAYLOAD is the
		 * full contiguous range of FDIR-capable pctypes — confirm
		 * against the i40e_filter_pctype enum definition.
		 */
		for (pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
		     pctype <= I40E_FILTER_PCTYPE_L2_PAYLOAD; pctype++)
			pf->fdir.inset_flag[pctype] = 0;
	}

	return ret;
}
4482
4483 /* Flush all ethertype filters */
4484 static int
4485 i40e_flow_flush_ethertype_filter(struct i40e_pf *pf)
4486 {
4487         struct i40e_ethertype_filter_list
4488                 *ethertype_list = &pf->ethertype.ethertype_list;
4489         struct i40e_ethertype_filter *filter;
4490         struct rte_flow *flow;
4491         void *temp;
4492         int ret = 0;
4493
4494         while ((filter = TAILQ_FIRST(ethertype_list))) {
4495                 ret = i40e_flow_destroy_ethertype_filter(pf, filter);
4496                 if (ret)
4497                         return ret;
4498         }
4499
4500         /* Delete ethertype flows in flow list. */
4501         TAILQ_FOREACH_SAFE(flow, &pf->flow_list, node, temp) {
4502                 if (flow->filter_type == RTE_ETH_FILTER_ETHERTYPE) {
4503                         TAILQ_REMOVE(&pf->flow_list, flow, node);
4504                         rte_free(flow);
4505                 }
4506         }
4507
4508         return ret;
4509 }
4510
4511 /* Flush all tunnel filters */
4512 static int
4513 i40e_flow_flush_tunnel_filter(struct i40e_pf *pf)
4514 {
4515         struct i40e_tunnel_filter_list
4516                 *tunnel_list = &pf->tunnel.tunnel_list;
4517         struct i40e_tunnel_filter *filter;
4518         struct rte_flow *flow;
4519         void *temp;
4520         int ret = 0;
4521
4522         while ((filter = TAILQ_FIRST(tunnel_list))) {
4523                 ret = i40e_flow_destroy_tunnel_filter(pf, filter);
4524                 if (ret)
4525                         return ret;
4526         }
4527
4528         /* Delete tunnel flows in flow list. */
4529         TAILQ_FOREACH_SAFE(flow, &pf->flow_list, node, temp) {
4530                 if (flow->filter_type == RTE_ETH_FILTER_TUNNEL) {
4531                         TAILQ_REMOVE(&pf->flow_list, flow, node);
4532                         rte_free(flow);
4533                 }
4534         }
4535
4536         return ret;
4537 }