New upstream version 17.11.1
deb_dpdk.git: drivers/net/sfc/sfc_flow.c
/*-
 *   BSD LICENSE
 *
 * Copyright (c) 2017 Solarflare Communications Inc.
 * All rights reserved.
 *
 * This software was jointly developed between OKTET Labs (under contract
 * for Solarflare) and Solarflare Communications, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
 * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <rte_tailq.h>
#include <rte_common.h>
#include <rte_ethdev.h>
#include <rte_eth_ctrl.h>
#include <rte_ether.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>

#include "efx.h"

#include "sfc.h"
#include "sfc_rx.h"
#include "sfc_filter.h"
#include "sfc_flow.h"
#include "sfc_log.h"

/*
 * For now, the flow API is implemented in such a manner that each
 * flow rule is converted to a hardware filter.
 * All elements of a flow rule (attributes, pattern items, actions)
 * correspond to one or more fields in the efx_filter_spec_s structure
 * that describes the hardware filter.
 */
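
/*
 * Illustrative sketch (not part of the driver): a rule with pattern
 * ETH / IPV4 (dst 192.0.2.1) / TCP (dst port 80) and action
 * QUEUE (index 3) would end up as an efx_filter_spec_t along the
 * lines of (field names are real, values hypothetical):
 *
 *   spec.efs_match_flags = EFX_FILTER_MATCH_ETHER_TYPE |
 *                          EFX_FILTER_MATCH_LOC_HOST |
 *                          EFX_FILTER_MATCH_IP_PROTO |
 *                          EFX_FILTER_MATCH_LOC_PORT;
 *   spec.efs_ether_type = EFX_ETHER_TYPE_IPV4;
 *   spec.efs_loc_host.eo_u32[0] = <192.0.2.1, big-endian>;
 *   spec.efs_ip_proto = EFX_IPPROTO_TCP;
 *   spec.efs_loc_port = 80;   <little-endian in efx_spec>
 *   spec.efs_dmaq_id = <hardware index of Rx queue 3>;
 */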

enum sfc_flow_item_layers {
        SFC_FLOW_ITEM_ANY_LAYER,
        SFC_FLOW_ITEM_START_LAYER,
        SFC_FLOW_ITEM_L2,
        SFC_FLOW_ITEM_L3,
        SFC_FLOW_ITEM_L4,
};

typedef int (sfc_flow_item_parse)(const struct rte_flow_item *item,
                                  efx_filter_spec_t *spec,
                                  struct rte_flow_error *error);

struct sfc_flow_item {
        enum rte_flow_item_type type;           /* Type of item */
        enum sfc_flow_item_layers layer;        /* Layer of item */
        enum sfc_flow_item_layers prev_layer;   /* Previous layer of item */
        sfc_flow_item_parse *parse;             /* Parsing function */
};

static sfc_flow_item_parse sfc_flow_parse_void;
static sfc_flow_item_parse sfc_flow_parse_eth;
static sfc_flow_item_parse sfc_flow_parse_vlan;
static sfc_flow_item_parse sfc_flow_parse_ipv4;
static sfc_flow_item_parse sfc_flow_parse_ipv6;
static sfc_flow_item_parse sfc_flow_parse_tcp;
static sfc_flow_item_parse sfc_flow_parse_udp;

static boolean_t
sfc_flow_is_zero(const uint8_t *buf, unsigned int size)
{
        uint8_t sum = 0;
        unsigned int i;

        for (i = 0; i < size; i++)
                sum |= buf[i];

        return (sum == 0) ? B_TRUE : B_FALSE;
}

/*
 * Validate the item and prepare the spec and mask structures for parsing
 */
static int
sfc_flow_parse_init(const struct rte_flow_item *item,
                    const void **spec_ptr,
                    const void **mask_ptr,
                    const void *supp_mask,
                    const void *def_mask,
                    unsigned int size,
                    struct rte_flow_error *error)
{
        const uint8_t *spec;
        const uint8_t *mask;
        const uint8_t *last;
        uint8_t match;
        uint8_t supp;
        unsigned int i;

        if (item == NULL) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ITEM, NULL,
                                   "NULL item");
                return -rte_errno;
        }

        if ((item->last != NULL || item->mask != NULL) && item->spec == NULL) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ITEM, item,
                                   "Mask or last is set without spec");
                return -rte_errno;
        }

        /*
         * If "mask" is not set, the default mask is used,
         * but if the default mask is NULL, "mask" must be set
         */
        if (item->mask == NULL) {
                if (def_mask == NULL) {
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM, NULL,
                                "Mask should be specified");
                        return -rte_errno;
                }

                mask = (const uint8_t *)def_mask;
        } else {
                mask = (const uint8_t *)item->mask;
        }

        spec = (const uint8_t *)item->spec;
        last = (const uint8_t *)item->last;

        if (spec == NULL)
                goto exit;

        /*
         * If field values in "last" are either 0 or equal to the corresponding
         * values in "spec", they are ignored
         */
        if (last != NULL &&
            !sfc_flow_is_zero(last, size) &&
            memcmp(last, spec, size) != 0) {
                rte_flow_error_set(error, ENOTSUP,
                                   RTE_FLOW_ERROR_TYPE_ITEM, item,
                                   "Ranging is not supported");
                return -rte_errno;
        }

        if (supp_mask == NULL) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                        "Supported mask for item should be specified");
                return -rte_errno;
        }

        /* Check that mask and spec do not ask for more match than supp_mask */
        for (i = 0; i < size; i++) {
                match = spec[i] | mask[i];
                supp = ((const uint8_t *)supp_mask)[i];

                if ((match | supp) != supp) {
                        rte_flow_error_set(error, ENOTSUP,
                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
                                           "Item's field is not supported");
                        return -rte_errno;
                }
        }

exit:
        *spec_ptr = spec;
        *mask_ptr = mask;
        return 0;
}
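
/*
 * For illustration (not part of the driver): in the VLAN parser below,
 * supp_mask covers only the VID bits of TCI. An item whose mask also
 * asks to match the PCP/DEI bits, e.g.
 *
 *   struct rte_flow_item_vlan vlan_mask = {
 *           .tci = rte_cpu_to_be_16(0xffff),
 *   };
 *
 * makes "(match | supp) != supp" true for the first TCI byte, so
 * sfc_flow_parse_init() rejects the item with ENOTSUP.
 */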

/*
 * Protocol parsers.
 * Masking is not supported, so masks in items should be either
 * full or empty (zeroed) and set only for the supported fields
 * specified in supp_mask.
 */

static int
sfc_flow_parse_void(__rte_unused const struct rte_flow_item *item,
                    __rte_unused efx_filter_spec_t *efx_spec,
                    __rte_unused struct rte_flow_error *error)
{
        return 0;
}

/**
 * Convert Ethernet item to EFX filter specification.
 *
 * @param[in] item
 *   Item specification. Only source and destination addresses and
 *   Ethernet type fields are supported. In addition to full and
 *   empty masks of destination address, the individual/group mask is
 *   also supported. If the mask is NULL, the default mask will be used.
 *   Ranging is not supported.
 * @param[in, out] efx_spec
 *   EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_eth(const struct rte_flow_item *item,
                   efx_filter_spec_t *efx_spec,
                   struct rte_flow_error *error)
{
        int rc;
        const struct rte_flow_item_eth *spec = NULL;
        const struct rte_flow_item_eth *mask = NULL;
        const struct rte_flow_item_eth supp_mask = {
                .dst.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
                .src.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
                .type = 0xffff,
        };
        const uint8_t ig_mask[EFX_MAC_ADDR_LEN] = {
                0x01, 0x00, 0x00, 0x00, 0x00, 0x00
        };

        rc = sfc_flow_parse_init(item,
                                 (const void **)&spec,
                                 (const void **)&mask,
                                 &supp_mask,
                                 &rte_flow_item_eth_mask,
                                 sizeof(struct rte_flow_item_eth),
                                 error);
        if (rc != 0)
                return rc;

        /* If "spec" is not set, the item matches any Ethernet frame */
        if (spec == NULL)
                return 0;

        if (is_same_ether_addr(&mask->dst, &supp_mask.dst)) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_MAC;
                rte_memcpy(efx_spec->efs_loc_mac, spec->dst.addr_bytes,
                           EFX_MAC_ADDR_LEN);
        } else if (memcmp(mask->dst.addr_bytes, ig_mask,
                          EFX_MAC_ADDR_LEN) == 0) {
                if (is_unicast_ether_addr(&spec->dst))
                        efx_spec->efs_match_flags |=
                                EFX_FILTER_MATCH_UNKNOWN_UCAST_DST;
                else
                        efx_spec->efs_match_flags |=
                                EFX_FILTER_MATCH_UNKNOWN_MCAST_DST;
        } else if (!is_zero_ether_addr(&mask->dst)) {
                goto fail_bad_mask;
        }

        if (is_same_ether_addr(&mask->src, &supp_mask.src)) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_MAC;
                rte_memcpy(efx_spec->efs_rem_mac, spec->src.addr_bytes,
                           EFX_MAC_ADDR_LEN);
        } else if (!is_zero_ether_addr(&mask->src)) {
                goto fail_bad_mask;
        }

        /*
         * Ether type is in big-endian byte order in the item and
         * in little-endian in efx_spec, so byte swap is used
         */
        if (mask->type == supp_mask.type) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
                efx_spec->efs_ether_type = rte_bswap16(spec->type);
        } else if (mask->type != 0) {
                goto fail_bad_mask;
        }

        return 0;

fail_bad_mask:
        rte_flow_error_set(error, EINVAL,
                           RTE_FLOW_ERROR_TYPE_ITEM, item,
                           "Bad mask in the ETH pattern item");
        return -rte_errno;
}
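
/*
 * Illustrative sketch (not part of the driver): to match all multicast
 * traffic via the individual/group bit, an application can pass a
 * destination mask equal to ig_mask above:
 *
 *   struct rte_flow_item_eth eth_spec = {
 *           .dst.addr_bytes = { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00 },
 *   };
 *   struct rte_flow_item_eth eth_mask = {
 *           .dst.addr_bytes = { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00 },
 *   };
 *
 * Since eth_spec.dst has the group bit set, the parser above selects
 * EFX_FILTER_MATCH_UNKNOWN_MCAST_DST; with the group bit clear in the
 * spec it would select EFX_FILTER_MATCH_UNKNOWN_UCAST_DST.
 */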

/**
 * Convert VLAN item to EFX filter specification.
 *
 * @param[in] item
 *   Item specification. Only the VID field is supported.
 *   The mask cannot be NULL. Ranging is not supported.
 * @param[in, out] efx_spec
 *   EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_vlan(const struct rte_flow_item *item,
                    efx_filter_spec_t *efx_spec,
                    struct rte_flow_error *error)
{
        int rc;
        uint16_t vid;
        const struct rte_flow_item_vlan *spec = NULL;
        const struct rte_flow_item_vlan *mask = NULL;
        const struct rte_flow_item_vlan supp_mask = {
                .tci = rte_cpu_to_be_16(ETH_VLAN_ID_MAX),
        };

        rc = sfc_flow_parse_init(item,
                                 (const void **)&spec,
                                 (const void **)&mask,
                                 &supp_mask,
                                 NULL,
                                 sizeof(struct rte_flow_item_vlan),
                                 error);
        if (rc != 0)
                return rc;

        /*
         * VID is in big-endian byte order in the item and
         * in little-endian in efx_spec, so byte swap is used.
         * If two VLAN items are included, the first matches
         * the outer tag and the next matches the inner tag.
         */
        if (mask->tci == supp_mask.tci) {
                vid = rte_bswap16(spec->tci);

                if (!(efx_spec->efs_match_flags &
                      EFX_FILTER_MATCH_OUTER_VID)) {
                        efx_spec->efs_match_flags |= EFX_FILTER_MATCH_OUTER_VID;
                        efx_spec->efs_outer_vid = vid;
                } else if (!(efx_spec->efs_match_flags &
                             EFX_FILTER_MATCH_INNER_VID)) {
                        efx_spec->efs_match_flags |= EFX_FILTER_MATCH_INNER_VID;
                        efx_spec->efs_inner_vid = vid;
                } else {
                        rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
                                           "More than two VLAN items");
                        return -rte_errno;
                }
        } else {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ITEM, item,
                                   "VLAN ID in TCI match is required");
                return -rte_errno;
        }

        return 0;
}
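
/*
 * Illustrative sketch (not part of the driver): a QinQ pattern
 * ETH / VLAN (vid = 100) / VLAN (vid = 200) sets efs_outer_vid = 100
 * from the first VLAN item and efs_inner_vid = 200 from the second;
 * a third VLAN item is rejected with "More than two VLAN items".
 */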

/**
 * Convert IPv4 item to EFX filter specification.
 *
 * @param[in] item
 *   Item specification. Only source and destination addresses and
 *   protocol fields are supported. If the mask is NULL, the default
 *   mask will be used. Ranging is not supported.
 * @param[in, out] efx_spec
 *   EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_ipv4(const struct rte_flow_item *item,
                    efx_filter_spec_t *efx_spec,
                    struct rte_flow_error *error)
{
        int rc;
        const struct rte_flow_item_ipv4 *spec = NULL;
        const struct rte_flow_item_ipv4 *mask = NULL;
        const uint16_t ether_type_ipv4 = rte_cpu_to_le_16(EFX_ETHER_TYPE_IPV4);
        const struct rte_flow_item_ipv4 supp_mask = {
                .hdr = {
                        .src_addr = 0xffffffff,
                        .dst_addr = 0xffffffff,
                        .next_proto_id = 0xff,
                }
        };

        rc = sfc_flow_parse_init(item,
                                 (const void **)&spec,
                                 (const void **)&mask,
                                 &supp_mask,
                                 &rte_flow_item_ipv4_mask,
                                 sizeof(struct rte_flow_item_ipv4),
                                 error);
        if (rc != 0)
                return rc;

        /*
         * Filtering by IPv4 source and destination addresses requires
         * the appropriate ETHER_TYPE in hardware filters
         */
        if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE)) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
                efx_spec->efs_ether_type = ether_type_ipv4;
        } else if (efx_spec->efs_ether_type != ether_type_ipv4) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM, item,
                        "Ethertype in pattern with IPV4 item should be appropriate");
                return -rte_errno;
        }

        if (spec == NULL)
                return 0;

        /*
         * IPv4 addresses are in big-endian byte order in both the item
         * and efx_spec
         */
        if (mask->hdr.src_addr == supp_mask.hdr.src_addr) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_HOST;
                efx_spec->efs_rem_host.eo_u32[0] = spec->hdr.src_addr;
        } else if (mask->hdr.src_addr != 0) {
                goto fail_bad_mask;
        }

        if (mask->hdr.dst_addr == supp_mask.hdr.dst_addr) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_HOST;
                efx_spec->efs_loc_host.eo_u32[0] = spec->hdr.dst_addr;
        } else if (mask->hdr.dst_addr != 0) {
                goto fail_bad_mask;
        }

        if (mask->hdr.next_proto_id == supp_mask.hdr.next_proto_id) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
                efx_spec->efs_ip_proto = spec->hdr.next_proto_id;
        } else if (mask->hdr.next_proto_id != 0) {
                goto fail_bad_mask;
        }

        return 0;

fail_bad_mask:
        rte_flow_error_set(error, EINVAL,
                           RTE_FLOW_ERROR_TYPE_ITEM, item,
                           "Bad mask in the IPV4 pattern item");
        return -rte_errno;
}
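
/*
 * Illustrative sketch (not part of the driver): because an IPV4 item
 * forces EFX_FILTER_MATCH_ETHER_TYPE, a pattern such as
 * ETH (type = 0x0800) / IPV4 parses cleanly, while
 * ETH (type = 0x86DD) / IPV4 fails the efs_ether_type consistency
 * check above with EINVAL.
 */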

/**
 * Convert IPv6 item to EFX filter specification.
 *
 * @param[in] item
 *   Item specification. Only source and destination addresses and
 *   next header fields are supported. If the mask is NULL, the default
 *   mask will be used. Ranging is not supported.
 * @param[in, out] efx_spec
 *   EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_ipv6(const struct rte_flow_item *item,
                    efx_filter_spec_t *efx_spec,
                    struct rte_flow_error *error)
{
        int rc;
        const struct rte_flow_item_ipv6 *spec = NULL;
        const struct rte_flow_item_ipv6 *mask = NULL;
        const uint16_t ether_type_ipv6 = rte_cpu_to_le_16(EFX_ETHER_TYPE_IPV6);
        const struct rte_flow_item_ipv6 supp_mask = {
                .hdr = {
                        .src_addr = { 0xff, 0xff, 0xff, 0xff,
                                      0xff, 0xff, 0xff, 0xff,
                                      0xff, 0xff, 0xff, 0xff,
                                      0xff, 0xff, 0xff, 0xff },
                        .dst_addr = { 0xff, 0xff, 0xff, 0xff,
                                      0xff, 0xff, 0xff, 0xff,
                                      0xff, 0xff, 0xff, 0xff,
                                      0xff, 0xff, 0xff, 0xff },
                        .proto = 0xff,
                }
        };

        rc = sfc_flow_parse_init(item,
                                 (const void **)&spec,
                                 (const void **)&mask,
                                 &supp_mask,
                                 &rte_flow_item_ipv6_mask,
                                 sizeof(struct rte_flow_item_ipv6),
                                 error);
        if (rc != 0)
                return rc;

        /*
         * Filtering by IPv6 source and destination addresses requires
         * the appropriate ETHER_TYPE in hardware filters
         */
        if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE)) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
                efx_spec->efs_ether_type = ether_type_ipv6;
        } else if (efx_spec->efs_ether_type != ether_type_ipv6) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM, item,
                        "Ethertype in pattern with IPV6 item should be appropriate");
                return -rte_errno;
        }

        if (spec == NULL)
                return 0;

        /*
         * IPv6 addresses are in big-endian byte order in both the item
         * and efx_spec
         */
        if (memcmp(mask->hdr.src_addr, supp_mask.hdr.src_addr,
                   sizeof(mask->hdr.src_addr)) == 0) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_HOST;

                RTE_BUILD_BUG_ON(sizeof(efx_spec->efs_rem_host) !=
                                 sizeof(spec->hdr.src_addr));
                rte_memcpy(&efx_spec->efs_rem_host, spec->hdr.src_addr,
                           sizeof(efx_spec->efs_rem_host));
        } else if (!sfc_flow_is_zero(mask->hdr.src_addr,
                                     sizeof(mask->hdr.src_addr))) {
                goto fail_bad_mask;
        }

        if (memcmp(mask->hdr.dst_addr, supp_mask.hdr.dst_addr,
                   sizeof(mask->hdr.dst_addr)) == 0) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_HOST;

                RTE_BUILD_BUG_ON(sizeof(efx_spec->efs_loc_host) !=
                                 sizeof(spec->hdr.dst_addr));
                rte_memcpy(&efx_spec->efs_loc_host, spec->hdr.dst_addr,
                           sizeof(efx_spec->efs_loc_host));
        } else if (!sfc_flow_is_zero(mask->hdr.dst_addr,
                                     sizeof(mask->hdr.dst_addr))) {
                goto fail_bad_mask;
        }

        if (mask->hdr.proto == supp_mask.hdr.proto) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
                efx_spec->efs_ip_proto = spec->hdr.proto;
        } else if (mask->hdr.proto != 0) {
                goto fail_bad_mask;
        }

        return 0;

fail_bad_mask:
        rte_flow_error_set(error, EINVAL,
                           RTE_FLOW_ERROR_TYPE_ITEM, item,
                           "Bad mask in the IPV6 pattern item");
        return -rte_errno;
}

/**
 * Convert TCP item to EFX filter specification.
 *
 * @param[in] item
 *   Item specification. Only source and destination port fields
 *   are supported. If the mask is NULL, the default mask will be used.
 *   Ranging is not supported.
 * @param[in, out] efx_spec
 *   EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_tcp(const struct rte_flow_item *item,
                   efx_filter_spec_t *efx_spec,
                   struct rte_flow_error *error)
{
        int rc;
        const struct rte_flow_item_tcp *spec = NULL;
        const struct rte_flow_item_tcp *mask = NULL;
        const struct rte_flow_item_tcp supp_mask = {
                .hdr = {
                        .src_port = 0xffff,
                        .dst_port = 0xffff,
                }
        };

        rc = sfc_flow_parse_init(item,
                                 (const void **)&spec,
                                 (const void **)&mask,
                                 &supp_mask,
                                 &rte_flow_item_tcp_mask,
                                 sizeof(struct rte_flow_item_tcp),
                                 error);
        if (rc != 0)
                return rc;

        /*
         * Filtering by TCP source and destination ports requires
         * the appropriate IP_PROTO in hardware filters
         */
        if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_IP_PROTO)) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
                efx_spec->efs_ip_proto = EFX_IPPROTO_TCP;
        } else if (efx_spec->efs_ip_proto != EFX_IPPROTO_TCP) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM, item,
                        "IP proto in pattern with TCP item should be appropriate");
                return -rte_errno;
        }

        if (spec == NULL)
                return 0;

        /*
         * Source and destination ports are in big-endian byte order in the
         * item and in little-endian in efx_spec, so byte swap is used
         */
        if (mask->hdr.src_port == supp_mask.hdr.src_port) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_PORT;
                efx_spec->efs_rem_port = rte_bswap16(spec->hdr.src_port);
        } else if (mask->hdr.src_port != 0) {
                goto fail_bad_mask;
        }

        if (mask->hdr.dst_port == supp_mask.hdr.dst_port) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_PORT;
                efx_spec->efs_loc_port = rte_bswap16(spec->hdr.dst_port);
        } else if (mask->hdr.dst_port != 0) {
                goto fail_bad_mask;
        }

        return 0;

fail_bad_mask:
        rte_flow_error_set(error, EINVAL,
                           RTE_FLOW_ERROR_TYPE_ITEM, item,
                           "Bad mask in the TCP pattern item");
        return -rte_errno;
}
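
/*
 * Illustrative note (not part of the driver): ports are big-endian in
 * the item and little-endian in efx_spec, so for an item with
 * spec->hdr.dst_port = rte_cpu_to_be_16(80) the unconditional
 * rte_bswap16() above stores the value 80 in efs_loc_port in the byte
 * order that efx_spec expects.
 */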

/**
 * Convert UDP item to EFX filter specification.
 *
 * @param[in] item
 *   Item specification. Only source and destination port fields
 *   are supported. If the mask is NULL, the default mask will be used.
 *   Ranging is not supported.
 * @param[in, out] efx_spec
 *   EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_udp(const struct rte_flow_item *item,
                   efx_filter_spec_t *efx_spec,
                   struct rte_flow_error *error)
{
        int rc;
        const struct rte_flow_item_udp *spec = NULL;
        const struct rte_flow_item_udp *mask = NULL;
        const struct rte_flow_item_udp supp_mask = {
                .hdr = {
                        .src_port = 0xffff,
                        .dst_port = 0xffff,
                }
        };

        rc = sfc_flow_parse_init(item,
                                 (const void **)&spec,
                                 (const void **)&mask,
                                 &supp_mask,
                                 &rte_flow_item_udp_mask,
                                 sizeof(struct rte_flow_item_udp),
                                 error);
        if (rc != 0)
                return rc;

        /*
         * Filtering by UDP source and destination ports requires
         * the appropriate IP_PROTO in hardware filters
         */
        if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_IP_PROTO)) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
                efx_spec->efs_ip_proto = EFX_IPPROTO_UDP;
        } else if (efx_spec->efs_ip_proto != EFX_IPPROTO_UDP) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM, item,
                        "IP proto in pattern with UDP item should be appropriate");
                return -rte_errno;
        }

        if (spec == NULL)
                return 0;

        /*
         * Source and destination ports are in big-endian byte order in the
         * item and in little-endian in efx_spec, so byte swap is used
         */
        if (mask->hdr.src_port == supp_mask.hdr.src_port) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_PORT;
                efx_spec->efs_rem_port = rte_bswap16(spec->hdr.src_port);
        } else if (mask->hdr.src_port != 0) {
                goto fail_bad_mask;
        }

        if (mask->hdr.dst_port == supp_mask.hdr.dst_port) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_PORT;
                efx_spec->efs_loc_port = rte_bswap16(spec->hdr.dst_port);
        } else if (mask->hdr.dst_port != 0) {
                goto fail_bad_mask;
        }

        return 0;

fail_bad_mask:
        rte_flow_error_set(error, EINVAL,
                           RTE_FLOW_ERROR_TYPE_ITEM, item,
                           "Bad mask in the UDP pattern item");
        return -rte_errno;
}

static const struct sfc_flow_item sfc_flow_items[] = {
        {
                .type = RTE_FLOW_ITEM_TYPE_VOID,
                .prev_layer = SFC_FLOW_ITEM_ANY_LAYER,
                .layer = SFC_FLOW_ITEM_ANY_LAYER,
                .parse = sfc_flow_parse_void,
        },
        {
                .type = RTE_FLOW_ITEM_TYPE_ETH,
                .prev_layer = SFC_FLOW_ITEM_START_LAYER,
                .layer = SFC_FLOW_ITEM_L2,
                .parse = sfc_flow_parse_eth,
        },
        {
                .type = RTE_FLOW_ITEM_TYPE_VLAN,
                .prev_layer = SFC_FLOW_ITEM_L2,
                .layer = SFC_FLOW_ITEM_L2,
                .parse = sfc_flow_parse_vlan,
        },
        {
                .type = RTE_FLOW_ITEM_TYPE_IPV4,
                .prev_layer = SFC_FLOW_ITEM_L2,
                .layer = SFC_FLOW_ITEM_L3,
                .parse = sfc_flow_parse_ipv4,
        },
        {
                .type = RTE_FLOW_ITEM_TYPE_IPV6,
                .prev_layer = SFC_FLOW_ITEM_L2,
                .layer = SFC_FLOW_ITEM_L3,
                .parse = sfc_flow_parse_ipv6,
        },
        {
                .type = RTE_FLOW_ITEM_TYPE_TCP,
                .prev_layer = SFC_FLOW_ITEM_L3,
                .layer = SFC_FLOW_ITEM_L4,
                .parse = sfc_flow_parse_tcp,
        },
        {
                .type = RTE_FLOW_ITEM_TYPE_UDP,
                .prev_layer = SFC_FLOW_ITEM_L3,
                .layer = SFC_FLOW_ITEM_L4,
                .parse = sfc_flow_parse_udp,
        },
};

/*
 * Protocol-independent flow API support
 */
static int
sfc_flow_parse_attr(const struct rte_flow_attr *attr,
                    struct rte_flow *flow,
                    struct rte_flow_error *error)
{
        if (attr == NULL) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR, NULL,
                                   "NULL attribute");
                return -rte_errno;
        }
        if (attr->group != 0) {
                rte_flow_error_set(error, ENOTSUP,
                                   RTE_FLOW_ERROR_TYPE_ATTR_GROUP, attr,
                                   "Groups are not supported");
                return -rte_errno;
        }
        if (attr->priority != 0) {
                rte_flow_error_set(error, ENOTSUP,
                                   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY, attr,
                                   "Priorities are not supported");
                return -rte_errno;
        }
        if (attr->egress != 0) {
                rte_flow_error_set(error, ENOTSUP,
                                   RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, attr,
                                   "Egress is not supported");
                return -rte_errno;
        }
        if (attr->ingress == 0) {
                rte_flow_error_set(error, ENOTSUP,
                                   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, attr,
                                   "Only ingress is supported");
                return -rte_errno;
        }

        flow->spec.efs_flags |= EFX_FILTER_FLAG_RX;
        flow->spec.efs_rss_context = EFX_RSS_CONTEXT_DEFAULT;

        return 0;
}

/* Get an item from the sfc_flow_items array */
static const struct sfc_flow_item *
sfc_flow_get_item(enum rte_flow_item_type type)
{
        unsigned int i;

        for (i = 0; i < RTE_DIM(sfc_flow_items); i++)
                if (sfc_flow_items[i].type == type)
                        return &sfc_flow_items[i];

        return NULL;
}

static int
sfc_flow_parse_pattern(const struct rte_flow_item pattern[],
                       struct rte_flow *flow,
                       struct rte_flow_error *error)
{
        int rc;
        unsigned int prev_layer = SFC_FLOW_ITEM_ANY_LAYER;
        const struct sfc_flow_item *item;

        if (pattern == NULL) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ITEM_NUM, NULL,
                                   "NULL pattern");
                return -rte_errno;
        }

        for (; pattern->type != RTE_FLOW_ITEM_TYPE_END; pattern++) {
                item = sfc_flow_get_item(pattern->type);
                if (item == NULL) {
                        rte_flow_error_set(error, ENOTSUP,
                                           RTE_FLOW_ERROR_TYPE_ITEM, pattern,
                                           "Unsupported pattern item");
                        return -rte_errno;
                }

                /*
                 * Omitting one or several protocol layers at the beginning
                 * of the pattern is supported
                 */
                if (item->prev_layer != SFC_FLOW_ITEM_ANY_LAYER &&
                    prev_layer != SFC_FLOW_ITEM_ANY_LAYER &&
                    item->prev_layer != prev_layer) {
                        rte_flow_error_set(error, ENOTSUP,
                                           RTE_FLOW_ERROR_TYPE_ITEM, pattern,
                                           "Unexpected sequence of pattern items");
                        return -rte_errno;
                }

                rc = item->parse(pattern, &flow->spec, error);
                if (rc != 0)
                        return rc;

                if (item->layer != SFC_FLOW_ITEM_ANY_LAYER)
                        prev_layer = item->layer;
        }

        return 0;
}
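
/*
 * Illustrative sketch (not part of the driver): under the layer rules
 * in sfc_flow_items[], the patterns ETH / IPV4 / TCP and IPV4 / UDP
 * (leading layers omitted) are both accepted, whereas ETH / TCP is
 * rejected because the TCP item requires the previous layer to be L3,
 * not L2.
 */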

static int
sfc_flow_parse_queue(struct sfc_adapter *sa,
                     const struct rte_flow_action_queue *queue,
                     struct rte_flow *flow)
{
        struct sfc_rxq *rxq;

        if (queue->index >= sa->rxq_count)
                return -EINVAL;

        rxq = sa->rxq_info[queue->index].rxq;
        flow->spec.efs_dmaq_id = (uint16_t)rxq->hw_index;

        return 0;
}

#if EFSYS_OPT_RX_SCALE
static int
sfc_flow_parse_rss(struct sfc_adapter *sa,
                   const struct rte_flow_action_rss *rss,
                   struct rte_flow *flow)
{
        unsigned int rxq_sw_index;
        struct sfc_rxq *rxq;
        unsigned int rxq_hw_index_min;
        unsigned int rxq_hw_index_max;
        const struct rte_eth_rss_conf *rss_conf = rss->rss_conf;
        uint64_t rss_hf;
        uint8_t *rss_key = NULL;
        struct sfc_flow_rss *sfc_rss_conf = &flow->rss_conf;
        unsigned int i;

        if (rss->num == 0)
                return -EINVAL;

        rxq_sw_index = sa->rxq_count - 1;
        rxq = sa->rxq_info[rxq_sw_index].rxq;
        rxq_hw_index_min = rxq->hw_index;
        rxq_hw_index_max = 0;

        for (i = 0; i < rss->num; ++i) {
                rxq_sw_index = rss->queue[i];

                if (rxq_sw_index >= sa->rxq_count)
                        return -EINVAL;

                rxq = sa->rxq_info[rxq_sw_index].rxq;

                if (rxq->hw_index < rxq_hw_index_min)
                        rxq_hw_index_min = rxq->hw_index;

                if (rxq->hw_index > rxq_hw_index_max)
                        rxq_hw_index_max = rxq->hw_index;
        }

        rss_hf = (rss_conf != NULL) ? rss_conf->rss_hf : SFC_RSS_OFFLOADS;
        if ((rss_hf & ~SFC_RSS_OFFLOADS) != 0)
                return -EINVAL;

        if (rss_conf != NULL) {
                if (rss_conf->rss_key_len != sizeof(sa->rss_key))
                        return -EINVAL;

                rss_key = rss_conf->rss_key;
        } else {
                rss_key = sa->rss_key;
        }

        flow->rss = B_TRUE;

        sfc_rss_conf->rxq_hw_index_min = rxq_hw_index_min;
        sfc_rss_conf->rxq_hw_index_max = rxq_hw_index_max;
        sfc_rss_conf->rss_hash_types = sfc_rte_to_efx_hash_type(rss_hf);
        rte_memcpy(sfc_rss_conf->rss_key, rss_key, sizeof(sa->rss_key));

        for (i = 0; i < RTE_DIM(sfc_rss_conf->rss_tbl); ++i) {
                unsigned int rxq_sw_index = rss->queue[i % rss->num];
                struct sfc_rxq *rxq = sa->rxq_info[rxq_sw_index].rxq;

                sfc_rss_conf->rss_tbl[i] = rxq->hw_index - rxq_hw_index_min;
        }

        return 0;
}
#endif /* EFSYS_OPT_RX_SCALE */
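
/*
 * Illustrative sketch (not part of the driver): an RSS action over
 * queues {2, 3, 4, 5} with contiguous hardware indices yields
 * rxq_hw_index_min == hw(2) and rxq_hw_index_max == hw(5); each
 * rss_tbl entry is stored relative to rxq_hw_index_min (values 0..3
 * here), and sfc_flow_filter_insert() below sets efs_dmaq_id to
 * rxq_hw_index_min as the base the table is applied against.
 */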

static int
sfc_flow_filter_insert(struct sfc_adapter *sa,
                       struct rte_flow *flow)
{
        efx_filter_spec_t *spec = &flow->spec;

#if EFSYS_OPT_RX_SCALE
        struct sfc_flow_rss *rss = &flow->rss_conf;
        int rc = 0;

        if (flow->rss) {
                unsigned int rss_spread = MIN(rss->rxq_hw_index_max -
                                              rss->rxq_hw_index_min + 1,
                                              EFX_MAXRSS);

                rc = efx_rx_scale_context_alloc(sa->nic,
                                                EFX_RX_SCALE_EXCLUSIVE,
                                                rss_spread,
                                                &spec->efs_rss_context);
                if (rc != 0)
                        goto fail_scale_context_alloc;

                rc = efx_rx_scale_mode_set(sa->nic, spec->efs_rss_context,
                                           EFX_RX_HASHALG_TOEPLITZ,
                                           rss->rss_hash_types, B_TRUE);
                if (rc != 0)
                        goto fail_scale_mode_set;

                rc = efx_rx_scale_key_set(sa->nic, spec->efs_rss_context,
                                          rss->rss_key,
                                          sizeof(sa->rss_key));
                if (rc != 0)
                        goto fail_scale_key_set;

                spec->efs_dmaq_id = rss->rxq_hw_index_min;
                spec->efs_flags |= EFX_FILTER_FLAG_RX_RSS;
        }

        rc = efx_filter_insert(sa->nic, spec);
        if (rc != 0)
                goto fail_filter_insert;

        if (flow->rss) {
                /*
                 * The scale table is set after filter insertion because
                 * the table entries are relative to the base RxQ ID,
                 * and the latter is submitted to the HW by means of
                 * inserting a filter. By the time of this request the
                 * HW thus knows all the information needed to verify
                 * the table entries, and the operation will succeed.
                 */
                rc = efx_rx_scale_tbl_set(sa->nic, spec->efs_rss_context,
                                          rss->rss_tbl, RTE_DIM(rss->rss_tbl));
                if (rc != 0)
                        goto fail_scale_tbl_set;
        }

        return 0;

fail_scale_tbl_set:
        efx_filter_remove(sa->nic, spec);

fail_filter_insert:
fail_scale_key_set:
fail_scale_mode_set:
        if (flow->rss)
                efx_rx_scale_context_free(sa->nic, spec->efs_rss_context);

fail_scale_context_alloc:
        return rc;
#else /* !EFSYS_OPT_RX_SCALE */
        return efx_filter_insert(sa->nic, spec);
#endif /* EFSYS_OPT_RX_SCALE */
}

static int
sfc_flow_filter_remove(struct sfc_adapter *sa,
                       struct rte_flow *flow)
{
        efx_filter_spec_t *spec = &flow->spec;
        int rc = 0;

        rc = efx_filter_remove(sa->nic, spec);
        if (rc != 0)
                return rc;

#if EFSYS_OPT_RX_SCALE
        if (flow->rss)
                rc = efx_rx_scale_context_free(sa->nic, spec->efs_rss_context);
#endif /* EFSYS_OPT_RX_SCALE */

        return rc;
}

static int
sfc_flow_parse_actions(struct sfc_adapter *sa,
                       const struct rte_flow_action actions[],
                       struct rte_flow *flow,
                       struct rte_flow_error *error)
{
        int rc;
        boolean_t is_specified = B_FALSE;

        if (actions == NULL) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ACTION_NUM, NULL,
                                   "NULL actions");
                return -rte_errno;
        }

        for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
                switch (actions->type) {
                case RTE_FLOW_ACTION_TYPE_VOID:
                        break;

                case RTE_FLOW_ACTION_TYPE_QUEUE:
                        rc = sfc_flow_parse_queue(sa, actions->conf, flow);
                        if (rc != 0) {
                                rte_flow_error_set(error, EINVAL,
                                        RTE_FLOW_ERROR_TYPE_ACTION, actions,
                                        "Bad QUEUE action");
                                return -rte_errno;
                        }

                        is_specified = B_TRUE;
                        break;

#if EFSYS_OPT_RX_SCALE
                case RTE_FLOW_ACTION_TYPE_RSS:
                        rc = sfc_flow_parse_rss(sa, actions->conf, flow);
                        if (rc != 0) {
                                /*
                                 * sfc_flow_parse_rss() returns a negative
                                 * errno, but rte_flow_error_set() expects a
                                 * positive one, so negate it here
                                 */
                                rte_flow_error_set(error, -rc,
                                        RTE_FLOW_ERROR_TYPE_ACTION, actions,
                                        "Bad RSS action");
                                return -rte_errno;
                        }

                        is_specified = B_TRUE;
                        break;
#endif /* EFSYS_OPT_RX_SCALE */

                default:
                        rte_flow_error_set(error, ENOTSUP,
                                           RTE_FLOW_ERROR_TYPE_ACTION, actions,
                                           "Action is not supported");
                        return -rte_errno;
                }
        }

        if (!is_specified) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ACTION_NUM, actions,
                                   "Action is unspecified");
                return -rte_errno;
        }

        return 0;
}
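
/*
 * Illustrative sketch (not part of the driver): the action list
 * { VOID, QUEUE(index = 3), END } sets efs_dmaq_id from the RxQ's
 * hardware index and marks the rule as specified, while an action
 * list containing only END is rejected with "Action is unspecified".
 */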

static int
sfc_flow_parse(struct rte_eth_dev *dev,
               const struct rte_flow_attr *attr,
               const struct rte_flow_item pattern[],
               const struct rte_flow_action actions[],
               struct rte_flow *flow,
               struct rte_flow_error *error)
{
        struct sfc_adapter *sa = dev->data->dev_private;
        int rc;

        rc = sfc_flow_parse_attr(attr, flow, error);
        if (rc != 0)
                goto fail_bad_value;

        rc = sfc_flow_parse_pattern(pattern, flow, error);
        if (rc != 0)
                goto fail_bad_value;

        rc = sfc_flow_parse_actions(sa, actions, flow, error);
        if (rc != 0)
                goto fail_bad_value;

        if (!sfc_filter_is_match_supported(sa, flow->spec.efs_match_flags)) {
                rte_flow_error_set(error, ENOTSUP,
                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                                   "Flow rule pattern is not supported");
                return -rte_errno;
        }

fail_bad_value:
        return rc;
}

static int
sfc_flow_validate(struct rte_eth_dev *dev,
                  const struct rte_flow_attr *attr,
                  const struct rte_flow_item pattern[],
                  const struct rte_flow_action actions[],
                  struct rte_flow_error *error)
{
        struct rte_flow flow;

        memset(&flow, 0, sizeof(flow));

        return sfc_flow_parse(dev, attr, pattern, actions, &flow, error);
}

static struct rte_flow *
sfc_flow_create(struct rte_eth_dev *dev,
                const struct rte_flow_attr *attr,
                const struct rte_flow_item pattern[],
                const struct rte_flow_action actions[],
                struct rte_flow_error *error)
{
        struct sfc_adapter *sa = dev->data->dev_private;
        struct rte_flow *flow = NULL;
        int rc;

        flow = rte_zmalloc("sfc_rte_flow", sizeof(*flow), 0);
        if (flow == NULL) {
                rte_flow_error_set(error, ENOMEM,
                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                                   "Failed to allocate memory");
                goto fail_no_mem;
        }

        /*
         * Take the adapter lock before parsing so that the error path
         * below never unlocks a lock that has not been taken, and so
         * that the flow list is only modified under the lock
         */
        sfc_adapter_lock(sa);

        rc = sfc_flow_parse(dev, attr, pattern, actions, flow, error);
        if (rc != 0)
                goto fail_bad_value;

        TAILQ_INSERT_TAIL(&sa->filter.flow_list, flow, entries);

        if (sa->state == SFC_ADAPTER_STARTED) {
                rc = sfc_flow_filter_insert(sa, flow);
                if (rc != 0) {
                        rte_flow_error_set(error, rc,
                                RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                                "Failed to insert filter");
                        goto fail_filter_insert;
                }
        }

        sfc_adapter_unlock(sa);

        return flow;

fail_filter_insert:
        TAILQ_REMOVE(&sa->filter.flow_list, flow, entries);

fail_bad_value:
        rte_free(flow);
        sfc_adapter_unlock(sa);

fail_no_mem:
        return NULL;
}

static int
sfc_flow_remove(struct sfc_adapter *sa,
                struct rte_flow *flow,
                struct rte_flow_error *error)
{
        int rc = 0;

        SFC_ASSERT(sfc_adapter_is_locked(sa));

        if (sa->state == SFC_ADAPTER_STARTED) {
                rc = sfc_flow_filter_remove(sa, flow);
                if (rc != 0)
                        rte_flow_error_set(error, rc,
                                RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                                "Failed to destroy flow rule");
        }

        TAILQ_REMOVE(&sa->filter.flow_list, flow, entries);
        rte_free(flow);

        return rc;
}

static int
sfc_flow_destroy(struct rte_eth_dev *dev,
                 struct rte_flow *flow,
                 struct rte_flow_error *error)
{
        struct sfc_adapter *sa = dev->data->dev_private;
        struct rte_flow *flow_ptr;
        int rc = EINVAL;

        sfc_adapter_lock(sa);

        TAILQ_FOREACH(flow_ptr, &sa->filter.flow_list, entries) {
                if (flow_ptr == flow)
                        rc = 0;
        }
        if (rc != 0) {
                rte_flow_error_set(error, rc,
                                   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                   "Failed to find flow rule to destroy");
                goto fail_bad_value;
        }

        rc = sfc_flow_remove(sa, flow, error);

fail_bad_value:
        sfc_adapter_unlock(sa);

        return -rc;
}

static int
sfc_flow_flush(struct rte_eth_dev *dev,
               struct rte_flow_error *error)
{
        struct sfc_adapter *sa = dev->data->dev_private;
        struct rte_flow *flow;
        int rc = 0;
        int ret = 0;

        sfc_adapter_lock(sa);

        while ((flow = TAILQ_FIRST(&sa->filter.flow_list)) != NULL) {
                rc = sfc_flow_remove(sa, flow, error);
                if (rc != 0)
                        ret = rc;
        }

        sfc_adapter_unlock(sa);

        return -ret;
}

static int
sfc_flow_isolate(struct rte_eth_dev *dev, int enable,
                 struct rte_flow_error *error)
{
        struct sfc_adapter *sa = dev->data->dev_private;
        struct sfc_port *port = &sa->port;
        int ret = 0;

        sfc_adapter_lock(sa);
        if (sa->state != SFC_ADAPTER_INITIALIZED) {
                rte_flow_error_set(error, EBUSY,
                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                   NULL, "please close the port first");
                ret = -rte_errno;
        } else {
                port->isolated = (enable) ? B_TRUE : B_FALSE;
        }
        sfc_adapter_unlock(sa);

        return ret;
}

const struct rte_flow_ops sfc_flow_ops = {
        .validate = sfc_flow_validate,
        .create = sfc_flow_create,
        .destroy = sfc_flow_destroy,
        .flush = sfc_flow_flush,
        .query = NULL,
        .isolate = sfc_flow_isolate,
};
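
/*
 * Note (assumption, the wiring lives outside this file): applications
 * reach these ops through the ethdev filter_ctrl callback for
 * RTE_ETH_FILTER_GENERIC, which the driver is expected to handle in
 * sfc_ethdev.c by returning a pointer to sfc_flow_ops.
 */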

void
sfc_flow_init(struct sfc_adapter *sa)
{
        SFC_ASSERT(sfc_adapter_is_locked(sa));

        TAILQ_INIT(&sa->filter.flow_list);
}

void
sfc_flow_fini(struct sfc_adapter *sa)
{
        struct rte_flow *flow;

        SFC_ASSERT(sfc_adapter_is_locked(sa));

        while ((flow = TAILQ_FIRST(&sa->filter.flow_list)) != NULL) {
                TAILQ_REMOVE(&sa->filter.flow_list, flow, entries);
                rte_free(flow);
        }
}

void
sfc_flow_stop(struct sfc_adapter *sa)
{
        struct rte_flow *flow;

        SFC_ASSERT(sfc_adapter_is_locked(sa));

        TAILQ_FOREACH(flow, &sa->filter.flow_list, entries)
                sfc_flow_filter_remove(sa, flow);
}

int
sfc_flow_start(struct sfc_adapter *sa)
{
        struct rte_flow *flow;
        int rc = 0;

        sfc_log_init(sa, "entry");

        SFC_ASSERT(sfc_adapter_is_locked(sa));

        TAILQ_FOREACH(flow, &sa->filter.flow_list, entries) {
                rc = sfc_flow_filter_insert(sa, flow);
                if (rc != 0)
                        goto fail_bad_flow;
        }

        sfc_log_init(sa, "done");

fail_bad_flow:
        return rc;
}