/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2017 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <rte_flow_classify.h>
#include "rte_flow_classify_parse.h"
#include <rte_flow_driver.h>

struct classify_valid_pattern {
        enum rte_flow_item_type *items;
        parse_filter_t parse_filter;
};

/* single flow action returned by classify_get_flow_action() */
static struct rte_flow_action action;

/* Pattern for IPv4 5-tuple UDP filter */
static enum rte_flow_item_type pattern_ntuple_1[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_END,
};

/* Pattern for IPv4 5-tuple TCP filter */
static enum rte_flow_item_type pattern_ntuple_2[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_TCP,
        RTE_FLOW_ITEM_TYPE_END,
};

/* Pattern for IPv4 5-tuple SCTP filter */
static enum rte_flow_item_type pattern_ntuple_3[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_SCTP,
        RTE_FLOW_ITEM_TYPE_END,
};

static int
classify_parse_ntuple_filter(const struct rte_flow_attr *attr,
                         const struct rte_flow_item pattern[],
                         const struct rte_flow_action actions[],
                         struct rte_eth_ntuple_filter *filter,
                         struct rte_flow_error *error);

static struct classify_valid_pattern classify_supported_patterns[] = {
        /* ntuple */
        { pattern_ntuple_1, classify_parse_ntuple_filter },
        { pattern_ntuple_2, classify_parse_ntuple_filter },
        { pattern_ntuple_3, classify_parse_ntuple_filter },
};

struct rte_flow_action *
classify_get_flow_action(void)
{
        return &action;
}

/* Find the first VOID or non-VOID item pointer */
const struct rte_flow_item *
classify_find_first_item(const struct rte_flow_item *item, bool is_void)
{
        bool is_find;

        while (item->type != RTE_FLOW_ITEM_TYPE_END) {
                if (is_void)
                        is_find = item->type == RTE_FLOW_ITEM_TYPE_VOID;
                else
                        is_find = item->type != RTE_FLOW_ITEM_TYPE_VOID;
                if (is_find)
                        break;
                item++;
        }
        return item;
}

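/*
 * Example (a minimal illustrative sketch, not code from this library):
 * given
 *
 *   struct rte_flow_item p[] = {
 *           { .type = RTE_FLOW_ITEM_TYPE_VOID },
 *           { .type = RTE_FLOW_ITEM_TYPE_ETH },
 *           { .type = RTE_FLOW_ITEM_TYPE_END },
 *   };
 *
 * classify_find_first_item(p, false) returns &p[1], the first non-VOID
 * item, while classify_find_first_item(p, true) returns &p[0], the first
 * VOID item (or the END item if none is found).
 */
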
/* Skip all VOID items of the pattern */
void
classify_pattern_skip_void_item(struct rte_flow_item *items,
                            const struct rte_flow_item *pattern)
{
        uint32_t cpy_count = 0;
        const struct rte_flow_item *pb = pattern, *pe = pattern;

        for (;;) {
                /* Find a non-void item first */
                pb = classify_find_first_item(pb, false);
                if (pb->type == RTE_FLOW_ITEM_TYPE_END) {
                        pe = pb;
                        break;
                }

                /* Find a void item */
                pe = classify_find_first_item(pb + 1, true);

                cpy_count = pe - pb;
                rte_memcpy(items, pb, sizeof(struct rte_flow_item) * cpy_count);

                items += cpy_count;

                if (pe->type == RTE_FLOW_ITEM_TYPE_END) {
                        pb = pe;
                        break;
                }

                pb = pe + 1;
        }
        /* Copy the END item. */
        rte_memcpy(items, pe, sizeof(struct rte_flow_item));
}

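/*
 * Example (a minimal sketch of typical input/output, assuming a caller
 * supplies a large enough destination array): the non-VOID items of an
 * END-terminated pattern are copied, in order, into "items".
 *
 *   struct rte_flow_item in[] = {
 *           { .type = RTE_FLOW_ITEM_TYPE_ETH },
 *           { .type = RTE_FLOW_ITEM_TYPE_VOID },
 *           { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *           { .type = RTE_FLOW_ITEM_TYPE_UDP },
 *           { .type = RTE_FLOW_ITEM_TYPE_END },
 *   };
 *   struct rte_flow_item out[RTE_DIM(in)];
 *
 *   classify_pattern_skip_void_item(out, in);
 *   // out now holds ETH, IPV4, UDP, END
 */
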
/* Check if the pattern matches a supported item type array */
static bool
classify_match_pattern(enum rte_flow_item_type *item_array,
                   struct rte_flow_item *pattern)
{
        struct rte_flow_item *item = pattern;

        while ((*item_array == item->type) &&
               (*item_array != RTE_FLOW_ITEM_TYPE_END)) {
                item_array++;
                item++;
        }

        return (*item_array == RTE_FLOW_ITEM_TYPE_END &&
                item->type == RTE_FLOW_ITEM_TYPE_END);
}

/* Find the parse filter function matching the pattern, if any */
parse_filter_t
classify_find_parse_filter_func(struct rte_flow_item *pattern)
{
        parse_filter_t parse_filter = NULL;
        uint8_t i = 0;

        for (; i < RTE_DIM(classify_supported_patterns); i++) {
                if (classify_match_pattern(classify_supported_patterns[i].items,
                                        pattern)) {
                        parse_filter =
                                classify_supported_patterns[i].parse_filter;
                        break;
                }
        }

        return parse_filter;
}

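/*
 * Example (illustrative sketch, assuming a pattern already compacted with
 * classify_pattern_skip_void_item()): looking up the parser for an
 * IPv4/UDP 5-tuple pattern.
 *
 *   struct rte_flow_item pattern[] = {
 *           { .type = RTE_FLOW_ITEM_TYPE_ETH },
 *           { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *           { .type = RTE_FLOW_ITEM_TYPE_UDP },
 *           { .type = RTE_FLOW_ITEM_TYPE_END },
 *   };
 *   parse_filter_t parse = classify_find_parse_filter_func(pattern);
 *   // parse == classify_parse_ntuple_filter (matches pattern_ntuple_1)
 */
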
/* Flow rule priority bounds: 0 is the highest priority, 8 the lowest */
#define FLOW_RULE_MIN_PRIORITY 8
#define FLOW_RULE_MAX_PRIORITY 0

/* Advance "item" to the next non-VOID item; "pattern" must be END-terminated */
#define NEXT_ITEM_OF_PATTERN(item, pattern, index)\
        do {\
                item = pattern + index;\
                while (item->type == RTE_FLOW_ITEM_TYPE_VOID) {\
                        index++;\
                        item = pattern + index;\
                } \
        } while (0)

/* Advance "act" to the next non-VOID action; "actions" must be END-terminated */
#define NEXT_ITEM_OF_ACTION(act, actions, index)\
        do {\
                act = actions + index;\
                while (act->type == RTE_FLOW_ACTION_TYPE_VOID) {\
                        index++;\
                        act = actions + index;\
                } \
        } while (0)

/**
 * Please be aware of an assumption shared by all the parsers:
 * rte_flow_item contents are in big endian, while rte_flow_attr and
 * rte_flow_action use CPU (host) byte order.
 * Because the pattern describes packets, the pattern contents
 * should normally be in network byte order.
 */
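
/*
 * Example (a sketch of the assumption above, not code from this file;
 * the IPv4() helper is assumed from rte_ip.h): spec/mask values inside
 * an rte_flow_item must be in network byte order, e.g. when a caller
 * fills in an IPv4 source address and a UDP port:
 *
 *   struct rte_flow_item_ipv4 ipv4_spec = {
 *           .hdr = { .src_addr = rte_cpu_to_be_32(IPv4(192, 168, 1, 20)) },
 *   };
 *   struct rte_flow_item_udp udp_spec = {
 *           .hdr = { .src_port = rte_cpu_to_be_16(80) },
 *   };
 */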

/**
 * Parse the rule to see if it is an n-tuple rule,
 * and fill in the n-tuple filter info along the way.
 * pattern:
 * The first not void item can be ETH or IPV4.
 * The second not void item must be IPV4 if the first one is ETH.
 * The third not void item must be UDP, TCP or SCTP.
 * The next not void item must be END.
 * action:
 * The first not void action must be COUNT.
 * The next not void action must be END.
 * pattern example:
 * ITEM         Spec                    Mask
 * ETH          NULL                    NULL
 * IPV4         src_addr 192.168.1.20   0xFFFFFFFF
 *              dst_addr 192.167.3.50   0xFFFFFFFF
 *              next_proto_id   17      0xFF
 * UDP/TCP/     src_port        80      0xFFFF
 * SCTP         dst_port        80      0xFFFF
 * END
 * All other members in mask and spec must be set to 0x00.
 * item->last must be NULL.
 */
static int
classify_parse_ntuple_filter(const struct rte_flow_attr *attr,
                         const struct rte_flow_item pattern[],
                         const struct rte_flow_action actions[],
                         struct rte_eth_ntuple_filter *filter,
                         struct rte_flow_error *error)
{
        const struct rte_flow_item *item;
        const struct rte_flow_action *act;
        const struct rte_flow_item_ipv4 *ipv4_spec;
        const struct rte_flow_item_ipv4 *ipv4_mask;
        const struct rte_flow_item_tcp *tcp_spec;
        const struct rte_flow_item_tcp *tcp_mask;
        const struct rte_flow_item_udp *udp_spec;
        const struct rte_flow_item_udp *udp_mask;
        const struct rte_flow_item_sctp *sctp_spec;
        const struct rte_flow_item_sctp *sctp_mask;
        uint32_t index;

        if (!pattern) {
                rte_flow_error_set(error,
                        EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
                        NULL, "NULL pattern.");
                return -EINVAL;
        }

        if (!actions) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ACTION_NUM,
                                   NULL, "NULL action.");
                return -EINVAL;
        }
        if (!attr) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR,
                                   NULL, "NULL attribute.");
                return -EINVAL;
        }

        /* parse pattern */
        index = 0;

        /* the first not void item can be MAC or IPv4 */
        NEXT_ITEM_OF_PATTERN(item, pattern, index);

        if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
            item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        item, "Not supported by ntuple filter");
                return -EINVAL;
        }
        /* Skip Ethernet */
        if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
                /* Not supported last point for range */
                if (item->last) {
                        rte_flow_error_set(error, EINVAL,
                                        RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                        item,
                                        "Not supported last point for range");
                        return -EINVAL;
                }
                /* if the first item is MAC, the content should be NULL */
                if (item->spec || item->mask) {
                        rte_flow_error_set(error, EINVAL,
                                        RTE_FLOW_ERROR_TYPE_ITEM,
                                        item,
                                        "Not supported by ntuple filter");
                        return -EINVAL;
                }
                /* check if the next not void item is IPv4 */
                index++;
                NEXT_ITEM_OF_PATTERN(item, pattern, index);
                if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
                        rte_flow_error_set(error, EINVAL,
                                        RTE_FLOW_ERROR_TYPE_ITEM,
                                        item,
                                        "Not supported by ntuple filter");
                        return -EINVAL;
                }
        }

        /* get the IPv4 info */
        if (!item->spec || !item->mask) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        item, "Invalid ntuple mask");
                return -EINVAL;
        }
        /* Not supported last point for range */
        if (item->last) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                        item, "Not supported last point for range");
                return -EINVAL;
        }

        ipv4_mask = (const struct rte_flow_item_ipv4 *)item->mask;
        /**
         * Only src & dst addresses and protocol are supported;
         * every other field must have a zero mask.
         */
        if (ipv4_mask->hdr.version_ihl ||
                ipv4_mask->hdr.type_of_service ||
                ipv4_mask->hdr.total_length ||
                ipv4_mask->hdr.packet_id ||
                ipv4_mask->hdr.fragment_offset ||
                ipv4_mask->hdr.time_to_live ||
                ipv4_mask->hdr.hdr_checksum) {
                rte_flow_error_set(error,
                        EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
                        item, "Not supported by ntuple filter");
                return -EINVAL;
        }

        filter->dst_ip_mask = ipv4_mask->hdr.dst_addr;
        filter->src_ip_mask = ipv4_mask->hdr.src_addr;
        filter->proto_mask  = ipv4_mask->hdr.next_proto_id;

        ipv4_spec = (const struct rte_flow_item_ipv4 *)item->spec;
        filter->dst_ip = ipv4_spec->hdr.dst_addr;
        filter->src_ip = ipv4_spec->hdr.src_addr;
        filter->proto  = ipv4_spec->hdr.next_proto_id;

        /* check if the next not void item is TCP or UDP or SCTP */
        index++;
        NEXT_ITEM_OF_PATTERN(item, pattern, index);
        if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
            item->type != RTE_FLOW_ITEM_TYPE_UDP &&
            item->type != RTE_FLOW_ITEM_TYPE_SCTP) {
                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        item, "Not supported by ntuple filter");
                return -EINVAL;
        }

        /* get the TCP/UDP/SCTP info */
        if (!item->spec || !item->mask) {
                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        item, "Invalid ntuple mask");
                return -EINVAL;
        }

        /* Not supported last point for range */
        if (item->last) {
                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                        item, "Not supported last point for range");
                return -EINVAL;
        }

        if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
                tcp_mask = (const struct rte_flow_item_tcp *)item->mask;

                /**
                 * Only src & dst ports and tcp flags are supported;
                 * every other field must have a zero mask.
                 */
                if (tcp_mask->hdr.sent_seq ||
                    tcp_mask->hdr.recv_ack ||
                    tcp_mask->hdr.data_off ||
                    tcp_mask->hdr.rx_win ||
                    tcp_mask->hdr.cksum ||
                    tcp_mask->hdr.tcp_urp) {
                        memset(filter, 0,
                                sizeof(struct rte_eth_ntuple_filter));
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Not supported by ntuple filter");
                        return -EINVAL;
                }

                filter->dst_port_mask  = tcp_mask->hdr.dst_port;
                filter->src_port_mask  = tcp_mask->hdr.src_port;
                /* TCP flags must be fully masked or fully wild */
                if (tcp_mask->hdr.tcp_flags == 0xFF) {
                        filter->flags |= RTE_NTUPLE_FLAGS_TCP_FLAG;
                } else if (!tcp_mask->hdr.tcp_flags) {
                        filter->flags &= ~RTE_NTUPLE_FLAGS_TCP_FLAG;
                } else {
                        memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Not supported by ntuple filter");
                        return -EINVAL;
                }

                tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
                filter->dst_port  = tcp_spec->hdr.dst_port;
                filter->src_port  = tcp_spec->hdr.src_port;
                filter->tcp_flags = tcp_spec->hdr.tcp_flags;
        } else if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
                udp_mask = (const struct rte_flow_item_udp *)item->mask;

                /**
                 * Only src & dst ports are supported;
                 * every other field must have a zero mask.
                 */
                if (udp_mask->hdr.dgram_len ||
                    udp_mask->hdr.dgram_cksum) {
                        memset(filter, 0,
                                sizeof(struct rte_eth_ntuple_filter));
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Not supported by ntuple filter");
                        return -EINVAL;
                }

                filter->dst_port_mask = udp_mask->hdr.dst_port;
                filter->src_port_mask = udp_mask->hdr.src_port;

                udp_spec = (const struct rte_flow_item_udp *)item->spec;
                filter->dst_port = udp_spec->hdr.dst_port;
                filter->src_port = udp_spec->hdr.src_port;
        } else {
                sctp_mask = (const struct rte_flow_item_sctp *)item->mask;

                /**
                 * Only src & dst ports are supported;
                 * every other field must have a zero mask.
                 */
                if (sctp_mask->hdr.tag ||
                    sctp_mask->hdr.cksum) {
                        memset(filter, 0,
                                sizeof(struct rte_eth_ntuple_filter));
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Not supported by ntuple filter");
                        return -EINVAL;
                }

                filter->dst_port_mask = sctp_mask->hdr.dst_port;
                filter->src_port_mask = sctp_mask->hdr.src_port;

                sctp_spec = (const struct rte_flow_item_sctp *)item->spec;
                filter->dst_port = sctp_spec->hdr.dst_port;
                filter->src_port = sctp_spec->hdr.src_port;
        }

        /* check if the next not void item is END */
        index++;
        NEXT_ITEM_OF_PATTERN(item, pattern, index);
        if (item->type != RTE_FLOW_ITEM_TYPE_END) {
                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        item, "Not supported by ntuple filter");
                return -EINVAL;
        }

        /* parse action */
        index = 0;

        /**
         * n-tuple only supports count,
         * check if the first not void action is COUNT.
         */
        memset(&action, 0, sizeof(action));
        NEXT_ITEM_OF_ACTION(act, actions, index);
        if (act->type != RTE_FLOW_ACTION_TYPE_COUNT) {
                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ACTION,
                        act, "Not supported action.");
                return -EINVAL;
        }
        action.type = RTE_FLOW_ACTION_TYPE_COUNT;

        /* check if the next not void action is END */
        index++;
        NEXT_ITEM_OF_ACTION(act, actions, index);
        if (act->type != RTE_FLOW_ACTION_TYPE_END) {
                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ACTION,
                        act, "Not supported action.");
                return -EINVAL;
        }

        /* parse attr */
        /* must be input direction */
        if (!attr->ingress) {
                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
                                   attr, "Only ingress is supported.");
                return -EINVAL;
        }

        /* egress is not supported */
        if (attr->egress) {
                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
                                   attr, "Egress is not supported.");
                return -EINVAL;
        }

        if (attr->priority > 0xFFFF) {
                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
                                   attr, "Invalid priority.");
                return -EINVAL;
        }
        filter->priority = (uint16_t)attr->priority;
        /* priorities beyond the supported minimum are clamped to 0 (highest) */
        if (attr->priority > FLOW_RULE_MIN_PRIORITY)
                filter->priority = FLOW_RULE_MAX_PRIORITY;

        return 0;
}
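
/*
 * Usage sketch (illustrative only, assuming the conventions documented
 * above; the IPv4() helper is assumed from rte_ip.h, and the arrays are
 * hypothetical caller data): parsing an IPv4/UDP 5-tuple rule with a
 * COUNT action.
 *
 *   struct rte_flow_attr attr = { .ingress = 1 };
 *   struct rte_flow_item_ipv4 ipv4_spec = {
 *           .hdr = { .src_addr = rte_cpu_to_be_32(IPv4(192, 168, 1, 20)),
 *                    .next_proto_id = 17 } };
 *   struct rte_flow_item_ipv4 ipv4_mask = {
 *           .hdr = { .src_addr = 0xFFFFFFFF, .next_proto_id = 0xFF } };
 *   struct rte_flow_item_udp udp_spec = {
 *           .hdr = { .dst_port = rte_cpu_to_be_16(80) } };
 *   struct rte_flow_item_udp udp_mask = {
 *           .hdr = { .dst_port = 0xFFFF } };
 *   struct rte_flow_item pattern[] = {
 *           { .type = RTE_FLOW_ITEM_TYPE_ETH },
 *           { .type = RTE_FLOW_ITEM_TYPE_IPV4,
 *             .spec = &ipv4_spec, .mask = &ipv4_mask },
 *           { .type = RTE_FLOW_ITEM_TYPE_UDP,
 *             .spec = &udp_spec, .mask = &udp_mask },
 *           { .type = RTE_FLOW_ITEM_TYPE_END },
 *   };
 *   struct rte_flow_action actions[] = {
 *           { .type = RTE_FLOW_ACTION_TYPE_COUNT },
 *           { .type = RTE_FLOW_ACTION_TYPE_END },
 *   };
 *   struct rte_eth_ntuple_filter filter;
 *   struct rte_flow_error error;
 *
 *   int ret = classify_parse_ntuple_filter(&attr, pattern, actions,
 *                                          &filter, &error);
 *   // On success ret == 0 and filter holds the 5-tuple spec and masks.
 */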