/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Intel Corporation
 */

#include <rte_flow_classify.h>
#include <rte_flow_driver.h>
#include "rte_flow_classify_parse.h"

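/* Maps a supported flow item sequence to the parser that handles it. */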
struct classify_valid_pattern {
        enum rte_flow_item_type *items;
        parse_filter_t parse_filter;
};

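/* Action spec filled in by the parser and returned by classify_get_flow_action(). */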
static struct classify_action action;

/* Pattern for IPv4 5-tuple UDP filter */
static enum rte_flow_item_type pattern_ntuple_1[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_END,
};

/* Pattern for IPv4 5-tuple TCP filter */
static enum rte_flow_item_type pattern_ntuple_2[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_TCP,
        RTE_FLOW_ITEM_TYPE_END,
};

/* Pattern for IPv4 5-tuple SCTP filter */
static enum rte_flow_item_type pattern_ntuple_3[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_SCTP,
        RTE_FLOW_ITEM_TYPE_END,
};

static int
classify_parse_ntuple_filter(const struct rte_flow_attr *attr,
                         const struct rte_flow_item pattern[],
                         const struct rte_flow_action actions[],
                         struct rte_eth_ntuple_filter *filter,
                         struct rte_flow_error *error);

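/*
 * Note: the patterns below are matched only after VOID items have been
 * stripped from the user pattern (see classify_pattern_skip_void_item()).
 */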
static struct classify_valid_pattern classify_supported_patterns[] = {
        /* ntuple */
        { pattern_ntuple_1, classify_parse_ntuple_filter },
        { pattern_ntuple_2, classify_parse_ntuple_filter },
        { pattern_ntuple_3, classify_parse_ntuple_filter },
};

struct classify_action *
classify_get_flow_action(void)
{
        return &action;
}

/* Find the first VOID or non-VOID item pointer */
const struct rte_flow_item *
classify_find_first_item(const struct rte_flow_item *item, bool is_void)
{
        bool is_find;

        while (item->type != RTE_FLOW_ITEM_TYPE_END) {
                if (is_void)
                        is_find = item->type == RTE_FLOW_ITEM_TYPE_VOID;
                else
                        is_find = item->type != RTE_FLOW_ITEM_TYPE_VOID;
                if (is_find)
                        break;
                item++;
        }
        return item;
}

/* Skip all VOID items of the pattern */
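/*
 * e.g. the pattern ETH, VOID, IPV4, VOID, TCP, END is copied to "items"
 * as ETH, IPV4, TCP, END; the caller must size "items" accordingly.
 */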
void
classify_pattern_skip_void_item(struct rte_flow_item *items,
                            const struct rte_flow_item *pattern)
{
        uint32_t cpy_count = 0;
        const struct rte_flow_item *pb = pattern, *pe = pattern;

        for (;;) {
                /* Find a non-void item first */
                pb = classify_find_first_item(pb, false);
                if (pb->type == RTE_FLOW_ITEM_TYPE_END) {
                        pe = pb;
                        break;
                }

                /* Find a void item */
                pe = classify_find_first_item(pb + 1, true);

                cpy_count = pe - pb;
                rte_memcpy(items, pb, sizeof(struct rte_flow_item) * cpy_count);

                items += cpy_count;

                if (pe->type == RTE_FLOW_ITEM_TYPE_END) {
                        pb = pe;
                        break;
                }

                pb = pe + 1;
        }
        /* Copy the END item. */
        rte_memcpy(items, pe, sizeof(struct rte_flow_item));
}

/* Check if the pattern matches a supported item type array */
static bool
classify_match_pattern(enum rte_flow_item_type *item_array,
                   struct rte_flow_item *pattern)
{
        struct rte_flow_item *item = pattern;

        while ((*item_array == item->type) &&
               (*item_array != RTE_FLOW_ITEM_TYPE_END)) {
                item_array++;
                item++;
        }

        return (*item_array == RTE_FLOW_ITEM_TYPE_END &&
                item->type == RTE_FLOW_ITEM_TYPE_END);
}

/* Find the parse filter function matching the pattern, if any */
parse_filter_t
classify_find_parse_filter_func(struct rte_flow_item *pattern)
{
        parse_filter_t parse_filter = NULL;
        uint8_t i = 0;

        for (; i < RTE_DIM(classify_supported_patterns); i++) {
                if (classify_match_pattern(classify_supported_patterns[i].items,
                                        pattern)) {
                        parse_filter =
                                classify_supported_patterns[i].parse_filter;
                        break;
                }
        }

        return parse_filter;
}

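/*
 * rte_flow priorities are numerically inverted: 0 denotes the highest
 * priority, so FLOW_RULE_MAX_PRIORITY is 0 and the lowest supported
 * priority is 8.
 */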
#define FLOW_RULE_MIN_PRIORITY 8
#define FLOW_RULE_MAX_PRIORITY 0

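/* Advance to the next non-VOID pattern item/action, updating "index". */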
#define NEXT_ITEM_OF_PATTERN(item, pattern, index)\
        do {\
                item = pattern + index;\
                while (item->type == RTE_FLOW_ITEM_TYPE_VOID) {\
                        index++;\
                        item = pattern + index;\
                } \
        } while (0)

#define NEXT_ITEM_OF_ACTION(act, actions, index)\
        do {\
                act = actions + index;\
                while (act->type == RTE_FLOW_ACTION_TYPE_VOID) {\
                        index++;\
                        act = actions + index;\
                } \
        } while (0)

/**
 * Please be aware of an assumption shared by all the parsers:
 * rte_flow_item uses big endian, while rte_flow_attr and
 * rte_flow_action use CPU byte order.
 * Because the pattern is used to describe packets,
 * it should normally use network order.
 */
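/*
 * For example, a caller building an IPv4 spec for these parsers would
 * convert addresses (here 192.168.1.20) to network order first; an
 * illustrative snippet, not part of this library:
 *
 *     struct rte_flow_item_ipv4 ipv4_spec = {
 *             .hdr.src_addr = rte_cpu_to_be_32(0xC0A80114),
 *     };
 *
 * while rte_flow_attr fields such as "priority" stay in CPU order.
 */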
177
/**
 * Parse the rule to see if it is an n-tuple rule,
 * and fill in the n-tuple filter info along the way.
 * pattern:
 * The first not void item can be ETH or IPV4.
 * The second not void item must be IPV4 if the first one is ETH.
 * The third not void item must be UDP, TCP or SCTP.
 * The next not void item must be END.
 * action:
 * The first not void action must be COUNT or MARK.
 * The next not void action can be COUNT, MARK or END; if it is
 * COUNT or MARK, the one after it must be END.
 * pattern example:
 * ITEM         Spec                    Mask
 * ETH          NULL                    NULL
 * IPV4         src_addr 192.168.1.20   0xFFFFFFFF
 *              dst_addr 192.167.3.50   0xFFFFFFFF
 *              next_proto_id 17        0xFF
 * UDP/TCP/     src_port 80             0xFFFF
 * SCTP         dst_port 80             0xFFFF
 * END
 * All other members in mask and spec should be set to 0x00.
 * item->last should be NULL.
 */
static int
classify_parse_ntuple_filter(const struct rte_flow_attr *attr,
                         const struct rte_flow_item pattern[],
                         const struct rte_flow_action actions[],
                         struct rte_eth_ntuple_filter *filter,
                         struct rte_flow_error *error)
{
        const struct rte_flow_item *item;
        const struct rte_flow_action *act;
        const struct rte_flow_item_ipv4 *ipv4_spec;
        const struct rte_flow_item_ipv4 *ipv4_mask;
        const struct rte_flow_item_tcp *tcp_spec;
        const struct rte_flow_item_tcp *tcp_mask;
        const struct rte_flow_item_udp *udp_spec;
        const struct rte_flow_item_udp *udp_mask;
        const struct rte_flow_item_sctp *sctp_spec;
        const struct rte_flow_item_sctp *sctp_mask;
        const struct rte_flow_action_count *count;
        const struct rte_flow_action_mark *mark_spec;
        uint32_t index;

        /* parse pattern */
        index = 0;

        /* the first not void item can be ETH or IPV4 */
        NEXT_ITEM_OF_PATTERN(item, pattern, index);

        if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
            item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        item, "Not supported by ntuple filter");
                return -EINVAL;
        }
        /* Skip Ethernet */
        if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
                /* Not supported last point for range */
                if (item->last) {
                        rte_flow_error_set(error, EINVAL,
                                        RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                        item,
                                        "Not supported last point for range");
                        return -EINVAL;
                }
                /* if the first item is ETH, the content should be NULL */
                if (item->spec || item->mask) {
                        rte_flow_error_set(error, EINVAL,
                                        RTE_FLOW_ERROR_TYPE_ITEM,
                                        item,
                                        "Not supported by ntuple filter");
                        return -EINVAL;
                }
                /* check if the next not void item is IPv4 */
                index++;
                NEXT_ITEM_OF_PATTERN(item, pattern, index);
                if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
                        rte_flow_error_set(error, EINVAL,
                                        RTE_FLOW_ERROR_TYPE_ITEM,
                                        item,
                                        "Not supported by ntuple filter");
                        return -EINVAL;
                }
        }

        /* get the IPv4 info */
        if (!item->spec || !item->mask) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        item, "Invalid ntuple mask");
                return -EINVAL;
        }
        /* Not supported last point for range */
        if (item->last) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                        item, "Not supported last point for range");
                return -EINVAL;
        }

        ipv4_mask = item->mask;
        /**
         * Only src & dst addresses and protocol are supported;
         * all other fields should be masked.
         */
        if (ipv4_mask->hdr.version_ihl ||
                ipv4_mask->hdr.type_of_service ||
                ipv4_mask->hdr.total_length ||
                ipv4_mask->hdr.packet_id ||
                ipv4_mask->hdr.fragment_offset ||
                ipv4_mask->hdr.time_to_live ||
                ipv4_mask->hdr.hdr_checksum) {
                rte_flow_error_set(error,
                        EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
                        item, "Not supported by ntuple filter");
                return -EINVAL;
        }

        filter->dst_ip_mask = ipv4_mask->hdr.dst_addr;
        filter->src_ip_mask = ipv4_mask->hdr.src_addr;
        filter->proto_mask  = ipv4_mask->hdr.next_proto_id;

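        /* Addresses stay in network byte order; rte_eth_ntuple_filter expects big endian. */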
        ipv4_spec = item->spec;
        filter->dst_ip = ipv4_spec->hdr.dst_addr;
        filter->src_ip = ipv4_spec->hdr.src_addr;
        filter->proto  = ipv4_spec->hdr.next_proto_id;

        /* check if the next not void item is TCP or UDP or SCTP */
        index++;
        NEXT_ITEM_OF_PATTERN(item, pattern, index);
        if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
            item->type != RTE_FLOW_ITEM_TYPE_UDP &&
            item->type != RTE_FLOW_ITEM_TYPE_SCTP) {
                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        item, "Not supported by ntuple filter");
                return -EINVAL;
        }

        /* get the TCP/UDP/SCTP info */
        if (!item->spec || !item->mask) {
                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        item, "Invalid ntuple mask");
                return -EINVAL;
        }

        /* Not supported last point for range */
        if (item->last) {
                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                        item, "Not supported last point for range");
                return -EINVAL;
        }

        if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
                tcp_mask = item->mask;

                /**
                 * Only src & dst ports and TCP flags are supported;
                 * all other fields should be masked.
                 */
                if (tcp_mask->hdr.sent_seq ||
                    tcp_mask->hdr.recv_ack ||
                    tcp_mask->hdr.data_off ||
                    tcp_mask->hdr.rx_win ||
                    tcp_mask->hdr.cksum ||
                    tcp_mask->hdr.tcp_urp) {
                        memset(filter, 0,
                                sizeof(struct rte_eth_ntuple_filter));
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Not supported by ntuple filter");
                        return -EINVAL;
                }

                filter->dst_port_mask  = tcp_mask->hdr.dst_port;
                filter->src_port_mask  = tcp_mask->hdr.src_port;
                if (tcp_mask->hdr.tcp_flags == 0xFF) {
                        filter->flags |= RTE_NTUPLE_FLAGS_TCP_FLAG;
                } else if (!tcp_mask->hdr.tcp_flags) {
                        filter->flags &= ~RTE_NTUPLE_FLAGS_TCP_FLAG;
                } else {
                        memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Not supported by ntuple filter");
                        return -EINVAL;
                }

                tcp_spec = item->spec;
                filter->dst_port  = tcp_spec->hdr.dst_port;
                filter->src_port  = tcp_spec->hdr.src_port;
                filter->tcp_flags = tcp_spec->hdr.tcp_flags;
        } else if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
                udp_mask = item->mask;

                /**
                 * Only src & dst ports are supported;
                 * all other fields should be masked.
                 */
                if (udp_mask->hdr.dgram_len ||
                    udp_mask->hdr.dgram_cksum) {
                        memset(filter, 0,
                                sizeof(struct rte_eth_ntuple_filter));
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Not supported by ntuple filter");
                        return -EINVAL;
                }

                filter->dst_port_mask = udp_mask->hdr.dst_port;
                filter->src_port_mask = udp_mask->hdr.src_port;

                udp_spec = item->spec;
                filter->dst_port = udp_spec->hdr.dst_port;
                filter->src_port = udp_spec->hdr.src_port;
        } else {
                sctp_mask = item->mask;

                /**
                 * Only src & dst ports are supported;
                 * all other fields should be masked.
                 */
                if (sctp_mask->hdr.tag ||
                    sctp_mask->hdr.cksum) {
                        memset(filter, 0,
                                sizeof(struct rte_eth_ntuple_filter));
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Not supported by ntuple filter");
                        return -EINVAL;
                }

                filter->dst_port_mask = sctp_mask->hdr.dst_port;
                filter->src_port_mask = sctp_mask->hdr.src_port;

                sctp_spec = item->spec;
                filter->dst_port = sctp_spec->hdr.dst_port;
                filter->src_port = sctp_spec->hdr.src_port;
        }

        /* check if the next not void item is END */
        index++;
        NEXT_ITEM_OF_PATTERN(item, pattern, index);
        if (item->type != RTE_FLOW_ITEM_TYPE_END) {
                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        item, "Not supported by ntuple filter");
                return -EINVAL;
        }

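        /* Record the matched table type for the classifier. */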
        table_type = RTE_FLOW_CLASSIFY_TABLE_ACL_IP4_5TUPLE;

        /* parse attr */
        /* must be input direction */
        if (!attr->ingress) {
                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
                                   attr, "Only support ingress.");
                return -EINVAL;
        }

        /* not supported */
        if (attr->egress) {
                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
                                   attr, "Not support egress.");
                return -EINVAL;
        }

        if (attr->priority > 0xFFFF) {
                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
                                   attr, "Error priority.");
                return -EINVAL;
        }
        filter->priority = (uint16_t)attr->priority;
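        /*
         * Requests numerically above FLOW_RULE_MIN_PRIORITY are clamped
         * to FLOW_RULE_MAX_PRIORITY (0), the highest priority.
         */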
        if (attr->priority > FLOW_RULE_MIN_PRIORITY)
                filter->priority = FLOW_RULE_MAX_PRIORITY;

        /* parse action */
        index = 0;

        /**
         * n-tuple only supports COUNT and MARK;
         * check if the first not void action is COUNT or MARK.
         */
        memset(&action, 0, sizeof(action));
        NEXT_ITEM_OF_ACTION(act, actions, index);
        switch (act->type) {
        case RTE_FLOW_ACTION_TYPE_COUNT:
                action.action_mask |= 1LLU << RTE_FLOW_ACTION_TYPE_COUNT;
                count = act->conf;
                memcpy(&action.act.counter, count, sizeof(action.act.counter));
                break;
        case RTE_FLOW_ACTION_TYPE_MARK:
                action.action_mask |= 1LLU << RTE_FLOW_ACTION_TYPE_MARK;
                mark_spec = act->conf;
                memcpy(&action.act.mark, mark_spec, sizeof(action.act.mark));
                break;
        default:
                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                rte_flow_error_set(error, EINVAL,
                   RTE_FLOW_ERROR_TYPE_ACTION, act,
                   "Invalid action.");
                return -EINVAL;
        }

        /* check if the next not void action is COUNT, MARK or END */
        index++;
        NEXT_ITEM_OF_ACTION(act, actions, index);
        switch (act->type) {
        case RTE_FLOW_ACTION_TYPE_COUNT:
                action.action_mask |= 1LLU << RTE_FLOW_ACTION_TYPE_COUNT;
                count = act->conf;
                memcpy(&action.act.counter, count, sizeof(action.act.counter));
                break;
        case RTE_FLOW_ACTION_TYPE_MARK:
                action.action_mask |= 1LLU << RTE_FLOW_ACTION_TYPE_MARK;
                mark_spec = act->conf;
                memcpy(&action.act.mark, mark_spec, sizeof(action.act.mark));
                break;
        case RTE_FLOW_ACTION_TYPE_END:
                return 0;
        default:
                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                rte_flow_error_set(error, EINVAL,
                   RTE_FLOW_ERROR_TYPE_ACTION, act,
                   "Invalid action.");
                return -EINVAL;
        }

        /* check if the next not void action is END */
        index++;
        NEXT_ITEM_OF_ACTION(act, actions, index);
        if (act->type != RTE_FLOW_ACTION_TYPE_END) {
                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                rte_flow_error_set(error, EINVAL,
                   RTE_FLOW_ERROR_TYPE_ACTION, act,
                   "Invalid action.");
                return -EINVAL;
        }

        return 0;
}