4 * Copyright 2016 6WIND S.A.
5 * Copyright 2016 Mellanox.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of 6WIND S.A. nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
41 #include <arpa/inet.h>
42 #include <sys/socket.h>
44 #include <rte_common.h>
45 #include <rte_ethdev.h>
46 #include <rte_byteorder.h>
47 #include <cmdline_parse.h>
48 #include <cmdline_parse_etheraddr.h>
53 /** Parser token indices. */
73 /* Top-level command. */
76 /* Sub-level commands. */
84 /* Destroy arguments. */
87 /* Query arguments. */
93 /* Validate/create arguments. */
99 /* Validate/create pattern. */
163 ITEM_E_TAG_GRP_ECID_B,
171 /* Validate/create actions. */
195 /** Size of pattern[] field in struct rte_flow_item_raw. */
196 #define ITEM_RAW_PATTERN_SIZE 36
198 /** Storage size for struct rte_flow_item_raw including pattern. */
/*
 * Flexible-array sizing idiom: offset of the trailing pattern[] member
 * plus the number of pattern bytes reserved after the fixed header.
 */
199 #define ITEM_RAW_SIZE \
200 (offsetof(struct rte_flow_item_raw, pattern) + ITEM_RAW_PATTERN_SIZE)
202 /** Number of queue[] entries in struct rte_flow_action_rss. */
203 #define ACTION_RSS_NUM 32
205 /** Storage size for struct rte_flow_action_rss including queues. */
/*
 * Same idiom as ITEM_RAW_SIZE: fixed header up to queue[], plus room for
 * ACTION_RSS_NUM trailing queue entries. The (struct ... *)0 cast is the
 * classic sizeof-through-null-pointer trick to get one element's size
 * without an object (evaluated at compile time, never dereferenced).
 */
206 #define ACTION_RSS_SIZE \
207 (offsetof(struct rte_flow_action_rss, queue) + \
208 sizeof(*((struct rte_flow_action_rss *)0)->queue) * ACTION_RSS_NUM)
210 /** Maximum number of subsequent tokens and arguments on the stack. */
/* Bounds both next[] and args[] in struct context; push_args() checks it. */
211 #define CTX_STACK_SIZE 16
213 /** Parser context. */
215 /** Stack of subsequent token lists to process. */
216 const enum index *next[CTX_STACK_SIZE];
217 /** Arguments for stacked tokens. */
218 const void *args[CTX_STACK_SIZE];
219 enum index curr; /**< Current token index. */
220 enum index prev; /**< Index of the last token seen. */
221 int next_num; /**< Number of entries in next[]. */
222 int args_num; /**< Number of entries in args[]. */
223 uint32_t eol:1; /**< EOL has been detected. */
224 uint32_t last:1; /**< No more arguments. */
225 uint16_t port; /**< Current port ID (for completions). */
226 uint32_t objdata; /**< Object-specific data. */
227 void *object; /**< Address of current object for relative offsets. */
228 void *objmask; /**< Object a full mask must be written to. */
231 /** Token argument. */
233 uint32_t hton:1; /**< Use network byte ordering. */
234 uint32_t sign:1; /**< Value is signed. */
235 uint32_t offset; /**< Relative offset from ctx->object. */
236 uint32_t size; /**< Field size. */
237 const uint8_t *mask; /**< Bit-mask to use instead of offset/size. */
240 /** Parser token definition. */
242 /** Type displayed during completion (defaults to "TOKEN"). */
244 /** Help displayed during completion (defaults to token name). */
246 /** Private data used by parser functions. */
249 * Lists of subsequent tokens to push on the stack. Each call to the
250 * parser consumes the last entry of that stack.
252 const enum index *const *next;
253 /** Arguments stack for subsequent tokens that need them. */
254 const struct arg *const *args;
256 * Token-processing callback, returns -1 in case of error, the
257 * length of the matched string otherwise. If NULL, attempts to
258 * match the token name.
260 * If buf is not NULL, the result should be stored in it according
261 * to context. An error is returned if not large enough.
263 int (*call)(struct context *ctx, const struct token *token,
264 const char *str, unsigned int len,
265 void *buf, unsigned int size);
267 * Callback that provides possible values for this token, used for
268 * completion. Returns -1 in case of error, the number of possible
269 * values otherwise. If NULL, the token name is used.
271 * If buf is not NULL, entry index ent is written to buf and the
272 * full length of the entry is returned (same behavior as
275 int (*comp)(struct context *ctx, const struct token *token,
276 unsigned int ent, char *buf, unsigned int size);
277 /** Mandatory token name, no default value. */
281 /** Static initializer for the next field. */
/*
 * Anonymous compound-literal array of token-index lists; the trailing
 * NULL entry is the sentinel the parser uses to stop walking the array.
 */
282 #define NEXT(...) (const enum index *const []){ __VA_ARGS__, NULL, }
284 /** Static initializer for a NEXT() entry. */
/* One token-index list, terminated by the ZERO token (index 0). */
285 #define NEXT_ENTRY(...) (const enum index []){ __VA_ARGS__, ZERO, }
287 /** Static initializer for the args field. */
/* NULL-terminated compound-literal array of argument descriptors. */
288 #define ARGS(...) (const struct arg *const []){ __VA_ARGS__, NULL, }
290 /** Static initializer for ARGS() to target a field. */
291 #define ARGS_ENTRY(s, f) \
292 (&(const struct arg){ \
293 .offset = offsetof(s, f), \
294 .size = sizeof(((s *)0)->f), \
297 /** Static initializer for ARGS() to target a bit-field. */
298 #define ARGS_ENTRY_BF(s, f, b) \
299 (&(const struct arg){ \
301 .mask = (const void *)&(const s){ .f = (1 << (b)) - 1 }, \
304 /** Static initializer for ARGS() to target an arbitrary bit-mask. */
305 #define ARGS_ENTRY_MASK(s, f, m) \
306 (&(const struct arg){ \
307 .offset = offsetof(s, f), \
308 .size = sizeof(((s *)0)->f), \
309 .mask = (const void *)(m), \
312 /** Same as ARGS_ENTRY_MASK() using network byte ordering for the value. */
313 #define ARGS_ENTRY_MASK_HTON(s, f, m) \
314 (&(const struct arg){ \
316 .offset = offsetof(s, f), \
317 .size = sizeof(((s *)0)->f), \
318 .mask = (const void *)(m), \
321 /** Static initializer for ARGS() to target a pointer. */
322 #define ARGS_ENTRY_PTR(s, f) \
323 (&(const struct arg){ \
324 .size = sizeof(*((s *)0)->f), \
327 /** Static initializer for ARGS() with arbitrary size. */
328 #define ARGS_ENTRY_USZ(s, f, sz) \
329 (&(const struct arg){ \
330 .offset = offsetof(s, f), \
334 /** Same as ARGS_ENTRY() using network byte ordering. */
335 #define ARGS_ENTRY_HTON(s, f) \
336 (&(const struct arg){ \
338 .offset = offsetof(s, f), \
339 .size = sizeof(((s *)0)->f), \
342 /** Parser output buffer layout expected by cmd_flow_parsed(). */
344 enum index command; /**< Flow command. */
345 uint16_t port; /**< Affected port ID. */
348 struct rte_flow_attr attr;
349 struct rte_flow_item *pattern;
350 struct rte_flow_action *actions;
354 } vc; /**< Validate/create arguments. */
358 } destroy; /**< Destroy arguments. */
361 enum rte_flow_action_type action;
362 } query; /**< Query arguments. */
366 } list; /**< List arguments. */
367 } args; /**< Command arguments. */
370 /** Private data for pattern items. */
371 struct parse_item_priv {
372 enum rte_flow_item_type type; /**< Item type. */
373 uint32_t size; /**< Size of item specification structure. */
376 #define PRIV_ITEM(t, s) \
377 (&(const struct parse_item_priv){ \
378 .type = RTE_FLOW_ITEM_TYPE_ ## t, \
382 /** Private data for actions. */
383 struct parse_action_priv {
384 enum rte_flow_action_type type; /**< Action type. */
385 uint32_t size; /**< Size of action configuration structure. */
388 #define PRIV_ACTION(t, s) \
389 (&(const struct parse_action_priv){ \
390 .type = RTE_FLOW_ACTION_TYPE_ ## t, \
394 static const enum index next_vc_attr[] = {
403 static const enum index next_destroy_attr[] = {
409 static const enum index next_list_attr[] = {
415 static const enum index item_param[] = {
424 static const enum index next_item[] = {
449 static const enum index item_any[] = {
455 static const enum index item_vf[] = {
461 static const enum index item_port[] = {
467 static const enum index item_raw[] = {
477 static const enum index item_eth[] = {
485 static const enum index item_vlan[] = {
495 static const enum index item_ipv4[] = {
505 static const enum index item_ipv6[] = {
516 static const enum index item_icmp[] = {
523 static const enum index item_udp[] = {
530 static const enum index item_tcp[] = {
537 static const enum index item_sctp[] = {
546 static const enum index item_vxlan[] = {
552 static const enum index item_e_tag[] = {
553 ITEM_E_TAG_GRP_ECID_B,
558 static const enum index item_nvgre[] = {
564 static const enum index item_mpls[] = {
570 static const enum index item_gre[] = {
576 static const enum index next_action[] = {
592 static const enum index action_mark[] = {
598 static const enum index action_queue[] = {
604 static const enum index action_dup[] = {
610 static const enum index action_rss[] = {
616 static const enum index action_vf[] = {
623 static int parse_init(struct context *, const struct token *,
624 const char *, unsigned int,
625 void *, unsigned int);
626 static int parse_vc(struct context *, const struct token *,
627 const char *, unsigned int,
628 void *, unsigned int);
629 static int parse_vc_spec(struct context *, const struct token *,
630 const char *, unsigned int, void *, unsigned int);
631 static int parse_vc_conf(struct context *, const struct token *,
632 const char *, unsigned int, void *, unsigned int);
633 static int parse_vc_action_rss_queue(struct context *, const struct token *,
634 const char *, unsigned int, void *,
636 static int parse_destroy(struct context *, const struct token *,
637 const char *, unsigned int,
638 void *, unsigned int);
639 static int parse_flush(struct context *, const struct token *,
640 const char *, unsigned int,
641 void *, unsigned int);
642 static int parse_query(struct context *, const struct token *,
643 const char *, unsigned int,
644 void *, unsigned int);
645 static int parse_action(struct context *, const struct token *,
646 const char *, unsigned int,
647 void *, unsigned int);
648 static int parse_list(struct context *, const struct token *,
649 const char *, unsigned int,
650 void *, unsigned int);
651 static int parse_int(struct context *, const struct token *,
652 const char *, unsigned int,
653 void *, unsigned int);
654 static int parse_prefix(struct context *, const struct token *,
655 const char *, unsigned int,
656 void *, unsigned int);
657 static int parse_boolean(struct context *, const struct token *,
658 const char *, unsigned int,
659 void *, unsigned int);
660 static int parse_string(struct context *, const struct token *,
661 const char *, unsigned int,
662 void *, unsigned int);
663 static int parse_mac_addr(struct context *, const struct token *,
664 const char *, unsigned int,
665 void *, unsigned int);
666 static int parse_ipv4_addr(struct context *, const struct token *,
667 const char *, unsigned int,
668 void *, unsigned int);
669 static int parse_ipv6_addr(struct context *, const struct token *,
670 const char *, unsigned int,
671 void *, unsigned int);
672 static int parse_port(struct context *, const struct token *,
673 const char *, unsigned int,
674 void *, unsigned int);
675 static int comp_none(struct context *, const struct token *,
676 unsigned int, char *, unsigned int);
677 static int comp_boolean(struct context *, const struct token *,
678 unsigned int, char *, unsigned int);
679 static int comp_action(struct context *, const struct token *,
680 unsigned int, char *, unsigned int);
681 static int comp_port(struct context *, const struct token *,
682 unsigned int, char *, unsigned int);
683 static int comp_rule_id(struct context *, const struct token *,
684 unsigned int, char *, unsigned int);
685 static int comp_vc_action_rss_queue(struct context *, const struct token *,
686 unsigned int, char *, unsigned int);
688 /** Token definitions. */
689 static const struct token token_list[] = {
690 /* Special tokens. */
693 .help = "null entry, abused as the entry point",
694 .next = NEXT(NEXT_ENTRY(FLOW)),
699 .help = "command may end here",
705 .help = "integer value",
710 .name = "{unsigned}",
712 .help = "unsigned integer value",
719 .help = "prefix length for bit-mask",
720 .call = parse_prefix,
726 .help = "any boolean value",
727 .call = parse_boolean,
728 .comp = comp_boolean,
733 .help = "fixed string",
734 .call = parse_string,
738 .name = "{MAC address}",
740 .help = "standard MAC address notation",
741 .call = parse_mac_addr,
745 .name = "{IPv4 address}",
746 .type = "IPV4 ADDRESS",
747 .help = "standard IPv4 address notation",
748 .call = parse_ipv4_addr,
752 .name = "{IPv6 address}",
753 .type = "IPV6 ADDRESS",
754 .help = "standard IPv6 address notation",
755 .call = parse_ipv6_addr,
761 .help = "rule identifier",
763 .comp = comp_rule_id,
768 .help = "port identifier",
773 .name = "{group_id}",
775 .help = "group identifier",
782 .help = "priority level",
786 /* Top-level command. */
789 .type = "{command} {port_id} [{arg} [...]]",
790 .help = "manage ingress/egress flow rules",
791 .next = NEXT(NEXT_ENTRY
800 /* Sub-level commands. */
803 .help = "check whether a flow rule can be created",
804 .next = NEXT(next_vc_attr, NEXT_ENTRY(PORT_ID)),
805 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
810 .help = "create a flow rule",
811 .next = NEXT(next_vc_attr, NEXT_ENTRY(PORT_ID)),
812 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
817 .help = "destroy specific flow rules",
818 .next = NEXT(NEXT_ENTRY(DESTROY_RULE), NEXT_ENTRY(PORT_ID)),
819 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
820 .call = parse_destroy,
824 .help = "destroy all flow rules",
825 .next = NEXT(NEXT_ENTRY(PORT_ID)),
826 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
831 .help = "query an existing flow rule",
832 .next = NEXT(NEXT_ENTRY(QUERY_ACTION),
834 NEXT_ENTRY(PORT_ID)),
835 .args = ARGS(ARGS_ENTRY(struct buffer, args.query.action),
836 ARGS_ENTRY(struct buffer, args.query.rule),
837 ARGS_ENTRY(struct buffer, port)),
842 .help = "list existing flow rules",
843 .next = NEXT(next_list_attr, NEXT_ENTRY(PORT_ID)),
844 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
847 /* Destroy arguments. */
850 .help = "specify a rule identifier",
851 .next = NEXT(next_destroy_attr, NEXT_ENTRY(RULE_ID)),
852 .args = ARGS(ARGS_ENTRY_PTR(struct buffer, args.destroy.rule)),
853 .call = parse_destroy,
855 /* Query arguments. */
859 .help = "action to query, must be part of the rule",
860 .call = parse_action,
863 /* List arguments. */
866 .help = "specify a group",
867 .next = NEXT(next_list_attr, NEXT_ENTRY(GROUP_ID)),
868 .args = ARGS(ARGS_ENTRY_PTR(struct buffer, args.list.group)),
871 /* Validate/create attributes. */
874 .help = "specify a group",
875 .next = NEXT(next_vc_attr, NEXT_ENTRY(GROUP_ID)),
876 .args = ARGS(ARGS_ENTRY(struct rte_flow_attr, group)),
881 .help = "specify a priority level",
882 .next = NEXT(next_vc_attr, NEXT_ENTRY(PRIORITY_LEVEL)),
883 .args = ARGS(ARGS_ENTRY(struct rte_flow_attr, priority)),
888 .help = "affect rule to ingress",
889 .next = NEXT(next_vc_attr),
894 .help = "affect rule to egress",
895 .next = NEXT(next_vc_attr),
898 /* Validate/create pattern. */
901 .help = "submit a list of pattern items",
902 .next = NEXT(next_item),
907 .help = "match value perfectly (with full bit-mask)",
908 .call = parse_vc_spec,
910 [ITEM_PARAM_SPEC] = {
912 .help = "match value according to configured bit-mask",
913 .call = parse_vc_spec,
915 [ITEM_PARAM_LAST] = {
917 .help = "specify upper bound to establish a range",
918 .call = parse_vc_spec,
920 [ITEM_PARAM_MASK] = {
922 .help = "specify bit-mask with relevant bits set to one",
923 .call = parse_vc_spec,
925 [ITEM_PARAM_PREFIX] = {
927 .help = "generate bit-mask from a prefix length",
928 .call = parse_vc_spec,
932 .help = "specify next pattern item",
933 .next = NEXT(next_item),
937 .help = "end list of pattern items",
938 .priv = PRIV_ITEM(END, 0),
939 .next = NEXT(NEXT_ENTRY(ACTIONS)),
944 .help = "no-op pattern item",
945 .priv = PRIV_ITEM(VOID, 0),
946 .next = NEXT(NEXT_ENTRY(ITEM_NEXT)),
951 .help = "perform actions when pattern does not match",
952 .priv = PRIV_ITEM(INVERT, 0),
953 .next = NEXT(NEXT_ENTRY(ITEM_NEXT)),
958 .help = "match any protocol for the current layer",
959 .priv = PRIV_ITEM(ANY, sizeof(struct rte_flow_item_any)),
960 .next = NEXT(item_any),
965 .help = "number of layers covered",
966 .next = NEXT(item_any, NEXT_ENTRY(UNSIGNED), item_param),
967 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_any, num)),
971 .help = "match packets addressed to the physical function",
972 .priv = PRIV_ITEM(PF, 0),
973 .next = NEXT(NEXT_ENTRY(ITEM_NEXT)),
978 .help = "match packets addressed to a virtual function ID",
979 .priv = PRIV_ITEM(VF, sizeof(struct rte_flow_item_vf)),
980 .next = NEXT(item_vf),
985 .help = "destination VF ID",
986 .next = NEXT(item_vf, NEXT_ENTRY(UNSIGNED), item_param),
987 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_vf, id)),
991 .help = "device-specific physical port index to use",
992 .priv = PRIV_ITEM(PORT, sizeof(struct rte_flow_item_port)),
993 .next = NEXT(item_port),
996 [ITEM_PORT_INDEX] = {
998 .help = "physical port index",
999 .next = NEXT(item_port, NEXT_ENTRY(UNSIGNED), item_param),
1000 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_port, index)),
1004 .help = "match an arbitrary byte string",
1005 .priv = PRIV_ITEM(RAW, ITEM_RAW_SIZE),
1006 .next = NEXT(item_raw),
1009 [ITEM_RAW_RELATIVE] = {
1011 .help = "look for pattern after the previous item",
1012 .next = NEXT(item_raw, NEXT_ENTRY(BOOLEAN), item_param),
1013 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_item_raw,
1016 [ITEM_RAW_SEARCH] = {
1018 .help = "search pattern from offset (see also limit)",
1019 .next = NEXT(item_raw, NEXT_ENTRY(BOOLEAN), item_param),
1020 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_item_raw,
1023 [ITEM_RAW_OFFSET] = {
1025 .help = "absolute or relative offset for pattern",
1026 .next = NEXT(item_raw, NEXT_ENTRY(INTEGER), item_param),
1027 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_raw, offset)),
1029 [ITEM_RAW_LIMIT] = {
1031 .help = "search area limit for start of pattern",
1032 .next = NEXT(item_raw, NEXT_ENTRY(UNSIGNED), item_param),
1033 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_raw, limit)),
1035 [ITEM_RAW_PATTERN] = {
1037 .help = "byte string to look for",
1038 .next = NEXT(item_raw,
1040 NEXT_ENTRY(ITEM_PARAM_IS,
1043 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_raw, length),
1044 ARGS_ENTRY_USZ(struct rte_flow_item_raw,
1046 ITEM_RAW_PATTERN_SIZE)),
1050 .help = "match Ethernet header",
1051 .priv = PRIV_ITEM(ETH, sizeof(struct rte_flow_item_eth)),
1052 .next = NEXT(item_eth),
1057 .help = "destination MAC",
1058 .next = NEXT(item_eth, NEXT_ENTRY(MAC_ADDR), item_param),
1059 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_eth, dst)),
1063 .help = "source MAC",
1064 .next = NEXT(item_eth, NEXT_ENTRY(MAC_ADDR), item_param),
1065 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_eth, src)),
1069 .help = "EtherType",
1070 .next = NEXT(item_eth, NEXT_ENTRY(UNSIGNED), item_param),
1071 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_eth, type)),
1075 .help = "match 802.1Q/ad VLAN tag",
1076 .priv = PRIV_ITEM(VLAN, sizeof(struct rte_flow_item_vlan)),
1077 .next = NEXT(item_vlan),
1080 [ITEM_VLAN_TPID] = {
1082 .help = "tag protocol identifier",
1083 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
1084 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_vlan, tpid)),
1088 .help = "tag control information",
1089 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
1090 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_vlan, tci)),
1094 .help = "priority code point",
1095 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
1096 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_vlan,
1101 .help = "drop eligible indicator",
1102 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
1103 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_vlan,
1108 .help = "VLAN identifier",
1109 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
1110 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_vlan,
1115 .help = "match IPv4 header",
1116 .priv = PRIV_ITEM(IPV4, sizeof(struct rte_flow_item_ipv4)),
1117 .next = NEXT(item_ipv4),
1122 .help = "type of service",
1123 .next = NEXT(item_ipv4, NEXT_ENTRY(UNSIGNED), item_param),
1124 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
1125 hdr.type_of_service)),
1129 .help = "time to live",
1130 .next = NEXT(item_ipv4, NEXT_ENTRY(UNSIGNED), item_param),
1131 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
1134 [ITEM_IPV4_PROTO] = {
1136 .help = "next protocol ID",
1137 .next = NEXT(item_ipv4, NEXT_ENTRY(UNSIGNED), item_param),
1138 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
1139 hdr.next_proto_id)),
1143 .help = "source address",
1144 .next = NEXT(item_ipv4, NEXT_ENTRY(IPV4_ADDR), item_param),
1145 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
1150 .help = "destination address",
1151 .next = NEXT(item_ipv4, NEXT_ENTRY(IPV4_ADDR), item_param),
1152 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
1157 .help = "match IPv6 header",
1158 .priv = PRIV_ITEM(IPV6, sizeof(struct rte_flow_item_ipv6)),
1159 .next = NEXT(item_ipv6),
1164 .help = "traffic class",
1165 .next = NEXT(item_ipv6, NEXT_ENTRY(UNSIGNED), item_param),
1166 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_ipv6,
1168 "\x0f\xf0\x00\x00")),
1170 [ITEM_IPV6_FLOW] = {
1172 .help = "flow label",
1173 .next = NEXT(item_ipv6, NEXT_ENTRY(UNSIGNED), item_param),
1174 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_ipv6,
1176 "\x00\x0f\xff\xff")),
1178 [ITEM_IPV6_PROTO] = {
1180 .help = "protocol (next header)",
1181 .next = NEXT(item_ipv6, NEXT_ENTRY(UNSIGNED), item_param),
1182 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6,
1187 .help = "hop limit",
1188 .next = NEXT(item_ipv6, NEXT_ENTRY(UNSIGNED), item_param),
1189 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6,
1194 .help = "source address",
1195 .next = NEXT(item_ipv6, NEXT_ENTRY(IPV6_ADDR), item_param),
1196 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6,
1201 .help = "destination address",
1202 .next = NEXT(item_ipv6, NEXT_ENTRY(IPV6_ADDR), item_param),
1203 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6,
1208 .help = "match ICMP header",
1209 .priv = PRIV_ITEM(ICMP, sizeof(struct rte_flow_item_icmp)),
1210 .next = NEXT(item_icmp),
1213 [ITEM_ICMP_TYPE] = {
1215 .help = "ICMP packet type",
1216 .next = NEXT(item_icmp, NEXT_ENTRY(UNSIGNED), item_param),
1217 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp,
1220 [ITEM_ICMP_CODE] = {
1222 .help = "ICMP packet code",
1223 .next = NEXT(item_icmp, NEXT_ENTRY(UNSIGNED), item_param),
1224 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp,
1229 .help = "match UDP header",
1230 .priv = PRIV_ITEM(UDP, sizeof(struct rte_flow_item_udp)),
1231 .next = NEXT(item_udp),
1236 .help = "UDP source port",
1237 .next = NEXT(item_udp, NEXT_ENTRY(UNSIGNED), item_param),
1238 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_udp,
1243 .help = "UDP destination port",
1244 .next = NEXT(item_udp, NEXT_ENTRY(UNSIGNED), item_param),
1245 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_udp,
1250 .help = "match TCP header",
1251 .priv = PRIV_ITEM(TCP, sizeof(struct rte_flow_item_tcp)),
1252 .next = NEXT(item_tcp),
1257 .help = "TCP source port",
1258 .next = NEXT(item_tcp, NEXT_ENTRY(UNSIGNED), item_param),
1259 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_tcp,
1264 .help = "TCP destination port",
1265 .next = NEXT(item_tcp, NEXT_ENTRY(UNSIGNED), item_param),
1266 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_tcp,
1271 .help = "match SCTP header",
1272 .priv = PRIV_ITEM(SCTP, sizeof(struct rte_flow_item_sctp)),
1273 .next = NEXT(item_sctp),
1278 .help = "SCTP source port",
1279 .next = NEXT(item_sctp, NEXT_ENTRY(UNSIGNED), item_param),
1280 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_sctp,
1285 .help = "SCTP destination port",
1286 .next = NEXT(item_sctp, NEXT_ENTRY(UNSIGNED), item_param),
1287 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_sctp,
1292 .help = "validation tag",
1293 .next = NEXT(item_sctp, NEXT_ENTRY(UNSIGNED), item_param),
1294 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_sctp,
1297 [ITEM_SCTP_CKSUM] = {
1300 .next = NEXT(item_sctp, NEXT_ENTRY(UNSIGNED), item_param),
1301 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_sctp,
1306 .help = "match VXLAN header",
1307 .priv = PRIV_ITEM(VXLAN, sizeof(struct rte_flow_item_vxlan)),
1308 .next = NEXT(item_vxlan),
1311 [ITEM_VXLAN_VNI] = {
1313 .help = "VXLAN identifier",
1314 .next = NEXT(item_vxlan, NEXT_ENTRY(UNSIGNED), item_param),
1315 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_vxlan, vni)),
1319 .help = "match E-Tag header",
1320 .priv = PRIV_ITEM(E_TAG, sizeof(struct rte_flow_item_e_tag)),
1321 .next = NEXT(item_e_tag),
1324 [ITEM_E_TAG_GRP_ECID_B] = {
1325 .name = "grp_ecid_b",
1326 .help = "GRP and E-CID base",
1327 .next = NEXT(item_e_tag, NEXT_ENTRY(UNSIGNED), item_param),
1328 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_e_tag,
1334 .help = "match NVGRE header",
1335 .priv = PRIV_ITEM(NVGRE, sizeof(struct rte_flow_item_nvgre)),
1336 .next = NEXT(item_nvgre),
1339 [ITEM_NVGRE_TNI] = {
1341 .help = "virtual subnet ID",
1342 .next = NEXT(item_nvgre, NEXT_ENTRY(UNSIGNED), item_param),
1343 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_nvgre, tni)),
1347 .help = "match MPLS header",
1348 .priv = PRIV_ITEM(MPLS, sizeof(struct rte_flow_item_mpls)),
1349 .next = NEXT(item_mpls),
1352 [ITEM_MPLS_LABEL] = {
1354 .help = "MPLS label",
1355 .next = NEXT(item_mpls, NEXT_ENTRY(UNSIGNED), item_param),
1356 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_mpls,
1362 .help = "match GRE header",
1363 .priv = PRIV_ITEM(GRE, sizeof(struct rte_flow_item_gre)),
1364 .next = NEXT(item_gre),
1367 [ITEM_GRE_PROTO] = {
1369 .help = "GRE protocol type",
1370 .next = NEXT(item_gre, NEXT_ENTRY(UNSIGNED), item_param),
1371 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_gre,
1374 /* Validate/create actions. */
1377 .help = "submit a list of associated actions",
1378 .next = NEXT(next_action),
1383 .help = "specify next action",
1384 .next = NEXT(next_action),
1388 .help = "end list of actions",
1389 .priv = PRIV_ACTION(END, 0),
1394 .help = "no-op action",
1395 .priv = PRIV_ACTION(VOID, 0),
1396 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
1399 [ACTION_PASSTHRU] = {
1401 .help = "let subsequent rule process matched packets",
1402 .priv = PRIV_ACTION(PASSTHRU, 0),
1403 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
1408 .help = "attach 32 bit value to packets",
1409 .priv = PRIV_ACTION(MARK, sizeof(struct rte_flow_action_mark)),
1410 .next = NEXT(action_mark),
1413 [ACTION_MARK_ID] = {
1415 .help = "32 bit value to return with packets",
1416 .next = NEXT(action_mark, NEXT_ENTRY(UNSIGNED)),
1417 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_mark, id)),
1418 .call = parse_vc_conf,
1422 .help = "flag packets",
1423 .priv = PRIV_ACTION(FLAG, 0),
1424 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
1429 .help = "assign packets to a given queue index",
1430 .priv = PRIV_ACTION(QUEUE,
1431 sizeof(struct rte_flow_action_queue)),
1432 .next = NEXT(action_queue),
1435 [ACTION_QUEUE_INDEX] = {
1437 .help = "queue index to use",
1438 .next = NEXT(action_queue, NEXT_ENTRY(UNSIGNED)),
1439 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_queue, index)),
1440 .call = parse_vc_conf,
1444 .help = "drop packets (note: passthru has priority)",
1445 .priv = PRIV_ACTION(DROP, 0),
1446 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
1451 .help = "enable counters for this rule",
1452 .priv = PRIV_ACTION(COUNT, 0),
1453 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
1458 .help = "duplicate packets to a given queue index",
1459 .priv = PRIV_ACTION(DUP, sizeof(struct rte_flow_action_dup)),
1460 .next = NEXT(action_dup),
1463 [ACTION_DUP_INDEX] = {
1465 .help = "queue index to duplicate packets to",
1466 .next = NEXT(action_dup, NEXT_ENTRY(UNSIGNED)),
1467 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_dup, index)),
1468 .call = parse_vc_conf,
1472 .help = "spread packets among several queues",
1473 .priv = PRIV_ACTION(RSS, ACTION_RSS_SIZE),
1474 .next = NEXT(action_rss),
1477 [ACTION_RSS_QUEUES] = {
1479 .help = "queue indices to use",
1480 .next = NEXT(action_rss, NEXT_ENTRY(ACTION_RSS_QUEUE)),
1481 .call = parse_vc_conf,
1483 [ACTION_RSS_QUEUE] = {
1485 .help = "queue index",
1486 .call = parse_vc_action_rss_queue,
1487 .comp = comp_vc_action_rss_queue,
1491 .help = "redirect packets to physical device function",
1492 .priv = PRIV_ACTION(PF, 0),
1493 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
1498 .help = "redirect packets to virtual device function",
1499 .priv = PRIV_ACTION(VF, sizeof(struct rte_flow_action_vf)),
1500 .next = NEXT(action_vf),
1503 [ACTION_VF_ORIGINAL] = {
1505 .help = "use original VF ID if possible",
1506 .next = NEXT(action_vf, NEXT_ENTRY(BOOLEAN)),
1507 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_action_vf,
1509 .call = parse_vc_conf,
1513 .help = "VF ID to redirect packets to",
1514 .next = NEXT(action_vf, NEXT_ENTRY(UNSIGNED)),
1515 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_vf, id)),
1516 .call = parse_vc_conf,
1520 /** Remove and return last entry from argument stack. */
/*
 * LIFO pop from ctx->args[]: returns NULL when the stack is empty,
 * otherwise decrements args_num and returns the former top entry.
 * (Function body braces are elided in this listing.)
 */
1521 static const struct arg *
1522 pop_args(struct context *ctx)
1524 return ctx->args_num ? ctx->args[--ctx->args_num] : NULL;
1527 /** Add entry on top of the argument stack. */
/*
 * Refuses the push when args_num has reached CTX_STACK_SIZE; otherwise
 * stores arg at the top and bumps the count.
 */
1529 push_args(struct context *ctx, const struct arg *arg)
1531 if (ctx->args_num == CTX_STACK_SIZE)
/* NOTE(review): the failure-return statement for a full stack, and the
 * success return, are elided from this listing — confirm against source. */
1533 ctx->args[ctx->args_num++] = arg;
1537 /** Spread value into buffer according to bit-mask. */
/*
 * Writes val into *dst bit by bit, but only at bit positions set in
 * arg->mask[], starting at byte offset arg->offset. Called with
 * dst == NULL by parse_prefix() purely to measure the mask width.
 * Several lines (declarations of `sub`, loop framing, return) are
 * elided from this listing.
 */
1539 arg_entry_bf_fill(void *dst, uintmax_t val, const struct arg *arg)
1541 uint32_t i = arg->size;
/* Byte order determines whether the mask is walked from the last byte
 * (little-endian host) or the first. */
1549 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
1558 unsigned int shift = 0;
/* NOTE(review): `sub` is defined in elided lines; presumably the per-byte
 * step size — confirm against the full source. */
1559 uint8_t *buf = (uint8_t *)dst + arg->offset + (i -= sub);
/* Scan each set bit of the current mask byte... */
1561 for (shift = 0; arg->mask[i] >> shift; ++shift) {
1562 if (!(arg->mask[i] & (1 << shift)))
/* ...and deposit the next low-order bit of val there. */
1567 *buf &= ~(1 << shift);
1568 *buf |= (val & 1) << shift;
1576 /** Compare a string with a partial one of a given length. */
/*
 * Like strncmp(full, partial, partial_len), but a prefix match only
 * counts as equality when it consumes all of `full`: if `full` is
 * longer, the first unmatched character (nonzero) is returned so a
 * truncated token name does not match a longer one by accident.
 * (The early `return r;` on strncmp mismatch is elided here.)
 */
1578 strcmp_partial(const char *full, const char *partial, size_t partial_len)
1580 int r = strncmp(full, partial, partial_len);
1584 if (strlen(full) <= partial_len)
1586 return full[partial_len];
1590 * Parse a prefix length and generate a bit-mask.
1592 * Last argument (ctx->args) is retrieved to determine mask size, storage
1593 * location and whether the result must use network byte ordering.
/*
 * NOTE(review): this listing is heavily elided — error paths, the
 * declarations of u/end/bytes/extra/v and the final return are missing.
 * Comments below describe only what the visible lines establish.
 */
1596 parse_prefix(struct context *ctx, const struct token *token,
1597 const char *str, unsigned int len,
1598 void *buf, unsigned int size)
1600 const struct arg *arg = pop_args(ctx);
/* conv[n] = byte with the n highest bits set (0x00, 0x80, ... 0xff),
 * used to build the partial byte of a prefix mask. */
1601 static const uint8_t conv[] = "\x00\x80\xc0\xe0\xf0\xf8\xfc\xfe\xff";
1608 /* Argument is expected. */
/* Numeric prefix length; reject if strtoumax errored or did not consume
 * exactly `len` characters. */
1612 u = strtoumax(str, &end, 0);
1613 if (errno || (size_t)(end - str) != len)
/* dst == NULL: measure the bit-field mask width without writing. */
1618 extra = arg_entry_bf_fill(NULL, 0, arg);
/* Bit-field target: spread value and an all-ones mask through arg->mask. */
1627 if (!arg_entry_bf_fill(ctx->object, v, arg) ||
1628 !arg_entry_bf_fill(ctx->objmask, -1, arg))
/* Plain field: ensure the mask (full bytes + optional partial byte) fits. */
1635 if (bytes > size || bytes + !!extra > size)
1639 buf = (uint8_t *)ctx->object + arg->offset;
1640 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
/* Little-endian host without network ordering: set the high-address
 * (most significant) bytes to 0xff and clear the rest. */
1642 memset((uint8_t *)buf + size - bytes, 0xff, bytes);
1643 memset(buf, 0x00, size - bytes);
1645 ((uint8_t *)buf)[size - bytes - 1] = conv[extra];
/* Big-endian / network order: mask grows from the first byte. */
1649 memset(buf, 0xff, bytes);
1650 memset((uint8_t *)buf + bytes, 0x00, size - bytes);
1652 ((uint8_t *)buf)[bytes] = conv[extra];
/* objmask, when present, is fully significant for a prefix. */
1655 memset((uint8_t *)ctx->objmask + arg->offset, 0xff, size);
/* Error path: restore the argument so a retry can pop it again. */
1658 push_args(ctx, arg);
1662 /** Default parsing function for token name matching. */
/*
 * Fallback `call` implementation: succeeds iff str is a prefix-complete
 * match of token->name per strcmp_partial(). buf/size are accepted for
 * signature compatibility; the visible lines do not use them. Return
 * statements are elided from this listing.
 */
1664 parse_default(struct context *ctx, const struct token *token,
1665 const char *str, unsigned int len,
1666 void *buf, unsigned int size)
1671 if (strcmp_partial(token->name, str, len))
1676 /** Parse flow command, initialize output buffer for subsequent tokens. */
1678 parse_init(struct context *ctx, const struct token *token,
1679 	   const char *str, unsigned int len,
1680 	   void *buf, unsigned int size)
1682 	struct buffer *out = buf;
1684 	/* Token name must match. */
1685 	if (parse_default(ctx, token, str, len, NULL, 0) < 0)
1687 	/* Nothing else to do if there is no buffer. */
1690 	/* Make sure buffer is large enough. */
1691 	if (size < sizeof(*out))
1693 	/* Initialize buffer. */
/* The struct itself is zeroed; the tail of the caller-provided buffer
 * is poisoned with 0x22 to catch use of uninitialized data. */
1694 	memset(out, 0x00, sizeof(*out));
1695 	memset((uint8_t *)out + sizeof(*out), 0x22, size - sizeof(*out));
1698 	ctx->objmask = NULL;
1702 /** Parse tokens for validate/create commands. */
/* NOTE(review): excerpt is incomplete — several case labels, returns and
 * braces are elided; comments describe only the visible logic. Pattern
 * items grow upward from just after *out while their spec/last/mask data
 * grows downward from out->args.vc.data (end of buffer); the two must
 * never collide. */
1704 parse_vc(struct context *ctx, const struct token *token,
1705 	 const char *str, unsigned int len,
1706 	 void *buf, unsigned int size)
1708 	struct buffer *out = buf;
1712 	/* Token name must match. */
1713 	if (parse_default(ctx, token, str, len, NULL, 0) < 0)
1715 	/* Nothing else to do if there is no buffer. */
/* First token of the command: record it and anchor the downward-growing
 * data area at the end of the buffer. */
1718 	if (!out->command) {
1719 		if (ctx->curr != VALIDATE && ctx->curr != CREATE)
1721 		if (sizeof(*out) > size)
1723 		out->command = ctx->curr;
1726 		ctx->objmask = NULL;
1727 		out->args.vc.data = (uint8_t *)out + size;
/* Subsequent attribute tokens target the flow attributes object. */
1731 	ctx->object = &out->args.vc.attr;
1732 	ctx->objmask = NULL;
1733 	switch (ctx->curr) {
1738 		out->args.vc.attr.ingress = 1;
1741 		out->args.vc.attr.egress = 1;
/* "pattern" keyword: start the item array right after *out, aligned. */
1744 		out->args.vc.pattern =
1745 			(void *)RTE_ALIGN_CEIL((uintptr_t)(out + 1),
1747 		ctx->object = out->args.vc.pattern;
1748 		ctx->objmask = NULL;
/* "actions" keyword: the action array starts after the last pattern
 * item, aligned. */
1751 		out->args.vc.actions =
1752 			(void *)RTE_ALIGN_CEIL((uintptr_t)
1753 					       (out->args.vc.pattern +
1754 						out->args.vc.pattern_n),
1756 		ctx->object = out->args.vc.actions;
1757 		ctx->objmask = NULL;
/* Otherwise this token is a pattern item or an action name. */
1764 	if (!out->args.vc.actions) {
1765 		const struct parse_item_priv *priv = token->priv;
1766 		struct rte_flow_item *item =
1767 			out->args.vc.pattern + out->args.vc.pattern_n;
/* Reserve three copies of the item payload: spec, last and mask. */
1769 		data_size = priv->size * 3; /* spec, last, mask */
1770 		data = (void *)RTE_ALIGN_FLOOR((uintptr_t)
1771 					       (out->args.vc.data - data_size),
/* Fail when the upward item array would overlap the downward data. */
1773 		if ((uint8_t *)item + sizeof(*item) > data)
1775 		*item = (struct rte_flow_item){
1778 		++out->args.vc.pattern_n;
1780 		ctx->objmask = NULL;
/* Action branch: single configuration payload, same collision check. */
1782 		const struct parse_action_priv *priv = token->priv;
1783 		struct rte_flow_action *action =
1784 			out->args.vc.actions + out->args.vc.actions_n;
1786 		data_size = priv->size; /* configuration */
1787 		data = (void *)RTE_ALIGN_FLOOR((uintptr_t)
1788 					       (out->args.vc.data - data_size),
1790 		if ((uint8_t *)action + sizeof(*action) > data)
1792 		*action = (struct rte_flow_action){
1795 		++out->args.vc.actions_n;
1796 		ctx->object = action;
1797 		ctx->objmask = NULL;
/* Fresh payload area becomes the new top of the data region; objdata
 * remembers its size for parse_vc_spec()/parse_vc_conf(). */
1799 	memset(data, 0, data_size);
1800 	out->args.vc.data = data;
1801 	ctx->objdata = data_size;
1805 /** Parse pattern item parameter type. */
/* NOTE(review): excerpt elides some lines (index assignments per case,
 * returns). Selects which of spec/last/mask the following values are
 * written to for the current pattern item. */
1807 parse_vc_spec(struct context *ctx, const struct token *token,
1808 	      const char *str, unsigned int len,
1809 	      void *buf, unsigned int size)
1811 	struct buffer *out = buf;
1812 	struct rte_flow_item *item;
1818 	/* Token name must match. */
1819 	if (parse_default(ctx, token, str, len, NULL, 0) < 0)
1821 	/* Parse parameter types. */
1822 	switch (ctx->curr) {
1823 		static const enum index prefix[] = NEXT_ENTRY(PREFIX);
1829 	case ITEM_PARAM_SPEC:
1832 	case ITEM_PARAM_LAST:
1835 	case ITEM_PARAM_PREFIX:
1836 		/* Modify next token to expect a prefix. */
1837 		if (ctx->next_num < 2)
1839 		ctx->next[ctx->next_num - 2] = prefix;
1841 	case ITEM_PARAM_MASK:
1847 	/* Nothing else to do if there is no buffer. */
/* A pattern item must already exist (created by parse_vc()). */
1850 	if (!out->args.vc.pattern_n)
1852 	item = &out->args.vc.pattern[out->args.vc.pattern_n - 1];
/* parse_vc() reserved three equal slots: spec, last, mask. */
1853 	data_size = ctx->objdata / 3; /* spec, last, mask */
1854 	/* Point to selected object. */
1855 	ctx->object = out->args.vc.data + (data_size * index);
/* Visible path also tracks the mask slot so implicit masks can be
 * filled alongside explicit values. */
1857 		ctx->objmask = out->args.vc.data + (data_size * 2); /* mask */
1858 		item->mask = ctx->objmask;
1860 		ctx->objmask = NULL;
1861 	/* Update relevant item pointer. */
/* Array-of-pointers trick: index 0/1/2 selects spec/last/mask. */
1862 	*((const void **[]){ &item->spec, &item->last, &item->mask })[index] =
1867 /** Parse action configuration field. */
/* NOTE(review): a few lines elided. Points ctx->object at the payload
 * reserved for the most recent action and hooks it up as action->conf. */
1869 parse_vc_conf(struct context *ctx, const struct token *token,
1870 	      const char *str, unsigned int len,
1871 	      void *buf, unsigned int size)
1873 	struct buffer *out = buf;
1874 	struct rte_flow_action *action;
1877 	/* Token name must match. */
1878 	if (parse_default(ctx, token, str, len, NULL, 0) < 0)
1880 	/* Nothing else to do if there is no buffer. */
/* An action must already exist (created by parse_vc()). */
1883 	if (!out->args.vc.actions_n)
1885 	action = &out->args.vc.actions[out->args.vc.actions_n - 1];
1886 	/* Point to selected object. */
1887 	ctx->object = out->args.vc.data;
1888 	ctx->objmask = NULL;
1889 	/* Update configuration pointer. */
1890 	action->conf = ctx->object;
1895  * Parse queue field for RSS action.
1897  * Valid tokens are queue indices and the "end" token.
/* NOTE(review): excerpt elides several lines. The running queue count is
 * stashed in the upper 16 bits of ctx->objdata between invocations. */
1900 parse_vc_action_rss_queue(struct context *ctx, const struct token *token,
1901 			  const char *str, unsigned int len,
1902 			  void *buf, unsigned int size)
1904 	static const enum index next[] = NEXT_ENTRY(ACTION_RSS_QUEUE);
1911 	if (ctx->curr != ACTION_RSS_QUEUE)
/* Recover queue count accumulated so far. */
1913 	i = ctx->objdata >> 16;
/* "end" terminates the list; drop the count from objdata. */
1914 	if (!strcmp_partial("end", str, len)) {
1915 		ctx->objdata &= 0xffff;
/* Bounded by the fixed-size queue[] array of the RSS action. */
1918 	if (i >= ACTION_RSS_NUM)
1920 	if (push_args(ctx, ARGS_ENTRY(struct rte_flow_action_rss, queue[i])))
/* Let parse_int() consume the queue index into queue[i]. */
1922 	ret = parse_int(ctx, token, str, len, NULL, 0);
/* Save incremented count back into the upper bits. */
1928 	ctx->objdata = i << 16 | (ctx->objdata & 0xffff);
/* Re-arm this token so more queue indices can follow. */
1930 	if (ctx->next_num == RTE_DIM(ctx->next))
1932 	ctx->next[ctx->next_num++] = next;
/* Record the final number of queues in the action itself. */
1935 		((struct rte_flow_action_rss *)ctx->object)->num = i;
1939 /** Parse tokens for destroy command. */
/* NOTE(review): a few lines elided. First call records the command;
 * each later call appends one rule ID to a growing array after *out. */
1941 parse_destroy(struct context *ctx, const struct token *token,
1942 	      const char *str, unsigned int len,
1943 	      void *buf, unsigned int size)
1945 	struct buffer *out = buf;
1947 	/* Token name must match. */
1948 	if (parse_default(ctx, token, str, len, NULL, 0) < 0)
1950 	/* Nothing else to do if there is no buffer. */
1953 	if (!out->command) {
1954 		if (ctx->curr != DESTROY)
1956 		if (sizeof(*out) > size)
1958 		out->command = ctx->curr;
1961 		ctx->objmask = NULL;
/* Rule ID array starts right after the buffer header, aligned. */
1962 		out->args.destroy.rule =
1963 			(void *)RTE_ALIGN_CEIL((uintptr_t)(out + 1),
/* Refuse to grow the array past the end of the caller's buffer. */
1967 	if (((uint8_t *)(out->args.destroy.rule + out->args.destroy.rule_n) +
1968 	     sizeof(*out->args.destroy.rule)) > (uint8_t *)out + size)
1971 	ctx->object = out->args.destroy.rule + out->args.destroy.rule_n++;
1972 	ctx->objmask = NULL;
1976 /** Parse tokens for flush command. */
/* NOTE(review): a few lines elided. Flush takes no extra arguments;
 * only the command itself is recorded. */
1978 parse_flush(struct context *ctx, const struct token *token,
1979 	    const char *str, unsigned int len,
1980 	    void *buf, unsigned int size)
1982 	struct buffer *out = buf;
1984 	/* Token name must match. */
1985 	if (parse_default(ctx, token, str, len, NULL, 0) < 0)
1987 	/* Nothing else to do if there is no buffer. */
1990 	if (!out->command) {
1991 		if (ctx->curr != FLUSH)
1993 		if (sizeof(*out) > size)
1995 		out->command = ctx->curr;
1998 		ctx->objmask = NULL;
2003 /** Parse tokens for query command. */
/* NOTE(review): a few lines elided. Records the command; the rule ID
 * and action arguments are presumably filled by later tokens — confirm
 * against the elided lines. */
2005 parse_query(struct context *ctx, const struct token *token,
2006 	    const char *str, unsigned int len,
2007 	    void *buf, unsigned int size)
2009 	struct buffer *out = buf;
2011 	/* Token name must match. */
2012 	if (parse_default(ctx, token, str, len, NULL, 0) < 0)
2014 	/* Nothing else to do if there is no buffer. */
2017 	if (!out->command) {
2018 		if (ctx->curr != QUERY)
2020 		if (sizeof(*out) > size)
2022 		out->command = ctx->curr;
2025 		ctx->objmask = NULL;
2030 /** Parse action names. */
/* NOTE(review): excerpt elides some lines. Matches "str" against the
 * list of known action tokens and stores the corresponding action type
 * into the field described by the argument on the stack. */
2032 parse_action(struct context *ctx, const struct token *token,
2033 	     const char *str, unsigned int len,
2034 	     void *buf, unsigned int size)
2036 	struct buffer *out = buf;
2037 	const struct arg *arg = pop_args(ctx);
2041 	/* Argument is expected. */
2044 	/* Parse action name. */
2045 	for (i = 0; next_action[i]; ++i) {
2046 		const struct parse_action_priv *priv;
2048 		token = &token_list[next_action[i]];
2049 		if (strcmp_partial(token->name, str, len))
/* Copy the matched action's type value into the target field. */
2055 		memcpy((uint8_t *)ctx->object + arg->offset,
/* No match: push the argument back so it can be retried. */
2061 	push_args(ctx, arg);
2065 /** Parse tokens for list command. */
/* NOTE(review): a few lines elided. Mirrors parse_destroy() but
 * accumulates group IDs instead of rule IDs. */
2067 parse_list(struct context *ctx, const struct token *token,
2068 	   const char *str, unsigned int len,
2069 	   void *buf, unsigned int size)
2071 	struct buffer *out = buf;
2073 	/* Token name must match. */
2074 	if (parse_default(ctx, token, str, len, NULL, 0) < 0)
2076 	/* Nothing else to do if there is no buffer. */
2079 	if (!out->command) {
2080 		if (ctx->curr != LIST)
2082 		if (sizeof(*out) > size)
2084 		out->command = ctx->curr;
2087 		ctx->objmask = NULL;
/* Group ID array starts right after the buffer header, aligned. */
2088 		out->args.list.group =
2089 			(void *)RTE_ALIGN_CEIL((uintptr_t)(out + 1),
/* Refuse to grow the array past the end of the caller's buffer. */
2093 	if (((uint8_t *)(out->args.list.group + out->args.list.group_n) +
2094 	     sizeof(*out->args.list.group)) > (uint8_t *)out + size)
2097 	ctx->object = out->args.list.group + out->args.list.group_n++;
2098 	ctx->objmask = NULL;
2103  * Parse signed/unsigned integers 8 to 64-bit long.
2105  * Last argument (ctx->args) is retrieved to determine integer type and
/* NOTE(review): excerpt elides error paths and break statements.
 * Stores the parsed value into the field described by the argument,
 * honoring its size and host/network byte order (arg->hton). */
2109 parse_int(struct context *ctx, const struct token *token,
2110 	  const char *str, unsigned int len,
2111 	  void *buf, unsigned int size)
2113 	const struct arg *arg = pop_args(ctx);
2118 	/* Argument is expected. */
/* Signed fields go through strtoimax(), unsigned through strtoumax();
 * the whole token must be consumed. */
2123 		(uintmax_t)strtoimax(str, &end, 0) :
2124 		strtoumax(str, &end, 0);
2125 	if (errno || (size_t)(end - str) != len)
/* Bit-mask fields are scattered through arg_entry_bf_fill(); the
 * objmask (when present) gets all mask bits set via val = -1. */
2130 		if (!arg_entry_bf_fill(ctx->object, u, arg) ||
2131 		    !arg_entry_bf_fill(ctx->objmask, -1, arg))
2135 	buf = (uint8_t *)ctx->object + arg->offset;
/* Plain fields: dispatch on storage size. */
2139 	case sizeof(uint8_t):
2140 		*(uint8_t *)buf = u;
2142 	case sizeof(uint16_t):
2143 		*(uint16_t *)buf = arg->hton ? rte_cpu_to_be_16(u) : u;
/* 24-bit fields are stored byte by byte in the requested order. */
2145 	case sizeof(uint8_t [3]):
2146 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
2148 			((uint8_t *)buf)[0] = u;
2149 			((uint8_t *)buf)[1] = u >> 8;
2150 			((uint8_t *)buf)[2] = u >> 16;
2154 		((uint8_t *)buf)[0] = u >> 16;
2155 		((uint8_t *)buf)[1] = u >> 8;
2156 		((uint8_t *)buf)[2] = u;
2158 	case sizeof(uint32_t):
2159 		*(uint32_t *)buf = arg->hton ? rte_cpu_to_be_32(u) : u;
2161 	case sizeof(uint64_t):
2162 		*(uint64_t *)buf = arg->hton ? rte_cpu_to_be_64(u) : u;
/* When an objmask exists and the value was not already written into it,
 * repeat the store at the objmask location (visible path loops back). */
2167 	if (ctx->objmask && buf != (uint8_t *)ctx->objmask + arg->offset) {
2169 		buf = (uint8_t *)ctx->objmask + arg->offset;
/* On failure the argument is pushed back for a later retry. */
2174 	push_args(ctx, arg);
2181  * Two arguments (ctx->args) are retrieved from the stack to store data and
2182  * its length (in that order).
/* NOTE(review): excerpt elides some lines. Copies the token into a
 * fixed-size field and writes its length into a companion field via
 * parse_int(). */
2185 parse_string(struct context *ctx, const struct token *token,
2186 	     const char *str, unsigned int len,
2187 	     void *buf, unsigned int size)
2189 	const struct arg *arg_data = pop_args(ctx);
2190 	const struct arg *arg_len = pop_args(ctx);
/* Scratch area for printing the length as a decimal string. */
2191 	char tmp[16]; /* Ought to be enough. */
2194 	/* Arguments are expected. */
/* Visible error path restores the data argument before bailing out. */
2198 		push_args(ctx, arg_data);
2201 	size = arg_data->size;
2202 	/* Bit-mask fill is not supported. */
2203 	if (arg_data->mask || size < len)
2207 	/* Let parse_int() fill length information first. */
2208 	ret = snprintf(tmp, sizeof(tmp), "%u", len);
2211 	push_args(ctx, arg_len);
2212 	ret = parse_int(ctx, token, tmp, ret, NULL, 0);
2217 	buf = (uint8_t *)ctx->object + arg_data->offset;
2218 	/* Output buffer is not necessarily NUL-terminated. */
/* Unused tail is poisoned with 0x55 to surface stale-data bugs. */
2219 	memcpy(buf, str, len);
2220 	memset((uint8_t *)buf + len, 0x55, size - len);
2222 		memset((uint8_t *)ctx->objmask + arg_data->offset, 0xff, len);
/* On failure both arguments are pushed back (reverse order of pop). */
2225 	push_args(ctx, arg_len);
2226 	push_args(ctx, arg_data);
2231  * Parse a MAC address.
2233  * Last argument (ctx->args) is retrieved to determine storage size and
/* NOTE(review): a few lines elided. Delegates to the cmdline library's
 * Ethernet address parser and stores the result verbatim (network
 * order). */
2237 parse_mac_addr(struct context *ctx, const struct token *token,
2238 	       const char *str, unsigned int len,
2239 	       void *buf, unsigned int size)
2241 	const struct arg *arg = pop_args(ctx);
2242 	struct ether_addr tmp;
2246 	/* Argument is expected. */
2250 	/* Bit-mask fill is not supported. */
2251 	if (arg->mask || size != sizeof(tmp))
2253 	/* Only network endian is supported. */
/* Parser must consume the whole token, not just a valid prefix. */
2256 	ret = cmdline_parse_etheraddr(NULL, str, &tmp, size);
2257 	if (ret < 0 || (unsigned int)ret != len)
2261 	buf = (uint8_t *)ctx->object + arg->offset;
2262 	memcpy(buf, &tmp, size);
2264 		memset((uint8_t *)ctx->objmask + arg->offset, 0xff, size);
/* On failure the argument is pushed back for a later retry. */
2267 	push_args(ctx, arg);
2272  * Parse an IPv4 address.
2274  * Last argument (ctx->args) is retrieved to determine storage size and
/* NOTE(review): a few lines elided (NUL-termination of str2, error
 * labels). Uses inet_pton() with a NUL-terminated copy of the token. */
2278 parse_ipv4_addr(struct context *ctx, const struct token *token,
2279 		const char *str, unsigned int len,
2280 		void *buf, unsigned int size)
2282 	const struct arg *arg = pop_args(ctx);
2287 	/* Argument is expected. */
2291 	/* Bit-mask fill is not supported. */
2292 	if (arg->mask || size != sizeof(tmp))
2294 	/* Only network endian is supported. */
/* inet_pton() needs a NUL-terminated string; copy the token first. */
2297 	memcpy(str2, str, len);
2299 	ret = inet_pton(AF_INET, str2, &tmp);
/* Not dotted-quad notation: fall back to plain integer parsing. */
2301 		/* Attempt integer parsing. */
2302 		push_args(ctx, arg);
2303 		return parse_int(ctx, token, str, len, buf, size);
2307 	buf = (uint8_t *)ctx->object + arg->offset;
2308 	memcpy(buf, &tmp, size);
2310 		memset((uint8_t *)ctx->objmask + arg->offset, 0xff, size);
/* On failure the argument is pushed back for a later retry. */
2313 	push_args(ctx, arg);
2318  * Parse an IPv6 address.
2320  * Last argument (ctx->args) is retrieved to determine storage size and
/* NOTE(review): a few lines elided. Like parse_ipv4_addr() but for
 * AF_INET6 and without an integer fallback (none visible here). */
2324 parse_ipv6_addr(struct context *ctx, const struct token *token,
2325 		const char *str, unsigned int len,
2326 		void *buf, unsigned int size)
2328 	const struct arg *arg = pop_args(ctx);
2330 	struct in6_addr tmp;
2334 	/* Argument is expected. */
2338 	/* Bit-mask fill is not supported. */
2339 	if (arg->mask || size != sizeof(tmp))
2341 	/* Only network endian is supported. */
/* inet_pton() needs a NUL-terminated string; copy the token first. */
2344 	memcpy(str2, str, len);
2346 	ret = inet_pton(AF_INET6, str2, &tmp);
2351 	buf = (uint8_t *)ctx->object + arg->offset;
2352 	memcpy(buf, &tmp, size);
2354 		memset((uint8_t *)ctx->objmask + arg->offset, 0xff, size);
/* On failure the argument is pushed back for a later retry. */
2357 	push_args(ctx, arg);
2361 /** Boolean values (even indices stand for false). */
/* NOTE(review): the initializer entries are elided in this excerpt;
 * by the comment above, entries alternate false/true spellings. */
2362 static const char *const boolean_name[] = {
2371  * Parse a boolean value.
2373  * Last argument (ctx->args) is retrieved to determine storage size and
/* NOTE(review): a few lines elided. Maps a recognized boolean keyword
 * to "0"/"1" and defers storage to parse_int(). */
2377 parse_boolean(struct context *ctx, const struct token *token,
2378 	      const char *str, unsigned int len,
2379 	      void *buf, unsigned int size)
2381 	const struct arg *arg = pop_args(ctx);
2385 	/* Argument is expected. */
/* Find a matching keyword in boolean_name[]; ends at NULL sentinel. */
2388 	for (i = 0; boolean_name[i]; ++i)
2389 		if (!strcmp_partial(boolean_name[i], str, len))
2391 	/* Process token as integer. */
/* Even indices mean false, odd mean true (see boolean_name comment). */
2392 	if (boolean_name[i])
2393 		str = i & 1 ? "1" : "0";
2394 	push_args(ctx, arg);
2395 	ret = parse_int(ctx, token, str, strlen(str), buf, size);
/* Report the original token length on success, not the "0"/"1" length. */
2396 	return ret > 0 ? (int)len : ret;
2399 /** Parse port and update context. */
/* NOTE(review): a few lines elided. Parses the port ID into a local
 * scratch buffer (when the caller provided none) and caches it in
 * ctx->port for completion helpers. */
2401 parse_port(struct context *ctx, const struct token *token,
2402 	   const char *str, unsigned int len,
2403 	   void *buf, unsigned int size)
/* Compound-literal scratch buffer used when buf is NULL. */
2405 	struct buffer *out = &(struct buffer){ .port = 0 };
2413 	ctx->objmask = NULL;
2414 	size = sizeof(*out);
2416 	ret = parse_int(ctx, token, str, len, out, size);
2418 		ctx->port = out->port;
2424 /** No completion. */
/* Completion callback stub for tokens with nothing to offer. */
2426 comp_none(struct context *ctx, const struct token *token,
2427 	  unsigned int ent, char *buf, unsigned int size)
2437 /** Complete boolean values. */
/* With buf set, returns entry "ent"; otherwise (elided path) counts
 * the available entries. */
2439 comp_boolean(struct context *ctx, const struct token *token,
2440 	     unsigned int ent, char *buf, unsigned int size)
2446 	for (i = 0; boolean_name[i]; ++i)
2447 		if (buf && i == ent)
2448 			return snprintf(buf, size, "%s", boolean_name[i]);
2454 /** Complete action names. */
/* With buf set, returns the name of action entry "ent"; otherwise
 * (elided path) counts the available entries. */
2456 comp_action(struct context *ctx, const struct token *token,
2457 	    unsigned int ent, char *buf, unsigned int size)
2463 	for (i = 0; next_action[i]; ++i)
2464 		if (buf && i == ent)
2465 			return snprintf(buf, size, "%s",
2466 					token_list[next_action[i]].name);
2472 /** Complete available ports. */
/* Iterates over all attached Ethernet devices; with buf set, prints
 * port "ent", otherwise (elided path) counts ports. */
2474 comp_port(struct context *ctx, const struct token *token,
2475 	  unsigned int ent, char *buf, unsigned int size)
2482 	RTE_ETH_FOREACH_DEV(p) {
2483 		if (buf && i == ent)
2484 			return snprintf(buf, size, "%u", p);
2492 /** Complete available rule IDs. */
/* Walks the flow list of the port cached in ctx->port (set by
 * parse_port()); with buf set, prints rule "ent", otherwise (elided
 * path) counts rules. */
2494 comp_rule_id(struct context *ctx, const struct token *token,
2495 	     unsigned int ent, char *buf, unsigned int size)
2498 	struct rte_port *port;
2499 	struct port_flow *pf;
/* No completion possible without a valid, specific port. */
2502 	if (port_id_is_invalid(ctx->port, DISABLED_WARN) ||
2503 	    ctx->port == (uint16_t)RTE_PORT_ALL)
2505 	port = &ports[ctx->port];
2506 	for (pf = port->flow_list; pf != NULL; pf = pf->next) {
2507 		if (buf && i == ent)
2508 			return snprintf(buf, size, "%u", pf->id);
2516 /** Complete queue field for RSS action. */
/* Offers an empty entry (free-form queue index) and the "end"
 * terminator keyword. */
2518 comp_vc_action_rss_queue(struct context *ctx, const struct token *token,
2519 			 unsigned int ent, char *buf, unsigned int size)
2521 	static const char *const str[] = { "", "end", NULL };
2526 	for (i = 0; str[i] != NULL; ++i)
2527 		if (buf && i == ent)
2528 			return snprintf(buf, size, "%s", str[i]);
2534 /** Internal context. */
/* Single shared parser state — the cmdline callbacks below all operate
 * on this one instance; parsing is not reentrant. */
2535 static struct context cmd_flow_context;
2537 /** Global parser instance (cmdline API). */
/* Forward declaration; the definition appears at the end of the file. */
2538 cmdline_parse_inst_t cmd_flow;
2540 /** Initialize context. */
/* NOTE(review): most field resets are elided in this excerpt. */
2542 cmd_flow_context_init(struct context *ctx)
2544 	/* A full memset() is not necessary. */
2554 	ctx->objmask = NULL;
2557 /** Parse a token (cmdline API). */
/* NOTE(review): excerpt elides several lines (returns, error handling).
 * Drives one input token through the candidate tokens of the current
 * state, then pushes the matched token's successors and arguments. */
2559 cmd_flow_parse(cmdline_parse_token_hdr_t *hdr, const char *src, void *result,
2562 	struct context *ctx = &cmd_flow_context;
2563 	const struct token *token;
2564 	const enum index *list;
2569 	token = &token_list[ctx->curr];
2570 	/* Check argument length. */
/* Token ends at whitespace or a '#' comment marker. */
2573 	for (len = 0; src[len]; ++len)
2574 		if (src[len] == '#' || isspace(src[len]))
2578 	/* Last argument and EOL detection. */
2579 	for (i = len; src[i]; ++i)
2580 		if (src[i] == '#' || src[i] == '\r' || src[i] == '\n')
2582 		else if (!isspace(src[i])) {
2587 	if (src[i] == '\r' || src[i] == '\n') {
2591 	/* Initialize context if necessary. */
2592 	if (!ctx->next_num) {
2595 		ctx->next[ctx->next_num++] = token->next[0];
2597 	/* Process argument through candidates. */
2598 	ctx->prev = ctx->curr;
/* Candidates come from the top of the next-token stack. */
2599 	list = ctx->next[ctx->next_num - 1];
2600 	for (i = 0; list[i]; ++i) {
2601 		const struct token *next = &token_list[list[i]];
2604 		ctx->curr = list[i];
/* Tokens with a custom callback use it; others fall back to plain
 * name matching. */
2606 			tmp = next->call(ctx, next, src, len, result, size);
2608 			tmp = parse_default(ctx, next, src, len, result, size);
/* A candidate must consume exactly the whole token to match. */
2609 		if (tmp == -1 || tmp != len)
2617 	/* Push subsequent tokens if any. */
/* Successor lists are pushed (elided loop direction) for the next call. */
2619 	for (i = 0; token->next[i]; ++i) {
2620 		if (ctx->next_num == RTE_DIM(ctx->next))
2622 		ctx->next[ctx->next_num++] = token->next[i];
2624 	/* Push arguments if any. */
2626 	for (i = 0; token->args[i]; ++i) {
2627 		if (ctx->args_num == RTE_DIM(ctx->args))
2629 		ctx->args[ctx->args_num++] = token->args[i];
2634 /** Return number of completion entries (cmdline API). */
/* NOTE(review): a few lines elided. */
2636 cmd_flow_complete_get_nb(cmdline_parse_token_hdr_t *hdr)
2638 	struct context *ctx = &cmd_flow_context;
2639 	const struct token *token = &token_list[ctx->curr];
2640 	const enum index *list;
2644 	/* Count number of tokens in current list. */
/* Use the top of the next-token stack when available, otherwise the
 * current token's first successor list. */
2646 		list = ctx->next[ctx->next_num - 1];
2648 		list = token->next[0];
2649 	for (i = 0; list[i]; ++i)
2654 	 * If there is a single token, use its completion callback, otherwise
2655 	 * return the number of entries.
2657 	token = &token_list[list[0]];
2658 	if (i == 1 && token->comp) {
2659 		/* Save index for cmd_flow_get_help(). */
2660 		ctx->prev = list[0];
/* Callback invoked with NULL buffer returns the entry count. */
2661 		return token->comp(ctx, token, 0, NULL, 0);
2666 /** Return a completion entry (cmdline API). */
/* NOTE(review): a few lines elided (index validation). */
2668 cmd_flow_complete_get_elt(cmdline_parse_token_hdr_t *hdr, int index,
2669 			  char *dst, unsigned int size)
2671 	struct context *ctx = &cmd_flow_context;
2672 	const struct token *token = &token_list[ctx->curr];
2673 	const enum index *list;
2677 	/* Count number of tokens in current list. */
/* Same list selection as cmd_flow_complete_get_nb(). */
2679 		list = ctx->next[ctx->next_num - 1];
2681 		list = token->next[0];
2682 	for (i = 0; list[i]; ++i)
2686 	/* If there is a single token, use its completion callback. */
2687 	token = &token_list[list[0]];
2688 	if (i == 1 && token->comp) {
2689 		/* Save index for cmd_flow_get_help(). */
2690 		ctx->prev = list[0];
/* Normalize the callback's result to the 0/-1 cmdline convention. */
2691 		return token->comp(ctx, token, index, dst, size) < 0 ? -1 : 0;
2693 	/* Otherwise make sure the index is valid and use defaults. */
2696 	token = &token_list[list[index]];
2697 	snprintf(dst, size, "%s", token->name);
2698 	/* Save index for cmd_flow_get_help(). */
2699 	ctx->prev = list[index];
2703 /** Populate help strings for current token (cmdline API). */
/* NOTE(review): a few lines elided. Uses the token saved in ctx->prev
 * by the completion callbacks above. */
2705 cmd_flow_get_help(cmdline_parse_token_hdr_t *hdr, char *dst, unsigned int size)
2707 	struct context *ctx = &cmd_flow_context;
2708 	const struct token *token = &token_list[ctx->prev];
2713 	/* Set token type and update global help with details. */
2714 	snprintf(dst, size, "%s", (token->type ? token->type : "TOKEN"));
/* Prefer the detailed help text; fall back to the token's name. */
2716 		cmd_flow.help_str = token->help;
2718 		cmd_flow.help_str = token->name;
2722 /** Token definition template (cmdline API). */
/* One shared header is returned for every dynamic token; the real
 * behavior lives in the four callbacks referenced here. */
2723 static struct cmdline_token_hdr cmd_flow_token_hdr = {
2724 	.ops = &(struct cmdline_token_ops){
2725 		.parse = cmd_flow_parse,
2726 		.complete_get_nb = cmd_flow_complete_get_nb,
2727 		.complete_get_elt = cmd_flow_complete_get_elt,
2728 		.get_help = cmd_flow_get_help,
2733 /** Populate the next dynamic token. */
/* NOTE(review): excerpt elides several lines (the early returns and the
 * end-of-command decision body). */
2735 cmd_flow_tok(cmdline_parse_token_hdr_t **hdr,
2736 	     cmdline_parse_token_hdr_t **hdr_inst)
2738 	struct context *ctx = &cmd_flow_context;
2740 	/* Always reinitialize context before requesting the first token. */
/* hdr_inst points inside cmd_flow.tokens[]; offset 0 means first token. */
2741 	if (!(hdr_inst - cmd_flow.tokens))
2742 		cmd_flow_context_init(ctx);
2743 	/* Return NULL when no more tokens are expected. */
2744 	if (!ctx->next_num && ctx->curr) {
2748 	/* Determine if command should end here. */
2749 	if (ctx->eol && ctx->last && ctx->next_num) {
2750 		const enum index *list = ctx->next[ctx->next_num - 1];
2753 		for (i = 0; list[i]; ++i) {
/* All remaining tokens share the same template header. */
2760 	*hdr = &cmd_flow_token_hdr;
2763 /** Dispatch parsed buffer to function calls. */
/* NOTE(review): case labels/breaks are elided; each visible call maps a
 * completed command buffer to the corresponding port_flow_*() helper. */
2765 cmd_flow_parsed(const struct buffer *in)
2767 	switch (in->command) {
2769 		port_flow_validate(in->port, &in->args.vc.attr,
2770 				   in->args.vc.pattern, in->args.vc.actions);
2773 		port_flow_create(in->port, &in->args.vc.attr,
2774 				 in->args.vc.pattern, in->args.vc.actions);
2777 		port_flow_destroy(in->port, in->args.destroy.rule_n,
2778 				  in->args.destroy.rule);
2781 		port_flow_flush(in->port);
2784 		port_flow_query(in->port, in->args.query.rule,
2785 				in->args.query.action);
2788 		port_flow_list(in->port, in->args.list.group_n,
2789 			       in->args.list.group);
2796 /** Token generator and output processing callback (cmdline API). */
/* NOTE(review): the branch deciding between the two calls is elided;
 * arg0 doubles as the token-header slot during generation and as the
 * parsed buffer during dispatch. */
2798 cmd_flow_cb(void *arg0, struct cmdline *cl, void *arg2)
2801 		cmd_flow_tok(arg0, arg2);
2803 	cmd_flow_parsed(arg0);
2806 /** Global parser instance (cmdline API). */
2807 cmdline_parse_inst_t cmd_flow = {
2809 .data = NULL, /**< Unused. */
2810 .help_str = NULL, /**< Updated by cmd_flow_get_help(). */
2813 }, /**< Tokens are returned by cmd_flow_tok(). */