/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2016 6WIND S.A.
 * Copyright 2016 Mellanox Technologies, Ltd
 */

#include <errno.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>

#include <rte_common.h>
#include <rte_errno.h>
#include <rte_branch_prediction.h>
#include <rte_memcpy.h> /* rte_memcpy(), used by rte_flow_expand_rss(). */
#include "rte_ethdev.h"
#include "rte_flow_driver.h"
#include "rte_flow.h"

/**
 * Flow elements description tables.
 */
struct rte_flow_desc_data {
        const char *name;
        size_t size;
};

/** Generate flow_item[] entry. */
#define MK_FLOW_ITEM(t, s) \
        [RTE_FLOW_ITEM_TYPE_ ## t] = { \
                .name = # t, \
                .size = s, \
        }

/** Information about known flow pattern items. */
static const struct rte_flow_desc_data rte_flow_desc_item[] = {
        MK_FLOW_ITEM(END, 0),
        MK_FLOW_ITEM(VOID, 0),
        MK_FLOW_ITEM(INVERT, 0),
        MK_FLOW_ITEM(ANY, sizeof(struct rte_flow_item_any)),
        MK_FLOW_ITEM(PF, 0),
        MK_FLOW_ITEM(VF, sizeof(struct rte_flow_item_vf)),
        MK_FLOW_ITEM(PHY_PORT, sizeof(struct rte_flow_item_phy_port)),
        MK_FLOW_ITEM(PORT_ID, sizeof(struct rte_flow_item_port_id)),
        MK_FLOW_ITEM(RAW, sizeof(struct rte_flow_item_raw)),
        MK_FLOW_ITEM(ETH, sizeof(struct rte_flow_item_eth)),
        MK_FLOW_ITEM(VLAN, sizeof(struct rte_flow_item_vlan)),
        MK_FLOW_ITEM(IPV4, sizeof(struct rte_flow_item_ipv4)),
        MK_FLOW_ITEM(IPV6, sizeof(struct rte_flow_item_ipv6)),
        MK_FLOW_ITEM(ICMP, sizeof(struct rte_flow_item_icmp)),
        MK_FLOW_ITEM(UDP, sizeof(struct rte_flow_item_udp)),
        MK_FLOW_ITEM(TCP, sizeof(struct rte_flow_item_tcp)),
        MK_FLOW_ITEM(SCTP, sizeof(struct rte_flow_item_sctp)),
        MK_FLOW_ITEM(VXLAN, sizeof(struct rte_flow_item_vxlan)),
        MK_FLOW_ITEM(MPLS, sizeof(struct rte_flow_item_mpls)),
        MK_FLOW_ITEM(GRE, sizeof(struct rte_flow_item_gre)),
        MK_FLOW_ITEM(E_TAG, sizeof(struct rte_flow_item_e_tag)),
        MK_FLOW_ITEM(NVGRE, sizeof(struct rte_flow_item_nvgre)),
        MK_FLOW_ITEM(GENEVE, sizeof(struct rte_flow_item_geneve)),
        MK_FLOW_ITEM(VXLAN_GPE, sizeof(struct rte_flow_item_vxlan_gpe)),
        MK_FLOW_ITEM(ARP_ETH_IPV4, sizeof(struct rte_flow_item_arp_eth_ipv4)),
        MK_FLOW_ITEM(IPV6_EXT, sizeof(struct rte_flow_item_ipv6_ext)),
        MK_FLOW_ITEM(ICMP6, sizeof(struct rte_flow_item_icmp6)),
        MK_FLOW_ITEM(ICMP6_ND_NS, sizeof(struct rte_flow_item_icmp6_nd_ns)),
        MK_FLOW_ITEM(ICMP6_ND_NA, sizeof(struct rte_flow_item_icmp6_nd_na)),
        MK_FLOW_ITEM(ICMP6_ND_OPT, sizeof(struct rte_flow_item_icmp6_nd_opt)),
        MK_FLOW_ITEM(ICMP6_ND_OPT_SLA_ETH,
                     sizeof(struct rte_flow_item_icmp6_nd_opt_sla_eth)),
        MK_FLOW_ITEM(ICMP6_ND_OPT_TLA_ETH,
                     sizeof(struct rte_flow_item_icmp6_nd_opt_tla_eth)),
};

/** Generate flow_action[] entry. */
#define MK_FLOW_ACTION(t, s) \
        [RTE_FLOW_ACTION_TYPE_ ## t] = { \
                .name = # t, \
                .size = s, \
        }

/** Information about known flow actions. */
static const struct rte_flow_desc_data rte_flow_desc_action[] = {
        MK_FLOW_ACTION(END, 0),
        MK_FLOW_ACTION(VOID, 0),
        MK_FLOW_ACTION(PASSTHRU, 0),
        MK_FLOW_ACTION(MARK, sizeof(struct rte_flow_action_mark)),
        MK_FLOW_ACTION(FLAG, 0),
        MK_FLOW_ACTION(QUEUE, sizeof(struct rte_flow_action_queue)),
        MK_FLOW_ACTION(DROP, 0),
        MK_FLOW_ACTION(COUNT, sizeof(struct rte_flow_action_count)),
        MK_FLOW_ACTION(RSS, sizeof(struct rte_flow_action_rss)),
        MK_FLOW_ACTION(PF, 0),
        MK_FLOW_ACTION(VF, sizeof(struct rte_flow_action_vf)),
        MK_FLOW_ACTION(PHY_PORT, sizeof(struct rte_flow_action_phy_port)),
        MK_FLOW_ACTION(PORT_ID, sizeof(struct rte_flow_action_port_id)),
        MK_FLOW_ACTION(OF_SET_MPLS_TTL,
                       sizeof(struct rte_flow_action_of_set_mpls_ttl)),
        MK_FLOW_ACTION(OF_DEC_MPLS_TTL, 0),
        MK_FLOW_ACTION(OF_SET_NW_TTL,
                       sizeof(struct rte_flow_action_of_set_nw_ttl)),
        MK_FLOW_ACTION(OF_DEC_NW_TTL, 0),
        MK_FLOW_ACTION(OF_COPY_TTL_OUT, 0),
        MK_FLOW_ACTION(OF_COPY_TTL_IN, 0),
        MK_FLOW_ACTION(OF_POP_VLAN, 0),
        MK_FLOW_ACTION(OF_PUSH_VLAN,
                       sizeof(struct rte_flow_action_of_push_vlan)),
        MK_FLOW_ACTION(OF_SET_VLAN_VID,
                       sizeof(struct rte_flow_action_of_set_vlan_vid)),
        MK_FLOW_ACTION(OF_SET_VLAN_PCP,
                       sizeof(struct rte_flow_action_of_set_vlan_pcp)),
        MK_FLOW_ACTION(OF_POP_MPLS,
                       sizeof(struct rte_flow_action_of_pop_mpls)),
        MK_FLOW_ACTION(OF_PUSH_MPLS,
                       sizeof(struct rte_flow_action_of_push_mpls)),
};
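
/*
 * Illustrative note: these tables let generic code such as rte_flow_copy()
 * size an item or action configuration from its type alone, e.g.:
 *
 *      size_t n = rte_flow_desc_action[RTE_FLOW_ACTION_TYPE_QUEUE].size;
 *
 * where n ends up equal to sizeof(struct rte_flow_action_queue).
 */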

/*
 * Filter PMD return codes: report EIO if the underlying device has been
 * removed, otherwise pass the original error through unchanged.
 */
static int
flow_err(uint16_t port_id, int ret, struct rte_flow_error *error)
{
        if (ret == 0)
                return 0;
        if (rte_eth_dev_is_removed(port_id))
                return rte_flow_error_set(error, EIO,
                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                          NULL, rte_strerror(EIO));
        return ret;
}

/* Get generic flow operations structure from a port. */
const struct rte_flow_ops *
rte_flow_ops_get(uint16_t port_id, struct rte_flow_error *error)
{
        struct rte_eth_dev *dev = &rte_eth_devices[port_id];
        const struct rte_flow_ops *ops;
        int code;

        if (unlikely(!rte_eth_dev_is_valid_port(port_id)))
                code = ENODEV;
        else if (unlikely(!dev->dev_ops->filter_ctrl ||
                          dev->dev_ops->filter_ctrl(dev,
                                                    RTE_ETH_FILTER_GENERIC,
                                                    RTE_ETH_FILTER_GET,
                                                    &ops) ||
                          !ops))
                code = ENOSYS;
        else
                return ops;
        rte_flow_error_set(error, code, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                           NULL, rte_strerror(code));
        return NULL;
}

/* Check whether a flow rule can be created on a given port. */
int
rte_flow_validate(uint16_t port_id,
                  const struct rte_flow_attr *attr,
                  const struct rte_flow_item pattern[],
                  const struct rte_flow_action actions[],
                  struct rte_flow_error *error)
{
        const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
        struct rte_eth_dev *dev = &rte_eth_devices[port_id];

        if (unlikely(!ops))
                return -rte_errno;
        if (likely(!!ops->validate))
                return flow_err(port_id, ops->validate(dev, attr, pattern,
                                                       actions, error), error);
        return rte_flow_error_set(error, ENOSYS,
                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                  NULL, rte_strerror(ENOSYS));
}
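
/*
 * Usage sketch (illustrative, not part of this file): check whether a rule
 * matching any Ethernet frame and dropping it would be accepted, on a
 * hypothetical port 0.
 *
 *      struct rte_flow_attr attr = { .ingress = 1 };
 *      struct rte_flow_item pattern[] = {
 *              { .type = RTE_FLOW_ITEM_TYPE_ETH },
 *              { .type = RTE_FLOW_ITEM_TYPE_END },
 *      };
 *      struct rte_flow_action actions[] = {
 *              { .type = RTE_FLOW_ACTION_TYPE_DROP },
 *              { .type = RTE_FLOW_ACTION_TYPE_END },
 *      };
 *      struct rte_flow_error err;
 *
 *      if (rte_flow_validate(0, &attr, pattern, actions, &err))
 *              printf("rule not supported: %s\n",
 *                     err.message ? err.message : rte_strerror(rte_errno));
 */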

/* Create a flow rule on a given port. */
struct rte_flow *
rte_flow_create(uint16_t port_id,
                const struct rte_flow_attr *attr,
                const struct rte_flow_item pattern[],
                const struct rte_flow_action actions[],
                struct rte_flow_error *error)
{
        struct rte_eth_dev *dev = &rte_eth_devices[port_id];
        struct rte_flow *flow;
        const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

        if (unlikely(!ops))
                return NULL;
        if (likely(!!ops->create)) {
                flow = ops->create(dev, attr, pattern, actions, error);
                if (flow == NULL)
                        flow_err(port_id, -rte_errno, error);
                return flow;
        }
        rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                           NULL, rte_strerror(ENOSYS));
        return NULL;
}
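
/*
 * Usage sketch (illustrative): steer UDP-over-IPv4 traffic to Rx queue 1,
 * then destroy the rule. Port number and queue index are hypothetical.
 *
 *      struct rte_flow_attr attr = { .ingress = 1 };
 *      struct rte_flow_item pattern[] = {
 *              { .type = RTE_FLOW_ITEM_TYPE_ETH },
 *              { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *              { .type = RTE_FLOW_ITEM_TYPE_UDP },
 *              { .type = RTE_FLOW_ITEM_TYPE_END },
 *      };
 *      struct rte_flow_action_queue queue = { .index = 1 };
 *      struct rte_flow_action actions[] = {
 *              { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *              { .type = RTE_FLOW_ACTION_TYPE_END },
 *      };
 *      struct rte_flow_error err;
 *      struct rte_flow *flow;
 *
 *      flow = rte_flow_create(0, &attr, pattern, actions, &err);
 *      if (!flow)
 *              printf("flow creation failed: %s\n",
 *                     rte_strerror(rte_errno));
 *      else
 *              rte_flow_destroy(0, flow, &err);
 */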

/* Destroy a flow rule on a given port. */
int
rte_flow_destroy(uint16_t port_id,
                 struct rte_flow *flow,
                 struct rte_flow_error *error)
{
        struct rte_eth_dev *dev = &rte_eth_devices[port_id];
        const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

        if (unlikely(!ops))
                return -rte_errno;
        if (likely(!!ops->destroy))
                return flow_err(port_id, ops->destroy(dev, flow, error),
                                error);
        return rte_flow_error_set(error, ENOSYS,
                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                  NULL, rte_strerror(ENOSYS));
}

/* Destroy all flow rules associated with a port. */
int
rte_flow_flush(uint16_t port_id,
               struct rte_flow_error *error)
{
        struct rte_eth_dev *dev = &rte_eth_devices[port_id];
        const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

        if (unlikely(!ops))
                return -rte_errno;
        if (likely(!!ops->flush))
                return flow_err(port_id, ops->flush(dev, error), error);
        return rte_flow_error_set(error, ENOSYS,
                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                  NULL, rte_strerror(ENOSYS));
}
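
/*
 * Usage sketch (illustrative): forget all rules at once instead of
 * destroying them one by one, e.g. before reconfiguring hypothetical
 * port 0.
 *
 *      struct rte_flow_error err;
 *
 *      if (rte_flow_flush(0, &err))
 *              printf("flush failed: %s\n", rte_strerror(rte_errno));
 */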

/* Query an existing flow rule. */
int
rte_flow_query(uint16_t port_id,
               struct rte_flow *flow,
               const struct rte_flow_action *action,
               void *data,
               struct rte_flow_error *error)
{
        struct rte_eth_dev *dev = &rte_eth_devices[port_id];
        const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

        if (!ops)
                return -rte_errno;
        if (likely(!!ops->query))
                return flow_err(port_id, ops->query(dev, flow, action, data,
                                                    error), error);
        return rte_flow_error_set(error, ENOSYS,
                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                  NULL, rte_strerror(ENOSYS));
}
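
/*
 * Usage sketch (illustrative): read back the counter of a rule created
 * with a COUNT action. "flow" and port 0 are assumed to exist.
 *
 *      struct rte_flow_action count_action = {
 *              .type = RTE_FLOW_ACTION_TYPE_COUNT,
 *      };
 *      struct rte_flow_query_count stats = { .reset = 1 };
 *      struct rte_flow_error err;
 *
 *      if (!rte_flow_query(0, flow, &count_action, &stats, &err))
 *              printf("hits: %" PRIu64 " bytes: %" PRIu64 "\n",
 *                     stats.hits, stats.bytes);
 */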

/* Restrict ingress traffic to the defined flow rules. */
int
rte_flow_isolate(uint16_t port_id,
                 int set,
                 struct rte_flow_error *error)
{
        struct rte_eth_dev *dev = &rte_eth_devices[port_id];
        const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

        if (!ops)
                return -rte_errno;
        if (likely(!!ops->isolate))
                return flow_err(port_id, ops->isolate(dev, set, error), error);
        return rte_flow_error_set(error, ENOSYS,
                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                  NULL, rte_strerror(ENOSYS));
}
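
/*
 * Usage sketch (illustrative): isolated mode is usually requested before
 * the port is configured and started, as some PMDs only honor it then.
 * Port number is hypothetical.
 *
 *      struct rte_flow_error err;
 *
 *      if (rte_flow_isolate(0, 1, &err))
 *              printf("isolated mode rejected: %s\n",
 *                     err.message ? err.message : rte_strerror(rte_errno));
 */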

/* Initialize flow error structure. */
int
rte_flow_error_set(struct rte_flow_error *error,
                   int code,
                   enum rte_flow_error_type type,
                   const void *cause,
                   const char *message)
{
        if (error) {
                *error = (struct rte_flow_error){
                        .type = type,
                        .cause = cause,
                        .message = message,
                };
        }
        rte_errno = code;
        return -code;
}
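
/*
 * Usage sketch (illustrative): a PMD callback would typically report a
 * failure through this helper so that rte_errno, the return value and the
 * error object all stay consistent. The "act" variable below is
 * hypothetical.
 *
 *      if (act->type != RTE_FLOW_ACTION_TYPE_DROP)
 *              return rte_flow_error_set(error, ENOTSUP,
 *                                        RTE_FLOW_ERROR_TYPE_ACTION,
 *                                        act, "only DROP is supported");
 */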

/** Pattern item specification types. */
enum item_spec_type {
        ITEM_SPEC,
        ITEM_LAST,
        ITEM_MASK,
};

/** Compute storage space needed by item specification and copy it. */
static size_t
flow_item_spec_copy(void *buf, const struct rte_flow_item *item,
                    enum item_spec_type type)
{
        size_t size = 0;
        const void *data =
                type == ITEM_SPEC ? item->spec :
                type == ITEM_LAST ? item->last :
                type == ITEM_MASK ? item->mask :
                NULL;

        if (!item->spec || !data)
                goto empty;
        switch (item->type) {
                union {
                        const struct rte_flow_item_raw *raw;
                } spec;
                union {
                        const struct rte_flow_item_raw *raw;
                } last;
                union {
                        const struct rte_flow_item_raw *raw;
                } mask;
                union {
                        const struct rte_flow_item_raw *raw;
                } src;
                union {
                        struct rte_flow_item_raw *raw;
                } dst;
                size_t off;

        case RTE_FLOW_ITEM_TYPE_RAW:
                spec.raw = item->spec;
                last.raw = item->last ? item->last : item->spec;
                mask.raw = item->mask ? item->mask : &rte_flow_item_raw_mask;
                src.raw = data;
                dst.raw = buf;
                off = RTE_ALIGN_CEIL(sizeof(struct rte_flow_item_raw),
                                     sizeof(*src.raw->pattern));
                if (type == ITEM_SPEC ||
                    (type == ITEM_MASK &&
                     ((spec.raw->length & mask.raw->length) >=
                      (last.raw->length & mask.raw->length))))
                        size = spec.raw->length & mask.raw->length;
                else
                        size = last.raw->length & mask.raw->length;
                size = off + size * sizeof(*src.raw->pattern);
                if (dst.raw) {
                        memcpy(dst.raw, src.raw, sizeof(*src.raw));
                        dst.raw->pattern = memcpy((uint8_t *)dst.raw + off,
                                                  src.raw->pattern,
                                                  size - off);
                }
                break;
        default:
                size = rte_flow_desc_item[item->type].size;
                if (buf)
                        memcpy(buf, data, size);
                break;
        }
empty:
        return RTE_ALIGN_CEIL(size, sizeof(double));
}

/** Compute storage space needed by action configuration and copy it. */
static size_t
flow_action_conf_copy(void *buf, const struct rte_flow_action *action)
{
        size_t size = 0;

        if (!action->conf)
                goto empty;
        switch (action->type) {
                union {
                        const struct rte_flow_action_rss *rss;
                } src;
                union {
                        struct rte_flow_action_rss *rss;
                } dst;
                size_t off;

        case RTE_FLOW_ACTION_TYPE_RSS:
                src.rss = action->conf;
                dst.rss = buf;
                off = 0;
                if (dst.rss)
                        *dst.rss = (struct rte_flow_action_rss){
                                .func = src.rss->func,
                                .level = src.rss->level,
                                .types = src.rss->types,
                                .key_len = src.rss->key_len,
                                .queue_num = src.rss->queue_num,
                        };
                off += sizeof(*src.rss);
                if (src.rss->key_len) {
                        off = RTE_ALIGN_CEIL(off, sizeof(double));
                        size = sizeof(*src.rss->key) * src.rss->key_len;
                        if (dst.rss)
                                dst.rss->key = memcpy
                                        ((void *)((uintptr_t)dst.rss + off),
                                         src.rss->key, size);
                        off += size;
                }
                if (src.rss->queue_num) {
                        off = RTE_ALIGN_CEIL(off, sizeof(double));
                        size = sizeof(*src.rss->queue) * src.rss->queue_num;
                        if (dst.rss)
                                dst.rss->queue = memcpy
                                        ((void *)((uintptr_t)dst.rss + off),
                                         src.rss->queue, size);
                        off += size;
                }
                size = off;
                break;
        default:
                size = rte_flow_desc_action[action->type].size;
                if (buf)
                        memcpy(buf, action->conf, size);
                break;
        }
empty:
        return RTE_ALIGN_CEIL(size, sizeof(double));
}

/**
 * Store a full rte_flow description.
 *
 * The function runs in up to two passes over the same code: a first pass
 * with fd == NULL only computes the space required; if the caller buffer
 * turns out to be large enough, execution jumps back to the "store" label
 * with fd == desc to perform the actual copy.
 */
size_t
rte_flow_copy(struct rte_flow_desc *desc, size_t len,
              const struct rte_flow_attr *attr,
              const struct rte_flow_item *items,
              const struct rte_flow_action *actions)
{
        struct rte_flow_desc *fd = NULL;
        size_t tmp;
        size_t off1 = 0;
        size_t off2 = 0;
        size_t size = 0;

store:
        if (items) {
                const struct rte_flow_item *item;

                item = items;
                if (fd)
                        fd->items = (void *)&fd->data[off1];
                do {
                        struct rte_flow_item *dst = NULL;

                        if ((size_t)item->type >=
                                RTE_DIM(rte_flow_desc_item) ||
                            !rte_flow_desc_item[item->type].name) {
                                rte_errno = ENOTSUP;
                                return 0;
                        }
                        if (fd)
                                dst = memcpy(fd->data + off1, item,
                                             sizeof(*item));
                        off1 += sizeof(*item);
                        if (item->spec) {
                                if (fd)
                                        dst->spec = fd->data + off2;
                                off2 += flow_item_spec_copy
                                        (fd ? fd->data + off2 : NULL, item,
                                         ITEM_SPEC);
                        }
                        if (item->last) {
                                if (fd)
                                        dst->last = fd->data + off2;
                                off2 += flow_item_spec_copy
                                        (fd ? fd->data + off2 : NULL, item,
                                         ITEM_LAST);
                        }
                        if (item->mask) {
                                if (fd)
                                        dst->mask = fd->data + off2;
                                off2 += flow_item_spec_copy
                                        (fd ? fd->data + off2 : NULL, item,
                                         ITEM_MASK);
                        }
                        off2 = RTE_ALIGN_CEIL(off2, sizeof(double));
                } while ((item++)->type != RTE_FLOW_ITEM_TYPE_END);
                off1 = RTE_ALIGN_CEIL(off1, sizeof(double));
        }
        if (actions) {
                const struct rte_flow_action *action;

                action = actions;
                if (fd)
                        fd->actions = (void *)&fd->data[off1];
                do {
                        struct rte_flow_action *dst = NULL;

                        if ((size_t)action->type >=
                                RTE_DIM(rte_flow_desc_action) ||
                            !rte_flow_desc_action[action->type].name) {
                                rte_errno = ENOTSUP;
                                return 0;
                        }
                        if (fd)
                                dst = memcpy(fd->data + off1, action,
                                             sizeof(*action));
                        off1 += sizeof(*action);
                        if (action->conf) {
                                if (fd)
                                        dst->conf = fd->data + off2;
                                off2 += flow_action_conf_copy
                                        (fd ? fd->data + off2 : NULL, action);
                        }
                        off2 = RTE_ALIGN_CEIL(off2, sizeof(double));
                } while ((action++)->type != RTE_FLOW_ACTION_TYPE_END);
        }
        if (fd != NULL)
                return size;
        off1 = RTE_ALIGN_CEIL(off1, sizeof(double));
        tmp = RTE_ALIGN_CEIL(offsetof(struct rte_flow_desc, data),
                             sizeof(double));
        size = tmp + off1 + off2;
        if (size > len)
                return size;
        fd = desc;
        if (fd != NULL) {
                *fd = (const struct rte_flow_desc) {
                        .size = size,
                        .attr = *attr,
                };
                tmp -= offsetof(struct rte_flow_desc, data);
                off2 = tmp + off1;
                off1 = tmp;
                goto store;
        }
        return 0;
}
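
/*
 * Usage sketch (illustrative): the function is typically called twice,
 * first with len == 0 to learn the required size, then with a buffer of
 * that size. Allocation method, attr, pattern and actions are up to the
 * caller.
 *
 *      size_t n = rte_flow_copy(NULL, 0, &attr, pattern, actions);
 *      struct rte_flow_desc *desc = malloc(n);
 *
 *      if (n && desc &&
 *          rte_flow_copy(desc, n, &attr, pattern, actions) == n)
 *              ... desc now holds a self-contained copy of the rule ...
 */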

/**
 * Expand RSS flows into several possible flows according to the RSS hash
 * fields requested and the driver capabilities.
 */
int __rte_experimental
rte_flow_expand_rss(struct rte_flow_expand_rss *buf, size_t size,
                    const struct rte_flow_item *pattern, uint64_t types,
                    const struct rte_flow_expand_node graph[],
                    int graph_root_index)
{
        const int elt_n = 8;
        const struct rte_flow_item *item;
        const struct rte_flow_expand_node *node = &graph[graph_root_index];
        const int *next_node;
        const int *stack[elt_n];
        int stack_pos = 0;
        struct rte_flow_item flow_items[elt_n];
        unsigned int i;
        size_t lsize;
        size_t user_pattern_size = 0;
        void *addr = NULL;

        lsize = offsetof(struct rte_flow_expand_rss, entry) +
                elt_n * sizeof(buf->entry[0]);
        if (lsize <= size) {
                buf->entry[0].priority = 0;
                buf->entry[0].pattern = (void *)&buf->entry[elt_n];
                buf->entries = 0;
                addr = buf->entry[0].pattern;
        }
        for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
                const struct rte_flow_expand_node *next = NULL;

                for (i = 0; node->next && node->next[i]; ++i) {
                        next = &graph[node->next[i]];
                        if (next->type == item->type)
                                break;
                }
                if (next)
                        node = next;
                user_pattern_size += sizeof(*item);
        }
        user_pattern_size += sizeof(*item); /* Handle END item. */
        lsize += user_pattern_size;
        /* Copy the user pattern in the first entry of the buffer. */
        if (lsize <= size) {
                rte_memcpy(addr, pattern, user_pattern_size);
                addr = (void *)(((uintptr_t)addr) + user_pattern_size);
                buf->entries = 1;
        }
        /* Start expanding. */
        memset(flow_items, 0, sizeof(flow_items));
        user_pattern_size -= sizeof(*item);
        next_node = node->next;
        stack[stack_pos] = next_node;
        node = next_node ? &graph[*next_node] : NULL;
        while (node) {
                flow_items[stack_pos].type = node->type;
                if (node->rss_types & types) {
                        /*
                         * Compute the number of items to copy from the
                         * expansion and copy them.
                         * When stack_pos is 0, the expansion contributes one
                         * item, plus the additional END item.
                         */
                        int elt = stack_pos + 2;

                        flow_items[stack_pos + 1].type = RTE_FLOW_ITEM_TYPE_END;
                        lsize += elt * sizeof(*item) + user_pattern_size;
                        if (lsize <= size) {
                                size_t n = elt * sizeof(*item);

                                buf->entry[buf->entries].priority =
                                        stack_pos + 1;
                                buf->entry[buf->entries].pattern = addr;
                                buf->entries++;
                                rte_memcpy(addr, buf->entry[0].pattern,
                                           user_pattern_size);
                                addr = (void *)(((uintptr_t)addr) +
                                                user_pattern_size);
                                rte_memcpy(addr, flow_items, n);
                                addr = (void *)(((uintptr_t)addr) + n);
                        }
                }
                /* Go deeper. */
                if (node->next) {
                        next_node = node->next;
                        /*
                         * Bail out before stack[] or the END slot of
                         * flow_items[] can overflow.
                         */
                        if (stack_pos++ >= elt_n - 2) {
                                rte_errno = E2BIG;
                                return -rte_errno;
                        }
                        stack[stack_pos] = next_node;
                } else if (*(next_node + 1)) {
                        /* Follow up with the next possibility. */
                        ++next_node;
                } else {
                        /* Move to the next path. */
                        if (stack_pos)
                                next_node = stack[--stack_pos];
                        next_node++;
                        stack[stack_pos] = next_node;
                }
                node = *next_node ? &graph[*next_node] : NULL;
        }
        return lsize;
}
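
/*
 * Usage sketch (illustrative; node indexes, graph layout and RSS type
 * choices are assumptions, relying on the expansion helpers declared in
 * rte_flow_driver.h):
 *
 *      enum { NODE_ROOT, NODE_ETH, NODE_IPV4, NODE_IPV6 };
 *
 *      static const struct rte_flow_expand_node graph[] = {
 *              [NODE_ROOT] = {
 *                      .type = RTE_FLOW_ITEM_TYPE_END,
 *                      .next = RTE_FLOW_EXPAND_RSS_NEXT(NODE_ETH),
 *              },
 *              [NODE_ETH] = {
 *                      .type = RTE_FLOW_ITEM_TYPE_ETH,
 *                      .next = RTE_FLOW_EXPAND_RSS_NEXT(NODE_IPV4,
 *                                                       NODE_IPV6),
 *              },
 *              [NODE_IPV4] = {
 *                      .type = RTE_FLOW_ITEM_TYPE_IPV4,
 *                      .rss_types = ETH_RSS_IPV4,
 *              },
 *              [NODE_IPV6] = {
 *                      .type = RTE_FLOW_ITEM_TYPE_IPV6,
 *                      .rss_types = ETH_RSS_IPV6,
 *              },
 *      };
 *
 *      int ret = rte_flow_expand_rss(buf, size, pattern,
 *                                    ETH_RSS_IPV4 | ETH_RSS_IPV6,
 *                                    graph, NODE_ROOT);
 *
 * Given the user pattern "eth / end", buf would receive three entries:
 * the original pattern, "eth / ipv4 / end" and "eth / ipv6 / end", each
 * with a priority offset reflecting its expansion depth. Like
 * rte_flow_copy(), the function returns the number of bytes required, so
 * a too-small buffer can be detected and the call retried.
 */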