New upstream version 18.02
[deb_dpdk.git] / lib / librte_ether / rte_flow.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright 2016 6WIND S.A.
3  * Copyright 2016 Mellanox.
4  */
5
6 #include <errno.h>
7 #include <stddef.h>
8 #include <stdint.h>
9 #include <string.h>
10
11 #include <rte_common.h>
12 #include <rte_errno.h>
13 #include <rte_branch_prediction.h>
14 #include "rte_ethdev.h"
15 #include "rte_flow_driver.h"
16 #include "rte_flow.h"
17
/**
 * Flow elements description tables.
 *
 * One entry per known pattern item or action type; used by rte_flow_copy()
 * and the *_size() helpers below to validate types and size their storage.
 */
struct rte_flow_desc_data {
	const char *name; /**< Stringified type name; non-NULL marks the type as known. */
	size_t size; /**< Base size of the spec/conf structure (0 when there is none). */
};
25
/**
 * Generate flow_item[] entry.
 *
 * Designated initializer indexed by RTE_FLOW_ITEM_TYPE_<t>, recording the
 * stringified type name and the size (s) of its specification structure.
 */
#define MK_FLOW_ITEM(t, s) \
	[RTE_FLOW_ITEM_TYPE_ ## t] = { \
		.name = # t, \
		.size = s, \
	}
32
/**
 * Information about known flow pattern items.
 *
 * Entries followed by a "+field[]" comment carry trailing variable-length
 * data; their effective size is computed in flow_item_spec_size().
 */
static const struct rte_flow_desc_data rte_flow_desc_item[] = {
	MK_FLOW_ITEM(END, 0),
	MK_FLOW_ITEM(VOID, 0),
	MK_FLOW_ITEM(INVERT, 0),
	MK_FLOW_ITEM(ANY, sizeof(struct rte_flow_item_any)),
	MK_FLOW_ITEM(PF, 0),
	MK_FLOW_ITEM(VF, sizeof(struct rte_flow_item_vf)),
	MK_FLOW_ITEM(PORT, sizeof(struct rte_flow_item_port)),
	MK_FLOW_ITEM(RAW, sizeof(struct rte_flow_item_raw)), /* +pattern[] */
	MK_FLOW_ITEM(ETH, sizeof(struct rte_flow_item_eth)),
	MK_FLOW_ITEM(VLAN, sizeof(struct rte_flow_item_vlan)),
	MK_FLOW_ITEM(IPV4, sizeof(struct rte_flow_item_ipv4)),
	MK_FLOW_ITEM(IPV6, sizeof(struct rte_flow_item_ipv6)),
	MK_FLOW_ITEM(ICMP, sizeof(struct rte_flow_item_icmp)),
	MK_FLOW_ITEM(UDP, sizeof(struct rte_flow_item_udp)),
	MK_FLOW_ITEM(TCP, sizeof(struct rte_flow_item_tcp)),
	MK_FLOW_ITEM(SCTP, sizeof(struct rte_flow_item_sctp)),
	MK_FLOW_ITEM(VXLAN, sizeof(struct rte_flow_item_vxlan)),
	MK_FLOW_ITEM(MPLS, sizeof(struct rte_flow_item_mpls)),
	MK_FLOW_ITEM(GRE, sizeof(struct rte_flow_item_gre)),
	MK_FLOW_ITEM(E_TAG, sizeof(struct rte_flow_item_e_tag)),
	MK_FLOW_ITEM(NVGRE, sizeof(struct rte_flow_item_nvgre)),
	MK_FLOW_ITEM(GENEVE, sizeof(struct rte_flow_item_geneve)),
};
58
/**
 * Generate flow_action[] entry.
 *
 * Designated initializer indexed by RTE_FLOW_ACTION_TYPE_<t>, recording the
 * stringified type name and the size (s) of its configuration structure.
 */
#define MK_FLOW_ACTION(t, s) \
	[RTE_FLOW_ACTION_TYPE_ ## t] = { \
		.name = # t, \
		.size = s, \
	}
65
/**
 * Information about known flow actions.
 *
 * Entries followed by a "+field[]" comment carry trailing variable-length
 * data; their effective size is computed in flow_action_conf_size().
 */
static const struct rte_flow_desc_data rte_flow_desc_action[] = {
	MK_FLOW_ACTION(END, 0),
	MK_FLOW_ACTION(VOID, 0),
	MK_FLOW_ACTION(PASSTHRU, 0),
	MK_FLOW_ACTION(MARK, sizeof(struct rte_flow_action_mark)),
	MK_FLOW_ACTION(FLAG, 0),
	MK_FLOW_ACTION(QUEUE, sizeof(struct rte_flow_action_queue)),
	MK_FLOW_ACTION(DROP, 0),
	MK_FLOW_ACTION(COUNT, 0),
	MK_FLOW_ACTION(DUP, sizeof(struct rte_flow_action_dup)),
	MK_FLOW_ACTION(RSS, sizeof(struct rte_flow_action_rss)), /* +queue[] */
	MK_FLOW_ACTION(PF, 0),
	MK_FLOW_ACTION(VF, sizeof(struct rte_flow_action_vf)),
};
81
82 static int
83 flow_err(uint16_t port_id, int ret, struct rte_flow_error *error)
84 {
85         if (ret == 0)
86                 return 0;
87         if (rte_eth_dev_is_removed(port_id))
88                 return rte_flow_error_set(error, EIO,
89                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
90                                           NULL, rte_strerror(EIO));
91         return ret;
92 }
93
94 /* Get generic flow operations structure from a port. */
95 const struct rte_flow_ops *
96 rte_flow_ops_get(uint16_t port_id, struct rte_flow_error *error)
97 {
98         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
99         const struct rte_flow_ops *ops;
100         int code;
101
102         if (unlikely(!rte_eth_dev_is_valid_port(port_id)))
103                 code = ENODEV;
104         else if (unlikely(!dev->dev_ops->filter_ctrl ||
105                           dev->dev_ops->filter_ctrl(dev,
106                                                     RTE_ETH_FILTER_GENERIC,
107                                                     RTE_ETH_FILTER_GET,
108                                                     &ops) ||
109                           !ops))
110                 code = ENOSYS;
111         else
112                 return ops;
113         rte_flow_error_set(error, code, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
114                            NULL, rte_strerror(code));
115         return NULL;
116 }
117
118 /* Check whether a flow rule can be created on a given port. */
119 int
120 rte_flow_validate(uint16_t port_id,
121                   const struct rte_flow_attr *attr,
122                   const struct rte_flow_item pattern[],
123                   const struct rte_flow_action actions[],
124                   struct rte_flow_error *error)
125 {
126         const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
127         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
128
129         if (unlikely(!ops))
130                 return -rte_errno;
131         if (likely(!!ops->validate))
132                 return flow_err(port_id, ops->validate(dev, attr, pattern,
133                                                        actions, error), error);
134         return rte_flow_error_set(error, ENOSYS,
135                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
136                                   NULL, rte_strerror(ENOSYS));
137 }
138
139 /* Create a flow rule on a given port. */
140 struct rte_flow *
141 rte_flow_create(uint16_t port_id,
142                 const struct rte_flow_attr *attr,
143                 const struct rte_flow_item pattern[],
144                 const struct rte_flow_action actions[],
145                 struct rte_flow_error *error)
146 {
147         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
148         struct rte_flow *flow;
149         const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
150
151         if (unlikely(!ops))
152                 return NULL;
153         if (likely(!!ops->create)) {
154                 flow = ops->create(dev, attr, pattern, actions, error);
155                 if (flow == NULL)
156                         flow_err(port_id, -rte_errno, error);
157                 return flow;
158         }
159         rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
160                            NULL, rte_strerror(ENOSYS));
161         return NULL;
162 }
163
164 /* Destroy a flow rule on a given port. */
165 int
166 rte_flow_destroy(uint16_t port_id,
167                  struct rte_flow *flow,
168                  struct rte_flow_error *error)
169 {
170         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
171         const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
172
173         if (unlikely(!ops))
174                 return -rte_errno;
175         if (likely(!!ops->destroy))
176                 return flow_err(port_id, ops->destroy(dev, flow, error),
177                                 error);
178         return rte_flow_error_set(error, ENOSYS,
179                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
180                                   NULL, rte_strerror(ENOSYS));
181 }
182
183 /* Destroy all flow rules associated with a port. */
184 int
185 rte_flow_flush(uint16_t port_id,
186                struct rte_flow_error *error)
187 {
188         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
189         const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
190
191         if (unlikely(!ops))
192                 return -rte_errno;
193         if (likely(!!ops->flush))
194                 return flow_err(port_id, ops->flush(dev, error), error);
195         return rte_flow_error_set(error, ENOSYS,
196                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
197                                   NULL, rte_strerror(ENOSYS));
198 }
199
200 /* Query an existing flow rule. */
201 int
202 rte_flow_query(uint16_t port_id,
203                struct rte_flow *flow,
204                enum rte_flow_action_type action,
205                void *data,
206                struct rte_flow_error *error)
207 {
208         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
209         const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
210
211         if (!ops)
212                 return -rte_errno;
213         if (likely(!!ops->query))
214                 return flow_err(port_id, ops->query(dev, flow, action, data,
215                                                     error), error);
216         return rte_flow_error_set(error, ENOSYS,
217                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
218                                   NULL, rte_strerror(ENOSYS));
219 }
220
221 /* Restrict ingress traffic to the defined flow rules. */
222 int
223 rte_flow_isolate(uint16_t port_id,
224                  int set,
225                  struct rte_flow_error *error)
226 {
227         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
228         const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
229
230         if (!ops)
231                 return -rte_errno;
232         if (likely(!!ops->isolate))
233                 return flow_err(port_id, ops->isolate(dev, set, error), error);
234         return rte_flow_error_set(error, ENOSYS,
235                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
236                                   NULL, rte_strerror(ENOSYS));
237 }
238
239 /* Initialize flow error structure. */
240 int
241 rte_flow_error_set(struct rte_flow_error *error,
242                    int code,
243                    enum rte_flow_error_type type,
244                    const void *cause,
245                    const char *message)
246 {
247         if (error) {
248                 *error = (struct rte_flow_error){
249                         .type = type,
250                         .cause = cause,
251                         .message = message,
252                 };
253         }
254         rte_errno = code;
255         return -code;
256 }
257
/**
 * Compute storage space needed by item specification.
 *
 * @param item Pattern item whose spec/last/mask storage is being sized.
 * @param[out] size Bytes needed for one spec-shaped object of this type
 *   (0 when item->spec is NULL). For RAW items this includes the trailing
 *   pattern[] data, sized from the spec's length field.
 * @param[out] pad Extra bytes to round *size up to the next
 *   sizeof(double) boundary (alignment used by rte_flow_copy()).
 */
static void
flow_item_spec_size(const struct rte_flow_item *item,
		    size_t *size, size_t *pad)
{
	if (!item->spec) {
		*size = 0;
		goto empty;
	}
	/* The union below is a declaration local to the switch; it provides
	 * typed views of item->spec for cases with variable-length data. */
	switch (item->type) {
		union {
			const struct rte_flow_item_raw *raw;
		} spec;

	/* Not a fall-through: the declaration above generates no code. */
	case RTE_FLOW_ITEM_TYPE_RAW:
		/* Base structure plus length elements of pattern[].
		 * NOTE(review): no overflow check on length * element size;
		 * presumably callers keep lengths small — confirm upstream. */
		spec.raw = item->spec;
		*size = offsetof(struct rte_flow_item_raw, pattern) +
			spec.raw->length * sizeof(*spec.raw->pattern);
		break;
	default:
		/* Fixed-size types: look up the table built by MK_FLOW_ITEM. */
		*size = rte_flow_desc_item[item->type].size;
		break;
	}
empty:
	*pad = RTE_ALIGN_CEIL(*size, sizeof(double)) - *size;
}
285
/**
 * Compute storage space needed by action configuration.
 *
 * @param action Action whose conf storage is being sized.
 * @param[out] size Bytes needed for the configuration object (0 when
 *   action->conf is NULL). For RSS actions this includes the trailing
 *   queue[] array, sized from the conf's num field.
 * @param[out] pad Extra bytes to round *size up to the next
 *   sizeof(double) boundary (alignment used by rte_flow_copy()).
 */
static void
flow_action_conf_size(const struct rte_flow_action *action,
		      size_t *size, size_t *pad)
{
	if (!action->conf) {
		*size = 0;
		goto empty;
	}
	/* The union below is a declaration local to the switch; it provides
	 * typed views of action->conf for cases with variable-length data. */
	switch (action->type) {
		union {
			const struct rte_flow_action_rss *rss;
		} conf;

	/* Not a fall-through: the declaration above generates no code. */
	case RTE_FLOW_ACTION_TYPE_RSS:
		/* Base structure plus num elements of queue[]. */
		conf.rss = action->conf;
		*size = offsetof(struct rte_flow_action_rss, queue) +
			conf.rss->num * sizeof(*conf.rss->queue);
		break;
	default:
		/* Fixed-size types: look up the table built by MK_FLOW_ACTION. */
		*size = rte_flow_desc_action[action->type].size;
		break;
	}
empty:
	*pad = RTE_ALIGN_CEIL(*size, sizeof(double)) - *size;
}
313
/**
 * Store a full rte_flow description.
 *
 * Flattens attr, the items array and the actions array into the single
 * contiguous buffer desc->data, fixing up fd->items/fd->actions and each
 * copied element's spec/last/mask (or conf) pointers to point inside it.
 *
 * Two-pass algorithm: the first pass (fd == NULL) only measures, computing
 * the total size; if it fits in len, fd is set to desc and "goto store"
 * re-runs the same loops, this time actually copying. Throughout, off1 is
 * the write offset for the item/action structure arrays and off2 the write
 * offset for their out-of-line spec/last/mask/conf payloads; both are kept
 * aligned to sizeof(double).
 *
 * @param desc Destination buffer, or NULL to only compute the size.
 * @param len Size of desc in bytes.
 *
 * @return The number of bytes needed (> len means nothing was stored and
 *   the caller should retry with a larger buffer), or 0 on error with
 *   rte_errno set (ENOTSUP for an unknown item/action type).
 */
size_t
rte_flow_copy(struct rte_flow_desc *desc, size_t len,
	      const struct rte_flow_attr *attr,
	      const struct rte_flow_item *items,
	      const struct rte_flow_action *actions)
{
	struct rte_flow_desc *fd = NULL;
	size_t tmp;
	size_t pad;
	size_t off1 = 0;
	size_t off2 = 0;
	size_t size = 0;

store:
	if (items) {
		const struct rte_flow_item *item;

		item = items;
		if (fd)
			fd->items = (void *)&fd->data[off1];
		do {
			struct rte_flow_item *dst = NULL;

			/* Reject types beyond the table or without a name
			 * entry: their storage size is unknown. */
			if ((size_t)item->type >=
				RTE_DIM(rte_flow_desc_item) ||
			    !rte_flow_desc_item[item->type].name) {
				rte_errno = ENOTSUP;
				return 0;
			}
			if (fd)
				dst = memcpy(fd->data + off1, item,
					     sizeof(*item));
			off1 += sizeof(*item);
			flow_item_spec_size(item, &tmp, &pad);
			/* spec, last and mask all share the size computed
			 * from spec (tmp); each copied object gets its own
			 * padded slot in the payload area. */
			if (item->spec) {
				if (fd)
					dst->spec = memcpy(fd->data + off2,
							   item->spec, tmp);
				off2 += tmp + pad;
			}
			if (item->last) {
				if (fd)
					dst->last = memcpy(fd->data + off2,
							   item->last, tmp);
				off2 += tmp + pad;
			}
			if (item->mask) {
				if (fd)
					dst->mask = memcpy(fd->data + off2,
							   item->mask, tmp);
				off2 += tmp + pad;
			}
			off2 = RTE_ALIGN_CEIL(off2, sizeof(double));
		} while ((item++)->type != RTE_FLOW_ITEM_TYPE_END);
		off1 = RTE_ALIGN_CEIL(off1, sizeof(double));
	}
	if (actions) {
		const struct rte_flow_action *action;

		action = actions;
		if (fd)
			fd->actions = (void *)&fd->data[off1];
		do {
			struct rte_flow_action *dst = NULL;

			/* Same validity check as for items above. */
			if ((size_t)action->type >=
				RTE_DIM(rte_flow_desc_action) ||
			    !rte_flow_desc_action[action->type].name) {
				rte_errno = ENOTSUP;
				return 0;
			}
			if (fd)
				dst = memcpy(fd->data + off1, action,
					     sizeof(*action));
			off1 += sizeof(*action);
			flow_action_conf_size(action, &tmp, &pad);
			if (action->conf) {
				if (fd)
					dst->conf = memcpy(fd->data + off2,
							   action->conf, tmp);
				off2 += tmp + pad;
			}
			off2 = RTE_ALIGN_CEIL(off2, sizeof(double));
		} while ((action++)->type != RTE_FLOW_ACTION_TYPE_END);
	}
	/* Second pass done: everything has been copied. */
	if (fd != NULL)
		return size;
	/* First pass done: compute the total footprint. */
	off1 = RTE_ALIGN_CEIL(off1, sizeof(double));
	tmp = RTE_ALIGN_CEIL(offsetof(struct rte_flow_desc, data),
			     sizeof(double));
	size = tmp + off1 + off2;
	if (size > len)
		return size;
	fd = desc;
	if (fd != NULL) {
		*fd = (const struct rte_flow_desc) {
			.size = size,
			.attr = *attr,
		};
		/* Rebase offsets relative to fd->data: structures start at
		 * the (aligned) beginning, payloads right after them. */
		tmp -= offsetof(struct rte_flow_desc, data);
		off2 = tmp + off1;
		off1 = tmp;
		goto store;
	}
	return 0;
}