/*-
 *   BSD LICENSE
 *
 *   Copyright 2016 6WIND S.A.
 *   Copyright 2016 Mellanox.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of 6WIND S.A. nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <errno.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>

#include <rte_common.h>
#include <rte_errno.h>
#include <rte_branch_prediction.h>
#include "rte_ethdev.h"
#include "rte_flow_driver.h"
#include "rte_flow.h"

/**
 * Flow elements description tables.
 */
struct rte_flow_desc_data {
        const char *name;
        size_t size;
};

/** Generate flow_item[] entry. */
#define MK_FLOW_ITEM(t, s) \
        [RTE_FLOW_ITEM_TYPE_ ## t] = { \
                .name = # t, \
                .size = s, \
        }

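/*
 * Illustration only (not part of the original file): MK_FLOW_ITEM builds a
 * designated initializer so the table stays indexed by item type. For
 * example:
 *
 *  MK_FLOW_ITEM(ETH, sizeof(struct rte_flow_item_eth))
 *
 * expands to:
 *
 *  [RTE_FLOW_ITEM_TYPE_ETH] = {
 *          .name = "ETH",
 *          .size = sizeof(struct rte_flow_item_eth),
 *  }
 */
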
/** Information about known flow pattern items. */
static const struct rte_flow_desc_data rte_flow_desc_item[] = {
        MK_FLOW_ITEM(END, 0),
        MK_FLOW_ITEM(VOID, 0),
        MK_FLOW_ITEM(INVERT, 0),
        MK_FLOW_ITEM(ANY, sizeof(struct rte_flow_item_any)),
        MK_FLOW_ITEM(PF, 0),
        MK_FLOW_ITEM(VF, sizeof(struct rte_flow_item_vf)),
        MK_FLOW_ITEM(PORT, sizeof(struct rte_flow_item_port)),
        MK_FLOW_ITEM(RAW, sizeof(struct rte_flow_item_raw)), /* +pattern[] */
        MK_FLOW_ITEM(ETH, sizeof(struct rte_flow_item_eth)),
        MK_FLOW_ITEM(VLAN, sizeof(struct rte_flow_item_vlan)),
        MK_FLOW_ITEM(IPV4, sizeof(struct rte_flow_item_ipv4)),
        MK_FLOW_ITEM(IPV6, sizeof(struct rte_flow_item_ipv6)),
        MK_FLOW_ITEM(ICMP, sizeof(struct rte_flow_item_icmp)),
        MK_FLOW_ITEM(UDP, sizeof(struct rte_flow_item_udp)),
        MK_FLOW_ITEM(TCP, sizeof(struct rte_flow_item_tcp)),
        MK_FLOW_ITEM(SCTP, sizeof(struct rte_flow_item_sctp)),
        MK_FLOW_ITEM(VXLAN, sizeof(struct rte_flow_item_vxlan)),
        MK_FLOW_ITEM(MPLS, sizeof(struct rte_flow_item_mpls)),
        MK_FLOW_ITEM(GRE, sizeof(struct rte_flow_item_gre)),
        MK_FLOW_ITEM(E_TAG, sizeof(struct rte_flow_item_e_tag)),
        MK_FLOW_ITEM(NVGRE, sizeof(struct rte_flow_item_nvgre)),
};

/** Generate flow_action[] entry. */
#define MK_FLOW_ACTION(t, s) \
        [RTE_FLOW_ACTION_TYPE_ ## t] = { \
                .name = # t, \
                .size = s, \
        }

/** Information about known flow actions. */
static const struct rte_flow_desc_data rte_flow_desc_action[] = {
        MK_FLOW_ACTION(END, 0),
        MK_FLOW_ACTION(VOID, 0),
        MK_FLOW_ACTION(PASSTHRU, 0),
        MK_FLOW_ACTION(MARK, sizeof(struct rte_flow_action_mark)),
        MK_FLOW_ACTION(FLAG, 0),
        MK_FLOW_ACTION(QUEUE, sizeof(struct rte_flow_action_queue)),
        MK_FLOW_ACTION(DROP, 0),
        MK_FLOW_ACTION(COUNT, 0),
        MK_FLOW_ACTION(DUP, sizeof(struct rte_flow_action_dup)),
        MK_FLOW_ACTION(RSS, sizeof(struct rte_flow_action_rss)), /* +queue[] */
        MK_FLOW_ACTION(PF, 0),
        MK_FLOW_ACTION(VF, sizeof(struct rte_flow_action_vf)),
};

/* Get generic flow operations structure from a port. */
const struct rte_flow_ops *
rte_flow_ops_get(uint8_t port_id, struct rte_flow_error *error)
{
        struct rte_eth_dev *dev = &rte_eth_devices[port_id];
        const struct rte_flow_ops *ops;
        int code;

        if (unlikely(!rte_eth_dev_is_valid_port(port_id)))
                code = ENODEV;
        else if (unlikely(!dev->dev_ops->filter_ctrl ||
                          dev->dev_ops->filter_ctrl(dev,
                                                    RTE_ETH_FILTER_GENERIC,
                                                    RTE_ETH_FILTER_GET,
                                                    &ops) ||
                          !ops))
                code = ENOSYS;
        else
                return ops;
        rte_flow_error_set(error, code, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                           NULL, rte_strerror(code));
        return NULL;
}

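/*
 * Usage sketch (illustration only, not part of the original file): a PMD
 * typically hands out its rte_flow_ops through the RTE_ETH_FILTER_GENERIC
 * branch of its filter_ctrl callback, which is what the lookup above
 * relies on. All "example_*" names below are hypothetical:
 *
 *  static const struct rte_flow_ops example_flow_ops = {
 *          .validate = example_flow_validate,
 *          .create = example_flow_create,
 *          .destroy = example_flow_destroy,
 *          .flush = example_flow_flush,
 *  };
 *
 *  static int
 *  example_filter_ctrl(struct rte_eth_dev *dev,
 *                      enum rte_filter_type filter_type,
 *                      enum rte_filter_op filter_op,
 *                      void *arg)
 *  {
 *          (void)dev;
 *          if (filter_type == RTE_ETH_FILTER_GENERIC) {
 *                  if (filter_op != RTE_ETH_FILTER_GET)
 *                          return -EINVAL;
 *                  *(const void **)arg = &example_flow_ops;
 *                  return 0;
 *          }
 *          return -ENOTSUP;
 *  }
 */
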
/* Check whether a flow rule can be created on a given port. */
int
rte_flow_validate(uint8_t port_id,
                  const struct rte_flow_attr *attr,
                  const struct rte_flow_item pattern[],
                  const struct rte_flow_action actions[],
                  struct rte_flow_error *error)
{
        const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
        struct rte_eth_dev *dev = &rte_eth_devices[port_id];

        if (unlikely(!ops))
                return -rte_errno;
        if (likely(!!ops->validate))
                return ops->validate(dev, attr, pattern, actions, error);
        return -rte_flow_error_set(error, ENOSYS,
                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                   NULL, rte_strerror(ENOSYS));
}

/* Create a flow rule on a given port. */
struct rte_flow *
rte_flow_create(uint8_t port_id,
                const struct rte_flow_attr *attr,
                const struct rte_flow_item pattern[],
                const struct rte_flow_action actions[],
                struct rte_flow_error *error)
{
        struct rte_eth_dev *dev = &rte_eth_devices[port_id];
        const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

        if (unlikely(!ops))
                return NULL;
        if (likely(!!ops->create))
                return ops->create(dev, attr, pattern, actions, error);
        rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                           NULL, rte_strerror(ENOSYS));
        return NULL;
}

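/*
 * Usage sketch (illustration only, not part of the original file): an
 * application normally validates a rule before creating it; failures
 * return a negative errno value and, when the PMD provides one, a detailed
 * cause in the rte_flow_error structure. This sketch steers ingress
 * Ethernet traffic to queue 1:
 *
 *  struct rte_flow_attr attr = { .ingress = 1 };
 *  struct rte_flow_item pattern[] = {
 *          { .type = RTE_FLOW_ITEM_TYPE_ETH },
 *          { .type = RTE_FLOW_ITEM_TYPE_END },
 *  };
 *  struct rte_flow_action_queue queue = { .index = 1 };
 *  struct rte_flow_action actions[] = {
 *          { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *          { .type = RTE_FLOW_ACTION_TYPE_END },
 *  };
 *  struct rte_flow_error err;
 *  struct rte_flow *flow = NULL;
 *
 *  if (!rte_flow_validate(port_id, &attr, pattern, actions, &err))
 *          flow = rte_flow_create(port_id, &attr, pattern, actions, &err);
 */
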
/* Destroy a flow rule on a given port. */
int
rte_flow_destroy(uint8_t port_id,
                 struct rte_flow *flow,
                 struct rte_flow_error *error)
{
        struct rte_eth_dev *dev = &rte_eth_devices[port_id];
        const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

        if (unlikely(!ops))
                return -rte_errno;
        if (likely(!!ops->destroy))
                return ops->destroy(dev, flow, error);
        return -rte_flow_error_set(error, ENOSYS,
                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                   NULL, rte_strerror(ENOSYS));
}

/* Destroy all flow rules associated with a port. */
int
rte_flow_flush(uint8_t port_id,
               struct rte_flow_error *error)
{
        struct rte_eth_dev *dev = &rte_eth_devices[port_id];
        const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

        if (unlikely(!ops))
                return -rte_errno;
        if (likely(!!ops->flush))
                return ops->flush(dev, error);
        return -rte_flow_error_set(error, ENOSYS,
                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                   NULL, rte_strerror(ENOSYS));
}

/* Query an existing flow rule. */
int
rte_flow_query(uint8_t port_id,
               struct rte_flow *flow,
               enum rte_flow_action_type action,
               void *data,
               struct rte_flow_error *error)
{
        struct rte_eth_dev *dev = &rte_eth_devices[port_id];
        const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

        if (!ops)
                return -rte_errno;
        if (likely(!!ops->query))
                return ops->query(dev, flow, action, data, error);
        return -rte_flow_error_set(error, ENOSYS,
                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                   NULL, rte_strerror(ENOSYS));
}

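/*
 * Usage sketch (illustration only, not part of the original file): querying
 * the COUNT action of an existing rule, assuming the PMD supports both the
 * action and the query callback:
 *
 *  struct rte_flow_query_count count = { .reset = 1 };
 *  struct rte_flow_error err;
 *
 *  if (!rte_flow_query(port_id, flow, RTE_FLOW_ACTION_TYPE_COUNT,
 *                      &count, &err) && count.hits_set)
 *          printf("hits: %llu\n", (unsigned long long)count.hits);
 */
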
/* Restrict ingress traffic to the defined flow rules. */
int
rte_flow_isolate(uint8_t port_id,
                 int set,
                 struct rte_flow_error *error)
{
        struct rte_eth_dev *dev = &rte_eth_devices[port_id];
        const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

        if (!ops)
                return -rte_errno;
        if (likely(!!ops->isolate))
                return ops->isolate(dev, set, error);
        return -rte_flow_error_set(error, ENOSYS,
                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                   NULL, rte_strerror(ENOSYS));
}

/** Compute storage space needed by item specification. */
static void
flow_item_spec_size(const struct rte_flow_item *item,
                    size_t *size, size_t *pad)
{
        if (!item->spec) {
                *size = 0;
                goto empty;
        }
        switch (item->type) {
                union {
                        const struct rte_flow_item_raw *raw;
                } spec;

        /* Not a fall-through. */
        case RTE_FLOW_ITEM_TYPE_RAW:
                spec.raw = item->spec;
                *size = offsetof(struct rte_flow_item_raw, pattern) +
                        spec.raw->length * sizeof(*spec.raw->pattern);
                break;
        default:
                *size = rte_flow_desc_item[item->type].size;
                break;
        }
empty:
        *pad = RTE_ALIGN_CEIL(*size, sizeof(double)) - *size;
}

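/*
 * Worked example (illustration only, not part of the original file): for a
 * RAW item whose spec has length == 4 (one-byte pattern elements), the
 * computed size is offsetof(struct rte_flow_item_raw, pattern) + 4, and
 * *pad rounds that up to the next multiple of sizeof(double) so that the
 * copies made by rte_flow_copy() remain suitably aligned.
 */
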
/** Compute storage space needed by action configuration. */
static void
flow_action_conf_size(const struct rte_flow_action *action,
                      size_t *size, size_t *pad)
{
        if (!action->conf) {
                *size = 0;
                goto empty;
        }
        switch (action->type) {
                union {
                        const struct rte_flow_action_rss *rss;
                } conf;

        /* Not a fall-through. */
        case RTE_FLOW_ACTION_TYPE_RSS:
                conf.rss = action->conf;
                *size = offsetof(struct rte_flow_action_rss, queue) +
                        conf.rss->num * sizeof(*conf.rss->queue);
                break;
        default:
                *size = rte_flow_desc_action[action->type].size;
                break;
        }
empty:
        *pad = RTE_ALIGN_CEIL(*size, sizeof(double)) - *size;
}

/** Store a full rte_flow description. */
size_t
rte_flow_copy(struct rte_flow_desc *desc, size_t len,
              const struct rte_flow_attr *attr,
              const struct rte_flow_item *items,
              const struct rte_flow_action *actions)
{
        struct rte_flow_desc *fd = NULL;
        size_t tmp;
        size_t pad;
        size_t off1 = 0;
        size_t off2 = 0;
        size_t size = 0;

store:
        if (items) {
                const struct rte_flow_item *item;

                item = items;
                if (fd)
                        fd->items = (void *)&fd->data[off1];
                do {
                        struct rte_flow_item *dst = NULL;

                        if ((size_t)item->type >=
                                RTE_DIM(rte_flow_desc_item) ||
                            !rte_flow_desc_item[item->type].name) {
                                rte_errno = ENOTSUP;
                                return 0;
                        }
                        if (fd)
                                dst = memcpy(fd->data + off1, item,
                                             sizeof(*item));
                        off1 += sizeof(*item);
                        flow_item_spec_size(item, &tmp, &pad);
                        if (item->spec) {
                                if (fd)
                                        dst->spec = memcpy(fd->data + off2,
                                                           item->spec, tmp);
                                off2 += tmp + pad;
                        }
                        if (item->last) {
                                if (fd)
                                        dst->last = memcpy(fd->data + off2,
                                                           item->last, tmp);
                                off2 += tmp + pad;
                        }
                        if (item->mask) {
                                if (fd)
                                        dst->mask = memcpy(fd->data + off2,
                                                           item->mask, tmp);
                                off2 += tmp + pad;
                        }
                        off2 = RTE_ALIGN_CEIL(off2, sizeof(double));
                } while ((item++)->type != RTE_FLOW_ITEM_TYPE_END);
                off1 = RTE_ALIGN_CEIL(off1, sizeof(double));
        }
        if (actions) {
                const struct rte_flow_action *action;

                action = actions;
                if (fd)
                        fd->actions = (void *)&fd->data[off1];
                do {
                        struct rte_flow_action *dst = NULL;

                        if ((size_t)action->type >=
                                RTE_DIM(rte_flow_desc_action) ||
                            !rte_flow_desc_action[action->type].name) {
                                rte_errno = ENOTSUP;
                                return 0;
                        }
                        if (fd)
                                dst = memcpy(fd->data + off1, action,
                                             sizeof(*action));
                        off1 += sizeof(*action);
                        flow_action_conf_size(action, &tmp, &pad);
                        if (action->conf) {
                                if (fd)
                                        dst->conf = memcpy(fd->data + off2,
                                                           action->conf, tmp);
                                off2 += tmp + pad;
                        }
                        off2 = RTE_ALIGN_CEIL(off2, sizeof(double));
                } while ((action++)->type != RTE_FLOW_ACTION_TYPE_END);
        }
        if (fd != NULL)
                return size;
        off1 = RTE_ALIGN_CEIL(off1, sizeof(double));
        tmp = RTE_ALIGN_CEIL(offsetof(struct rte_flow_desc, data),
                             sizeof(double));
        size = tmp + off1 + off2;
        if (size > len)
                return size;
        fd = desc;
        if (fd != NULL) {
                *fd = (const struct rte_flow_desc) {
                        .size = size,
                        .attr = *attr,
                };
                tmp -= offsetof(struct rte_flow_desc, data);
                off2 = tmp + off1;
                off1 = tmp;
                goto store;
        }
        return 0;
}
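
/*
 * Usage sketch (illustration only, not part of the original file): because
 * the required size is returned whenever it exceeds "len", callers can do a
 * first dry run to size the buffer and a second call to perform the copy:
 *
 *  size_t size = rte_flow_copy(NULL, 0, &attr, pattern, actions);
 *  struct rte_flow_desc *desc = size ? malloc(size) : NULL;
 *
 *  if (desc &&
 *      rte_flow_copy(desc, size, &attr, pattern, actions) != size) {
 *          free(desc);
 *          desc = NULL;
 *  }
 */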