New upstream version 17.11-rc3
[deb_dpdk.git] / lib / librte_ether / rte_flow.c
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright 2016 6WIND S.A.
5  *   Copyright 2016 Mellanox.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of 6WIND S.A. nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33
34 #include <errno.h>
35 #include <stddef.h>
36 #include <stdint.h>
37 #include <string.h>
38
39 #include <rte_common.h>
40 #include <rte_errno.h>
41 #include <rte_branch_prediction.h>
42 #include "rte_ethdev.h"
43 #include "rte_flow_driver.h"
44 #include "rte_flow.h"
45
/**
 * Flow elements description tables.
 *
 * Maps an item/action type enum value to its printable name and the size
 * of its configuration structure, for use by rte_flow_copy().
 */
struct rte_flow_desc_data {
	const char *name; /**< Element name (enum suffix, e.g. "ETH"). */
	size_t size; /**< Byte size of the associated spec/conf structure. */
};
53
/**
 * Generate flow_item[] entry.
 *
 * @param t
 *   Pattern item type suffix (appended to RTE_FLOW_ITEM_TYPE_), also
 *   stringified as the entry's name.
 * @param s
 *   Size of the item's specification structure, 0 when it has none.
 */
#define MK_FLOW_ITEM(t, s) \
	[RTE_FLOW_ITEM_TYPE_ ## t] = { \
		.name = # t, \
		.size = s, \
	}
60
/**
 * Information about known flow pattern items.
 *
 * Indexed by RTE_FLOW_ITEM_TYPE_* value; rte_flow_copy() treats an entry
 * with a NULL name (i.e. a type missing from this table) as unsupported.
 */
static const struct rte_flow_desc_data rte_flow_desc_item[] = {
	MK_FLOW_ITEM(END, 0),
	MK_FLOW_ITEM(VOID, 0),
	MK_FLOW_ITEM(INVERT, 0),
	MK_FLOW_ITEM(ANY, sizeof(struct rte_flow_item_any)),
	MK_FLOW_ITEM(PF, 0),
	MK_FLOW_ITEM(VF, sizeof(struct rte_flow_item_vf)),
	MK_FLOW_ITEM(PORT, sizeof(struct rte_flow_item_port)),
	MK_FLOW_ITEM(RAW, sizeof(struct rte_flow_item_raw)), /* +pattern[] */
	MK_FLOW_ITEM(ETH, sizeof(struct rte_flow_item_eth)),
	MK_FLOW_ITEM(VLAN, sizeof(struct rte_flow_item_vlan)),
	MK_FLOW_ITEM(IPV4, sizeof(struct rte_flow_item_ipv4)),
	MK_FLOW_ITEM(IPV6, sizeof(struct rte_flow_item_ipv6)),
	MK_FLOW_ITEM(ICMP, sizeof(struct rte_flow_item_icmp)),
	MK_FLOW_ITEM(UDP, sizeof(struct rte_flow_item_udp)),
	MK_FLOW_ITEM(TCP, sizeof(struct rte_flow_item_tcp)),
	MK_FLOW_ITEM(SCTP, sizeof(struct rte_flow_item_sctp)),
	MK_FLOW_ITEM(VXLAN, sizeof(struct rte_flow_item_vxlan)),
	MK_FLOW_ITEM(MPLS, sizeof(struct rte_flow_item_mpls)),
	MK_FLOW_ITEM(GRE, sizeof(struct rte_flow_item_gre)),
	MK_FLOW_ITEM(E_TAG, sizeof(struct rte_flow_item_e_tag)),
	MK_FLOW_ITEM(NVGRE, sizeof(struct rte_flow_item_nvgre)),
};
85
/**
 * Generate flow_action[] entry.
 *
 * @param t
 *   Action type suffix (appended to RTE_FLOW_ACTION_TYPE_), also
 *   stringified as the entry's name.
 * @param s
 *   Size of the action's configuration structure, 0 when it has none.
 */
#define MK_FLOW_ACTION(t, s) \
	[RTE_FLOW_ACTION_TYPE_ ## t] = { \
		.name = # t, \
		.size = s, \
	}
92
/**
 * Information about known flow actions.
 *
 * Indexed by RTE_FLOW_ACTION_TYPE_* value; rte_flow_copy() treats an entry
 * with a NULL name (i.e. a type missing from this table) as unsupported.
 */
static const struct rte_flow_desc_data rte_flow_desc_action[] = {
	MK_FLOW_ACTION(END, 0),
	MK_FLOW_ACTION(VOID, 0),
	MK_FLOW_ACTION(PASSTHRU, 0),
	MK_FLOW_ACTION(MARK, sizeof(struct rte_flow_action_mark)),
	MK_FLOW_ACTION(FLAG, 0),
	MK_FLOW_ACTION(QUEUE, sizeof(struct rte_flow_action_queue)),
	MK_FLOW_ACTION(DROP, 0),
	MK_FLOW_ACTION(COUNT, 0),
	MK_FLOW_ACTION(DUP, sizeof(struct rte_flow_action_dup)),
	MK_FLOW_ACTION(RSS, sizeof(struct rte_flow_action_rss)), /* +queue[] */
	MK_FLOW_ACTION(PF, 0),
	MK_FLOW_ACTION(VF, sizeof(struct rte_flow_action_vf)),
};
108
109 /* Get generic flow operations structure from a port. */
110 const struct rte_flow_ops *
111 rte_flow_ops_get(uint16_t port_id, struct rte_flow_error *error)
112 {
113         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
114         const struct rte_flow_ops *ops;
115         int code;
116
117         if (unlikely(!rte_eth_dev_is_valid_port(port_id)))
118                 code = ENODEV;
119         else if (unlikely(!dev->dev_ops->filter_ctrl ||
120                           dev->dev_ops->filter_ctrl(dev,
121                                                     RTE_ETH_FILTER_GENERIC,
122                                                     RTE_ETH_FILTER_GET,
123                                                     &ops) ||
124                           !ops))
125                 code = ENOSYS;
126         else
127                 return ops;
128         rte_flow_error_set(error, code, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
129                            NULL, rte_strerror(code));
130         return NULL;
131 }
132
133 /* Check whether a flow rule can be created on a given port. */
134 int
135 rte_flow_validate(uint16_t port_id,
136                   const struct rte_flow_attr *attr,
137                   const struct rte_flow_item pattern[],
138                   const struct rte_flow_action actions[],
139                   struct rte_flow_error *error)
140 {
141         const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
142         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
143
144         if (unlikely(!ops))
145                 return -rte_errno;
146         if (likely(!!ops->validate))
147                 return ops->validate(dev, attr, pattern, actions, error);
148         return rte_flow_error_set(error, ENOSYS,
149                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
150                                   NULL, rte_strerror(ENOSYS));
151 }
152
153 /* Create a flow rule on a given port. */
154 struct rte_flow *
155 rte_flow_create(uint16_t port_id,
156                 const struct rte_flow_attr *attr,
157                 const struct rte_flow_item pattern[],
158                 const struct rte_flow_action actions[],
159                 struct rte_flow_error *error)
160 {
161         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
162         const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
163
164         if (unlikely(!ops))
165                 return NULL;
166         if (likely(!!ops->create))
167                 return ops->create(dev, attr, pattern, actions, error);
168         rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
169                            NULL, rte_strerror(ENOSYS));
170         return NULL;
171 }
172
173 /* Destroy a flow rule on a given port. */
174 int
175 rte_flow_destroy(uint16_t port_id,
176                  struct rte_flow *flow,
177                  struct rte_flow_error *error)
178 {
179         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
180         const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
181
182         if (unlikely(!ops))
183                 return -rte_errno;
184         if (likely(!!ops->destroy))
185                 return ops->destroy(dev, flow, error);
186         return rte_flow_error_set(error, ENOSYS,
187                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
188                                   NULL, rte_strerror(ENOSYS));
189 }
190
191 /* Destroy all flow rules associated with a port. */
192 int
193 rte_flow_flush(uint16_t port_id,
194                struct rte_flow_error *error)
195 {
196         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
197         const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
198
199         if (unlikely(!ops))
200                 return -rte_errno;
201         if (likely(!!ops->flush))
202                 return ops->flush(dev, error);
203         return rte_flow_error_set(error, ENOSYS,
204                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
205                                   NULL, rte_strerror(ENOSYS));
206 }
207
208 /* Query an existing flow rule. */
209 int
210 rte_flow_query(uint16_t port_id,
211                struct rte_flow *flow,
212                enum rte_flow_action_type action,
213                void *data,
214                struct rte_flow_error *error)
215 {
216         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
217         const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
218
219         if (!ops)
220                 return -rte_errno;
221         if (likely(!!ops->query))
222                 return ops->query(dev, flow, action, data, error);
223         return rte_flow_error_set(error, ENOSYS,
224                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
225                                   NULL, rte_strerror(ENOSYS));
226 }
227
228 /* Restrict ingress traffic to the defined flow rules. */
229 int
230 rte_flow_isolate(uint16_t port_id,
231                  int set,
232                  struct rte_flow_error *error)
233 {
234         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
235         const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
236
237         if (!ops)
238                 return -rte_errno;
239         if (likely(!!ops->isolate))
240                 return ops->isolate(dev, set, error);
241         return rte_flow_error_set(error, ENOSYS,
242                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
243                                   NULL, rte_strerror(ENOSYS));
244 }
245
246 /* Initialize flow error structure. */
247 int
248 rte_flow_error_set(struct rte_flow_error *error,
249                    int code,
250                    enum rte_flow_error_type type,
251                    const void *cause,
252                    const char *message)
253 {
254         if (error) {
255                 *error = (struct rte_flow_error){
256                         .type = type,
257                         .cause = cause,
258                         .message = message,
259                 };
260         }
261         rte_errno = code;
262         return -code;
263 }
264
265 /** Compute storage space needed by item specification. */
266 static void
267 flow_item_spec_size(const struct rte_flow_item *item,
268                     size_t *size, size_t *pad)
269 {
270         if (!item->spec) {
271                 *size = 0;
272                 goto empty;
273         }
274         switch (item->type) {
275                 union {
276                         const struct rte_flow_item_raw *raw;
277                 } spec;
278
279         /* Not a fall-through */
280         case RTE_FLOW_ITEM_TYPE_RAW:
281                 spec.raw = item->spec;
282                 *size = offsetof(struct rte_flow_item_raw, pattern) +
283                         spec.raw->length * sizeof(*spec.raw->pattern);
284                 break;
285         default:
286                 *size = rte_flow_desc_item[item->type].size;
287                 break;
288         }
289 empty:
290         *pad = RTE_ALIGN_CEIL(*size, sizeof(double)) - *size;
291 }
292
293 /** Compute storage space needed by action configuration. */
294 static void
295 flow_action_conf_size(const struct rte_flow_action *action,
296                       size_t *size, size_t *pad)
297 {
298         if (!action->conf) {
299                 *size = 0;
300                 goto empty;
301         }
302         switch (action->type) {
303                 union {
304                         const struct rte_flow_action_rss *rss;
305                 } conf;
306
307         /* Not a fall-through. */
308         case RTE_FLOW_ACTION_TYPE_RSS:
309                 conf.rss = action->conf;
310                 *size = offsetof(struct rte_flow_action_rss, queue) +
311                         conf.rss->num * sizeof(*conf.rss->queue);
312                 break;
313         default:
314                 *size = rte_flow_desc_action[action->type].size;
315                 break;
316         }
317 empty:
318         *pad = RTE_ALIGN_CEIL(*size, sizeof(double)) - *size;
319 }
320
/**
 * Store a full rte_flow description.
 *
 * Two-pass algorithm: the first pass (fd == NULL) only measures items,
 * actions and their spec/conf data to compute the total size; if that size
 * fits in len, desc is initialized and a second pass ("goto store") writes
 * everything into desc->data. off1 tracks the item/action array region,
 * off2 the spec/last/mask/conf data region that follows it.
 *
 * @param[out] desc
 *   Destination buffer, laid out as a struct rte_flow_desc; may be written
 *   only when len is large enough.
 * @param len
 *   Size of the desc buffer in bytes.
 * @param attr
 *   Flow rule attributes, copied by value into desc.
 * @param items
 *   Pattern item list terminated by RTE_FLOW_ITEM_TYPE_END (may be NULL).
 * @param actions
 *   Action list terminated by RTE_FLOW_ACTION_TYPE_END (may be NULL).
 *
 * @return
 *   The number of bytes needed (callers must compare against len to detect
 *   truncation), or 0 with rte_errno set to ENOTSUP on an unknown
 *   item/action type.
 */
size_t
rte_flow_copy(struct rte_flow_desc *desc, size_t len,
	      const struct rte_flow_attr *attr,
	      const struct rte_flow_item *items,
	      const struct rte_flow_action *actions)
{
	struct rte_flow_desc *fd = NULL;
	size_t tmp;
	size_t pad;
	size_t off1 = 0;
	size_t off2 = 0;
	size_t size = 0;

store:
	if (items) {
		const struct rte_flow_item *item;

		item = items;
		if (fd)
			fd->items = (void *)&fd->data[off1];
		do {
			struct rte_flow_item *dst = NULL;

			/* Reject types absent from the description table. */
			if ((size_t)item->type >=
				RTE_DIM(rte_flow_desc_item) ||
			    !rte_flow_desc_item[item->type].name) {
				rte_errno = ENOTSUP;
				return 0;
			}
			if (fd)
				dst = memcpy(fd->data + off1, item,
					     sizeof(*item));
			off1 += sizeof(*item);
			flow_item_spec_size(item, &tmp, &pad);
			/*
			 * NOTE(review): tmp is 0 when item->spec is NULL, so
			 * last/mask below would then be copied with size 0 —
			 * confirm callers always set spec alongside them.
			 */
			if (item->spec) {
				if (fd)
					dst->spec = memcpy(fd->data + off2,
							   item->spec, tmp);
				off2 += tmp + pad;
			}
			if (item->last) {
				if (fd)
					dst->last = memcpy(fd->data + off2,
							   item->last, tmp);
				off2 += tmp + pad;
			}
			if (item->mask) {
				if (fd)
					dst->mask = memcpy(fd->data + off2,
							   item->mask, tmp);
				off2 += tmp + pad;
			}
			off2 = RTE_ALIGN_CEIL(off2, sizeof(double));
		} while ((item++)->type != RTE_FLOW_ITEM_TYPE_END);
		off1 = RTE_ALIGN_CEIL(off1, sizeof(double));
	}
	if (actions) {
		const struct rte_flow_action *action;

		action = actions;
		if (fd)
			fd->actions = (void *)&fd->data[off1];
		do {
			struct rte_flow_action *dst = NULL;

			/* Reject types absent from the description table. */
			if ((size_t)action->type >=
				RTE_DIM(rte_flow_desc_action) ||
			    !rte_flow_desc_action[action->type].name) {
				rte_errno = ENOTSUP;
				return 0;
			}
			if (fd)
				dst = memcpy(fd->data + off1, action,
					     sizeof(*action));
			off1 += sizeof(*action);
			flow_action_conf_size(action, &tmp, &pad);
			if (action->conf) {
				if (fd)
					dst->conf = memcpy(fd->data + off2,
							   action->conf, tmp);
				off2 += tmp + pad;
			}
			off2 = RTE_ALIGN_CEIL(off2, sizeof(double));
		} while ((action++)->type != RTE_FLOW_ACTION_TYPE_END);
	}
	/* Second pass done: everything has been stored. */
	if (fd != NULL)
		return size;
	/* First pass done: compute total size including the header. */
	off1 = RTE_ALIGN_CEIL(off1, sizeof(double));
	tmp = RTE_ALIGN_CEIL(offsetof(struct rte_flow_desc, data),
			     sizeof(double));
	size = tmp + off1 + off2;
	/* Not enough room: report the required size without writing. */
	if (size > len)
		return size;
	fd = desc;
	if (fd != NULL) {
		*fd = (const struct rte_flow_desc) {
			.size = size,
			.attr = *attr,
		};
		/* Rebase offsets relative to fd->data for the store pass. */
		tmp -= offsetof(struct rte_flow_desc, data);
		off2 = tmp + off1;
		off1 = tmp;
		goto store;
	}
	return 0;
}