4 * Copyright(c) 2017 Intel Corporation. All rights reserved.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of Intel Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
34 #include <rte_flow_classify.h>
35 #include "rte_flow_classify_parse.h"
36 #include <rte_flow_driver.h>
37 #include <rte_table_acl.h>
/* Log type id for this library; registered/levelled by the RTE_INIT
 * constructor at the bottom of this file. */
40 int librte_flow_classify_logtype;
/* Scratch n-tuple filter filled in by flow_classify_parse_flow() and read
 * by allocate_acl_ipv4_5tuple_rule(). File-scope shared state, so parse +
 * rule-allocation is not safe to run concurrently from multiple threads. */
42 static struct rte_eth_ntuple_filter ntuple_filter;
/* Monotonically increasing rule id; the first allocated rule gets id 1. */
43 static uint32_t unique_id = 1;
/* Per-rule meta-data blob stored in (and returned by) the low-level table.
 * NOTE(review): member lines are missing from this extract (the embedded
 * original line numbers jump 47 -> 52); the struct body shown is partial. */
46 struct rte_flow_classify_table_entry {
47 /* meta-data for classify rule */

/* NOTE(review): the members below (ops, type, table handle) appear to belong
 * to a separate internal per-table struct (referenced later as `struct
 * rte_table`, with entry_size/h_table members) whose opening declaration
 * line is missing from this extract — confirm against the full file. */
52 /* Input parameters */
53 struct rte_table_ops ops;
55 enum rte_flow_classify_table_type type;
57 /* Handle to the low-level table object */
/* Maximum size (bytes, including the NUL terminator) of a classifier name;
 * used to bound the snprintf() into cls->name in rte_flow_classifier_create(). */
61 #define RTE_FLOW_CLASSIFIER_MAX_NAME_SZ 256
/* Flow classifier instance: a named, cache-aligned object holding up to
 * RTE_FLOW_CLASSIFY_TABLE_MAX low-level tables plus a per-burst array of
 * matched entry pointers filled by table lookup.
 * NOTE(review): member lines are missing from this extract — later code
 * reads cls->socket_id, cls->num_tables and cls->nb_pkts, which do not
 * appear below; confirm against the full file. */
63 struct rte_flow_classifier {
64 /* Input parameters */
65 char name[RTE_FLOW_CLASSIFIER_MAX_NAME_SZ];
67 enum rte_flow_classify_table_type type;
/* Classifier tables (only the first cls->num_tables slots are valid). */
70 struct rte_table tables[RTE_FLOW_CLASSIFY_TABLE_MAX];
/* Per-packet matched-entry pointers, written by f_lookup() during
 * flow_classifier_lookup() and consumed by action_apply(). */
73 struct rte_flow_classify_table_entry
74 *entries[RTE_PORT_IN_BURST_SIZE_MAX];
75 } __rte_cache_aligned;
/* NOTE(review): key_add/key_del appear to be members of a nested key
 * struct/union whose enclosing declaration lines are missing from this
 * extract — later code accesses them as rule->u.key.key_add/key_del. */
87 struct rte_table_acl_rule_add_params key_add; /* add key */
88 struct rte_table_acl_rule_delete_params key_del; /* delete key */
/* Typed rule match data; accessed later as rules.u.ipv4_5tuple, so the
 * 5-tuple below is presumably wrapped in a union `u` whose declaration
 * line is missing here. */
91 struct classify_rules {
92 enum rte_flow_classify_rule_type type;
94 struct rte_flow_classify_ipv4_5tuple ipv4_5tuple;
/* One installed classify rule: id, the action to report on match, the
 * match spec, and the table-entry bookkeeping for add/delete. */
98 struct rte_flow_classify_rule {
99 uint32_t id; /* unique ID of classify rule */
100 struct rte_flow_action action; /* action when match found */
101 struct classify_rules rules; /* union of rules */
105 int key_found; /* rule key found in table */
106 void *entry; /* pointer to buffer to hold rule meta data */
107 void *entry_ptr; /* handle to the table entry for rule meta data */
/* Parse generic rte_flow attr/pattern/actions into the file-scope
 * ntuple_filter: count non-VOID pattern items, copy them into a freshly
 * malloc'd VOID-stripped array, pick a parse function for that pattern
 * shape, and run it. Reports failures through rte_flow_error_set().
 * NOTE(review): the return-type line, braces, `i`/`ret` declarations,
 * loop increment and return statements are missing from this extract
 * (original line numbers jump); also `items` looks like it should be
 * freed before returning — confirm against the full file. */
111 flow_classify_parse_flow(
112 const struct rte_flow_attr *attr,
113 const struct rte_flow_item pattern[],
114 const struct rte_flow_action actions[],
115 struct rte_flow_error *error)
117 struct rte_flow_item *items;
118 parse_filter_t parse_filter;
119 uint32_t item_num = 0;
/* Reset the shared parse target before filling it. */
123 memset(&ntuple_filter, 0, sizeof(ntuple_filter));
125 /* Get the non-void item number of pattern */
126 while ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_END) {
127 if ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_VOID)
/* NOTE(review): item_num presumably also counts the END terminator —
 * the malloc below must cover it for classify_pattern_skip_void_item(). */
133 items = malloc(item_num * sizeof(struct rte_flow_item));
135 rte_flow_error_set(error, ENOMEM,
136 RTE_FLOW_ERROR_TYPE_ITEM_NUM,
137 NULL, "No memory for pattern items.");
141 memset(items, 0, item_num * sizeof(struct rte_flow_item));
/* Copy pattern into items[], dropping VOID entries. */
142 classify_pattern_skip_void_item(items, pattern);
144 parse_filter = classify_find_parse_filter_func(items);
146 rte_flow_error_set(error, EINVAL,
147 RTE_FLOW_ERROR_TYPE_ITEM,
148 pattern, "Unsupported pattern");
/* Fill ntuple_filter from the cleaned item list. */
153 ret = parse_filter(attr, items, actions, &ntuple_filter, error);
/* Split a host-order 32-bit IPv4 address into its four dotted-quad bytes:
 * a = most significant byte ... d = least significant byte. a..d must be
 * pointers to unsigned char.
 * NOTE(review): the closing `} while (0)` line of this do/while macro is
 * missing from this extract. */
159 #define uint32_t_to_char(ip, a, b, c, d) do {\
160 *a = (unsigned char)(ip >> 24 & 0xff);\
161 *b = (unsigned char)(ip >> 16 & 0xff);\
162 *c = (unsigned char)(ip >> 8 & 0xff);\
163 *d = (unsigned char)(ip & 0xff);\
/* Debug helper: dump an ACL add key (proto, src/dst IP, src/dst port, each
 * with its mask, plus priority) to stdout. Called only when the library log
 * level is DEBUG. NOTE(review): the return-type line and braces are missing
 * from this extract. */
167 print_acl_ipv4_key_add(struct rte_table_acl_rule_add_params *key)
169 unsigned char a, b, c, d;
171 printf("%s: 0x%02hhx/0x%hhx ", __func__,
172 key->field_value[PROTO_FIELD_IPV4].value.u8,
173 key->field_value[PROTO_FIELD_IPV4].mask_range.u8);
/* Source address as dotted quad / mask. */
175 uint32_t_to_char(key->field_value[SRC_FIELD_IPV4].value.u32,
177 printf(" %hhu.%hhu.%hhu.%hhu/0x%x ", a, b, c, d,
178 key->field_value[SRC_FIELD_IPV4].mask_range.u32);
/* Destination address as dotted quad / mask. */
180 uint32_t_to_char(key->field_value[DST_FIELD_IPV4].value.u32,
182 printf("%hhu.%hhu.%hhu.%hhu/0x%x ", a, b, c, d,
183 key->field_value[DST_FIELD_IPV4].mask_range.u32);
/* Source and destination ports with their mask ranges. */
185 printf("%hu : 0x%x %hu : 0x%x",
186 key->field_value[SRCP_FIELD_IPV4].value.u16,
187 key->field_value[SRCP_FIELD_IPV4].mask_range.u16,
188 key->field_value[DSTP_FIELD_IPV4].value.u16,
189 key->field_value[DSTP_FIELD_IPV4].mask_range.u16);
191 printf(" priority: 0x%x\n", key->priority);
/* Debug helper: dump an ACL delete key (same layout as the add key but
 * without priority) to stdout. Mirrors print_acl_ipv4_key_add().
 * NOTE(review): the return-type line and braces are missing from this
 * extract. */
195 print_acl_ipv4_key_delete(struct rte_table_acl_rule_delete_params *key)
197 unsigned char a, b, c, d;
199 printf("%s: 0x%02hhx/0x%hhx ", __func__,
200 key->field_value[PROTO_FIELD_IPV4].value.u8,
201 key->field_value[PROTO_FIELD_IPV4].mask_range.u8);
/* Source address as dotted quad / mask. */
203 uint32_t_to_char(key->field_value[SRC_FIELD_IPV4].value.u32,
205 printf(" %hhu.%hhu.%hhu.%hhu/0x%x ", a, b, c, d,
206 key->field_value[SRC_FIELD_IPV4].mask_range.u32);
/* Destination address as dotted quad / mask. */
208 uint32_t_to_char(key->field_value[DST_FIELD_IPV4].value.u32,
210 printf("%hhu.%hhu.%hhu.%hhu/0x%x ", a, b, c, d,
211 key->field_value[DST_FIELD_IPV4].mask_range.u32);
/* Ports and port masks. */
213 printf("%hu : 0x%x %hu : 0x%x\n",
214 key->field_value[SRCP_FIELD_IPV4].value.u16,
215 key->field_value[SRCP_FIELD_IPV4].mask_range.u16,
216 key->field_value[DSTP_FIELD_IPV4].value.u16,
217 key->field_value[DSTP_FIELD_IPV4].mask_range.u16);
/* Validate classifier creation parameters: non-NULL params struct, non-NULL
 * name, and a socket_id within [0, RTE_MAX_NUMA_NODES). Logs and (per the
 * call site) returns an error code on failure.
 * NOTE(review): the return-type line, braces and return statements are
 * missing from this extract. */
221 rte_flow_classifier_check_params(struct rte_flow_classifier_params *params)
223 if (params == NULL) {
224 RTE_FLOW_CLASSIFY_LOG(ERR,
225 "%s: Incorrect value for parameter params\n", __func__);
230 if (params->name == NULL) {
231 RTE_FLOW_CLASSIFY_LOG(ERR,
232 "%s: Incorrect value for parameter name\n", __func__);
/* socket_id must name a valid NUMA node for rte_zmalloc_socket(). */
237 if ((params->socket_id < 0) ||
238 (params->socket_id >= RTE_MAX_NUMA_NODES)) {
239 RTE_FLOW_CLASSIFY_LOG(ERR,
240 "%s: Incorrect value for parameter socket_id\n",
/* Create a flow classifier instance on the requested NUMA socket.
 * Validates params, zero-allocates the classifier with cache-line
 * alignment, then copies in name/socket_id/type. Returns the new
 * classifier, or (per the error paths) NULL on failure.
 * NOTE(review): braces, the `int ret` declaration, error returns and the
 * final `return cls;` are missing from this extract. */
248 struct rte_flow_classifier *
249 rte_flow_classifier_create(struct rte_flow_classifier_params *params)
251 struct rte_flow_classifier *cls;
254 /* Check input parameters */
255 ret = rte_flow_classifier_check_params(params);
257 RTE_FLOW_CLASSIFY_LOG(ERR,
258 "%s: flow classifier params check failed (%d)\n",
263 /* Allocate memory for the flow classifier */
264 cls = rte_zmalloc_socket("FLOW_CLASSIFIER",
265 sizeof(struct rte_flow_classifier),
266 RTE_CACHE_LINE_SIZE, params->socket_id);
269 RTE_FLOW_CLASSIFY_LOG(ERR,
270 "%s: flow classifier memory allocation failed\n",
275 /* Save input parameters */
/* Name is truncated to RTE_FLOW_CLASSIFIER_MAX_NAME_SZ - 1 characters. */
276 snprintf(cls->name, RTE_FLOW_CLASSIFIER_MAX_NAME_SZ, "%s",
278 cls->socket_id = params->socket_id;
279 cls->type = params->type;
281 /* Initialize flow classifier internal data structure */
/* Free one low-level table via its ops vtable, if the table type provides
 * a free hook. NOTE(review): the return-type line and braces are missing
 * from this extract. */
288 rte_flow_classify_table_free(struct rte_table *table)
290 if (table->ops.f_free != NULL)
291 table->ops.f_free(table->h_table);
/* Destroy a classifier: free each of its num_tables low-level tables, then
 * release the classifier memory itself (allocated with rte_zmalloc_socket,
 * so presumably freed with rte_free — the line is missing from this
 * extract, along with braces and return statements). */
295 rte_flow_classifier_free(struct rte_flow_classifier *cls)
299 /* Check input parameters */
301 RTE_FLOW_CLASSIFY_LOG(ERR,
302 "%s: rte_flow_classifier parameter is NULL\n",
308 for (i = 0; i < cls->num_tables; i++) {
309 struct rte_table *table = &cls->tables[i];
311 rte_flow_classify_table_free(table);
314 /* Free flow classifier memory */
/* Validate table-creation arguments: classifier, params and table_id
 * pointers must be non-NULL; the table ops vtable must exist and provide
 * at least f_create and f_lookup; and the classifier must have a free
 * table slot. Logs each failure.
 * NOTE(review): the return-type line, braces and return statements are
 * missing from this extract. */
321 rte_table_check_params(struct rte_flow_classifier *cls,
322 struct rte_flow_classify_table_params *params,
326 RTE_FLOW_CLASSIFY_LOG(ERR,
327 "%s: flow classifier parameter is NULL\n",
331 if (params == NULL) {
332 RTE_FLOW_CLASSIFY_LOG(ERR, "%s: params parameter is NULL\n",
336 if (table_id == NULL) {
337 RTE_FLOW_CLASSIFY_LOG(ERR, "%s: table_id parameter is NULL\n",
/* ops checks: create and lookup are mandatory; add/delete/free are
 * optional and checked at their call sites. */
343 if (params->ops == NULL) {
344 RTE_FLOW_CLASSIFY_LOG(ERR, "%s: params->ops is NULL\n",
349 if (params->ops->f_create == NULL) {
350 RTE_FLOW_CLASSIFY_LOG(ERR,
351 "%s: f_create function pointer is NULL\n", __func__);
355 if (params->ops->f_lookup == NULL) {
356 RTE_FLOW_CLASSIFY_LOG(ERR,
357 "%s: f_lookup function pointer is NULL\n", __func__);
361 /* De we have room for one more table? */
362 if (cls->num_tables == RTE_FLOW_CLASSIFY_TABLE_MAX) {
363 RTE_FLOW_CLASSIFY_LOG(ERR,
364 "%s: Incorrect value for num_tables parameter\n",
/* Add a low-level table to the classifier: validate arguments, take the
 * next free slot, create the table through ops->f_create with the
 * per-entry meta-data size, then record ops/entry_size/handle in the slot.
 * NOTE(review): braces, variable declarations (h_table, ret), the
 * num_tables increment / *table_id assignment and return statements are
 * missing from this extract. */
373 rte_flow_classify_table_create(struct rte_flow_classifier *cls,
374 struct rte_flow_classify_table_params *params,
377 struct rte_table *table;
379 uint32_t entry_size, id;
382 /* Check input arguments */
383 ret = rte_table_check_params(cls, params, table_id);
/* Next free slot; rte_table_check_params already guaranteed one exists. */
387 id = cls->num_tables;
388 table = &cls->tables[id];
390 /* calculate table entry size */
391 entry_size = sizeof(struct rte_flow_classify_table_entry);
393 /* Create the table */
394 h_table = params->ops->f_create(params->arg_create, cls->socket_id,
396 if (h_table == NULL) {
397 RTE_FLOW_CLASSIFY_LOG(ERR, "%s: Table creation failed\n",
402 /* Commit current table to the classifier */
406 /* Save input parameters */
407 memcpy(&table->ops, params->ops, sizeof(struct rte_table_ops));
409 /* Initialize table internal data structure */
410 table->entry_size = entry_size;
411 table->h_table = h_table;
/* Build a classify rule from the file-scope ntuple_filter (filled by the
 * preceding flow_classify_parse_flow() call): allocate and zero the rule,
 * assign the next unique id, copy the parsed action, then translate each
 * 5-tuple field (proto, src/dst IP, src/dst port) into both the ACL
 * add-key field_value[] and the rule's own ipv4_5tuple copy. The delete
 * key is a straight copy of the add key's fields. Returns the rule, or
 * (per the missing error path) NULL if malloc fails. Not thread-safe:
 * reads shared ntuple_filter and bumps shared unique_id.
 * NOTE(review): braces, the malloc NULL check, `log_level` declaration
 * and `return rule;` are missing from this extract. */
416 static struct rte_flow_classify_rule *
417 allocate_acl_ipv4_5tuple_rule(void)
419 struct rte_flow_classify_rule *rule;
422 rule = malloc(sizeof(struct rte_flow_classify_rule));
426 memset(rule, 0, sizeof(struct rte_flow_classify_rule));
427 rule->id = unique_id++;
428 rule->rules.type = RTE_FLOW_CLASSIFY_RULE_TYPE_IPV4_5TUPLE;
/* Action was captured by the parser; copy it into the rule. */
430 memcpy(&rule->action, classify_get_flow_action(),
431 sizeof(struct rte_flow_action));
/* Protocol field: ACL key + rule's own copy. */
434 rule->u.key.key_add.priority = ntuple_filter.priority;
435 rule->u.key.key_add.field_value[PROTO_FIELD_IPV4].mask_range.u8 =
436 ntuple_filter.proto_mask;
437 rule->u.key.key_add.field_value[PROTO_FIELD_IPV4].value.u8 =
439 rule->rules.u.ipv4_5tuple.proto = ntuple_filter.proto;
440 rule->rules.u.ipv4_5tuple.proto_mask = ntuple_filter.proto_mask;
/* Source IP. */
442 rule->u.key.key_add.field_value[SRC_FIELD_IPV4].mask_range.u32 =
443 ntuple_filter.src_ip_mask;
444 rule->u.key.key_add.field_value[SRC_FIELD_IPV4].value.u32 =
445 ntuple_filter.src_ip;
446 rule->rules.u.ipv4_5tuple.src_ip_mask = ntuple_filter.src_ip_mask;
447 rule->rules.u.ipv4_5tuple.src_ip = ntuple_filter.src_ip;
/* Destination IP. */
449 rule->u.key.key_add.field_value[DST_FIELD_IPV4].mask_range.u32 =
450 ntuple_filter.dst_ip_mask;
451 rule->u.key.key_add.field_value[DST_FIELD_IPV4].value.u32 =
452 ntuple_filter.dst_ip;
453 rule->rules.u.ipv4_5tuple.dst_ip_mask = ntuple_filter.dst_ip_mask;
454 rule->rules.u.ipv4_5tuple.dst_ip = ntuple_filter.dst_ip;
/* Source port. */
456 rule->u.key.key_add.field_value[SRCP_FIELD_IPV4].mask_range.u16 =
457 ntuple_filter.src_port_mask;
458 rule->u.key.key_add.field_value[SRCP_FIELD_IPV4].value.u16 =
459 ntuple_filter.src_port;
460 rule->rules.u.ipv4_5tuple.src_port_mask = ntuple_filter.src_port_mask;
461 rule->rules.u.ipv4_5tuple.src_port = ntuple_filter.src_port;
/* Destination port. */
463 rule->u.key.key_add.field_value[DSTP_FIELD_IPV4].mask_range.u16 =
464 ntuple_filter.dst_port_mask;
465 rule->u.key.key_add.field_value[DSTP_FIELD_IPV4].value.u16 =
466 ntuple_filter.dst_port;
467 rule->rules.u.ipv4_5tuple.dst_port_mask = ntuple_filter.dst_port_mask;
468 rule->rules.u.ipv4_5tuple.dst_port = ntuple_filter.dst_port;
/* Dump both keys when library logging is at DEBUG level. */
470 log_level = rte_log_get_level(librte_flow_classify_logtype);
472 if (log_level == RTE_LOG_DEBUG)
473 print_acl_ipv4_key_add(&rule->u.key.key_add);
475 /* key delete values */
/* The delete key shares the add key's field layout, so copy all
 * NUM_FIELDS_IPV4 fields wholesale. */
476 memcpy(&rule->u.key.key_del.field_value[PROTO_FIELD_IPV4],
477 &rule->u.key.key_add.field_value[PROTO_FIELD_IPV4],
478 NUM_FIELDS_IPV4 * sizeof(struct rte_acl_field));
480 if (log_level == RTE_LOG_DEBUG)
481 print_acl_ipv4_key_delete(&rule->u.key.key_del);
/* Public entry point: validate arguments, parse the rte_flow triple
 * (attr/pattern/actions), allocate a typed rule for the table's type
 * (ACL IPv4 5-tuple here), attach a per-rule table-entry buffer, and
 * install the rule via the table's f_add hook. On success *key_found
 * reports whether an identical key already existed. Errors are reported
 * through rte_flow_error_set(); the rule pointer (or NULL) is returned.
 * NOTE(review): braces, several `if (x == NULL)` condition lines, the
 * switch header/default, f_add's trailing arguments, error-path cleanup
 * (free of rule/rule->entry) and return statements are missing from this
 * extract. */
486 struct rte_flow_classify_rule *
487 rte_flow_classify_table_entry_add(struct rte_flow_classifier *cls,
490 const struct rte_flow_attr *attr,
491 const struct rte_flow_item pattern[],
492 const struct rte_flow_action actions[],
493 struct rte_flow_error *error)
495 struct rte_flow_classify_rule *rule;
496 struct rte_flow_classify_table_entry *table_entry;
/* Argument validation: classifier, table_id range, key_found, pattern,
 * actions and attr are each checked, with a distinct error type. */
503 rte_flow_error_set(error, EINVAL,
504 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
505 NULL, "NULL classifier.");
509 if (table_id >= cls->num_tables) {
510 rte_flow_error_set(error, EINVAL,
511 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
512 NULL, "invalid table_id.");
516 if (key_found == NULL) {
517 rte_flow_error_set(error, EINVAL,
518 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
519 NULL, "NULL key_found.");
524 rte_flow_error_set(error, EINVAL,
525 RTE_FLOW_ERROR_TYPE_ITEM_NUM,
526 NULL, "NULL pattern.");
531 rte_flow_error_set(error, EINVAL,
532 RTE_FLOW_ERROR_TYPE_ACTION_NUM,
533 NULL, "NULL action.");
538 rte_flow_error_set(error, EINVAL,
539 RTE_FLOW_ERROR_TYPE_ATTR,
540 NULL, "NULL attribute.");
544 /* parse attr, pattern and actions */
/* Fills the file-scope ntuple_filter used by the allocator below. */
545 ret = flow_classify_parse_flow(attr, pattern, actions, error);
/* Rule allocation is dispatched on the classifier's table type. */
550 case RTE_FLOW_CLASSIFY_TABLE_TYPE_ACL:
551 rule = allocate_acl_ipv4_5tuple_rule();
/* Per-rule meta-data buffer handed to the low-level table. */
559 rule->entry = malloc(sizeof(struct rte_flow_classify_table_entry));
565 table_entry = rule->entry;
566 table_entry->rule_id = rule->id;
/* f_add is optional per the ops contract; install only if present. */
568 if (cls->tables[table_id].ops.f_add != NULL) {
569 ret = cls->tables[table_id].ops.f_add(
570 cls->tables[table_id].h_table,
571 &rule->u.key.key_add,
/* rule->key_found was presumably set by f_add through a missing
 * out-parameter line; propagate it to the caller. */
580 *key_found = rule->key_found;
/* Remove a previously added rule from the given table via the optional
 * f_delete hook, using the rule's stored delete key. Returns non-zero on
 * bad arguments (per the missing return lines).
 * NOTE(review): the return-type line, braces, `int ret` declaration,
 * f_delete's trailing arguments and the final return are missing from
 * this extract; rule/rule->entry do not appear to be freed here —
 * confirm ownership against the full file. */
586 rte_flow_classify_table_entry_delete(struct rte_flow_classifier *cls,
588 struct rte_flow_classify_rule *rule)
592 if (!cls || !rule || table_id >= cls->num_tables)
595 if (cls->tables[table_id].ops.f_delete != NULL)
596 ret = cls->tables[table_id].ops.f_delete(
597 cls->tables[table_id].h_table,
598 &rule->u.key.key_del,
/* Run the table's f_lookup over a burst of up to 64 packets (pkts_mask is
 * a bitmask with one bit per packet). Matched entry pointers land in
 * cls->entries[]; on a successful lookup with at least one hit, the burst
 * size is recorded in cls->nb_pkts for action_apply() to iterate.
 * NOTE(review): the return-type line, braces, `int ret` declaration,
 * the else branch (nb_pkts = 0?) and the final return are missing from
 * this extract. */
606 flow_classifier_lookup(struct rte_flow_classifier *cls,
608 struct rte_mbuf **pkts,
609 const uint16_t nb_pkts)
613 uint64_t lookup_hit_mask;
615 pkts_mask = RTE_LEN2MASK(nb_pkts, uint64_t);
616 ret = cls->tables[table_id].ops.f_lookup(
617 cls->tables[table_id].h_table,
618 pkts, pkts_mask, &lookup_hit_mask,
619 (void **)cls->entries);
621 if (!ret && lookup_hit_mask)
622 cls->nb_pkts = nb_pkts;
/* Apply the rule's action to the lookup results. For COUNT, scan the
 * cls->entries[] filled by the preceding lookup, count packets whose
 * matched entry carries this rule's id, and report the count plus the
 * rule's 5-tuple through the caller's stats buffer.
 * NOTE(review): the return-type line, braces, `count`/`i`/`ret`
 * declarations, the count increment, the ntuple_stats assignment target,
 * the default case and return statements are missing from this extract. */
630 action_apply(struct rte_flow_classifier *cls,
631 struct rte_flow_classify_rule *rule,
632 struct rte_flow_classify_stats *stats)
634 struct rte_flow_classify_ipv4_5tuple_stats *ntuple_stats;
639 switch (rule->action.type) {
640 case RTE_FLOW_ACTION_TYPE_COUNT:
641 for (i = 0; i < cls->nb_pkts; i++) {
642 if (rule->id == cls->entries[i]->rule_id)
/* ntuple_stats presumably aliases stats->stats (cast below);
 * the assignment line is missing from this extract. */
648 (struct rte_flow_classify_ipv4_5tuple_stats *)
650 ntuple_stats->counter1 = count;
651 ntuple_stats->ipv4_5tuple = rule->rules.u.ipv4_5tuple;
/* Public query entry point: validate arguments, run the table lookup over
 * the packet burst, then apply the rule's action to fill stats. Returns
 * non-zero (per the missing return lines) on bad arguments or failure.
 * NOTE(review): the return-type line, braces, `int ret` declaration,
 * the error return for bad arguments, the `if (!ret)` guard before
 * action_apply and the final return are missing from this extract. */
663 rte_flow_classifier_query(struct rte_flow_classifier *cls,
665 struct rte_mbuf **pkts,
666 const uint16_t nb_pkts,
667 struct rte_flow_classify_rule *rule,
668 struct rte_flow_classify_stats *stats)
672 if (!cls || !rule || !stats || !pkts || nb_pkts == 0 ||
673 table_id >= cls->num_tables)
676 ret = flow_classifier_lookup(cls, table_id, pkts, nb_pkts);
678 ret = action_apply(cls, rule, stats);
/* Constructor (runs before main via RTE_INIT): register this library's
 * dynamic log type and default its level to INFO. rte_log_register()
 * returns a negative value on failure, hence the >= 0 guard.
 * NOTE(review): the `static void` line and braces of the function
 * definition are missing from this extract. */
682 RTE_INIT(librte_flow_classify_init_log);
685 librte_flow_classify_init_log(void)
687 librte_flow_classify_logtype =
688 rte_log_register("librte.flow_classify");
689 if (librte_flow_classify_logtype >= 0)
690 rte_log_set_level(librte_flow_classify_logtype, RTE_LOG_INFO);