X-Git-Url: https://gerrit.fd.io/r/gitweb?a=blobdiff_plain;f=src%2Fvnet%2Fclassify%2Fvnet_classify.c;h=fb9a59c1f016c6ad3212ab1faaa0c617d96ff4fb;hb=52fa5f21b;hp=4e822226aaa58e29cbb2f2e210e9ae439d14b815;hpb=178cf493d009995b28fdf220f04c98860ff79a9b;p=vpp.git

diff --git a/src/vnet/classify/vnet_classify.c b/src/vnet/classify/vnet_classify.c
index 4e822226aaa..fb9a59c1f01 100644
--- a/src/vnet/classify/vnet_classify.c
+++ b/src/vnet/classify/vnet_classify.c
@@ -12,12 +12,22 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+
 #include 
 #include 
 #include 
 #include 		/* for API error numbers */
 #include 		/* for L2_INPUT_CLASSIFY_NEXT_xxx */
 #include 
+#include 
+#include 
+
+
+
+/**
+ * @file
+ * @brief N-tuple classifier
+ */
 
 vnet_classify_main_t vnet_classify_main;
 
@@ -118,19 +128,18 @@ vnet_classify_register_unformat_opaque_index_fn (unformat_function_t * fn)
 }
 
 vnet_classify_table_t *
-vnet_classify_new_table (vnet_classify_main_t * cm,
-			 u8 * mask, u32 nbuckets, u32 memory_size,
-			 u32 skip_n_vectors, u32 match_n_vectors)
+vnet_classify_new_table (vnet_classify_main_t *cm, const u8 *mask,
+			 u32 nbuckets, u32 memory_size, u32 skip_n_vectors,
+			 u32 match_n_vectors)
 {
   vnet_classify_table_t *t;
   void *oldheap;
 
   nbuckets = 1 << (max_log2 (nbuckets));
 
-  pool_get_aligned (cm->tables, t, CLIB_CACHE_LINE_BYTES);
-  clib_memset (t, 0, sizeof (*t));
+  pool_get_aligned_zero (cm->tables, t, CLIB_CACHE_LINE_BYTES);
 
-  vec_validate_aligned (t->mask, match_n_vectors - 1, sizeof (u32x4));
+  clib_memset_u32 (t->mask, 0, 4 * ARRAY_LEN (t->mask));
   clib_memcpy_fast (t->mask, mask, match_n_vectors * sizeof (u32x4));
 
   t->next_table_index = ~0;
@@ -139,20 +148,15 @@ vnet_classify_new_table (vnet_classify_main_t * cm,
   t->match_n_vectors = match_n_vectors;
   t->skip_n_vectors = skip_n_vectors;
   t->entries_per_page = 2;
+  t->load_mask = pow2_mask (match_n_vectors * 2);
 
-#if USE_DLMALLOC == 0
-  t->mheap = mheap_alloc (0 /* use VM */ , memory_size);
-#else
-  t->mheap = create_mspace (memory_size, 1 /* locked */ );
-#endif
+  t->mheap = clib_mem_create_heap (0, memory_size, 1 /* locked */ ,
+				   "classify");
 
   vec_validate_aligned (t->buckets, nbuckets - 1, CLIB_CACHE_LINE_BYTES);
   oldheap = clib_mem_set_heap (t->mheap);
 
-  t->writer_lock = clib_mem_alloc_aligned (CLIB_CACHE_LINE_BYTES,
-					   CLIB_CACHE_LINE_BYTES);
-  t->writer_lock[0] = 0;
-
+  clib_spinlock_init (&t->writer_lock);
   clib_mem_set_heap (oldheap);
   return (t);
 }
@@ -172,14 +176,8 @@ vnet_classify_delete_table_index (vnet_classify_main_t * cm,
       /* Recursively delete the entire chain */
       vnet_classify_delete_table_index (cm, t->next_table_index, del_chain);
 
-  vec_free (t->mask);
   vec_free (t->buckets);
-#if USE_DLMALLOC == 0
-  mheap_free (t->mheap);
-#else
-  destroy_mspace (t->mheap);
-#endif
-
+  clib_mem_destroy_heap (t->mheap);
   pool_put (cm->tables, t);
 }
 
@@ -190,7 +188,7 @@ vnet_classify_entry_alloc (vnet_classify_table_t * t, u32 log2_pages)
   u32 required_length;
   void *oldheap;
 
-  ASSERT (t->writer_lock[0]);
+  CLIB_SPINLOCK_ASSERT_LOCKED (&t->writer_lock);
   required_length =
     (sizeof (vnet_classify_entry_t) + (t->match_n_vectors * sizeof (u32x4)))
     * t->entries_per_page * (1 << log2_pages);
@@ -219,7 +217,7 @@ static void
 vnet_classify_entry_free (vnet_classify_table_t * t,
 			  vnet_classify_entry_t * v, u32 log2_pages)
 {
-  ASSERT (t->writer_lock[0]);
+  CLIB_SPINLOCK_ASSERT_LOCKED (&t->writer_lock);
 
   ASSERT (vec_len (t->freelists) > log2_pages);
 
@@ -295,7 +293,7 @@ split_and_rehash (vnet_classify_table_t * t,
 
   for (i = 0; i < length_in_entries;
i++) { - u64 new_hash; + u32 new_hash; v = vnet_classify_entry_at_index (t, old_values, i); @@ -394,6 +392,7 @@ vnet_classify_entry_claim_resource (vnet_classify_entry_t * e) fib_table_lock (e->metadata, FIB_PROTOCOL_IP6, FIB_SOURCE_CLASSIFY); break; case CLASSIFY_ACTION_SET_METADATA: + case CLASSIFY_ACTION_NONE: break; } } @@ -410,13 +409,14 @@ vnet_classify_entry_release_resource (vnet_classify_entry_t * e) fib_table_unlock (e->metadata, FIB_PROTOCOL_IP6, FIB_SOURCE_CLASSIFY); break; case CLASSIFY_ACTION_SET_METADATA: + case CLASSIFY_ACTION_NONE: break; } } -int -vnet_classify_add_del (vnet_classify_table_t * t, - vnet_classify_entry_t * add_v, int is_add) +static int +vnet_classify_add_del (vnet_classify_table_t *t, vnet_classify_entry_t *add_v, + int is_add) { u32 bucket_index; vnet_classify_bucket_t *b, tmp_b; @@ -424,7 +424,7 @@ vnet_classify_add_del (vnet_classify_table_t * t, u32 value_index; int rv = 0; int i; - u64 hash, new_hash; + u32 hash, new_hash; u32 limit; u32 old_log2_pages, new_log2_pages; u32 thread_index = vlib_get_thread_index (); @@ -444,8 +444,7 @@ vnet_classify_add_del (vnet_classify_table_t * t, hash >>= t->log2_nbuckets; - while (clib_atomic_test_and_set (t->writer_lock)) - ; + clib_spinlock_lock (&t->writer_lock); /* First elt in the bucket? */ if (b->offset == 0) @@ -619,7 +618,6 @@ vnet_classify_add_del (vnet_classify_table_t * t, } /* Crap. Try again */ vnet_classify_entry_free (t, save_new_v, new_log2_pages); - new_log2_pages++; if (resplit_once) goto mark_linear; @@ -638,8 +636,7 @@ expand_ok: vnet_classify_entry_free (t, v, old_log2_pages); unlock: - CLIB_MEMORY_BARRIER (); - t->writer_lock[0] = 0; + clib_spinlock_unlock (&t->writer_lock); return rv; } @@ -650,21 +647,21 @@ typedef CLIB_PACKED(struct { }) classify_data_or_mask_t; /* *INDENT-ON* */ -u64 -vnet_classify_hash_packet (vnet_classify_table_t * t, u8 * h) +u32 +vnet_classify_hash_packet (const vnet_classify_table_t *t, u8 *h) { return vnet_classify_hash_packet_inline (t, h); } vnet_classify_entry_t * -vnet_classify_find_entry (vnet_classify_table_t * t, - u8 * h, u64 hash, f64 now) +vnet_classify_find_entry (const vnet_classify_table_t *t, u8 *h, u32 hash, + f64 now) { return vnet_classify_find_entry_inline (t, h, hash, now); } -static u8 * -format_classify_entry (u8 * s, va_list * args) +u8 * +format_classify_entry (u8 *s, va_list *args) { vnet_classify_table_t *t = va_arg (*args, vnet_classify_table_t *); vnet_classify_entry_t *e = va_arg (*args, vnet_classify_entry_t *); @@ -747,17 +744,11 @@ format_classify_table (u8 * s, va_list * args) } int -vnet_classify_add_del_table (vnet_classify_main_t * cm, - u8 * mask, - u32 nbuckets, - u32 memory_size, - u32 skip, - u32 match, - u32 next_table_index, - u32 miss_next_index, - u32 * table_index, - u8 current_data_flag, - i16 current_data_offset, +vnet_classify_add_del_table (vnet_classify_main_t *cm, const u8 *mask, + u32 nbuckets, u32 memory_size, u32 skip, + u32 match, u32 next_table_index, + u32 miss_next_index, u32 *table_index, + u8 current_data_flag, i16 current_data_offset, int is_add, int del_chain) { vnet_classify_table_t *t; @@ -772,6 +763,9 @@ vnet_classify_add_del_table (vnet_classify_main_t * cm, if (nbuckets == 0) return VNET_API_ERROR_INVALID_VALUE; + if (match < 1 || match > 5) + return VNET_API_ERROR_INVALID_VALUE; + t = vnet_classify_new_table (cm, mask, nbuckets, memory_size, skip, match); t->next_table_index = next_table_index; @@ -783,8 +777,10 @@ vnet_classify_add_del_table (vnet_classify_main_t * cm, else /* update */ { 
vnet_classify_main_t *cm = &vnet_classify_main; - t = pool_elt_at_index (cm->tables, *table_index); + if (pool_is_free_index (cm->tables, *table_index)) + return VNET_API_ERROR_CLASSIFY_TABLE_NOT_FOUND; + t = pool_elt_at_index (cm->tables, *table_index); t->next_table_index = next_table_index; } return 0; @@ -917,7 +913,7 @@ unformat_l4_mask (unformat_input_t * input, va_list * args) else if (unformat (input, "dst_port")) dst_port = 0xFFFF; else - return 0; + break; } if (!src_port && !dst_port) @@ -942,6 +938,10 @@ unformat_ip4_mask (unformat_input_t * input, va_list * args) u8 *mask = 0; u8 found_something = 0; ip4_header_t *ip; + u32 src_prefix_len = 32; + u32 src_prefix_mask = ~0; + u32 dst_prefix_len = 32; + u32 dst_prefix_mask = ~0; #define _(a) u8 a=0; foreach_ip4_proto_field; @@ -956,6 +956,18 @@ unformat_ip4_mask (unformat_input_t * input, va_list * args) version = 1; else if (unformat (input, "hdr_length")) hdr_length = 1; + else if (unformat (input, "src/%d", &src_prefix_len)) + { + src_address = 1; + src_prefix_mask &= ~((1 << (32 - src_prefix_len)) - 1); + src_prefix_mask = clib_host_to_net_u32 (src_prefix_mask); + } + else if (unformat (input, "dst/%d", &dst_prefix_len)) + { + dst_address = 1; + dst_prefix_mask &= ~((1 << (32 - dst_prefix_len)) - 1); + dst_prefix_mask = clib_host_to_net_u32 (dst_prefix_mask); + } else if (unformat (input, "src")) src_address = 1; else if (unformat (input, "dst")) @@ -970,6 +982,7 @@ unformat_ip4_mask (unformat_input_t * input, va_list * args) break; } + found_something = version + hdr_length; #define _(a) found_something += a; foreach_ip4_proto_field; #undef _ @@ -985,6 +998,12 @@ unformat_ip4_mask (unformat_input_t * input, va_list * args) foreach_ip4_proto_field; #undef _ + if (src_address) + ip->src_address.as_u32 = src_prefix_mask; + + if (dst_address) + ip->dst_address.as_u32 = dst_prefix_mask; + ip->ip_version_and_header_length = 0; if (version) @@ -1009,7 +1028,7 @@ unformat_ip6_mask (unformat_input_t * input, va_list * args) { u8 **maskp = va_arg (*args, u8 **); u8 *mask = 0; - u8 found_something = 0; + u8 found_something; ip6_header_t *ip; u32 ip_version_traffic_class_and_flow_label; @@ -1042,6 +1061,10 @@ unformat_ip6_mask (unformat_input_t * input, va_list * args) break; } + /* Account for "special" field names */ + found_something = version + traffic_class + flow_label + + src_address + dst_address + protocol; + #define _(a) found_something += a; foreach_ip6_proto_field; #undef _ @@ -1212,12 +1235,16 @@ unformat_classify_mask (unformat_input_t * input, va_list * args) u8 *l2 = 0; u8 *l3 = 0; u8 *l4 = 0; + u8 add_l2 = 1; int i; while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT) { if (unformat (input, "hex %U", unformat_hex_string, &mask)) ; + else if (unformat (input, "l2 none")) + /* Don't add the l2 header in the mask */ + add_l2 = 0; else if (unformat (input, "l2 %U", unformat_l2_mask, &l2)) ; else if (unformat (input, "l3 %U", unformat_l3_mask, &l3)) @@ -1228,6 +1255,15 @@ unformat_classify_mask (unformat_input_t * input, va_list * args) break; } + if (l2 && !add_l2) + { + vec_free (mask); + vec_free (l2); + vec_free (l3); + vec_free (l4); + return 0; + } + if (l4 && !l3) { vec_free (mask); @@ -1240,15 +1276,20 @@ unformat_classify_mask (unformat_input_t * input, va_list * args) { if (l2 || l3 || l4) { - /* "With a free Ethernet header in every package" */ - if (l2 == 0) - vec_validate (l2, 13); - mask = l2; - if (l3) + if (add_l2) { - vec_append (mask, l3); - vec_free (l3); + /* "With a free Ethernet header in every 
package" */ + if (l2 == 0) + vec_validate (l2, 13); + mask = l2; + if (l3) + { + vec_append (mask, l3); + vec_free (l3); + } } + else + mask = l3; if (l4) { vec_append (mask, l4); @@ -1281,7 +1322,7 @@ unformat_classify_mask (unformat_input_t * input, va_list * args) if (match == 0) clib_warning ("BUG: match 0"); - _vec_len (mask) = match * sizeof (u32x4); + vec_set_len (mask, match * sizeof (u32x4)); *matchp = match; *maskp = mask; @@ -1580,7 +1621,7 @@ classify_table_command_fn (vlib_main_t * vm, if (!is_add && table_index == ~0) return clib_error_return (0, "table index required for delete"); - rv = vnet_classify_add_del_table (cm, mask, nbuckets, memory_size, + rv = vnet_classify_add_del_table (cm, mask, nbuckets, (u32) memory_size, skip, match, next_table_index, miss_next_index, &table_index, current_data_flag, current_data_offset, @@ -1598,7 +1639,8 @@ classify_table_command_fn (vlib_main_t * vm, } /* *INDENT-OFF* */ -VLIB_CLI_COMMAND (classify_table, static) = { +VLIB_CLI_COMMAND (classify_table, static) = +{ .path = "classify table", .short_help = "classify table [miss-next|l2-miss_next|acl-miss-next ]" @@ -1610,8 +1652,581 @@ VLIB_CLI_COMMAND (classify_table, static) = { }; /* *INDENT-ON* */ -static u8 * -format_vnet_classify_table (u8 * s, va_list * args) +static int +filter_table_mask_compare (void *a1, void *a2) +{ + vnet_classify_main_t *cm = &vnet_classify_main; + u32 *ti1 = a1; + u32 *ti2 = a2; + u32 n1 = 0, n2 = 0; + vnet_classify_table_t *t1, *t2; + u8 *m1, *m2; + int i; + + t1 = pool_elt_at_index (cm->tables, *ti1); + t2 = pool_elt_at_index (cm->tables, *ti2); + + m1 = (u8 *) (t1->mask); + m2 = (u8 *) (t2->mask); + + for (i = 0; i < t1->match_n_vectors * sizeof (u32x4); i++) + { + n1 += count_set_bits (m1[0]); + m1++; + } + + for (i = 0; i < t2->match_n_vectors * sizeof (u32x4); i++) + { + n2 += count_set_bits (m2[0]); + m2++; + } + + /* Reverse sort: descending number of set bits */ + if (n1 < n2) + return 1; + else if (n1 > n2) + return -1; + else + return 0; +} + + +/* + * Reorder the chain of tables starting with table_index such + * that more more-specific masks come before less-specific masks. + * Return the new head of the table chain. + */ +u32 +classify_sort_table_chain (vnet_classify_main_t * cm, u32 table_index) +{ + /* + * Form a vector of all classifier tables in this chain. + */ + u32 *tables = 0; + vnet_classify_table_t *t; + u32 cti; + for (cti = table_index; cti != ~0; cti = t->next_table_index) + { + vec_add1 (tables, cti); + t = pool_elt_at_index (cm->tables, cti); + } + + /* + * Sort filter tables from most-specific mask to least-specific mask. + */ + vec_sort_with_function (tables, filter_table_mask_compare); + + /* + * Relink tables via next_table_index fields. + */ + int i; + for (i = 0; i < vec_len (tables); i++) + { + t = pool_elt_at_index (cm->tables, tables[i]); + + if ((i + 1) < vec_len (tables)) + t->next_table_index = tables[i + 1]; + else + t->next_table_index = ~0; + } + + table_index = tables[0]; + vec_free (tables); + + return table_index; +} + + +u32 +classify_get_trace_chain (void) +{ + u32 table_index; + + table_index = vlib_global_main.trace_filter.classify_table_index; + + return table_index; +} + +/* + * Seting the Trace chain to ~0 is a request to delete and clear it. 
+ */ +void +classify_set_trace_chain (vnet_classify_main_t * cm, u32 table_index) +{ + if (table_index == ~0) + { + u32 old_table_index; + + old_table_index = vlib_global_main.trace_filter.classify_table_index; + vnet_classify_delete_table_index (cm, old_table_index, 1); + } + + vlib_global_main.trace_filter.classify_table_index = table_index; +} + + +u32 +classify_get_pcap_chain (vnet_classify_main_t * cm, u32 sw_if_index) +{ + u32 table_index = ~0; + + if (sw_if_index != ~0 + && (sw_if_index < vec_len (cm->classify_table_index_by_sw_if_index))) + table_index = cm->classify_table_index_by_sw_if_index[sw_if_index]; + + return table_index; +} + +void +classify_set_pcap_chain (vnet_classify_main_t * cm, + u32 sw_if_index, u32 table_index) +{ + vnet_main_t *vnm = vnet_get_main (); + + if (sw_if_index != ~0 && table_index != ~0) + vec_validate_init_empty (cm->classify_table_index_by_sw_if_index, + sw_if_index, ~0); + + if (table_index == ~0) + { + u32 old_table_index = ~0; + + if (sw_if_index < vec_len (cm->classify_table_index_by_sw_if_index)) + old_table_index = + cm->classify_table_index_by_sw_if_index[sw_if_index]; + + vnet_classify_delete_table_index (cm, old_table_index, 1); + } + + /* + * Put the table index where device drivers can find them. + * This table index will be either a valid table or a ~0 to clear it. + */ + if (vec_len (cm->classify_table_index_by_sw_if_index) > sw_if_index) + cm->classify_table_index_by_sw_if_index[sw_if_index] = table_index; + if (sw_if_index > 0) + { + vnet_hw_interface_t *hi; + hi = vnet_get_sup_hw_interface (vnm, sw_if_index); + hi->trace_classify_table_index = table_index; + } +} + + +/* + * Search for a mask-compatible Classify table within the given table chain. + */ +u32 +classify_lookup_chain (u32 table_index, u8 * mask, u32 n_skip, u32 n_match) +{ + vnet_classify_main_t *cm = &vnet_classify_main; + vnet_classify_table_t *t; + u32 cti; + + if (table_index == ~0) + return ~0; + + for (cti = table_index; cti != ~0; cti = t->next_table_index) + { + t = pool_elt_at_index (cm->tables, cti); + + /* Classifier geometry mismatch, can't use this table. */ + if (t->match_n_vectors != n_match || t->skip_n_vectors != n_skip) + continue; + + /* Masks aren't congruent, can't use this table. */ + if (t->match_n_vectors * sizeof (u32x4) != vec_len (mask)) + continue; + + /* Masks aren't bit-for-bit identical, can't use this table. */ + if (memcmp (t->mask, mask, t->match_n_vectors * sizeof (u32x4))) + continue; + + /* Winner... */ + return cti; + } + + return ~0; +} + + +static clib_error_t * +classify_filter_command_fn (vlib_main_t * vm, + unformat_input_t * input, + vlib_cli_command_t * cmd) +{ + u32 nbuckets = 8; + vnet_main_t *vnm = vnet_get_main (); + uword memory_size = (uword) (128 << 10); + u32 skip = ~0; + u32 match = ~0; + u8 *match_vector; + int is_add = 1; + u32 table_index = ~0; + u32 next_table_index = ~0; + u32 miss_next_index = ~0; + u32 current_data_flag = 0; + int current_data_offset = 0; + u32 sw_if_index = ~0; + int pkt_trace = 0; + int pcap = 0; + u8 *mask = 0; + vnet_classify_main_t *cm = &vnet_classify_main; + int rv = 0; + clib_error_t *err = 0; + + unformat_input_t _line_input, *line_input = &_line_input; + + /* Get a line of input. 
*/ + if (!unformat_user (input, unformat_line_input, line_input)) + return 0; + + while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT) + { + if (unformat (line_input, "del")) + is_add = 0; + else if (unformat (line_input, "pcap %=", &pcap, 1)) + sw_if_index = 0; + else if (unformat (line_input, "trace")) + pkt_trace = 1; + else if (unformat (line_input, "%U", + unformat_vnet_sw_interface, vnm, &sw_if_index)) + { + if (sw_if_index == 0) + return clib_error_return (0, "Local interface not supported..."); + } + else if (unformat (line_input, "buckets %d", &nbuckets)) + ; + else if (unformat (line_input, "mask %U", unformat_classify_mask, + &mask, &skip, &match)) + ; + else if (unformat (line_input, "memory-size %U", unformat_memory_size, + &memory_size)) + ; + else + break; + } + + if (is_add && mask == 0) + err = clib_error_return (0, "Mask required"); + + else if (is_add && skip == ~0) + err = clib_error_return (0, "skip count required"); + + else if (is_add && match == ~0) + err = clib_error_return (0, "match count required"); + + else if (sw_if_index == ~0 && pkt_trace == 0 && pcap == 0) + err = clib_error_return (0, "Must specify trace, pcap or interface..."); + + else if (pkt_trace && pcap) + err = clib_error_return + (0, "Packet trace and pcap are mutually exclusive..."); + + else if (pkt_trace && sw_if_index != ~0) + err = clib_error_return (0, "Packet trace filter is per-system"); + + if (err) + { + unformat_free (line_input); + return err; + } + + if (!is_add) + { + /* + * Delete an existing PCAP or trace classify table. + */ + if (pkt_trace) + classify_set_trace_chain (cm, ~0); + else + classify_set_pcap_chain (cm, sw_if_index, ~0); + + vec_free (mask); + unformat_free (line_input); + + return 0; + } + + /* + * Find an existing compatible table or else make a new one. + */ + if (pkt_trace) + table_index = classify_get_trace_chain (); + else + table_index = classify_get_pcap_chain (cm, sw_if_index); + + if (table_index != ~0) + { + /* + * look for a compatible table in the existing chain + * - if a compatible table is found, table_index is updated with it + * - if not, table_index is updated to ~0 (aka nil) and because of that + * we are going to create one (see below). We save the original head + * in next_table_index so we can chain it with the newly created + * table + */ + next_table_index = table_index; + table_index = classify_lookup_chain (table_index, mask, skip, match); + } + + /* + * When no table is found, make one. + */ + if (table_index == ~0) + { + u32 new_head_index; + + /* + * Matching table wasn't found, so create a new one at the + * head of the next_table_index chain. + */ + rv = vnet_classify_add_del_table (cm, mask, nbuckets, memory_size, + skip, match, next_table_index, + miss_next_index, &table_index, + current_data_flag, + current_data_offset, 1, 0); + + if (rv != 0) + { + vec_free (mask); + unformat_free (line_input); + return clib_error_return (0, + "vnet_classify_add_del_table returned %d", + rv); + } + + /* + * Reorder tables such that masks are most-specify to least-specific. + */ + new_head_index = classify_sort_table_chain (cm, table_index); + + /* + * Put first classifier table in chain in a place where + * other data structures expect to find and use it. + */ + if (pkt_trace) + classify_set_trace_chain (cm, new_head_index); + else + classify_set_pcap_chain (cm, sw_if_index, new_head_index); + } + + vec_free (mask); + + /* + * Now try to parse a and add a filter-match session. 
+ */ + if (unformat (line_input, "match %U", unformat_classify_match, + cm, &match_vector, table_index) == 0) + return 0; + + /* + * We use hit or miss to determine whether to trace or pcap pkts + * so the session setup is very limited + */ + rv = vnet_classify_add_del_session (cm, table_index, + match_vector, 0 /* hit_next_index */ , + 0 /* opaque_index */ , + 0 /* advance */ , + 0 /* action */ , + 0 /* metadata */ , + 1 /* is_add */ ); + + vec_free (match_vector); + + return 0; +} + +/** Enable / disable packet trace filter */ +int +vlib_enable_disable_pkt_trace_filter (int enable) +{ + if (enable) + { + vlib_global_main.trace_filter.trace_filter_enable = 1; + } + else + { + vlib_global_main.trace_filter.trace_filter_enable = 0; + } + return 0; +} + +/*? + * Construct an arbitrary set of packet classifier tables for use with + * "pcap trace rx | tx," and with the vpp packet tracer + * + * Packets which match a rule in the classifier table chain + * will be traced. The tables are automatically ordered so that + * matches in the most specific table are tried first. + * + * It's reasonably likely that folks will configure a single + * table with one or two matches. As a result, we configure + * 8 hash buckets and 128K of match rule space. One can override + * the defaults by specifying "buckets " and "memory-size " + * as desired. + * + * To build up complex filter chains, repeatedly issue the + * classify filter debug CLI command. Each command must specify the desired + * mask and match values. If a classifier table with a suitable mask + * already exists, the CLI command adds a match rule to the existing table. + * If not, the CLI command add a new table and the indicated mask rule + * + * Here is a terse description of the "mask " syntax: + * + * l2 src dst proto tag1 tag2 ignore-tag1 ignore-tag2 cos1 cos2 dot1q dot1ad + * + * l3 ip4 ip6 + * + * version hdr_length src[/width] dst[/width] + * tos length fragment_id ttl protocol checksum + * + * version traffic-class flow-label src dst proto + * payload_length hop_limit protocol + * + * l4 tcp udp src_port dst_port + * + * src dst # ports + * + * src_port dst_port + * + * To construct matches, add the values to match after the indicated keywords: + * in the match syntax. 
For example: + * mask l3 ip4 src -> match l3 ip4 src 192.168.1.11 + * + * @cliexpar + * Configuring the classify filter + * + * Configure a simple classify filter, and configure pcap trace rx to use it: + * + * @cliexcmd{classify filter rx mask l3 ip4 src match l3 ip4 src 192.168.1.11} + * pcap trace rx max 100 filter + * + * Configure another fairly simple filter + * + * @cliexcmd{classify filter mask l3 ip4 src dst match l3 ip4 src 192.168.1.10 + * dst 192.168.2.10} + * + * + * Configure a filter for use with the vpp packet tracer: + * @cliexcmd{classify filter trace mask l3 ip4 src dst match l3 ip4 src + * 192.168.1.10 dst 192.168.2.10} + * trace add dpdk-input 100 filter + * + * Clear classifier filters + * + * classify filter [trace | rx | tx | ] del + * + * To display the top-level classifier tables for each use case: + * show classify filter + * + * To inspect the classifier tables, use + * + * show classify table [verbose] + * The verbose form displays all of the match rules, with hit-counters + * @cliexend + ?*/ +/* *INDENT-OFF* */ +VLIB_CLI_COMMAND (classify_filter, static) = +{ + .path = "classify filter", + .short_help = + "classify filter | pcap mask match \n" + " | trace mask match [del]\n" + " [buckets ] [memory-size ]", + .function = classify_filter_command_fn, +}; +/* *INDENT-ON* */ + +static clib_error_t * +show_classify_filter_command_fn (vlib_main_t * vm, + unformat_input_t * input, + vlib_cli_command_t * cmd) +{ + vnet_classify_main_t *cm = &vnet_classify_main; + vnet_main_t *vnm = vnet_get_main (); + u8 *name = 0; + u8 *s = 0; + u32 table_index; + int verbose = 0; + int i, j, limit; + + (void) unformat (input, "verbose %=", &verbose, 1); + + vlib_cli_output (vm, "%-30s%s", "Filter Used By", " Table(s)"); + vlib_cli_output (vm, "%-30s%s", "--------------", " --------"); + + limit = vec_len (cm->classify_table_index_by_sw_if_index); + + for (i = -1; i < limit; i++) + { + switch (i) + { + case -1: + table_index = vlib_global_main.trace_filter.classify_table_index; + name = format (0, "packet tracer:"); + break; + + case 0: + table_index = cm->classify_table_index_by_sw_if_index[i]; + name = format (0, "pcap rx/tx/drop:"); + break; + + default: + table_index = cm->classify_table_index_by_sw_if_index[i]; + name = format (0, "%U:", format_vnet_sw_if_index_name, vnm, i); + break; + } + + if (verbose) + { + vnet_classify_table_t *t; + j = table_index; + do + { + if (j == ~0) + s = format (s, " none"); + else + { + s = format (s, " %u", j); + t = pool_elt_at_index (cm->tables, j); + j = t->next_table_index; + } + } + while (j != ~0); + + vlib_cli_output (vm, "%-30v table(s)%v", name, s); + vec_reset_length (s); + } + else + { + if (table_index != ~0) + s = format (s, " %u", table_index); + else + s = format (s, " none"); + + vlib_cli_output (vm, "%-30v first table%v", name, s); + vec_reset_length (s); + } + vec_reset_length (name); + } + vec_free (s); + vec_free (name); + return 0; +} + + +/* *INDENT-OFF* */ +VLIB_CLI_COMMAND (show_classify_filter, static) = +{ + .path = "show classify filter", + .short_help = "show classify filter [verbose [nn]]", + .function = show_classify_filter_command_fn, +}; +/* *INDENT-ON* */ + +u8 * +format_vnet_classify_table (u8 *s, va_list *args) { vnet_classify_main_t *cm = va_arg (*args, vnet_classify_main_t *); int verbose = va_arg (*args, int); @@ -1620,7 +2235,7 @@ format_vnet_classify_table (u8 * s, va_list * args) if (index == ~0) { - s = format (s, "%10s%10s%10s%10s", "TableIdx", "Sessions", "NextTbl", + s = format (s, "\n%10s%10s%10s%10s", 
"TableIdx", "Sessions", "NextTbl", "NextNode", verbose ? "Details" : ""); return s; } @@ -1629,7 +2244,8 @@ format_vnet_classify_table (u8 * s, va_list * args) s = format (s, "%10u%10d%10d%10d", index, t->active_elements, t->next_table_index, t->miss_next_index); - s = format (s, "\n Heap: %U", format_mheap, t->mheap, 0 /*verbose */ ); + s = format (s, "\n Heap: %U", format_clib_mem_heap, t->mheap, + 0 /*verbose */ ); s = format (s, "\n nbuckets %d, skip %d match %d flag %d offset %d", t->nbuckets, t->skip_n_vectors, t->match_n_vectors, @@ -1671,20 +2287,22 @@ show_classify_tables_command_fn (vlib_main_t * vm, } /* *INDENT-OFF* */ - pool_foreach (t, cm->tables, - ({ + pool_foreach (t, cm->tables) + { if (match_index == ~0 || (match_index == t - cm->tables)) vec_add1 (indices, t - cm->tables); - })); + } /* *INDENT-ON* */ if (vec_len (indices)) { - vlib_cli_output (vm, "%U", format_vnet_classify_table, cm, verbose, - ~0 /* hdr */ ); for (i = 0; i < vec_len (indices); i++) - vlib_cli_output (vm, "%U", format_vnet_classify_table, cm, - verbose, indices[i]); + { + vlib_cli_output (vm, "%U", format_vnet_classify_table, cm, verbose, + ~0 /* hdr */); + vlib_cli_output (vm, "%U", format_vnet_classify_table, cm, verbose, + indices[i]); + } } else vlib_cli_output (vm, "No classifier tables configured"); @@ -1720,7 +2338,7 @@ unformat_l4_match (unformat_input_t * input, va_list * args) else if (unformat (input, "dst_port %d", &dst_port)) ; else - return 0; + break; } h.src_port = clib_host_to_net_u16 (src_port); @@ -2081,6 +2699,7 @@ unformat_classify_match (unformat_input_t * input, va_list * args) u8 *l2 = 0; u8 *l3 = 0; u8 *l4 = 0; + u8 add_l2 = 1; if (pool_is_free_index (cm->tables, table_index)) return 0; @@ -2091,6 +2710,9 @@ unformat_classify_match (unformat_input_t * input, va_list * args) { if (unformat (input, "hex %U", unformat_hex_string, &match)) ; + else if (unformat (input, "l2 none")) + /* Don't add the l2 header in the mask */ + add_l2 = 0; else if (unformat (input, "l2 %U", unformat_l2_match, &l2)) ; else if (unformat (input, "l3 %U", unformat_l3_match, &l3)) @@ -2101,6 +2723,15 @@ unformat_classify_match (unformat_input_t * input, va_list * args) break; } + if (l2 && !add_l2) + { + vec_free (match); + vec_free (l2); + vec_free (l3); + vec_free (l4); + return 0; + } + if (l4 && !l3) { vec_free (match); @@ -2113,15 +2744,20 @@ unformat_classify_match (unformat_input_t * input, va_list * args) { if (l2 || l3 || l4) { - /* "Win a free Ethernet header in every packet" */ - if (l2 == 0) - vec_validate_aligned (l2, 13, sizeof (u32x4)); - match = l2; - if (l3) + if (add_l2) { - vec_append_aligned (match, l3, sizeof (u32x4)); - vec_free (l3); + /* "Win a free Ethernet header in every packet" */ + if (l2 == 0) + vec_validate_aligned (l2, 13, sizeof (u32x4)); + match = l2; + if (l3) + { + vec_append_aligned (match, l3, sizeof (u32x4)); + vec_free (l3); + } } + else + match = l3; if (l4) { vec_append_aligned (match, l4, sizeof (u32x4)); @@ -2136,8 +2772,8 @@ unformat_classify_match (unformat_input_t * input, va_list * args) sizeof (u32x4)); /* Set size, include skipped vectors */ - _vec_len (match) = - (t->match_n_vectors + t->skip_n_vectors) * sizeof (u32x4); + vec_set_len (match, + (t->match_n_vectors + t->skip_n_vectors) * sizeof (u32x4)); *matchp = match; @@ -2148,13 +2784,10 @@ unformat_classify_match (unformat_input_t * input, va_list * args) } int -vnet_classify_add_del_session (vnet_classify_main_t * cm, - u32 table_index, - u8 * match, - u32 hit_next_index, - u32 opaque_index, - i32 
advance,
-			       u8 action, u32 metadata, int is_add)
+vnet_classify_add_del_session (vnet_classify_main_t *cm, u32 table_index,
+			       const u8 *match, u16 hit_next_index,
+			       u32 opaque_index, i32 advance, u8 action,
+			       u32 metadata, int is_add)
 {
   vnet_classify_table_t *t;
   vnet_classify_entry_5_t _max_e __attribute__ ((aligned (16)));
@@ -2302,7 +2935,7 @@ classify_session_command_fn (vlib_main_t * vm,
 VLIB_CLI_COMMAND (classify_session_command, static) = {
   .path = "classify session",
   .short_help =
-    "classify session [hit-next|l2-hit-next|"
+    "classify session [hit-next|l2-input-hit-next|l2-output-hit-next|"
    "acl-hit-next |policer-hit-next ]"
    "\n table-index  match [hex] [l2] [l3 ip4] [opaque-index ]"
    "\n [action set-ip4-fib-id|set-ip6-fib-id|set-sr-policy-index ] [del]",
@@ -2440,12 +3073,26 @@ vnet_classify_init (vlib_main_t * vm)
   vnet_classify_register_unformat_acl_next_index_fn (unformat_acl_next_node);
 
+  vlib_global_main.trace_filter.classify_table_index = ~0;
+
   return 0;
 }
 
 VLIB_INIT_FUNCTION (vnet_classify_init);
 
-#define TEST_CODE 1
+int
+vnet_is_packet_traced (vlib_buffer_t * b, u32 classify_table_index, int func)
+{
+  return vnet_is_packet_traced_inline (b, classify_table_index, func);
+}
+VLIB_REGISTER_TRACE_FILTER_FUNCTION (vnet_is_packet_traced_fn, static) = {
+  .name = "vnet_is_packet_traced",
+  .description = "classifier based filter",
+  .priority = 50,
+  .function = vnet_is_packet_traced
+};
+
+#define TEST_CODE 0
 
 #if TEST_CODE > 0
 
@@ -2595,7 +3242,7 @@ test_classify_churn (test_classify_main_t * tm)
   for (i = 0; i < tm->sessions; i++)
     {
       u8 *key_minus_skip;
-      u64 hash;
+      u32 hash;
      vnet_classify_entry_t *e;

      ep = tm->entries + i;