diff --git a/src/vnet/ipsec/ipsec.c b/src/vnet/ipsec/ipsec.c
index da1a3fe8727..3ea2e4d62df 100644
--- a/src/vnet/ipsec/ipsec.c
+++ b/src/vnet/ipsec/ipsec.c
@@ -19,26 +19,108 @@
 #include
 #include
 #include
-#include
+#include
 #include
 #include
 #include
+#include
+#include
+#include
+
+/* Flow cache is sized for 1 million flows with a load factor of 0.25.
+ */
+#define IPSEC4_OUT_SPD_DEFAULT_HASH_NUM_BUCKETS (1 << 22)
+
+/* Flow cache is sized for 1 million flows with a load factor of 0.25.
+ */
+#define IPSEC4_SPD_DEFAULT_HASH_NUM_BUCKETS (1 << 22)
 
 ipsec_main_t ipsec_main;
+esp_async_post_next_t esp_encrypt_async_next;
+esp_async_post_next_t esp_decrypt_async_next;
+
+clib_error_t *
+ipsec_register_next_header (vlib_main_t *vm, u8 next_header,
+			    const char *next_node)
+{
+  ipsec_main_t *im = &ipsec_main;
+  const vlib_node_t *node = vlib_get_node_by_name (vm, (u8 *) next_node);
+  /* -post nodes (e.g. esp4-decrypt-post) are siblings of non-post nodes
+   * (e.g. esp4-decrypt) and will therefore have the same next index */
+  const vlib_node_t *esp_decrypt_nodes[] = {
+    vlib_get_node (vm, im->esp4_decrypt_node_index),
+    vlib_get_node (vm, im->esp6_decrypt_node_index),
+    vlib_get_node (vm, im->esp4_decrypt_tun_node_index),
+    vlib_get_node (vm, im->esp6_decrypt_tun_node_index),
+  };
+  uword slot, max;
+  int i;
+
+  /* look for a next_index value that we can use for all esp decrypt nodes
+   * to avoid maintaining different next index arrays... */
+
+  slot = vlib_node_get_next (vm, esp_decrypt_nodes[0]->index, node->index);
+  max = vec_len (esp_decrypt_nodes[0]->next_nodes);
+  for (i = 1; i < ARRAY_LEN (esp_decrypt_nodes); i++)
+    {
+      /* if the next node already exists, check that it shares the same
+       * next_index */
+      if (slot !=
+	  vlib_node_get_next (vm, esp_decrypt_nodes[i]->index, node->index))
+	return clib_error_return (
+	  0, "next node already exists with a different next index");
+      /* compute a suitable slot from the max of all nodes' next indices */
+      max = clib_max (max, vec_len (esp_decrypt_nodes[i]->next_nodes));
+    }
+
+  if (~0 == slot)
+    {
+      /* next node not there yet, add it using the computed max */
+      slot = max;
+      for (i = 0; i < ARRAY_LEN (esp_decrypt_nodes); i++)
+	vlib_node_add_next_with_slot (vm, esp_decrypt_nodes[i]->index,
+				      node->index, slot);
+    }
+
+  im->next_header_registrations[next_header] = slot;
+  return 0;
+}
 
 static clib_error_t *
 ipsec_check_ah_support (ipsec_sa_t * sa)
 {
+  ipsec_main_t *im = &ipsec_main;
+
   if (sa->integ_alg == IPSEC_INTEG_ALG_NONE)
     return clib_error_return (0, "unsupported none integ-alg");
+
+  if (!vnet_crypto_is_set_handler (im->integ_algs[sa->integ_alg].alg))
+    return clib_error_return (0, "No crypto engine support for %U",
+			      format_ipsec_integ_alg, sa->integ_alg);
+
   return 0;
 }
 
 static clib_error_t *
 ipsec_check_esp_support (ipsec_sa_t * sa)
 {
-  return 0;
+  ipsec_main_t *im = &ipsec_main;
+
+  if (IPSEC_INTEG_ALG_NONE != sa->integ_alg)
+    {
+      if (!vnet_crypto_is_set_handler (im->integ_algs[sa->integ_alg].alg))
+	return clib_error_return (0, "No crypto engine support for %U",
+				  format_ipsec_integ_alg, sa->integ_alg);
+    }
+  if (IPSEC_CRYPTO_ALG_NONE != sa->crypto_alg)
+    {
+      if (!vnet_crypto_is_set_handler (im->crypto_algs[sa->crypto_alg].alg))
+	return clib_error_return (0, "No crypto engine support for %U",
+				  format_ipsec_crypto_alg, sa->crypto_alg);
+    }
+
+  return (0);
 }
 
 clib_error_t *
@@ -100,14 +182,60 @@ ipsec_add_node (vlib_main_t * vm, const char *node_name,
   *out_next_index = vlib_node_add_next (vm, prev_node->index, node->index);
 }
 
-static void
-ipsec_add_feature (const char *arc_name,
-		   const char *node_name, u32 * out_feature_index)
+static inline uword
+ipsec_udp_registration_key (u16 port, u8 is_ip4)
+{
+  uword key = (is_ip4) ? AF_IP4 : AF_IP6;
+
+  key |= (uword) (port << 16);
+  return key;
+}
+
+void
+ipsec_unregister_udp_port (u16 port, u8 is_ip4)
+{
+  ipsec_main_t *im = &ipsec_main;
+  u32 n_regs;
+  uword *p, key;
+
+  key = ipsec_udp_registration_key (port, is_ip4);
+  p = hash_get (im->udp_port_registrations, key);
+
+  ASSERT (p);
+
+  n_regs = p[0];
+
+  if (0 == --n_regs)
+    {
+      udp_unregister_dst_port (vlib_get_main (), port, is_ip4);
+      hash_unset (im->udp_port_registrations, key);
+    }
+  else
+    {
+      hash_unset (im->udp_port_registrations, key);
+      hash_set (im->udp_port_registrations, key, n_regs);
+    }
+}
+
+void
+ipsec_register_udp_port (u16 port, u8 is_ip4)
 {
-  u8 arc;
+  ipsec_main_t *im = &ipsec_main;
+  u32 n_regs, node_index;
+  uword *p, key;
+
+  key = ipsec_udp_registration_key (port, is_ip4);
+  node_index =
+    (is_ip4) ? ipsec4_tun_input_node.index : ipsec6_tun_input_node.index;
+  p = hash_get (im->udp_port_registrations, key);
+
+  n_regs = (p ? p[0] : 0);
 
-  arc = vnet_get_feature_arc_index (arc_name);
-  *out_feature_index = vnet_get_feature_index (arc, node_name);
+  if (0 == n_regs++)
+    udp_register_dst_port (vlib_get_main (), port, node_index, is_ip4);
+
+  hash_unset (im->udp_port_registrations, key);
+  hash_set (im->udp_port_registrations, key, n_regs);
 }
 
 u32
@@ -139,16 +267,16 @@ ipsec_register_ah_backend (vlib_main_t * vm, ipsec_main_t * im,
 }
 
 u32
-ipsec_register_esp_backend (vlib_main_t * vm, ipsec_main_t * im,
-			    const char *name,
-			    const char *esp4_encrypt_node_name,
-			    const char *esp4_encrypt_node_tun_name,
-			    const char *esp4_decrypt_node_name,
-			    const char *esp6_encrypt_node_name,
-			    const char *esp6_encrypt_node_tun_name,
-			    const char *esp6_decrypt_node_name,
-			    check_support_cb_t esp_check_support_cb,
-			    add_del_sa_sess_cb_t esp_add_del_sa_sess_cb)
+ipsec_register_esp_backend (
+  vlib_main_t *vm, ipsec_main_t *im, const char *name,
+  const char *esp4_encrypt_node_name, const char *esp4_encrypt_node_tun_name,
+  const char *esp4_decrypt_node_name, const char *esp4_decrypt_tun_node_name,
+  const char *esp6_encrypt_node_name, const char *esp6_encrypt_node_tun_name,
+  const char *esp6_decrypt_node_name, const char *esp6_decrypt_tun_node_name,
+  const char *esp_mpls_encrypt_node_tun_name,
+  check_support_cb_t esp_check_support_cb,
+  add_del_sa_sess_cb_t esp_add_del_sa_sess_cb,
+  enable_disable_cb_t enable_disable_cb)
 {
   ipsec_esp_backend_t *b;
 
@@ -163,14 +291,24 @@ ipsec_register_esp_backend (vlib_main_t * vm, ipsec_main_t * im,
 		  &b->esp6_encrypt_node_index, &b->esp6_encrypt_next_index);
   ipsec_add_node (vm, esp6_decrypt_node_name, "ipsec6-input-feature",
 		  &b->esp6_decrypt_node_index, &b->esp6_decrypt_next_index);
-
-  ipsec_add_feature ("ip4-output", esp4_encrypt_node_tun_name,
-		     &b->esp4_encrypt_tun_feature_index);
-  ipsec_add_feature ("ip6-output", esp6_encrypt_node_tun_name,
-		     &b->esp6_encrypt_tun_feature_index);
+  ipsec_add_node (vm, esp4_decrypt_tun_node_name, "ipsec4-tun-input",
+		  &b->esp4_decrypt_tun_node_index,
+		  &b->esp4_decrypt_tun_next_index);
+  ipsec_add_node (vm, esp6_decrypt_tun_node_name, "ipsec6-tun-input",
+		  &b->esp6_decrypt_tun_node_index,
+		  &b->esp6_decrypt_tun_next_index);
+
+  b->esp6_encrypt_tun_node_index =
+    vlib_get_node_by_name (vm, (u8 *) esp6_encrypt_node_tun_name)->index;
+  b->esp_mpls_encrypt_tun_node_index =
+    vlib_get_node_by_name (vm, (u8 *) esp_mpls_encrypt_node_tun_name)->index;
+  b->esp4_encrypt_tun_node_index =
+    vlib_get_node_by_name (vm, (u8 *) esp4_encrypt_node_tun_name)->index;
 
   b->check_support_cb = esp_check_support_cb;
   b->add_del_sa_sess_cb = esp_add_del_sa_sess_cb;
+  b->enable_disable_cb = enable_disable_cb;
+
   return b - im->esp_backends;
 }
 
@@ -178,15 +316,12 @@ clib_error_t *
 ipsec_rsc_in_use (ipsec_main_t * im)
 {
   /* return an error if crypto resources are in use */
-  if (pool_elts (im->sad) > 0)
-    return clib_error_return (0,
-			      "%d SA entries configured",
-			      pool_elts (im->sad));
-
-  if (pool_elts (im->tunnel_interfaces))
-    return clib_error_return (0,
-			      "%d tunnel-interface entries configured",
-			      pool_elts (im->tunnel_interfaces));
+  if (pool_elts (ipsec_sa_pool) > 0)
+    return clib_error_return (0, "%d SA entries configured",
+			      pool_elts (ipsec_sa_pool));
+  if (ipsec_itf_count () > 0)
+    return clib_error_return (0, "%d IPSec interfaces configured",
+			      ipsec_itf_count ());
 
   return (NULL);
 }
@@ -223,6 +358,18 @@ ipsec_select_esp_backend (ipsec_main_t * im, u32 backend_idx)
   if (pool_is_free_index (im->esp_backends, backend_idx))
     return VNET_API_ERROR_INVALID_VALUE;
 
+  /* disable the current backend */
+  if (im->esp_current_backend != ~0)
+    {
+      ipsec_esp_backend_t *cb = pool_elt_at_index (im->esp_backends,
+						   im->esp_current_backend);
+      if (cb->enable_disable_cb)
+	{
+	  if ((cb->enable_disable_cb) (0) != 0)
+	    return -1;
+	}
+    }
+
   ipsec_esp_backend_t *b = pool_elt_at_index (im->esp_backends, backend_idx);
   im->esp_current_backend = backend_idx;
   im->esp4_encrypt_node_index = b->esp4_encrypt_node_index;
@@ -233,11 +380,67 @@ ipsec_select_esp_backend (ipsec_main_t * im, u32 backend_idx)
   im->esp6_decrypt_node_index = b->esp6_decrypt_node_index;
   im->esp6_encrypt_next_index = b->esp6_encrypt_next_index;
   im->esp6_decrypt_next_index = b->esp6_decrypt_next_index;
+  im->esp4_decrypt_tun_node_index = b->esp4_decrypt_tun_node_index;
+  im->esp4_decrypt_tun_next_index = b->esp4_decrypt_tun_next_index;
+  im->esp6_decrypt_tun_node_index = b->esp6_decrypt_tun_node_index;
+  im->esp6_decrypt_tun_next_index = b->esp6_decrypt_tun_next_index;
+  im->esp4_encrypt_tun_node_index = b->esp4_encrypt_tun_node_index;
+  im->esp6_encrypt_tun_node_index = b->esp6_encrypt_tun_node_index;
+  im->esp_mpls_encrypt_tun_node_index = b->esp_mpls_encrypt_tun_node_index;
+
+  if (b->enable_disable_cb)
+    {
+      if ((b->enable_disable_cb) (1) != 0)
+	return -1;
+    }
+  return 0;
+}
 
-  im->esp4_encrypt_tun_feature_index = b->esp4_encrypt_tun_feature_index;
-  im->esp6_encrypt_tun_feature_index = b->esp6_encrypt_tun_feature_index;
+void
+ipsec_set_async_mode (u32 is_enabled)
+{
+  ipsec_main_t *im = &ipsec_main;
+  ipsec_sa_t *sa;
 
-  return 0;
+  vnet_crypto_request_async_mode (is_enabled);
+
+  im->async_mode = is_enabled;
+
+  /* change SA crypto op data */
+  pool_foreach (sa, ipsec_sa_pool)
+    {
+      sa->crypto_op_data =
+	(is_enabled ? sa->async_op_data.data : sa->sync_op_data.data);
+    }
+}
+
+static void
+crypto_engine_backend_register_post_node (vlib_main_t * vm)
+{
+  esp_async_post_next_t *eit;
+  esp_async_post_next_t *dit;
+
+  eit = &esp_encrypt_async_next;
+  eit->esp4_post_next =
+    vnet_crypto_register_post_node (vm, "esp4-encrypt-post");
+  eit->esp6_post_next =
+    vnet_crypto_register_post_node (vm, "esp6-encrypt-post");
+  eit->esp4_tun_post_next =
+    vnet_crypto_register_post_node (vm, "esp4-encrypt-tun-post");
+  eit->esp6_tun_post_next =
+    vnet_crypto_register_post_node (vm, "esp6-encrypt-tun-post");
+  eit->esp_mpls_tun_post_next =
+    vnet_crypto_register_post_node (vm, "esp-mpls-encrypt-tun-post");
+
+  dit = &esp_decrypt_async_next;
+  dit->esp4_post_next =
+    vnet_crypto_register_post_node (vm, "esp4-decrypt-post");
+  dit->esp6_post_next =
+    vnet_crypto_register_post_node (vm, "esp6-decrypt-post");
+  dit->esp4_tun_post_next =
+    vnet_crypto_register_post_node (vm, "esp4-decrypt-tun-post");
+  dit->esp6_tun_post_next =
+    vnet_crypto_register_post_node (vm, "esp6-decrypt-tun-post");
 }
 
 static clib_error_t *
@@ -247,6 +450,10 @@ ipsec_init (vlib_main_t * vm)
   ipsec_main_t *im = &ipsec_main;
   ipsec_main_crypto_alg_t *a;
 
+  /* Backend registration requires the feature arcs to be set up */
+  if ((error = vlib_call_init_function (vm, vnet_feature_init)))
+    return (error);
+
   im->vnet_main = vnet_get_main ();
   im->vlib_main = vm;
 
@@ -258,7 +465,10 @@ ipsec_init (vlib_main_t * vm)
   ASSERT (node);
   im->error_drop_node_index = node->index;
 
-  u32 idx = ipsec_register_ah_backend (vm, im, "default openssl backend",
+  im->ah_current_backend = ~0;
+  im->esp_current_backend = ~0;
+
+  u32 idx = ipsec_register_ah_backend (vm, im, "crypto engine backend",
 				       "ah4-encrypt",
 				       "ah4-decrypt",
 				       "ah6-encrypt",
@@ -271,14 +481,11 @@ ipsec_init (vlib_main_t * vm)
   ASSERT (0 == rv);
   (void) (rv);			// avoid warning
 
-  idx = ipsec_register_esp_backend (vm, im, "default openssl backend",
-				    "esp4-encrypt",
-				    "esp4-encrypt-tun",
-				    "esp4-decrypt",
-				    "esp6-encrypt",
-				    "esp6-encrypt-tun",
-				    "esp6-decrypt",
-				    ipsec_check_esp_support, NULL);
+  idx = ipsec_register_esp_backend (
+    vm, im, "crypto engine backend", "esp4-encrypt", "esp4-encrypt-tun",
+    "esp4-decrypt", "esp4-decrypt-tun", "esp6-encrypt", "esp6-encrypt-tun",
+    "esp6-decrypt", "esp6-decrypt-tun", "esp-mpls-encrypt-tun",
+    ipsec_check_esp_support, NULL, crypto_dispatch_enable_disable);
   im->esp_default_backend = idx;
 
   rv = ipsec_select_esp_backend (im, idx);
@@ -288,65 +495,105 @@ ipsec_init (vlib_main_t * vm)
   if ((error = vlib_call_init_function (vm, ipsec_cli_init)))
     return error;
 
-  if ((error = vlib_call_init_function (vm, ipsec_tunnel_if_init)))
-    return error;
-
   vec_validate (im->crypto_algs, IPSEC_CRYPTO_N_ALG - 1);
 
+  a = im->crypto_algs + IPSEC_CRYPTO_ALG_NONE;
+  a->enc_op_id = VNET_CRYPTO_OP_NONE;
+  a->dec_op_id = VNET_CRYPTO_OP_NONE;
+  a->alg = VNET_CRYPTO_ALG_NONE;
+  a->iv_size = 0;
+  a->block_align = 1;
+
   a = im->crypto_algs + IPSEC_CRYPTO_ALG_DES_CBC;
   a->enc_op_id = VNET_CRYPTO_OP_DES_CBC_ENC;
   a->dec_op_id = VNET_CRYPTO_OP_DES_CBC_DEC;
   a->alg = VNET_CRYPTO_ALG_DES_CBC;
-  a->iv_size = a->block_size = 8;
+  a->iv_size = a->block_align = 8;
 
   a = im->crypto_algs + IPSEC_CRYPTO_ALG_3DES_CBC;
   a->enc_op_id = VNET_CRYPTO_OP_3DES_CBC_ENC;
   a->dec_op_id = VNET_CRYPTO_OP_3DES_CBC_DEC;
   a->alg = VNET_CRYPTO_ALG_3DES_CBC;
-  a->iv_size = a->block_size = 8;
+  a->iv_size = a->block_align = 8;
 
   a = im->crypto_algs + IPSEC_CRYPTO_ALG_AES_CBC_128;
   a->enc_op_id = VNET_CRYPTO_OP_AES_128_CBC_ENC;
   a->dec_op_id = VNET_CRYPTO_OP_AES_128_CBC_DEC;
   a->alg = VNET_CRYPTO_ALG_AES_128_CBC;
-  a->iv_size = a->block_size = 16;
+  a->iv_size = a->block_align = 16;
 
   a = im->crypto_algs + IPSEC_CRYPTO_ALG_AES_CBC_192;
   a->enc_op_id = VNET_CRYPTO_OP_AES_192_CBC_ENC;
   a->dec_op_id = VNET_CRYPTO_OP_AES_192_CBC_DEC;
   a->alg = VNET_CRYPTO_ALG_AES_192_CBC;
-  a->iv_size = a->block_size = 16;
+  a->iv_size = a->block_align = 16;
 
   a = im->crypto_algs + IPSEC_CRYPTO_ALG_AES_CBC_256;
   a->enc_op_id = VNET_CRYPTO_OP_AES_256_CBC_ENC;
   a->dec_op_id = VNET_CRYPTO_OP_AES_256_CBC_DEC;
   a->alg = VNET_CRYPTO_ALG_AES_256_CBC;
-  a->iv_size = a->block_size = 16;
+  a->iv_size = a->block_align = 16;
+
+  a = im->crypto_algs + IPSEC_CRYPTO_ALG_AES_CTR_128;
+  a->enc_op_id = VNET_CRYPTO_OP_AES_128_CTR_ENC;
+  a->dec_op_id = VNET_CRYPTO_OP_AES_128_CTR_DEC;
+  a->alg = VNET_CRYPTO_ALG_AES_128_CTR;
+  a->iv_size = 8;
+  a->block_align = 1;
+
+  a = im->crypto_algs + IPSEC_CRYPTO_ALG_AES_CTR_192;
+  a->enc_op_id = VNET_CRYPTO_OP_AES_192_CTR_ENC;
+  a->dec_op_id = VNET_CRYPTO_OP_AES_192_CTR_DEC;
+  a->alg = VNET_CRYPTO_ALG_AES_192_CTR;
+  a->iv_size = 8;
+  a->block_align = 1;
+
+  a = im->crypto_algs + IPSEC_CRYPTO_ALG_AES_CTR_256;
+  a->enc_op_id = VNET_CRYPTO_OP_AES_256_CTR_ENC;
+  a->dec_op_id = VNET_CRYPTO_OP_AES_256_CTR_DEC;
+  a->alg = VNET_CRYPTO_ALG_AES_256_CTR;
+  a->iv_size = 8;
+  a->block_align = 1;
 
   a = im->crypto_algs + IPSEC_CRYPTO_ALG_AES_GCM_128;
   a->enc_op_id = VNET_CRYPTO_OP_AES_128_GCM_ENC;
   a->dec_op_id = VNET_CRYPTO_OP_AES_128_GCM_DEC;
   a->alg = VNET_CRYPTO_ALG_AES_128_GCM;
-  a->iv_size = a->block_size = 8;
+  a->iv_size = 8;
+  a->block_align = 1;
   a->icv_size = 16;
 
   a = im->crypto_algs + IPSEC_CRYPTO_ALG_AES_GCM_192;
   a->enc_op_id = VNET_CRYPTO_OP_AES_192_GCM_ENC;
   a->dec_op_id = VNET_CRYPTO_OP_AES_192_GCM_DEC;
   a->alg = VNET_CRYPTO_ALG_AES_192_GCM;
-  a->iv_size = a->block_size = 8;
+  a->iv_size = 8;
+  a->block_align = 1;
   a->icv_size = 16;
 
   a = im->crypto_algs + IPSEC_CRYPTO_ALG_AES_GCM_256;
   a->enc_op_id = VNET_CRYPTO_OP_AES_256_GCM_ENC;
   a->dec_op_id = VNET_CRYPTO_OP_AES_256_GCM_DEC;
   a->alg = VNET_CRYPTO_ALG_AES_256_GCM;
-  a->iv_size = a->block_size = 8;
+  a->iv_size = 8;
+  a->block_align = 1;
+  a->icv_size = 16;
+
+  a = im->crypto_algs + IPSEC_CRYPTO_ALG_CHACHA20_POLY1305;
+  a->enc_op_id = VNET_CRYPTO_OP_CHACHA20_POLY1305_ENC;
+  a->dec_op_id = VNET_CRYPTO_OP_CHACHA20_POLY1305_DEC;
+  a->alg = VNET_CRYPTO_ALG_CHACHA20_POLY1305;
+  a->iv_size = 8;
   a->icv_size = 16;
 
   vec_validate (im->integ_algs, IPSEC_INTEG_N_ALG - 1);
   ipsec_main_integ_alg_t *i;
 
+  i = &im->integ_algs[IPSEC_INTEG_ALG_MD5_96];
+  i->op_id = VNET_CRYPTO_OP_MD5_HMAC;
+  i->alg = VNET_CRYPTO_ALG_HMAC_MD5;
+  i->icv_size = 12;
+
   i = &im->integ_algs[IPSEC_INTEG_ALG_SHA1_96];
   i->op_id = VNET_CRYPTO_OP_SHA1_HMAC;
   i->alg = VNET_CRYPTO_ALG_HMAC_SHA1;
@@ -364,7 +611,7 @@ ipsec_init (vlib_main_t * vm)
 
   i = &im->integ_algs[IPSEC_INTEG_ALG_SHA_384_192];
   i->op_id = VNET_CRYPTO_OP_SHA384_HMAC;
-  i->alg = VNET_CRYPTO_ALG_HMAC_SHA512;
+  i->alg = VNET_CRYPTO_ALG_HMAC_SHA384;
   i->icv_size = 24;
 
   i = &im->integ_algs[IPSEC_INTEG_ALG_SHA_512_256];
@@ -374,11 +621,171 @@ ipsec_init (vlib_main_t * vm)
 
   vec_validate_aligned (im->ptd, vlib_num_workers (), CLIB_CACHE_LINE_BYTES);
 
+  im->async_mode = 0;
+  crypto_engine_backend_register_post_node (vm);
+
+  im->ipsec4_out_spd_hash_tbl = NULL;
+  im->output_flow_cache_flag = 0;
+  im->ipsec4_out_spd_flow_cache_entries = 0;
+  im->epoch_count = 0;
+  im->ipsec4_out_spd_hash_num_buckets =
+    IPSEC4_OUT_SPD_DEFAULT_HASH_NUM_BUCKETS;
+
+  im->ipsec4_in_spd_hash_tbl = NULL;
+  im->input_flow_cache_flag = 0;
+  im->ipsec4_in_spd_flow_cache_entries = 0;
+  im->input_epoch_count = 0;
+  im->ipsec4_in_spd_hash_num_buckets = IPSEC4_SPD_DEFAULT_HASH_NUM_BUCKETS;
+
+  vec_validate_init_empty_aligned (im->next_header_registrations, 255, ~0,
+				   CLIB_CACHE_LINE_BYTES);
+
+  im->fp_spd_ipv4_out_is_enabled = 0;
+  im->fp_spd_ipv6_out_is_enabled = 0;
+  im->fp_spd_ipv4_in_is_enabled = 0;
+  im->fp_spd_ipv6_in_is_enabled = 0;
+
+  im->fp_lookup_hash_buckets = IPSEC_FP_HASH_LOOKUP_HASH_BUCKETS;
+
   return 0;
 }
 
 VLIB_INIT_FUNCTION (ipsec_init);
 
+static clib_error_t *
+ipsec_config (vlib_main_t *vm, unformat_input_t *input)
+{
+  ipsec_main_t *im = &ipsec_main;
+  unformat_input_t sub_input;
+
+  u32 ipsec4_out_spd_hash_num_buckets;
+  u32 ipsec4_in_spd_hash_num_buckets;
+  u32 ipsec_spd_fp_num_buckets;
+  bool fp_spd_ip4_enabled = false;
+  bool fp_spd_ip6_enabled = false;
+
+  while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+    {
+      if (unformat (input, "ipv6-outbound-spd-fast-path on"))
+	{
+	  im->fp_spd_ipv6_out_is_enabled = 1;
+	  fp_spd_ip6_enabled = true;
+	}
+      else if (unformat (input, "ipv6-outbound-spd-fast-path off"))
+	im->fp_spd_ipv6_out_is_enabled = 0;
+      else if (unformat (input, "ipv4-outbound-spd-fast-path on"))
+	{
+	  im->fp_spd_ipv4_out_is_enabled = 1;
+	  im->output_flow_cache_flag = 0;
+	  fp_spd_ip4_enabled = true;
+	}
+      else if (unformat (input, "ipv4-outbound-spd-fast-path off"))
+	im->fp_spd_ipv4_out_is_enabled = 0;
+      else if (unformat (input, "ipv6-inbound-spd-fast-path on"))
+	{
+	  im->fp_spd_ipv6_in_is_enabled = 1;
+	  fp_spd_ip6_enabled = true;
+	}
+      else if (unformat (input, "ipv6-inbound-spd-fast-path off"))
+	im->fp_spd_ipv6_in_is_enabled = 0;
+      else if (unformat (input, "ipv4-inbound-spd-fast-path on"))
+	{
+	  im->fp_spd_ipv4_in_is_enabled = 1;
+	  im->input_flow_cache_flag = 0;
+	  fp_spd_ip4_enabled = true;
+	}
+      else if (unformat (input, "ipv4-inbound-spd-fast-path off"))
+	im->fp_spd_ipv4_in_is_enabled = 0;
+      else if (unformat (input, "spd-fast-path-num-buckets %d",
+			 &ipsec_spd_fp_num_buckets))
+	{
+	  /* Number of bihash buckets is a power of 2 >= the input */
+	  im->fp_lookup_hash_buckets = 1ULL
+				       << max_log2 (ipsec_spd_fp_num_buckets);
+	}
+      else if (unformat (input, "ipv4-outbound-spd-flow-cache on"))
+	im->output_flow_cache_flag = im->fp_spd_ipv4_out_is_enabled ? 0 : 1;
+      else if (unformat (input, "ipv4-outbound-spd-flow-cache off"))
+	im->output_flow_cache_flag = 0;
+      else if (unformat (input, "ipv4-outbound-spd-hash-buckets %d",
+			 &ipsec4_out_spd_hash_num_buckets))
+	{
+	  /* Size of the hash is a power of 2 >= the number of buckets */
+	  im->ipsec4_out_spd_hash_num_buckets =
+	    1ULL << max_log2 (ipsec4_out_spd_hash_num_buckets);
+	}
+      else if (unformat (input, "ipv4-inbound-spd-flow-cache on"))
+	im->input_flow_cache_flag = im->fp_spd_ipv4_in_is_enabled ? 0 : 1;
+      else if (unformat (input, "ipv4-inbound-spd-flow-cache off"))
+	im->input_flow_cache_flag = 0;
+      else if (unformat (input, "ipv4-inbound-spd-hash-buckets %d",
+			 &ipsec4_in_spd_hash_num_buckets))
+	{
+	  im->ipsec4_in_spd_hash_num_buckets =
+	    1ULL << max_log2 (ipsec4_in_spd_hash_num_buckets);
+	}
+      else if (unformat (input, "ip4 %U", unformat_vlib_cli_sub_input,
+			 &sub_input))
+	{
+	  uword table_size = ~0;
+	  u32 n_buckets = ~0;
+
+	  while (unformat_check_input (&sub_input) != UNFORMAT_END_OF_INPUT)
+	    {
+	      if (unformat (&sub_input, "num-buckets %u", &n_buckets))
+		;
+	      else
+		return clib_error_return (0, "unknown input `%U'",
+					  format_unformat_error, &sub_input);
+	    }
+
+	  ipsec_tun_table_init (AF_IP4, table_size, n_buckets);
+	}
+      else if (unformat (input, "ip6 %U", unformat_vlib_cli_sub_input,
+			 &sub_input))
+	{
+	  uword table_size = ~0;
+	  u32 n_buckets = ~0;
+
+	  while (unformat_check_input (&sub_input) != UNFORMAT_END_OF_INPUT)
+	    {
+	      if (unformat (&sub_input, "num-buckets %u", &n_buckets))
+		;
+	      else
+		return clib_error_return (0, "unknown input `%U'",
+					  format_unformat_error, &sub_input);
+	    }
+
+	  ipsec_tun_table_init (AF_IP6, table_size, n_buckets);
+	}
+      else
+	return clib_error_return (0, "unknown input `%U'",
+				  format_unformat_error, input);
+    }
+  if (im->output_flow_cache_flag)
+    {
+      vec_add2 (im->ipsec4_out_spd_hash_tbl, im->ipsec4_out_spd_hash_tbl,
+		im->ipsec4_out_spd_hash_num_buckets);
+    }
+  if (im->input_flow_cache_flag)
+    {
+      vec_add2 (im->ipsec4_in_spd_hash_tbl, im->ipsec4_in_spd_hash_tbl,
+		im->ipsec4_in_spd_hash_num_buckets);
+    }
+
+  if (fp_spd_ip4_enabled)
+    pool_alloc_aligned (im->fp_ip4_lookup_hashes_pool,
+			IPSEC_FP_IP4_HASHES_POOL_SIZE, CLIB_CACHE_LINE_BYTES);
+
+  if (fp_spd_ip6_enabled)
+    pool_alloc_aligned (im->fp_ip6_lookup_hashes_pool,
+			IPSEC_FP_IP6_HASHES_POOL_SIZE, CLIB_CACHE_LINE_BYTES);
+
+  return 0;
+}
+
+VLIB_CONFIG_FUNCTION (ipsec_config, "ipsec");
+
 /*
  * fd.io coding-style-patch-verification: ON
 *
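
The new ipsec_register_next_header API in this change lets a feature steer decrypted inner packets by their IP protocol number, using one next-index slot shared across all four ESP decrypt nodes. A minimal sketch of how a plugin might call it, assuming a hypothetical node name "sample-proto-input" and a hypothetical sample_init function; the target node must already be registered by the caller, and the init must run after ipsec_init:

/* Hypothetical plugin init: steer decrypted ESP payloads whose
 * next-header is IP protocol 115 (L2TPv3) to a made-up node.
 * "sample-proto-input" and sample_init are illustrative names. */
#include <vlib/vlib.h>
#include <vnet/ipsec/ipsec.h>

static clib_error_t *
sample_init (vlib_main_t *vm)
{
  return ipsec_register_next_header (vm, 115 /* L2TPv3 */,
				     "sample-proto-input");
}

VLIB_INIT_FUNCTION (sample_init) = {
  .runs_after = VLIB_INITS ("ipsec_init"),
};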
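
ipsec_register_udp_port / ipsec_unregister_udp_port above are reference counted per (port, address family) key, so every register must eventually be balanced by an unregister; only the first register actually claims the UDP port and only the last unregister releases it. A sketch under those assumptions, using NAT-T style IPv4 encapsulation on port 4500:

/* Sketch: claim UDP/4500 (IPv4) for ipsec4-tun-input while a tunnel
 * needs UDP encapsulation, and release it when the tunnel goes away. */
static void
sample_tunnel_add_del (int is_add)
{
  if (is_add)
    ipsec_register_udp_port (4500, 1 /* is_ip4 */);
  else
    ipsec_unregister_udp_port (4500, 1 /* is_ip4 */);
}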
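
The enable_disable_cb added to the ESP backend registration is invoked by ipsec_select_esp_backend with 1 for the newly selected backend and 0 for the backend being replaced; a non-zero return aborts the switch (the function then returns -1). A skeleton of what a backend might supply, with illustrative names only:

/* Skeleton enable/disable hook for a hypothetical ESP backend. */
static int
sample_esp_backend_enable_disable (int is_enable)
{
  if (is_enable)
    {
      /* bring up backend-private resources (queues, workers, ...) */
    }
  else
    {
      /* quiesce; return non-zero here to veto the backend switch */
    }
  return 0;
}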
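
The VLIB_CONFIG_FUNCTION hook makes these parameters settable from an `ipsec` section of startup.conf. An illustrative stanza accepted by the parser above; the bucket counts are examples only and are rounded up to the next power of two, and note that enabling the IPv4 fast path disables the corresponding flow cache:

ipsec {
  ipv4-outbound-spd-fast-path on
  ipv4-inbound-spd-fast-path on
  spd-fast-path-num-buckets 1048576
  ip4 { num-buckets 65536 }
  ip6 { num-buckets 65536 }
}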