X-Git-Url: https://gerrit.fd.io/r/gitweb?a=blobdiff_plain;f=src%2Fvnet%2Fipsec%2Fipsec.c;h=74713458b145967438dba5cd3ed31c2a3e338aff;hb=f16e9a550;hp=95e322e87a645cf639bf4eba27546ab67fc8ce78;hpb=4ec36c5535849a4e456ed99b57968d54d5e03b62;p=vpp.git

diff --git a/src/vnet/ipsec/ipsec.c b/src/vnet/ipsec/ipsec.c
index 95e322e87a6..74713458b14 100644
--- a/src/vnet/ipsec/ipsec.c
+++ b/src/vnet/ipsec/ipsec.c
@@ -19,7 +19,7 @@
 #include
 #include
 #include
-#include
+#include
 #include
 #include
 
@@ -124,6 +124,50 @@ ipsec_add_node (vlib_main_t * vm, const char *node_name,
   *out_next_index = vlib_node_add_next (vm, prev_node->index, node->index);
 }
 
+void
+ipsec_unregister_udp_port (u16 port)
+{
+  ipsec_main_t *im = &ipsec_main;
+  u32 n_regs;
+  uword *p;
+
+  p = hash_get (im->udp_port_registrations, port);
+
+  ASSERT (p);
+
+  n_regs = p[0];
+
+  if (0 == --n_regs)
+    {
+      udp_unregister_dst_port (vlib_get_main (), port, 1);
+      hash_unset (im->udp_port_registrations, port);
+    }
+  else
+    {
+      hash_unset (im->udp_port_registrations, port);
+      hash_set (im->udp_port_registrations, port, n_regs);
+    }
+}
+
+void
+ipsec_register_udp_port (u16 port)
+{
+  ipsec_main_t *im = &ipsec_main;
+  u32 n_regs;
+  uword *p;
+
+  p = hash_get (im->udp_port_registrations, port);
+
+  n_regs = (p ? p[0] : 0);
+
+  if (0 == n_regs++)
+    udp_register_dst_port (vlib_get_main (), port,
+                           ipsec4_tun_input_node.index, 1);
+
+  hash_unset (im->udp_port_registrations, port);
+  hash_set (im->udp_port_registrations, port, n_regs);
+}
+
 u32
 ipsec_register_ah_backend (vlib_main_t * vm, ipsec_main_t * im,
                            const char *name,
@@ -153,19 +197,16 @@ ipsec_register_ah_backend (vlib_main_t * vm, ipsec_main_t * im,
 }
 
 u32
-ipsec_register_esp_backend (vlib_main_t * vm, ipsec_main_t * im,
-                            const char *name,
-                            const char *esp4_encrypt_node_name,
-                            const char *esp4_encrypt_node_tun_name,
-                            const char *esp4_decrypt_node_name,
-                            const char *esp4_decrypt_tun_node_name,
-                            const char *esp6_encrypt_node_name,
-                            const char *esp6_encrypt_node_tun_name,
-                            const char *esp6_decrypt_node_name,
-                            const char *esp6_decrypt_tun_node_name,
-                            check_support_cb_t esp_check_support_cb,
-                            add_del_sa_sess_cb_t esp_add_del_sa_sess_cb,
-                            enable_disable_cb_t enable_disable_cb)
+ipsec_register_esp_backend (
+  vlib_main_t *vm, ipsec_main_t *im, const char *name,
+  const char *esp4_encrypt_node_name, const char *esp4_encrypt_node_tun_name,
+  const char *esp4_decrypt_node_name, const char *esp4_decrypt_tun_node_name,
+  const char *esp6_encrypt_node_name, const char *esp6_encrypt_node_tun_name,
+  const char *esp6_decrypt_node_name, const char *esp6_decrypt_tun_node_name,
+  const char *esp_mpls_encrypt_node_tun_name,
+  check_support_cb_t esp_check_support_cb,
+  add_del_sa_sess_cb_t esp_add_del_sa_sess_cb,
+  enable_disable_cb_t enable_disable_cb)
 {
   ipsec_esp_backend_t *b;
 
@@ -189,6 +230,8 @@ ipsec_register_esp_backend (vlib_main_t * vm, ipsec_main_t * im,
   b->esp6_encrypt_tun_node_index =
     vlib_get_node_by_name (vm, (u8 *) esp6_encrypt_node_tun_name)->index;
 
+  b->esp_mpls_encrypt_tun_node_index =
+    vlib_get_node_by_name (vm, (u8 *) esp_mpls_encrypt_node_tun_name)->index;
   b->esp4_encrypt_tun_node_index =
     vlib_get_node_by_name (vm, (u8 *) esp4_encrypt_node_tun_name)->index;
 
@@ -203,10 +246,9 @@ clib_error_t *
 ipsec_rsc_in_use (ipsec_main_t * im)
 {
   /* return an error is crypto resource are in use */
-  if (pool_elts (im->sad) > 0)
-    return clib_error_return (0,
-                              "%d SA entries configured",
-                              pool_elts (im->sad));
+  if (pool_elts (ipsec_sa_pool) > 0)
+    return clib_error_return (0, "%d SA entries configured",
+                              pool_elts (ipsec_sa_pool));
 
   return (NULL);
 }
@@ -271,6 +313,7 @@ ipsec_select_esp_backend (ipsec_main_t * im, u32 backend_idx)
   im->esp6_decrypt_tun_next_index = b->esp6_decrypt_tun_next_index;
   im->esp4_encrypt_tun_node_index = b->esp4_encrypt_tun_node_index;
   im->esp6_encrypt_tun_node_index = b->esp6_encrypt_tun_node_index;
+  im->esp_mpls_encrypt_tun_node_index = b->esp_mpls_encrypt_tun_node_index;
 
   if (b->enable_disable_cb)
     {
@@ -286,21 +329,16 @@ ipsec_set_async_mode (u32 is_enabled)
   ipsec_main_t *im = &ipsec_main;
   ipsec_sa_t *sa;
 
-  /* lock all SAs before change im->async_mode */
-  pool_foreach (sa, im->sad, (
-                  {
-                  fib_node_lock (&sa->node);
-                  }));
+  vnet_crypto_request_async_mode (is_enabled);
 
   im->async_mode = is_enabled;
 
-  /* change SA crypto op data before unlock them */
-  pool_foreach (sa, im->sad, (
-                  {
-                  sa->crypto_op_data = is_enabled ?
-                  sa->async_op_data.data : sa->sync_op_data.data;
-                  fib_node_unlock (&sa->node);
-                  }));
+  /* change SA crypto op data */
+  pool_foreach (sa, ipsec_sa_pool)
+    {
+      sa->crypto_op_data =
+        (is_enabled ? sa->async_op_data.data : sa->sync_op_data.data);
+    }
 }
 
 static void
@@ -318,6 +356,8 @@ crypto_engine_backend_register_post_node (vlib_main_t * vm)
     vnet_crypto_register_post_node (vm, "esp4-encrypt-tun-post");
   eit->esp6_tun_post_next =
     vnet_crypto_register_post_node (vm, "esp6-encrypt-tun-post");
+  eit->esp_mpls_tun_post_next =
+    vnet_crypto_register_post_node (vm, "esp-mpls-encrypt-tun-post");
 
   dit = &esp_decrypt_async_next;
   dit->esp4_post_next =
@@ -368,17 +408,11 @@ ipsec_init (vlib_main_t * vm)
   ASSERT (0 == rv);
   (void) (rv); // avoid warning
 
-  idx = ipsec_register_esp_backend (vm, im, "crypto engine backend",
-                                    "esp4-encrypt",
-                                    "esp4-encrypt-tun",
-                                    "esp4-decrypt",
-                                    "esp4-decrypt-tun",
-                                    "esp6-encrypt",
-                                    "esp6-encrypt-tun",
-                                    "esp6-decrypt",
-                                    "esp6-decrypt-tun",
-                                    ipsec_check_esp_support,
-                                    NULL, crypto_dispatch_enable_disable);
+  idx = ipsec_register_esp_backend (
+    vm, im, "crypto engine backend", "esp4-encrypt", "esp4-encrypt-tun",
+    "esp4-decrypt", "esp4-decrypt-tun", "esp6-encrypt", "esp6-encrypt-tun",
+    "esp6-decrypt", "esp6-decrypt-tun", "esp-mpls-encrypt-tun",
+    ipsec_check_esp_support, NULL, crypto_dispatch_enable_disable);
 
   im->esp_default_backend = idx;
   rv = ipsec_select_esp_backend (im, idx);
@@ -395,44 +429,65 @@ ipsec_init (vlib_main_t * vm)
   a->dec_op_id = VNET_CRYPTO_OP_NONE;
   a->alg = VNET_CRYPTO_ALG_NONE;
   a->iv_size = 0;
-  a->block_size = 1;
+  a->block_align = 1;
 
   a = im->crypto_algs + IPSEC_CRYPTO_ALG_DES_CBC;
   a->enc_op_id = VNET_CRYPTO_OP_DES_CBC_ENC;
   a->dec_op_id = VNET_CRYPTO_OP_DES_CBC_DEC;
   a->alg = VNET_CRYPTO_ALG_DES_CBC;
-  a->iv_size = a->block_size = 8;
+  a->iv_size = a->block_align = 8;
 
   a = im->crypto_algs + IPSEC_CRYPTO_ALG_3DES_CBC;
   a->enc_op_id = VNET_CRYPTO_OP_3DES_CBC_ENC;
   a->dec_op_id = VNET_CRYPTO_OP_3DES_CBC_DEC;
   a->alg = VNET_CRYPTO_ALG_3DES_CBC;
-  a->iv_size = a->block_size = 8;
+  a->iv_size = a->block_align = 8;
 
   a = im->crypto_algs + IPSEC_CRYPTO_ALG_AES_CBC_128;
   a->enc_op_id = VNET_CRYPTO_OP_AES_128_CBC_ENC;
   a->dec_op_id = VNET_CRYPTO_OP_AES_128_CBC_DEC;
   a->alg = VNET_CRYPTO_ALG_AES_128_CBC;
-  a->iv_size = a->block_size = 16;
+  a->iv_size = a->block_align = 16;
 
   a = im->crypto_algs + IPSEC_CRYPTO_ALG_AES_CBC_192;
   a->enc_op_id = VNET_CRYPTO_OP_AES_192_CBC_ENC;
   a->dec_op_id = VNET_CRYPTO_OP_AES_192_CBC_DEC;
   a->alg = VNET_CRYPTO_ALG_AES_192_CBC;
-  a->iv_size = a->block_size = 16;
+  a->iv_size = a->block_align = 16;
 
   a = im->crypto_algs + IPSEC_CRYPTO_ALG_AES_CBC_256;
   a->enc_op_id = VNET_CRYPTO_OP_AES_256_CBC_ENC;
   a->dec_op_id = VNET_CRYPTO_OP_AES_256_CBC_DEC;
   a->alg = VNET_CRYPTO_ALG_AES_256_CBC;
-  a->iv_size = a->block_size = 16;
+  a->iv_size = a->block_align = 16;
+
+  a = im->crypto_algs + IPSEC_CRYPTO_ALG_AES_CTR_128;
+  a->enc_op_id = VNET_CRYPTO_OP_AES_128_CTR_ENC;
+  a->dec_op_id = VNET_CRYPTO_OP_AES_128_CTR_DEC;
+  a->alg = VNET_CRYPTO_ALG_AES_128_CTR;
+  a->iv_size = 8;
+  a->block_align = 1;
+
+  a = im->crypto_algs + IPSEC_CRYPTO_ALG_AES_CTR_192;
+  a->enc_op_id = VNET_CRYPTO_OP_AES_192_CTR_ENC;
+  a->dec_op_id = VNET_CRYPTO_OP_AES_192_CTR_DEC;
+  a->alg = VNET_CRYPTO_ALG_AES_192_CTR;
+  a->iv_size = 8;
+  a->block_align = 1;
+
+  a = im->crypto_algs + IPSEC_CRYPTO_ALG_AES_CTR_256;
+  a->enc_op_id = VNET_CRYPTO_OP_AES_256_CTR_ENC;
+  a->dec_op_id = VNET_CRYPTO_OP_AES_256_CTR_DEC;
+  a->alg = VNET_CRYPTO_ALG_AES_256_CTR;
+  a->iv_size = 8;
+  a->block_align = 1;
 
   a = im->crypto_algs + IPSEC_CRYPTO_ALG_AES_GCM_128;
   a->enc_op_id = VNET_CRYPTO_OP_AES_128_GCM_ENC;
   a->dec_op_id = VNET_CRYPTO_OP_AES_128_GCM_DEC;
   a->alg = VNET_CRYPTO_ALG_AES_128_GCM;
   a->iv_size = 8;
-  a->block_size = 16;
+  a->block_align = 1;
   a->icv_size = 16;
 
   a = im->crypto_algs + IPSEC_CRYPTO_ALG_AES_GCM_192;
@@ -440,7 +495,7 @@ ipsec_init (vlib_main_t * vm)
   a->dec_op_id = VNET_CRYPTO_OP_AES_192_GCM_DEC;
   a->alg = VNET_CRYPTO_ALG_AES_192_GCM;
   a->iv_size = 8;
-  a->block_size = 16;
+  a->block_align = 1;
   a->icv_size = 16;
 
   a = im->crypto_algs + IPSEC_CRYPTO_ALG_AES_GCM_256;
@@ -448,7 +503,7 @@ ipsec_init (vlib_main_t * vm)
   a->dec_op_id = VNET_CRYPTO_OP_AES_256_GCM_DEC;
   a->alg = VNET_CRYPTO_ALG_AES_256_GCM;
   a->iv_size = 8;
-  a->block_size = 16;
+  a->block_align = 1;
   a->icv_size = 16;
 
   vec_validate (im->integ_algs, IPSEC_INTEG_N_ALG - 1);
@@ -486,32 +541,6 @@ ipsec_init (vlib_main_t * vm)
 
   vec_validate_aligned (im->ptd, vlib_num_workers (), CLIB_CACHE_LINE_BYTES);
 
-  im->ah4_enc_fq_index =
-    vlib_frame_queue_main_init (ah4_encrypt_node.index, 0);
-  im->ah4_dec_fq_index =
-    vlib_frame_queue_main_init (ah4_decrypt_node.index, 0);
-  im->ah6_enc_fq_index =
-    vlib_frame_queue_main_init (ah6_encrypt_node.index, 0);
-  im->ah6_dec_fq_index =
-    vlib_frame_queue_main_init (ah6_decrypt_node.index, 0);
-
-  im->esp4_enc_fq_index =
-    vlib_frame_queue_main_init (esp4_encrypt_node.index, 0);
-  im->esp4_dec_fq_index =
-    vlib_frame_queue_main_init (esp4_decrypt_node.index, 0);
-  im->esp6_enc_fq_index =
-    vlib_frame_queue_main_init (esp6_encrypt_node.index, 0);
-  im->esp6_dec_fq_index =
-    vlib_frame_queue_main_init (esp6_decrypt_node.index, 0);
-  im->esp4_enc_tun_fq_index =
-    vlib_frame_queue_main_init (esp4_encrypt_tun_node.index, 0);
-  im->esp6_enc_tun_fq_index =
-    vlib_frame_queue_main_init (esp6_encrypt_tun_node.index, 0);
-  im->esp4_dec_tun_fq_index =
-    vlib_frame_queue_main_init (esp4_decrypt_tun_node.index, 0);
-  im->esp6_dec_tun_fq_index =
-    vlib_frame_queue_main_init (esp6_decrypt_tun_node.index, 0);
-
   im->async_mode = 0;
   crypto_engine_backend_register_post_node (vm);
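
Illustrative note (not part of the diff): the ipsec_register_udp_port / ipsec_unregister_udp_port pair added above keeps a per-port reference count in im->udp_port_registrations, so the udp-local registration that steers packets to ipsec4-tun-input is created only for the first user of a port and torn down only when the last user goes away. A minimal usage sketch, assuming two consumers of UDP encapsulation on the customary NAT-T port 4500:

  ipsec_register_udp_port (4500);   /* count 0 -> 1: udp_register_dst_port() is called   */
  ipsec_register_udp_port (4500);   /* count 1 -> 2: port already registered, count only */
  ipsec_unregister_udp_port (4500); /* count 2 -> 1: port stays registered               */
  ipsec_unregister_udp_port (4500); /* count 1 -> 0: udp_unregister_dst_port() is called */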