X-Git-Url: https://gerrit.fd.io/r/gitweb?a=blobdiff_plain;f=src%2Fvnet%2Fipsec%2Fesp_encrypt.c;h=9c775ef1e479b6908bf085d1f1eebf8f0e1ecfd7;hb=be5a5dd904d4d25857c53a4b5dee7951f724e3e2;hp=44ae22972af0aba3f828d991136d474b04cf3e68;hpb=cb33dc2d7a566d571c86b950b4aa92dd7ae01c3c;p=vpp.git

diff --git a/src/vnet/ipsec/esp_encrypt.c b/src/vnet/ipsec/esp_encrypt.c
index 44ae22972af..9c775ef1e47 100644
--- a/src/vnet/ipsec/esp_encrypt.c
+++ b/src/vnet/ipsec/esp_encrypt.c
@@ -18,10 +18,12 @@
 #include <vnet/vnet.h>
 #include <vnet/api_errno.h>
 #include <vnet/ip/ip.h>
+#include <vnet/udp/udp.h>
 
 #include <vnet/ipsec/ipsec.h>
 #include <vnet/ipsec/esp.h>
 
+ipsec_proto_main_t ipsec_proto_main;
 
 #define foreach_esp_encrypt_next                   \
 _(DROP, "error-drop")                              \
@@ -58,12 +60,11 @@ static char *esp_encrypt_error_strings[] = {
 #undef _
 };
 
-vlib_node_registration_t esp_encrypt_node;
-
 typedef struct
 {
   u32 spi;
   u32 seq;
+  u8 udp_encap;
   ipsec_crypto_alg_t crypto_alg;
   ipsec_integ_alg_t integ_alg;
 } esp_encrypt_trace_t;
@@ -76,32 +77,39 @@ format_esp_encrypt_trace (u8 * s, va_list * args)
   CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
   esp_encrypt_trace_t *t = va_arg (*args, esp_encrypt_trace_t *);
 
-  s = format (s, "esp: spi %u seq %u crypto %U integrity %U",
+  s = format (s, "esp: spi %u seq %u crypto %U integrity %U%s",
               t->spi, t->seq, format_ipsec_crypto_alg, t->crypto_alg,
-              format_ipsec_integ_alg, t->integ_alg);
+              format_ipsec_integ_alg, t->integ_alg,
+              t->udp_encap ? " udp-encap-enabled" : "");
   return s;
 }
 
 always_inline void
-esp_encrypt_aes_cbc (ipsec_crypto_alg_t alg,
-                     u8 * in, u8 * out, size_t in_len, u8 * key, u8 * iv)
+esp_encrypt_cbc (vlib_main_t * vm, ipsec_crypto_alg_t alg,
+                 u8 * in, u8 * out, size_t in_len, u8 * key, u8 * iv)
 {
-  esp_main_t *em = &esp_main;
-  u32 cpu_index = os_get_cpu_number ();
-  EVP_CIPHER_CTX *ctx = &(em->per_thread_data[cpu_index].encrypt_ctx);
+  ipsec_proto_main_t *em = &ipsec_proto_main;
+  u32 thread_index = vm->thread_index;
+#if OPENSSL_VERSION_NUMBER >= 0x10100000L
+  EVP_CIPHER_CTX *ctx = em->per_thread_data[thread_index].encrypt_ctx;
+#else
+  EVP_CIPHER_CTX *ctx = &(em->per_thread_data[thread_index].encrypt_ctx);
+#endif
   const EVP_CIPHER *cipher = NULL;
   int out_len;
 
   ASSERT (alg < IPSEC_CRYPTO_N_ALG);
 
-  if (PREDICT_FALSE (em->esp_crypto_algs[alg].type == IPSEC_CRYPTO_ALG_NONE))
+  if (PREDICT_FALSE
+      (em->ipsec_proto_main_crypto_algs[alg].type == IPSEC_CRYPTO_ALG_NONE))
     return;
 
-  if (PREDICT_FALSE (alg != em->per_thread_data[cpu_index].last_encrypt_alg))
+  if (PREDICT_FALSE
+      (alg != em->per_thread_data[thread_index].last_encrypt_alg))
     {
-      cipher = em->esp_crypto_algs[alg].type;
-      em->per_thread_data[cpu_index].last_encrypt_alg = alg;
+      cipher = em->ipsec_proto_main_crypto_algs[alg].type;
+      em->per_thread_data[thread_index].last_encrypt_alg = alg;
     }
 
   EVP_EncryptInit_ex (ctx, cipher, NULL, key, iv);
@@ -110,25 +118,33 @@ esp_encrypt_aes_cbc (ipsec_crypto_alg_t alg,
   EVP_EncryptFinal_ex (ctx, out + out_len, &out_len);
 }
 
-static uword
-esp_encrypt_node_fn (vlib_main_t * vm,
-                     vlib_node_runtime_t * node, vlib_frame_t * from_frame)
+always_inline uword
+esp_encrypt_inline (vlib_main_t * vm,
+                    vlib_node_runtime_t * node, vlib_frame_t * from_frame,
+                    int is_ip6)
 {
   u32 n_left_from, *from, *to_next = 0, next_index;
   from = vlib_frame_vector_args (from_frame);
   n_left_from = from_frame->n_vectors;
   ipsec_main_t *im = &ipsec_main;
+  ipsec_proto_main_t *em = &ipsec_proto_main;
   u32 *recycle = 0;
-  u32 cpu_index = os_get_cpu_number ();
+  u32 thread_index = vm->thread_index;
 
   ipsec_alloc_empty_buffers (vm, im);
 
-  u32 *empty_buffers = im->empty_buffers[cpu_index];
+  u32 *empty_buffers = im->empty_buffers[thread_index];
 
   if (PREDICT_FALSE (vec_len (empty_buffers) < n_left_from))
     {
-      vlib_node_increment_counter (vm, esp_encrypt_node.index,
-                                   ESP_ENCRYPT_ERROR_NO_BUFFER, n_left_from);
+      if (is_ip6)
+        vlib_node_increment_counter (vm, esp6_encrypt_node.index,
+                                     ESP_ENCRYPT_ERROR_NO_BUFFER,
+                                     n_left_from);
+      else
+        vlib_node_increment_counter (vm, esp4_encrypt_node.index,
+                                     ESP_ENCRYPT_ERROR_NO_BUFFER,
+                                     n_left_from);
       clib_warning ("no enough empty buffers. discarding frame");
       goto free_buffers_and_exit;
     }
@@ -147,13 +163,13 @@ esp_encrypt_node_fn (vlib_main_t * vm,
           vlib_buffer_t *i_b0, *o_b0 = 0;
           u32 sa_index0;
           ipsec_sa_t *sa0;
-          ip4_and_esp_header_t *ih0, *oh0 = 0;
+          ip4_and_esp_header_t *oh0 = 0;
           ip6_and_esp_header_t *ih6_0, *oh6_0 = 0;
+          ip4_and_udp_and_esp_header_t *iuh0, *ouh0 = 0;
          uword last_empty_buffer;
          esp_header_t *o_esp0;
          esp_footer_t *f0;
-          u8 is_ipv6;
-          u8 ip_hdr_size;
+          u8 ip_udp_hdr_size;
          u8 next_hdr_type;
          u32 ip_proto = 0;
          u8 transport_mode = 0;
@@ -173,8 +189,12 @@ esp_encrypt_node_fn (vlib_main_t * vm,
            {
              clib_warning ("sequence number counter has cycled SPI %u",
                            sa0->spi);
-              vlib_node_increment_counter (vm, esp_encrypt_node.index,
-                                           ESP_ENCRYPT_ERROR_SEQ_CYCLED, 1);
+              if (is_ip6)
+                vlib_node_increment_counter (vm, esp6_encrypt_node.index,
+                                             ESP_ENCRYPT_ERROR_SEQ_CYCLED, 1);
+              else
+                vlib_node_increment_counter (vm, esp4_encrypt_node.index,
+                                             ESP_ENCRYPT_ERROR_SEQ_CYCLED, 1);
              //TODO: rekey SA
              o_bi0 = i_bi0;
              to_next[0] = o_bi0;
@@ -190,7 +210,7 @@ esp_encrypt_node_fn (vlib_main_t * vm,
          o_b0 = vlib_get_buffer (vm, o_bi0);
          o_b0->flags = VLIB_BUFFER_TOTAL_LENGTH_VALID;
          o_b0->current_data = sizeof (ethernet_header_t);
-          ih0 = vlib_buffer_get_current (i_b0);
+          iuh0 = vlib_buffer_get_current (i_b0);
          vlib_prefetch_buffer_with_index (vm,
                                           empty_buffers[last_empty_buffer -
                                                         1], STORE);
@@ -201,20 +221,17 @@ esp_encrypt_node_fn (vlib_main_t * vm,
          /* add old buffer to the recycle list */
          vec_add1 (recycle, i_bi0);
 
-          /* is ipv6 */
-          if (PREDICT_FALSE
-              ((ih0->ip4.ip_version_and_header_length & 0xF0) == 0x60))
+          if (is_ip6)
            {
-              is_ipv6 = 1;
              ih6_0 = vlib_buffer_get_current (i_b0);
-              ip_hdr_size = sizeof (ip6_header_t);
              next_hdr_type = IP_PROTOCOL_IPV6;
              oh6_0 = vlib_buffer_get_current (o_b0);
-              o_esp0 = vlib_buffer_get_current (o_b0) + sizeof (ip6_header_t);
 
              oh6_0->ip6.ip_version_traffic_class_and_flow_label =
                ih6_0->ip6.ip_version_traffic_class_and_flow_label;
              oh6_0->ip6.protocol = IP_PROTOCOL_IPSEC_ESP;
+              ip_udp_hdr_size = sizeof (ip6_header_t);
+              o_esp0 = vlib_buffer_get_current (o_b0) + ip_udp_hdr_size;
              oh6_0->ip6.hop_limit = 254;
              oh6_0->ip6.src_address.as_u64[0] =
                ih6_0->ip6.src_address.as_u64[0];
@@ -224,44 +241,57 @@ esp_encrypt_node_fn (vlib_main_t * vm,
                ih6_0->ip6.dst_address.as_u64[0];
              oh6_0->ip6.dst_address.as_u64[1] =
                ih6_0->ip6.dst_address.as_u64[1];
-              oh6_0->esp.spi = clib_net_to_host_u32 (sa0->spi);
-              oh6_0->esp.seq = clib_net_to_host_u32 (sa0->seq);
+              o_esp0->spi = clib_net_to_host_u32 (sa0->spi);
+              o_esp0->seq = clib_net_to_host_u32 (sa0->seq);
              ip_proto = ih6_0->ip6.protocol;
 
              next0 = ESP_ENCRYPT_NEXT_IP6_LOOKUP;
            }
          else
            {
-              is_ipv6 = 0;
-              ip_hdr_size = sizeof (ip4_header_t);
              next_hdr_type = IP_PROTOCOL_IP_IN_IP;
              oh0 = vlib_buffer_get_current (o_b0);
-              o_esp0 = vlib_buffer_get_current (o_b0) + sizeof (ip4_header_t);
+              ouh0 = vlib_buffer_get_current (o_b0);
 
              oh0->ip4.ip_version_and_header_length = 0x45;
-              oh0->ip4.tos = ih0->ip4.tos;
+              oh0->ip4.tos = iuh0->ip4.tos;
              oh0->ip4.fragment_id = 0;
              oh0->ip4.flags_and_fragment_offset = 0;
              oh0->ip4.ttl = 254;
-              oh0->ip4.protocol = IP_PROTOCOL_IPSEC_ESP;
-              oh0->ip4.src_address.as_u32 = ih0->ip4.src_address.as_u32;
-              oh0->ip4.dst_address.as_u32 = ih0->ip4.dst_address.as_u32;
-              oh0->esp.spi = clib_net_to_host_u32 (sa0->spi);
-              oh0->esp.seq = clib_net_to_host_u32 (sa0->seq);
-              ip_proto = ih0->ip4.protocol;
+              if (sa0->udp_encap)
+                {
+                  ouh0->udp.src_port =
+                    clib_host_to_net_u16 (UDP_DST_PORT_ipsec);
+                  ouh0->udp.dst_port =
+                    clib_host_to_net_u16 (UDP_DST_PORT_ipsec);
+                  ouh0->udp.checksum = 0;
+                  ouh0->ip4.protocol = IP_PROTOCOL_UDP;
+                  ip_udp_hdr_size =
+                    sizeof (udp_header_t) + sizeof (ip4_header_t);
+                }
+              else
+                {
+                  oh0->ip4.protocol = IP_PROTOCOL_IPSEC_ESP;
+                  ip_udp_hdr_size = sizeof (ip4_header_t);
+                }
+              o_esp0 = vlib_buffer_get_current (o_b0) + ip_udp_hdr_size;
+              oh0->ip4.src_address.as_u32 = iuh0->ip4.src_address.as_u32;
+              oh0->ip4.dst_address.as_u32 = iuh0->ip4.dst_address.as_u32;
+              o_esp0->spi = clib_net_to_host_u32 (sa0->spi);
+              o_esp0->seq = clib_net_to_host_u32 (sa0->seq);
+              ip_proto = iuh0->ip4.protocol;
 
              next0 = ESP_ENCRYPT_NEXT_IP4_LOOKUP;
            }
 
-          if (PREDICT_TRUE
-              (!is_ipv6 && sa0->is_tunnel && !sa0->is_tunnel_ip6))
+          if (PREDICT_TRUE (!is_ip6 && sa0->is_tunnel && !sa0->is_tunnel_ip6))
            {
              oh0->ip4.src_address.as_u32 = sa0->tunnel_src_addr.ip4.as_u32;
              oh0->ip4.dst_address.as_u32 = sa0->tunnel_dst_addr.ip4.as_u32;
 
              vnet_buffer (o_b0)->sw_if_index[VLIB_TX] = (u32) ~ 0;
            }
-          else if (is_ipv6 && sa0->is_tunnel && sa0->is_tunnel_ip6)
+          else if (is_ip6 && sa0->is_tunnel && sa0->is_tunnel_ip6)
            {
              oh6_0->ip6.src_address.as_u64[0] =
                sa0->tunnel_src_addr.ip6.as_u64[0];
@@ -291,7 +321,7 @@ esp_encrypt_node_fn (vlib_main_t * vm,
                  vnet_buffer (o_b0)->sw_if_index[VLIB_TX] =
                    vnet_buffer (i_b0)->sw_if_index[VLIB_TX];
                }
-              vlib_buffer_advance (i_b0, ip_hdr_size);
+              vlib_buffer_advance (i_b0, ip_udp_hdr_size);
            }
 
          ASSERT (sa0->crypto_alg < IPSEC_CRYPTO_N_ALG);
@@ -299,8 +329,10 @@ esp_encrypt_node_fn (vlib_main_t * vm,
 
          if (PREDICT_TRUE (sa0->crypto_alg != IPSEC_CRYPTO_ALG_NONE))
            {
-              const int BLOCK_SIZE = 16;
-              const int IV_SIZE = 16;
+              const int BLOCK_SIZE =
+                em->ipsec_proto_main_crypto_algs[sa0->crypto_alg].block_size;
+              const int IV_SIZE =
+                em->ipsec_proto_main_crypto_algs[sa0->crypto_alg].iv_size;
              int blocks = 1 + (i_b0->current_length + 1) / BLOCK_SIZE;
 
              /* pad packet in input buffer */
@@ -317,37 +349,40 @@ esp_encrypt_node_fn (vlib_main_t * vm,
              f0->pad_length = pad_bytes;
              f0->next_header = next_hdr_type;
 
-              o_b0->current_length = ip_hdr_size + sizeof (esp_header_t) +
+              o_b0->current_length = ip_udp_hdr_size + sizeof (esp_header_t) +
                BLOCK_SIZE * blocks + IV_SIZE;
 
              vnet_buffer (o_b0)->sw_if_index[VLIB_RX] =
                vnet_buffer (i_b0)->sw_if_index[VLIB_RX];
 
-              u8 iv[16];
+              u8 iv[em->
+                    ipsec_proto_main_crypto_algs[sa0->crypto_alg].iv_size];
              RAND_bytes (iv, sizeof (iv));
 
              clib_memcpy ((u8 *) vlib_buffer_get_current (o_b0) +
-                           ip_hdr_size + sizeof (esp_header_t), iv, 16);
-
-              esp_encrypt_aes_cbc (sa0->crypto_alg,
-                                   (u8 *) vlib_buffer_get_current (i_b0),
-                                   (u8 *) vlib_buffer_get_current (o_b0) +
-                                   ip_hdr_size + sizeof (esp_header_t) +
-                                   IV_SIZE, BLOCK_SIZE * blocks,
-                                   sa0->crypto_key, iv);
+                           ip_udp_hdr_size + sizeof (esp_header_t), iv,
+                           em->ipsec_proto_main_crypto_algs[sa0->
                                                            crypto_alg].iv_size);
+
+              esp_encrypt_cbc (vm, sa0->crypto_alg,
+                               (u8 *) vlib_buffer_get_current (i_b0),
+                               (u8 *) vlib_buffer_get_current (o_b0) +
+                               ip_udp_hdr_size + sizeof (esp_header_t) +
+                               IV_SIZE, BLOCK_SIZE * blocks,
+                               sa0->crypto_key, iv);
            }
 
          o_b0->current_length +=
            hmac_calc (sa0->integ_alg, sa0->integ_key,
                       sa0->integ_key_len, (u8 *) o_esp0,
                      o_b0->current_length -
-                      ip_hdr_size,
+                      ip_udp_hdr_size,
                      vlib_buffer_get_current (o_b0) +
                      o_b0->current_length, sa0->use_esn, sa0->seq_hi);
 
-          if (PREDICT_FALSE (is_ipv6))
+          if (is_ip6)
            {
              oh6_0->ip6.payload_length =
                clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, o_b0) -
@@ -358,6 +393,13 @@ esp_encrypt_node_fn (vlib_main_t * vm,
              oh0->ip4.length =
                clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, o_b0));
              oh0->ip4.checksum = ip4_header_checksum (&oh0->ip4);
+              if (sa0->udp_encap)
+                {
+                  ouh0->udp.length =
+                    clib_host_to_net_u16 (clib_net_to_host_u16
+                                          (oh0->ip4.length) -
+                                          ip4_header_bytes (&oh0->ip4));
+                }
            }
 
          if (transport_mode)
@@ -374,6 +416,7 @@ esp_encrypt_node_fn (vlib_main_t * vm,
                    vlib_add_trace (vm, node, o_b0, sizeof (*tr));
                  tr->spi = sa0->spi;
                  tr->seq = sa0->seq - 1;
+                  tr->udp_encap = sa0->udp_encap;
                  tr->crypto_alg = sa0->crypto_alg;
                  tr->integ_alg = sa0->integ_alg;
                }
@@ -385,9 +428,14 @@ esp_encrypt_node_fn (vlib_main_t * vm,
        }
      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }
-  vlib_node_increment_counter (vm, esp_encrypt_node.index,
-                               ESP_ENCRYPT_ERROR_RX_PKTS,
-                               from_frame->n_vectors);
+  if (is_ip6)
+    vlib_node_increment_counter (vm, esp6_encrypt_node.index,
+                                 ESP_ENCRYPT_ERROR_RX_PKTS,
+                                 from_frame->n_vectors);
+  else
+    vlib_node_increment_counter (vm, esp4_encrypt_node.index,
+                                 ESP_ENCRYPT_ERROR_RX_PKTS,
+                                 from_frame->n_vectors);
 
 free_buffers_and_exit:
   if (recycle)
@@ -396,11 +444,46 @@ free_buffers_and_exit:
   return from_frame->n_vectors;
 }
 
+static uword
+esp4_encrypt_node_fn (vlib_main_t * vm,
+                      vlib_node_runtime_t * node, vlib_frame_t * from_frame)
+{
+  return esp_encrypt_inline (vm, node, from_frame, 0 /* is_ip6 */ );
+}
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (esp4_encrypt_node) = {
+  .function = esp4_encrypt_node_fn,
+  .name = "esp4-encrypt",
+  .vector_size = sizeof (u32),
+  .format_trace = format_esp_encrypt_trace,
+  .type = VLIB_NODE_TYPE_INTERNAL,
+
+  .n_errors = ARRAY_LEN(esp_encrypt_error_strings),
+  .error_strings = esp_encrypt_error_strings,
+
+  .n_next_nodes = ESP_ENCRYPT_N_NEXT,
+  .next_nodes = {
+#define _(s,n) [ESP_ENCRYPT_NEXT_##s] = n,
+    foreach_esp_encrypt_next
+#undef _
+  },
+};
+/* *INDENT-ON* */
+
+VLIB_NODE_FUNCTION_MULTIARCH (esp4_encrypt_node, esp4_encrypt_node_fn);
+
+static uword
+esp6_encrypt_node_fn (vlib_main_t * vm,
+                      vlib_node_runtime_t * node, vlib_frame_t * from_frame)
+{
+  return esp_encrypt_inline (vm, node, from_frame, 1 /* is_ip6 */ );
+}
 
 /* *INDENT-OFF* */
-VLIB_REGISTER_NODE (esp_encrypt_node) = {
-  .function = esp_encrypt_node_fn,
-  .name = "esp-encrypt",
+VLIB_REGISTER_NODE (esp6_encrypt_node) = {
+  .function = esp6_encrypt_node_fn,
+  .name = "esp6-encrypt",
  .vector_size = sizeof (u32),
  .format_trace = format_esp_encrypt_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
@@ -417,7 +500,7 @@ VLIB_REGISTER_NODE (esp_encrypt_node) = {
 };
 /* *INDENT-ON* */
 
-VLIB_NODE_FUNCTION_MULTIARCH (esp_encrypt_node, esp_encrypt_node_fn)
+VLIB_NODE_FUNCTION_MULTIARCH (esp6_encrypt_node, esp6_encrypt_node_fn);
 /*
  * fd.io coding-style-patch-verification: ON
 *