diff --git a/src/vnet/ipsec/esp_encrypt.c b/src/vnet/ipsec/esp_encrypt.c
index 3e196b34869..7df537a5555 100644
--- a/src/vnet/ipsec/esp_encrypt.c
+++ b/src/vnet/ipsec/esp_encrypt.c
@@ -18,16 +18,17 @@
 #include <vnet/vnet.h>
 #include <vnet/api_errno.h>
 #include <vnet/ip/ip.h>
+#include <vnet/udp/udp.h>
+
+#include <vnet/crypto/crypto.h>
 #include <vnet/ipsec/ipsec.h>
+#include <vnet/ipsec/ipsec_tun.h>
 #include <vnet/ipsec/esp.h>
 
-ipsec_proto_main_t ipsec_proto_main;
-
 #define foreach_esp_encrypt_next \
 _(DROP, "error-drop") \
-_(IP4_LOOKUP, "ip4-lookup") \
-_(IP6_LOOKUP, "ip6-lookup") \
+_(HANDOFF, "handoff") \
 _(INTERFACE_OUTPUT, "interface-output")
 
 #define _(v, s) ESP_ENCRYPT_NEXT_##v,
@@ -38,12 +39,12 @@ typedef enum
   ESP_ENCRYPT_N_NEXT,
 } esp_encrypt_next_t;
 
-#define foreach_esp_encrypt_error \
-  _(RX_PKTS, "ESP pkts received") \
-  _(NO_BUFFER, "No buffer (packet dropped)") \
-  _(DECRYPTION_FAILED, "ESP encryption failed") \
-  _(SEQ_CYCLED, "sequence number cycled")
-
+#define foreach_esp_encrypt_error \
+  _(RX_PKTS, "ESP pkts received") \
+  _(SEQ_CYCLED, "sequence number cycled (packet dropped)") \
+  _(CRYPTO_ENGINE_ERROR, "crypto engine error (packet dropped)") \
+  _(NO_BUFFERS, "no buffers (packet dropped)") \
+  _(NO_TRAILER_SPACE, "no trailer space (packet dropped)")
 
 typedef enum
 {
@@ -59,12 +60,13 @@ static char *esp_encrypt_error_strings[] = {
 #undef _
 };
 
-vlib_node_registration_t esp_encrypt_node;
-
 typedef struct
 {
+  u32 sa_index;
   u32 spi;
   u32 seq;
+  u32 sa_seq_hi;
+  u8 udp_encap;
   ipsec_crypto_alg_t crypto_alg;
   ipsec_integ_alg_t integ_alg;
 } esp_encrypt_trace_t;
@@ -77,337 +79,659 @@ format_esp_encrypt_trace (u8 * s, va_list * args)
   CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
   esp_encrypt_trace_t *t = va_arg (*args, esp_encrypt_trace_t *);
 
-  s = format (s, "esp: spi %u seq %u crypto %U integrity %U",
-	      t->spi, t->seq,
-	      format_ipsec_crypto_alg, t->crypto_alg,
-	      format_ipsec_integ_alg, t->integ_alg);
+  s =
+    format (s,
+	    "esp: sa-index %d spi %u (0x%08x) seq %u sa-seq-hi %u crypto %U integrity %U%s",
+	    t->sa_index, t->spi, t->spi, t->seq, t->sa_seq_hi,
+	    format_ipsec_crypto_alg,
+	    t->crypto_alg, format_ipsec_integ_alg, t->integ_alg,
+	    t->udp_encap ? 
" udp-encap-enabled" : ""); return s; } -always_inline void -esp_encrypt_aes_cbc (ipsec_crypto_alg_t alg, - u8 * in, u8 * out, size_t in_len, u8 * key, u8 * iv) +/* pad packet in input buffer */ +static_always_inline u8 * +esp_add_footer_and_icv (vlib_buffer_t * b, u8 block_size, u8 icv_sz, + u16 * next, vlib_node_runtime_t * node, + u16 buffer_data_size, uword total_len) +{ + static const u8 pad_data[ESP_MAX_BLOCK_SIZE] = { + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, + 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x00, 0x00, + }; + + u16 min_length = total_len + sizeof (esp_footer_t); + u16 new_length = round_pow2 (min_length, block_size); + u8 pad_bytes = new_length - min_length; + esp_footer_t *f = (esp_footer_t *) (vlib_buffer_get_current (b) + + b->current_length + pad_bytes); + u16 tail_sz = sizeof (esp_footer_t) + pad_bytes + icv_sz; + + if (b->current_data + tail_sz > buffer_data_size) + { + // TODO alloc new buffer + b->error = node->errors[ESP_ENCRYPT_ERROR_NO_TRAILER_SPACE]; + next[0] = ESP_ENCRYPT_NEXT_DROP; + return 0; + } + + if (pad_bytes) + { + ASSERT (pad_bytes <= ESP_MAX_BLOCK_SIZE); + pad_bytes = clib_min (ESP_MAX_BLOCK_SIZE, pad_bytes); + clib_memcpy_fast ((u8 *) f - pad_bytes, pad_data, pad_bytes); + } + + f->pad_length = pad_bytes; + b->current_length += tail_sz; + return &f->next_header; +} + +static_always_inline void +esp_update_ip4_hdr (ip4_header_t * ip4, u16 len, int is_transport, int is_udp) +{ + ip_csum_t sum; + u16 old_len; + + len = clib_net_to_host_u16 (len); + old_len = ip4->length; + + if (is_transport) + { + u8 prot = is_udp ? IP_PROTOCOL_UDP : IP_PROTOCOL_IPSEC_ESP; + + sum = ip_csum_update (ip4->checksum, ip4->protocol, + prot, ip4_header_t, protocol); + ip4->protocol = prot; + + sum = ip_csum_update (sum, old_len, len, ip4_header_t, length); + } + else + sum = ip_csum_update (ip4->checksum, old_len, len, ip4_header_t, length); + + ip4->length = len; + ip4->checksum = ip_csum_fold (sum); +} + +static_always_inline void +esp_fill_udp_hdr (ipsec_sa_t * sa, udp_header_t * udp, u16 len) +{ + clib_memcpy_fast (udp, &sa->udp_hdr, sizeof (udp_header_t)); + udp->length = clib_net_to_host_u16 (len); +} + +static_always_inline u8 +ext_hdr_is_pre_esp (u8 nexthdr) { - ipsec_proto_main_t *em = &ipsec_proto_main; - u32 thread_index = vlib_get_thread_index (); -#if OPENSSL_VERSION_NUMBER >= 0x10100000L - EVP_CIPHER_CTX *ctx = em->per_thread_data[thread_index].encrypt_ctx; +#ifdef CLIB_HAVE_VEC128 + static const u8x16 ext_hdr_types = { + IP_PROTOCOL_IP6_HOP_BY_HOP_OPTIONS, + IP_PROTOCOL_IPV6_ROUTE, + IP_PROTOCOL_IPV6_FRAGMENTATION, + }; + + return !u8x16_is_all_zero (ext_hdr_types == u8x16_splat (nexthdr)); #else - EVP_CIPHER_CTX *ctx = &(em->per_thread_data[thread_index].encrypt_ctx); + return ((nexthdr ^ IP_PROTOCOL_IP6_HOP_BY_HOP_OPTIONS) | + (nexthdr ^ IP_PROTOCOL_IPV6_ROUTE) | + (nexthdr ^ IP_PROTOCOL_IPV6_FRAGMENTATION) != 0); #endif - const EVP_CIPHER *cipher = NULL; - int out_len; +} + +static_always_inline u8 +esp_get_ip6_hdr_len (ip6_header_t * ip6, ip6_ext_header_t ** ext_hdr) +{ + /* this code assumes that HbH, route and frag headers will be before + others, if that is not the case, they will end up encrypted */ + u8 len = sizeof (ip6_header_t); + ip6_ext_header_t *p; - ASSERT (alg < IPSEC_CRYPTO_N_ALG); + /* if next packet doesn't have ext header */ + if (ext_hdr_is_pre_esp (ip6->protocol) == 0) + { + *ext_hdr = NULL; + return len; + } - if (PREDICT_FALSE - (em->ipsec_proto_main_crypto_algs[alg].type == IPSEC_CRYPTO_ALG_NONE)) - return; + p = (void *) (ip6 + 
1); + len += ip6_ext_header_len (p); - if (PREDICT_FALSE - (alg != em->per_thread_data[thread_index].last_encrypt_alg)) + while (ext_hdr_is_pre_esp (p->next_hdr)) { - cipher = em->ipsec_proto_main_crypto_algs[alg].type; - em->per_thread_data[thread_index].last_encrypt_alg = alg; + len += ip6_ext_header_len (p); + p = ip6_ext_next_header (p); } - EVP_EncryptInit_ex (ctx, cipher, NULL, key, iv); + *ext_hdr = p; + return len; +} + +static_always_inline void +esp_process_chained_ops (vlib_main_t * vm, vlib_node_runtime_t * node, + vnet_crypto_op_t * ops, vlib_buffer_t * b[], + u16 * nexts, vnet_crypto_op_chunk_t * chunks) +{ + u32 n_fail, n_ops = vec_len (ops); + vnet_crypto_op_t *op = ops; + + if (n_ops == 0) + return; + + n_fail = n_ops - vnet_crypto_process_chained_ops (vm, op, chunks, n_ops); + + while (n_fail) + { + ASSERT (op - ops < n_ops); - EVP_EncryptUpdate (ctx, out, &out_len, in, in_len); - EVP_EncryptFinal_ex (ctx, out + out_len, &out_len); + if (op->status != VNET_CRYPTO_OP_STATUS_COMPLETED) + { + u32 bi = op->user_data; + b[bi]->error = node->errors[ESP_ENCRYPT_ERROR_CRYPTO_ENGINE_ERROR]; + nexts[bi] = ESP_ENCRYPT_NEXT_DROP; + n_fail--; + } + op++; + } } -static uword -esp_encrypt_node_fn (vlib_main_t * vm, - vlib_node_runtime_t * node, vlib_frame_t * from_frame) +static_always_inline void +esp_process_ops (vlib_main_t * vm, vlib_node_runtime_t * node, + vnet_crypto_op_t * ops, vlib_buffer_t * b[], u16 * nexts) { - u32 n_left_from, *from, *to_next = 0, next_index; - from = vlib_frame_vector_args (from_frame); - n_left_from = from_frame->n_vectors; - ipsec_main_t *im = &ipsec_main; - u32 *recycle = 0; - u32 thread_index = vlib_get_thread_index (); + u32 n_fail, n_ops = vec_len (ops); + vnet_crypto_op_t *op = ops; - ipsec_alloc_empty_buffers (vm, im); + if (n_ops == 0) + return; - u32 *empty_buffers = im->empty_buffers[thread_index]; + n_fail = n_ops - vnet_crypto_process_ops (vm, op, n_ops); - if (PREDICT_FALSE (vec_len (empty_buffers) < n_left_from)) + while (n_fail) { - vlib_node_increment_counter (vm, esp_encrypt_node.index, - ESP_ENCRYPT_ERROR_NO_BUFFER, n_left_from); - clib_warning ("no enough empty buffers. 
discarding frame"); - goto free_buffers_and_exit; + ASSERT (op - ops < n_ops); + + if (op->status != VNET_CRYPTO_OP_STATUS_COMPLETED) + { + u32 bi = op->user_data; + b[bi]->error = node->errors[ESP_ENCRYPT_ERROR_CRYPTO_ENGINE_ERROR]; + nexts[bi] = ESP_ENCRYPT_NEXT_DROP; + n_fail--; + } + op++; } +} + +typedef struct +{ + u32 salt; + u64 iv; +} __clib_packed esp_gcm_nonce_t; - next_index = node->cached_next_index; +STATIC_ASSERT_SIZEOF (esp_gcm_nonce_t, 12); - while (n_left_from > 0) +always_inline uword +esp_encrypt_inline (vlib_main_t * vm, vlib_node_runtime_t * node, + vlib_frame_t * frame, int is_ip6, int is_tun) +{ + ipsec_main_t *im = &ipsec_main; + ipsec_per_thread_data_t *ptd = vec_elt_at_index (im->ptd, vm->thread_index); + u32 *from = vlib_frame_vector_args (frame); + u32 n_left = frame->n_vectors; + vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs; + u16 nexts[VLIB_FRAME_SIZE], *next = nexts; + esp_gcm_nonce_t nonces[VLIB_FRAME_SIZE], *nonce = nonces; + u32 thread_index = vm->thread_index; + u16 buffer_data_size = vlib_buffer_get_default_data_size (vm); + u32 current_sa_index = ~0, current_sa_packets = 0; + u32 current_sa_bytes = 0, spi = 0; + u8 block_sz = 0, iv_sz = 0, icv_sz = 0; + ipsec_sa_t *sa0 = 0; + vnet_crypto_op_chunk_t *ch; + vlib_buffer_t *lb; + vnet_crypto_op_t **crypto_ops = &ptd->crypto_ops; + vnet_crypto_op_t **integ_ops = &ptd->integ_ops; + + vlib_get_buffers (vm, from, b, n_left); + vec_reset_length (ptd->crypto_ops); + vec_reset_length (ptd->integ_ops); + vec_reset_length (ptd->chained_crypto_ops); + vec_reset_length (ptd->chained_integ_ops); + vec_reset_length (ptd->chunks); + + while (n_left > 0) { - u32 n_left_to_next; + u32 sa_index0; + dpo_id_t *dpo; + esp_header_t *esp; + u8 *payload, *next_hdr_ptr; + u16 payload_len, payload_len_total, n_bufs; + u32 hdr_len, config_index; + + if (n_left > 2) + { + u8 *p; + vlib_prefetch_buffer_header (b[2], LOAD); + p = vlib_buffer_get_current (b[1]); + CLIB_PREFETCH (p, CLIB_CACHE_LINE_BYTES, LOAD); + p -= CLIB_CACHE_LINE_BYTES; + CLIB_PREFETCH (p, CLIB_CACHE_LINE_BYTES, LOAD); + } - vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next); + if (is_tun) + { + /* we are on a ipsec tunnel's feature arc */ + u32 next0 = 0; + config_index = b[0]->current_config_index; + vnet_feature_next (&next0, b[0]); + next[0] = next0; + vnet_buffer (b[0])->ipsec.sad_index = + sa_index0 = ipsec_tun_protect_get_sa_out + (vnet_buffer (b[0])->ip.adj_index[VLIB_TX]); + } + else + sa_index0 = vnet_buffer (b[0])->ipsec.sad_index; - while (n_left_from > 0 && n_left_to_next > 0) + if (sa_index0 != current_sa_index) { - u32 i_bi0, o_bi0, next0; - vlib_buffer_t *i_b0, *o_b0 = 0; - u32 sa_index0; - ipsec_sa_t *sa0; - ip4_and_esp_header_t *ih0, *oh0 = 0; - ip6_and_esp_header_t *ih6_0, *oh6_0 = 0; - uword last_empty_buffer; - esp_header_t *o_esp0; - esp_footer_t *f0; - u8 is_ipv6; - u8 ip_hdr_size; - u8 next_hdr_type; - u32 ip_proto = 0; - u8 transport_mode = 0; - - i_bi0 = from[0]; - from += 1; - n_left_from -= 1; - n_left_to_next -= 1; - - next0 = ESP_ENCRYPT_NEXT_DROP; - - i_b0 = vlib_get_buffer (vm, i_bi0); - sa_index0 = vnet_buffer (i_b0)->ipsec.sad_index; + if (current_sa_packets) + vlib_increment_combined_counter (&ipsec_sa_counters, thread_index, + current_sa_index, + current_sa_packets, + current_sa_bytes); + current_sa_packets = current_sa_bytes = 0; + sa0 = pool_elt_at_index (im->sad, sa_index0); + current_sa_index = sa_index0; + spi = clib_net_to_host_u32 (sa0->spi); + block_sz = sa0->crypto_block_size; + icv_sz = 
sa0->integ_icv_size; + iv_sz = sa0->crypto_iv_size; + } + + if (PREDICT_FALSE (~0 == sa0->encrypt_thread_index)) + { + /* this is the first packet to use this SA, claim the SA + * for this thread. this could happen simultaneously on + * another thread */ + clib_atomic_cmp_and_swap (&sa0->encrypt_thread_index, ~0, + ipsec_sa_assign_thread (thread_index)); + } + + if (PREDICT_TRUE (thread_index != sa0->encrypt_thread_index)) + { + next[0] = ESP_ENCRYPT_NEXT_HANDOFF; + if (is_tun) + { + b[0]->current_config_index = config_index; + } + goto trace; + } + + lb = b[0]; + n_bufs = vlib_buffer_chain_linearize (vm, b[0]); + if (n_bufs == 0) + { + b[0]->error = node->errors[ESP_ENCRYPT_ERROR_NO_BUFFERS]; + next[0] = ESP_ENCRYPT_NEXT_DROP; + goto trace; + } + + if (n_bufs > 1) + { + crypto_ops = &ptd->chained_crypto_ops; + integ_ops = &ptd->chained_integ_ops; + + /* find last buffer in the chain */ + while (lb->flags & VLIB_BUFFER_NEXT_PRESENT) + lb = vlib_get_buffer (vm, lb->next_buffer); + } + else + { + crypto_ops = &ptd->crypto_ops; + integ_ops = &ptd->integ_ops; + } + + if (PREDICT_FALSE (esp_seq_advance (sa0))) + { + b[0]->error = node->errors[ESP_ENCRYPT_ERROR_SEQ_CYCLED]; + next[0] = ESP_ENCRYPT_NEXT_DROP; + goto trace; + } - if (PREDICT_FALSE (esp_seq_advance (sa0))) + /* space for IV */ + hdr_len = iv_sz; + + if (ipsec_sa_is_set_IS_TUNNEL (sa0)) + { + payload = vlib_buffer_get_current (b[0]); + next_hdr_ptr = esp_add_footer_and_icv (lb, block_sz, icv_sz, + next, node, + buffer_data_size, + vlib_buffer_length_in_chain + (vm, b[0])); + if (!next_hdr_ptr) + goto trace; + b[0]->flags &= ~VLIB_BUFFER_TOTAL_LENGTH_VALID; + payload_len = b[0]->current_length; + payload_len_total = vlib_buffer_length_in_chain (vm, b[0]); + + /* ESP header */ + hdr_len += sizeof (*esp); + esp = (esp_header_t *) (payload - hdr_len); + + /* optional UDP header */ + if (ipsec_sa_is_set_UDP_ENCAP (sa0)) { - clib_warning ("sequence number counter has cycled SPI %u", - sa0->spi); - vlib_node_increment_counter (vm, esp_encrypt_node.index, - ESP_ENCRYPT_ERROR_SEQ_CYCLED, 1); - //TODO: rekey SA - o_bi0 = i_bi0; - to_next[0] = o_bi0; - to_next += 1; - goto trace; + hdr_len += sizeof (udp_header_t); + esp_fill_udp_hdr (sa0, (udp_header_t *) (payload - hdr_len), + payload_len_total + hdr_len); } - sa0->total_data_size += i_b0->current_length; - - /* grab free buffer */ - last_empty_buffer = vec_len (empty_buffers) - 1; - o_bi0 = empty_buffers[last_empty_buffer]; - o_b0 = vlib_get_buffer (vm, o_bi0); - o_b0->flags = VLIB_BUFFER_TOTAL_LENGTH_VALID; - o_b0->current_data = sizeof (ethernet_header_t); - ih0 = vlib_buffer_get_current (i_b0); - vlib_prefetch_buffer_with_index (vm, - empty_buffers[last_empty_buffer - - 1], STORE); - _vec_len (empty_buffers) = last_empty_buffer; - to_next[0] = o_bi0; - to_next += 1; - - /* add old buffer to the recycle list */ - vec_add1 (recycle, i_bi0); - - /* is ipv6 */ - if (PREDICT_FALSE - ((ih0->ip4.ip_version_and_header_length & 0xF0) == 0x60)) + /* IP header */ + if (ipsec_sa_is_set_IS_TUNNEL_V6 (sa0)) { - is_ipv6 = 1; - ih6_0 = vlib_buffer_get_current (i_b0); - ip_hdr_size = sizeof (ip6_header_t); - next_hdr_type = IP_PROTOCOL_IPV6; - oh6_0 = vlib_buffer_get_current (o_b0); - o_esp0 = vlib_buffer_get_current (o_b0) + sizeof (ip6_header_t); - - oh6_0->ip6.ip_version_traffic_class_and_flow_label = - ih6_0->ip6.ip_version_traffic_class_and_flow_label; - oh6_0->ip6.protocol = IP_PROTOCOL_IPSEC_ESP; - oh6_0->ip6.hop_limit = 254; - oh6_0->ip6.src_address.as_u64[0] = - ih6_0->ip6.src_address.as_u64[0]; 
- oh6_0->ip6.src_address.as_u64[1] = - ih6_0->ip6.src_address.as_u64[1]; - oh6_0->ip6.dst_address.as_u64[0] = - ih6_0->ip6.dst_address.as_u64[0]; - oh6_0->ip6.dst_address.as_u64[1] = - ih6_0->ip6.dst_address.as_u64[1]; - oh6_0->esp.spi = clib_net_to_host_u32 (sa0->spi); - oh6_0->esp.seq = clib_net_to_host_u32 (sa0->seq); - ip_proto = ih6_0->ip6.protocol; - - next0 = ESP_ENCRYPT_NEXT_IP6_LOOKUP; + ip6_header_t *ip6; + u16 len = sizeof (ip6_header_t); + hdr_len += len; + ip6 = (ip6_header_t *) (payload - hdr_len); + clib_memcpy_fast (ip6, &sa0->ip6_hdr, len); + *next_hdr_ptr = (is_ip6 ? + IP_PROTOCOL_IPV6 : IP_PROTOCOL_IP_IN_IP); + len = payload_len_total + hdr_len - len; + ip6->payload_length = clib_net_to_host_u16 (len); } else { - is_ipv6 = 0; - ip_hdr_size = sizeof (ip4_header_t); - next_hdr_type = IP_PROTOCOL_IP_IN_IP; - oh0 = vlib_buffer_get_current (o_b0); - o_esp0 = vlib_buffer_get_current (o_b0) + sizeof (ip4_header_t); - - oh0->ip4.ip_version_and_header_length = 0x45; - oh0->ip4.tos = ih0->ip4.tos; - oh0->ip4.fragment_id = 0; - oh0->ip4.flags_and_fragment_offset = 0; - oh0->ip4.ttl = 254; - oh0->ip4.protocol = IP_PROTOCOL_IPSEC_ESP; - oh0->ip4.src_address.as_u32 = ih0->ip4.src_address.as_u32; - oh0->ip4.dst_address.as_u32 = ih0->ip4.dst_address.as_u32; - oh0->esp.spi = clib_net_to_host_u32 (sa0->spi); - oh0->esp.seq = clib_net_to_host_u32 (sa0->seq); - ip_proto = ih0->ip4.protocol; - - next0 = ESP_ENCRYPT_NEXT_IP4_LOOKUP; + ip4_header_t *ip4; + u16 len = sizeof (ip4_header_t); + hdr_len += len; + ip4 = (ip4_header_t *) (payload - hdr_len); + clib_memcpy_fast (ip4, &sa0->ip4_hdr, len); + *next_hdr_ptr = (is_ip6 ? + IP_PROTOCOL_IPV6 : IP_PROTOCOL_IP_IN_IP); + len = payload_len_total + hdr_len; + esp_update_ip4_hdr (ip4, len, /* is_transport */ 0, 0); } - if (PREDICT_TRUE - (!is_ipv6 && sa0->is_tunnel && !sa0->is_tunnel_ip6)) + dpo = &sa0->dpo; + if (!is_tun) { - oh0->ip4.src_address.as_u32 = sa0->tunnel_src_addr.ip4.as_u32; - oh0->ip4.dst_address.as_u32 = sa0->tunnel_dst_addr.ip4.as_u32; - - vnet_buffer (o_b0)->sw_if_index[VLIB_TX] = (u32) ~ 0; + next[0] = dpo->dpoi_next_node; + vnet_buffer (b[0])->ip.adj_index[VLIB_TX] = dpo->dpoi_index; + } + } + else /* transport mode */ + { + u8 *l2_hdr, l2_len, *ip_hdr, ip_len; + ip6_ext_header_t *ext_hdr; + udp_header_t *udp = 0; + u8 *old_ip_hdr = vlib_buffer_get_current (b[0]); + + ip_len = is_ip6 ? 
+ esp_get_ip6_hdr_len ((ip6_header_t *) old_ip_hdr, &ext_hdr) : + ip4_header_bytes ((ip4_header_t *) old_ip_hdr); + + vlib_buffer_advance (b[0], ip_len); + payload = vlib_buffer_get_current (b[0]); + next_hdr_ptr = esp_add_footer_and_icv (lb, block_sz, icv_sz, + next, node, + buffer_data_size, + vlib_buffer_length_in_chain + (vm, b[0])); + if (!next_hdr_ptr) + goto trace; + + b[0]->flags &= ~VLIB_BUFFER_TOTAL_LENGTH_VALID; + payload_len = b[0]->current_length; + payload_len_total = vlib_buffer_length_in_chain (vm, b[0]); + + /* ESP header */ + hdr_len += sizeof (*esp); + esp = (esp_header_t *) (payload - hdr_len); + + /* optional UDP header */ + if (ipsec_sa_is_set_UDP_ENCAP (sa0)) + { + hdr_len += sizeof (udp_header_t); + udp = (udp_header_t *) (payload - hdr_len); } - else if (is_ipv6 && sa0->is_tunnel && sa0->is_tunnel_ip6) + + /* IP header */ + hdr_len += ip_len; + ip_hdr = payload - hdr_len; + + /* L2 header */ + if (!is_tun) { - oh6_0->ip6.src_address.as_u64[0] = - sa0->tunnel_src_addr.ip6.as_u64[0]; - oh6_0->ip6.src_address.as_u64[1] = - sa0->tunnel_src_addr.ip6.as_u64[1]; - oh6_0->ip6.dst_address.as_u64[0] = - sa0->tunnel_dst_addr.ip6.as_u64[0]; - oh6_0->ip6.dst_address.as_u64[1] = - sa0->tunnel_dst_addr.ip6.as_u64[1]; - - vnet_buffer (o_b0)->sw_if_index[VLIB_TX] = (u32) ~ 0; + l2_len = vnet_buffer (b[0])->ip.save_rewrite_length; + hdr_len += l2_len; + l2_hdr = payload - hdr_len; + + /* copy l2 and ip header */ + clib_memcpy_le32 (l2_hdr, old_ip_hdr - l2_len, l2_len); } else + l2_len = 0; + + if (is_ip6) { - next_hdr_type = ip_proto; - if (vnet_buffer (i_b0)->sw_if_index[VLIB_TX] != ~0) + ip6_header_t *ip6 = (ip6_header_t *) (old_ip_hdr); + if (PREDICT_TRUE (NULL == ext_hdr)) + { + *next_hdr_ptr = ip6->protocol; + ip6->protocol = IP_PROTOCOL_IPSEC_ESP; + } + else { - transport_mode = 1; - ethernet_header_t *ieh0, *oeh0; - ieh0 = - (ethernet_header_t *) ((u8 *) - vlib_buffer_get_current (i_b0) - - sizeof (ethernet_header_t)); - oeh0 = (ethernet_header_t *) o_b0->data; - clib_memcpy (oeh0, ieh0, sizeof (ethernet_header_t)); - next0 = ESP_ENCRYPT_NEXT_INTERFACE_OUTPUT; - vnet_buffer (o_b0)->sw_if_index[VLIB_TX] = - vnet_buffer (i_b0)->sw_if_index[VLIB_TX]; + *next_hdr_ptr = ext_hdr->next_hdr; + ext_hdr->next_hdr = IP_PROTOCOL_IPSEC_ESP; } - vlib_buffer_advance (i_b0, ip_hdr_size); + ip6->payload_length = + clib_host_to_net_u16 (payload_len_total + hdr_len - l2_len - + sizeof (ip6_header_t)); } - - ASSERT (sa0->crypto_alg < IPSEC_CRYPTO_N_ALG); - - if (PREDICT_TRUE (sa0->crypto_alg != IPSEC_CRYPTO_ALG_NONE)) + else { - - const int BLOCK_SIZE = 16; - const int IV_SIZE = 16; - int blocks = 1 + (i_b0->current_length + 1) / BLOCK_SIZE; - - /* pad packet in input buffer */ - u8 pad_bytes = BLOCK_SIZE * blocks - 2 - i_b0->current_length; - u8 i; - u8 *padding = - vlib_buffer_get_current (i_b0) + i_b0->current_length; - i_b0->current_length = BLOCK_SIZE * blocks; - for (i = 0; i < pad_bytes; ++i) + u16 len; + ip4_header_t *ip4 = (ip4_header_t *) (old_ip_hdr); + *next_hdr_ptr = ip4->protocol; + len = payload_len_total + hdr_len - l2_len; + if (udp) { - padding[i] = i + 1; + esp_update_ip4_hdr (ip4, len, /* is_transport */ 1, 1); + esp_fill_udp_hdr (sa0, udp, len - ip_len); } - f0 = vlib_buffer_get_current (i_b0) + i_b0->current_length - 2; - f0->pad_length = pad_bytes; - f0->next_header = next_hdr_type; - - o_b0->current_length = ip_hdr_size + sizeof (esp_header_t) + - BLOCK_SIZE * blocks + IV_SIZE; - - vnet_buffer (o_b0)->sw_if_index[VLIB_RX] = - vnet_buffer (i_b0)->sw_if_index[VLIB_RX]; + 
else + esp_update_ip4_hdr (ip4, len, /* is_transport */ 1, 0); + } - u8 iv[16]; - RAND_bytes (iv, sizeof (iv)); + clib_memcpy_le64 (ip_hdr, old_ip_hdr, ip_len); - clib_memcpy ((u8 *) vlib_buffer_get_current (o_b0) + - ip_hdr_size + sizeof (esp_header_t), iv, 16); + if (!is_tun) + next[0] = ESP_ENCRYPT_NEXT_INTERFACE_OUTPUT; + } - esp_encrypt_aes_cbc (sa0->crypto_alg, - (u8 *) vlib_buffer_get_current (i_b0), - (u8 *) vlib_buffer_get_current (o_b0) + - ip_hdr_size + sizeof (esp_header_t) + - IV_SIZE, BLOCK_SIZE * blocks, - sa0->crypto_key, iv); - } + esp->spi = spi; + esp->seq = clib_net_to_host_u32 (sa0->seq); - o_b0->current_length += hmac_calc (sa0->integ_alg, sa0->integ_key, - sa0->integ_key_len, - (u8 *) o_esp0, - o_b0->current_length - - ip_hdr_size, - vlib_buffer_get_current (o_b0) + - o_b0->current_length, - sa0->use_esn, sa0->seq_hi); + if (sa0->crypto_enc_op_id) + { + vnet_crypto_op_t *op; + vec_add2_aligned (crypto_ops[0], op, 1, CLIB_CACHE_LINE_BYTES); + vnet_crypto_op_init (op, sa0->crypto_enc_op_id); + op->src = op->dst = payload; + op->key_index = sa0->crypto_key_index; + op->len = payload_len - icv_sz; + op->user_data = b - bufs; - if (PREDICT_FALSE (is_ipv6)) + if (ipsec_sa_is_set_IS_AEAD (sa0)) { - oh6_0->ip6.payload_length = - clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, o_b0) - - sizeof (ip6_header_t)); + /* + * construct the AAD in a scratch space in front + * of the IP header. + */ + op->aad = payload - hdr_len - sizeof (esp_aead_t); + + esp_aad_fill (op, esp, sa0); + + op->tag = payload + op->len; + op->tag_len = 16; + + u64 *iv = (u64 *) (payload - iv_sz); + nonce->salt = sa0->salt; + nonce->iv = *iv = clib_host_to_net_u64 (sa0->gcm_iv_counter++); + op->iv = (u8 *) nonce; + nonce++; } else { - oh0->ip4.length = - clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, o_b0)); - oh0->ip4.checksum = ip4_header_checksum (&oh0->ip4); + op->iv = payload - iv_sz; + op->flags = VNET_CRYPTO_OP_FLAG_INIT_IV; } - if (transport_mode) - vlib_buffer_reset (o_b0); + if (lb != b[0]) + { + /* is chained */ + vlib_buffer_t *cb = b[0]; + op->flags |= VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS; + op->chunk_index = vec_len (ptd->chunks); + op->tag = vlib_buffer_get_tail (lb) - icv_sz; + vec_add2 (ptd->chunks, ch, 1); + ch->len = payload_len; + ch->src = ch->dst = payload; + cb = vlib_get_buffer (vm, cb->next_buffer); + op->n_chunks = 1; + + while (1) + { + vec_add2 (ptd->chunks, ch, 1); + op->n_chunks += 1; + if (lb == cb) + ch->len = cb->current_length - icv_sz; + else + ch->len = cb->current_length; + ch->src = ch->dst = vlib_buffer_get_current (cb); + + if (!(cb->flags & VLIB_BUFFER_NEXT_PRESENT)) + break; + + cb = vlib_get_buffer (vm, cb->next_buffer); + } + } + } - trace: - if (PREDICT_FALSE (i_b0->flags & VLIB_BUFFER_IS_TRACED)) + if (sa0->integ_op_id) + { + vnet_crypto_op_t *op; + vec_add2_aligned (integ_ops[0], op, 1, CLIB_CACHE_LINE_BYTES); + vnet_crypto_op_init (op, sa0->integ_op_id); + op->src = payload - iv_sz - sizeof (esp_header_t); + op->digest = payload + payload_len - icv_sz; + op->key_index = sa0->integ_key_index; + op->digest_len = icv_sz; + op->len = payload_len - icv_sz + iv_sz + sizeof (esp_header_t); + op->user_data = b - bufs; + + if (lb != b[0]) { - if (o_b0) + /* is chained */ + op->flags |= VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS; + vlib_buffer_t *cb = b[0]; + op->chunk_index = vec_len (ptd->chunks); + op->digest = vlib_buffer_get_tail (lb) - icv_sz; + vec_add2 (ptd->chunks, ch, 1); + ch->len = payload_len + iv_sz + sizeof (esp_header_t); + ch->src = payload - 
iv_sz - sizeof (esp_header_t); + cb = vlib_get_buffer (vm, cb->next_buffer); + op->n_chunks = 1; + + while (1) { - o_b0->flags |= VLIB_BUFFER_IS_TRACED; - o_b0->trace_index = i_b0->trace_index; - esp_encrypt_trace_t *tr = - vlib_add_trace (vm, node, o_b0, sizeof (*tr)); - tr->spi = sa0->spi; - tr->seq = sa0->seq - 1; - tr->crypto_alg = sa0->crypto_alg; - tr->integ_alg = sa0->integ_alg; + vec_add2 (ptd->chunks, ch, 1); + op->n_chunks += 1; + if (lb == cb) + { + ch->len = cb->current_length - icv_sz; + if (ipsec_sa_is_set_USE_ESN (sa0)) + { + u32 seq_hi = clib_net_to_host_u32 (sa0->seq_hi); + clib_memcpy_fast (op->digest, &seq_hi, + sizeof (seq_hi)); + ch->len += sizeof (seq_hi); + } + } + else + ch->len = cb->current_length; + ch->src = vlib_buffer_get_current (cb); + + if (!(cb->flags & VLIB_BUFFER_NEXT_PRESENT)) + break; + + cb = vlib_get_buffer (vm, cb->next_buffer); } } + else if (ipsec_sa_is_set_USE_ESN (sa0)) + { + u32 seq_hi = clib_net_to_host_u32 (sa0->seq_hi); + clib_memcpy_fast (op->digest, &seq_hi, sizeof (seq_hi)); + op->len += sizeof (seq_hi); + } + } + + vlib_buffer_advance (b[0], 0LL - hdr_len); + + current_sa_packets += 1; + current_sa_bytes += payload_len_total; - vlib_validate_buffer_enqueue_x1 (vm, node, next_index, - to_next, n_left_to_next, o_bi0, - next0); + trace: + if (PREDICT_FALSE (b[0]->flags & VLIB_BUFFER_IS_TRACED)) + { + esp_encrypt_trace_t *tr = vlib_add_trace (vm, node, b[0], + sizeof (*tr)); + tr->sa_index = sa_index0; + tr->spi = sa0->spi; + tr->seq = sa0->seq; + tr->sa_seq_hi = sa0->seq_hi; + tr->udp_encap = ipsec_sa_is_set_UDP_ENCAP (sa0); + tr->crypto_alg = sa0->crypto_alg; + tr->integ_alg = sa0->integ_alg; } - vlib_put_next_frame (vm, node, next_index, n_left_to_next); + /* next */ + n_left -= 1; + next += 1; + b += 1; } - vlib_node_increment_counter (vm, esp_encrypt_node.index, - ESP_ENCRYPT_ERROR_RX_PKTS, - from_frame->n_vectors); - -free_buffers_and_exit: - if (recycle) - vlib_buffer_free (vm, recycle, vec_len (recycle)); - vec_free (recycle); - return from_frame->n_vectors; + + vlib_increment_combined_counter (&ipsec_sa_counters, thread_index, + current_sa_index, current_sa_packets, + current_sa_bytes); + + esp_process_ops (vm, node, ptd->crypto_ops, bufs, nexts); + esp_process_chained_ops (vm, node, ptd->chained_crypto_ops, bufs, nexts, + ptd->chunks); + + esp_process_ops (vm, node, ptd->integ_ops, bufs, nexts); + esp_process_chained_ops (vm, node, ptd->chained_integ_ops, bufs, nexts, + ptd->chunks); + + vlib_node_increment_counter (vm, node->node_index, + ESP_ENCRYPT_ERROR_RX_PKTS, frame->n_vectors); + + vlib_buffer_enqueue_to_next (vm, node, from, nexts, frame->n_vectors); + return frame->n_vectors; } +VLIB_NODE_FN (esp4_encrypt_node) (vlib_main_t * vm, + vlib_node_runtime_t * node, + vlib_frame_t * from_frame) +{ + return esp_encrypt_inline (vm, node, from_frame, 0 /* is_ip6 */ , 0); +} /* *INDENT-OFF* */ -VLIB_REGISTER_NODE (esp_encrypt_node) = { - .function = esp_encrypt_node_fn, - .name = "esp-encrypt", +VLIB_REGISTER_NODE (esp4_encrypt_node) = { + .name = "esp4-encrypt", .vector_size = sizeof (u32), .format_trace = format_esp_encrypt_trace, .type = VLIB_NODE_TYPE_INTERNAL, @@ -417,14 +741,258 @@ VLIB_REGISTER_NODE (esp_encrypt_node) = { .n_next_nodes = ESP_ENCRYPT_N_NEXT, .next_nodes = { -#define _(s,n) [ESP_ENCRYPT_NEXT_##s] = n, - foreach_esp_encrypt_next -#undef _ + [ESP_ENCRYPT_NEXT_DROP] = "ip4-drop", + [ESP_ENCRYPT_NEXT_HANDOFF] = "esp4-encrypt-handoff", + [ESP_ENCRYPT_NEXT_INTERFACE_OUTPUT] = "interface-output", }, }; /* *INDENT-ON* 
*/ -VLIB_NODE_FUNCTION_MULTIARCH (esp_encrypt_node, esp_encrypt_node_fn) +VLIB_NODE_FN (esp6_encrypt_node) (vlib_main_t * vm, + vlib_node_runtime_t * node, + vlib_frame_t * from_frame) +{ + return esp_encrypt_inline (vm, node, from_frame, 1 /* is_ip6 */ , 0); +} + +/* *INDENT-OFF* */ +VLIB_REGISTER_NODE (esp6_encrypt_node) = { + .name = "esp6-encrypt", + .vector_size = sizeof (u32), + .format_trace = format_esp_encrypt_trace, + .type = VLIB_NODE_TYPE_INTERNAL, + + .n_errors = ARRAY_LEN(esp_encrypt_error_strings), + .error_strings = esp_encrypt_error_strings, + + .n_next_nodes = ESP_ENCRYPT_N_NEXT, + .next_nodes = { + [ESP_ENCRYPT_NEXT_DROP] = "ip6-drop", + [ESP_ENCRYPT_NEXT_HANDOFF] = "esp6-encrypt-handoff", + [ESP_ENCRYPT_NEXT_INTERFACE_OUTPUT] = "interface-output", + }, +}; +/* *INDENT-ON* */ + +VLIB_NODE_FN (esp4_encrypt_tun_node) (vlib_main_t * vm, + vlib_node_runtime_t * node, + vlib_frame_t * from_frame) +{ + return esp_encrypt_inline (vm, node, from_frame, 0 /* is_ip6 */ , 1); +} + +/* *INDENT-OFF* */ +VLIB_REGISTER_NODE (esp4_encrypt_tun_node) = { + .name = "esp4-encrypt-tun", + .vector_size = sizeof (u32), + .format_trace = format_esp_encrypt_trace, + .type = VLIB_NODE_TYPE_INTERNAL, + + .n_errors = ARRAY_LEN(esp_encrypt_error_strings), + .error_strings = esp_encrypt_error_strings, + + .n_next_nodes = ESP_ENCRYPT_N_NEXT, + .next_nodes = { + [ESP_ENCRYPT_NEXT_DROP] = "ip4-drop", + [ESP_ENCRYPT_NEXT_HANDOFF] = "esp4-encrypt-tun-handoff", + [ESP_ENCRYPT_NEXT_INTERFACE_OUTPUT] = "error-drop", + }, +}; + +VNET_FEATURE_INIT (esp4_encrypt_tun_feat_node, static) = +{ + .arc_name = "ip4-output", + .node_name = "esp4-encrypt-tun", + .runs_before = VNET_FEATURES ("adj-midchain-tx"), +}; + +VNET_FEATURE_INIT (esp6o4_encrypt_tun_feat_node, static) = +{ + .arc_name = "ip6-output", + .node_name = "esp4-encrypt-tun", + .runs_before = VNET_FEATURES ("adj-midchain-tx"), +}; + +VNET_FEATURE_INIT (esp4_ethernet_encrypt_tun_feat_node, static) = +{ + .arc_name = "ethernet-output", + .node_name = "esp4-encrypt-tun", + .runs_before = VNET_FEATURES ("adj-midchain-tx", "adj-midchain-tx-no-count"), +}; +/* *INDENT-ON* */ + +VLIB_NODE_FN (esp6_encrypt_tun_node) (vlib_main_t * vm, + vlib_node_runtime_t * node, + vlib_frame_t * from_frame) +{ + return esp_encrypt_inline (vm, node, from_frame, 1 /* is_ip6 */ , 1); +} + +/* *INDENT-OFF* */ +VLIB_REGISTER_NODE (esp6_encrypt_tun_node) = { + .name = "esp6-encrypt-tun", + .vector_size = sizeof (u32), + .format_trace = format_esp_encrypt_trace, + .type = VLIB_NODE_TYPE_INTERNAL, + + .n_errors = ARRAY_LEN(esp_encrypt_error_strings), + .error_strings = esp_encrypt_error_strings, + + .n_next_nodes = ESP_ENCRYPT_N_NEXT, + .next_nodes = { + [ESP_ENCRYPT_NEXT_DROP] = "ip6-drop", + [ESP_ENCRYPT_NEXT_HANDOFF] = "esp6-encrypt-tun-handoff", + [ESP_ENCRYPT_NEXT_INTERFACE_OUTPUT] = "error-drop", + }, +}; + +VNET_FEATURE_INIT (esp6_encrypt_tun_feat_node, static) = +{ + .arc_name = "ip6-output", + .node_name = "esp6-encrypt-tun", + .runs_before = VNET_FEATURES ("adj-midchain-tx"), +}; + +VNET_FEATURE_INIT (esp4o6_encrypt_tun_feat_node, static) = +{ + .arc_name = "ip4-output", + .node_name = "esp6-encrypt-tun", + .runs_before = VNET_FEATURES ("adj-midchain-tx"), +}; + +/* *INDENT-ON* */ + +typedef struct +{ + u32 sa_index; +} esp_no_crypto_trace_t; + +static u8 * +format_esp_no_crypto_trace (u8 * s, va_list * args) +{ + CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *); + CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *); + esp_no_crypto_trace_t *t = 
va_arg (*args, esp_no_crypto_trace_t *); + + s = format (s, "esp-no-crypto: sa-index %u", t->sa_index); + + return s; +} + +enum +{ + ESP_NO_CRYPTO_NEXT_DROP, + ESP_NO_CRYPTO_N_NEXT, +}; + +enum +{ + ESP_NO_CRYPTO_ERROR_RX_PKTS, +}; + +static char *esp_no_crypto_error_strings[] = { + "Outbound ESP packets received", +}; + +always_inline uword +esp_no_crypto_inline (vlib_main_t * vm, vlib_node_runtime_t * node, + vlib_frame_t * frame) +{ + vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs; + u16 nexts[VLIB_FRAME_SIZE], *next = nexts; + u32 *from = vlib_frame_vector_args (frame); + u32 n_left = frame->n_vectors; + + vlib_get_buffers (vm, from, b, n_left); + + while (n_left > 0) + { + u32 next0; + u32 sa_index0; + + /* packets are always going to be dropped, but get the sa_index */ + sa_index0 = *(u32 *) vnet_feature_next_with_data (&next0, b[0], + sizeof (sa_index0)); + + next[0] = ESP_NO_CRYPTO_NEXT_DROP; + + if (PREDICT_FALSE (b[0]->flags & VLIB_BUFFER_IS_TRACED)) + { + esp_no_crypto_trace_t *tr = vlib_add_trace (vm, node, b[0], + sizeof (*tr)); + tr->sa_index = sa_index0; + } + + n_left -= 1; + next += 1; + b += 1; + } + + vlib_node_increment_counter (vm, node->node_index, + ESP_NO_CRYPTO_ERROR_RX_PKTS, frame->n_vectors); + + vlib_buffer_enqueue_to_next (vm, node, from, nexts, frame->n_vectors); + + return frame->n_vectors; +} + +VLIB_NODE_FN (esp4_no_crypto_tun_node) (vlib_main_t * vm, + vlib_node_runtime_t * node, + vlib_frame_t * from_frame) +{ + return esp_no_crypto_inline (vm, node, from_frame); +} + +/* *INDENT-OFF* */ +VLIB_REGISTER_NODE (esp4_no_crypto_tun_node) = +{ + .name = "esp4-no-crypto", + .vector_size = sizeof (u32), + .format_trace = format_esp_no_crypto_trace, + .n_errors = ARRAY_LEN(esp_no_crypto_error_strings), + .error_strings = esp_no_crypto_error_strings, + .n_next_nodes = ESP_NO_CRYPTO_N_NEXT, + .next_nodes = { + [ESP_NO_CRYPTO_NEXT_DROP] = "ip4-drop", + }, +}; + +VNET_FEATURE_INIT (esp4_no_crypto_tun_feat_node, static) = +{ + .arc_name = "ip4-output", + .node_name = "esp4-no-crypto", + .runs_before = VNET_FEATURES ("adj-midchain-tx"), +}; + +VLIB_NODE_FN (esp6_no_crypto_tun_node) (vlib_main_t * vm, + vlib_node_runtime_t * node, + vlib_frame_t * from_frame) +{ + return esp_no_crypto_inline (vm, node, from_frame); +} + +/* *INDENT-OFF* */ +VLIB_REGISTER_NODE (esp6_no_crypto_tun_node) = +{ + .name = "esp6-no-crypto", + .vector_size = sizeof (u32), + .format_trace = format_esp_no_crypto_trace, + .n_errors = ARRAY_LEN(esp_no_crypto_error_strings), + .error_strings = esp_no_crypto_error_strings, + .n_next_nodes = ESP_NO_CRYPTO_N_NEXT, + .next_nodes = { + [ESP_NO_CRYPTO_NEXT_DROP] = "ip6-drop", + }, +}; + +VNET_FEATURE_INIT (esp6_no_crypto_tun_feat_node, static) = +{ + .arc_name = "ip6-output", + .node_name = "esp6-no-crypto", + .runs_before = VNET_FEATURES ("adj-midchain-tx"), +}; +/* *INDENT-ON* */ + /* * fd.io coding-style-patch-verification: ON *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */
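A minimal standalone sketch, not part of the patch, of the ESP trailer arithmetic used by esp_add_footer_and_icv() above: the payload plus the two-byte footer is rounded up to the cipher block size, and the remainder becomes pad_length. round_pow2_u16() mirrors VPP's round_pow2(); all other names and values are local to the sketch.

#include <stdint.h>
#include <stdio.h>

/* round x up to the next multiple of pow2 (pow2 must be a power of two),
 * mirroring VPP's round_pow2() */
static uint16_t
round_pow2_u16 (uint16_t x, uint16_t pow2)
{
  return (x + pow2 - 1) & ~(pow2 - 1);
}

int
main (void)
{
  uint16_t block_size = 16;	/* e.g. AES-CBC block size */
  uint16_t icv_sz = 12;		/* e.g. HMAC-SHA1-96 ICV */
  uint16_t total_len = 37;	/* payload bytes to be encrypted */
  uint16_t footer_sz = 2;	/* esp_footer_t: pad_length + next_header */

  uint16_t min_length = total_len + footer_sz;
  uint16_t new_length = round_pow2_u16 (min_length, block_size);
  uint16_t pad_bytes = new_length - min_length;
  uint16_t tail_sz = footer_sz + pad_bytes + icv_sz;

  /* 37 + 2 = 39 rounds up to 48, so 9 bytes of 01 02 .. 09 padding */
  printf ("pad_bytes=%u tail_sz=%u\n", pad_bytes, tail_sz);
  return 0;
}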
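A sketch of the incremental IPv4 checksum update (RFC 1624) that esp_update_ip4_hdr() above performs via ip_csum_update()/ip_csum_fold() when it rewrites the length and protocol fields in transport mode. csum_update16() is a local stand-in, not VPP's API, and the input values are arbitrary examples.

#include <stdint.h>
#include <stdio.h>

/* incrementally update a 16-bit ones'-complement checksum when one
 * 16-bit field changes from old_val to new_val (RFC 1624, eqn. 3):
 * HC' = ~(~HC + ~m + m') */
static uint16_t
csum_update16 (uint16_t hc, uint16_t old_val, uint16_t new_val)
{
  uint32_t sum = (uint16_t) ~hc;
  sum += (uint16_t) ~old_val;
  sum += new_val;
  /* fold the carries back into 16 bits */
  while (sum >> 16)
    sum = (sum & 0xffff) + (sum >> 16);
  return (uint16_t) ~sum;
}

int
main (void)
{
  /* e.g. the total-length field grows when the ESP header and trailer
   * are added around the transport payload */
  uint16_t checksum = 0xb1e6;
  uint16_t old_len = 0x0054;
  uint16_t new_len = 0x0078;

  printf ("updated checksum = 0x%04x\n",
	  csum_update16 (checksum, old_len, new_len));
  return 0;
}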
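A sketch of the 12-byte AES-GCM nonce layout behind the esp_gcm_nonce_t/STATIC_ASSERT_SIZEOF pair above: a 4-byte per-SA salt followed by an 8-byte per-packet IV drawn from a monotonically increasing counter (the patch also writes that IV into the packet ahead of the payload, which this sketch omits). The struct and values here are illustrative only; a little-endian host is assumed for the byte swap.

#include <stdint.h>
#include <stdio.h>

/* packed 12-byte nonce: 4-byte salt + 8-byte IV, the shape AES-GCM for
 * ESP (RFC 4106) expects */
typedef struct __attribute__ ((packed))
{
  uint32_t salt;
  uint64_t iv;
} gcm_nonce_example_t;

/* compile-time size check, standing in for VPP's STATIC_ASSERT_SIZEOF */
_Static_assert (sizeof (gcm_nonce_example_t) == 12,
		"nonce must be exactly 12 bytes");

int
main (void)
{
  uint64_t gcm_iv_counter = 1;	/* per-SA counter; a value is never reused */
  gcm_nonce_example_t nonce;

  nonce.salt = 0xdeadbeef;	/* example salt, fixed at SA creation */
  /* store the counter in network byte order, as the patch does with
   * clib_host_to_net_u64(); bswap64 assumes a little-endian host */
  nonce.iv = __builtin_bswap64 (gcm_iv_counter++);

  printf ("nonce is %zu bytes\n", sizeof (nonce));
  return 0;
}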