X-Git-Url: https://gerrit.fd.io/r/gitweb?a=blobdiff_plain;f=src%2Fvnet%2Fipsec%2Fesp_encrypt.c;h=1fc53a53317acad51b9a1c6feefabb9d1e984472;hb=fc81134a26458a8358483b0d2908a6b83afb7f11;hp=bd6e7641f4a88cdbddf1e58e37b3cd72dcfcf16a;hpb=4a58e49cfe03150034a65e147a2ffe8d24391b86;p=vpp.git diff --git a/src/vnet/ipsec/esp_encrypt.c b/src/vnet/ipsec/esp_encrypt.c index bd6e7641f4a..1fc53a53317 100644 --- a/src/vnet/ipsec/esp_encrypt.c +++ b/src/vnet/ipsec/esp_encrypt.c @@ -292,14 +292,6 @@ esp_process_ops (vlib_main_t * vm, vlib_node_runtime_t * node, } } -typedef struct -{ - u32 salt; - u64 iv; -} __clib_packed esp_gcm_nonce_t; - -STATIC_ASSERT_SIZEOF (esp_gcm_nonce_t, 12); - static_always_inline u32 esp_encrypt_chain_crypto (vlib_main_t * vm, ipsec_per_thread_data_t * ptd, ipsec_sa_t * sa0, vlib_buffer_t * b, @@ -384,13 +376,12 @@ esp_encrypt_chain_integ (vlib_main_t * vm, ipsec_per_thread_data_t * ptd, } always_inline void -esp_prepare_sync_op (vlib_main_t * vm, ipsec_per_thread_data_t * ptd, - vnet_crypto_op_t ** crypto_ops, - vnet_crypto_op_t ** integ_ops, ipsec_sa_t * sa0, - u8 * payload, u16 payload_len, u8 iv_sz, u8 icv_sz, - vlib_buffer_t ** bufs, vlib_buffer_t ** b, - vlib_buffer_t * lb, u32 hdr_len, esp_header_t * esp, - esp_gcm_nonce_t * nonce) +esp_prepare_sync_op (vlib_main_t *vm, ipsec_per_thread_data_t *ptd, + vnet_crypto_op_t **crypto_ops, + vnet_crypto_op_t **integ_ops, ipsec_sa_t *sa0, + u8 *payload, u16 payload_len, u8 iv_sz, u8 icv_sz, + vlib_buffer_t **bufs, vlib_buffer_t **b, + vlib_buffer_t *lb, u32 hdr_len, esp_header_t *esp) { if (sa0->crypto_enc_op_id) { @@ -403,21 +394,30 @@ esp_prepare_sync_op (vlib_main_t * vm, ipsec_per_thread_data_t * ptd, op->len = payload_len - icv_sz; op->user_data = b - bufs; - if (ipsec_sa_is_set_IS_AEAD (sa0)) + if (ipsec_sa_is_set_IS_CTR (sa0)) { - /* - * construct the AAD in a scratch space in front - * of the IP header. 
- */ - op->aad = payload - hdr_len - sizeof (esp_aead_t); - op->aad_len = esp_aad_fill (op->aad, esp, sa0); - - op->tag = payload + op->len; - op->tag_len = 16; + ASSERT (sizeof (u64) == iv_sz); + /* construct nonce in a scratch space in front of the IP header */ + esp_ctr_nonce_t *nonce = + (esp_ctr_nonce_t *) (payload - sizeof (u64) - hdr_len - + sizeof (*nonce)); + u64 *pkt_iv = (u64 *) (payload - sizeof (u64)); + + if (ipsec_sa_is_set_IS_AEAD (sa0)) + { + /* constuct aad in a scratch space in front of the nonce */ + op->aad = (u8 *) nonce - sizeof (esp_aead_t); + op->aad_len = esp_aad_fill (op->aad, esp, sa0); + op->tag = payload + op->len; + op->tag_len = 16; + } + else + { + nonce->ctr = clib_host_to_net_u32 (1); + } - u64 *iv = (u64 *) (payload - iv_sz); nonce->salt = sa0->salt; - nonce->iv = *iv = clib_host_to_net_u64 (sa0->gcm_iv_counter++); + nonce->iv = *pkt_iv = clib_host_to_net_u64 (sa0->ctr_iv_counter++); op->iv = (u8 *) nonce; } else @@ -471,13 +471,13 @@ esp_prepare_sync_op (vlib_main_t * vm, ipsec_per_thread_data_t * ptd, } } -static_always_inline int -esp_prepare_async_frame (vlib_main_t * vm, ipsec_per_thread_data_t * ptd, - vnet_crypto_async_frame_t ** async_frame, - ipsec_sa_t * sa, vlib_buffer_t * b, - esp_header_t * esp, u8 * payload, u32 payload_len, - u8 iv_sz, u8 icv_sz, u32 bi, u16 next, u32 hdr_len, - u16 async_next, vlib_buffer_t * lb) +static_always_inline void +esp_prepare_async_frame (vlib_main_t *vm, ipsec_per_thread_data_t *ptd, + vnet_crypto_async_frame_t *async_frame, + ipsec_sa_t *sa, vlib_buffer_t *b, esp_header_t *esp, + u8 *payload, u32 payload_len, u8 iv_sz, u8 icv_sz, + u32 bi, u16 next, u32 hdr_len, u16 async_next, + vlib_buffer_t *lb) { esp_post_data_t *post = esp_post_data (b); u8 *tag, *iv, *aad = 0; @@ -493,67 +493,72 @@ esp_prepare_async_frame (vlib_main_t * vm, ipsec_per_thread_data_t * ptd, crypto_total_len = integ_total_len = payload_len - icv_sz; tag = payload + crypto_total_len; - /* aead */ - if (ipsec_sa_is_set_IS_AEAD (sa)) - { - esp_gcm_nonce_t *nonce; - u64 *pkt_iv = (u64 *) (payload - iv_sz); + key_index = sa->linked_key_index; - aad = payload - hdr_len - sizeof (esp_aead_t); - esp_aad_fill (aad, esp, sa); - nonce = (esp_gcm_nonce_t *) (aad - sizeof (*nonce)); - nonce->salt = sa->salt; - nonce->iv = *pkt_iv = clib_host_to_net_u64 (sa->gcm_iv_counter++); - iv = (u8 *) nonce; - key_index = sa->crypto_key_index; + if (ipsec_sa_is_set_IS_CTR (sa)) + { + ASSERT (sizeof (u64) == iv_sz); + /* construct nonce in a scratch space in front of the IP header */ + esp_ctr_nonce_t *nonce = (esp_ctr_nonce_t *) (payload - sizeof (u64) - + hdr_len - sizeof (*nonce)); + u64 *pkt_iv = (u64 *) (payload - sizeof (u64)); - if (lb != b) + if (ipsec_sa_is_set_IS_AEAD (sa)) { - /* chain */ - flag |= VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS; - tag = vlib_buffer_get_tail (lb) - icv_sz; - crypto_total_len = esp_encrypt_chain_crypto (vm, ptd, sa, b, lb, - icv_sz, payload, - payload_len, 0); + /* constuct aad in a scratch space in front of the nonce */ + aad = (u8 *) nonce - sizeof (esp_aead_t); + esp_aad_fill (aad, esp, sa); + key_index = sa->crypto_key_index; + } + else + { + nonce->ctr = clib_host_to_net_u32 (1); } - goto out; - } - /* cipher then hash */ - iv = payload - iv_sz; - integ_start_offset = crypto_start_offset - iv_sz - sizeof (esp_header_t); - integ_total_len += iv_sz + sizeof (esp_header_t); - flag |= VNET_CRYPTO_OP_FLAG_INIT_IV; - key_index = sa->linked_key_index; + nonce->salt = sa->salt; + nonce->iv = *pkt_iv = clib_host_to_net_u64 
(sa->ctr_iv_counter++); + iv = (u8 *) nonce; + } + else + { + iv = payload - iv_sz; + flag |= VNET_CRYPTO_OP_FLAG_INIT_IV; + } - if (b != lb) + if (lb != b) { + /* chain */ flag |= VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS; - crypto_total_len = esp_encrypt_chain_crypto (vm, ptd, sa, b, lb, - icv_sz, payload, - payload_len, 0); tag = vlib_buffer_get_tail (lb) - icv_sz; - integ_total_len = esp_encrypt_chain_integ (vm, ptd, sa, b, lb, icv_sz, - payload - iv_sz - - sizeof (esp_header_t), - payload_len + iv_sz + - sizeof (esp_header_t), - tag, 0); + crypto_total_len = esp_encrypt_chain_crypto (vm, ptd, sa, b, lb, icv_sz, + payload, payload_len, 0); } - else if (ipsec_sa_is_set_USE_ESN (sa) && !ipsec_sa_is_set_IS_AEAD (sa)) + + if (sa->integ_op_id) { - u32 seq_hi = clib_net_to_host_u32 (sa->seq_hi); - clib_memcpy_fast (tag, &seq_hi, sizeof (seq_hi)); - integ_total_len += sizeof (seq_hi); + integ_start_offset = crypto_start_offset - iv_sz - sizeof (esp_header_t); + integ_total_len += iv_sz + sizeof (esp_header_t); + + if (b != lb) + { + integ_total_len = esp_encrypt_chain_integ ( + vm, ptd, sa, b, lb, icv_sz, + payload - iv_sz - sizeof (esp_header_t), + payload_len + iv_sz + sizeof (esp_header_t), tag, 0); + } + else if (ipsec_sa_is_set_USE_ESN (sa)) + { + u32 seq_hi = clib_net_to_host_u32 (sa->seq_hi); + clib_memcpy_fast (tag, &seq_hi, sizeof (seq_hi)); + integ_total_len += sizeof (seq_hi); + } } -out: - return vnet_crypto_async_add_to_frame (vm, async_frame, key_index, - crypto_total_len, - integ_total_len - crypto_total_len, - crypto_start_offset, - integ_start_offset, bi, async_next, - iv, tag, aad, flag); + /* this always succeeds because we know the frame is not full */ + vnet_crypto_async_add_to_frame (vm, async_frame, key_index, crypto_total_len, + integ_total_len - crypto_total_len, + crypto_start_offset, integ_start_offset, bi, + async_next, iv, tag, aad, flag); } always_inline uword @@ -567,7 +572,6 @@ esp_encrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node, u32 n_left = frame->n_vectors; vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs; u16 nexts[VLIB_FRAME_SIZE], *next = nexts; - esp_gcm_nonce_t nonces[VLIB_FRAME_SIZE], *nonce = nonces; u32 thread_index = vm->thread_index; u16 buffer_data_size = vlib_buffer_get_default_data_size (vm); u32 current_sa_index = ~0, current_sa_packets = 0; @@ -577,9 +581,9 @@ esp_encrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node, vlib_buffer_t *lb; vnet_crypto_op_t **crypto_ops = &ptd->crypto_ops; vnet_crypto_op_t **integ_ops = &ptd->integ_ops; - vnet_crypto_async_frame_t *async_frame = 0; + vnet_crypto_async_frame_t *async_frames[VNET_CRYPTO_ASYNC_OP_N_IDS]; int is_async = im->async_mode; - vnet_crypto_async_op_id_t last_async_op = ~0; + vnet_crypto_async_op_id_t async_op = ~0; u16 drop_next = (lt == VNET_LINK_IP6 ? ESP_ENCRYPT_NEXT_DROP6 : (lt == VNET_LINK_IP4 ? 
ESP_ENCRYPT_NEXT_DROP4 : @@ -598,7 +602,9 @@ esp_encrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node, vec_reset_length (ptd->chained_crypto_ops); vec_reset_length (ptd->chained_integ_ops); } + vec_reset_length (ptd->async_frames); vec_reset_length (ptd->chunks); + clib_memset (async_frames, 0, sizeof (async_frames)); while (n_left > 0) { @@ -641,7 +647,7 @@ esp_encrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node, current_sa_bytes); current_sa_packets = current_sa_bytes = 0; - sa0 = pool_elt_at_index (im->sad, sa_index0); + sa0 = ipsec_sa_get (sa_index0); /* fetch the second cacheline ASAP */ CLIB_PREFETCH (sa0->cacheline1, CLIB_CACHE_LINE_BYTES, LOAD); @@ -651,35 +657,43 @@ esp_encrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node, esp_align = sa0->esp_block_align; icv_sz = sa0->integ_icv_size; iv_sz = sa0->crypto_iv_size; + } + + if (is_async) + { + async_op = sa0->crypto_async_enc_op_id; - /* submit frame when op_id is different then the old one */ - if (is_async && sa0->crypto_async_enc_op_id != last_async_op) + if (PREDICT_FALSE (async_op == 0)) { - if (async_frame && async_frame->n_elts) - { - if (vnet_crypto_async_submit_open_frame (vm, async_frame)) - esp_async_recycle_failed_submit (async_frame, b, from, - nexts, &n_async_drop, - drop_next, - ESP_ENCRYPT_ERROR_CRYPTO_ENGINE_ERROR); - } - async_frame = - vnet_crypto_async_get_frame (vm, sa0->crypto_async_enc_op_id); - last_async_op = sa0->crypto_async_enc_op_id; + esp_set_next_index (is_async, from, nexts, from[b - bufs], + &n_async_drop, drop_next, next); + goto trace; + } + + /* get a frame for this op if we don't yet have one or it's full + */ + if (NULL == async_frames[async_op] || + vnet_crypto_async_frame_is_full (async_frames[async_op])) + { + async_frames[async_op] = + vnet_crypto_async_get_frame (vm, async_op); + /* Save the frame to the list we'll submit at the end */ + vec_add1 (ptd->async_frames, async_frames[async_op]); } } - if (PREDICT_FALSE (~0 == sa0->encrypt_thread_index)) + if (PREDICT_FALSE (~0 == sa0->thread_index)) { /* this is the first packet to use this SA, claim the SA * for this thread. 
this could happen simultaneously on * another thread */ - clib_atomic_cmp_and_swap (&sa0->encrypt_thread_index, ~0, + clib_atomic_cmp_and_swap (&sa0->thread_index, ~0, ipsec_sa_assign_thread (thread_index)); } - if (PREDICT_FALSE (thread_index != sa0->encrypt_thread_index)) + if (PREDICT_FALSE (thread_index != sa0->thread_index)) { + vnet_buffer (b[0])->ipsec.thread_index = sa0->thread_index; esp_set_next_index (is_async, from, nexts, from[b - bufs], &n_async_drop, handoff_next, next); goto trace; @@ -763,22 +777,22 @@ esp_encrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node, else if (VNET_LINK_IP4 == lt) { *next_hdr_ptr = IP_PROTOCOL_IP_IN_IP; - tunnel_encap_fixup_4o6 (sa0->tunnel_flags, - (const ip4_header_t *) payload, - ip6); + tunnel_encap_fixup_4o6 (sa0->tunnel_flags, b[0], + (const ip4_header_t *) payload, ip6); } else if (VNET_LINK_MPLS == lt) { *next_hdr_ptr = IP_PROTOCOL_MPLS_IN_IP; tunnel_encap_fixup_mplso6 ( - sa0->tunnel_flags, (const mpls_unicast_header_t *) payload, - ip6); + sa0->tunnel_flags, b[0], + (const mpls_unicast_header_t *) payload, ip6); } else ASSERT (0); len = payload_len_total + hdr_len - len; ip6->payload_length = clib_net_to_host_u16 (len); + b[0]->flags |= VNET_BUFFER_F_LOCALLY_ORIGINATED; } else { @@ -824,6 +838,7 @@ esp_encrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node, } else next[0] = ESP_ENCRYPT_NEXT_INTERFACE_OUTPUT; + b[0]->flags |= VNET_BUFFER_F_LOCALLY_ORIGINATED; } else /* transport mode */ { @@ -941,36 +956,14 @@ esp_encrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node, esp->seq = clib_net_to_host_u32 (sa0->seq); if (is_async) - { - if (PREDICT_FALSE (sa0->crypto_async_enc_op_id == 0)) - { - esp_set_next_index (is_async, from, nexts, from[b - bufs], - &n_async_drop, drop_next, next); - goto trace; - } - - if (esp_prepare_async_frame (vm, ptd, &async_frame, sa0, b[0], esp, - payload, payload_len, iv_sz, - icv_sz, from[b - bufs], next[0], - hdr_len, async_next, lb)) - { - /* The fail only caused by submission, free the whole frame. 
*/ - if (async_frame->n_elts) - esp_async_recycle_failed_submit (async_frame, b, from, nexts, - &n_async_drop, drop_next, - ESP_ENCRYPT_ERROR_CRYPTO_ENGINE_ERROR); - b[0]->error = ESP_ENCRYPT_ERROR_CRYPTO_ENGINE_ERROR; - esp_set_next_index (1, from, nexts, from[b - bufs], - &n_async_drop, drop_next, next); - goto trace; - } - } + esp_prepare_async_frame (vm, ptd, async_frames[async_op], sa0, b[0], + esp, payload, payload_len, iv_sz, icv_sz, + from[b - bufs], next[0], hdr_len, async_next, + lb); else - { esp_prepare_sync_op (vm, ptd, crypto_ops, integ_ops, sa0, payload, payload_len, iv_sz, icv_sz, bufs, b, lb, - hdr_len, esp, nonce++); - } + hdr_len, esp); vlib_buffer_advance (b[0], 0LL - hdr_len); @@ -1008,29 +1001,36 @@ esp_encrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node, esp_process_ops (vm, node, ptd->integ_ops, bufs, nexts, drop_next); esp_process_chained_ops (vm, node, ptd->chained_integ_ops, bufs, nexts, ptd->chunks, drop_next); + + vlib_node_increment_counter ( + vm, node->node_index, ESP_ENCRYPT_ERROR_RX_PKTS, frame->n_vectors); + + vlib_buffer_enqueue_to_next (vm, node, from, nexts, frame->n_vectors); } else { - if (async_frame && async_frame->n_elts) + /* submit all of the open frames */ + vnet_crypto_async_frame_t **async_frame; + + vec_foreach (async_frame, ptd->async_frames) { - if (vnet_crypto_async_submit_open_frame (vm, async_frame) < 0) - esp_async_recycle_failed_submit (async_frame, b, from, nexts, - &n_async_drop, drop_next, - ESP_ENCRYPT_ERROR_CRYPTO_ENGINE_ERROR); + if (vnet_crypto_async_submit_open_frame (vm, *async_frame) < 0) + { + esp_async_recycle_failed_submit ( + *async_frame, b, from, nexts, &n_async_drop, drop_next, + ESP_ENCRYPT_ERROR_CRYPTO_ENGINE_ERROR); + vnet_crypto_async_reset_frame (*async_frame); + vnet_crypto_async_free_frame (vm, *async_frame); + } } + vlib_node_increment_counter (vm, node->node_index, ESP_ENCRYPT_ERROR_RX_PKTS, frame->n_vectors); if (n_async_drop) vlib_buffer_enqueue_to_next (vm, node, from, nexts, n_async_drop); - - return frame->n_vectors; } - vlib_node_increment_counter (vm, node->node_index, - ESP_ENCRYPT_ERROR_RX_PKTS, frame->n_vectors); - - vlib_buffer_enqueue_to_next (vm, node, from, nexts, frame->n_vectors); return frame->n_vectors; } @@ -1471,6 +1471,31 @@ VLIB_REGISTER_NODE (esp6_no_crypto_tun_node) = }; /* *INDENT-ON* */ +#ifndef CLIB_MARCH_VARIANT + +static clib_error_t * +esp_encrypt_init (vlib_main_t *vm) +{ + ipsec_main_t *im = &ipsec_main; + + im->esp4_enc_fq_index = + vlib_frame_queue_main_init (esp4_encrypt_node.index, 0); + im->esp6_enc_fq_index = + vlib_frame_queue_main_init (esp6_encrypt_node.index, 0); + im->esp4_enc_tun_fq_index = + vlib_frame_queue_main_init (esp4_encrypt_tun_node.index, 0); + im->esp6_enc_tun_fq_index = + vlib_frame_queue_main_init (esp6_encrypt_tun_node.index, 0); + im->esp_mpls_enc_tun_fq_index = + vlib_frame_queue_main_init (esp_mpls_encrypt_tun_node.index, 0); + + return 0; +} + +VLIB_INIT_FUNCTION (esp_encrypt_init); + +#endif + /* * fd.io coding-style-patch-verification: ON *
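
The hunks above drop the GCM-only esp_gcm_nonce_t and instead build a CTR-style nonce in buffer headroom, in front of the headers that were just prepended, while the per-packet 64-bit IV is still written into the packet immediately ahead of the payload. The nonce type itself lives in esp.h rather than in this file, so the definition below is an assumption reconstructed from how its fields are used in this diff; it lines up with the RFC 3686 counter block (salt || IV || block counter) and, for its first 12 bytes, with the RFC 4106 GCM nonce (salt || IV).

    /* Assumed shape of esp_ctr_nonce_t (defined in esp.h, not in this diff). */
    typedef struct
    {
      u32 salt; /* per-SA salt, copied from sa->salt */
      u64 iv;   /* per-packet IV from sa->ctr_iv_counter, also sent on the wire */
      u32 ctr;  /* initial block counter, set to 1 for plain CTR (RFC 3686) */
    } __clib_packed esp_ctr_nonce_t;

    STATIC_ASSERT_SIZEOF (esp_ctr_nonce_t, 16);

For plain AES-CTR the whole 16-byte structure is the initial counter block, which is why the non-AEAD branch sets nonce->ctr to network-order 1. For AES-GCM the crypto engine manages the block counter itself and only consumes salt plus IV as the 12-byte nonce, so the AEAD branch leaves nonce->ctr untouched and instead lays the AAD (filled by esp_aad_fill) directly in front of the nonce, giving the engine a single contiguous scratch area.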
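
The changes in esp_encrypt_inline replace the single open async frame (previously submitted whenever the SA's op id changed) with one open frame per async op id: a frame is opened the first time an op id is seen or when the current one fills up, every opened frame is remembered on ptd->async_frames, and all of them are submitted once after the packet loop. The helper below is hypothetical (the node inlines this logic in the diff), but it is condensed only from calls that appear above, as a sketch of the pattern:

    /* Hypothetical helper: return the open frame for this async op id,
     * opening a new one when none exists yet or the current one is full. */
    static_always_inline vnet_crypto_async_frame_t *
    esp_async_frame_for_op (vlib_main_t *vm, ipsec_per_thread_data_t *ptd,
                            vnet_crypto_async_frame_t **async_frames,
                            vnet_crypto_async_op_id_t async_op)
    {
      if (NULL == async_frames[async_op] ||
          vnet_crypto_async_frame_is_full (async_frames[async_op]))
        {
          async_frames[async_op] = vnet_crypto_async_get_frame (vm, async_op);
          /* remember it so every open frame can be submitted after the loop */
          vec_add1 (ptd->async_frames, async_frames[async_op]);
        }
      return async_frames[async_op];
    }

Because esp_prepare_async_frame is now only ever called with a frame that is known not to be full, vnet_crypto_async_add_to_frame cannot fail and the function no longer needs a return value; submission failures are handled once, after the loop, where each frame on ptd->async_frames is submitted and, on error, its buffers are recycled with esp_async_recycle_failed_submit and the frame is freed.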
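
esp_aad_fill, called in both the sync and async paths above, is also defined in esp.h and not shown in this diff. Based on the RFC 4106 definition of the additional authenticated data for ESP with AES-GCM, it writes the SPI followed by the sequence number: 8 bytes without extended sequence numbers, 12 bytes (SPI, seq_hi, seq_lo) when ESN is enabled. The sketch below is an assumption of that behaviour, not the actual implementation; the struct shape and the byte-order handling of sa->seq_hi in particular are guesses.

    /* Assumed AAD container from esp.h: room for SPI plus a 64-bit ESN. */
    typedef CLIB_PACKED (struct { u32 data[3]; }) esp_aead_t;

    /* Hedged sketch of what esp_aad_fill is expected to do (RFC 4106). */
    static_always_inline u8
    esp_aad_fill_sketch (u8 *data, const esp_header_t *esp, const ipsec_sa_t *sa)
    {
      esp_aead_t *aad = (esp_aead_t *) data;

      aad->data[0] = esp->spi; /* already in network byte order */
      if (ipsec_sa_is_set_USE_ESN (sa))
        {
          aad->data[1] = clib_host_to_net_u32 (sa->seq_hi);
          aad->data[2] = esp->seq;
          return 12;
        }
      aad->data[1] = esp->seq;
      return 8;
    }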