X-Git-Url: https://gerrit.fd.io/r/gitweb?a=blobdiff_plain;f=src%2Fvnet%2Fipsec%2Fesp_decrypt.c;h=ea5a99c6fa11ec7307117c29681ea4aa5336250f;hb=f16e9a550;hp=ff9fc0c2d374af6b606740533382d4317b2a60ec;hpb=123b5ebb9816e520d3029146ee233826f4e974bc;p=vpp.git

diff --git a/src/vnet/ipsec/esp_decrypt.c b/src/vnet/ipsec/esp_decrypt.c
index ff9fc0c2d37..ea5a99c6fa1 100644
--- a/src/vnet/ipsec/esp_decrypt.c
+++ b/src/vnet/ipsec/esp_decrypt.c
@@ -25,15 +25,15 @@
 #include <vnet/ipsec/ipsec_io.h>
 #include <vnet/ipsec/ipsec_tun.h>
 
-#include <vnet/gre/gre.h>
+#include <vnet/gre/packet.h>
 
-#define foreach_esp_decrypt_next \
-_(DROP, "error-drop") \
-_(IP4_INPUT, "ip4-input-no-checksum") \
-_(IP6_INPUT, "ip6-input") \
-_(L2_INPUT, "l2-input") \
-_(HANDOFF, "handoff") \
-_(PENDING, "pending")
+#define foreach_esp_decrypt_next \
+  _ (DROP, "error-drop") \
+  _ (IP4_INPUT, "ip4-input-no-checksum") \
+  _ (IP6_INPUT, "ip6-input") \
+  _ (L2_INPUT, "l2-input") \
+  _ (MPLS_INPUT, "mpls-input") \
+  _ (HANDOFF, "handoff")
 
 #define _(v, s) ESP_DECRYPT_NEXT_##v,
 typedef enum
@@ -43,11 +43,12 @@ typedef enum
   ESP_DECRYPT_N_NEXT,
 } esp_decrypt_next_t;
 
-#define foreach_esp_decrypt_post_next \
-_(DROP, "error-drop") \
-_(IP4_INPUT, "ip4-input-no-checksum") \
-_(IP6_INPUT, "ip6-input") \
-_(L2_INPUT, "l2-input")
+#define foreach_esp_decrypt_post_next \
+  _ (DROP, "error-drop") \
+  _ (IP4_INPUT, "ip4-input-no-checksum") \
+  _ (IP6_INPUT, "ip6-input") \
+  _ (MPLS_INPUT, "mpls-input") \
+  _ (L2_INPUT, "l2-input")
 
 #define _(v, s) ESP_DECRYPT_POST_NEXT_##v,
 typedef enum
@@ -57,20 +58,20 @@ typedef enum
   ESP_DECRYPT_POST_N_NEXT,
 } esp_decrypt_post_next_t;
 
-#define foreach_esp_decrypt_error \
- _(RX_PKTS, "ESP pkts received") \
- _(RX_POST_PKTS, "ESP-POST pkts received") \
- _(DECRYPTION_FAILED, "ESP decryption failed") \
- _(INTEG_ERROR, "Integrity check failed") \
- _(CRYPTO_ENGINE_ERROR, "crypto engine error (packet dropped)") \
- _(REPLAY, "SA replayed packet") \
- _(RUNT, "undersized packet") \
- _(NO_BUFFERS, "no buffers (packet dropped)") \
- _(OVERSIZED_HEADER, "buffer with oversized header (dropped)") \
- _(NO_TAIL_SPACE, "no enough buffer tail space (dropped)") \
- _(TUN_NO_PROTO, "no tunnel protocol") \
- _(UNSUP_PAYLOAD, "unsupported payload") \
-
+#define foreach_esp_decrypt_error \
+ _ (RX_PKTS, "ESP pkts received") \
+ _ (RX_POST_PKTS, "ESP-POST pkts received") \
+ _ (HANDOFF, "hand-off") \
+ _ (DECRYPTION_FAILED, "ESP decryption failed") \
+ _ (INTEG_ERROR, "Integrity check failed") \
+ _ (CRYPTO_ENGINE_ERROR, "crypto engine error (packet dropped)") \
+ _ (REPLAY, "SA replayed packet") \
+ _ (RUNT, "undersized packet") \
+ _ (NO_BUFFERS, "no buffers (packet dropped)") \
+ _ (OVERSIZED_HEADER, "buffer with oversized header (dropped)") \
+ _ (NO_TAIL_SPACE, "not enough buffer tail space (dropped)") \
+ _ (TUN_NO_PROTO, "no tunnel protocol") \
+ _ (UNSUP_PAYLOAD, "unsupported payload")
 
 typedef enum
 {
@@ -153,7 +154,7 @@ esp_process_chained_ops (vlib_main_t * vm, vlib_node_runtime_t * node,
   vnet_crypto_op_t *op = ops;
   u32 n_fail, n_ops = vec_len (ops);
 
-  if (n_ops == 0)
+  if (PREDICT_TRUE (n_ops == 0))
     return;
 
   n_fail = n_ops - vnet_crypto_process_chained_ops (vm, op, chunks, n_ops);
@@ -203,6 +204,7 @@ esp_remove_tail (vlib_main_t * vm, vlib_buffer_t * b, vlib_buffer_t * last,
    return pointer to it */
 static_always_inline u8 *
 esp_move_icv (vlib_main_t * vm, vlib_buffer_t * first,
+              esp_decrypt_packet_data_t * pd,
               esp_decrypt_packet_data2_t * pd2, u16 icv_sz, u16 * dif)
 {
   vlib_buffer_t *before_last, *bp;
@@ -221,6 +223,8 @@ esp_move_icv (vlib_main_t * vm, vlib_buffer_t * first,
   clib_memcpy_fast (lb_curr, 
vlib_buffer_get_tail (before_last) - first_sz, first_sz); before_last->current_length -= first_sz; + if (before_last == first) + pd->current_length -= first_sz; clib_memset (vlib_buffer_get_tail (before_last), 0, first_sz); if (dif) dif[0] = first_sz; @@ -269,11 +273,12 @@ esp_insert_esn (vlib_main_t * vm, ipsec_sa_t * sa, static_always_inline u8 * esp_move_icv_esn (vlib_main_t * vm, vlib_buffer_t * first, + esp_decrypt_packet_data_t * pd, esp_decrypt_packet_data2_t * pd2, u16 icv_sz, ipsec_sa_t * sa, u8 * extra_esn, u32 * len) { u16 dif = 0; - u8 *digest = esp_move_icv (vm, first, pd2, icv_sz, &dif); + u8 *digest = esp_move_icv (vm, first, pd, pd2, icv_sz, &dif); if (dif) *len -= dif; @@ -399,6 +404,7 @@ esp_decrypt_chain_integ (vlib_main_t * vm, ipsec_per_thread_data_t * ptd, static_always_inline u32 esp_decrypt_chain_crypto (vlib_main_t * vm, ipsec_per_thread_data_t * ptd, + esp_decrypt_packet_data_t * pd, esp_decrypt_packet_data2_t * pd2, ipsec_sa_t * sa0, vlib_buffer_t * b, u8 icv_sz, u8 * start, u32 start_len, u8 ** tag, u16 * n_ch) @@ -425,7 +431,7 @@ esp_decrypt_chain_crypto (vlib_main_t * vm, ipsec_per_thread_data_t * ptd, if (pd2->lb->current_length < icv_sz) { u16 dif = 0; - *tag = esp_move_icv (vm, b, pd2, icv_sz, &dif); + *tag = esp_move_icv (vm, b, pd, pd2, icv_sz, &dif); /* this chunk does not contain crypto data */ n_chunks -= 1; @@ -507,7 +513,7 @@ esp_decrypt_prepare_sync_op (vlib_main_t * vm, vlib_node_runtime_t * node, { u8 extra_esn = 0; op->digest = - esp_move_icv_esn (vm, b, pd2, icv_sz, sa0, + esp_move_icv_esn (vm, b, pd, pd2, icv_sz, sa0, &extra_esn, &op->len); if (extra_esn) @@ -559,34 +565,29 @@ esp_decrypt_prepare_sync_op (vlib_main_t * vm, vlib_node_runtime_t * node, op->key_index = sa0->crypto_key_index; op->iv = payload; - if (ipsec_sa_is_set_IS_AEAD (sa0)) + if (ipsec_sa_is_set_IS_CTR (sa0)) { - esp_header_t *esp0; - esp_aead_t *aad; - u8 *scratch; - - /* - * construct the AAD and the nonce (Salt || IV) in a scratch - * space in front of the IP header. 
-	   */
-	  scratch = payload - esp_sz;
-	  esp0 = (esp_header_t *) (scratch);
-
-	  scratch -= (sizeof (*aad) + pd->hdr_sz);
-	  op->aad = scratch;
-
-	  op->aad_len = esp_aad_fill (op->aad, esp0, sa0);
-
-	  /*
-	   * we don't need to refer to the ESP header anymore so we
-	   * can overwrite it with the salt and use the IV where it is
-	   * to form the nonce = (Salt + IV)
-	   */
-	  op->iv -= sizeof (sa0->salt);
-	  clib_memcpy_fast (op->iv, &sa0->salt, sizeof (sa0->salt));
-
-	  op->tag = payload + len;
-	  op->tag_len = 16;
+	  /* construct nonce in a scratch space in front of the IP header */
+	  esp_ctr_nonce_t *nonce =
+	    (esp_ctr_nonce_t *) (payload - esp_sz - pd->hdr_sz -
+				 sizeof (*nonce));
+	  if (ipsec_sa_is_set_IS_AEAD (sa0))
+	    {
+	      /* construct aad in a scratch space in front of the nonce */
+	      esp_header_t *esp0 = (esp_header_t *) (payload - esp_sz);
+	      op->aad = (u8 *) nonce - sizeof (esp_aead_t);
+	      op->aad_len = esp_aad_fill (op->aad, esp0, sa0);
+	      op->tag = payload + len;
+	      op->tag_len = 16;
+	    }
+	  else
+	    {
+	      nonce->ctr = clib_host_to_net_u32 (1);
+	    }
+	  nonce->salt = sa0->salt;
+	  ASSERT (sizeof (u64) == iv_sz);
+	  nonce->iv = *(u64 *) op->iv;
+	  op->iv = (u8 *) nonce;
 	}
       op->src = op->dst = payload += iv_sz;
       op->len = len - iv_sz;
@@ -597,7 +598,7 @@ esp_decrypt_prepare_sync_op (vlib_main_t * vm, vlib_node_runtime_t * node,
 	  /* buffer is chained */
 	  op->flags |= VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS;
 	  op->chunk_index = vec_len (ptd->chunks);
-	  esp_decrypt_chain_crypto (vm, ptd, pd2, sa0, b, icv_sz,
+	  esp_decrypt_chain_crypto (vm, ptd, pd, pd2, sa0, b, icv_sz,
				    payload, len - pd->iv_sz + pd->icv_sz,
				    &op->tag, &op->n_chunks);
 	}
@@ -606,17 +607,14 @@ esp_decrypt_prepare_sync_op (vlib_main_t * vm, vlib_node_runtime_t * node,
 	}
 }
 
-static_always_inline int
-esp_decrypt_prepare_async_frame (vlib_main_t * vm,
-				 vlib_node_runtime_t * node,
-				 ipsec_per_thread_data_t * ptd,
-				 vnet_crypto_async_frame_t ** f,
-				 ipsec_sa_t * sa0, u8 * payload, u16 len,
-				 u8 icv_sz, u8 iv_sz,
-				 esp_decrypt_packet_data_t * pd,
-				 esp_decrypt_packet_data2_t * pd2, u32 bi,
-				 vlib_buffer_t * b, u16 * next,
-				 u16 async_next)
+static_always_inline esp_decrypt_error_t
+esp_decrypt_prepare_async_frame (vlib_main_t *vm, vlib_node_runtime_t *node,
+				 ipsec_per_thread_data_t *ptd,
+				 vnet_crypto_async_frame_t *f, ipsec_sa_t *sa0,
+				 u8 *payload, u16 len, u8 icv_sz, u8 iv_sz,
+				 esp_decrypt_packet_data_t *pd,
+				 esp_decrypt_packet_data2_t *pd2, u32 bi,
+				 vlib_buffer_t *b, u16 *next, u16 async_next)
 {
   const u8 esp_sz = sizeof (esp_header_t);
   u32 current_protect_index = vnet_buffer (b)->ipsec.protect_index;
@@ -647,7 +645,7 @@ esp_decrypt_prepare_async_frame (vlib_main_t * vm,
       if (pd2->lb->current_length < icv_sz)
 	{
 	  u8 extra_esn = 0;
-	  tag = esp_move_icv_esn (vm, b, pd2, icv_sz, sa0,
+	  tag = esp_move_icv_esn (vm, b, pd, pd2, icv_sz, sa0,
				  &extra_esn, &integ_len);
 
	  if (extra_esn)
@@ -676,9 +674,7 @@ esp_decrypt_prepare_async_frame (vlib_main_t * vm,
					    0, &integ_len) < 0)
	    {
	      /* allocate buffer failed, will not add to frame and drop */
-	      b->error = node->errors[ESP_DECRYPT_ERROR_NO_BUFFERS];
-	      next[0] = ESP_DECRYPT_NEXT_DROP;
-	      return 0;
+	      return (ESP_DECRYPT_ERROR_NO_BUFFERS);
	    }
	}
       else
@@ -693,32 +689,27 @@ out:
   len -= esp_sz;
   iv = payload;
 
-  if (ipsec_sa_is_set_IS_AEAD (sa0))
+  if (ipsec_sa_is_set_IS_CTR (sa0))
     {
-      esp_header_t *esp0;
-      u8 *scratch;
-
-      /*
-       * construct the AAD and the nonce (Salt || IV) in a scratch
-       * space in front of the IP header. 
-       */
-      scratch = payload - esp_sz;
-      esp0 = (esp_header_t *) (scratch);
-
-      scratch -= (sizeof (esp_aead_t) + pd->hdr_sz);
-      aad = scratch;
-
-      esp_aad_fill (aad, esp0, sa0);
-
-      /*
-       * we don't need to refer to the ESP header anymore so we
-       * can overwrite it with the salt and use the IV where it is
-       * to form the nonce = (Salt + IV)
-       */
-      iv -= sizeof (sa0->salt);
-      clib_memcpy_fast (iv, &sa0->salt, sizeof (sa0->salt));
-
-      tag = payload + len;
+      /* construct nonce in a scratch space in front of the IP header */
+      esp_ctr_nonce_t *nonce =
+	(esp_ctr_nonce_t *) (payload - esp_sz - pd->hdr_sz - sizeof (*nonce));
+      if (ipsec_sa_is_set_IS_AEAD (sa0))
+	{
+	  /* construct aad in a scratch space in front of the nonce */
+	  esp_header_t *esp0 = (esp_header_t *) (payload - esp_sz);
+	  aad = (u8 *) nonce - sizeof (esp_aead_t);
+	  esp_aad_fill (aad, esp0, sa0);
+	  tag = payload + len;
+	}
+      else
+	{
+	  nonce->ctr = clib_host_to_net_u32 (1);
+	}
+      nonce->salt = sa0->salt;
+      ASSERT (sizeof (u64) == iv_sz);
+      nonce->iv = *(u64 *) iv;
+      iv = (u8 *) nonce;
     }
 
   crypto_start_offset = (payload += iv_sz) - b->data;
@@ -729,7 +720,7 @@ out:
 
       /* buffer is chained */
       flags |= VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS;
-      crypto_len = esp_decrypt_chain_crypto (vm, ptd, pd2, sa0, b, icv_sz,
+      crypto_len = esp_decrypt_chain_crypto (vm, ptd, pd, pd2, sa0, b, icv_sz,
					     payload,
					     len - pd->iv_sz + pd->icv_sz,
					     &tag, 0);
@@ -738,15 +729,14 @@ out:
   *async_pd = *pd;
   *async_pd2 = *pd2;
   pd->protect_index = current_protect_index;
-  next[0] = ESP_DECRYPT_NEXT_PENDING;
 
   /* for AEAD integ_len - crypto_len will be negative, it is ok since it
    * is ignored by the engine. */
-  return vnet_crypto_async_add_to_frame (vm, f, key_index, crypto_len,
-					 integ_len - crypto_len,
-					 crypto_start_offset,
-					 integ_start_offset,
-					 bi, async_next, iv, tag, aad, flags);
+  vnet_crypto_async_add_to_frame (
+    vm, f, key_index, crypto_len, integ_len - crypto_len, crypto_start_offset,
+    integ_start_offset, bi, async_next, iv, tag, aad, flags);
+
+  return (ESP_DECRYPT_ERROR_RX_PKTS);
 }
 
 static_always_inline void
@@ -755,8 +745,7 @@ esp_decrypt_post_crypto (vlib_main_t * vm, vlib_node_runtime_t * node,
			 esp_decrypt_packet_data2_t * pd2, vlib_buffer_t * b,
			 u16 * next, int is_ip6, int is_tun, int is_async)
 {
-  ipsec_main_t *im = &ipsec_main;
-  ipsec_sa_t *sa0 = vec_elt_at_index (im->sad, pd->sa_index);
+  ipsec_sa_t *sa0 = ipsec_sa_get (pd->sa_index);
   vlib_buffer_t *lb = b;
   const u8 esp_sz = sizeof (esp_header_t);
   const u8 tun_flags = IPSEC_SA_FLAG_IS_TUNNEL | IPSEC_SA_FLAG_IS_TUNNEL_V6;
@@ -910,6 +899,13 @@ esp_decrypt_post_crypto (vlib_main_t * vm, vlib_node_runtime_t * node,
	  b->current_length = pd->current_length - adv;
	  esp_remove_tail (vm, b, lb, tail);
	}
+      else if (next_header == IP_PROTOCOL_MPLS_IN_IP)
+	{
+	  next[0] = ESP_DECRYPT_NEXT_MPLS_INPUT;
+	  b->current_data = pd->current_data + adv;
+	  b->current_length = pd->current_length - adv;
+	  esp_remove_tail (vm, b, lb, tail);
+	}
       else
	{
	  if (is_tun && next_header == IP_PROTOCOL_GRE)
@@ -1012,24 +1008,10 @@ esp_decrypt_post_crypto (vlib_main_t * vm, vlib_node_runtime_t * node,
	}
     }
 
-/* when submitting a frame is failed, drop all buffers in the frame */
-static_always_inline void
-esp_async_recycle_failed_submit (vnet_crypto_async_frame_t * f,
-				 vlib_buffer_t ** b, u16 * next)
-{
-  u32 n_drop = f->n_elts;
-  while (--n_drop)
-    {
-      (b - n_drop)[0]->error = ESP_DECRYPT_ERROR_CRYPTO_ENGINE_ERROR;
-      (next - n_drop)[0] = ESP_DECRYPT_NEXT_DROP;
-    }
-  vnet_crypto_async_reset_frame (f);
-}
-
 always_inline uword
-esp_decrypt_inline 
(vlib_main_t * vm, - vlib_node_runtime_t * node, vlib_frame_t * from_frame, - int is_ip6, int is_tun, u16 async_next) +esp_decrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node, + vlib_frame_t *from_frame, int is_ip6, int is_tun, + u16 async_next_node) { ipsec_main_t *im = &ipsec_main; u32 thread_index = vm->thread_index; @@ -1038,7 +1020,12 @@ esp_decrypt_inline (vlib_main_t * vm, u32 *from = vlib_frame_vector_args (from_frame); u32 n_left = from_frame->n_vectors; vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs; - u16 nexts[VLIB_FRAME_SIZE], *next = nexts; + vlib_buffer_t *sync_bufs[VLIB_FRAME_SIZE]; + u16 sync_nexts[VLIB_FRAME_SIZE], *sync_next = sync_nexts, n_sync = 0; + u16 async_nexts[VLIB_FRAME_SIZE], *async_next = async_nexts, n_async = 0; + u16 noop_nexts[VLIB_FRAME_SIZE], *noop_next = noop_nexts, n_noop = 0; + u32 sync_bi[VLIB_FRAME_SIZE]; + u32 noop_bi[VLIB_FRAME_SIZE]; esp_decrypt_packet_data_t pkt_data[VLIB_FRAME_SIZE], *pd = pkt_data; esp_decrypt_packet_data2_t pkt_data2[VLIB_FRAME_SIZE], *pd2 = pkt_data2; esp_decrypt_packet_data_t cpd = { }; @@ -1048,9 +1035,10 @@ esp_decrypt_inline (vlib_main_t * vm, vnet_crypto_op_t _op, *op = &_op; vnet_crypto_op_t **crypto_ops = &ptd->crypto_ops; vnet_crypto_op_t **integ_ops = &ptd->integ_ops; - vnet_crypto_async_frame_t *async_frame = 0; int is_async = im->async_mode; - vnet_crypto_async_op_id_t last_async_op = ~0; + vnet_crypto_async_op_id_t async_op = ~0; + vnet_crypto_async_frame_t *async_frames[VNET_CRYPTO_ASYNC_OP_N_IDS]; + esp_decrypt_error_t err; vlib_get_buffers (vm, from, b, n_left); if (!is_async) @@ -1060,13 +1048,16 @@ esp_decrypt_inline (vlib_main_t * vm, vec_reset_length (ptd->chained_crypto_ops); vec_reset_length (ptd->chained_integ_ops); } + vec_reset_length (ptd->async_frames); vec_reset_length (ptd->chunks); - clib_memset_u16 (nexts, -1, n_left); + clib_memset (sync_nexts, -1, sizeof (sync_nexts)); + clib_memset (async_frames, 0, sizeof (async_frames)); while (n_left > 0) { u8 *payload; + err = ESP_DECRYPT_ERROR_RX_PKTS; if (n_left > 2) { u8 *p; @@ -1080,8 +1071,9 @@ esp_decrypt_inline (vlib_main_t * vm, u32 n_bufs = vlib_buffer_chain_linearize (vm, b[0]); if (n_bufs == 0) { - b[0]->error = node->errors[ESP_DECRYPT_ERROR_NO_BUFFERS]; - next[0] = ESP_DECRYPT_NEXT_DROP; + err = ESP_DECRYPT_ERROR_NO_BUFFERS; + esp_set_next_index (b[0], node, err, n_noop, noop_nexts, + ESP_DECRYPT_NEXT_DROP); goto next; } @@ -1095,7 +1087,7 @@ esp_decrypt_inline (vlib_main_t * vm, current_sa_bytes = current_sa_pkts = 0; current_sa_index = vnet_buffer (b[0])->ipsec.sad_index; - sa0 = pool_elt_at_index (im->sad, current_sa_index); + sa0 = ipsec_sa_get (current_sa_index); /* fetch the second cacheline ASAP */ CLIB_PREFETCH (sa0->cacheline1, CLIB_CACHE_LINE_BYTES, LOAD); @@ -1103,33 +1095,40 @@ esp_decrypt_inline (vlib_main_t * vm, cpd.iv_sz = sa0->crypto_iv_size; cpd.flags = sa0->flags; cpd.sa_index = current_sa_index; + is_async = im->async_mode | ipsec_sa_is_set_IS_ASYNC (sa0); + } - /* submit frame when op_id is different then the old one */ - if (is_async && last_async_op != sa0->crypto_async_dec_op_id) + if (is_async) + { + async_op = sa0->crypto_async_dec_op_id; + + /* get a frame for this op if we don't yet have one or it's full + */ + if (NULL == async_frames[async_op] || + vnet_crypto_async_frame_is_full (async_frames[async_op])) { - if (async_frame && async_frame->n_elts) - { - if (vnet_crypto_async_submit_open_frame (vm, async_frame)) - esp_async_recycle_failed_submit (async_frame, b, next); - } - async_frame = - 
vnet_crypto_async_get_frame (vm, sa0->crypto_async_dec_op_id);
-	  last_async_op = sa0->crypto_async_dec_op_id;
+	      async_frames[async_op] =
+		vnet_crypto_async_get_frame (vm, async_op);
+	      /* Save the frame to the list we'll submit at the end */
+	      vec_add1 (ptd->async_frames, async_frames[async_op]);
	    }
	}
 
-      if (PREDICT_FALSE (~0 == sa0->decrypt_thread_index))
+      if (PREDICT_FALSE (~0 == sa0->thread_index))
	{
	  /* this is the first packet to use this SA, claim the SA
	   * for this thread. this could happen simultaneously on
	   * another thread */
-	  clib_atomic_cmp_and_swap (&sa0->decrypt_thread_index, ~0,
+	  clib_atomic_cmp_and_swap (&sa0->thread_index, ~0,
				    ipsec_sa_assign_thread (thread_index));
	}
 
-      if (PREDICT_TRUE (thread_index != sa0->decrypt_thread_index))
+      if (PREDICT_FALSE (thread_index != sa0->thread_index))
	{
-	  next[0] = ESP_DECRYPT_NEXT_HANDOFF;
+	  vnet_buffer (b[0])->ipsec.thread_index = sa0->thread_index;
+	  err = ESP_DECRYPT_ERROR_HANDOFF;
+	  esp_set_next_index (b[0], node, err, n_noop, noop_nexts,
+			      ESP_DECRYPT_NEXT_HANDOFF);
	  goto next;
	}
 
@@ -1160,15 +1159,17 @@
       /* anti-replay check */
       if (ipsec_sa_anti_replay_check (sa0, pd->seq))
	{
-	  b[0]->error = node->errors[ESP_DECRYPT_ERROR_REPLAY];
-	  next[0] = ESP_DECRYPT_NEXT_DROP;
+	  err = ESP_DECRYPT_ERROR_REPLAY;
+	  esp_set_next_index (b[0], node, err, n_noop, noop_nexts,
+			      ESP_DECRYPT_NEXT_DROP);
	  goto next;
	}
 
       if (pd->current_length < cpd.icv_sz + esp_sz + cpd.iv_sz)
	{
-	  b[0]->error = node->errors[ESP_DECRYPT_ERROR_RUNT];
-	  next[0] = ESP_DECRYPT_NEXT_DROP;
+	  err = ESP_DECRYPT_ERROR_RUNT;
+	  esp_set_next_index (b[0], node, err, n_noop, noop_nexts,
+			      ESP_DECRYPT_NEXT_DROP);
	  goto next;
	}
 
@@ -1178,31 +1179,44 @@
 
       if (is_async)
	{
-	  int ret = esp_decrypt_prepare_async_frame (vm, node, ptd,
-						     &async_frame,
-						     sa0, payload, len,
-						     cpd.icv_sz,
-						     cpd.iv_sz,
-						     pd, pd2,
-						     from[b - bufs],
-						     b[0], next, async_next);
-	  if (PREDICT_FALSE (ret < 0))
+
+	  err = esp_decrypt_prepare_async_frame (
+	    vm, node, ptd, async_frames[async_op], sa0, payload, len,
+	    cpd.icv_sz, cpd.iv_sz, pd, pd2, from[b - bufs], b[0], async_next,
+	    async_next_node);
+	  if (ESP_DECRYPT_ERROR_RX_PKTS != err)
	    {
-	      esp_async_recycle_failed_submit (async_frame, b, next);
-	      goto next;
+	      esp_set_next_index (b[0], node, err, n_noop, noop_nexts,
+				  ESP_DECRYPT_NEXT_DROP);
	    }
	}
       else
-	esp_decrypt_prepare_sync_op (vm, node, ptd, &crypto_ops, &integ_ops,
-				     op, sa0, payload, len, cpd.icv_sz,
-				     cpd.iv_sz, pd, pd2, b[0], next,
-				     b - bufs);
+	esp_decrypt_prepare_sync_op (
+	  vm, node, ptd, &crypto_ops, &integ_ops, op, sa0, payload, len,
+	  cpd.icv_sz, cpd.iv_sz, pd, pd2, b[0], sync_next, b - bufs);
       /* next */
     next:
+      if (ESP_DECRYPT_ERROR_RX_PKTS != err)
+	{
+	  noop_bi[n_noop] = from[b - bufs];
+	  n_noop++;
+	  noop_next++;
+	}
+      else if (!is_async)
+	{
+	  sync_bi[n_sync] = from[b - bufs];
+	  sync_bufs[n_sync] = b[0];
+	  n_sync++;
+	  sync_next++;
+	  pd += 1;
+	  pd2 += 1;
+	}
+      else
+	{
+	  n_async++;
+	  async_next++;
+	}
       n_left -= 1;
-      next += 1;
-      pd += 1;
-      pd2 += 1;
       b += 1;
     }
 
@@ -1211,44 +1225,47 @@
				   current_sa_index, current_sa_pkts,
				   current_sa_bytes);
 
-  if (is_async)
+  if (n_async)
    {
-      if (async_frame && async_frame->n_elts)
+      /* submit all of the open frames */
+      vnet_crypto_async_frame_t **async_frame;
+
+      vec_foreach (async_frame, ptd->async_frames)
	{
-	  if (vnet_crypto_async_submit_open_frame (vm, async_frame) < 0)
-	    esp_async_recycle_failed_submit (async_frame, b, next);
+	  if (vnet_crypto_async_submit_open_frame 
(vm, *async_frame) < 0)
+	    {
+	      n_noop += esp_async_recycle_failed_submit (
+		vm, *async_frame, node, ESP_DECRYPT_ERROR_CRYPTO_ENGINE_ERROR,
+		n_sync, noop_bi, noop_nexts, ESP_DECRYPT_NEXT_DROP);
+	      vnet_crypto_async_reset_frame (*async_frame);
+	      vnet_crypto_async_free_frame (vm, *async_frame);
+	    }
	}
-
-      /* no post process in async */
-      n_left = from_frame->n_vectors;
-      vlib_node_increment_counter (vm, node->node_index,
-				   ESP_DECRYPT_ERROR_RX_PKTS, n_left);
-      vlib_buffer_enqueue_to_next (vm, node, from, nexts, n_left);
-
-      return n_left;
    }
-  else
+
+  if (n_sync)
    {
-      esp_process_ops (vm, node, ptd->integ_ops, bufs, nexts,
+      esp_process_ops (vm, node, ptd->integ_ops, sync_bufs, sync_nexts,
		       ESP_DECRYPT_ERROR_INTEG_ERROR);
-      esp_process_chained_ops (vm, node, ptd->chained_integ_ops, bufs, nexts,
-			       ptd->chunks, ESP_DECRYPT_ERROR_INTEG_ERROR);
+      esp_process_chained_ops (vm, node, ptd->chained_integ_ops, sync_bufs,
+			       sync_nexts, ptd->chunks,
+			       ESP_DECRYPT_ERROR_INTEG_ERROR);
 
-      esp_process_ops (vm, node, ptd->crypto_ops, bufs, nexts,
+      esp_process_ops (vm, node, ptd->crypto_ops, sync_bufs, sync_nexts,
		       ESP_DECRYPT_ERROR_DECRYPTION_FAILED);
-      esp_process_chained_ops (vm, node, ptd->chained_crypto_ops, bufs, nexts,
-			       ptd->chunks,
+      esp_process_chained_ops (vm, node, ptd->chained_crypto_ops, sync_bufs,
			       sync_nexts, ptd->chunks,
			       ESP_DECRYPT_ERROR_DECRYPTION_FAILED);
    }
 
  /* Post decryption round - adjust packet data start and length and next
     node */
-  n_left = from_frame->n_vectors;
-  next = nexts;
+  n_left = n_sync;
+  sync_next = sync_nexts;
  pd = pkt_data;
  pd2 = pkt_data2;
-  b = bufs;
+  b = sync_bufs;
 
  while (n_left)
    {
@@ -1272,8 +1289,8 @@
      if (PREDICT_FALSE (b[0]->flags & VLIB_BUFFER_IS_TRACED))
	current_sa_index = vnet_buffer (b[0])->ipsec.sad_index;
 
-      if (next[0] >= ESP_DECRYPT_N_NEXT)
-	esp_decrypt_post_crypto (vm, node, pd, pd2, b[0], next, is_ip6,
+      if (sync_next[0] >= ESP_DECRYPT_N_NEXT)
+	esp_decrypt_post_crypto (vm, node, pd, pd2, b[0], sync_next, is_ip6,
				 is_tun, 0);
 
      /* trace: */
      if (PREDICT_FALSE (b[0]->flags & VLIB_BUFFER_IS_TRACED))
	{
	  esp_decrypt_trace_t *tr;
	  tr = vlib_add_trace (vm, node, b[0], sizeof (*tr));
-	  sa0 = pool_elt_at_index (im->sad, current_sa_index);
+	  sa0 = ipsec_sa_get (current_sa_index);
	  tr->crypto_alg = sa0->crypto_alg;
	  tr->integ_alg = sa0->integ_alg;
	  tr->seq = pd->seq;
@@ -1291,19 +1308,22 @@
 
      /* next */
      n_left -= 1;
-      next += 1;
+      sync_next += 1;
      pd += 1;
      pd2 += 1;
      b += 1;
    }
 
-  n_left = from_frame->n_vectors;
-  vlib_node_increment_counter (vm, node->node_index,
-			       ESP_DECRYPT_ERROR_RX_PKTS, n_left);
+  vlib_node_increment_counter (vm, node->node_index, ESP_DECRYPT_ERROR_RX_PKTS,
			       from_frame->n_vectors);
 
-  vlib_buffer_enqueue_to_next (vm, node, from, nexts, n_left);
+  if (n_sync)
+    vlib_buffer_enqueue_to_next (vm, node, sync_bi, sync_nexts, n_sync);
 
-  return n_left;
+  if (n_noop)
+    vlib_buffer_enqueue_to_next (vm, node, noop_bi, noop_nexts, n_noop);
+
+  return (from_frame->n_vectors);
 }
 
 always_inline uword
@@ -1311,7 +1331,6 @@
 esp_decrypt_post_inline (vlib_main_t * vm,
			 vlib_node_runtime_t * node,
			 vlib_frame_t * from_frame, int is_ip6, int is_tun)
 {
-  ipsec_main_t *im = &ipsec_main;
  u32 *from = vlib_frame_vector_args (from_frame);
  u32 n_left = from_frame->n_vectors;
  vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;
@@ -1341,12 +1360,12 @@
      /*trace: */
      if (PREDICT_FALSE (b[0]->flags & VLIB_BUFFER_IS_TRACED))
	{
-	  ipsec_sa_t *sa0 = pool_elt_at_index (im->sad, 
pd->sa_index); + ipsec_sa_t *sa0 = ipsec_sa_get (pd->sa_index); esp_decrypt_trace_t *tr; esp_decrypt_packet_data_t *async_pd = &(esp_post_data (b[0]))->decrypt_data; tr = vlib_add_trace (vm, node, b[0], sizeof (*tr)); - sa0 = pool_elt_at_index (im->sad, async_pd->sa_index); + sa0 = ipsec_sa_get (async_pd->sa_index); tr->crypto_alg = sa0->crypto_alg; tr->integ_alg = sa0->integ_alg; @@ -1429,23 +1448,6 @@ VLIB_NODE_FN (esp6_decrypt_tun_post_node) (vlib_main_t * vm, return esp_decrypt_post_inline (vm, node, from_frame, 1, 1); } -VLIB_NODE_FN (esp_decrypt_pending_node) (vlib_main_t * vm, - vlib_node_runtime_t * node, - vlib_frame_t * from_frame) -{ - return from_frame->n_vectors; -} - -/* *INDENT-OFF* */ -VLIB_REGISTER_NODE (esp_decrypt_pending_node) = { - .name = "esp-decrypt-pending", - .vector_size = sizeof (u32), - .type = VLIB_NODE_TYPE_INTERNAL, - - .n_next_nodes = 0 -}; -/* *INDENT-ON* */ - /* *INDENT-OFF* */ VLIB_REGISTER_NODE (esp4_decrypt_node) = { .name = "esp4-decrypt", @@ -1461,9 +1463,9 @@ VLIB_REGISTER_NODE (esp4_decrypt_node) = { [ESP_DECRYPT_NEXT_DROP] = "ip4-drop", [ESP_DECRYPT_NEXT_IP4_INPUT] = "ip4-input-no-checksum", [ESP_DECRYPT_NEXT_IP6_INPUT] = "ip6-input", + [ESP_DECRYPT_NEXT_MPLS_INPUT] = "mpls-drop", [ESP_DECRYPT_NEXT_L2_INPUT] = "l2-input", [ESP_DECRYPT_NEXT_HANDOFF] = "esp4-decrypt-handoff", - [ESP_DECRYPT_NEXT_PENDING] = "esp-decrypt-pending" }, }; @@ -1493,9 +1495,9 @@ VLIB_REGISTER_NODE (esp6_decrypt_node) = { [ESP_DECRYPT_NEXT_DROP] = "ip6-drop", [ESP_DECRYPT_NEXT_IP4_INPUT] = "ip4-input-no-checksum", [ESP_DECRYPT_NEXT_IP6_INPUT] = "ip6-input", + [ESP_DECRYPT_NEXT_MPLS_INPUT] = "mpls-drop", [ESP_DECRYPT_NEXT_L2_INPUT] = "l2-input", [ESP_DECRYPT_NEXT_HANDOFF]= "esp6-decrypt-handoff", - [ESP_DECRYPT_NEXT_PENDING] = "esp-decrypt-pending" }, }; @@ -1523,9 +1525,9 @@ VLIB_REGISTER_NODE (esp4_decrypt_tun_node) = { [ESP_DECRYPT_NEXT_DROP] = "ip4-drop", [ESP_DECRYPT_NEXT_IP4_INPUT] = "ip4-input-no-checksum", [ESP_DECRYPT_NEXT_IP6_INPUT] = "ip6-input", + [ESP_DECRYPT_NEXT_MPLS_INPUT] = "mpls-input", [ESP_DECRYPT_NEXT_L2_INPUT] = "l2-input", [ESP_DECRYPT_NEXT_HANDOFF] = "esp4-decrypt-tun-handoff", - [ESP_DECRYPT_NEXT_PENDING] = "esp-decrypt-pending" }, }; @@ -1553,9 +1555,9 @@ VLIB_REGISTER_NODE (esp6_decrypt_tun_node) = { [ESP_DECRYPT_NEXT_DROP] = "ip6-drop", [ESP_DECRYPT_NEXT_IP4_INPUT] = "ip4-input-no-checksum", [ESP_DECRYPT_NEXT_IP6_INPUT] = "ip6-input", + [ESP_DECRYPT_NEXT_MPLS_INPUT] = "mpls-input", [ESP_DECRYPT_NEXT_L2_INPUT] = "l2-input", [ESP_DECRYPT_NEXT_HANDOFF]= "esp6-decrypt-tun-handoff", - [ESP_DECRYPT_NEXT_PENDING] = "esp-decrypt-pending" }, }; @@ -1572,6 +1574,29 @@ VLIB_REGISTER_NODE (esp6_decrypt_tun_post_node) = { }; /* *INDENT-ON* */ +#ifndef CLIB_MARCH_VARIANT + +static clib_error_t * +esp_decrypt_init (vlib_main_t *vm) +{ + ipsec_main_t *im = &ipsec_main; + + im->esp4_dec_fq_index = + vlib_frame_queue_main_init (esp4_decrypt_node.index, 0); + im->esp6_dec_fq_index = + vlib_frame_queue_main_init (esp6_decrypt_node.index, 0); + im->esp4_dec_tun_fq_index = + vlib_frame_queue_main_init (esp4_decrypt_tun_node.index, 0); + im->esp6_dec_tun_fq_index = + vlib_frame_queue_main_init (esp6_decrypt_tun_node.index, 0); + + return 0; +} + +VLIB_INIT_FUNCTION (esp_decrypt_init); + +#endif + /* * fd.io coding-style-patch-verification: ON *
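
Note on the nonce handling changed above: in both the synchronous and the
asynchronous paths, the patch replaces the old AEAD-only trick of overwriting
the tail of the ESP header with the SA salt by an explicit esp_ctr_nonce_t
written into buffer headroom just in front of the IP header, so plain CTR
ciphers (which need a block counter primed to 1 per RFC 3686) and AEAD/GCM
share one construction. The sketch below is illustrative only and is not code
from the patch: it assumes the packed salt/IV/counter layout implied by the
+ lines, the function and parameter names are hypothetical, and it uses
memcpy and htonl where the patch uses an aligned u64 store and
clib_host_to_net_u32.

/* Hypothetical standalone sketch of the nonce construction; not VPP API. */
#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>

typedef struct __attribute__ ((packed))
{
  uint32_t salt; /* SA salt, kept in network byte order */
  uint64_t iv;   /* 64-bit IV copied verbatim from the ESP payload */
  uint32_t ctr;  /* initial block counter; only plain CTR mode sets it */
} ctr_nonce_t;

/* payload points at the ESP IV. The bytes in front of the IP header
 * (buffer headroom) are no longer needed once the headers are parsed,
 * which is where the nonce - and, for AEAD, the AAD just before it -
 * is placed. */
static uint8_t *
build_ctr_nonce (uint8_t *payload, uint16_t esp_sz, uint16_t hdr_sz,
		 uint32_t salt, int is_aead)
{
  ctr_nonce_t *nonce =
    (ctr_nonce_t *) (payload - esp_sz - hdr_sz - sizeof (*nonce));

  if (!is_aead)
    nonce->ctr = htonl (1); /* RFC 3686: the block counter starts at 1 */
  nonce->salt = salt;
  memcpy (&nonce->iv, payload, sizeof (nonce->iv)); /* assumes 8-byte IV */
  return (uint8_t *) nonce; /* handed to the cipher as its IV/nonce */
}

Placing the nonce (and AAD) in headroom avoids any extra allocation: the
ESP header and outer IP header have already been parsed by this point, so
the space in front of the payload is free scratch, exactly as the patch's
comment "construct nonce in a scratch space in front of the IP header"
describes.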