X-Git-Url: https://gerrit.fd.io/r/gitweb?a=blobdiff_plain;f=src%2Fvnet%2Fipsec%2Fesp_decrypt.c;h=21159fba84b2b7f4b859b065fc4f199ee54b5217;hb=0464310fd3d4234e5b0aaf730360a1db2b5f7384;hp=c13657d081945a9433932c89e1ab754281f1897f;hpb=e631ece4aa32b33651ed458200ab551ffb8fbb47;p=vpp.git

diff --git a/src/vnet/ipsec/esp_decrypt.c b/src/vnet/ipsec/esp_decrypt.c
index c13657d0819..21159fba84b 100644
--- a/src/vnet/ipsec/esp_decrypt.c
+++ b/src/vnet/ipsec/esp_decrypt.c
@@ -14,7 +14,6 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-
 #include <vnet/vnet.h>
 #include <vnet/api_errno.h>
 #include <vnet/ip/ip.h>
@@ -92,10 +91,14 @@ typedef struct
   u32 seq;
   u32 sa_seq;
   u32 sa_seq_hi;
+  u32 pkt_seq_hi;
   ipsec_crypto_alg_t crypto_alg;
   ipsec_integ_alg_t integ_alg;
 } esp_decrypt_trace_t;

+/* The number of bytes in the high-order sequence number */
+#define N_HI_ESN_BYTES 4
+
 /* packet trace format function */
 static u8 *
 format_esp_decrypt_trace (u8 * s, va_list * args)
@@ -104,11 +107,11 @@ format_esp_decrypt_trace (u8 * s, va_list * args)
   CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
   esp_decrypt_trace_t *t = va_arg (*args, esp_decrypt_trace_t *);

-  s =
-    format (s,
-            "esp: crypto %U integrity %U pkt-seq %d sa-seq %u sa-seq-hi %u",
-            format_ipsec_crypto_alg, t->crypto_alg, format_ipsec_integ_alg,
-            t->integ_alg, t->seq, t->sa_seq, t->sa_seq_hi);
+  s = format (s,
+              "esp: crypto %U integrity %U pkt-seq %d sa-seq %u sa-seq-hi %u "
+              "pkt-seq-hi %u",
+              format_ipsec_crypto_alg, t->crypto_alg, format_ipsec_integ_alg,
+              t->integ_alg, t->seq, t->sa_seq, t->sa_seq_hi, t->pkt_seq_hi);

   return s;
 }
@@ -235,40 +238,41 @@ esp_move_icv (vlib_main_t * vm, vlib_buffer_t * first,
   return lb_curr;
 }

-static_always_inline i16
-esp_insert_esn (vlib_main_t * vm, ipsec_sa_t * sa,
-                esp_decrypt_packet_data2_t * pd2, u32 * data_len,
-                u8 ** digest, u16 * len, vlib_buffer_t * b, u8 * payload)
+static_always_inline u16
+esp_insert_esn (vlib_main_t *vm, ipsec_sa_t *sa, esp_decrypt_packet_data_t *pd,
+                esp_decrypt_packet_data2_t *pd2, u32 *data_len, u8 **digest,
+                u16 *len, vlib_buffer_t *b, u8 *payload)
 {
   if (!ipsec_sa_is_set_USE_ESN (sa))
     return 0;
-  /* shift ICV by 4 bytes to insert ESN */
-  u32 seq_hi = clib_host_to_net_u32 (sa->seq_hi);
-  u8 tmp[ESP_MAX_ICV_SIZE], sz = sizeof (sa->seq_hi);
+  u32 seq_hi = clib_host_to_net_u32 (pd->seq_hi);
+  u8 tmp[ESP_MAX_ICV_SIZE];

   if (pd2->icv_removed)
     {
       u16 space_left = vlib_buffer_space_left_at_end (vm, pd2->lb);
-      if (space_left >= sz)
+      if (space_left >= N_HI_ESN_BYTES)
         {
-          clib_memcpy_fast (vlib_buffer_get_tail (pd2->lb), &seq_hi, sz);
-          *data_len += sz;
+          clib_memcpy_fast (vlib_buffer_get_tail (pd2->lb), &seq_hi,
+                            N_HI_ESN_BYTES);
+          *data_len += N_HI_ESN_BYTES;
         }
       else
-        return sz;
+        return N_HI_ESN_BYTES;

       len[0] = b->current_length;
     }
   else
     {
       clib_memcpy_fast (tmp, payload + len[0], ESP_MAX_ICV_SIZE);
-      clib_memcpy_fast (payload + len[0], &seq_hi, sz);
-      clib_memcpy_fast (payload + len[0] + sz, tmp, ESP_MAX_ICV_SIZE);
-      *data_len += sz;
-      *digest += sz;
+      clib_memcpy_fast (payload + len[0], &seq_hi, N_HI_ESN_BYTES);
+      clib_memcpy_fast (payload + len[0] + N_HI_ESN_BYTES, tmp,
+                        ESP_MAX_ICV_SIZE);
+      *data_len += N_HI_ESN_BYTES;
+      *digest += N_HI_ESN_BYTES;
     }

-  return sz;
+  return N_HI_ESN_BYTES;
 }
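The change above reworks esp_insert_esn () to take the per-packet seq_hi that
the anti-replay logic reconstructed, instead of reading sa->seq_hi at crypto
time. The mechanics come from RFC 4303 Appendix A: with ESN, the ICV is
computed over the payload followed by the high-order 32 bits of the sequence
number, which never travel on the wire, so the decryptor must splice them in
ahead of the received ICV before verification. A minimal, VPP-independent
sketch of that splice follows; append_esn_hi, put_be32 and the flat
single-buffer layout are illustrative assumptions, not VPP code.

    #include <stdint.h>
    #include <string.h>

    #define N_HI_ESN_BYTES 4
    #define MAX_ICV_SIZE   32

    static void
    put_be32 (uint8_t *p, uint32_t v)
    {
      p[0] = (uint8_t) (v >> 24);
      p[1] = (uint8_t) (v >> 16);
      p[2] = (uint8_t) (v >> 8);
      p[3] = (uint8_t) v;
    }

    /* Shift the ICV right by 4 bytes and write the big-endian high-order
     * ESN word into the gap, so the MAC is computed over payload || seq_hi.
     * 'pkt' holds payload_len bytes of integrity-covered data followed by
     * icv_len bytes of ICV, with at least N_HI_ESN_BYTES of spare tail
     * room. Returns the new length of the integrity-covered region. */
    static size_t
    append_esn_hi (uint8_t *pkt, size_t payload_len, size_t icv_len,
                   uint32_t seq_hi)
    {
      uint8_t icv_copy[MAX_ICV_SIZE];

      memcpy (icv_copy, pkt + payload_len, icv_len);  /* save the ICV */
      put_be32 (pkt + payload_len, seq_hi);           /* insert ESN hi */
      memcpy (pkt + payload_len + N_HI_ESN_BYTES, icv_copy, icv_len);
      return payload_len + N_HI_ESN_BYTES;
    }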
@@ -284,14 +288,14 @@ esp_move_icv_esn (vlib_main_t * vm, vlib_buffer_t * first,

   if (ipsec_sa_is_set_USE_ESN (sa))
     {
-      u8 sz = sizeof (sa->seq_hi);
-      u32 seq_hi = clib_host_to_net_u32 (sa->seq_hi);
+      u32 seq_hi = clib_host_to_net_u32 (pd->seq_hi);
       u16 space_left = vlib_buffer_space_left_at_end (vm, pd2->lb);

-      if (space_left >= sz)
+      if (space_left >= N_HI_ESN_BYTES)
         {
-          clib_memcpy_fast (vlib_buffer_get_tail (pd2->lb), &seq_hi, sz);
-          *len += sz;
+          clib_memcpy_fast (vlib_buffer_get_tail (pd2->lb), &seq_hi,
+                            N_HI_ESN_BYTES);
+          *len += N_HI_ESN_BYTES;
         }
       else
         {
@@ -299,7 +303,8 @@ esp_move_icv_esn (vlib_main_t * vm, vlib_buffer_t * first,
            * (with ICV data) */
           ASSERT (pd2->icv_removed);
           vlib_buffer_t *tmp = vlib_get_buffer (vm, pd2->free_buffer_index);
-          clib_memcpy_fast (vlib_buffer_get_current (tmp) - sz, &seq_hi, sz);
+          clib_memcpy_fast (vlib_buffer_get_current (tmp) - N_HI_ESN_BYTES,
+                            &seq_hi, N_HI_ESN_BYTES);
           extra_esn[0] = 1;
         }
     }
@@ -307,11 +312,12 @@ esp_move_icv_esn (vlib_main_t * vm, vlib_buffer_t * first,
 }

 static_always_inline int
-esp_decrypt_chain_integ (vlib_main_t * vm, ipsec_per_thread_data_t * ptd,
-                         esp_decrypt_packet_data2_t * pd2,
-                         ipsec_sa_t * sa0, vlib_buffer_t * b, u8 icv_sz,
-                         u8 * start_src, u32 start_len,
-                         u8 ** digest, u16 * n_ch, u32 * integ_total_len)
+esp_decrypt_chain_integ (vlib_main_t *vm, ipsec_per_thread_data_t *ptd,
+                         const esp_decrypt_packet_data_t *pd,
+                         esp_decrypt_packet_data2_t *pd2, ipsec_sa_t *sa0,
+                         vlib_buffer_t *b, u8 icv_sz, u8 *start_src,
+                         u32 start_len, u8 **digest, u16 *n_ch,
+                         u32 *integ_total_len)
 {
   vnet_crypto_op_chunk_t *ch;
   vlib_buffer_t *cb = vlib_get_buffer (vm, b->next_buffer);
@@ -334,19 +340,19 @@ esp_decrypt_chain_integ (vlib_main_t * vm, ipsec_per_thread_data_t * ptd,
           ch->len = cb->current_length - icv_sz;
           if (ipsec_sa_is_set_USE_ESN (sa0))
             {
-              u32 seq_hi = clib_host_to_net_u32 (sa0->seq_hi);
-              u8 tmp[ESP_MAX_ICV_SIZE], sz = sizeof (sa0->seq_hi);
+              u32 seq_hi = clib_host_to_net_u32 (pd->seq_hi);
+              u8 tmp[ESP_MAX_ICV_SIZE];
               u8 *esn;
               vlib_buffer_t *tmp_b;
               u16 space_left = vlib_buffer_space_left_at_end (vm, pd2->lb);
-              if (space_left < sz)
+              if (space_left < N_HI_ESN_BYTES)
                 {
                   if (pd2->icv_removed)
                     {
                       /* use pre-data area from the last buffer
                          that was removed from the chain */
                       tmp_b = vlib_get_buffer (vm, pd2->free_buffer_index);
-                      esn = tmp_b->data - sz;
+                      esn = tmp_b->data - N_HI_ESN_BYTES;
                     }
                   else
                     {
@@ -358,28 +364,29 @@ esp_decrypt_chain_integ (vlib_main_t * vm, ipsec_per_thread_data_t * ptd,
                       esn = tmp_b->data;
                       pd2->free_buffer_index = tmp_bi;
                     }
-                  clib_memcpy_fast (esn, &seq_hi, sz);
+                  clib_memcpy_fast (esn, &seq_hi, N_HI_ESN_BYTES);

                   vec_add2 (ptd->chunks, ch, 1);
                   n_chunks += 1;
                   ch->src = esn;
-                  ch->len = sz;
+                  ch->len = N_HI_ESN_BYTES;
                 }
               else
                 {
                   if (pd2->icv_removed)
                     {
-                      clib_memcpy_fast (vlib_buffer_get_tail
                                        (pd2->lb), &seq_hi, sz);
+                      clib_memcpy_fast (vlib_buffer_get_tail (pd2->lb),
+                                        &seq_hi, N_HI_ESN_BYTES);
                     }
                   else
                     {
                       clib_memcpy_fast (tmp, *digest, ESP_MAX_ICV_SIZE);
-                      clib_memcpy_fast (*digest, &seq_hi, sz);
-                      clib_memcpy_fast (*digest + sz, tmp, ESP_MAX_ICV_SIZE);
-                      *digest += sz;
+                      clib_memcpy_fast (*digest, &seq_hi, N_HI_ESN_BYTES);
+                      clib_memcpy_fast (*digest + N_HI_ESN_BYTES, tmp,
+                                        ESP_MAX_ICV_SIZE);
+                      *digest += N_HI_ESN_BYTES;
                     }
-                  ch->len += sz;
+                  ch->len += N_HI_ESN_BYTES;
                 }
             }
           total_len += ch->len;
@@ -540,7 +547,7 @@ esp_decrypt_prepare_sync_op (vlib_main_t * vm, vlib_node_runtime_t * node,
           op->flags |= VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS;
           op->chunk_index = vec_len (ptd->chunks);

-          if (esp_decrypt_chain_integ (vm, ptd, pd2, sa0, b, icv_sz,
+          if (esp_decrypt_chain_integ (vm, ptd, pd, pd2, sa0, b, icv_sz,
                                        payload, pd->current_length,
                                        &op->digest, &op->n_chunks, 0) < 0)
             {
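esp_decrypt_chain_integ () above performs the same ESN splice for chained
buffers while also building the scatter-gather list the crypto engine walks:
one chunk per buffer, with the trailing ICV excluded from the last chunk.
Stripped of the ESN and split-ICV special cases, the chunk-building core
reduces to roughly the following; buf_t and chunk_t are stand-ins for vlib
buffers and vnet_crypto_op_chunk_t, not the real types.

    #include <stddef.h>
    #include <stdint.h>

    /* Illustrative stand-ins for VPP's chained buffers and crypto chunks. */
    typedef struct buf
    {
      uint8_t *data;
      size_t len;
      struct buf *next;
    } buf_t;

    typedef struct
    {
      uint8_t *src;
      size_t len;
    } chunk_t;

    /* Walk a buffer chain and emit one integrity chunk per buffer,
     * trimming the ICV (icv_len bytes, assumed to sit wholly at the end
     * of the last buffer) from the final chunk. Returns the chunk count,
     * or -1 on the cases the sketch does not handle. */
    static int
    build_integ_chunks (buf_t *b, size_t icv_len, chunk_t *out,
                        int max_chunks)
    {
      int n = 0;

      for (; b; b = b->next)
        {
          if (n == max_chunks)
            return -1;
          out[n].src = b->data;
          out[n].len = b->len;
          if (!b->next)
            {
              if (out[n].len < icv_len)
                return -1; /* ICV split across buffers: not handled here */
              out[n].len -= icv_len;
            }
          n++;
        }
      return n;
    }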
@@ -550,7 +557,7 @@ esp_decrypt_prepare_sync_op (vlib_main_t * vm, vlib_node_runtime_t * node,
             }
         }
       else
-        esp_insert_esn (vm, sa0, pd2, &op->len, &op->digest, &len, b,
+        esp_insert_esn (vm, sa0, pd, pd2, &op->len, &op->digest, &len, b,
                         payload);
     out:
       vec_add_aligned (*(integ_ops[0]), op, 1, CLIB_CACHE_LINE_BYTES);
@@ -576,7 +583,7 @@ esp_decrypt_prepare_sync_op (vlib_main_t * vm, vlib_node_runtime_t * node,
           /* construct aad in a scratch space in front of the nonce */
           esp_header_t *esp0 = (esp_header_t *) (payload - esp_sz);
           op->aad = (u8 *) nonce - sizeof (esp_aead_t);
-          op->aad_len = esp_aad_fill (op->aad, esp0, sa0);
+          op->aad_len = esp_aad_fill (op->aad, esp0, sa0, pd->seq_hi);
           op->tag = payload + len;
           op->tag_len = 16;
         }
@@ -617,7 +624,6 @@ esp_decrypt_prepare_async_frame (vlib_main_t *vm, vlib_node_runtime_t *node,
                                  vlib_buffer_t *b, u16 *next, u16 async_next)
 {
   const u8 esp_sz = sizeof (esp_header_t);
-  u32 current_protect_index = vnet_buffer (b)->ipsec.protect_index;
   esp_decrypt_packet_data_t *async_pd = &(esp_post_data (b))->decrypt_data;
   esp_decrypt_packet_data2_t *async_pd2 = esp_post_data2 (b);
   u8 *tag = payload + len, *iv = payload + esp_sz, *aad = 0;
@@ -671,16 +677,16 @@ esp_decrypt_prepare_async_frame (vlib_main_t *vm, vlib_node_runtime_t *node,
             tag = vlib_buffer_get_tail (pd2->lb) - icv_sz;
           flags |= VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS;

-          if (esp_decrypt_chain_integ (vm, ptd, pd2, sa0, b, icv_sz, payload,
-                                       pd->current_length, &tag,
-                                       0, &integ_len) < 0)
+          if (esp_decrypt_chain_integ (vm, ptd, pd, pd2, sa0, b, icv_sz,
+                                       payload, pd->current_length, &tag, 0,
+                                       &integ_len) < 0)
             {
               /* allocate buffer failed, will not add to frame and drop */
               return (ESP_DECRYPT_ERROR_NO_BUFFERS);
             }
         }
       else
-        esp_insert_esn (vm, sa0, pd2, &integ_len, &tag, &len, b, payload);
+        esp_insert_esn (vm, sa0, pd, pd2, &integ_len, &tag, &len, b, payload);
     }
   else
     key_index = sa0->crypto_key_index;
@@ -701,7 +707,7 @@ out:
       /* construct aad in a scratch space in front of the nonce */
       esp_header_t *esp0 = (esp_header_t *) (payload - esp_sz);
       aad = (u8 *) nonce - sizeof (esp_aead_t);
-      esp_aad_fill (aad, esp0, sa0);
+      esp_aad_fill (aad, esp0, sa0, pd->seq_hi);
       tag = payload + len;
     }
   else
@@ -730,7 +736,6 @@ out:

   *async_pd = *pd;
   *async_pd2 = *pd2;
-  pd->protect_index = current_protect_index;

   /* for AEAD integ_len - crypto_len will be negative, it is ok since it
    * is ignored by the engine. */
@@ -742,10 +747,12 @@ out:
 }

 static_always_inline void
-esp_decrypt_post_crypto (vlib_main_t * vm, vlib_node_runtime_t * node,
-                         esp_decrypt_packet_data_t * pd,
-                         esp_decrypt_packet_data2_t * pd2, vlib_buffer_t * b,
-                         u16 * next, int is_ip6, int is_tun, int is_async)
+esp_decrypt_post_crypto (vlib_main_t *vm, const vlib_node_runtime_t *node,
+                         const u16 *next_by_next_header,
+                         const esp_decrypt_packet_data_t *pd,
+                         const esp_decrypt_packet_data2_t *pd2,
+                         vlib_buffer_t *b, u16 *next, int is_ip6, int is_tun,
+                         int is_async)
 {
   ipsec_sa_t *sa0 = ipsec_sa_get (pd->sa_index);
   vlib_buffer_t *lb = b;
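Both esp_aad_fill () call sites above now receive pd->seq_hi because, for
AEAD ciphers, the ESN enters authentication through the AAD rather than a
trailing splice: RFC 4106 (section 5) defines the ESP AAD as SPI || 32-bit SN
(8 bytes) without ESN, and SPI || 64-bit ESN (12 bytes) with it, which is why
the AAD length varies. A sketch of that layout; the function name and flat
arguments are illustrative, not VPP's esp_aad_fill signature.

    #include <stdint.h>

    static void
    put_be32 (uint8_t *p, uint32_t v)
    {
      p[0] = (uint8_t) (v >> 24);
      p[1] = (uint8_t) (v >> 16);
      p[2] = (uint8_t) (v >> 8);
      p[3] = (uint8_t) v;
    }

    /* Build the AES-GCM AAD for ESP per RFC 4106 section 5. Returns the
     * AAD length: 8 bytes without ESN, 12 bytes with it. */
    static unsigned
    esp_gcm_aad_fill_sketch (uint8_t aad[12], uint32_t spi, uint32_t seq,
                             uint32_t seq_hi, int use_esn)
    {
      put_be32 (aad, spi);
      if (use_esn)
        {
          put_be32 (aad + 4, seq_hi); /* high half first: big-endian ESN */
          put_be32 (aad + 8, seq);
          return 12;
        }
      put_be32 (aad + 4, seq);
      return 8;
    }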
@@ -776,14 +783,19 @@ esp_decrypt_post_crypto (vlib_main_t * vm, vlib_node_runtime_t * node,
    * a sequence s, s+1, s+2, s+3, ... s+n and nothing will prevent any
    * implementation, sequential or batching, from decrypting these.
    */
-  if (ipsec_sa_anti_replay_check (sa0, pd->seq))
+  if (ipsec_sa_anti_replay_and_sn_advance (sa0, pd->seq, pd->seq_hi, true,
+                                           NULL))
     {
       b->error = node->errors[ESP_DECRYPT_ERROR_REPLAY];
       next[0] = ESP_DECRYPT_NEXT_DROP;
       return;
     }

-  ipsec_sa_anti_replay_advance (sa0, pd->seq);
+  u64 n_lost =
+    ipsec_sa_anti_replay_advance (sa0, vm->thread_index, pd->seq, pd->seq_hi);
+
+  vlib_prefetch_simple_counter (&ipsec_sa_lost_counters, vm->thread_index,
+                                pd->sa_index);

   if (pd->is_chain)
     {
@@ -908,44 +920,49 @@ esp_decrypt_post_crypto (vlib_main_t * vm, vlib_node_runtime_t * node,
           b->current_length = pd->current_length - adv;
           esp_remove_tail (vm, b, lb, tail);
         }
-      else
+      else if (is_tun && next_header == IP_PROTOCOL_GRE)
         {
-          if (is_tun && next_header == IP_PROTOCOL_GRE)
-            {
-              gre_header_t *gre;
+          gre_header_t *gre;

-              b->current_data = pd->current_data + adv;
-              b->current_length = pd->current_length - adv - tail;
+          b->current_data = pd->current_data + adv;
+          b->current_length = pd->current_length - adv - tail;

-              gre = vlib_buffer_get_current (b);
+          gre = vlib_buffer_get_current (b);

-              vlib_buffer_advance (b, sizeof (*gre));
+          vlib_buffer_advance (b, sizeof (*gre));

-              switch (clib_net_to_host_u16 (gre->protocol))
-                {
-                case GRE_PROTOCOL_teb:
-                  vnet_update_l2_len (b);
-                  next[0] = ESP_DECRYPT_NEXT_L2_INPUT;
-                  break;
-                case GRE_PROTOCOL_ip4:
-                  next[0] = ESP_DECRYPT_NEXT_IP4_INPUT;
-                  break;
-                case GRE_PROTOCOL_ip6:
-                  next[0] = ESP_DECRYPT_NEXT_IP6_INPUT;
-                  break;
-                default:
-                  b->error = node->errors[ESP_DECRYPT_ERROR_UNSUP_PAYLOAD];
-                  next[0] = ESP_DECRYPT_NEXT_DROP;
-                  break;
-                }
-            }
-          else
+          switch (clib_net_to_host_u16 (gre->protocol))
             {
-              next[0] = ESP_DECRYPT_NEXT_DROP;
+            case GRE_PROTOCOL_teb:
+              vnet_update_l2_len (b);
+              next[0] = ESP_DECRYPT_NEXT_L2_INPUT;
+              break;
+            case GRE_PROTOCOL_ip4:
+              next[0] = ESP_DECRYPT_NEXT_IP4_INPUT;
+              break;
+            case GRE_PROTOCOL_ip6:
+              next[0] = ESP_DECRYPT_NEXT_IP6_INPUT;
+              break;
+            default:
               b->error = node->errors[ESP_DECRYPT_ERROR_UNSUP_PAYLOAD];
-              return;
+              next[0] = ESP_DECRYPT_NEXT_DROP;
+              break;
             }
         }
+      else if ((next[0] = vec_elt (next_by_next_header, next_header)) !=
+               (u16) ~0)
+        {
+          b->current_data = pd->current_data + adv;
+          b->current_length = pd->current_length - adv;
+          esp_remove_tail (vm, b, lb, tail);
+        }
+      else
+        {
+          next[0] = ESP_DECRYPT_NEXT_DROP;
+          b->error = node->errors[ESP_DECRYPT_ERROR_UNSUP_PAYLOAD];
+          return;
+        }
+
       if (is_tun)
         {
           if (ipsec_sa_is_set_IS_PROTECT (sa0))
@@ -968,12 +985,8 @@ esp_decrypt_post_crypto (vlib_main_t * vm, vlib_node_runtime_t * node,
                */
              const ipsec_tun_protect_t *itp;

-              if (is_async)
-                itp = ipsec_tun_protect_get (pd->protect_index);
-              else
-                itp =
-                  ipsec_tun_protect_get (vnet_buffer (b)->
-                                         ipsec.protect_index);
+              itp =
+                ipsec_tun_protect_get (vnet_buffer (b)->ipsec.protect_index);

               if (PREDICT_TRUE (next_header == IP_PROTOCOL_IP_IN_IP))
                 {
@@ -1008,6 +1021,10 @@ esp_decrypt_post_crypto (vlib_main_t * vm, vlib_node_runtime_t * node,
             }
         }
     }
+
+  if (PREDICT_FALSE (n_lost))
+    vlib_increment_simple_counter (&ipsec_sa_lost_counters, vm->thread_index,
+                                   pd->sa_index, n_lost);
 }

 always_inline uword
@@ -1016,6 +1033,7 @@ esp_decrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
                     u16 async_next_node)
 {
   ipsec_main_t *im = &ipsec_main;
+  const u16 *next_by_next_header = im->next_header_registrations;
   u32 thread_index = vm->thread_index;
   u16 len;
   ipsec_per_thread_data_t *ptd = vec_elt_at_index (im->ptd, thread_index);
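The decrypt path now calls ipsec_sa_anti_replay_and_sn_advance () twice: once
in esp_decrypt_inline () with only the low 32 wire bits, to reconstruct
pd->seq_hi, and once here after decryption to re-check and advance the
window. The high-half reconstruction follows RFC 4303 Appendix A2.2. Below is
a sketch of that inference step only, as the RFC states it; it is a model of
what the helper has to compute, not VPP's implementation.

    #include <stdint.h>

    /* Infer the high-order ESN half for a received low-order sequence
     * number, per RFC 4303 Appendix A2.2. th/tl are the high/low halves
     * of the highest sequence number authenticated so far; w is the
     * replay window size in packets. */
    static uint32_t
    esn_infer_seq_hi (uint32_t th, uint32_t tl, uint32_t w, uint32_t seql)
    {
      if (tl >= w - 1)
        {
          /* window lies within one 2^32 subspace */
          if (seql >= tl - w + 1)
            return th;     /* inside or ahead of the window */
          return th + 1;   /* low half wrapped: next subspace */
        }
      /* window straddles a subspace boundary; unsigned wrap of
       * (tl - w + 1) gives the RFC's mod-2^32 comparison */
      if (seql >= tl - w + 1)
        return th - 1;     /* tail of the previous subspace */
      return th;
    }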
@@ -1024,7 +1042,7 @@ esp_decrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
   vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;
   vlib_buffer_t *sync_bufs[VLIB_FRAME_SIZE];
   u16 sync_nexts[VLIB_FRAME_SIZE], *sync_next = sync_nexts, n_sync = 0;
-  u16 async_nexts[VLIB_FRAME_SIZE], *async_next = async_nexts, n_async = 0;
+  u16 async_nexts[VLIB_FRAME_SIZE], *async_next = async_nexts;
   u16 noop_nexts[VLIB_FRAME_SIZE], *noop_next = noop_nexts, n_noop = 0;
   u32 sync_bi[VLIB_FRAME_SIZE];
   u32 noop_bi[VLIB_FRAME_SIZE];
@@ -1065,9 +1083,9 @@ esp_decrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
           u8 *p;
           vlib_prefetch_buffer_header (b[2], LOAD);
           p = vlib_buffer_get_current (b[1]);
-          CLIB_PREFETCH (p, CLIB_CACHE_LINE_BYTES, LOAD);
+          clib_prefetch_load (p);
           p -= CLIB_CACHE_LINE_BYTES;
-          CLIB_PREFETCH (p, CLIB_CACHE_LINE_BYTES, LOAD);
+          clib_prefetch_load (p);
         }

       u32 n_bufs = vlib_buffer_chain_linearize (vm, b[0]);
@@ -1092,7 +1110,7 @@ esp_decrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
           sa0 = ipsec_sa_get (current_sa_index);

           /* fetch the second cacheline ASAP */
-          CLIB_PREFETCH (sa0->cacheline1, CLIB_CACHE_LINE_BYTES, LOAD);
+          clib_prefetch_load (sa0->cacheline1);
           cpd.icv_sz = sa0->integ_icv_size;
           cpd.iv_sz = sa0->crypto_iv_size;
           cpd.flags = sa0->flags;
@@ -1100,22 +1118,6 @@ esp_decrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
           is_async = im->async_mode | ipsec_sa_is_set_IS_ASYNC (sa0);
         }

-      if (is_async)
-        {
-          async_op = sa0->crypto_async_dec_op_id;
-
-          /* get a frame for this op if we don't yet have one or it's full
-           */
-          if (NULL == async_frames[async_op] ||
-              vnet_crypto_async_frame_is_full (async_frames[async_op]))
-            {
-              async_frames[async_op] =
-                vnet_crypto_async_get_frame (vm, async_op);
-              /* Save the frame to the list we'll submit at the end */
-              vec_add1 (ptd->async_frames, async_frames[async_op]);
-            }
-        }
-
       if (PREDICT_FALSE (~0 == sa0->thread_index))
         {
           /* this is the first packet to use this SA, claim the SA
@@ -1164,7 +1166,8 @@ esp_decrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
       pd->current_length = b[0]->current_length;

       /* anti-replay check */
-      if (ipsec_sa_anti_replay_check (sa0, pd->seq))
+      if (ipsec_sa_anti_replay_and_sn_advance (sa0, pd->seq, ~0, false,
+                                               &pd->seq_hi))
         {
           err = ESP_DECRYPT_ERROR_REPLAY;
           esp_set_next_index (b[0], node, err, n_noop, noop_nexts,
@@ -1186,6 +1189,18 @@ esp_decrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node,

       if (is_async)
         {
+          async_op = sa0->crypto_async_dec_op_id;
+
+          /* get a frame for this op if we don't yet have one or it's full
+           */
+          if (NULL == async_frames[async_op] ||
+              vnet_crypto_async_frame_is_full (async_frames[async_op]))
+            {
+              async_frames[async_op] =
+                vnet_crypto_async_get_frame (vm, async_op);
+              /* Save the frame to the list we'll submit at the end */
+              vec_add1 (ptd->async_frames, async_frames[async_op]);
+            }

           err = esp_decrypt_prepare_async_frame (
             vm, node, ptd, async_frames[async_op], sa0, payload, len,
@@ -1219,10 +1234,8 @@ esp_decrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
           pd2 += 1;
         }
       else
-        {
-          n_async++;
-          async_next++;
-        }
+        async_next++;
+
       n_left -= 1;
       b += 1;
     }
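Moving the frame allocation under if (is_async) means a frame is only grabbed
once a packet actually reaches the async path, which is also why the n_async
counter could be dropped: the submit loop below now keys off each frame's
n_elts instead. The overall pattern — one lazily-allocated open frame per
async op, collected into a list and submitted (or freed, if empty) in a
single pass after the packet loop — reduces to the sketch below. All types
and functions here are hypothetical stand-ins for the vnet_crypto async API,
declared extern to keep the sketch focused on the control flow.

    #include <stddef.h>

    typedef struct { int n_elts; } frame_t;

    enum { N_OPS = 8, FRAME_SIZE = 64, MAX_PENDING = 256 };

    extern frame_t *frame_alloc (int op);
    extern int frame_submit (frame_t *f); /* < 0 on engine failure */
    extern void frame_free (frame_t *f);
    extern void recycle_failed (frame_t *f); /* re-route packets to drop */

    static frame_t *open_frame[N_OPS]; /* one open frame per op type */
    static frame_t *pending[MAX_PENDING];
    static int n_pending;

    /* Per packet: lazily allocate the per-op frame and remember it for
     * the batched submit, as the code moved under 'if (is_async)' does. */
    static frame_t *
    frame_for_op (int op)
    {
      if (!open_frame[op] || open_frame[op]->n_elts == FRAME_SIZE)
        {
          open_frame[op] = frame_alloc (op);
          pending[n_pending++] = open_frame[op];
        }
      return open_frame[op];
    }

    /* Once per dispatch: free empty frames, submit the rest, and recycle
     * the packets of any frame the engine refuses. */
    static void
    submit_all (void)
    {
      for (int i = 0; i < n_pending; i++)
        {
          frame_t *f = pending[i];
          if (!f->n_elts)
            {
              frame_free (f);
              continue;
            }
          if (frame_submit (f) < 0)
            {
              recycle_failed (f);
              frame_free (f);
            }
        }
      n_pending = 0;
    }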
@@ -1232,21 +1245,24 @@ esp_decrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
                                    current_sa_index, current_sa_pkts,
                                    current_sa_bytes);

-  if (n_async)
-    {
-      /* submit all of the open frames */
-      vnet_crypto_async_frame_t **async_frame;
+  /* submit or free all of the open frames */
+  vnet_crypto_async_frame_t **async_frame;

-      vec_foreach (async_frame, ptd->async_frames)
+  vec_foreach (async_frame, ptd->async_frames)
+    {
+      /* free frame and move on if no ops were successfully added */
+      if (PREDICT_FALSE (!(*async_frame)->n_elts))
         {
-          if (vnet_crypto_async_submit_open_frame (vm, *async_frame) < 0)
-            {
-              n_noop += esp_async_recycle_failed_submit (
-                vm, *async_frame, node, ESP_DECRYPT_ERROR_CRYPTO_ENGINE_ERROR,
-                n_sync, noop_bi, noop_nexts, ESP_DECRYPT_NEXT_DROP);
-              vnet_crypto_async_reset_frame (*async_frame);
-              vnet_crypto_async_free_frame (vm, *async_frame);
-            }
+          vnet_crypto_async_free_frame (vm, *async_frame);
+          continue;
+        }
+      if (vnet_crypto_async_submit_open_frame (vm, *async_frame) < 0)
+        {
+          n_noop += esp_async_recycle_failed_submit (
+            vm, *async_frame, node, ESP_DECRYPT_ERROR_CRYPTO_ENGINE_ERROR,
+            n_noop, noop_bi, noop_nexts, ESP_DECRYPT_NEXT_DROP);
+          vnet_crypto_async_reset_frame (*async_frame);
+          vnet_crypto_async_free_frame (vm, *async_frame);
         }
     }

@@ -1297,8 +1313,8 @@ esp_decrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
       current_sa_index = vnet_buffer (b[0])->ipsec.sad_index;

       if (sync_next[0] >= ESP_DECRYPT_N_NEXT)
-        esp_decrypt_post_crypto (vm, node, pd, pd2, b[0], sync_next, is_ip6,
-                                 is_tun, 0);
+        esp_decrypt_post_crypto (vm, node, next_by_next_header, pd, pd2, b[0],
+                                 sync_next, is_ip6, is_tun, 0);

       /* trace: */
       if (PREDICT_FALSE (b[0]->flags & VLIB_BUFFER_IS_TRACED))
@@ -1309,8 +1325,9 @@ esp_decrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
           tr->crypto_alg = sa0->crypto_alg;
           tr->integ_alg = sa0->integ_alg;
           tr->seq = pd->seq;
-          tr->sa_seq = sa0->last_seq;
+          tr->sa_seq = sa0->seq;
           tr->sa_seq_hi = sa0->seq_hi;
+          tr->pkt_seq_hi = pd->seq_hi;
         }

       /* next */
@@ -1338,6 +1355,8 @@ esp_decrypt_post_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
                          vlib_frame_t * from_frame, int is_ip6, int is_tun)
 {
+  const ipsec_main_t *im = &ipsec_main;
+  const u16 *next_by_next_header = im->next_header_registrations;
   u32 *from = vlib_frame_vector_args (from_frame);
   u32 n_left = from_frame->n_vectors;
   vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;
@@ -1355,13 +1374,13 @@ esp_decrypt_post_inline (vlib_main_t * vm,
         }

       if (!pd->is_chain)
-        esp_decrypt_post_crypto (vm, node, pd, 0, b[0], next, is_ip6, is_tun,
-                                 1);
+        esp_decrypt_post_crypto (vm, node, next_by_next_header, pd, 0, b[0],
+                                 next, is_ip6, is_tun, 1);
       else
         {
           esp_decrypt_packet_data2_t *pd2 = esp_post_data2 (b[0]);
-          esp_decrypt_post_crypto (vm, node, pd, pd2, b[0], next, is_ip6,
-                                   is_tun, 1);
+          esp_decrypt_post_crypto (vm, node, next_by_next_header, pd, pd2,
+                                   b[0], next, is_ip6, is_tun, 1);
         }

       /* trace: */
@@ -1377,7 +1396,7 @@ esp_decrypt_post_inline (vlib_main_t * vm,
           tr->crypto_alg = sa0->crypto_alg;
           tr->integ_alg = sa0->integ_alg;
           tr->seq = pd->seq;
-          tr->sa_seq = sa0->last_seq;
+          tr->sa_seq = sa0->seq;
           tr->sa_seq_hi = sa0->seq_hi;
         }
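With the trace additions above, a packet trace entry from this node reports
both the SA's view of the sequence space and the per-packet reconstructed ESN
half. Following the new format string, a trace line would look roughly like
this (algorithm names and sequence values are made up for illustration):

    esp: crypto aes-gcm-128 integrity none pkt-seq 17 sa-seq 16 sa-seq-hi 1 pkt-seq-hi 1

Comparing sa-seq-hi against pkt-seq-hi in such a trace is what makes
window-straddling ESN cases (where the two legitimately differ by one)
visible when debugging replay drops.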