X-Git-Url: https://gerrit.fd.io/r/gitweb?a=blobdiff_plain;f=src%2Fvnet%2Fipsec%2Fesp_decrypt.c;h=94f3204b51ffe5d839b6869b4d33ee3015f227da;hb=a4df00f65107d3b34c579c603fb0019d1514d839;hp=f5b6232dbd811761bec2222076018315bb58d64e;hpb=4a58e49cfe03150034a65e147a2ffe8d24391b86;p=vpp.git

diff --git a/src/vnet/ipsec/esp_decrypt.c b/src/vnet/ipsec/esp_decrypt.c
index f5b6232dbd8..94f3204b51f 100644
--- a/src/vnet/ipsec/esp_decrypt.c
+++ b/src/vnet/ipsec/esp_decrypt.c
@@ -14,7 +14,6 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-
 #include <vlib/vlib.h>
 #include <vnet/vnet.h>
 #include <vnet/api_errno.h>
@@ -58,44 +57,21 @@ typedef enum
   ESP_DECRYPT_POST_N_NEXT,
 } esp_decrypt_post_next_t;
 
-#define foreach_esp_decrypt_error                               \
- _(RX_PKTS, "ESP pkts received")                                \
- _(RX_POST_PKTS, "ESP-POST pkts received")                      \
- _(DECRYPTION_FAILED, "ESP decryption failed")                  \
- _(INTEG_ERROR, "Integrity check failed")                       \
- _(CRYPTO_ENGINE_ERROR, "crypto engine error (packet dropped)") \
- _(REPLAY, "SA replayed packet")                                \
- _(RUNT, "undersized packet")                                   \
- _(NO_BUFFERS, "no buffers (packet dropped)")                   \
- _(OVERSIZED_HEADER, "buffer with oversized header (dropped)")  \
- _(NO_TAIL_SPACE, "no enough buffer tail space (dropped)")      \
- _(TUN_NO_PROTO, "no tunnel protocol")                          \
- _(UNSUP_PAYLOAD, "unsupported payload")                        \
-
-
-typedef enum
-{
-#define _(sym,str) ESP_DECRYPT_ERROR_##sym,
-  foreach_esp_decrypt_error
-#undef _
-    ESP_DECRYPT_N_ERROR,
-} esp_decrypt_error_t;
-
-static char *esp_decrypt_error_strings[] = {
-#define _(sym,string) string,
-  foreach_esp_decrypt_error
-#undef _
-};
-
 typedef struct
 {
   u32 seq;
   u32 sa_seq;
   u32 sa_seq_hi;
+  u32 pkt_seq_hi;
   ipsec_crypto_alg_t crypto_alg;
   ipsec_integ_alg_t integ_alg;
 } esp_decrypt_trace_t;
 
+typedef vl_counter_esp_decrypt_enum_t esp_decrypt_error_t;
+
+/* The number of bytes in the hi sequence number */
+#define N_HI_ESN_BYTES 4
+
 /* packet trace format function */
 static u8 *
 format_esp_decrypt_trace (u8 * s, va_list * args)
@@ -104,11 +80,11 @@ format_esp_decrypt_trace (u8 * s, va_list * args)
   CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
   esp_decrypt_trace_t *t = va_arg (*args, esp_decrypt_trace_t *);
 
-  s =
-    format (s,
-            "esp: crypto %U integrity %U pkt-seq %d sa-seq %u sa-seq-hi %u",
-            format_ipsec_crypto_alg, t->crypto_alg, format_ipsec_integ_alg,
-            t->integ_alg, t->seq, t->sa_seq, t->sa_seq_hi);
+  s = format (s,
+              "esp: crypto %U integrity %U pkt-seq %d sa-seq %u sa-seq-hi %u "
+              "pkt-seq-hi %u",
+              format_ipsec_crypto_alg, t->crypto_alg, format_ipsec_integ_alg,
+              t->integ_alg, t->seq, t->sa_seq, t->sa_seq_hi, t->pkt_seq_hi);
 
   return s;
 }
@@ -137,8 +113,9 @@ esp_process_ops (vlib_main_t * vm, vlib_node_runtime_t * node,
            err = e;
          else
            err = ESP_DECRYPT_ERROR_CRYPTO_ENGINE_ERROR;
-         b[bi]->error = node->errors[err];
-         nexts[bi] = ESP_DECRYPT_NEXT_DROP;
+         esp_decrypt_set_next_index (b[bi], node, vm->thread_index, err, bi,
+                                     nexts, ESP_DECRYPT_NEXT_DROP,
+                                     vnet_buffer (b[bi])->ipsec.sad_index);
          n_fail--;
        }
       op++;
@@ -154,7 +131,7 @@ esp_process_chained_ops (vlib_main_t * vm, vlib_node_runtime_t * node,
   vnet_crypto_op_t *op = ops;
   u32 n_fail, n_ops = vec_len (ops);
 
-  if (n_ops == 0)
+  if (PREDICT_TRUE (n_ops == 0))
     return;
 
   n_fail = n_ops - vnet_crypto_process_chained_ops (vm, op, chunks, n_ops);
@@ -169,8 +146,9 @@ esp_process_chained_ops (vlib_main_t * vm, vlib_node_runtime_t * node,
            err = e;
          else
            err = ESP_DECRYPT_ERROR_CRYPTO_ENGINE_ERROR;
-         b[bi]->error = node->errors[err];
-         nexts[bi] = ESP_DECRYPT_NEXT_DROP;
+         esp_decrypt_set_next_index (b[bi], node, vm->thread_index, err, bi,
+                                     nexts, ESP_DECRYPT_NEXT_DROP,
+                                     vnet_buffer (b[bi])->ipsec.sad_index);
          n_fail--;
        }
       op++;
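
Both hunks above replace the open-coded drop (set b->error, point nexts[] at the
drop node) with esp_decrypt_set_next_index(), which additionally charges the
failure to the SA that owned the packet. The helper is defined in esp.h, outside
this diff; the following is only a sketch of the shape its call sites imply,
with the node-error-to-SA-counter mapping left as an assumption:

/* Sketch only -- not the esp.h implementation. The per-SA counter vector
 * (ipsec_sa_err_counters) is taken from later hunks in this diff. */
static_always_inline void
esp_decrypt_set_next_index_sketch (vlib_buffer_t *b, vlib_node_runtime_t *node,
                                   u32 thread_index, u32 err, u16 index,
                                   u16 *nexts, u16 drop_next, u32 sa_index)
{
  nexts[index] = drop_next;      /* steer the packet to its drop node */
  b->error = node->errors[err];  /* per-node error counter, as before */
  /* plus per-SA accounting keyed by sa_index (the exact mapping from the
   * node error to an IPSEC_SA_ERROR_* index is elided here) */
}
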
@@ -183,6 +161,9 @@ esp_remove_tail (vlib_main_t * vm, vlib_buffer_t * b, vlib_buffer_t * last,
 {
   vlib_buffer_t *before_last = b;
 
+  if (b != last)
+    b->total_length_not_including_first_buffer -= tail;
+
   if (last->current_length > tail)
     {
       last->current_length -= tail;
@@ -200,6 +181,37 @@ esp_remove_tail (vlib_main_t * vm, vlib_buffer_t * b, vlib_buffer_t * last,
   before_last->flags &= ~VLIB_BUFFER_NEXT_PRESENT;
 }
 
+always_inline void
+esp_remove_tail_and_tfc_padding (vlib_main_t *vm, vlib_node_runtime_t *node,
+                                 const esp_decrypt_packet_data_t *pd,
+                                 vlib_buffer_t *b, vlib_buffer_t *last,
+                                 u16 *next, u16 tail, int is_ip6)
+{
+  const u16 total_buffer_length = vlib_buffer_length_in_chain (vm, b);
+  u16 ip_packet_length;
+  if (is_ip6)
+    {
+      const ip6_header_t *ip6 = vlib_buffer_get_current (b);
+      ip_packet_length =
+        clib_net_to_host_u16 (ip6->payload_length) + sizeof (ip6_header_t);
+    }
+  else
+    {
+      const ip4_header_t *ip4 = vlib_buffer_get_current (b);
+      ip_packet_length = clib_net_to_host_u16 (ip4->length);
+    }
+  /* In case of TFC padding, the size of the buffer data needs to be adjusted
+   * to the ip packet length */
+  if (PREDICT_FALSE (total_buffer_length < ip_packet_length + tail))
+    {
+      esp_decrypt_set_next_index (b, node, vm->thread_index,
+                                  ESP_DECRYPT_ERROR_NO_TAIL_SPACE, 0, next,
+                                  ESP_DECRYPT_NEXT_DROP, pd->sa_index);
+      return;
+    }
+  esp_remove_tail (vm, b, last, total_buffer_length - ip_packet_length);
+}
+
 /* ICV is split in last two buffers so move it to the last buffer and
    return pointer to it */
 static_always_inline u8 *
@@ -225,9 +237,12 @@ esp_move_icv (vlib_main_t * vm, vlib_buffer_t * first,
   before_last->current_length -= first_sz;
   if (before_last == first)
     pd->current_length -= first_sz;
+  else
+    first->total_length_not_including_first_buffer -= first_sz;
   clib_memset (vlib_buffer_get_tail (before_last), 0, first_sz);
   if (dif)
     dif[0] = first_sz;
+  first->total_length_not_including_first_buffer -= last_sz;
   pd2->lb = before_last;
   pd2->icv_removed = 1;
   pd2->free_buffer_index = before_last->next_buffer;
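
esp_remove_tail_and_tfc_padding() closes a gap in RFC 4303 handling: a peer may
append Traffic Flow Confidentiality padding beyond the inner packet's declared
length, and the receiver must trim to the IP header's own idea of the size.
The arithmetic, with illustrative numbers:

/* Worked example of the trim above (made-up values):            */
u16 total_buffer_length = 120;  /* plaintext bytes in the chain  */
u16 ip_packet_length = 100;     /* inner ip4->length             */
u16 tail = 14;                  /* esp_footer_t + padding + ICV  */

if (total_buffer_length < ip_packet_length + tail)
  ;  /* malformed: drop with ESP_DECRYPT_ERROR_NO_TAIL_SPACE     */
else
  ;  /* trim 120 - 100 = 20 bytes: the 14-byte ESP trailer plus
      * 6 bytes of TFC padding */
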
@@ -235,40 +250,41 @@ esp_move_icv (vlib_main_t * vm, vlib_buffer_t * first,
   return lb_curr;
 }
 
-static_always_inline i16
-esp_insert_esn (vlib_main_t * vm, ipsec_sa_t * sa,
-                esp_decrypt_packet_data2_t * pd2, u32 * data_len,
-                u8 ** digest, u16 * len, vlib_buffer_t * b, u8 * payload)
+static_always_inline u16
+esp_insert_esn (vlib_main_t *vm, ipsec_sa_t *sa, esp_decrypt_packet_data_t *pd,
+                esp_decrypt_packet_data2_t *pd2, u32 *data_len, u8 **digest,
+                u16 *len, vlib_buffer_t *b, u8 *payload)
 {
   if (!ipsec_sa_is_set_USE_ESN (sa))
     return 0;
-
   /* shift ICV by 4 bytes to insert ESN */
-  u32 seq_hi = clib_host_to_net_u32 (sa->seq_hi);
-  u8 tmp[ESP_MAX_ICV_SIZE], sz = sizeof (sa->seq_hi);
+  u32 seq_hi = clib_host_to_net_u32 (pd->seq_hi);
+  u8 tmp[ESP_MAX_ICV_SIZE];
 
   if (pd2->icv_removed)
     {
       u16 space_left = vlib_buffer_space_left_at_end (vm, pd2->lb);
-      if (space_left >= sz)
+      if (space_left >= N_HI_ESN_BYTES)
        {
-         clib_memcpy_fast (vlib_buffer_get_tail (pd2->lb), &seq_hi, sz);
-         *data_len += sz;
+         clib_memcpy_fast (vlib_buffer_get_tail (pd2->lb), &seq_hi,
+                           N_HI_ESN_BYTES);
+         *data_len += N_HI_ESN_BYTES;
        }
       else
-       return sz;
+       return N_HI_ESN_BYTES;
 
       len[0] = b->current_length;
     }
   else
     {
       clib_memcpy_fast (tmp, payload + len[0], ESP_MAX_ICV_SIZE);
-      clib_memcpy_fast (payload + len[0], &seq_hi, sz);
-      clib_memcpy_fast (payload + len[0] + sz, tmp, ESP_MAX_ICV_SIZE);
-      *data_len += sz;
-      *digest += sz;
+      clib_memcpy_fast (payload + len[0], &seq_hi, N_HI_ESN_BYTES);
+      clib_memcpy_fast (payload + len[0] + N_HI_ESN_BYTES, tmp,
+                        ESP_MAX_ICV_SIZE);
+      *data_len += N_HI_ESN_BYTES;
+      *digest += N_HI_ESN_BYTES;
     }
-
-  return sz;
+  return N_HI_ESN_BYTES;
 }
 
@@ -284,14 +300,14 @@ esp_move_icv_esn (vlib_main_t * vm, vlib_buffer_t * first,
 
   if (ipsec_sa_is_set_USE_ESN (sa))
     {
-      u8 sz = sizeof (sa->seq_hi);
-      u32 seq_hi = clib_host_to_net_u32 (sa->seq_hi);
+      u32 seq_hi = clib_host_to_net_u32 (pd->seq_hi);
       u16 space_left = vlib_buffer_space_left_at_end (vm, pd2->lb);
 
-      if (space_left >= sz)
+      if (space_left >= N_HI_ESN_BYTES)
        {
-         clib_memcpy_fast (vlib_buffer_get_tail (pd2->lb), &seq_hi, sz);
-         *len += sz;
+         clib_memcpy_fast (vlib_buffer_get_tail (pd2->lb), &seq_hi,
+                           N_HI_ESN_BYTES);
+         *len += N_HI_ESN_BYTES;
        }
       else
        {
@@ -299,7 +315,8 @@ esp_move_icv_esn (vlib_main_t * vm, vlib_buffer_t * first,
           * (with ICV data) */
          ASSERT (pd2->icv_removed);
          vlib_buffer_t *tmp = vlib_get_buffer (vm, pd2->free_buffer_index);
-         clib_memcpy_fast (vlib_buffer_get_current (tmp) - sz, &seq_hi, sz);
+         clib_memcpy_fast (vlib_buffer_get_current (tmp) - N_HI_ESN_BYTES,
+                           &seq_hi, N_HI_ESN_BYTES);
          extra_esn[0] = 1;
        }
     }
@@ -307,11 +324,12 @@ esp_move_icv_esn (vlib_main_t * vm, vlib_buffer_t * first,
 }
 
 static_always_inline int
-esp_decrypt_chain_integ (vlib_main_t * vm, ipsec_per_thread_data_t * ptd,
-                         esp_decrypt_packet_data2_t * pd2,
-                         ipsec_sa_t * sa0, vlib_buffer_t * b, u8 icv_sz,
-                         u8 * start_src, u32 start_len,
-                         u8 ** digest, u16 * n_ch, u32 * integ_total_len)
+esp_decrypt_chain_integ (vlib_main_t *vm, ipsec_per_thread_data_t *ptd,
+                         const esp_decrypt_packet_data_t *pd,
+                         esp_decrypt_packet_data2_t *pd2, ipsec_sa_t *sa0,
+                         vlib_buffer_t *b, u8 icv_sz, u8 *start_src,
+                         u32 start_len, u8 **digest, u16 *n_ch,
+                         u32 *integ_total_len)
 {
   vnet_crypto_op_chunk_t *ch;
   vlib_buffer_t *cb = vlib_get_buffer (vm, b->next_buffer);
@@ -334,19 +352,19 @@ esp_decrypt_chain_integ (vlib_main_t * vm, ipsec_per_thread_data_t * ptd,
          ch->len = cb->current_length - icv_sz;
          if (ipsec_sa_is_set_USE_ESN (sa0))
            {
-             u32 seq_hi = clib_host_to_net_u32 (sa0->seq_hi);
-             u8 tmp[ESP_MAX_ICV_SIZE], sz = sizeof (sa0->seq_hi);
+             u32 seq_hi = clib_host_to_net_u32 (pd->seq_hi);
+             u8 tmp[ESP_MAX_ICV_SIZE];
              u8 *esn;
              vlib_buffer_t *tmp_b;
              u16 space_left = vlib_buffer_space_left_at_end (vm, pd2->lb);
-             if (space_left < sz)
+             if (space_left < N_HI_ESN_BYTES)
                {
                  if (pd2->icv_removed)
                    {
                      /* use pre-data area from the last buffer
                         that was removed from the chain */
                      tmp_b = vlib_get_buffer (vm, pd2->free_buffer_index);
-                     esn = tmp_b->data - sz;
+                     esn = tmp_b->data - N_HI_ESN_BYTES;
                    }
                  else
                    {
@@ -358,28 +376,29 @@ esp_decrypt_chain_integ (vlib_main_t * vm, ipsec_per_thread_data_t * ptd,
                      esn = tmp_b->data;
                      pd2->free_buffer_index = tmp_bi;
                    }
-                 clib_memcpy_fast (esn, &seq_hi, sz);
+                 clib_memcpy_fast (esn, &seq_hi, N_HI_ESN_BYTES);
 
                  vec_add2 (ptd->chunks, ch, 1);
                  n_chunks += 1;
                  ch->src = esn;
-                 ch->len = sz;
+                 ch->len = N_HI_ESN_BYTES;
                }
              else
                {
                  if (pd2->icv_removed)
                    {
-                     clib_memcpy_fast (vlib_buffer_get_tail
                                       (pd2->lb), &seq_hi, sz);
+                     clib_memcpy_fast (vlib_buffer_get_tail (pd2->lb),
+                                       &seq_hi, N_HI_ESN_BYTES);
                    }
                  else
                    {
                      clib_memcpy_fast (tmp, *digest, ESP_MAX_ICV_SIZE);
-                     clib_memcpy_fast (*digest, &seq_hi, sz);
-                     clib_memcpy_fast (*digest + sz, tmp, ESP_MAX_ICV_SIZE);
-                     *digest += sz;
+                     clib_memcpy_fast (*digest, &seq_hi, N_HI_ESN_BYTES);
+                     clib_memcpy_fast (*digest + N_HI_ESN_BYTES, tmp,
+                                       ESP_MAX_ICV_SIZE);
+                     *digest += N_HI_ESN_BYTES;
                    }
-                 ch->len += sz;
+                 ch->len += N_HI_ESN_BYTES;
                }
            }
          total_len += ch->len;
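
All of these hunks switch the integrity path from sa->seq_hi to pd->seq_hi:
with batched (and possibly asynchronous) decrypts, the SA's window may advance
between preparing and verifying a packet, so the high ESN word reconstructed
for this packet must be captured in its per-packet data. Conceptually, with a
hypothetical helper built only from standard VPP memcpy primitives:

/* RFC 4303 Appendix A: the ICV of an ESN SA covers the high 32 bits of
 * the sequence number even though they are never transmitted. Splice the
 * receiver's candidate seq-hi between payload and ICV (assumes 4 spare
 * bytes after the ICV): */
static inline u32
esn_integ_len (u8 *payload, u16 len, u8 icv_sz, u32 seq_hi)
{
  u8 icv[ESP_MAX_ICV_SIZE];
  u32 hi = clib_host_to_net_u32 (seq_hi);

  clib_memcpy_fast (icv, payload + len, icv_sz);     /* park the ICV   */
  clib_memcpy_fast (payload + len, &hi, 4);          /* insert seq-hi  */
  clib_memcpy_fast (payload + len + 4, icv, icv_sz); /* ICV back after */
  return len + 4;                                    /* bytes to hash  */
}
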
@@ -475,18 +494,16 @@ esp_decrypt_chain_crypto (vlib_main_t * vm, ipsec_per_thread_data_t * ptd,
   return total_len;
 }
 
-static_always_inline void
-esp_decrypt_prepare_sync_op (vlib_main_t * vm, vlib_node_runtime_t * node,
-                             ipsec_per_thread_data_t * ptd,
-                             vnet_crypto_op_t *** crypto_ops,
-                             vnet_crypto_op_t *** integ_ops,
-                             vnet_crypto_op_t * op,
-                             ipsec_sa_t * sa0, u8 * payload,
-                             u16 len, u8 icv_sz, u8 iv_sz,
-                             esp_decrypt_packet_data_t * pd,
-                             esp_decrypt_packet_data2_t * pd2,
-                             vlib_buffer_t * b, u16 * next, u32 index)
+static_always_inline esp_decrypt_error_t
+esp_decrypt_prepare_sync_op (vlib_main_t *vm, ipsec_per_thread_data_t *ptd,
+                             ipsec_sa_t *sa0, u8 *payload, u16 len, u8 icv_sz,
+                             u8 iv_sz, esp_decrypt_packet_data_t *pd,
+                             esp_decrypt_packet_data2_t *pd2, vlib_buffer_t *b,
+                             u32 index)
 {
+  vnet_crypto_op_t **crypto_ops;
+  vnet_crypto_op_t **integ_ops;
+  vnet_crypto_op_t _op, *op = &_op;
   const u8 esp_sz = sizeof (esp_header_t);
 
   if (PREDICT_TRUE (sa0->integ_op_id != VNET_CRYPTO_OP_NONE))
@@ -503,6 +520,8 @@ esp_decrypt_prepare_sync_op (vlib_main_t * vm, vlib_node_runtime_t * node,
       if (pd->is_chain)
        {
          /* buffer is chained */
+         integ_ops = &ptd->chained_integ_ops;
+
          op->len = pd->current_length;
 
          /* special case when ICV is split and needs to be reassembled
@@ -528,8 +547,7 @@ esp_decrypt_prepare_sync_op (vlib_main_t * vm, vlib_node_runtime_t * node,
                {
                  /* we now have a single buffer of crypto data, adjust
                   * the length (second buffer contains only ICV) */
-                 *integ_ops = &ptd->integ_ops;
-                 *crypto_ops = &ptd->crypto_ops;
+                 integ_ops = &ptd->integ_ops;
                  len = b->current_length;
                  goto out;
                }
@@ -540,20 +558,19 @@ esp_decrypt_prepare_sync_op (vlib_main_t * vm, vlib_node_runtime_t * node,
            op->flags |= VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS;
 
          op->chunk_index = vec_len (ptd->chunks);
-         if (esp_decrypt_chain_integ (vm, ptd, pd2, sa0, b, icv_sz,
+         if (esp_decrypt_chain_integ (vm, ptd, pd, pd2, sa0, b, icv_sz,
                                       payload, pd->current_length,
                                       &op->digest, &op->n_chunks, 0) < 0)
-           {
-             b->error = node->errors[ESP_DECRYPT_ERROR_NO_BUFFERS];
-             next[0] = ESP_DECRYPT_NEXT_DROP;
-             return;
-           }
+           return ESP_DECRYPT_ERROR_NO_BUFFERS;
        }
       else
-       esp_insert_esn (vm, sa0, pd2, &op->len, &op->digest, &len, b,
-                       payload);
+       {
+         integ_ops = &ptd->integ_ops;
+         esp_insert_esn (vm, sa0, pd, pd2, &op->len, &op->digest, &len, b,
+                         payload);
+       }
     out:
-      vec_add_aligned (*(integ_ops[0]), op, 1, CLIB_CACHE_LINE_BYTES);
+      vec_add_aligned (*integ_ops, op, 1, CLIB_CACHE_LINE_BYTES);
     }
 
   payload += esp_sz;
@@ -565,34 +582,35 @@ esp_decrypt_prepare_sync_op (vlib_main_t * vm, vlib_node_runtime_t * node,
       op->key_index = sa0->crypto_key_index;
       op->iv = payload;
 
-      if (ipsec_sa_is_set_IS_AEAD (sa0))
+      if (ipsec_sa_is_set_IS_CTR (sa0))
        {
-         esp_header_t *esp0;
-         esp_aead_t *aad;
-         u8 *scratch;
-
-         /*
-          * construct the AAD and the nonce (Salt || IV) in a scratch
-          * space in front of the IP header.
-          */
-         scratch = payload - esp_sz;
-         esp0 = (esp_header_t *) (scratch);
-
-         scratch -= (sizeof (*aad) + pd->hdr_sz);
-         op->aad = scratch;
-
-         op->aad_len = esp_aad_fill (op->aad, esp0, sa0);
-
-         /*
-          * we don't need to refer to the ESP header anymore so we
-          * can overwrite it with the salt and use the IV where it is
-          * to form the nonce = (Salt + IV)
-          */
-         op->iv -= sizeof (sa0->salt);
-         clib_memcpy_fast (op->iv, &sa0->salt, sizeof (sa0->salt));
-
-         op->tag = payload + len;
-         op->tag_len = 16;
+         /* construct nonce in a scratch space in front of the IP header */
+         esp_ctr_nonce_t *nonce =
+           (esp_ctr_nonce_t *) (payload - esp_sz - pd->hdr_sz -
+                                sizeof (*nonce));
+         if (ipsec_sa_is_set_IS_AEAD (sa0))
+           {
+             /* construct aad in a scratch space in front of the nonce */
+             esp_header_t *esp0 = (esp_header_t *) (payload - esp_sz);
+             op->aad = (u8 *) nonce - sizeof (esp_aead_t);
+             op->aad_len = esp_aad_fill (op->aad, esp0, sa0, pd->seq_hi);
+             op->tag = payload + len;
+             op->tag_len = 16;
+             if (PREDICT_FALSE (ipsec_sa_is_set_IS_NULL_GMAC (sa0)))
+               {
+                 /* RFC-4543 ENCR_NULL_AUTH_AES_GMAC: IV is part of AAD */
+                 payload -= iv_sz;
+                 len += iv_sz;
+               }
+           }
+         else
+           {
+             nonce->ctr = clib_host_to_net_u32 (1);
+           }
+         nonce->salt = sa0->salt;
+         ASSERT (sizeof (u64) == iv_sz);
+         nonce->iv = *(u64 *) op->iv;
+         op->iv = (u8 *) nonce;
        }
       op->src = op->dst = payload += iv_sz;
       op->len = len - iv_sz;
@@ -606,30 +624,32 @@ esp_decrypt_prepare_sync_op (vlib_main_t * vm, vlib_node_runtime_t * node,
          esp_decrypt_chain_crypto (vm, ptd, pd, pd2, sa0, b, icv_sz,
                                    payload, len - pd->iv_sz + pd->icv_sz,
                                    &op->tag, &op->n_chunks);
+         crypto_ops = &ptd->chained_crypto_ops;
+       }
+      else
+       {
+         crypto_ops = &ptd->crypto_ops;
        }
 
-      vec_add_aligned (*(crypto_ops[0]), op, 1, CLIB_CACHE_LINE_BYTES);
+      vec_add_aligned (*crypto_ops, op, 1, CLIB_CACHE_LINE_BYTES);
     }
+
+  return ESP_DECRYPT_ERROR_RX_PKTS;
 }
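
The rewritten block drives plain AES-CTR and AEAD (GCM) through one path: the
SA's salt and the per-packet IV are assembled into a counter-mode nonce in
scratch space in front of the packet. esp_ctr_nonce_t itself is defined in
esp.h, not in this diff; the layout below is inferred from the assignments
above — an assumption, matching RFC 3686's counter-block format:

/* Inferred layout -- see esp.h for the authoritative definition. */
typedef CLIB_PACKED (struct {
  u32 salt;  /* from the SA, never sent on the wire           */
  u64 iv;    /* per-packet IV lifted from the ESP payload     */
  u32 ctr;   /* block counter; host_to_net(1) for first block */
}) esp_ctr_nonce_t;

For GCM the crypto engine derives its own initial counter from the 12-byte
salt||iv, so ctr is only written on the plain-CTR branch, where RFC 3686
requires the block counter to start at 1.
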
 
-static_always_inline int
-esp_decrypt_prepare_async_frame (vlib_main_t * vm,
-                                 vlib_node_runtime_t * node,
-                                 ipsec_per_thread_data_t * ptd,
-                                 vnet_crypto_async_frame_t ** f,
-                                 ipsec_sa_t * sa0, u8 * payload, u16 len,
-                                 u8 icv_sz, u8 iv_sz,
-                                 esp_decrypt_packet_data_t * pd,
-                                 esp_decrypt_packet_data2_t * pd2, u32 bi,
-                                 vlib_buffer_t * b, u16 * next,
-                                 u16 async_next)
+static_always_inline esp_decrypt_error_t
+esp_decrypt_prepare_async_frame (vlib_main_t *vm, ipsec_per_thread_data_t *ptd,
+                                 vnet_crypto_async_frame_t *f, ipsec_sa_t *sa0,
+                                 u8 *payload, u16 len, u8 icv_sz, u8 iv_sz,
+                                 esp_decrypt_packet_data_t *pd,
+                                 esp_decrypt_packet_data2_t *pd2, u32 bi,
+                                 vlib_buffer_t *b, u16 async_next)
 {
   const u8 esp_sz = sizeof (esp_header_t);
-  u32 current_protect_index = vnet_buffer (b)->ipsec.protect_index;
   esp_decrypt_packet_data_t *async_pd = &(esp_post_data (b))->decrypt_data;
   esp_decrypt_packet_data2_t *async_pd2 = esp_post_data2 (b);
   u8 *tag = payload + len, *iv = payload + esp_sz, *aad = 0;
-  u32 key_index;
+  const u32 key_index = sa0->crypto_key_index;
   u32 crypto_len, integ_len = 0;
   i16 crypto_start_offset, integ_start_offset = 0;
   u8 flags = 0;
 
@@ -637,9 +657,10 @@ esp_decrypt_prepare_async_frame (vlib_main_t * vm,
   if (!ipsec_sa_is_set_IS_AEAD (sa0))
     {
       /* linked algs */
-      key_index = sa0->linked_key_index;
       integ_start_offset = payload - b->data;
       integ_len = len;
+      if (PREDICT_TRUE (sa0->integ_op_id != VNET_CRYPTO_OP_NONE))
+       flags |= VNET_CRYPTO_OP_FLAG_HMAC_CHECK;
 
       if (pd->is_chain)
        {
@@ -677,21 +698,17 @@ esp_decrypt_prepare_async_frame (vlib_main_t * vm,
          tag = vlib_buffer_get_tail (pd2->lb) - icv_sz;
          flags |= VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS;
 
-         if (esp_decrypt_chain_integ (vm, ptd, pd2, sa0, b, icv_sz, payload,
-                                      pd->current_length, &tag,
-                                      0, &integ_len) < 0)
+         if (esp_decrypt_chain_integ (vm, ptd, pd, pd2, sa0, b, icv_sz,
+                                      payload, pd->current_length, &tag, 0,
+                                      &integ_len) < 0)
            {
              /* allocate buffer failed, will not add to frame and drop */
-             b->error = node->errors[ESP_DECRYPT_ERROR_NO_BUFFERS];
-             next[0] = ESP_DECRYPT_NEXT_DROP;
-             return -1;
+             return (ESP_DECRYPT_ERROR_NO_BUFFERS);
            }
        }
       else
-       esp_insert_esn (vm, sa0, pd2, &integ_len, &tag, &len, b, payload);
+       esp_insert_esn (vm, sa0, pd, pd2, &integ_len, &tag, &len, b, payload);
     }
-  else
-    key_index = sa0->crypto_key_index;
 
 out:
   /* crypto */
@@ -699,32 +716,33 @@ out:
   len -= esp_sz;
   iv = payload;
 
-  if (ipsec_sa_is_set_IS_AEAD (sa0))
+  if (ipsec_sa_is_set_IS_CTR (sa0))
    {
-      esp_header_t *esp0;
-      u8 *scratch;
-
-      /*
-       * construct the AAD and the nonce (Salt || IV) in a scratch
-       * space in front of the IP header.
-       */
-      scratch = payload - esp_sz;
-      esp0 = (esp_header_t *) (scratch);
-
-      scratch -= (sizeof (esp_aead_t) + pd->hdr_sz);
-      aad = scratch;
-
-      esp_aad_fill (aad, esp0, sa0);
-
-      /*
-       * we don't need to refer to the ESP header anymore so we
-       * can overwrite it with the salt and use the IV where it is
-       * to form the nonce = (Salt + IV)
-       */
-      iv -= sizeof (sa0->salt);
-      clib_memcpy_fast (iv, &sa0->salt, sizeof (sa0->salt));
-
-      tag = payload + len;
+      /* construct nonce in a scratch space in front of the IP header */
+      esp_ctr_nonce_t *nonce =
+       (esp_ctr_nonce_t *) (payload - esp_sz - pd->hdr_sz - sizeof (*nonce));
+      if (ipsec_sa_is_set_IS_AEAD (sa0))
+       {
+         /* construct aad in a scratch space in front of the nonce */
+         esp_header_t *esp0 = (esp_header_t *) (payload - esp_sz);
+         aad = (u8 *) nonce - sizeof (esp_aead_t);
+         esp_aad_fill (aad, esp0, sa0, pd->seq_hi);
+         tag = payload + len;
+         if (PREDICT_FALSE (ipsec_sa_is_set_IS_NULL_GMAC (sa0)))
+           {
+             /* RFC-4543 ENCR_NULL_AUTH_AES_GMAC: IV is part of AAD */
+             payload -= iv_sz;
+             len += iv_sz;
+           }
+       }
+      else
+       {
+         nonce->ctr = clib_host_to_net_u32 (1);
+       }
+      nonce->salt = sa0->salt;
+      ASSERT (sizeof (u64) == iv_sz);
+      nonce->iv = *(u64 *) iv;
+      iv = (u8 *) nonce;
    }
 
   crypto_start_offset = (payload += iv_sz) - b->data;
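
esp_aad_fill() (defined in esp.h) now takes the reconstructed high sequence
number: for an ESN SA, RFC 4106 section 5 requires the AAD to carry the full
64-bit sequence. A hedged sketch of the layout it is expected to emit — an
assumption for illustration, not the authoritative helper:

/* Assumed AAD layout; all words in network byte order. */
static inline u16
esp_aad_fill_sketch (u8 *aad, const esp_header_t *esp, int use_esn, u32 seq_hi)
{
  u32 *a = (u32 *) aad;

  a[0] = esp->spi;                          /* already network order */
  if (use_esn)
    {
      a[1] = clib_host_to_net_u32 (seq_hi); /* high 32 bits          */
      a[2] = esp->seq;                      /* low 32 bits           */
      return 12;
    }
  a[1] = esp->seq;
  return 8;
}
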
@@ -743,30 +761,31 @@ out:
   *async_pd = *pd;
   *async_pd2 = *pd2;
-  pd->protect_index = current_protect_index;
 
   /* for AEAD integ_len - crypto_len will be negative, it is ok since it
    * is ignored by the engine. */
-  return vnet_crypto_async_add_to_frame (vm, f, key_index, crypto_len,
-                                         integ_len - crypto_len,
-                                         crypto_start_offset,
-                                         integ_start_offset,
-                                         bi, async_next, iv, tag, aad, flags);
+  vnet_crypto_async_add_to_frame (
+    vm, f, key_index, crypto_len, integ_len - crypto_len, crypto_start_offset,
+    integ_start_offset, bi, async_next, iv, tag, aad, flags);
+
+  return (ESP_DECRYPT_ERROR_RX_PKTS);
 }
 
 static_always_inline void
-esp_decrypt_post_crypto (vlib_main_t * vm, vlib_node_runtime_t * node,
-                         esp_decrypt_packet_data_t * pd,
-                         esp_decrypt_packet_data2_t * pd2, vlib_buffer_t * b,
-                         u16 * next, int is_ip6, int is_tun, int is_async)
+esp_decrypt_post_crypto (vlib_main_t *vm, vlib_node_runtime_t *node,
+                         const u16 *next_by_next_header,
+                         const esp_decrypt_packet_data_t *pd,
+                         const esp_decrypt_packet_data2_t *pd2,
+                         vlib_buffer_t *b, u16 *next, int is_ip6, int is_tun,
+                         int is_async)
 {
-  ipsec_main_t *im = &ipsec_main;
-  ipsec_sa_t *sa0 = vec_elt_at_index (im->sad, pd->sa_index);
+  ipsec_sa_t *sa0 = ipsec_sa_get (pd->sa_index);
   vlib_buffer_t *lb = b;
   const u8 esp_sz = sizeof (esp_header_t);
   const u8 tun_flags = IPSEC_SA_FLAG_IS_TUNNEL | IPSEC_SA_FLAG_IS_TUNNEL_V6;
   u8 pad_length = 0, next_header = 0;
   u16 icv_sz;
+  u64 n_lost;
 
   /*
    * redo the anti-replay check
@@ -775,29 +794,50 @@ esp_decrypt_post_crypto (vlib_main_t * vm, vlib_node_runtime_t * node,
    * check above we did so against the state of the window (W),
    * after packet s-1. So each of the packets in the sequence will be
    * accepted.
-   * This time s will be cheked against Ws-1, s+1 chceked against Ws
-   * (i.e. the window state is updated/advnaced)
-   * so this time the successive s+! packet will be dropped.
+   * This time s will be checked against Ws-1, s+1 checked against Ws
+   * (i.e. the window state is updated/advanced)
+   * so this time the successive s+1 packet will be dropped.
    * This is a consequence of batching the decrypts. If the
-   * check-dcrypt-advance process was done for each packet it would
+   * check-decrypt-advance process was done for each packet it would
    * be fine. But we batch the decrypts because it's much more efficient
    * to do so in SW and if we offload to HW and the process is async.
    *
    * You're probably thinking, but this means an attacker can send the
-   * above sequence and cause VPP to perform decrpyts that will fail,
+   * above sequence and cause VPP to perform decrypts that will fail,
    * and that's true. But if the attacker can determine s (a valid
    * sequence number in the window) which is non-trivial, it can generate
    * a sequence s, s+1, s+2, s+3, ... s+n and nothing will prevent any
    * implementation, sequential or batching, from decrypting these.
    */
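
The scenario in the comment is easiest to follow against a minimal
sliding-window model (illustrative only; VPP's real
ipsec_sa_anti_replay_and_sn_advance() additionally handles ESN reconstruction
and the new huge windows):

/* Toy 64-packet window: bit i of 'window' set => seq (top - i) seen. */
typedef struct { u64 window; u32 top; } toy_replay_t;

static int
toy_check (const toy_replay_t *r, u32 seq)     /* read-only: batch phase */
{
  if (seq > r->top)
    return 0;                                  /* ahead of window: accept */
  if (r->top - seq >= 64)
    return 1;                                  /* older than window: drop */
  return (r->window >> (r->top - seq)) & 1;    /* 1 => replay             */
}

static void
toy_advance (toy_replay_t *r, u32 seq)         /* after decrypt succeeds  */
{
  if (seq > r->top)
    {
      u32 shift = seq - r->top;
      r->window = shift < 64 ? r->window << shift : 0;
      r->top = seq;
    }
  r->window |= (u64) 1 << (r->top - seq);      /* mark seq as seen        */
}

Because the node runs the check for a whole frame before any advance, two
copies of an in-window s in one frame both pass the first check; the re-check
here, made against the advanced window, catches the second copy.
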
-  if (ipsec_sa_anti_replay_check (sa0, pd->seq))
+  if (PREDICT_FALSE (ipsec_sa_is_set_ANTI_REPLAY_HUGE (sa0)))
     {
-      b->error = node->errors[ESP_DECRYPT_ERROR_REPLAY];
-      next[0] = ESP_DECRYPT_NEXT_DROP;
-      return;
+      if (ipsec_sa_anti_replay_and_sn_advance (sa0, pd->seq, pd->seq_hi, true,
+                                               NULL, true))
+       {
+         esp_decrypt_set_next_index (b, node, vm->thread_index,
+                                     ESP_DECRYPT_ERROR_REPLAY, 0, next,
+                                     ESP_DECRYPT_NEXT_DROP, pd->sa_index);
+         return;
+       }
+      n_lost = ipsec_sa_anti_replay_advance (sa0, vm->thread_index, pd->seq,
+                                             pd->seq_hi, true);
+    }
+  else
+    {
+      if (ipsec_sa_anti_replay_and_sn_advance (sa0, pd->seq, pd->seq_hi, true,
+                                               NULL, false))
+       {
+         esp_decrypt_set_next_index (b, node, vm->thread_index,
+                                     ESP_DECRYPT_ERROR_REPLAY, 0, next,
+                                     ESP_DECRYPT_NEXT_DROP, pd->sa_index);
+         return;
+       }
+      n_lost = ipsec_sa_anti_replay_advance (sa0, vm->thread_index, pd->seq,
+                                             pd->seq_hi, false);
     }
 
-  ipsec_sa_anti_replay_advance (sa0, pd->seq);
+  vlib_prefetch_simple_counter (&ipsec_sa_err_counters[IPSEC_SA_ERROR_LOST],
+                                vm->thread_index, pd->sa_index);
 
   if (pd->is_chain)
     {
@@ -856,7 +896,8 @@ esp_decrypt_post_crypto (vlib_main_t * vm, vlib_node_runtime_t * node,
       u16 adv = pd->iv_sz + esp_sz;
       u16 tail = sizeof (esp_footer_t) + pad_length + icv_sz;
       u16 tail_orig = sizeof (esp_footer_t) + pad_length + pd->icv_sz;
-      b->flags &= ~VLIB_BUFFER_TOTAL_LENGTH_VALID;
+      b->flags &=
+       ~(VNET_BUFFER_F_L4_CHECKSUM_COMPUTED | VNET_BUFFER_F_L4_CHECKSUM_CORRECT);
 
       if ((pd->flags & tun_flags) == 0 && !is_tun) /* transport mode */
        {
@@ -906,14 +947,16 @@ esp_decrypt_post_crypto (vlib_main_t * vm, vlib_node_runtime_t * node,
          next[0] = ESP_DECRYPT_NEXT_IP4_INPUT;
          b->current_data = pd->current_data + adv;
          b->current_length = pd->current_length - adv;
-         esp_remove_tail (vm, b, lb, tail);
+         esp_remove_tail_and_tfc_padding (vm, node, pd, b, lb, next, tail,
+                                          false);
        }
       else if (next_header == IP_PROTOCOL_IPV6)
        {
          next[0] = ESP_DECRYPT_NEXT_IP6_INPUT;
          b->current_data = pd->current_data + adv;
          b->current_length = pd->current_length - adv;
-         esp_remove_tail (vm, b, lb, tail);
+         esp_remove_tail_and_tfc_padding (vm, node, pd, b, lb, next, tail,
+                                          true);
        }
       else if (next_header == IP_PROTOCOL_MPLS_IN_IP)
        {
@@ -922,44 +965,51 @@ esp_decrypt_post_crypto (vlib_main_t * vm, vlib_node_runtime_t * node,
          b->current_length = pd->current_length - adv;
          esp_remove_tail (vm, b, lb, tail);
        }
-      else
+      else if (is_tun && next_header == IP_PROTOCOL_GRE)
        {
-         if (is_tun && next_header == IP_PROTOCOL_GRE)
-           {
-             gre_header_t *gre;
+         gre_header_t *gre;
 
-             b->current_data = pd->current_data + adv;
-             b->current_length = pd->current_length - adv - tail;
+         b->current_data = pd->current_data + adv;
+         b->current_length = pd->current_length - adv - tail;
 
-             gre = vlib_buffer_get_current (b);
+         gre = vlib_buffer_get_current (b);
 
-             vlib_buffer_advance (b, sizeof (*gre));
+         vlib_buffer_advance (b, sizeof (*gre));
 
-             switch (clib_net_to_host_u16 (gre->protocol))
-               {
-               case GRE_PROTOCOL_teb:
-                 vnet_update_l2_len (b);
-                 next[0] = ESP_DECRYPT_NEXT_L2_INPUT;
-                 break;
-               case GRE_PROTOCOL_ip4:
-                 next[0] = ESP_DECRYPT_NEXT_IP4_INPUT;
-                 break;
-               case GRE_PROTOCOL_ip6:
-                 next[0] = ESP_DECRYPT_NEXT_IP6_INPUT;
-                 break;
-               default:
-                 b->error = node->errors[ESP_DECRYPT_ERROR_UNSUP_PAYLOAD];
-                 next[0] = ESP_DECRYPT_NEXT_DROP;
-                 break;
-               }
-           }
-         else
+         switch (clib_net_to_host_u16 (gre->protocol))
            {
-             next[0] = ESP_DECRYPT_NEXT_DROP;
-             b->error = node->errors[ESP_DECRYPT_ERROR_UNSUP_PAYLOAD];
-             return;
+           case GRE_PROTOCOL_teb:
+             vnet_update_l2_len (b);
+             next[0] = ESP_DECRYPT_NEXT_L2_INPUT;
+             break;
+           case GRE_PROTOCOL_ip4:
+             next[0] = ESP_DECRYPT_NEXT_IP4_INPUT;
+             break;
+           case GRE_PROTOCOL_ip6:
+             next[0] = ESP_DECRYPT_NEXT_IP6_INPUT;
+             break;
+           default:
+             esp_decrypt_set_next_index (
+               b, node, vm->thread_index, ESP_DECRYPT_ERROR_UNSUP_PAYLOAD, 0,
+               next, ESP_DECRYPT_NEXT_DROP, pd->sa_index);
+             break;
            }
        }
+      else if ((next[0] = vec_elt (next_by_next_header, next_header)) !=
+              (u16) ~0)
+       {
+         b->current_data = pd->current_data + adv;
+         b->current_length = pd->current_length - adv;
+         esp_remove_tail (vm, b, lb, tail);
+       }
+      else
+       {
+         esp_decrypt_set_next_index (b, node, vm->thread_index,
+                                     ESP_DECRYPT_ERROR_UNSUP_PAYLOAD, 0, next,
+                                     ESP_DECRYPT_NEXT_DROP, pd->sa_index);
+         return;
+       }
+
       if (is_tun)
        {
          if (ipsec_sa_is_set_IS_PROTECT (sa0))
@@ -982,12 +1032,8 @@ esp_decrypt_post_crypto (vlib_main_t * vm, vlib_node_runtime_t * node,
           */
          const ipsec_tun_protect_t *itp;
 
-         if (is_async)
-           itp = ipsec_tun_protect_get (pd->protect_index);
-         else
-           itp =
-             ipsec_tun_protect_get (vnet_buffer (b)->
-                                    ipsec.protect_index);
+         itp =
+           ipsec_tun_protect_get (vnet_buffer (b)->ipsec.protect_index);
 
          if (PREDICT_TRUE (next_header == IP_PROTOCOL_IP_IN_IP))
            {
@@ -1000,8 +1046,10 @@ esp_decrypt_post_crypto (vlib_main_t * vm, vlib_node_runtime_t * node,
                  !ip46_address_is_equal_v4 (&itp->itp_tun.dst,
                                             &ip4->src_address))
                {
-                 next[0] = ESP_DECRYPT_NEXT_DROP;
-                 b->error = node->errors[ESP_DECRYPT_ERROR_TUN_NO_PROTO];
+                 esp_decrypt_set_next_index (
+                   b, node, vm->thread_index, ESP_DECRYPT_ERROR_TUN_NO_PROTO,
+                   0, next, ESP_DECRYPT_NEXT_DROP, pd->sa_index);
                }
            }
          else if (next_header == IP_PROTOCOL_IPV6)
@@ -1015,41 +1063,51 @@ esp_decrypt_post_crypto (vlib_main_t * vm, vlib_node_runtime_t * node,
                  !ip46_address_is_equal_v6 (&itp->itp_tun.dst,
                                             &ip6->src_address))
                {
-                 next[0] = ESP_DECRYPT_NEXT_DROP;
-                 b->error = node->errors[ESP_DECRYPT_ERROR_TUN_NO_PROTO];
+                 esp_decrypt_set_next_index (
+                   b, node, vm->thread_index, ESP_DECRYPT_ERROR_TUN_NO_PROTO,
+                   0, next, ESP_DECRYPT_NEXT_DROP, pd->sa_index);
                }
            }
        }
     }
+
+  if (PREDICT_FALSE (n_lost))
+    vlib_increment_simple_counter (&ipsec_sa_err_counters[IPSEC_SA_ERROR_LOST],
+                                   vm->thread_index, pd->sa_index, n_lost);
 }
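
Dispatch on the inner next-header is no longer a closed switch: the new
next_by_next_header table (im->next_header_registrations, populated by a
registration API elsewhere in the ipsec code, not shown in this diff) maps the
8-bit protocol number to a next node, with (u16) ~0 meaning unregistered.
Schematically, with illustrative entries:

/* Illustrative model of the table consulted above; the real table is a
 * vec and is indexed with vec_elt() for bounds checking. */
u16 next_by_next_header[256];
clib_memset_u16 (next_by_next_header, 0xffff, 256);  /* all unsupported */
next_by_next_header[IP_PROTOCOL_IP_IN_IP] = ESP_DECRYPT_NEXT_IP4_INPUT;
next_by_next_header[IP_PROTOCOL_IPV6] = ESP_DECRYPT_NEXT_IP6_INPUT;

u16 n = next_by_next_header[next_header];
if (n != (u16) ~0)
  ;  /* registered consumer: enqueue the decapsulated packet to node n */
else
  ;  /* unregistered: ESP_DECRYPT_ERROR_UNSUP_PAYLOAD drop             */
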
 
 always_inline uword
-esp_decrypt_inline (vlib_main_t * vm,
-                    vlib_node_runtime_t * node, vlib_frame_t * from_frame,
-                    int is_ip6, int is_tun, u16 async_next)
+esp_decrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
+                    vlib_frame_t *from_frame, int is_ip6, int is_tun,
+                    u16 async_next_node)
 {
   ipsec_main_t *im = &ipsec_main;
+  const u16 *next_by_next_header = im->next_header_registrations;
   u32 thread_index = vm->thread_index;
   u16 len;
   ipsec_per_thread_data_t *ptd = vec_elt_at_index (im->ptd, thread_index);
   u32 *from = vlib_frame_vector_args (from_frame);
   u32 n_left = from_frame->n_vectors;
   vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;
-  u16 nexts[VLIB_FRAME_SIZE], *next = nexts;
+  vlib_buffer_t *sync_bufs[VLIB_FRAME_SIZE];
+  u16 sync_nexts[VLIB_FRAME_SIZE], *sync_next = sync_nexts, n_sync = 0;
+  u16 async_nexts[VLIB_FRAME_SIZE], *async_next = async_nexts;
+  u16 noop_nexts[VLIB_FRAME_SIZE], n_noop = 0;
+  u32 sync_bi[VLIB_FRAME_SIZE];
+  u32 noop_bi[VLIB_FRAME_SIZE];
   esp_decrypt_packet_data_t pkt_data[VLIB_FRAME_SIZE], *pd = pkt_data;
   esp_decrypt_packet_data2_t pkt_data2[VLIB_FRAME_SIZE], *pd2 = pkt_data2;
   esp_decrypt_packet_data_t cpd = { };
   u32 current_sa_index = ~0, current_sa_bytes = 0, current_sa_pkts = 0;
   const u8 esp_sz = sizeof (esp_header_t);
   ipsec_sa_t *sa0 = 0;
-  vnet_crypto_op_t _op, *op = &_op;
-  vnet_crypto_op_t **crypto_ops = &ptd->crypto_ops;
-  vnet_crypto_op_t **integ_ops = &ptd->integ_ops;
-  vnet_crypto_async_frame_t *async_frame = 0;
+  bool anti_replay_result;
   int is_async = im->async_mode;
-  vnet_crypto_async_op_id_t last_async_op = ~0;
-  u16 n_async_drop = 0;
+  vnet_crypto_async_op_id_t async_op = ~0;
+  vnet_crypto_async_frame_t *async_frames[VNET_CRYPTO_ASYNC_OP_N_IDS];
+  esp_decrypt_error_t err;
 
   vlib_get_buffers (vm, from, b, n_left);
   if (!is_async)
@@ -1059,30 +1117,33 @@ esp_decrypt_inline (vlib_main_t * vm,
       vec_reset_length (ptd->chained_crypto_ops);
       vec_reset_length (ptd->chained_integ_ops);
     }
+  vec_reset_length (ptd->async_frames);
   vec_reset_length (ptd->chunks);
-  clib_memset_u16 (nexts, -1, n_left);
+  clib_memset (sync_nexts, -1, sizeof (sync_nexts));
+  clib_memset (async_frames, 0, sizeof (async_frames));
 
   while (n_left > 0)
    {
      u8 *payload;
 
+      err = ESP_DECRYPT_ERROR_RX_PKTS;
      if (n_left > 2)
       {
         u8 *p;
         vlib_prefetch_buffer_header (b[2], LOAD);
         p = vlib_buffer_get_current (b[1]);
-         CLIB_PREFETCH (p, CLIB_CACHE_LINE_BYTES, LOAD);
+         clib_prefetch_load (p);
         p -= CLIB_CACHE_LINE_BYTES;
-         CLIB_PREFETCH (p, CLIB_CACHE_LINE_BYTES, LOAD);
+         clib_prefetch_load (p);
       }
 
      u32 n_bufs = vlib_buffer_chain_linearize (vm, b[0]);
      if (n_bufs == 0)
       {
-         b[0]->error = node->errors[ESP_DECRYPT_ERROR_NO_BUFFERS];
-         esp_set_next_index (is_async, from, nexts, from[b - bufs],
-                             &n_async_drop, ESP_DECRYPT_NEXT_DROP, next);
-         next[0] = ESP_DECRYPT_NEXT_DROP;
+         err = ESP_DECRYPT_ERROR_NO_BUFFERS;
+         esp_decrypt_set_next_index (b[0], node, thread_index, err, n_noop,
+                                     noop_nexts, ESP_DECRYPT_NEXT_DROP,
+                                     vnet_buffer (b[0])->ipsec.sad_index);
         goto next;
       }
 
@@ -1090,52 +1151,40 @@ esp_decrypt_inline (vlib_main_t * vm,
       {
         if (current_sa_pkts)
           vlib_increment_combined_counter (&ipsec_sa_counters, thread_index,
-                                            current_sa_index,
-                                            current_sa_pkts,
+                                            current_sa_index, current_sa_pkts,
                                            current_sa_bytes);
         current_sa_bytes = current_sa_pkts = 0;
 
         current_sa_index = vnet_buffer (b[0])->ipsec.sad_index;
-         sa0 = pool_elt_at_index (im->sad, current_sa_index);
+         vlib_prefetch_combined_counter (&ipsec_sa_counters, thread_index,
+                                         current_sa_index);
+         sa0 = ipsec_sa_get (current_sa_index);
 
         /* fetch the second cacheline ASAP */
-         CLIB_PREFETCH (sa0->cacheline1, CLIB_CACHE_LINE_BYTES, LOAD);
+         clib_prefetch_load (sa0->cacheline1);
         cpd.icv_sz = sa0->integ_icv_size;
         cpd.iv_sz = sa0->crypto_iv_size;
         cpd.flags = sa0->flags;
         cpd.sa_index = current_sa_index;
-
-         /* submit frame when op_id is different then the old one */
-         if (is_async && last_async_op != sa0->crypto_async_dec_op_id)
-           {
-             if (async_frame && async_frame->n_elts)
-               {
-                 if (vnet_crypto_async_submit_open_frame (vm, async_frame))
-                   esp_async_recycle_failed_submit (async_frame, b, from,
-                                                    nexts, &n_async_drop,
-                                                    ESP_DECRYPT_NEXT_DROP,
-                                                    ESP_DECRYPT_ERROR_CRYPTO_ENGINE_ERROR);
-               }
-             async_frame =
-               vnet_crypto_async_get_frame (vm, sa0->crypto_async_dec_op_id);
-             last_async_op = sa0->crypto_async_dec_op_id;
-           }
+         is_async = im->async_mode | ipsec_sa_is_set_IS_ASYNC (sa0);
       }
 
-      if (PREDICT_FALSE (~0 == sa0->decrypt_thread_index))
+      if (PREDICT_FALSE ((u16) ~0 == sa0->thread_index))
       {
         /* this is the first packet to use this SA, claim the SA
          * for this thread. This could happen simultaneously on
          * another thread */
-         clib_atomic_cmp_and_swap (&sa0->decrypt_thread_index, ~0,
+         clib_atomic_cmp_and_swap (&sa0->thread_index, ~0,
                                    ipsec_sa_assign_thread (thread_index));
       }
 
-      if (PREDICT_FALSE (thread_index != sa0->decrypt_thread_index))
+      if (PREDICT_FALSE (thread_index != sa0->thread_index))
       {
-         esp_set_next_index (is_async, from, nexts, from[b - bufs],
-                             &n_async_drop, ESP_DECRYPT_NEXT_HANDOFF, next);
-         next[0] = ESP_DECRYPT_NEXT_HANDOFF;
+         vnet_buffer (b[0])->ipsec.thread_index = sa0->thread_index;
+         err = ESP_DECRYPT_ERROR_HANDOFF;
+         esp_decrypt_set_next_index (b[0], node, thread_index, err, n_noop,
+                                     noop_nexts, ESP_DECRYPT_NEXT_HANDOFF,
+                                     current_sa_index);
         goto next;
       }
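
The claim-then-check sequence above is a first-writer-wins pattern: the first
thread to see an unowned SA installs itself with a compare-and-swap, and racing
threads either win or observe the winner's index and hand the packet off.
Reduced to its essentials (field names compressed for illustration):

/* Sketch of the SA ownership claim; handoff machinery elided.        */
static u16 sa_thread_index = (u16) ~0;  /* stands in for sa0->thread_index */
u16 this_thread = vm->thread_index;

if (sa_thread_index == (u16) ~0)                  /* unclaimed         */
  clib_atomic_cmp_and_swap (&sa_thread_index, (u16) ~0,
                            (u16) this_thread);   /* may lose the race */

if (this_thread != sa_thread_index)               /* another thread won */
  ;  /* record the owner in the buffer and go to ESP_DECRYPT_NEXT_HANDOFF */
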
@@ -1156,27 +1205,37 @@ esp_decrypt_inline (vlib_main_t * vm,
          /* find last buffer in the chain */
          while (pd2->lb->flags & VLIB_BUFFER_NEXT_PRESENT)
            pd2->lb = vlib_get_buffer (vm, pd2->lb->next_buffer);
-
-         crypto_ops = &ptd->chained_crypto_ops;
-         integ_ops = &ptd->chained_integ_ops;
        }
 
       pd->current_length = b[0]->current_length;
 
       /* anti-replay check */
-      if (ipsec_sa_anti_replay_check (sa0, pd->seq))
+      if (PREDICT_FALSE (ipsec_sa_is_set_ANTI_REPLAY_HUGE (sa0)))
+       {
+         anti_replay_result = ipsec_sa_anti_replay_and_sn_advance (
+           sa0, pd->seq, ~0, false, &pd->seq_hi, true);
+       }
+      else
+       {
+         anti_replay_result = ipsec_sa_anti_replay_and_sn_advance (
+           sa0, pd->seq, ~0, false, &pd->seq_hi, false);
+       }
+
+      if (anti_replay_result)
        {
-         b[0]->error = node->errors[ESP_DECRYPT_ERROR_REPLAY];
-         esp_set_next_index (is_async, from, nexts, from[b - bufs],
-                             &n_async_drop, ESP_DECRYPT_NEXT_DROP, next);
+         err = ESP_DECRYPT_ERROR_REPLAY;
+         esp_decrypt_set_next_index (b[0], node, thread_index, err, n_noop,
+                                     noop_nexts, ESP_DECRYPT_NEXT_DROP,
+                                     current_sa_index);
          goto next;
        }
 
       if (pd->current_length < cpd.icv_sz + esp_sz + cpd.iv_sz)
        {
-         b[0]->error = node->errors[ESP_DECRYPT_ERROR_RUNT];
-         esp_set_next_index (is_async, from, nexts, from[b - bufs],
-                             &n_async_drop, ESP_DECRYPT_NEXT_DROP, next);
+         err = ESP_DECRYPT_ERROR_RUNT;
+         esp_decrypt_set_next_index (b[0], node, thread_index, err, n_noop,
+                                     noop_nexts, ESP_DECRYPT_NEXT_DROP,
+                                     current_sa_index);
          goto next;
        }
 
@@ -1186,42 +1245,70 @@ esp_decrypt_inline (vlib_main_t * vm,
 
       if (is_async)
        {
-         int ret = esp_decrypt_prepare_async_frame (vm, node, ptd,
-                                                    &async_frame,
-                                                    sa0, payload, len,
-                                                    cpd.icv_sz,
-                                                    cpd.iv_sz,
-                                                    pd, pd2,
-                                                    from[b - bufs],
-                                                    b[0], next, async_next);
-         if (PREDICT_FALSE (ret < 0))
+         async_op = sa0->crypto_async_dec_op_id;
+
+         /* get a frame for this op if we don't yet have one or it's full
+          */
+         if (NULL == async_frames[async_op] ||
+             vnet_crypto_async_frame_is_full (async_frames[async_op]))
            {
-             b[0]->error = ESP_DECRYPT_ERROR_CRYPTO_ENGINE_ERROR;
-             esp_set_next_index (1, from, nexts, from[b - bufs],
-                                 &n_async_drop, ESP_DECRYPT_NEXT_DROP, next);
-             /* when next[0] is ESP_DECRYPT_NEXT_DROP we only have to drop
-              * the current packet. Otherwise it is frame submission error
-              * thus we have to drop the whole frame.
-              */
-             if (next[0] != ESP_DECRYPT_NEXT_DROP && async_frame->n_elts)
-               esp_async_recycle_failed_submit (async_frame, b, from,
-                                                nexts, &n_async_drop,
-                                                ESP_DECRYPT_NEXT_DROP,
-                                                ESP_DECRYPT_ERROR_CRYPTO_ENGINE_ERROR);
-             goto next;
+             async_frames[async_op] =
+               vnet_crypto_async_get_frame (vm, async_op);
+             if (PREDICT_FALSE (!async_frames[async_op]))
+               {
+                 err = ESP_DECRYPT_ERROR_NO_AVAIL_FRAME;
+                 esp_decrypt_set_next_index (
+                   b[0], node, thread_index, err, n_noop, noop_nexts,
+                   ESP_DECRYPT_NEXT_DROP, current_sa_index);
+                 goto next;
+               }
+
+             /* Save the frame to the list we'll submit at the end */
+             vec_add1 (ptd->async_frames, async_frames[async_op]);
+           }
+
+         err = esp_decrypt_prepare_async_frame (
+           vm, ptd, async_frames[async_op], sa0, payload, len, cpd.icv_sz,
+           cpd.iv_sz, pd, pd2, from[b - bufs], b[0], async_next_node);
+         if (ESP_DECRYPT_ERROR_RX_PKTS != err)
+           {
+             esp_decrypt_set_next_index (
+               b[0], node, thread_index, err, n_noop, noop_nexts,
+               ESP_DECRYPT_NEXT_DROP, current_sa_index);
            }
        }
       else
-       esp_decrypt_prepare_sync_op (vm, node, ptd, &crypto_ops, &integ_ops,
-                                    op, sa0, payload, len, cpd.icv_sz,
-                                    cpd.iv_sz, pd, pd2, b[0], next,
-                                    b - bufs);
+       {
+         err = esp_decrypt_prepare_sync_op (vm, ptd, sa0, payload, len,
+                                            cpd.icv_sz, cpd.iv_sz, pd, pd2,
+                                            b[0], n_sync);
+         if (err != ESP_DECRYPT_ERROR_RX_PKTS)
+           {
+             esp_decrypt_set_next_index (b[0], node, thread_index, err, 0,
+                                         sync_next, ESP_DECRYPT_NEXT_DROP,
+                                         current_sa_index);
+           }
+       }
       /* next */
     next:
+      if (ESP_DECRYPT_ERROR_RX_PKTS != err)
+       {
+         noop_bi[n_noop] = from[b - bufs];
+         n_noop++;
+       }
+      else if (!is_async)
+       {
+         sync_bi[n_sync] = from[b - bufs];
+         sync_bufs[n_sync] = b[0];
+         n_sync++;
+         sync_next++;
+         pd += 1;
+         pd2 += 1;
+       }
+      else
+       async_next++;
+
       n_left -= 1;
-      next += 1;
-      pd += 1;
-      pd2 += 1;
       b += 1;
     }
 
@@ -1230,47 +1317,51 @@ esp_decrypt_inline (vlib_main_t * vm,
                                     current_sa_index, current_sa_pkts,
                                     current_sa_bytes);
 
-  if (is_async)
+  /* submit or free all of the open frames */
+  vnet_crypto_async_frame_t **async_frame;
+
+  vec_foreach (async_frame, ptd->async_frames)
     {
-      if (async_frame && async_frame->n_elts)
+      /* free frame and move on if no ops were successfully added */
+      if (PREDICT_FALSE (!(*async_frame)->n_elts))
        {
-         if (vnet_crypto_async_submit_open_frame (vm, async_frame) < 0)
-           esp_async_recycle_failed_submit (async_frame, b, from, nexts,
-                                            &n_async_drop,
-                                            ESP_DECRYPT_NEXT_DROP,
-                                            ESP_DECRYPT_ERROR_CRYPTO_ENGINE_ERROR);
+         vnet_crypto_async_free_frame (vm, *async_frame);
+         continue;
+       }
+      if (vnet_crypto_async_submit_open_frame (vm, *async_frame) < 0)
+       {
+         n_noop += esp_async_recycle_failed_submit (
+           vm, *async_frame, node, ESP_DECRYPT_ERROR_CRYPTO_ENGINE_ERROR,
+           IPSEC_SA_ERROR_CRYPTO_ENGINE_ERROR, n_noop, noop_bi, noop_nexts,
+           ESP_DECRYPT_NEXT_DROP, false);
+         vnet_crypto_async_reset_frame (*async_frame);
+         vnet_crypto_async_free_frame (vm, *async_frame);
        }
-
-      /* no post process in async */
-      vlib_node_increment_counter (vm, node->node_index,
-                                   ESP_DECRYPT_ERROR_RX_PKTS, n_left);
-      if (n_async_drop)
-       vlib_buffer_enqueue_to_next (vm, node, from, nexts, n_async_drop);
-
-      return n_left;
     }
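
This loop is the submit half of the new batching scheme: the dispatch loop
above keeps at most one open frame per async op id (async_frames[]), recording
each opened frame in ptd->async_frames, so everything can be submitted or
freed here in a single pass instead of whenever the op id changed. The
pattern, reduced to its skeleton (APIs as used in this diff):

/* Skeleton of per-op-id frame batching. */
vnet_crypto_async_frame_t *frames[VNET_CRYPTO_ASYNC_OP_N_IDS] = { 0 };

/* per packet, in the dispatch loop: */
if (!frames[async_op] || vnet_crypto_async_frame_is_full (frames[async_op]))
  {
    frames[async_op] = vnet_crypto_async_get_frame (vm, async_op);
    vec_add1 (ptd->async_frames, frames[async_op]);  /* submit later */
  }
/* ... add this packet's crypto/integ descriptor to frames[async_op] ... */

/* once, after the loop: submit frames that gathered ops, free empty
 * ones, and recycle the packets of any frame whose submit fails */
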
-  else
+
+  if (n_sync)
     {
-      esp_process_ops (vm, node, ptd->integ_ops, bufs, nexts,
+      esp_process_ops (vm, node, ptd->integ_ops, sync_bufs, sync_nexts,
                       ESP_DECRYPT_ERROR_INTEG_ERROR);
-      esp_process_chained_ops (vm, node, ptd->chained_integ_ops, bufs, nexts,
-                              ptd->chunks, ESP_DECRYPT_ERROR_INTEG_ERROR);
+      esp_process_chained_ops (vm, node, ptd->chained_integ_ops, sync_bufs,
+                              sync_nexts, ptd->chunks,
+                              ESP_DECRYPT_ERROR_INTEG_ERROR);
 
-      esp_process_ops (vm, node, ptd->crypto_ops, bufs, nexts,
+      esp_process_ops (vm, node, ptd->crypto_ops, sync_bufs, sync_nexts,
                       ESP_DECRYPT_ERROR_DECRYPTION_FAILED);
-      esp_process_chained_ops (vm, node, ptd->chained_crypto_ops, bufs, nexts,
-                              ptd->chunks,
+      esp_process_chained_ops (vm, node, ptd->chained_crypto_ops, sync_bufs,
+                              sync_nexts, ptd->chunks,
                               ESP_DECRYPT_ERROR_DECRYPTION_FAILED);
     }
 
   /* Post decryption round - adjust packet data start and length and next node */
-  n_left = from_frame->n_vectors;
-  next = nexts;
+  n_left = n_sync;
+  sync_next = sync_nexts;
   pd = pkt_data;
   pd2 = pkt_data2;
-  b = bufs;
+  b = sync_bufs;
 
   while (n_left)
     {
@@ -1294,38 +1385,42 @@ esp_decrypt_inline (vlib_main_t * vm,
       if (PREDICT_FALSE (b[0]->flags & VLIB_BUFFER_IS_TRACED))
        current_sa_index = vnet_buffer (b[0])->ipsec.sad_index;
 
-      if (next[0] >= ESP_DECRYPT_N_NEXT)
-       esp_decrypt_post_crypto (vm, node, pd, pd2, b[0], next, is_ip6,
-                                is_tun, 0);
+      if (sync_next[0] >= ESP_DECRYPT_N_NEXT)
+       esp_decrypt_post_crypto (vm, node, next_by_next_header, pd, pd2, b[0],
+                                sync_next, is_ip6, is_tun, 0);
 
       /* trace: */
       if (PREDICT_FALSE (b[0]->flags & VLIB_BUFFER_IS_TRACED))
        {
          esp_decrypt_trace_t *tr;
          tr = vlib_add_trace (vm, node, b[0], sizeof (*tr));
-         sa0 = pool_elt_at_index (im->sad, current_sa_index);
+         sa0 = ipsec_sa_get (current_sa_index);
          tr->crypto_alg = sa0->crypto_alg;
          tr->integ_alg = sa0->integ_alg;
          tr->seq = pd->seq;
-         tr->sa_seq = sa0->last_seq;
+         tr->sa_seq = sa0->seq;
          tr->sa_seq_hi = sa0->seq_hi;
+         tr->pkt_seq_hi = pd->seq_hi;
        }
 
       /* next */
       n_left -= 1;
-      next += 1;
+      sync_next += 1;
       pd += 1;
       pd2 += 1;
       b += 1;
     }
 
-  n_left = from_frame->n_vectors;
-  vlib_node_increment_counter (vm, node->node_index,
-                               ESP_DECRYPT_ERROR_RX_PKTS, n_left);
+  vlib_node_increment_counter (vm, node->node_index, ESP_DECRYPT_ERROR_RX_PKTS,
+                               from_frame->n_vectors);
 
-  vlib_buffer_enqueue_to_next (vm, node, from, nexts, n_left);
+  if (n_sync)
+    vlib_buffer_enqueue_to_next (vm, node, sync_bi, sync_nexts, n_sync);
 
-  return n_left;
+  if (n_noop)
+    vlib_buffer_enqueue_to_next (vm, node, noop_bi, noop_nexts, n_noop);
+
+  return (from_frame->n_vectors);
 }
 
 always_inline uword
@@ -1333,7 +1428,8 @@ esp_decrypt_post_inline (vlib_main_t * vm,
                         vlib_node_runtime_t * node,
                         vlib_frame_t * from_frame, int is_ip6, int is_tun)
 {
-  ipsec_main_t *im = &ipsec_main;
+  const ipsec_main_t *im = &ipsec_main;
+  const u16 *next_by_next_header = im->next_header_registrations;
   u32 *from = vlib_frame_vector_args (from_frame);
   u32 n_left = from_frame->n_vectors;
   vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;
@@ -1351,29 +1447,29 @@ esp_decrypt_post_inline (vlib_main_t * vm,
        }
 
       if (!pd->is_chain)
-       esp_decrypt_post_crypto (vm, node, pd, 0, b[0], next, is_ip6, is_tun,
-                                1);
+       esp_decrypt_post_crypto (vm, node, next_by_next_header, pd, 0, b[0],
+                                next, is_ip6, is_tun, 1);
       else
        {
          esp_decrypt_packet_data2_t *pd2 = esp_post_data2 (b[0]);
-         esp_decrypt_post_crypto (vm, node, pd, pd2, b[0], next, is_ip6,
-                                  is_tun, 1);
+         esp_decrypt_post_crypto (vm, node, next_by_next_header, pd, pd2,
+                                  b[0], next, is_ip6, is_tun, 1);
        }
 
       /*trace: */
      if (PREDICT_FALSE (b[0]->flags & VLIB_BUFFER_IS_TRACED))
       {
-         ipsec_sa_t *sa0 = pool_elt_at_index (im->sad, pd->sa_index);
+         ipsec_sa_t *sa0 = ipsec_sa_get (pd->sa_index);
         esp_decrypt_trace_t *tr;
         esp_decrypt_packet_data_t *async_pd =
           &(esp_post_data (b[0]))->decrypt_data;
         tr = vlib_add_trace (vm, node, b[0], sizeof (*tr));
-         sa0 = pool_elt_at_index (im->sad, async_pd->sa_index);
+         sa0 = ipsec_sa_get (async_pd->sa_index);
 
         tr->crypto_alg = sa0->crypto_alg;
         tr->integ_alg = sa0->integ_alg;
         tr->seq = pd->seq;
-         tr->sa_seq = sa0->last_seq;
+         tr->sa_seq = sa0->seq;
         tr->sa_seq_hi = sa0->seq_hi;
       }
 
@@ -1451,15 +1547,14 @@ VLIB_NODE_FN (esp6_decrypt_tun_post_node) (vlib_main_t * vm,
   return esp_decrypt_post_inline (vm, node, from_frame, 1, 1);
 }
 
-/* *INDENT-OFF* */
 VLIB_REGISTER_NODE (esp4_decrypt_node) = {
   .name = "esp4-decrypt",
   .vector_size = sizeof (u32),
   .format_trace = format_esp_decrypt_trace,
   .type = VLIB_NODE_TYPE_INTERNAL,
 
-  .n_errors = ARRAY_LEN(esp_decrypt_error_strings),
-  .error_strings = esp_decrypt_error_strings,
+  .n_errors = ESP_DECRYPT_N_ERROR,
+  .error_counters = esp_decrypt_error_counters,
 
   .n_next_nodes = ESP_DECRYPT_N_NEXT,
   .next_nodes = {
@@ -1478,8 +1573,8 @@ VLIB_REGISTER_NODE (esp4_decrypt_post_node) = {
   .format_trace = format_esp_decrypt_trace,
   .type = VLIB_NODE_TYPE_INTERNAL,
 
-  .n_errors = ARRAY_LEN(esp_decrypt_error_strings),
-  .error_strings = esp_decrypt_error_strings,
+  .n_errors = ESP_DECRYPT_N_ERROR,
+  .error_counters = esp_decrypt_error_counters,
 
   .sibling_of = "esp4-decrypt",
 };
@@ -1490,8 +1585,8 @@ VLIB_REGISTER_NODE (esp6_decrypt_node) = {
   .format_trace = format_esp_decrypt_trace,
   .type = VLIB_NODE_TYPE_INTERNAL,
 
-  .n_errors = ARRAY_LEN(esp_decrypt_error_strings),
-  .error_strings = esp_decrypt_error_strings,
+  .n_errors = ESP_DECRYPT_N_ERROR,
+  .error_counters = esp_decrypt_error_counters,
 
   .n_next_nodes = ESP_DECRYPT_N_NEXT,
   .next_nodes = {
@@ -1510,8 +1605,8 @@ VLIB_REGISTER_NODE (esp6_decrypt_post_node) = {
   .format_trace = format_esp_decrypt_trace,
   .type = VLIB_NODE_TYPE_INTERNAL,
 
-  .n_errors = ARRAY_LEN(esp_decrypt_error_strings),
-  .error_strings = esp_decrypt_error_strings,
+  .n_errors = ESP_DECRYPT_N_ERROR,
+  .error_counters = esp_decrypt_error_counters,
 
   .sibling_of = "esp6-decrypt",
 };
@@ -1521,8 +1616,8 @@ VLIB_REGISTER_NODE (esp4_decrypt_tun_node) = {
   .vector_size = sizeof (u32),
   .format_trace = format_esp_decrypt_trace,
   .type = VLIB_NODE_TYPE_INTERNAL,
-  .n_errors = ARRAY_LEN(esp_decrypt_error_strings),
-  .error_strings = esp_decrypt_error_strings,
+  .n_errors = ESP_DECRYPT_N_ERROR,
+  .error_counters = esp_decrypt_error_counters,
   .n_next_nodes = ESP_DECRYPT_N_NEXT,
   .next_nodes = {
     [ESP_DECRYPT_NEXT_DROP] = "ip4-drop",
@@ -1540,8 +1635,8 @@ VLIB_REGISTER_NODE (esp4_decrypt_tun_post_node) = {
   .format_trace = format_esp_decrypt_trace,
   .type = VLIB_NODE_TYPE_INTERNAL,
 
-  .n_errors = ARRAY_LEN(esp_decrypt_error_strings),
-  .error_strings = esp_decrypt_error_strings,
+  .n_errors = ESP_DECRYPT_N_ERROR,
+  .error_counters = esp_decrypt_error_counters,
 
   .sibling_of = "esp4-decrypt-tun",
 };
@@ -1551,8 +1646,8 @@ VLIB_REGISTER_NODE (esp6_decrypt_tun_node) = {
   .vector_size = sizeof (u32),
   .format_trace = format_esp_decrypt_trace,
   .type = VLIB_NODE_TYPE_INTERNAL,
-  .n_errors = ARRAY_LEN(esp_decrypt_error_strings),
-  .error_strings = esp_decrypt_error_strings,
+  .n_errors = ESP_DECRYPT_N_ERROR,
+  .error_counters = esp_decrypt_error_counters,
   .n_next_nodes = ESP_DECRYPT_N_NEXT,
   .next_nodes = {
    [ESP_DECRYPT_NEXT_DROP] = "ip6-drop",
@@ -1570,12 +1665,34 @@ VLIB_REGISTER_NODE (esp6_decrypt_tun_post_node) = {
   .format_trace = format_esp_decrypt_trace,
   .type = VLIB_NODE_TYPE_INTERNAL,
 
-  .n_errors = ARRAY_LEN(esp_decrypt_error_strings),
-  .error_strings = esp_decrypt_error_strings,
+  .n_errors = ESP_DECRYPT_N_ERROR,
+  .error_counters = esp_decrypt_error_counters,
 
   .sibling_of = "esp6-decrypt-tun",
 };
-/* *INDENT-ON* */
+
+#ifndef CLIB_MARCH_VARIANT
+
+static clib_error_t *
+esp_decrypt_init (vlib_main_t *vm)
+{
+  ipsec_main_t *im = &ipsec_main;
+
+  im->esp4_dec_fq_index =
+    vlib_frame_queue_main_init (esp4_decrypt_node.index, 0);
+  im->esp6_dec_fq_index =
+    vlib_frame_queue_main_init (esp6_decrypt_node.index, 0);
+  im->esp4_dec_tun_fq_index =
+    vlib_frame_queue_main_init (esp4_decrypt_tun_node.index, 0);
+  im->esp6_dec_tun_fq_index =
+    vlib_frame_queue_main_init (esp6_decrypt_tun_node.index, 0);
+
+  return 0;
+}
+
+VLIB_INIT_FUNCTION (esp_decrypt_init);
+
+#endif
 
 /*
  * fd.io coding-style-patch-verification: ON