X-Git-Url: https://gerrit.fd.io/r/gitweb?a=blobdiff_plain;f=src%2Fvnet%2Fipsec%2Fesp_decrypt.c;h=366894805381ce86f9c3f7b5e39884f26f78fee0;hb=7a6f5a4fee17bd34f70f9eb25a094764be39d81b;hp=3f463505e01aaf54ec0c0845e0ee482036c0ba6c;hpb=9f6957d1a5b23d4bee9390da20537db6e93b3433;p=vpp.git

diff --git a/src/vnet/ipsec/esp_decrypt.c b/src/vnet/ipsec/esp_decrypt.c
index 3f463505e01..36689480538 100644
--- a/src/vnet/ipsec/esp_decrypt.c
+++ b/src/vnet/ipsec/esp_decrypt.c
@@ -18,15 +18,21 @@
 #include <vnet/vnet.h>
 #include <vnet/api_errno.h>
 #include <vnet/ip/ip.h>
+#include <vnet/l2/l2_input.h>
 #include <vnet/ipsec/ipsec.h>
 #include <vnet/ipsec/esp.h>
+#include <vnet/ipsec/ipsec_io.h>
+#include <vnet/ipsec/ipsec_tun.h>
+
+#include <vnet/gre/gre.h>
 
 #define foreach_esp_decrypt_next \
 _(DROP, "error-drop") \
-_(IP4_INPUT, "ip4-input") \
+_(IP4_INPUT, "ip4-input-no-checksum") \
 _(IP6_INPUT, "ip6-input") \
-_(IPSEC_GRE_INPUT, "ipsec-gre-input")
+_(L2_INPUT, "l2-input") \
+_(HANDOFF, "handoff")
 
 #define _(v, s) ESP_DECRYPT_NEXT_##v,
 typedef enum
@@ -37,13 +43,18 @@ typedef enum
 } esp_decrypt_next_t;
 
-#define foreach_esp_decrypt_error \
- _(RX_PKTS, "ESP pkts received") \
- _(NO_BUFFER, "No buffer (packed dropped)") \
- _(DECRYPTION_FAILED, "ESP decryption failed") \
- _(INTEG_ERROR, "Integrity check failed") \
- _(REPLAY, "SA replayed packet") \
- _(NOT_IP, "Not IP packet (dropped)")
+#define foreach_esp_decrypt_error \
+ _(RX_PKTS, "ESP pkts received") \
+ _(DECRYPTION_FAILED, "ESP decryption failed") \
+ _(INTEG_ERROR, "Integrity check failed") \
+ _(CRYPTO_ENGINE_ERROR, "crypto engine error (packet dropped)") \
+ _(REPLAY, "SA replayed packet") \
+ _(RUNT, "undersized packet") \
+ _(NO_BUFFERS, "no buffers (packet dropped)") \
+ _(OVERSIZED_HEADER, "buffer with oversized header (dropped)") \
+ _(NO_TAIL_SPACE, "not enough buffer tail space (dropped)") \
+ _(TUN_NO_PROTO, "no tunnel protocol") \
+ _(UNSUP_PAYLOAD, "unsupported payload") \
 
 
 typedef enum
@@ -62,6 +73,9 @@ static char *esp_decrypt_error_strings[] = {
 
 typedef struct
 {
+  u32 seq;
+  u32 sa_seq;
+  u32 sa_seq_hi;
   ipsec_crypto_alg_t crypto_alg;
   ipsec_integ_alg_t integ_alg;
 } esp_decrypt_trace_t;
@@ -74,343 +88,861 @@ format_esp_decrypt_trace (u8 * s, va_list * args)
   CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
   esp_decrypt_trace_t *t = va_arg (*args, esp_decrypt_trace_t *);
 
-  s = format (s, "esp: crypto %U integrity %U",
-	      format_ipsec_crypto_alg, t->crypto_alg,
-	      format_ipsec_integ_alg, t->integ_alg);
+  s =
+    format (s,
+	    "esp: crypto %U integrity %U pkt-seq %d sa-seq %u sa-seq-hi %u",
+	    format_ipsec_crypto_alg, t->crypto_alg, format_ipsec_integ_alg,
+	    t->integ_alg, t->seq, t->sa_seq, t->sa_seq_hi);
   return s;
 }
 
-always_inline void
-esp_decrypt_cbc (ipsec_crypto_alg_t alg,
-		 u8 * in, u8 * out, size_t in_len, u8 * key, u8 * iv)
+typedef struct
 {
-  ipsec_proto_main_t *em = &ipsec_proto_main;
-  u32 thread_index = vlib_get_thread_index ();
-#if OPENSSL_VERSION_NUMBER >= 0x10100000L
-  EVP_CIPHER_CTX *ctx = em->per_thread_data[thread_index].decrypt_ctx;
-#else
-  EVP_CIPHER_CTX *ctx = &(em->per_thread_data[thread_index].decrypt_ctx);
-#endif
-  const EVP_CIPHER *cipher = NULL;
-  int out_len;
-
-  ASSERT (alg < IPSEC_CRYPTO_N_ALG);
-
-  if (PREDICT_FALSE (em->ipsec_proto_main_crypto_algs[alg].type == 0))
+  vlib_buffer_t *lb;
+  union
+  {
+    struct
+    {
+      u8 icv_sz;
+      u8 iv_sz;
+      ipsec_sa_flags_t flags;
+      u32 sa_index;
+    };
+    u64 sa_data;
+  };
+
+  u32 seq;
+  u32 free_buffer_index;
+  i16 current_data;
+  i16 current_length;
+  u16 hdr_sz;
+  u8 icv_removed;
+  u8 __unused;
+} esp_decrypt_packet_data_t;
+
+STATIC_ASSERT_SIZEOF (esp_decrypt_packet_data_t, 4 * sizeof (u64));
+
+#define ESP_ENCRYPT_PD_F_FD_TRANSPORT (1 << 2)
+
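The next-node and error tables above use VPP's X-macro idiom: the foreach_
list is expanded once to generate the enum members and again to generate
the matching string table. A minimal sketch of the expansion pattern, with
illustrative names that are not part of the patch:

/* X-macro sketch: one list, two expansions */
#define foreach_demo_error _ (RX_PKTS, "ESP pkts received")

typedef enum
{
#define _(sym, str) DEMO_ERROR_##sym,
  foreach_demo_error
#undef _
    DEMO_N_ERROR,
} demo_error_t;

static char *demo_error_strings[] = {
#define _(sym, str) str,
  foreach_demo_error
#undef _
};

Keeping the symbol and its user-visible string in one list means the two
tables cannot drift apart when a new error is added.
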
+static_always_inline void
+esp_process_ops (vlib_main_t * vm, vlib_node_runtime_t * node,
+		 vnet_crypto_op_t * ops, vlib_buffer_t * b[], u16 * nexts,
+		 int e)
+{
+  vnet_crypto_op_t *op = ops;
+  u32 n_fail, n_ops = vec_len (ops);
+
+  if (n_ops == 0)
     return;
 
-  if (PREDICT_FALSE
-      (alg != em->per_thread_data[thread_index].last_decrypt_alg))
+  n_fail = n_ops - vnet_crypto_process_ops (vm, op, n_ops);
+
+  while (n_fail)
+    {
+      ASSERT (op - ops < n_ops);
+      if (op->status != VNET_CRYPTO_OP_STATUS_COMPLETED)
+	{
+	  u32 err, bi = op->user_data;
+	  if (op->status == VNET_CRYPTO_OP_STATUS_FAIL_BAD_HMAC)
+	    err = e;
+	  else
+	    err = ESP_DECRYPT_ERROR_CRYPTO_ENGINE_ERROR;
+	  b[bi]->error = node->errors[err];
+	  nexts[bi] = ESP_DECRYPT_NEXT_DROP;
+	  n_fail--;
+	}
+      op++;
+    }
+}
+
+static_always_inline void
+esp_process_chained_ops (vlib_main_t * vm, vlib_node_runtime_t * node,
+			 vnet_crypto_op_t * ops, vlib_buffer_t * b[],
+			 u16 * nexts, vnet_crypto_op_chunk_t * chunks, int e)
+{
+
+  vnet_crypto_op_t *op = ops;
+  u32 n_fail, n_ops = vec_len (ops);
+
+  if (n_ops == 0)
+    return;
+
+  n_fail = n_ops - vnet_crypto_process_chained_ops (vm, op, chunks, n_ops);
+
+  while (n_fail)
+    {
+      ASSERT (op - ops < n_ops);
+      if (op->status != VNET_CRYPTO_OP_STATUS_COMPLETED)
+	{
+	  u32 err, bi = op->user_data;
+	  if (op->status == VNET_CRYPTO_OP_STATUS_FAIL_BAD_HMAC)
+	    err = e;
+	  else
+	    err = ESP_DECRYPT_ERROR_CRYPTO_ENGINE_ERROR;
+	  b[bi]->error = node->errors[err];
+	  nexts[bi] = ESP_DECRYPT_NEXT_DROP;
+	  n_fail--;
+	}
+      op++;
+    }
+}
+
+always_inline void
+esp_remove_tail (vlib_main_t * vm, vlib_buffer_t * b, vlib_buffer_t * last,
+		 u16 tail)
+{
+  vlib_buffer_t *before_last = b;
+
+  if (last->current_length > tail)
+    {
+      last->current_length -= tail;
+      return;
+    }
+  ASSERT (b->flags & VLIB_BUFFER_NEXT_PRESENT);
+
+  while (b->flags & VLIB_BUFFER_NEXT_PRESENT)
     {
-      cipher = em->ipsec_proto_main_crypto_algs[alg].type;
-      em->per_thread_data[thread_index].last_decrypt_alg = alg;
+      before_last = b;
+      b = vlib_get_buffer (vm, b->next_buffer);
     }
+  before_last->current_length -= tail - last->current_length;
+  vlib_buffer_free_one (vm, before_last->next_buffer);
+  before_last->flags &= ~VLIB_BUFFER_NEXT_PRESENT;
+}
+
+/* The ICV is split across the last two buffers, so move it to the last
+   buffer and return a pointer to it */
+static_always_inline u8 *
+esp_move_icv (vlib_main_t * vm, vlib_buffer_t * first,
+	      esp_decrypt_packet_data_t * pd, u16 icv_sz)
+{
+  vlib_buffer_t *before_last, *bp;
+  u16 last_sz = pd->lb->current_length;
+  u16 first_sz = icv_sz - last_sz;
 
-  EVP_DecryptInit_ex (ctx, cipher, NULL, key, iv);
+  bp = before_last = first;
+  while (bp->flags & VLIB_BUFFER_NEXT_PRESENT)
+    {
+      before_last = bp;
+      bp = vlib_get_buffer (vm, bp->next_buffer);
+    }
 
-  EVP_DecryptUpdate (ctx, out, &out_len, in, in_len);
-  EVP_DecryptFinal_ex (ctx, out + out_len, &out_len);
+  u8 *lb_curr = vlib_buffer_get_current (pd->lb);
+  memmove (lb_curr + first_sz, lb_curr, last_sz);
+  clib_memcpy_fast (lb_curr, vlib_buffer_get_tail (before_last) - first_sz,
+		    first_sz);
+  before_last->current_length -= first_sz;
+  pd->lb = before_last;
+  pd->icv_removed = 1;
+  pd->free_buffer_index = before_last->next_buffer;
+  before_last->flags &= ~VLIB_BUFFER_NEXT_PRESENT;
+  return lb_curr;
 }
 
 always_inline uword
 esp_decrypt_inline (vlib_main_t * vm,
 		    vlib_node_runtime_t * node, vlib_frame_t * from_frame,
-		    int is_ip6)
+		    int is_ip6, int is_tun)
 {
-  u32 n_left_from, *from, next_index, *to_next;
   ipsec_main_t *im = &ipsec_main;
-  ipsec_proto_main_t *em = &ipsec_proto_main;
-  u32 *recycle = 0;
- from = vlib_frame_vector_args (from_frame); - n_left_from = from_frame->n_vectors; - u32 thread_index = vlib_get_thread_index (); + u32 thread_index = vm->thread_index; + u16 buffer_data_size = vlib_buffer_get_default_data_size (vm); + u16 len; + ipsec_per_thread_data_t *ptd = vec_elt_at_index (im->ptd, thread_index); + u32 *from = vlib_frame_vector_args (from_frame); + u32 n_left = from_frame->n_vectors; + vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs; + u16 nexts[VLIB_FRAME_SIZE], *next = nexts; + esp_decrypt_packet_data_t pkt_data[VLIB_FRAME_SIZE], *pd = pkt_data; + esp_decrypt_packet_data_t cpd = { }; + u32 current_sa_index = ~0, current_sa_bytes = 0, current_sa_pkts = 0; + const u8 esp_sz = sizeof (esp_header_t); + ipsec_sa_t *sa0 = 0; + vnet_crypto_op_chunk_t *ch; + vnet_crypto_op_t **crypto_ops = &ptd->crypto_ops; + vnet_crypto_op_t **integ_ops = &ptd->integ_ops; + + vlib_get_buffers (vm, from, b, n_left); + vec_reset_length (ptd->crypto_ops); + vec_reset_length (ptd->integ_ops); + vec_reset_length (ptd->chained_crypto_ops); + vec_reset_length (ptd->chained_integ_ops); + vec_reset_length (ptd->chunks); + clib_memset_u16 (nexts, -1, n_left); + + while (n_left > 0) + { + u8 *payload; - ipsec_alloc_empty_buffers (vm, im); + if (n_left > 2) + { + u8 *p; + vlib_prefetch_buffer_header (b[2], LOAD); + p = vlib_buffer_get_current (b[1]); + CLIB_PREFETCH (p, CLIB_CACHE_LINE_BYTES, LOAD); + p -= CLIB_CACHE_LINE_BYTES; + CLIB_PREFETCH (p, CLIB_CACHE_LINE_BYTES, LOAD); + } - u32 *empty_buffers = im->empty_buffers[thread_index]; + u32 n_bufs = vlib_buffer_chain_linearize (vm, b[0]); + if (n_bufs == 0) + { + b[0]->error = node->errors[ESP_DECRYPT_ERROR_NO_BUFFERS]; + next[0] = ESP_DECRYPT_NEXT_DROP; + goto next; + } - if (PREDICT_FALSE (vec_len (empty_buffers) < n_left_from)) - { - vlib_node_increment_counter (vm, node->node_index, - ESP_DECRYPT_ERROR_NO_BUFFER, n_left_from); - goto free_buffers_and_exit; - } + if (vnet_buffer (b[0])->ipsec.sad_index != current_sa_index) + { + if (current_sa_pkts) + vlib_increment_combined_counter (&ipsec_sa_counters, thread_index, + current_sa_index, + current_sa_pkts, + current_sa_bytes); + current_sa_bytes = current_sa_pkts = 0; + + current_sa_index = vnet_buffer (b[0])->ipsec.sad_index; + sa0 = pool_elt_at_index (im->sad, current_sa_index); + cpd.icv_sz = sa0->integ_icv_size; + cpd.iv_sz = sa0->crypto_iv_size; + cpd.flags = sa0->flags; + cpd.sa_index = current_sa_index; + } - next_index = node->cached_next_index; + if (PREDICT_FALSE (~0 == sa0->decrypt_thread_index)) + { + /* this is the first packet to use this SA, claim the SA + * for this thread. 
this could happen simultaneously on
+	   * another thread */
+	  clib_atomic_cmp_and_swap (&sa0->decrypt_thread_index, ~0,
+				    ipsec_sa_assign_thread (thread_index));
+	}
 
-  while (n_left_from > 0)
-    {
-      u32 n_left_to_next;
+      if (PREDICT_TRUE (thread_index != sa0->decrypt_thread_index))
+	{
+	  next[0] = ESP_DECRYPT_NEXT_HANDOFF;
+	  goto next;
+	}
+
+      /* store packet data for next round for easier prefetch */
+      pd->sa_data = cpd.sa_data;
+      pd->current_data = b[0]->current_data;
+      pd->current_length = b[0]->current_length;
+      pd->hdr_sz = pd->current_data - vnet_buffer (b[0])->l3_hdr_offset;
+      payload = b[0]->data + pd->current_data;
+      pd->seq = clib_host_to_net_u32 (((esp_header_t *) payload)->seq);
+      pd->free_buffer_index = 0;
+      pd->icv_removed = 0;
+
+      pd->lb = b[0];
+      if (n_bufs > 1)
+	{
+	  /* find last buffer in the chain */
+	  while (pd->lb->flags & VLIB_BUFFER_NEXT_PRESENT)
+	    pd->lb = vlib_get_buffer (vm, pd->lb->next_buffer);
 
-      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+	  crypto_ops = &ptd->chained_crypto_ops;
+	  integ_ops = &ptd->chained_integ_ops;
+	}
+      pd->current_length = b[0]->current_length;
 
-      while (n_left_from > 0 && n_left_to_next > 0)
+      /* we need 4 extra bytes for HMAC calculation when ESN are used */
+      /* Chained buffers can process ESN as a separate chunk */
+      if (pd->lb == b[0] && ipsec_sa_is_set_USE_ESN (sa0) && cpd.icv_sz &&
+	  (pd->lb->current_data + pd->lb->current_length + 4
+	   > buffer_data_size))
 	{
-	  u32 i_bi0, o_bi0 = (u32) ~ 0, next0;
-	  vlib_buffer_t *i_b0;
-	  vlib_buffer_t *o_b0 = 0;
-	  esp_header_t *esp0;
-	  ipsec_sa_t *sa0;
-	  u32 sa_index0 = ~0;
-	  u32 seq;
-	  ip4_header_t *ih4 = 0, *oh4 = 0;
-	  ip6_header_t *ih6 = 0, *oh6 = 0;
-	  u8 tunnel_mode = 1;
-
-	  i_bi0 = from[0];
-	  from += 1;
-	  n_left_from -= 1;
-	  n_left_to_next -= 1;
-
-	  next0 = ESP_DECRYPT_NEXT_DROP;
-
-	  i_b0 = vlib_get_buffer (vm, i_bi0);
-	  esp0 = vlib_buffer_get_current (i_b0);
-
-	  sa_index0 = vnet_buffer (i_b0)->ipsec.sad_index;
-	  sa0 = pool_elt_at_index (im->sad, sa_index0);
-
-	  seq = clib_host_to_net_u32 (esp0->seq);
-
-	  /* anti-replay check */
-	  if (sa0->use_anti_replay)
-	    {
-	      int rv = 0;
+	  b[0]->error = node->errors[ESP_DECRYPT_ERROR_NO_TAIL_SPACE];
+	  next[0] = ESP_DECRYPT_NEXT_DROP;
+	  goto next;
+	}
 
-	      if (PREDICT_TRUE (sa0->use_esn))
-		rv = esp_replay_check_esn (sa0, seq);
-	      else
-		rv = esp_replay_check (sa0, seq);
+      /* anti-replay check */
+      if (ipsec_sa_anti_replay_check (sa0, pd->seq))
+	{
+	  b[0]->error = node->errors[ESP_DECRYPT_ERROR_REPLAY];
+	  next[0] = ESP_DECRYPT_NEXT_DROP;
+	  goto next;
+	}
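ipsec_sa_anti_replay_check () above is the standard RFC 4303 sliding-window
test. A simplified, self-contained sketch of that test, assuming a 64-bit
window; illustrative only, VPP's actual implementation differs in detail:

/* Returns non-zero if seq must be dropped.  `window` holds one bit per
   sequence number in [last_seq - 63, last_seq]. */
static inline int
demo_replay_check (u64 window, u32 last_seq, u32 seq)
{
  if (seq > last_seq)
    return 0;			/* in front of the window: new packet */
  if (last_seq - seq >= 64)
    return 1;			/* behind the window: too old */
  /* inside the window: a replay iff its bit is already set */
  return (window >> (last_seq - seq)) & 1;
}
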
 
-	      if (PREDICT_FALSE (rv))
-		{
-		  vlib_node_increment_counter (vm, node->node_index,
-					       ESP_DECRYPT_ERROR_REPLAY, 1);
-		  o_bi0 = i_bi0;
-		  to_next[0] = o_bi0;
-		  to_next += 1;
-		  goto trace;
-		}
-	    }
+      if (pd->current_length < cpd.icv_sz + esp_sz + cpd.iv_sz)
+	{
+	  b[0]->error = node->errors[ESP_DECRYPT_ERROR_RUNT];
+	  next[0] = ESP_DECRYPT_NEXT_DROP;
+	  goto next;
+	}
 
-	  sa0->total_data_size += i_b0->current_length;
+      len = pd->current_length - cpd.icv_sz;
+      current_sa_pkts += 1;
+      current_sa_bytes += vlib_buffer_length_in_chain (vm, b[0]);
 
-	  if (PREDICT_TRUE (sa0->integ_alg != IPSEC_INTEG_ALG_NONE))
+      if (PREDICT_TRUE (sa0->integ_op_id != VNET_CRYPTO_OP_NONE))
+	{
+	  vnet_crypto_op_t *op;
+	  vec_add2_aligned (integ_ops[0], op, 1, CLIB_CACHE_LINE_BYTES);
+
+	  vnet_crypto_op_init (op, sa0->integ_op_id);
+	  op->key_index = sa0->integ_key_index;
+	  op->src = payload;
+	  op->flags = VNET_CRYPTO_OP_FLAG_HMAC_CHECK;
+	  op->user_data = b - bufs;
+	  op->digest = payload + len;
+	  op->digest_len = cpd.icv_sz;
+	  op->len = len;
+
+	  if (pd->lb != b[0])
 	    {
-	      u8 sig[64];
-	      int icv_size =
-		em->ipsec_proto_main_integ_algs[sa0->integ_alg].trunc_size;
-	      clib_memset (sig, 0, sizeof (sig));
-	      u8 *icv =
-		vlib_buffer_get_current (i_b0) + i_b0->current_length -
-		icv_size;
-	      i_b0->current_length -= icv_size;
-
-	      hmac_calc (sa0->integ_alg, sa0->integ_key, sa0->integ_key_len,
-			 (u8 *) esp0, i_b0->current_length, sig, sa0->use_esn,
-			 sa0->seq_hi);
-
-	      if (PREDICT_FALSE (memcmp (icv, sig, icv_size)))
+	      /* buffer is chained */
+	      vlib_buffer_t *cb = b[0];
+	      op->flags |= VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS;
+	      op->chunk_index = vec_len (ptd->chunks);
+
+	      if (pd->lb->current_length < cpd.icv_sz)
+		op->digest = esp_move_icv (vm, b[0], pd, cpd.icv_sz);
+	      else
+		op->digest = vlib_buffer_get_tail (pd->lb) - cpd.icv_sz;
+
+	      vec_add2 (ptd->chunks, ch, 1);
+	      ch->len = pd->current_length;
+	      ch->src = payload;
+	      cb = vlib_get_buffer (vm, cb->next_buffer);
+	      op->n_chunks = 1;
+	      while (1)
 		{
-		  vlib_node_increment_counter (vm, node->node_index,
-					       ESP_DECRYPT_ERROR_INTEG_ERROR,
-					       1);
-		  o_bi0 = i_bi0;
-		  to_next[0] = o_bi0;
-		  to_next += 1;
-		  goto trace;
+		  vec_add2 (ptd->chunks, ch, 1);
+		  op->n_chunks += 1;
+		  ch->src = vlib_buffer_get_current (cb);
+		  if (pd->lb == cb)
+		    {
+		      if (pd->icv_removed)
+			ch->len = cb->current_length;
+		      else
+			ch->len = cb->current_length - cpd.icv_sz;
+		      if (ipsec_sa_is_set_USE_ESN (sa0))
+			{
+			  u32 seq_hi = clib_host_to_net_u32 (sa0->seq_hi);
+			  u8 tmp[ESP_MAX_ICV_SIZE], sz = sizeof (sa0->seq_hi);
+			  u8 *esn;
+			  vlib_buffer_t *tmp_b;
+			  u16 space_left = vlib_buffer_space_left_at_end
+			    (vm, pd->lb);
+			  if (space_left < sz)
+			    {
+			      if (pd->icv_removed)
+				{
+				  /* use the pre-data area of the last buffer
+				     that was removed from the chain */
+				  tmp_b =
+				    vlib_get_buffer (vm,
+						     pd->free_buffer_index);
+				  esn = tmp_b->data - sz;
+				}
+			      else
+				{
+				  /* no space, need to allocate new buffer */
+				  u32 tmp_bi = 0;
+				  vlib_buffer_alloc (vm, &tmp_bi, 1);
+				  tmp_b = vlib_get_buffer (vm, tmp_bi);
+				  esn = tmp_b->data;
+				  pd->free_buffer_index = tmp_bi;
+				}
+			      clib_memcpy_fast (esn, &seq_hi, sz);
+
+			      vec_add2 (ptd->chunks, ch, 1);
+			      op->n_chunks += 1;
+			      ch->src = esn;
+			      ch->len = sz;
+			    }
+			  else
+			    {
+			      if (pd->icv_removed)
+				{
+				  clib_memcpy_fast (vlib_buffer_get_tail
+						    (pd->lb), &seq_hi, sz);
+				}
+			      else
+				{
+				  clib_memcpy_fast (tmp, op->digest,
+						    ESP_MAX_ICV_SIZE);
+				  clib_memcpy_fast (op->digest, &seq_hi, sz);
+				  clib_memcpy_fast (op->digest + sz, tmp,
+						    ESP_MAX_ICV_SIZE);
+				  op->digest += sz;
+				}
+			      ch->len += sz;
+			    }
+			}
+		    }
+		  else
+		    ch->len = cb->current_length;
+
+		  if (!(cb->flags & VLIB_BUFFER_NEXT_PRESENT))
+		    break;
+
+		  cb = vlib_get_buffer (vm, cb->next_buffer);
 		}
 	    }
-
-	  if (PREDICT_TRUE (sa0->use_anti_replay))
+	  else if (ipsec_sa_is_set_USE_ESN (sa0))
 	    {
-	      if (PREDICT_TRUE (sa0->use_esn))
-		esp_replay_advance_esn (sa0, seq);
-	      else
-		esp_replay_advance (sa0, seq);
+	      /* shift ICV by 4 bytes to insert ESN */
+	      u32 seq_hi = clib_host_to_net_u32 (sa0->seq_hi);
+	      u8 tmp[ESP_MAX_ICV_SIZE], sz = sizeof (sa0->seq_hi);
+	      clib_memcpy_fast (tmp, payload + len, ESP_MAX_ICV_SIZE);
+	      clib_memcpy_fast (payload + len, &seq_hi, sz);
+	      clib_memcpy_fast (payload + len + sz, tmp, ESP_MAX_ICV_SIZE);
+	      op->len += sz;
+	      op->digest += sz;
 	    }
+	}
 
-	  /* grab free buffer */
-	  uword last_empty_buffer = vec_len (empty_buffers) - 1;
-	  o_bi0 = empty_buffers[last_empty_buffer];
-	  to_next[0] = o_bi0;
-	  to_next += 1;
-	  o_b0 = vlib_get_buffer (vm, o_bi0);
-	  vlib_prefetch_buffer_with_index (vm,
-					   empty_buffers[last_empty_buffer -
							 1], STORE);
-	  _vec_len (empty_buffers) = last_empty_buffer;
-
-	  
/* add old buffer to the recycle list */ - vec_add1 (recycle, i_bi0); - - if ((sa0->crypto_alg >= IPSEC_CRYPTO_ALG_AES_CBC_128 && - sa0->crypto_alg <= IPSEC_CRYPTO_ALG_AES_CBC_256) || - (sa0->crypto_alg >= IPSEC_CRYPTO_ALG_DES_CBC && - sa0->crypto_alg <= IPSEC_CRYPTO_ALG_3DES_CBC)) - { - const int BLOCK_SIZE = - em->ipsec_proto_main_crypto_algs[sa0->crypto_alg].block_size;; - const int IV_SIZE = - em->ipsec_proto_main_crypto_algs[sa0->crypto_alg].iv_size; - esp_footer_t *f0; - u8 ip_hdr_size = 0; + payload += esp_sz; + len -= esp_sz; - int blocks = - (i_b0->current_length - sizeof (esp_header_t) - - IV_SIZE) / BLOCK_SIZE; + if (sa0->crypto_dec_op_id != VNET_CRYPTO_OP_NONE) + { + vnet_crypto_op_t *op; + vec_add2_aligned (crypto_ops[0], op, 1, CLIB_CACHE_LINE_BYTES); + vnet_crypto_op_init (op, sa0->crypto_dec_op_id); + op->key_index = sa0->crypto_key_index; + op->iv = payload; - o_b0->current_data = sizeof (ethernet_header_t); + if (ipsec_sa_is_set_IS_AEAD (sa0)) + { + esp_header_t *esp0; + esp_aead_t *aad; + u8 *scratch; + + /* + * construct the AAD and the nonce (Salt || IV) in a scratch + * space in front of the IP header. + */ + scratch = payload - esp_sz; + esp0 = (esp_header_t *) (scratch); + + scratch -= (sizeof (*aad) + pd->hdr_sz); + op->aad = scratch; + + esp_aad_fill (op, esp0, sa0); + + /* + * we don't need to refer to the ESP header anymore so we + * can overwrite it with the salt and use the IV where it is + * to form the nonce = (Salt + IV) + */ + op->iv -= sizeof (sa0->salt); + clib_memcpy_fast (op->iv, &sa0->salt, sizeof (sa0->salt)); + + op->tag = payload + len; + op->tag_len = 16; + } + op->src = op->dst = payload += cpd.iv_sz; + op->len = len - cpd.iv_sz; + op->user_data = b - bufs; - /* transport mode */ - if (PREDICT_FALSE (!sa0->is_tunnel && !sa0->is_tunnel_ip6)) + if (pd->lb != b[0]) + { + /* buffer is chained */ + vlib_buffer_t *cb = b[0]; + op->flags |= VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS; + op->chunk_index = vec_len (ptd->chunks); + vec_add2 (ptd->chunks, ch, 1); + ch->len = len - cpd.iv_sz + cpd.icv_sz; + ch->src = ch->dst = payload; + cb = vlib_get_buffer (vm, cb->next_buffer); + op->n_chunks = 1; + + while (1) { - tunnel_mode = 0; - - if (is_ip6) - { - ih6 = - (ip6_header_t *) ((u8 *) esp0 - - sizeof (ip6_header_t)); - ip_hdr_size = sizeof (ip6_header_t); - oh6 = vlib_buffer_get_current (o_b0); - } - else + vec_add2 (ptd->chunks, ch, 1); + op->n_chunks += 1; + ch->src = ch->dst = vlib_buffer_get_current (cb); + if (pd->lb == cb) { - if (sa0->udp_encap) + if (ipsec_sa_is_set_IS_AEAD (sa0)) { - ih4 = - (ip4_header_t *) ((u8 *) esp0 - - sizeof (udp_header_t) - - sizeof (ip4_header_t)); + if (pd->lb->current_length < cpd.icv_sz) + { + op->tag = + esp_move_icv (vm, b[0], pd, cpd.icv_sz); + + /* this chunk does not contain crypto data */ + op->n_chunks -= 1; + + /* and fix previous chunk's length as it might have + been changed */ + ASSERT (op->n_chunks > 0); + ch[-1].len = pd->lb->current_length; + break; + } + else + op->tag = + vlib_buffer_get_tail (pd->lb) - cpd.icv_sz; } + + if (pd->icv_removed) + ch->len = cb->current_length; else - { - ih4 = - (ip4_header_t *) ((u8 *) esp0 - - sizeof (ip4_header_t)); - } - oh4 = vlib_buffer_get_current (o_b0); - ip_hdr_size = sizeof (ip4_header_t); + ch->len = cb->current_length - cpd.icv_sz; } + else + ch->len = cb->current_length; + + if (!(cb->flags & VLIB_BUFFER_NEXT_PRESENT)) + break; + + cb = vlib_get_buffer (vm, cb->next_buffer); } + } + } + + /* next */ + next: + n_left -= 1; + next += 1; + pd += 1; + b += 1; + } + + if 
(PREDICT_TRUE (~0 != current_sa_index))
+    vlib_increment_combined_counter (&ipsec_sa_counters, thread_index,
+				     current_sa_index, current_sa_pkts,
+				     current_sa_bytes);
+
+  esp_process_ops (vm, node, ptd->integ_ops, bufs, nexts,
+		   ESP_DECRYPT_ERROR_INTEG_ERROR);
+  esp_process_chained_ops (vm, node, ptd->chained_integ_ops, bufs, nexts,
+			   ptd->chunks, ESP_DECRYPT_ERROR_INTEG_ERROR);
+
+  esp_process_ops (vm, node, ptd->crypto_ops, bufs, nexts,
+		   ESP_DECRYPT_ERROR_DECRYPTION_FAILED);
+  esp_process_chained_ops (vm, node, ptd->chained_crypto_ops, bufs, nexts,
+			   ptd->chunks, ESP_DECRYPT_ERROR_DECRYPTION_FAILED);
+
+  /* Post decryption round - adjust packet data start and length and next
+     node */
+
+  n_left = from_frame->n_vectors;
+  next = nexts;
+  pd = pkt_data;
+  b = bufs;
+
+  while (n_left)
+    {
+      const u8 tun_flags = IPSEC_SA_FLAG_IS_TUNNEL |
+	IPSEC_SA_FLAG_IS_TUNNEL_V6;
+
+      if (n_left >= 2)
+	{
+	  void *data = b[1]->data + pd[1].current_data;
+
+	  /* buffer metadata */
+	  vlib_prefetch_buffer_header (b[1], LOAD);
 
-	  esp_decrypt_cbc (sa0->crypto_alg,
-			   esp0->data + IV_SIZE,
-			   (u8 *) vlib_buffer_get_current (o_b0) +
-			   ip_hdr_size, BLOCK_SIZE * blocks,
-			   sa0->crypto_key, esp0->data);
-
-	  o_b0->current_length = (blocks * BLOCK_SIZE) - 2 + ip_hdr_size;
-	  o_b0->flags = VLIB_BUFFER_TOTAL_LENGTH_VALID;
-	  f0 =
-	    (esp_footer_t *) ((u8 *) vlib_buffer_get_current (o_b0) +
-			      o_b0->current_length);
-	  o_b0->current_length -= f0->pad_length;
-
-	  /* tunnel mode */
-	  if (PREDICT_TRUE (tunnel_mode))
+	  /* esp_footer_t */
+	  CLIB_PREFETCH (data + pd[1].current_length - pd[1].icv_sz - 2,
+			 CLIB_CACHE_LINE_BYTES, LOAD);
+
+	  /* packet headers */
+	  CLIB_PREFETCH (data - CLIB_CACHE_LINE_BYTES,
+			 CLIB_CACHE_LINE_BYTES * 2, LOAD);
+	}
+
+      if (next[0] < ESP_DECRYPT_N_NEXT)
+	goto trace;
+
+      sa0 = vec_elt_at_index (im->sad, pd->sa_index);
+
+      /*
+       * redo the anti-replay check
+       * in this frame say we have sequence numbers, s, s+1, s+1, s+1
+       * and s and s+1 are in the window. When we did the anti-replay
+       * check above we did so against the state of the window (W),
+       * after packet s-1. So each of the packets in the sequence will be
+       * accepted.
+       * This time s will be checked against Ws-1, s+1 checked against Ws
+       * (i.e. the window state is updated/advanced)
+       * so this time the successive s+1 packet will be dropped.
+       * This is a consequence of batching the decrypts. If the
+       * check-decrypt-advance process was done for each packet it would
+       * be fine. But we batch the decrypts because it's much more efficient
+       * to do so in SW and if we offload to HW and the process is async.
+       *
+       * You're probably thinking, but this means an attacker can send the
+       * above sequence and cause VPP to perform decrypts that will fail,
+       * and that's true. But if the attacker can determine s (a valid
+       * sequence number in the window) which is non-trivial, it can generate
+       * a sequence s, s+1, s+2, s+3, ... s+n and nothing will prevent any
+       * implementation, sequential or batching, from decrypting these.
+       */
+      if (ipsec_sa_anti_replay_check (sa0, pd->seq))
+	{
+	  b[0]->error = node->errors[ESP_DECRYPT_ERROR_REPLAY];
+	  next[0] = ESP_DECRYPT_NEXT_DROP;
+	  goto trace;
+	}
+
+      ipsec_sa_anti_replay_advance (sa0, pd->seq);
+
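The block that follows recovers the ESP trailer, which RFC 4303 places at
the very end of the decrypted payload, immediately before the ICV:

  | payload data | padding | pad_length | next_header | ICV |

A contiguous-buffer sketch of the lookup (illustrative helper only; the
patch instead walks the buffer chain because the trailer may straddle
buffers, as handled below):

static inline esp_footer_t *
demo_esp_footer (u8 * packet_end, u16 icv_sz)
{
  /* pad_length and next_header sit icv_sz bytes before the end */
  return (esp_footer_t *) (packet_end - icv_sz - sizeof (esp_footer_t));
}
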
+      u8 pad_length = 0, next_header = 0;
+      u16 icv_sz = pd->icv_removed ? 0 : pd->icv_sz;
+
+      if (pd->free_buffer_index)
+	vlib_buffer_free_one (vm, pd->free_buffer_index);
+
+      if (pd->lb->current_length < sizeof (esp_footer_t) + icv_sz)
+	{
+	  /* the esp footer is either split across the last two buffers
+	   * or contained in the before-last buffer */
+
+	  vlib_buffer_t *before_last = b[0], *bp = b[0];
+	  while (bp->flags & VLIB_BUFFER_NEXT_PRESENT)
+	    {
+	      before_last = bp;
+	      bp = vlib_get_buffer (vm, bp->next_buffer);
+	    }
+	  u8 *bt = vlib_buffer_get_tail (before_last);
+
+	  if (pd->lb->current_length == icv_sz)
+	    {
+	      esp_footer_t *f = (esp_footer_t *) (bt - sizeof (*f));
+	      pad_length = f->pad_length;
+	      next_header = f->next_header;
+	    }
+	  else
+	    {
+	      pad_length = (bt - 1)[0];
+	      next_header = ((u8 *) vlib_buffer_get_current (pd->lb))[0];
+	    }
+	}
+      else
+	{
+	  esp_footer_t *f =
+	    (esp_footer_t *) (pd->lb->data + pd->lb->current_data +
+			      pd->lb->current_length - sizeof (esp_footer_t) -
+			      icv_sz);
+	  pad_length = f->pad_length;
+	  next_header = f->next_header;
+	}
+
+      u16 adv = pd->iv_sz + esp_sz;
+      u16 tail = sizeof (esp_footer_t) + pad_length + icv_sz;
+      u16 tail_orig = sizeof (esp_footer_t) + pad_length + pd->icv_sz;
+      b[0]->flags &= ~VLIB_BUFFER_TOTAL_LENGTH_VALID;
+
+      if ((pd->flags & tun_flags) == 0 && !is_tun)	/* transport mode */
+	{
+	  u8 udp_sz = (is_ip6 == 0 && pd->flags & IPSEC_SA_FLAG_UDP_ENCAP) ?
+	    sizeof (udp_header_t) : 0;
+	  u16 ip_hdr_sz = pd->hdr_sz - udp_sz;
+	  u8 *old_ip = b[0]->data + pd->current_data - ip_hdr_sz - udp_sz;
+	  u8 *ip = old_ip + adv + udp_sz;
+
+	  if (is_ip6 && ip_hdr_sz > 64)
+	    memmove (ip, old_ip, ip_hdr_sz);
+	  else
+	    clib_memcpy_le64 (ip, old_ip, ip_hdr_sz);
+
+	  b[0]->current_data = pd->current_data + adv - ip_hdr_sz;
+	  b[0]->current_length = pd->current_length + ip_hdr_sz - adv;
+	  esp_remove_tail (vm, b[0], pd->lb, tail);
+
+	  if (is_ip6)
+	    {
+	      ip6_header_t *ip6 = (ip6_header_t *) ip;
+	      u16 len = clib_net_to_host_u16 (ip6->payload_length);
+	      len -= adv + tail_orig;
+	      ip6->payload_length = clib_host_to_net_u16 (len);
+	      ip6->protocol = next_header;
+	      next[0] = ESP_DECRYPT_NEXT_IP6_INPUT;
+	    }
+	  else
+	    {
+	      ip4_header_t *ip4 = (ip4_header_t *) ip;
+	      ip_csum_t sum = ip4->checksum;
+	      u16 len = clib_net_to_host_u16 (ip4->length);
+	      len = clib_host_to_net_u16 (len - adv - tail_orig - udp_sz);
+	      sum = ip_csum_update (sum, ip4->protocol, next_header,
+				    ip4_header_t, protocol);
+	      sum = ip_csum_update (sum, ip4->length, len,
+				    ip4_header_t, length);
+	      ip4->checksum = ip_csum_fold (sum);
+	      ip4->protocol = next_header;
+	      ip4->length = len;
+	      next[0] = ESP_DECRYPT_NEXT_IP4_INPUT;
+	    }
+	}
+      else
+	{
+	  if (PREDICT_TRUE (next_header == IP_PROTOCOL_IP_IN_IP))
+	    {
+	      next[0] = ESP_DECRYPT_NEXT_IP4_INPUT;
+	      b[0]->current_data = pd->current_data + adv;
+	      b[0]->current_length = pd->current_length - adv;
+	      esp_remove_tail (vm, b[0], pd->lb, tail);
+	    }
+	  else if (next_header == IP_PROTOCOL_IPV6)
+	    {
+	      next[0] = ESP_DECRYPT_NEXT_IP6_INPUT;
+	      b[0]->current_data = pd->current_data + adv;
+	      b[0]->current_length = pd->current_length - adv;
+	      esp_remove_tail (vm, b[0], pd->lb, tail);
+	    }
+	  else
+	    {
+	      if (is_tun && next_header == IP_PROTOCOL_GRE)
 		{
-		  if (PREDICT_TRUE (f0->next_header == IP_PROTOCOL_IP_IN_IP))
-		    {
-		      next0 = ESP_DECRYPT_NEXT_IP4_INPUT;
-		      oh4 = vlib_buffer_get_current (o_b0);
-		    }
-		  else if (f0->next_header == IP_PROTOCOL_IPV6)
-		    next0 = ESP_DECRYPT_NEXT_IP6_INPUT;
-		  else
+		  gre_header_t *gre;
+
+		  b[0]->current_data = pd->current_data + adv;
+		  b[0]->current_length = pd->current_length - adv - tail;
+
+		  gre = vlib_buffer_get_current (b[0]);
+
+		  vlib_buffer_advance (b[0], sizeof (*gre));
+
+		  switch (clib_net_to_host_u16 (gre->protocol))
 		    {
-		      vlib_node_increment_counter (vm, node->node_index,
-						   ESP_DECRYPT_ERROR_DECRYPTION_FAILED,
-						   1);
-		      o_b0 = 0;
-		      goto trace;
+		    case GRE_PROTOCOL_teb:
+		      vnet_update_l2_len (b[0]);
+		      next[0] = ESP_DECRYPT_NEXT_L2_INPUT;
+		      break;
+		    case GRE_PROTOCOL_ip4:
+		      next[0] = ESP_DECRYPT_NEXT_IP4_INPUT;
+		      break;
+		    case GRE_PROTOCOL_ip6:
+		      next[0] = ESP_DECRYPT_NEXT_IP6_INPUT;
+		      break;
+		    default:
+		      b[0]->error =
+			node->errors[ESP_DECRYPT_ERROR_UNSUP_PAYLOAD];
+		      next[0] = ESP_DECRYPT_NEXT_DROP;
+		      break;
 		    }
 		}
-	      /* transport mode */
 	      else
 		{
-		  if (is_ip6)
+		  next[0] = ESP_DECRYPT_NEXT_DROP;
+		  b[0]->error = node->errors[ESP_DECRYPT_ERROR_UNSUP_PAYLOAD];
+		  goto trace;
+		}
+	    }
+	  if (is_tun)
+	    {
+	      if (ipsec_sa_is_set_IS_PROTECT (sa0))
+		{
+		  /*
+		   * There are two encap possibilities
+		   * 1) the tunnel and the SA are providing encap, i.e. it's
+		   *   MAC | SA-IP | TUN-IP | ESP | PAYLOAD
+		   * implying the SA is in tunnel mode (on a tunnel interface)
+		   * 2) only the tunnel provides encap
+		   *   MAC | TUN-IP | ESP | PAYLOAD
+		   * implying the SA is in transport mode.
+		   *
+		   * For 2) we need only strip the tunnel encap and we're good,
+		   * since the tunnel and crypto encap (in the tun-protect
+		   * object) are the same and we verified above that these
+		   * match.
+		   * For 1) we need to strip the SA-IP outer headers, to
+		   * reveal the tunnel IP and then check that this matches
+		   * the configured tunnel.
+		   */
+		  const ipsec_tun_protect_t *itp;
+
+		  itp = ipsec_tun_protect_get
+		    (vnet_buffer (b[0])->ipsec.protect_index);
+
+		  if (PREDICT_TRUE (next_header == IP_PROTOCOL_IP_IN_IP))
 		    {
-		      next0 = ESP_DECRYPT_NEXT_IP6_INPUT;
-		      oh6->ip_version_traffic_class_and_flow_label =
-			ih6->ip_version_traffic_class_and_flow_label;
-		      oh6->protocol = f0->next_header;
-		      oh6->hop_limit = ih6->hop_limit;
-		      oh6->src_address.as_u64[0] = ih6->src_address.as_u64[0];
-		      oh6->src_address.as_u64[1] = ih6->src_address.as_u64[1];
-		      oh6->dst_address.as_u64[0] = ih6->dst_address.as_u64[0];
-		      oh6->dst_address.as_u64[1] = ih6->dst_address.as_u64[1];
-		      oh6->payload_length =
-			clib_host_to_net_u16 (vlib_buffer_length_in_chain
-					      (vm,
-					       o_b0) - sizeof (ip6_header_t));
+		      const ip4_header_t *ip4;
+
+		      ip4 = vlib_buffer_get_current (b[0]);
+
+		      if (!ip46_address_is_equal_v4 (&itp->itp_tun.src,
+						     &ip4->dst_address) ||
+			  !ip46_address_is_equal_v4 (&itp->itp_tun.dst,
+						     &ip4->src_address))
+			{
+			  next[0] = ESP_DECRYPT_NEXT_DROP;
+			  b[0]->error =
+			    node->errors[ESP_DECRYPT_ERROR_TUN_NO_PROTO];
+			}
 		    }
-		  else
+		  else if (next_header == IP_PROTOCOL_IPV6)
 		    {
-		      next0 = ESP_DECRYPT_NEXT_IP4_INPUT;
-		      oh4->ip_version_and_header_length = 0x45;
-		      oh4->tos = ih4->tos;
-		      oh4->fragment_id = 0;
-		      oh4->flags_and_fragment_offset = 0;
-		      oh4->ttl = ih4->ttl;
-		      oh4->protocol = f0->next_header;
-		      oh4->src_address.as_u32 = ih4->src_address.as_u32;
-		      oh4->dst_address.as_u32 = ih4->dst_address.as_u32;
-		      oh4->length =
-			clib_host_to_net_u16 (vlib_buffer_length_in_chain
-					      (vm, o_b0));
-		      oh4->checksum = ip4_header_checksum (oh4);
-		    }
-		}
+		      const ip6_header_t *ip6;
 
-	      /* for IPSec-GRE tunnel next node is ipsec-gre-input */
-	      if (PREDICT_FALSE
-		  ((vnet_buffer (i_b0)->ipsec.flags) &
-		   IPSEC_FLAG_IPSEC_GRE_TUNNEL))
-		next0 = ESP_DECRYPT_NEXT_IPSEC_GRE_INPUT;
+		      ip6 = vlib_buffer_get_current (b[0]);
 
-	      vnet_buffer (o_b0)->sw_if_index[VLIB_TX] = (u32) ~ 0;
-	      vnet_buffer (o_b0)->sw_if_index[VLIB_RX] =
-		vnet_buffer (i_b0)->sw_if_index[VLIB_RX];
-	    }
-
-	  trace:
-	  if (PREDICT_FALSE (i_b0->flags & VLIB_BUFFER_IS_TRACED))
-	    {
-	      if (o_b0)
-		{
-		  o_b0->flags |= VLIB_BUFFER_IS_TRACED;
-		  
o_b0->trace_index = i_b0->trace_index; - esp_decrypt_trace_t *tr = - vlib_add_trace (vm, node, o_b0, sizeof (*tr)); - tr->crypto_alg = sa0->crypto_alg; - tr->integ_alg = sa0->integ_alg; + if (!ip46_address_is_equal_v6 (&itp->itp_tun.src, + &ip6->dst_address) || + !ip46_address_is_equal_v6 (&itp->itp_tun.dst, + &ip6->src_address)) + { + next[0] = ESP_DECRYPT_NEXT_DROP; + b[0]->error = + node->errors[ESP_DECRYPT_ERROR_TUN_NO_PROTO]; + } + } } } + } - vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next, - n_left_to_next, o_bi0, next0); + trace: + if (PREDICT_FALSE (b[0]->flags & VLIB_BUFFER_IS_TRACED)) + { + esp_decrypt_trace_t *tr; + tr = vlib_add_trace (vm, node, b[0], sizeof (*tr)); + sa0 = pool_elt_at_index (im->sad, + vnet_buffer (b[0])->ipsec.sad_index); + tr->crypto_alg = sa0->crypto_alg; + tr->integ_alg = sa0->integ_alg; + tr->seq = pd->seq; + tr->sa_seq = sa0->last_seq; + tr->sa_seq_hi = sa0->seq_hi; } - vlib_put_next_frame (vm, node, next_index, n_left_to_next); + + /* next */ + n_left -= 1; + next += 1; + pd += 1; + b += 1; } + + n_left = from_frame->n_vectors; vlib_node_increment_counter (vm, node->node_index, - ESP_DECRYPT_ERROR_RX_PKTS, - from_frame->n_vectors); + ESP_DECRYPT_ERROR_RX_PKTS, n_left); + vlib_buffer_enqueue_to_next (vm, node, from, nexts, n_left); -free_buffers_and_exit: - if (recycle) - vlib_buffer_free (vm, recycle, vec_len (recycle)); - vec_free (recycle); - return from_frame->n_vectors; + return n_left; } VLIB_NODE_FN (esp4_decrypt_node) (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * from_frame) { - return esp_decrypt_inline (vm, node, from_frame, 0 /* is_ip6 */ ); + return esp_decrypt_inline (vm, node, from_frame, 0, 0); +} + +VLIB_NODE_FN (esp4_decrypt_tun_node) (vlib_main_t * vm, + vlib_node_runtime_t * node, + vlib_frame_t * from_frame) +{ + return esp_decrypt_inline (vm, node, from_frame, 0, 1); +} + +VLIB_NODE_FN (esp6_decrypt_node) (vlib_main_t * vm, + vlib_node_runtime_t * node, + vlib_frame_t * from_frame) +{ + return esp_decrypt_inline (vm, node, from_frame, 1, 0); +} + +VLIB_NODE_FN (esp6_decrypt_tun_node) (vlib_main_t * vm, + vlib_node_runtime_t * node, + vlib_frame_t * from_frame) +{ + return esp_decrypt_inline (vm, node, from_frame, 1, 1); } /* *INDENT-OFF* */ @@ -425,21 +957,14 @@ VLIB_REGISTER_NODE (esp4_decrypt_node) = { .n_next_nodes = ESP_DECRYPT_N_NEXT, .next_nodes = { -#define _(s,n) [ESP_DECRYPT_NEXT_##s] = n, - foreach_esp_decrypt_next -#undef _ + [ESP_DECRYPT_NEXT_DROP] = "ip4-drop", + [ESP_DECRYPT_NEXT_IP4_INPUT] = "ip4-input-no-checksum", + [ESP_DECRYPT_NEXT_IP6_INPUT] = "ip6-input", + [ESP_DECRYPT_NEXT_L2_INPUT] = "l2-input", + [ESP_DECRYPT_NEXT_HANDOFF] = "esp4-decrypt-handoff", }, }; -/* *INDENT-ON* */ -VLIB_NODE_FN (esp6_decrypt_node) (vlib_main_t * vm, - vlib_node_runtime_t * node, - vlib_frame_t * from_frame) -{ - return esp_decrypt_inline (vm, node, from_frame, 1 /* is_ip6 */ ); -} - -/* *INDENT-OFF* */ VLIB_REGISTER_NODE (esp6_decrypt_node) = { .name = "esp6-decrypt", .vector_size = sizeof (u32), @@ -451,9 +976,45 @@ VLIB_REGISTER_NODE (esp6_decrypt_node) = { .n_next_nodes = ESP_DECRYPT_N_NEXT, .next_nodes = { -#define _(s,n) [ESP_DECRYPT_NEXT_##s] = n, - foreach_esp_decrypt_next -#undef _ + [ESP_DECRYPT_NEXT_DROP] = "ip6-drop", + [ESP_DECRYPT_NEXT_IP4_INPUT] = "ip4-input-no-checksum", + [ESP_DECRYPT_NEXT_IP6_INPUT] = "ip6-input", + [ESP_DECRYPT_NEXT_L2_INPUT] = "l2-input", + [ESP_DECRYPT_NEXT_HANDOFF]= "esp6-decrypt-handoff", + }, +}; + +VLIB_REGISTER_NODE (esp4_decrypt_tun_node) = { + .name = 
"esp4-decrypt-tun", + .vector_size = sizeof (u32), + .format_trace = format_esp_decrypt_trace, + .type = VLIB_NODE_TYPE_INTERNAL, + .n_errors = ARRAY_LEN(esp_decrypt_error_strings), + .error_strings = esp_decrypt_error_strings, + .n_next_nodes = ESP_DECRYPT_N_NEXT, + .next_nodes = { + [ESP_DECRYPT_NEXT_DROP] = "ip4-drop", + [ESP_DECRYPT_NEXT_IP4_INPUT] = "ip4-input-no-checksum", + [ESP_DECRYPT_NEXT_IP6_INPUT] = "ip6-input", + [ESP_DECRYPT_NEXT_L2_INPUT] = "l2-input", + [ESP_DECRYPT_NEXT_HANDOFF] = "esp4-decrypt-tun-handoff", + }, +}; + +VLIB_REGISTER_NODE (esp6_decrypt_tun_node) = { + .name = "esp6-decrypt-tun", + .vector_size = sizeof (u32), + .format_trace = format_esp_decrypt_trace, + .type = VLIB_NODE_TYPE_INTERNAL, + .n_errors = ARRAY_LEN(esp_decrypt_error_strings), + .error_strings = esp_decrypt_error_strings, + .n_next_nodes = ESP_DECRYPT_N_NEXT, + .next_nodes = { + [ESP_DECRYPT_NEXT_DROP] = "ip6-drop", + [ESP_DECRYPT_NEXT_IP4_INPUT] = "ip4-input-no-checksum", + [ESP_DECRYPT_NEXT_IP6_INPUT] = "ip6-input", + [ESP_DECRYPT_NEXT_L2_INPUT] = "l2-input", + [ESP_DECRYPT_NEXT_HANDOFF]= "esp6-decrypt-tun-handoff", }, }; /* *INDENT-ON* */