X-Git-Url: https://gerrit.fd.io/r/gitweb?a=blobdiff_plain;f=src%2Fplugins%2Fdpdk%2Fipsec%2Fesp_decrypt.c;h=afbab963009b484c663a6bec776f6708fa410872;hb=5025d40a1134272ab57c3c3f10311e31a65cd63c;hp=d8d1d385f0f0a5dfa7a1bb655df90859f8847291;hpb=a5c308e68d5578e5d75327619e30d73a95abb1a5;p=vpp.git

diff --git a/src/plugins/dpdk/ipsec/esp_decrypt.c b/src/plugins/dpdk/ipsec/esp_decrypt.c
index d8d1d385f0f..afbab963009 100644
--- a/src/plugins/dpdk/ipsec/esp_decrypt.c
+++ b/src/plugins/dpdk/ipsec/esp_decrypt.c
@@ -97,7 +97,7 @@ dpdk_esp_decrypt_inline (vlib_main_t * vm,
                          vlib_node_runtime_t * node,
                          vlib_frame_t * from_frame, int is_ip6)
 {
-  u32 n_left_from, *from, *to_next, next_index;
+  u32 n_left_from, *from, *to_next, next_index, thread_index;
   ipsec_main_t *im = &ipsec_main;
   u32 thread_idx = vlib_get_thread_index ();
   dpdk_crypto_main_t *dcm = &dpdk_crypto_main;
@@ -114,6 +114,7 @@ dpdk_esp_decrypt_inline (vlib_main_t * vm,
 
   from = vlib_frame_vector_args (from_frame);
   n_left_from = from_frame->n_vectors;
+  thread_index = vm->thread_index;
 
   ret = crypto_alloc_ops (numa, ops, n_left_from);
   if (ret)
@@ -139,7 +140,7 @@ dpdk_esp_decrypt_inline (vlib_main_t * vm,
       while (n_left_from > 0 && n_left_to_next > 0)
         {
           clib_error_t *error;
-          u32 bi0, sa_index0, seq, iv_size;
+          u32 bi0, sa_index0, iv_size;
           u8 trunc_size;
           vlib_buffer_t *b0;
           esp_header_t *esp0;
@@ -165,12 +166,17 @@ dpdk_esp_decrypt_inline (vlib_main_t * vm,
           ASSERT (op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED);
 
           dpdk_op_priv_t *priv = crypto_op_get_priv (op);
+          /* store bi in op private */
+          priv->bi = bi0;
+          priv->encrypt = 0;
 
           u16 op_len =
             sizeof (op[0]) + sizeof (op[0].sym[0]) + sizeof (priv[0]);
           CLIB_PREFETCH (op, op_len, STORE);
 
           sa_index0 = vnet_buffer (b0)->ipsec.sad_index;
+          vlib_prefetch_combined_counter (&ipsec_sa_counters,
+                                          thread_index, sa_index0);
 
           if (sa_index0 != last_sa_index)
             {
@@ -229,33 +235,22 @@ dpdk_esp_decrypt_inline (vlib_main_t * vm,
             }
 
           /* anti-replay check */
-          if (sa0->use_anti_replay)
+          if (ipsec_sa_anti_replay_check
+              (sa0, clib_host_to_net_u32 (esp0->seq)))
             {
-              int rv = 0;
-
-              seq = clib_net_to_host_u32 (esp0->seq);
-
-              if (PREDICT_TRUE (sa0->use_esn))
-                rv = esp_replay_check_esn (sa0, seq);
+              clib_warning ("failed anti-replay check");
+              if (is_ip6)
+                vlib_node_increment_counter (vm,
+                                             dpdk_esp6_decrypt_node.index,
+                                             ESP_DECRYPT_ERROR_REPLAY, 1);
               else
-                rv = esp_replay_check (sa0, seq);
-
-              if (PREDICT_FALSE (rv))
-                {
-                  clib_warning ("failed anti-replay check");
-                  if (is_ip6)
-                    vlib_node_increment_counter (vm,
-                                                 dpdk_esp6_decrypt_node.index,
-                                                 ESP_DECRYPT_ERROR_REPLAY, 1);
-                  else
-                    vlib_node_increment_counter (vm,
-                                                 dpdk_esp4_decrypt_node.index,
-                                                 ESP_DECRYPT_ERROR_REPLAY, 1);
-                  to_next[0] = bi0;
-                  to_next += 1;
-                  n_left_to_next -= 1;
-                  goto trace;
-                }
+                vlib_node_increment_counter (vm,
+                                             dpdk_esp4_decrypt_node.index,
+                                             ESP_DECRYPT_ERROR_REPLAY, 1);
+              to_next[0] = bi0;
+              to_next += 1;
+              n_left_to_next -= 1;
+              goto trace;
             }
 
           if (is_ip6)
@@ -264,7 +259,9 @@ dpdk_esp_decrypt_inline (vlib_main_t * vm,
             priv->next = DPDK_CRYPTO_INPUT_NEXT_DECRYPT4_POST;
 
           /* FIXME multi-seg */
-          sa0->total_data_size += b0->current_length;
+          vlib_increment_combined_counter
+            (&ipsec_sa_counters, thread_index, sa_index0,
+             1, b0->current_length);
 
           res->ops[res->n_ops] = op;
           res->bi[res->n_ops] = bi0;
@@ -332,7 +329,7 @@ dpdk_esp_decrypt_inline (vlib_main_t * vm,
               clib_memcpy_fast (aad, esp0, 8);
 
               /* _aad[3] should always be 0 */
-              if (PREDICT_FALSE (sa0->use_esn))
+              if (PREDICT_FALSE (ipsec_sa_is_set_USE_ESN (sa0)))
                 _aad[2] = clib_host_to_net_u32 (sa0->seq_hi);
               else
                 _aad[2] = 0;
@@ -341,7 +338,7 @@ dpdk_esp_decrypt_inline (vlib_main_t * vm,
             {
               auth_len = sizeof (esp_header_t) + iv_size + payload_len;
 
-              if (sa0->use_esn)
+              if (ipsec_sa_is_set_USE_ESN (sa0))
                 {
                   clib_memcpy_fast (priv->icv, digest, trunc_size);
                   u32 *_digest = (u32 *) digest;
@@ -376,8 +373,8 @@ dpdk_esp_decrypt_inline (vlib_main_t * vm,
                                    ESP_DECRYPT_ERROR_RX_PKTS,
                                    from_frame->n_vectors);
 
-      crypto_enqueue_ops (vm, cwm, 0, dpdk_esp6_decrypt_node.index,
-                          ESP_DECRYPT_ERROR_ENQ_FAIL, numa);
+      crypto_enqueue_ops (vm, cwm, dpdk_esp6_decrypt_node.index,
+                          ESP_DECRYPT_ERROR_ENQ_FAIL, numa, 0 /* encrypt */ );
     }
   else
     {
@@ -385,8 +382,8 @@ dpdk_esp_decrypt_inline (vlib_main_t * vm,
                                    ESP_DECRYPT_ERROR_RX_PKTS,
                                    from_frame->n_vectors);
 
-      crypto_enqueue_ops (vm, cwm, 0, dpdk_esp4_decrypt_node.index,
-                          ESP_DECRYPT_ERROR_ENQ_FAIL, numa);
+      crypto_enqueue_ops (vm, cwm, dpdk_esp4_decrypt_node.index,
+                          ESP_DECRYPT_ERROR_ENQ_FAIL, numa, 0 /* encrypt */ );
     }
 
   crypto_free_ops (numa, ops, cwm->ops + from_frame->n_vectors - ops);
@@ -553,18 +550,12 @@ dpdk_esp_decrypt_post_inline (vlib_main_t * vm,
 
           iv_size = cipher_alg->iv_len;
 
-          if (sa0->use_anti_replay)
-            {
-              u32 seq;
-              seq = clib_host_to_net_u32 (esp0->seq);
-              if (PREDICT_TRUE (sa0->use_esn))
-                esp_replay_advance_esn (sa0, seq);
-              else
-                esp_replay_advance (sa0, seq);
-            }
+          ipsec_sa_anti_replay_advance (sa0,
+                                        clib_host_to_net_u32 (esp0->seq));
 
           /* if UDP encapsulation is used adjust the address of the IP header */
-          if (sa0->udp_encap && (b0->flags & VNET_BUFFER_F_IS_IP4))
+          if (ipsec_sa_is_set_UDP_ENCAP (sa0)
+              && (b0->flags & VNET_BUFFER_F_IS_IP4))
             {
               udp_encap_adv = sizeof (udp_header_t);
             }
@@ -592,12 +583,11 @@ dpdk_esp_decrypt_post_inline (vlib_main_t * vm,
                   goto trace;
                 }
 #endif
-              if (sa0->is_tunnel)
+              if (ipsec_sa_is_set_IS_TUNNEL (sa0))
                 {
                   if (f0->next_header == IP_PROTOCOL_IP_IN_IP)
                     next0 = ESP_DECRYPT_NEXT_IP4_INPUT;
-                  else if (sa0->is_tunnel_ip6
-                           && f0->next_header == IP_PROTOCOL_IPV6)
+                  else if (f0->next_header == IP_PROTOCOL_IPV6)
                     next0 = ESP_DECRYPT_NEXT_IP6_INPUT;
                   else
                     {
@@ -622,7 +612,7 @@ dpdk_esp_decrypt_post_inline (vlib_main_t * vm,
                   u16 ih4_len = ip4_header_bytes (ih4);
                   vlib_buffer_advance (b0, -ih4_len - udp_encap_adv);
                   next0 = ESP_DECRYPT_NEXT_IP4_INPUT;
-                  if (!ipsec_sa_is_set_UDP_ENCAP (sa0))
+                  if (!ipsec_sa_is_set_UDP_ENCAP (sa0))
                     {
                       oh4 = vlib_buffer_get_current (b0);
                       memmove (oh4, ih4, ih4_len);
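
Note on the change above: the patch drops the per-SA accumulator (sa0->total_data_size) and the sa0->use_* bitfields in favour of the shared ipsec_sa_counters combined (packets + bytes) counters indexed by thread and SA, plus the ipsec_sa_is_set_* and ipsec_sa_anti_replay_* helpers. The following is not VPP code; it is a minimal standalone C sketch of the per-thread packets/bytes counter pattern the patch adopts, and the names combined_counter_t, sa_counter_add and sa_counter_get are hypothetical.

/* Sketch only: per-thread packet/byte counters, summed by readers.
 * Names are hypothetical; this is not the VPP vlib counter API. */
#include <stdint.h>
#include <stdio.h>

#define N_THREADS 4
#define N_SAS     8

typedef struct
{
  uint64_t packets;
  uint64_t bytes;
} combined_counter_t;

/* one row per thread, so data-plane increments need no atomics */
static combined_counter_t sa_counters[N_THREADS][N_SAS];

static inline void
sa_counter_add (uint32_t thread_index, uint32_t sa_index,
                uint64_t n_packets, uint64_t n_bytes)
{
  combined_counter_t *c = &sa_counters[thread_index][sa_index];
  c->packets += n_packets;
  c->bytes += n_bytes;
}

/* readers sum the per-thread rows to get the SA total */
static combined_counter_t
sa_counter_get (uint32_t sa_index)
{
  combined_counter_t total = { 0, 0 };
  int t;
  for (t = 0; t < N_THREADS; t++)
    {
      total.packets += sa_counters[t][sa_index].packets;
      total.bytes += sa_counters[t][sa_index].bytes;
    }
  return total;
}

int
main (void)
{
  sa_counter_add (1, 3, 1, 128);	/* thread 1: one 128-byte packet on SA 3 */
  sa_counter_add (2, 3, 1, 64);		/* thread 2: one 64-byte packet on SA 3 */

  combined_counter_t t = sa_counter_get (3);
  printf ("SA 3: %llu packets, %llu bytes\n",
          (unsigned long long) t.packets, (unsigned long long) t.bytes);
  return 0;
}

Keeping one counter row per thread is why the hot path above can do a plain prefetch-and-increment (vlib_prefetch_combined_counter followed by vlib_increment_combined_counter) without locking; aggregation across threads happens only when the counters are read.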