X-Git-Url: https://gerrit.fd.io/r/gitweb?a=blobdiff_plain;f=src%2Fvnet%2Fipsec%2Fah_encrypt.c;h=5f6a0991be38388c1ff833bf27e05f9d05ba789b;hb=eba31ecebed1a7d168da17194cab7a8955761f2b;hp=47f3b3884c00d93865632b5ef0cce5a6f0f5de6c;hpb=b7b929931a07fbb27b43d5cd105f366c3e29807e;p=vpp.git

diff --git a/src/vnet/ipsec/ah_encrypt.c b/src/vnet/ipsec/ah_encrypt.c
index 47f3b3884c0..5f6a0991be3 100644
--- a/src/vnet/ipsec/ah_encrypt.c
+++ b/src/vnet/ipsec/ah_encrypt.c
@@ -59,6 +59,7 @@ static char *ah_encrypt_error_strings[] = {
 
 typedef struct
 {
+  u32 sa_index;
   u32 spi;
   u32 seq;
   ipsec_integ_alg_t integ_alg;
@@ -72,8 +73,9 @@ format_ah_encrypt_trace (u8 * s, va_list * args)
   CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
   ah_encrypt_trace_t *t = va_arg (*args, ah_encrypt_trace_t *);
 
-  s = format (s, "ah: spi %u seq %u integrity %U",
-              t->spi, t->seq, format_ipsec_integ_alg, t->integ_alg);
+  s = format (s, "ah: sa-index %d spi %u seq %u integrity %U",
+              t->sa_index, t->spi, t->seq,
+              format_ipsec_integ_alg, t->integ_alg);
   return s;
 }
 
@@ -82,13 +84,14 @@ ah_encrypt_inline (vlib_main_t * vm,
                    vlib_node_runtime_t * node, vlib_frame_t * from_frame,
                    int is_ip6)
 {
-  u32 n_left_from, *from, *to_next = 0, next_index;
+  u32 n_left_from, *from, *to_next = 0, next_index, thread_index;
   int icv_size = 0;
   from = vlib_frame_vector_args (from_frame);
   n_left_from = from_frame->n_vectors;
   ipsec_main_t *im = &ipsec_main;
   ipsec_proto_main_t *em = &ipsec_proto_main;
   next_index = node->cached_next_index;
+  thread_index = vm->thread_index;
 
   while (n_left_from > 0)
     {
@@ -125,22 +128,13 @@ ah_encrypt_inline (vlib_main_t * vm,
 
       if (PREDICT_FALSE (esp_seq_advance (sa0)))
         {
-          clib_warning ("sequence number counter has cycled SPI %u",
-                        sa0->spi);
-          if (is_ip6)
-            vlib_node_increment_counter (vm, ah6_encrypt_node.index,
-                                         AH_ENCRYPT_ERROR_SEQ_CYCLED, 1);
-          else
-            vlib_node_increment_counter (vm, ah4_encrypt_node.index,
-                                         AH_ENCRYPT_ERROR_SEQ_CYCLED, 1);
-          //TODO need to confirm if below is needed
-          to_next[0] = i_bi0;
-          to_next += 1;
+          vlib_node_increment_counter (vm, node->node_index,
+                                       AH_ENCRYPT_ERROR_SEQ_CYCLED, 1);
           goto trace;
         }
-
-
-      sa0->total_data_size += i_b0->current_length;
+      vlib_increment_combined_counter
+        (&ipsec_sa_counters, thread_index, sa_index0,
+         1, i_b0->current_length);
 
       ssize_t adv;
       ih0 = vlib_buffer_get_current (i_b0);
@@ -171,7 +165,7 @@ ah_encrypt_inline (vlib_main_t * vm,
                                        sizeof (ethernet_header_t));
               ethernet_header_t *oeh0 =
                 (ethernet_header_t *) ((u8 *) ieh0 + (adv - icv_size));
-              clib_memcpy (oeh0, ieh0, sizeof (ethernet_header_t));
+              clib_memcpy_fast (oeh0, ieh0, sizeof (ethernet_header_t));
             }
 
           vlib_buffer_advance (i_b0, adv - icv_size);
@@ -246,8 +240,9 @@ ah_encrypt_inline (vlib_main_t * vm,
               oh0->ip4.src_address.as_u32 = sa0->tunnel_src_addr.ip4.as_u32;
               oh0->ip4.dst_address.as_u32 = sa0->tunnel_dst_addr.ip4.as_u32;
 
-              next0 = AH_ENCRYPT_NEXT_IP4_LOOKUP;
-              vnet_buffer (i_b0)->sw_if_index[VLIB_TX] = (u32) ~ 0;
+              next0 = sa0->dpo[IPSEC_PROTOCOL_AH].dpoi_next_node;
+              vnet_buffer (i_b0)->ip.adj_index[VLIB_TX] =
+                sa0->dpo[IPSEC_PROTOCOL_AH].dpoi_index;
             }
           else if (is_ip6 && sa0->is_tunnel && sa0->is_tunnel_ip6)
             {
@@ -260,8 +255,9 @@ ah_encrypt_inline (vlib_main_t * vm,
               oh6_0->ip6.dst_address.as_u64[1] =
                 sa0->tunnel_dst_addr.ip6.as_u64[1];
 
-              next0 = AH_ENCRYPT_NEXT_IP6_LOOKUP;
-              vnet_buffer (i_b0)->sw_if_index[VLIB_TX] = (u32) ~ 0;
+              next0 = sa0->dpo[IPSEC_PROTOCOL_AH].dpoi_next_node;
+              vnet_buffer (i_b0)->ip.adj_index[VLIB_TX] =
+                sa0->dpo[IPSEC_PROTOCOL_AH].dpoi_index;
             }
 
           u8 sig[64];
@@ -271,8 +267,8 @@ ah_encrypt_inline (vlib_main_t * vm,
             sizeof (ah_header_t);
           clib_memset (digest, 0, icv_size);
 
-          unsigned size = hmac_calc (sa0->integ_alg, sa0->integ_key,
-                                     sa0->integ_key_len,
+          unsigned size = hmac_calc (sa0->integ_alg, sa0->integ_key.data,
+                                     sa0->integ_key.len,
                                      vlib_buffer_get_current (i_b0),
                                      i_b0->current_length, sig,
                                      sa0->use_esn, sa0->seq_hi);
@@ -306,6 +302,7 @@ ah_encrypt_inline (vlib_main_t * vm,
               tr->spi = sa0->spi;
               tr->seq = sa0->seq - 1;
               tr->integ_alg = sa0->integ_alg;
+              tr->sa_index = sa_index0;
             }
 
           vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
@@ -314,28 +311,22 @@ ah_encrypt_inline (vlib_main_t * vm,
         }
       vlib_put_next_frame (vm, node, next_index, n_left_to_next);
     }
-  if (is_ip6)
-    vlib_node_increment_counter (vm, ah6_encrypt_node.index,
-                                 AH_ENCRYPT_ERROR_RX_PKTS,
-                                 from_frame->n_vectors);
-  else
-    vlib_node_increment_counter (vm, ah4_encrypt_node.index,
-                                 AH_ENCRYPT_ERROR_RX_PKTS,
-                                 from_frame->n_vectors);
+  vlib_node_increment_counter (vm, node->node_index,
+                               AH_ENCRYPT_ERROR_RX_PKTS,
+                               from_frame->n_vectors);
 
   return from_frame->n_vectors;
 }
 
-static uword
-ah4_encrypt_node_fn (vlib_main_t * vm,
-                     vlib_node_runtime_t * node, vlib_frame_t * from_frame)
+VLIB_NODE_FN (ah4_encrypt_node) (vlib_main_t * vm,
+                                 vlib_node_runtime_t * node,
+                                 vlib_frame_t * from_frame)
 {
   return ah_encrypt_inline (vm, node, from_frame, 0 /* is_ip6 */ );
 }
 
 /* *INDENT-OFF* */
 VLIB_REGISTER_NODE (ah4_encrypt_node) = {
-  .function = ah4_encrypt_node_fn,
   .name = "ah4-encrypt",
   .vector_size = sizeof (u32),
   .format_trace = format_ah_encrypt_trace,
@@ -353,18 +344,15 @@ VLIB_REGISTER_NODE (ah4_encrypt_node) = {
 };
 /* *INDENT-ON* */
 
-VLIB_NODE_FUNCTION_MULTIARCH (ah4_encrypt_node, ah4_encrypt_node_fn);
-
-static uword
-ah6_encrypt_node_fn (vlib_main_t * vm,
-                     vlib_node_runtime_t * node, vlib_frame_t * from_frame)
+VLIB_NODE_FN (ah6_encrypt_node) (vlib_main_t * vm,
+                                 vlib_node_runtime_t * node,
+                                 vlib_frame_t * from_frame)
 {
   return ah_encrypt_inline (vm, node, from_frame, 1 /* is_ip6 */ );
 }
 
 /* *INDENT-OFF* */
 VLIB_REGISTER_NODE (ah6_encrypt_node) = {
-  .function = ah6_encrypt_node_fn,
   .name = "ah6-encrypt",
   .vector_size = sizeof (u32),
   .format_trace = format_ah_encrypt_trace,
@@ -382,7 +370,6 @@ VLIB_REGISTER_NODE (ah6_encrypt_node) = {
 };
 /* *INDENT-ON* */
 
-VLIB_NODE_FUNCTION_MULTIARCH (ah6_encrypt_node, ah6_encrypt_node_fn);
 
 /*
  * fd.io coding-style-patch-verification: ON
 *
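
The counter change in ah_encrypt_inline replaces the per-SA accumulator sa0->total_data_size with vlib_increment_combined_counter on ipsec_sa_counters, indexed by thread and SA, counting packets and bytes together. The standalone sketch below illustrates that layout; the type and array sizes are hypothetical, not VPP's vlib_combined_counter_main_t internals. Each worker writes only its own lane, so no atomics are needed, and a reader sums across threads.

#include <stdint.h>
#include <stdio.h>

#define N_THREADS 4
#define N_SAS     8

/* Hypothetical stand-in for a combined (packets, bytes) counter:
   one pair per thread per SA, so each worker updates its own lane
   without atomics or cache-line bouncing. */
typedef struct { uint64_t packets, bytes; } combined_counter_t;
static combined_counter_t sa_counters[N_THREADS][N_SAS];

static void
increment_combined_counter (uint32_t thread_index, uint32_t sa_index,
                            uint64_t n_packets, uint64_t n_bytes)
{
  sa_counters[thread_index][sa_index].packets += n_packets;
  sa_counters[thread_index][sa_index].bytes += n_bytes;
}

/* A reader (e.g. a CLI stats dump) folds the per-thread lanes. */
static combined_counter_t
sum_combined_counter (uint32_t sa_index)
{
  combined_counter_t sum = { 0, 0 };
  for (uint32_t t = 0; t < N_THREADS; t++)
    {
      sum.packets += sa_counters[t][sa_index].packets;
      sum.bytes += sa_counters[t][sa_index].bytes;
    }
  return sum;
}

int
main (void)
{
  /* Mimics the datapath call site: 1 packet, current_length bytes. */
  increment_combined_counter (0, 3, 1, 64);
  increment_combined_counter (1, 3, 1, 1500);
  combined_counter_t c = sum_combined_counter (3);
  printf ("sa 3: %llu pkts, %llu bytes\n",
          (unsigned long long) c.packets, (unsigned long long) c.bytes);
  return 0;
}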
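In the tunnel-mode hunks, output no longer bounces through AH_ENCRYPT_NEXT_IP4_LOOKUP / AH_ENCRYPT_NEXT_IP6_LOOKUP: the SA carries a stacked DPO, so the next graph node and the index that node consumes are resolved once when the SA is created, then copied per packet into next0 and ip.adj_index[VLIB_TX]. A toy sketch of the pattern follows; the struct shapes are simplified stand-ins that mirror the dpoi_next_node / dpoi_index fields used in the patch, not VPP's real buffer metadata.

#include <stdint.h>
#include <stdio.h>

typedef uint32_t u32;

/* Simplified stand-in for a DPO id: a precomputed graph edge plus
   an index meaningful to the child node. */
typedef struct
{
  u32 dpoi_next_node; /* edge from this node to the child */
  u32 dpoi_index;     /* e.g. adjacency index the child expects */
} dpo_id_t;

typedef struct
{
  dpo_id_t dpo; /* stacked at SA-add time, not per packet */
} ipsec_sa_t;

typedef struct
{
  u32 next0;        /* where the packet goes next */
  u32 tx_adj_index; /* models vnet_buffer (b)->ip.adj_index[VLIB_TX] */
} buffer_md_t;

/* Datapath: no IP lookup, just copy the precomputed result. */
static void
forward_via_dpo (const ipsec_sa_t * sa, buffer_md_t * b)
{
  b->next0 = sa->dpo.dpoi_next_node;
  b->tx_adj_index = sa->dpo.dpoi_index;
}

int
main (void)
{
  /* At SA creation, the control plane stacks the DPO once. */
  ipsec_sa_t sa = { .dpo = { .dpoi_next_node = 5, .dpoi_index = 42 } };
  buffer_md_t b;
  forward_via_dpo (&sa, &b);
  printf ("next node edge %u, adj index %u\n", b.next0, b.tx_adj_index);
  return 0;
}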
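Finally, the move from static node functions plus VLIB_NODE_FUNCTION_MULTIARCH to VLIB_NODE_FN also drops the explicit .function member from the registrations: the macro ties the multiarch-compiled function to the node and the best ISA variant is chosen at startup. The sketch below illustrates only that dispatch idea, in plain C with GCC/Clang's x86 __builtin_cpu_supports; it is not VPP's actual macro expansion, and the node shapes are hypothetical.

#include <stdio.h>

typedef unsigned long uword;

/* Two hand-written variants standing in for compiler-generated
   multiarch clones (e.g. _avx2 suffixed functions). */
static uword
node_fn_default (int n_packets)
{
  return (uword) n_packets; /* scalar path */
}

static uword
node_fn_avx2 (int n_packets)
{
  return (uword) n_packets; /* would use 256-bit vector ops */
}

/* Registration struct: dispatch fills in .function once at startup,
   so the datapath pays only an indirect call per frame. */
typedef struct
{
  uword (*function) (int n_packets);
  const char *name;
} node_registration_t;

static node_registration_t ah4_encrypt_node = { .name = "ah4-encrypt" };

static void
select_node_fn (node_registration_t * r)
{
  if (__builtin_cpu_supports ("avx2"))
    r->function = node_fn_avx2;
  else
    r->function = node_fn_default;
}

int
main (void)
{
  select_node_fn (&ah4_encrypt_node);
  printf ("%s processed %lu packets\n", ah4_encrypt_node.name,
          (unsigned long) ah4_encrypt_node.function (256));
  return 0;
}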