#include <vnet/ipsec/ipsec.h>
#include <vnet/ipsec/esp.h>
#include <vnet/ipsec/ah.h>
+#include <vnet/ipsec/ipsec.api_enum.h>
+#include <vnet/tunnel/tunnel_dp.h>
+/* Per-packet dispositions for the ah[46]-encrypt nodes: drop on error,
+ * hand off to the SA owner thread, or continue toward interface-output.
+ * (The registrations below override the generic node names per AF.) */
#define foreach_ah_encrypt_next \
- _ (DROP, "error-drop") \
- _ (IP4_LOOKUP, "ip4-lookup") \
- _ (IP6_LOOKUP, "ip6-lookup") \
+ _ (DROP, "error-drop") \
+ _ (HANDOFF, "handoff") \
  _ (INTERFACE_OUTPUT, "interface-output")
AH_ENCRYPT_N_NEXT,
} ah_encrypt_next_t;
-#define foreach_ah_encrypt_error \
- _(RX_PKTS, "AH pkts received") \
- _(CRYPTO_ENGINE_ERROR, "crypto engine error (packet dropped)") \
- _(SEQ_CYCLED, "sequence number cycled")
-
-
-typedef enum
-{
-#define _(sym,str) AH_ENCRYPT_ERROR_##sym,
- foreach_ah_encrypt_error
-#undef _
- AH_ENCRYPT_N_ERROR,
-} ah_encrypt_error_t;
-
-static char *ah_encrypt_error_strings[] = {
-#define _(sym,string) string,
- foreach_ah_encrypt_error
-#undef _
-};
-
typedef struct
{
u32 sa_index;
CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
ah_encrypt_trace_t *t = va_arg (*args, ah_encrypt_trace_t *);
- s = format (s, "ah: sa-index %d spi %u seq %u:%u integrity %U",
- t->sa_index, t->spi, t->seq_hi, t->seq_lo,
+ s = format (s, "ah: sa-index %d spi %u (0x%08x) seq %u:%u integrity %U",
+ t->sa_index, t->spi, t->spi, t->seq_hi, t->seq_lo,
format_ipsec_integ_alg, t->integ_alg);
return s;
}
if (op->status != VNET_CRYPTO_OP_STATUS_COMPLETED)
{
u32 bi = op->user_data;
- b[bi]->error = node->errors[AH_ENCRYPT_ERROR_CRYPTO_ENGINE_ERROR];
- nexts[bi] = AH_ENCRYPT_NEXT_DROP;
+ ah_encrypt_set_next_index (b[bi], node, vm->thread_index,
+ AH_ENCRYPT_ERROR_CRYPTO_ENGINE_ERROR, bi,
+ nexts, AH_ENCRYPT_NEXT_DROP,
+ vnet_buffer (b[bi])->ipsec.sad_index);
n_fail--;
}
op++;
{
union
{
+ /* Variable fields in the IP header not covered by the AH
+ * integrity check */
struct
{
- u8 hop_limit;
u32 ip_version_traffic_class_and_flow_label;
+ u8 hop_limit;
};
-
struct
{
u8 ttl;
u8 tos;
};
};
- i16 current_data;
u8 skip;
+ i16 current_data;
u32 sa_index;
} ah_encrypt_packet_data_t;
{
if (current_sa_index != ~0)
vlib_increment_combined_counter (&ipsec_sa_counters, thread_index,
- current_sa_index,
- current_sa_pkts,
+ current_sa_index, current_sa_pkts,
current_sa_bytes);
current_sa_index = vnet_buffer (b[0])->ipsec.sad_index;
- sa0 = pool_elt_at_index (im->sad, current_sa_index);
+ sa0 = ipsec_sa_get (current_sa_index);
current_sa_bytes = current_sa_pkts = 0;
+ vlib_prefetch_combined_counter (&ipsec_sa_counters, thread_index,
+ current_sa_index);
}
pd->sa_index = current_sa_index;
next[0] = AH_ENCRYPT_NEXT_DROP;
+ if (PREDICT_FALSE ((u16) ~0 == sa0->thread_index))
+ {
+ /* this is the first packet to use this SA, claim the SA
+ * for this thread. this could happen simultaneously on
+ * another thread */
+ clib_atomic_cmp_and_swap (&sa0->thread_index, ~0,
+ ipsec_sa_assign_thread (thread_index));
+ }
+
+ if (PREDICT_TRUE (thread_index != sa0->thread_index))
+ {
+ vnet_buffer (b[0])->ipsec.thread_index = sa0->thread_index;
+ next[0] = AH_ENCRYPT_NEXT_HANDOFF;
+ goto next;
+ }
+
if (PREDICT_FALSE (esp_seq_advance (sa0)))
{
- b[0]->error = node->errors[AH_ENCRYPT_ERROR_SEQ_CYCLED];
+ ah_encrypt_set_next_index (b[0], node, vm->thread_index,
+ AH_ENCRYPT_ERROR_SEQ_CYCLED, 0, next,
+ AH_ENCRYPT_NEXT_DROP, current_sa_index);
pd->skip = 1;
goto next;
}
ssize_t adv;
ih0 = vlib_buffer_get_current (b[0]);
- pd->ttl = ih0->ip4.ttl;
- pd->tos = ih0->ip4.tos;
if (PREDICT_TRUE (ipsec_sa_is_set_IS_TUNNEL (sa0)))
{
ip_hdr_size = sizeof (ip6_header_t);
oh6_0 = vlib_buffer_get_current (b[0]);
pd->current_data = b[0]->current_data;
-
pd->hop_limit = ih6_0->ip6.hop_limit;
- pd->ip_version_traffic_class_and_flow_label =
+
+ oh6_0->ip6.ip_version_traffic_class_and_flow_label =
ih6_0->ip6.ip_version_traffic_class_and_flow_label;
+
+ if (PREDICT_FALSE (ipsec_sa_is_set_IS_TUNNEL (sa0)))
+ {
+ ip6_set_dscp_network_order (&oh6_0->ip6, sa0->tunnel.t_dscp);
+ tunnel_encap_fixup_6o6 (sa0->tunnel_flags, &ih6_0->ip6,
+ &oh6_0->ip6);
+ }
+ pd->ip_version_traffic_class_and_flow_label =
+ oh6_0->ip6.ip_version_traffic_class_and_flow_label;
+
if (PREDICT_TRUE (ipsec_sa_is_set_IS_TUNNEL (sa0)))
{
next_hdr_type = IP_PROTOCOL_IPV6;
{
ip_hdr_size = sizeof (ip4_header_t);
oh0 = vlib_buffer_get_current (b[0]);
- clib_memset (oh0, 0, sizeof (ip4_and_ah_header_t));
+ pd->ttl = ih0->ip4.ttl;
+
+ if (PREDICT_FALSE (ipsec_sa_is_set_IS_TUNNEL (sa0)))
+ {
+ if (sa0->tunnel.t_dscp)
+ pd->tos = sa0->tunnel.t_dscp << 2;
+ else
+ {
+ pd->tos = ih0->ip4.tos;
+
+ if (!(sa0->tunnel_flags &
+ TUNNEL_ENCAP_DECAP_FLAG_ENCAP_COPY_DSCP))
+ pd->tos &= 0x3;
+ if (!(sa0->tunnel_flags &
+ TUNNEL_ENCAP_DECAP_FLAG_ENCAP_COPY_ECN))
+ pd->tos &= 0xfc;
+ }
+ }
+ else
+ {
+ pd->tos = ih0->ip4.tos;
+ }
+
pd->current_data = b[0]->current_data;
+ clib_memset (oh0, 0, sizeof (ip4_and_ah_header_t));
if (PREDICT_TRUE (ipsec_sa_is_set_IS_TUNNEL (sa0)))
{
{
clib_memcpy_fast (&oh0->ip4.address_pair,
&sa0->ip4_hdr.address_pair,
- sizeof (ip4_address_t));
+ sizeof (ip4_address_pair_t));
next[0] = sa0->dpo.dpoi_next_node;
vnet_buffer (b[0])->ip.adj_index[VLIB_TX] = sa0->dpo.dpoi_index;
}
next:
+ if (PREDICT_FALSE (b[0]->flags & VLIB_BUFFER_IS_TRACED))
+ {
+ sa0 = ipsec_sa_get (pd->sa_index);
+ ah_encrypt_trace_t *tr =
+ vlib_add_trace (vm, node, b[0], sizeof (*tr));
+ tr->spi = sa0->spi;
+ tr->seq_lo = sa0->seq;
+ tr->seq_hi = sa0->seq_hi;
+ tr->integ_alg = sa0->integ_alg;
+ tr->sa_index = pd->sa_index;
+ }
+
n_left -= 1;
next += 1;
pd += 1;
while (n_left)
{
if (pd->skip)
- goto trace;
+ goto next_pkt;
if (is_ip6)
{
oh0->ip4.checksum = ip4_header_checksum (&oh0->ip4);
}
- trace:
- if (PREDICT_FALSE (b[0]->flags & VLIB_BUFFER_IS_TRACED))
- {
- sa0 = vec_elt_at_index (im->sad, pd->sa_index);
- ah_encrypt_trace_t *tr =
- vlib_add_trace (vm, node, b[0], sizeof (*tr));
- tr->spi = sa0->spi;
- tr->seq_lo = sa0->seq;
- tr->seq_hi = sa0->seq_hi;
- tr->integ_alg = sa0->integ_alg;
- tr->sa_index = pd->sa_index;
- }
-
+ next_pkt:
n_left -= 1;
next += 1;
pd += 1;
return ah_encrypt_inline (vm, node, from_frame, 0 /* is_ip6 */ );
}
-/* *INDENT-OFF* */
+/* ah4-encrypt graph node registration. Errors are now reported through
+ * counter-based ah_encrypt_error_counters (from ipsec.api_enum.h) instead
+ * of plain strings; drops go to ip4-drop, and packets belonging to an SA
+ * owned by another thread take ah4-encrypt-handoff. */
VLIB_REGISTER_NODE (ah4_encrypt_node) = {
  .name = "ah4-encrypt",
  .vector_size = sizeof (u32),
  .format_trace = format_ah_encrypt_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
-  .n_errors = ARRAY_LEN(ah_encrypt_error_strings),
-  .error_strings = ah_encrypt_error_strings,
+  .n_errors = AH_ENCRYPT_N_ERROR,
+  .error_counters = ah_encrypt_error_counters,
  .n_next_nodes = AH_ENCRYPT_N_NEXT,
  .next_nodes = {
-#define _(s,n) [AH_ENCRYPT_NEXT_##s] = n,
-    foreach_ah_encrypt_next
-#undef _
+    [AH_ENCRYPT_NEXT_DROP] = "ip4-drop",
+    [AH_ENCRYPT_NEXT_HANDOFF] = "ah4-encrypt-handoff",
+    [AH_ENCRYPT_NEXT_INTERFACE_OUTPUT] = "interface-output",
  },
};
-/* *INDENT-ON* */
VLIB_NODE_FN (ah6_encrypt_node) (vlib_main_t * vm,
vlib_node_runtime_t * node,
return ah_encrypt_inline (vm, node, from_frame, 1 /* is_ip6 */ );
}
-/* *INDENT-OFF* */
+/* ah6-encrypt graph node registration. Mirrors ah4-encrypt but with
+ * IPv6-specific next nodes: drops to ip6-drop, cross-thread packets to
+ * ah6-encrypt-handoff. Uses counter-based error reporting. */
VLIB_REGISTER_NODE (ah6_encrypt_node) = {
  .name = "ah6-encrypt",
  .vector_size = sizeof (u32),
  .format_trace = format_ah_encrypt_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
-  .n_errors = ARRAY_LEN(ah_encrypt_error_strings),
-  .error_strings = ah_encrypt_error_strings,
+  .n_errors = AH_ENCRYPT_N_ERROR,
+  .error_counters = ah_encrypt_error_counters,
  .n_next_nodes = AH_ENCRYPT_N_NEXT,
  .next_nodes = {
-#define _(s,n) [AH_ENCRYPT_NEXT_##s] = n,
-    foreach_ah_encrypt_next
-#undef _
+    [AH_ENCRYPT_NEXT_DROP] = "ip6-drop",
+    [AH_ENCRYPT_NEXT_HANDOFF] = "ah6-encrypt-handoff",
+    [AH_ENCRYPT_NEXT_INTERFACE_OUTPUT] = "interface-output",
  },
};
-/* *INDENT-ON* */
+
+
+#ifndef CLIB_MARCH_VARIANT
+
+/* One-time node init: allocate the frame queues targeting the ah4/ah6
+ * encrypt nodes, sized by the configured handoff queue size. These are
+ * presumably consumed by the ah[46]-encrypt-handoff nodes to move packets
+ * to the thread that owns the SA — TODO confirm against the handoff node
+ * implementation. Guarded by CLIB_MARCH_VARIANT so it is emitted only
+ * once, not per march variant. */
+static clib_error_t *
+ah_encrypt_init (vlib_main_t *vm)
+{
+  ipsec_main_t *im = &ipsec_main;
+
+  im->ah4_enc_fq_index = vlib_frame_queue_main_init (ah4_encrypt_node.index,
+						     im->handoff_queue_size);
+  im->ah6_enc_fq_index = vlib_frame_queue_main_init (ah6_encrypt_node.index,
+						     im->handoff_queue_size);
+
+  return 0;
+}
+
+VLIB_INIT_FUNCTION (ah_encrypt_init);
+
+#endif
/*
* fd.io coding-style-patch-verification: ON