#include <vnet/ipsec/ipsec.h>
#include <vnet/ipsec/esp.h>
#include <vnet/ipsec/ipsec_io.h>
+#include <vnet/ipsec/ipsec_punt.h>
/* Statistics (not really errors) */
#define foreach_ipsec_if_input_error \
_(RX, "good packets received") \
_(DISABLED, "ipsec packets received on disabled interface") \
-_(NO_TUNNEL, "no matching tunnel")
+_(NO_TUNNEL, "no matching tunnel") \
+_(SPI_0, "SPI 0")
static char *ipsec_if_input_error_strings[] = {
#define _(sym,string) string,
CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
ipsec_if_input_trace_t *t = va_arg (*args, ipsec_if_input_trace_t *);
- s = format (s, "IPSec: spi %u seq %u", t->spi, t->seq);
+ s = format (s, "IPSec: spi %u (0x%08x) seq %u", t->spi, t->spi, t->seq);
return s;
}
+/* Handle an IPv4 ESP packet that matched no configured tunnel.
+ *
+ * SPI 0 is reserved (never a valid SA identifier), so such packets get a
+ * dedicated error counter and punt reason, distinguishing UDP-encapsulated
+ * ESP from raw ESP via ip4->protocol.  All other unmatched SPIs are punted
+ * as "no such tunnel".  The buffer is rewound by 'offset' so the punt
+ * consumer sees the packet from the start of the IP header again.
+ * Returns the next-node index (always the punt node). */
+always_inline u16
+ipsec_ip4_if_no_tunnel (vlib_node_runtime_t * node,
+			vlib_buffer_t * b,
+			const esp_header_t * esp,
+			const ip4_header_t * ip4, u16 offset)
+{
+  if (PREDICT_FALSE (0 == esp->spi))
+    {
+      b->error = node->errors[IPSEC_IF_INPUT_ERROR_SPI_0];
+      /* UDP encap (NAT-T) and raw ESP get distinct punt reasons */
+      b->punt_reason =
+	ipsec_punt_reason[(ip4->protocol == IP_PROTOCOL_UDP ?
+			   IPSEC_PUNT_IP4_SPI_UDP_0 : IPSEC_PUNT_IP4_SPI_0)];
+    }
+  else
+    {
+      b->error = node->errors[IPSEC_IF_INPUT_ERROR_NO_TUNNEL];
+      b->punt_reason = ipsec_punt_reason[IPSEC_PUNT_IP4_NO_SUCH_TUNNEL];
+    }
+  /* undo the earlier advance to the ESP header before punting */
+  vlib_buffer_advance (b, -offset);
+  return IPSEC_INPUT_NEXT_PUNT;
+}
+
+/* Handle an IPv6 ESP packet that matched no configured tunnel.
+ *
+ * Mirrors ipsec_ip4_if_no_tunnel: SPI 0 is reserved and gets its own
+ * error counter and punt reason; any other unmatched SPI is punted as
+ * "no such tunnel".  The buffer is rewound by 'offset' so the punt
+ * consumer sees the packet from the start of the IP header again.
+ * Returns the next-node index (always the punt node). */
+always_inline u16
+ipsec_ip6_if_no_tunnel (vlib_node_runtime_t * node,
+			vlib_buffer_t * b,
+			const esp_header_t * esp, u16 offset)
+{
+  if (PREDICT_FALSE (0 == esp->spi))
+    {
+      /* charge the dedicated SPI 0 counter (was NO_TUNNEL, which both
+       * under-counted SPI_0 and over-counted NO_TUNNEL; the IPv4
+       * handler already uses SPI_0 here) */
+      b->error = node->errors[IPSEC_IF_INPUT_ERROR_SPI_0];
+      b->punt_reason = ipsec_punt_reason[IPSEC_PUNT_IP6_SPI_0];
+    }
+  else
+    {
+      b->error = node->errors[IPSEC_IF_INPUT_ERROR_NO_TUNNEL];
+      b->punt_reason = ipsec_punt_reason[IPSEC_PUNT_IP6_NO_SUCH_TUNNEL];
+    }
+  /* undo the earlier advance to the ESP header before punting */
+  vlib_buffer_advance (b, -offset);
+  return (IPSEC_INPUT_NEXT_PUNT);
+}
always_inline uword
ipsec_if_input_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
}
else
{
+ next[0] =
+ ipsec_ip6_if_no_tunnel (node, b[0], esp0, buf_adv0);
n_no_tunnel++;
- next[0] = IPSEC_INPUT_NEXT_DROP;
goto pkt1;
}
}
}
else
{
+ next[0] =
+ ipsec_ip4_if_no_tunnel (node, b[0], esp0, ip40, buf_adv0);
n_no_tunnel++;
- next[0] = IPSEC_INPUT_NEXT_DROP;
goto pkt1;
}
}
if (PREDICT_TRUE (t0->hw_if_index != ~0))
{
- vnet_buffer (b[0])->ipsec.flags = 0;
sw_if_index0 = t0->sw_if_index;
vnet_buffer (b[0])->sw_if_index[VLIB_RX] = sw_if_index0;
vlib_increment_combined_counter
(drop_counter, thread_index, sw_if_index0, 1, len0);
n_disabled++;
+ b[0]->error = node->errors[IPSEC_IF_INPUT_ERROR_DISABLED];
next[0] = IPSEC_INPUT_NEXT_DROP;
goto pkt1;
}
n_bytes = len0;
}
}
- else
- {
- vnet_buffer (b[0])->ipsec.flags = IPSEC_FLAG_IPSEC_GRE_TUNNEL;
- }
pkt1:
if (is_ip6)
}
else
{
+ next[1] =
+ ipsec_ip6_if_no_tunnel (node, b[1], esp1, buf_adv1);
n_no_tunnel++;
- next[1] = IPSEC_INPUT_NEXT_DROP;
goto trace1;
}
}
}
else
{
+ next[1] =
+ ipsec_ip4_if_no_tunnel (node, b[1], esp1, ip41, buf_adv1);
n_no_tunnel++;
- next[1] = IPSEC_INPUT_NEXT_DROP;
goto trace1;
}
}
if (PREDICT_TRUE (t1->hw_if_index != ~0))
{
- vnet_buffer (b[1])->ipsec.flags = 0;
sw_if_index1 = t1->sw_if_index;
vnet_buffer (b[1])->sw_if_index[VLIB_RX] = sw_if_index1;
vlib_increment_combined_counter
(drop_counter, thread_index, sw_if_index1, 1, len1);
n_disabled++;
+ b[1]->error = node->errors[IPSEC_IF_INPUT_ERROR_DISABLED];
next[1] = IPSEC_INPUT_NEXT_DROP;
goto trace1;
}
n_bytes = len1;
}
}
- else
- {
- vnet_buffer (b[1])->ipsec.flags = IPSEC_FLAG_IPSEC_GRE_TUNNEL;
- }
trace1:
if (PREDICT_FALSE (is_trace))
ip6_header_t *ip60;
esp_header_t *esp0;
u32 len0;
- u16 buf_adv0;
+ u16 buf_adv0, buf_rewind0;
u32 tid0;
ipsec_tunnel_if_t *t0;
ipsec4_tunnel_key_t key40;
(esp_header_t *) ((u8 *) ip40 + ip4_header_bytes (ip40) +
sizeof (udp_header_t));
buf_adv0 = 0;
+ buf_rewind0 = ip4_header_bytes (ip40) + sizeof (udp_header_t);
}
else
{
esp0 = (esp_header_t *) ((u8 *) ip40 + ip4_header_bytes (ip40));
- buf_adv0 = ip4_header_bytes (ip40);
+ buf_rewind0 = buf_adv0 = ip4_header_bytes (ip40);
}
}
}
else
{
+ next[0] =
+ ipsec_ip6_if_no_tunnel (node, b[0], esp0, buf_adv0);
n_no_tunnel++;
- next[0] = IPSEC_INPUT_NEXT_DROP;
goto trace00;
}
}
}
else
{
+ next[0] =
+ ipsec_ip4_if_no_tunnel (node, b[0], esp0, ip40,
+ buf_rewind0);
n_no_tunnel++;
- next[0] = IPSEC_INPUT_NEXT_DROP;
goto trace00;
}
}
if (PREDICT_TRUE (t0->hw_if_index != ~0))
{
- vnet_buffer (b[0])->ipsec.flags = 0;
sw_if_index0 = t0->sw_if_index;
vnet_buffer (b[0])->sw_if_index[VLIB_RX] = sw_if_index0;
vlib_increment_combined_counter
(drop_counter, thread_index, sw_if_index0, 1, len0);
n_disabled++;
+ b[0]->error = node->errors[IPSEC_IF_INPUT_ERROR_DISABLED];
next[0] = IPSEC_INPUT_NEXT_DROP;
goto trace00;
}
n_bytes = len0;
}
}
- else
- {
- vnet_buffer (b[0])->ipsec.flags = IPSEC_FLAG_IPSEC_GRE_TUNNEL;
- }
trace00:
if (PREDICT_FALSE (is_trace))
vlib_node_increment_counter (vm, node->node_index,
IPSEC_IF_INPUT_ERROR_RX,
- from_frame->n_vectors - n_disabled);
- vlib_node_increment_counter (vm, node->node_index,
- IPSEC_IF_INPUT_ERROR_DISABLED, n_disabled);
- vlib_node_increment_counter (vm, node->node_index,
- IPSEC_IF_INPUT_ERROR_NO_TUNNEL, n_no_tunnel);
+ from_frame->n_vectors - (n_disabled +
+ n_no_tunnel));
vlib_buffer_enqueue_to_next (vm, node, from, nexts, from_frame->n_vectors);