u32 sa_index;
u32 spi;
u32 seq;
+ u32 sa_seq_hi; /* high-order 32 bits of the SA's extended sequence number */
u8 udp_encap;
ipsec_crypto_alg_t crypto_alg;
ipsec_integ_alg_t integ_alg;
CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
esp_encrypt_trace_t *t = va_arg (*args, esp_encrypt_trace_t *);
- s = format (s, "esp: sa-index %d spi %u seq %u crypto %U integrity %U%s",
- t->sa_index, t->spi, t->seq,
- format_ipsec_crypto_alg, t->crypto_alg,
- format_ipsec_integ_alg, t->integ_alg,
- t->udp_encap ? " udp-encap-enabled" : "");
+ s =
+ format (s,
+ "esp: sa-index %d spi %u (0x%08x) seq %u sa-seq-hi %u crypto %U integrity %U%s",
+ t->sa_index, t->spi, t->spi, t->seq, t->sa_seq_hi,
+ format_ipsec_crypto_alg,
+ t->crypto_alg, format_ipsec_integ_alg, t->integ_alg,
+ t->udp_encap ? " udp-encap-enabled" : "");
return s;
}
/* pad packet in input buffer */
static_always_inline u8 *
-esp_add_footer_and_icv (vlib_buffer_t * b, u8 block_size, u8 icv_sz)
+esp_add_footer_and_icv (vlib_buffer_t * b, u8 block_size, u8 icv_sz,
+ u16 * next, vlib_node_runtime_t * node,
+ u16 buffer_data_size)
{
static const u8 pad_data[ESP_MAX_BLOCK_SIZE] = {
0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
esp_footer_t *f = (esp_footer_t *) (vlib_buffer_get_current (b) +
new_length - sizeof (esp_footer_t));
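+ /* make sure the padded payload plus ICV still fits in the buffer's data
+ * area; if not, flag a trailer-space error and drop the packet */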
+ if (b->current_data + new_length + icv_sz > buffer_data_size)
+ {
+ b->error = node->errors[ESP_ENCRYPT_ERROR_NO_TRAILER_SPACE];
+ next[0] = ESP_ENCRYPT_NEXT_DROP;
+ return 0;
+ }
+
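+ /* copy only the pad bytes actually required (clamped to the size of
+ * pad_data) instead of unconditionally copying ESP_MAX_BLOCK_SIZE bytes */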
if (pad_bytes)
- clib_memcpy_fast ((u8 *) f - pad_bytes, pad_data, ESP_MAX_BLOCK_SIZE);
+ {
+ ASSERT (pad_bytes <= ESP_MAX_BLOCK_SIZE);
+ pad_bytes = clib_min (ESP_MAX_BLOCK_SIZE, pad_bytes);
+ clib_memcpy_fast ((u8 *) f - pad_bytes, pad_data, pad_bytes);
+ }
f->pad_length = pad_bytes;
b->current_length = new_length + icv_sz;
return &f->next_header;
}
-static_always_inline int
-esp_trailer_icv_overflow (vlib_node_runtime_t * node, vlib_buffer_t * b,
- u16 * next, u16 buffer_data_size)
-{
- if (b->current_data + b->current_length <= buffer_data_size)
- return 0;
-
- b->current_length -= buffer_data_size - b->current_data;
- b->error = node->errors[ESP_ENCRYPT_ERROR_NO_TRAILER_SPACE];
- next[0] = ESP_ENCRYPT_NEXT_DROP;
- return 1;
-}
-
static_always_inline void
esp_process_ops (vlib_main_t * vm, vlib_node_runtime_t * node,
vnet_crypto_op_t * ops, vlib_buffer_t * b[], u16 * nexts)
if (sa_index0 != current_sa_index)
{
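+ /* flush the packet/byte counters accumulated for the previous SA before
+ * switching to the new one */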
+ if (current_sa_packets)
+ vlib_increment_combined_counter (&ipsec_sa_counters, thread_index,
+ current_sa_index,
+ current_sa_packets,
+ current_sa_bytes);
+ current_sa_packets = current_sa_bytes = 0;
+
sa0 = pool_elt_at_index (im->sad, sa_index0);
current_sa_index = sa_index0;
- vlib_increment_combined_counter (&ipsec_sa_counters, thread_index,
- sa_index0, current_sa_packets,
- current_sa_bytes);
- current_sa_packets = current_sa_bytes = 0;
spi = clib_net_to_host_u32 (sa0->spi);
block_sz = sa0->crypto_block_size;
icv_sz = sa0->integ_icv_size;
if (ipsec_sa_is_set_IS_TUNNEL (sa0))
{
payload = vlib_buffer_get_current (b[0]);
- next_hdr_ptr = esp_add_footer_and_icv (b[0], block_sz, icv_sz);
- payload_len = b[0]->current_length;
-
- if (esp_trailer_icv_overflow (node, b[0], next, buffer_data_size))
+ next_hdr_ptr = esp_add_footer_and_icv (b[0], block_sz, icv_sz,
+ next, node,
+ buffer_data_size);
+ if (!next_hdr_ptr)
goto trace;
+ payload_len = b[0]->current_length;
/* ESP header */
hdr_len += sizeof (*esp);
esp_update_ip4_hdr (ip4, len, /* is_transport */ 0, 0);
}
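+ /* the SA now stores a single output DPO rather than one per IPsec protocol */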
- dpo = sa0->dpo + IPSEC_PROTOCOL_ESP;
+ dpo = &sa0->dpo;
if (!is_tun)
{
next[0] = dpo->dpoi_next_node;
vlib_buffer_advance (b[0], ip_len);
payload = vlib_buffer_get_current (b[0]);
- next_hdr_ptr = esp_add_footer_and_icv (b[0], block_sz, icv_sz);
- payload_len = b[0]->current_length;
-
- if (esp_trailer_icv_overflow (node, b[0], next, buffer_data_size))
+ next_hdr_ptr = esp_add_footer_and_icv (b[0], block_sz, icv_sz,
+ next, node,
+ buffer_data_size);
+ if (!next_hdr_ptr)
goto trace;
+ payload_len = b[0]->current_length;
/* ESP header */
hdr_len += sizeof (*esp);
ip_hdr = payload - hdr_len;
/* L2 header */
- l2_len = vnet_buffer (b[0])->ip.save_rewrite_length;
- hdr_len += l2_len;
- l2_hdr = payload - hdr_len;
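+ /* the saved L2 rewrite is only present (and only needs restoring) when this
+ * is not the tunnel feature node; for is_tun there is no L2 header to copy */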
+ if (!is_tun)
+ {
+ l2_len = vnet_buffer (b[0])->ip.save_rewrite_length;
+ hdr_len += l2_len;
+ l2_hdr = payload - hdr_len;
+
+ /* copy l2 and ip header */
+ clib_memcpy_le32 (l2_hdr, old_ip_hdr - l2_len, l2_len);
+ }
+ else
+ l2_len = 0;
- /* copy l2 and ip header */
- clib_memcpy_le32 (l2_hdr, old_ip_hdr - l2_len, l2_len);
clib_memcpy_le64 (ip_hdr, old_ip_hdr, ip_len);
if (is_ip6)
esp_update_ip4_hdr (ip4, len, /* is_transport */ 1, 0);
}
- next[0] = ESP_ENCRYPT_NEXT_INTERFACE_OUTPUT;
+ if (!is_tun)
+ next[0] = ESP_ENCRYPT_NEXT_INTERFACE_OUTPUT;
}
esp->spi = spi;
sizeof (*tr));
tr->sa_index = sa_index0;
tr->spi = sa0->spi;
- tr->seq = sa0->seq - 1;
+ tr->seq = sa0->seq;
+ tr->sa_seq_hi = sa0->seq_hi;
tr->udp_encap = ipsec_sa_is_set_UDP_ENCAP (sa0);
tr->crypto_alg = sa0->crypto_alg;
tr->integ_alg = sa0->integ_alg;
.node_name = "esp4-encrypt-tun",
.runs_before = VNET_FEATURES ("adj-midchain-tx"),
};
+
+VNET_FEATURE_INIT (esp6o4_encrypt_tun_feat_node, static) =
+{
+ .arc_name = "ip6-output",
+ .node_name = "esp4-encrypt-tun",
+ .runs_before = VNET_FEATURES ("adj-midchain-tx"),
+};
+
+VNET_FEATURE_INIT (esp4_ethernet_encrypt_tun_feat_node, static) =
+{
+ .arc_name = "ethernet-output",
+ .node_name = "esp4-encrypt-tun",
+ .runs_before = VNET_FEATURES ("adj-midchain-tx", "adj-midchain-tx-no-count"),
+};
/* *INDENT-ON* */
VLIB_NODE_FN (esp6_encrypt_tun_node) (vlib_main_t * vm,
.node_name = "esp6-encrypt-tun",
.runs_before = VNET_FEATURES ("adj-midchain-tx"),
};
+
+VNET_FEATURE_INIT (esp4o6_encrypt_tun_feat_node, static) =
+{
+ .arc_name = "ip4-output",
+ .node_name = "esp6-encrypt-tun",
+ .runs_before = VNET_FEATURES ("adj-midchain-tx"),
+};
+
+/* *INDENT-ON* */
+
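+/* Drop-and-count nodes attached to the output feature arcs for SAs that
+ * provide no crypto/integrity protection: outbound packets reaching them are
+ * recorded as "Outbound ESP packets received" and dropped. */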
+typedef struct
+{
+ u32 sa_index;
+} esp_no_crypto_trace_t;
+
+static u8 *
+format_esp_no_crypto_trace (u8 * s, va_list * args)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+ esp_no_crypto_trace_t *t = va_arg (*args, esp_no_crypto_trace_t *);
+
+ s = format (s, "esp-no-crypto: sa-index %u", t->sa_index);
+
+ return s;
+}
+
+enum
+{
+ ESP_NO_CRYPTO_NEXT_DROP,
+ ESP_NO_CRYPTO_N_NEXT,
+};
+
+enum
+{
+ ESP_NO_CRYPTO_ERROR_RX_PKTS,
+};
+
+static char *esp_no_crypto_error_strings[] = {
+ "Outbound ESP packets received",
+};
+
+always_inline uword
+esp_no_crypto_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
+ vlib_frame_t * frame)
+{
+ vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;
+ u16 nexts[VLIB_FRAME_SIZE], *next = nexts;
+ u32 *from = vlib_frame_vector_args (frame);
+ u32 n_left = frame->n_vectors;
+
+ vlib_get_buffers (vm, from, b, n_left);
+
+ while (n_left > 0)
+ {
+ u32 next0;
+ u32 sa_index0;
+
+ /* packets are always going to be dropped, but get the sa_index */
+ sa_index0 = *(u32 *) vnet_feature_next_with_data (&next0, b[0],
+ sizeof (sa_index0));
+
+ next[0] = ESP_NO_CRYPTO_NEXT_DROP;
+
+ if (PREDICT_FALSE (b[0]->flags & VLIB_BUFFER_IS_TRACED))
+ {
+ esp_no_crypto_trace_t *tr = vlib_add_trace (vm, node, b[0],
+ sizeof (*tr));
+ tr->sa_index = sa_index0;
+ }
+
+ n_left -= 1;
+ next += 1;
+ b += 1;
+ }
+
+ vlib_node_increment_counter (vm, node->node_index,
+ ESP_NO_CRYPTO_ERROR_RX_PKTS, frame->n_vectors);
+
+ vlib_buffer_enqueue_to_next (vm, node, from, nexts, frame->n_vectors);
+
+ return frame->n_vectors;
+}
+
+VLIB_NODE_FN (esp4_no_crypto_tun_node) (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * from_frame)
+{
+ return esp_no_crypto_inline (vm, node, from_frame);
+}
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (esp4_no_crypto_tun_node) =
+{
+ .name = "esp4-no-crypto",
+ .vector_size = sizeof (u32),
+ .format_trace = format_esp_no_crypto_trace,
+ .n_errors = ARRAY_LEN(esp_no_crypto_error_strings),
+ .error_strings = esp_no_crypto_error_strings,
+ .n_next_nodes = ESP_NO_CRYPTO_N_NEXT,
+ .next_nodes = {
+ [ESP_NO_CRYPTO_NEXT_DROP] = "ip4-drop",
+ },
+};
+
+VNET_FEATURE_INIT (esp4_no_crypto_tun_feat_node, static) =
+{
+ .arc_name = "ip4-output",
+ .node_name = "esp4-no-crypto",
+ .runs_before = VNET_FEATURES ("adj-midchain-tx"),
+};
+/* *INDENT-ON* */
+
+VLIB_NODE_FN (esp6_no_crypto_tun_node) (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * from_frame)
+{
+ return esp_no_crypto_inline (vm, node, from_frame);
+}
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (esp6_no_crypto_tun_node) =
+{
+ .name = "esp6-no-crypto",
+ .vector_size = sizeof (u32),
+ .format_trace = format_esp_no_crypto_trace,
+ .n_errors = ARRAY_LEN(esp_no_crypto_error_strings),
+ .error_strings = esp_no_crypto_error_strings,
+ .n_next_nodes = ESP_NO_CRYPTO_N_NEXT,
+ .next_nodes = {
+ [ESP_NO_CRYPTO_NEXT_DROP] = "ip6-drop",
+ },
+};
+
+VNET_FEATURE_INIT (esp6_no_crypto_tun_feat_node, static) =
+{
+ .arc_name = "ip6-output",
+ .node_name = "esp6-no-crypto",
+ .runs_before = VNET_FEATURES ("adj-midchain-tx"),
+};
/* *INDENT-ON* */
/*