#include <vnet/vnet.h>
#include <vnet/api_errno.h>
#include <vnet/ip/ip.h>
+#include <vnet/l2/l2_input.h>
#include <vnet/ipsec/ipsec.h>
#include <vnet/ipsec/esp.h>
#include <vnet/ipsec/ipsec_io.h>
#include <vnet/ipsec/ipsec_tun.h>
+#include <vnet/gre/gre.h>
+
#define foreach_esp_decrypt_next \
_(DROP, "error-drop") \
_(IP4_INPUT, "ip4-input-no-checksum") \
-_(IP6_INPUT, "ip6-input")
+_(IP6_INPUT, "ip6-input") \
+_(L2_INPUT, "l2-input") \
+_(HANDOFF, "handoff")
#define _(v, s) ESP_DECRYPT_NEXT_##v,
typedef enum
_(OVERSIZED_HEADER, "buffer with oversized header (dropped)") \
_(NO_TAIL_SPACE, "no enough buffer tail space (dropped)") \
_(TUN_NO_PROTO, "no tunnel protocol") \
+ _(UNSUP_PAYLOAD, "unsupported payload") \
typedef enum
cpd.sa_index = current_sa_index;
}
+ if (PREDICT_FALSE (~0 == sa0->decrypt_thread_index))
+ {
+ /* this is the first packet to use this SA, claim the SA
+ * for this thread. this could happen simultaneously on
+ * another thread */
+ clib_atomic_cmp_and_swap (&sa0->decrypt_thread_index, ~0,
+ ipsec_sa_assign_thread (thread_index));
+ }
+
+ if (PREDICT_TRUE (thread_index != sa0->decrypt_thread_index))
+ {
+ next[0] = ESP_DECRYPT_NEXT_HANDOFF;
+ goto next;
+ }
+
/* store packet data for next round for easier prefetch */
pd->sa_data = cpd.sa_data;
pd->current_data = b[0]->current_data;
b += 1;
}
- vlib_increment_combined_counter (&ipsec_sa_counters, thread_index,
- current_sa_index, current_sa_pkts,
- current_sa_bytes);
+ if (PREDICT_TRUE (~0 != current_sa_index))
+ vlib_increment_combined_counter (&ipsec_sa_counters, thread_index,
+ current_sa_index, current_sa_pkts,
+ current_sa_bytes);
if ((n = vec_len (ptd->integ_ops)))
{
}
else
{
- next[0] = ESP_DECRYPT_NEXT_DROP;
- b[0]->error = node->errors[ESP_DECRYPT_ERROR_DECRYPTION_FAILED];
- goto trace;
+ if (is_tun && f->next_header == IP_PROTOCOL_GRE)
+ {
+ gre_header_t *gre;
+
+ b[0]->current_data = pd->current_data + adv;
+ b[0]->current_length = pd->current_length - adv - tail;
+
+ gre = vlib_buffer_get_current (b[0]);
+
+ vlib_buffer_advance (b[0], sizeof (*gre));
+
+ switch (clib_net_to_host_u16 (gre->protocol))
+ {
+ case GRE_PROTOCOL_teb:
+ vnet_update_l2_len (b[0]);
+ next[0] = ESP_DECRYPT_NEXT_L2_INPUT;
+ break;
+ case GRE_PROTOCOL_ip4:
+ next[0] = ESP_DECRYPT_NEXT_IP4_INPUT;
+ break;
+ case GRE_PROTOCOL_ip6:
+ next[0] = ESP_DECRYPT_NEXT_IP6_INPUT;
+ break;
+ default:
+ b[0]->error =
+ node->errors[ESP_DECRYPT_ERROR_UNSUP_PAYLOAD];
+ next[0] = ESP_DECRYPT_NEXT_DROP;
+ break;
+ }
+ }
+ else
+ {
+ next[0] = ESP_DECRYPT_NEXT_DROP;
+ b[0]->error = node->errors[ESP_DECRYPT_ERROR_UNSUP_PAYLOAD];
+ goto trace;
+ }
}
if (is_tun)
{
if (ipsec_sa_is_set_IS_PROTECT (sa0))
{
/*
- * Check that the reveal IP header matches that
- * of the tunnel we are protecting
+ * There are two encap possibilities
+ * 1) the tunnel and the SA are providing encap, i.e. it's
+ * MAC | SA-IP | TUN-IP | ESP | PAYLOAD
+ * implying the SA is in tunnel mode (on a tunnel interface)
+ * 2) only the tunnel provides encap
+ * MAC | TUN-IP | ESP | PAYLOAD
+ * implying the SA is in transport mode.
+ *
+ * For 2) we need only strip the tunnel encap and we're good,
+ * since the tunnel and crypto encap (in the tun-protect
+ * object) are the same and we verified above that these match.
+ * For 1) we need to strip the SA-IP outer headers to
+ * reveal the tunnel IP and then check that this matches
+ * the configured tunnel.
*/
const ipsec_tun_protect_t *itp;
- itp =
- ipsec_tun_protect_get (vnet_buffer (b[0])->
- ipsec.protect_index);
+ itp = ipsec_tun_protect_get
+ (vnet_buffer (b[0])->ipsec.protect_index);
+
if (PREDICT_TRUE (f->next_header == IP_PROTOCOL_IP_IN_IP))
{
const ip4_header_t *ip4;
.n_next_nodes = ESP_DECRYPT_N_NEXT,
.next_nodes = {
-#define _(s,n) [ESP_DECRYPT_NEXT_##s] = n,
- foreach_esp_decrypt_next
-#undef _
+ [ESP_DECRYPT_NEXT_DROP] = "ip4-drop",
+ [ESP_DECRYPT_NEXT_IP4_INPUT] = "ip4-input-no-checksum",
+ [ESP_DECRYPT_NEXT_IP6_INPUT] = "ip6-input",
+ [ESP_DECRYPT_NEXT_L2_INPUT] = "l2-input",
+ [ESP_DECRYPT_NEXT_HANDOFF] = "esp4-decrypt-handoff",
},
};
.n_next_nodes = ESP_DECRYPT_N_NEXT,
.next_nodes = {
-#define _(s,n) [ESP_DECRYPT_NEXT_##s] = n,
- foreach_esp_decrypt_next
-#undef _
+ [ESP_DECRYPT_NEXT_DROP] = "ip6-drop",
+ [ESP_DECRYPT_NEXT_IP4_INPUT] = "ip4-input-no-checksum",
+ [ESP_DECRYPT_NEXT_IP6_INPUT] = "ip6-input",
+ [ESP_DECRYPT_NEXT_L2_INPUT] = "l2-input",
+ [ESP_DECRYPT_NEXT_HANDOFF]= "esp6-decrypt-handoff",
},
};
.type = VLIB_NODE_TYPE_INTERNAL,
.n_errors = ARRAY_LEN(esp_decrypt_error_strings),
.error_strings = esp_decrypt_error_strings,
- .sibling_of = "esp4-decrypt",
+ .n_next_nodes = ESP_DECRYPT_N_NEXT,
+ .next_nodes = {
+ [ESP_DECRYPT_NEXT_DROP] = "ip4-drop",
+ [ESP_DECRYPT_NEXT_IP4_INPUT] = "ip4-input-no-checksum",
+ [ESP_DECRYPT_NEXT_IP6_INPUT] = "ip6-input",
+ [ESP_DECRYPT_NEXT_L2_INPUT] = "l2-input",
+ [ESP_DECRYPT_NEXT_HANDOFF] = "esp4-decrypt-tun-handoff",
+ },
};
VLIB_REGISTER_NODE (esp6_decrypt_tun_node) = {
.type = VLIB_NODE_TYPE_INTERNAL,
.n_errors = ARRAY_LEN(esp_decrypt_error_strings),
.error_strings = esp_decrypt_error_strings,
- .sibling_of = "esp6-decrypt",
+ .n_next_nodes = ESP_DECRYPT_N_NEXT,
+ .next_nodes = {
+ [ESP_DECRYPT_NEXT_DROP] = "ip6-drop",
+ [ESP_DECRYPT_NEXT_IP4_INPUT] = "ip4-input-no-checksum",
+ [ESP_DECRYPT_NEXT_IP6_INPUT] = "ip6-input",
+ [ESP_DECRYPT_NEXT_L2_INPUT] = "l2-input",
+ [ESP_DECRYPT_NEXT_HANDOFF]= "esp6-decrypt-tun-handoff",
+ },
};
/* *INDENT-ON* */