always_inline uword
dpdk_esp_encrypt_inline (vlib_main_t * vm,
vlib_node_runtime_t * node,
- vlib_frame_t * from_frame, int is_ip6)
+ vlib_frame_t * from_frame, int is_ip6, int is_tun)
{
u32 n_left_from, *from, *to_next, next_index, thread_index;
ipsec_main_t *im = &ipsec_main;
sizeof (op[0]) + sizeof (op[0].sym[0]) + sizeof (priv[0]);
CLIB_PREFETCH (op, op_len, STORE);
- sa_index0 = vnet_buffer (b0)->ipsec.sad_index;
+ if (is_tun)
+ {
+ u32 tmp;
+ /* we are on an ipsec tunnel's feature arc */
+ sa_index0 = *(u32 *) vnet_feature_next_with_data (&tmp, b0,
+ sizeof
+ (sa_index0));
+ }
+ else
+ sa_index0 = vnet_buffer (b0)->ipsec.sad_index;
if (sa_index0 != last_sa_index)
{
vlib_node_runtime_t * node,
vlib_frame_t * from_frame)
{
- return dpdk_esp_encrypt_inline (vm, node, from_frame, 0 /*is_ip6 */ );
+ return dpdk_esp_encrypt_inline (vm, node, from_frame, 0 /*is_ip6 */ , 0);
}
/* *INDENT-OFF* */
vlib_node_runtime_t * node,
vlib_frame_t * from_frame)
{
- return dpdk_esp_encrypt_inline (vm, node, from_frame, 1 /*is_ip6 */ );
+ return dpdk_esp_encrypt_inline (vm, node, from_frame, 1 /*is_ip6 */ , 0);
}
/* *INDENT-OFF* */
};
/* *INDENT-ON* */
+/*
+ * ESP4 encrypt node for tunnel-mode packets.  Same datapath as
+ * dpdk-esp4-encrypt, but invoked with is_tun = 1 so the SA index is
+ * taken from the feature-arc data instead of vnet_buffer->ipsec.sad_index.
+ */
+VLIB_NODE_FN (dpdk_esp4_encrypt_tun_node) (vlib_main_t * vm,
+					   vlib_node_runtime_t * node,
+					   vlib_frame_t * from_frame)
+{
+  return dpdk_esp_encrypt_inline (vm, node, from_frame, 0 /*is_ip6 */ , 1);
+}
+
+/* *INDENT-OFF* */
+/* Node registration: output-flagged, single next (drop); reuses the
+ * generic ESP encrypt trace formatter and error strings. */
+VLIB_REGISTER_NODE (dpdk_esp4_encrypt_tun_node) = {
+  .name = "dpdk-esp4-encrypt-tun",
+  .flags = VLIB_NODE_FLAG_IS_OUTPUT,
+  .vector_size = sizeof (u32),
+  .format_trace = format_esp_encrypt_trace,
+  .n_errors = ARRAY_LEN (esp_encrypt_error_strings),
+  .error_strings = esp_encrypt_error_strings,
+  .n_next_nodes = 1,
+  .next_nodes =
+  {
+    [ESP_ENCRYPT_NEXT_DROP] = "error-drop",
+  }
+};
+
+/* Register the node as an ip4-output feature, ordered before
+ * adj-midchain-tx so the packet is encrypted before midchain transmit. */
+VNET_FEATURE_INIT (dpdk_esp4_encrypt_tun_feat_node, static) =
+{
+  .arc_name = "ip4-output",
+  .node_name = "dpdk-esp4-encrypt-tun",
+  .runs_before = VNET_FEATURES ("adj-midchain-tx"),
+};
+/* *INDENT-ON* */
+
+/*
+ * ESP6 encrypt node for tunnel-mode packets.  Same datapath as
+ * dpdk-esp6-encrypt, but invoked with is_tun = 1 so the SA index is
+ * taken from the feature-arc data instead of vnet_buffer->ipsec.sad_index.
+ */
+VLIB_NODE_FN (dpdk_esp6_encrypt_tun_node) (vlib_main_t * vm,
+					   vlib_node_runtime_t * node,
+					   vlib_frame_t * from_frame)
+{
+  return dpdk_esp_encrypt_inline (vm, node, from_frame, 1 /*is_ip6 */ , 1);
+}
+
+/* *INDENT-OFF* */
+/* Node registration: output-flagged, single next (drop); reuses the
+ * generic ESP encrypt trace formatter and error strings. */
+VLIB_REGISTER_NODE (dpdk_esp6_encrypt_tun_node) = {
+  .name = "dpdk-esp6-encrypt-tun",
+  .flags = VLIB_NODE_FLAG_IS_OUTPUT,
+  .vector_size = sizeof (u32),
+  .format_trace = format_esp_encrypt_trace,
+  .n_errors = ARRAY_LEN (esp_encrypt_error_strings),
+  .error_strings = esp_encrypt_error_strings,
+  .n_next_nodes = 1,
+  .next_nodes =
+  {
+    [ESP_ENCRYPT_NEXT_DROP] = "error-drop",
+  }
+};
+
+/* Register the node as an ip6-output feature, ordered before
+ * adj-midchain-tx so the packet is encrypted before midchain transmit. */
+VNET_FEATURE_INIT (dpdk_esp6_encrypt_tun_feat_node, static) =
+{
+  .arc_name = "ip6-output",
+  .node_name = "dpdk-esp6-encrypt-tun",
+  .runs_before = VNET_FEATURES ("adj-midchain-tx"),
+};
+/* *INDENT-ON* */
+
/*
* fd.io coding-style-patch-verification: ON
*
u32 idx = ipsec_register_esp_backend (vm, im, "dpdk backend",
"dpdk-esp4-encrypt",
+ "dpdk-esp4-encrypt-tun",
"dpdk-esp4-decrypt",
"dpdk-esp6-encrypt",
+ "dpdk-esp6-encrypt-tun",
"dpdk-esp6-decrypt",
dpdk_ipsec_check_support,
add_del_sa_session);
_(NON_ETHERNET, -151, "Interface is not an Ethernet interface") \
_(BD_ALREADY_HAS_BVI, -152, "Bridge domain already has a BVI interface") \
_(INVALID_PROTOCOL, -153, "Invalid Protocol") \
-_(INVALID_ALGORITHM, -154, "Invalid Algorithm")
+_(INVALID_ALGORITHM, -154, "Invalid Algorithm") \
+_(RSRC_IN_USE, -155, "Resource In Use")
typedef enum
{
*out_next_index = vlib_node_add_next (vm, prev_node->index, node->index);
}
+/**
+ * @brief Look up the feature index of @a node_name on the arc @a arc_name
+ *        and store it in *out_feature_index.
+ *
+ * Used at backend registration time to record the per-backend tunnel
+ * encrypt feature indices for later enable/disable.
+ */
+static void
+ipsec_add_feature (const char *arc_name,
+		   const char *node_name, u32 * out_feature_index)
+{
+  u8 arc;
+
+  arc = vnet_get_feature_arc_index (arc_name);
+  *out_feature_index = vnet_get_feature_index (arc, node_name);
+}
+
u32
ipsec_register_ah_backend (vlib_main_t * vm, ipsec_main_t * im,
const char *name,
ipsec_register_esp_backend (vlib_main_t * vm, ipsec_main_t * im,
const char *name,
const char *esp4_encrypt_node_name,
+ const char *esp4_encrypt_node_tun_name,
const char *esp4_decrypt_node_name,
const char *esp6_encrypt_node_name,
+ const char *esp6_encrypt_node_tun_name,
const char *esp6_decrypt_node_name,
check_support_cb_t esp_check_support_cb,
add_del_sa_sess_cb_t esp_add_del_sa_sess_cb)
{
ipsec_esp_backend_t *b;
+
pool_get (im->esp_backends, b);
b->name = format (0, "%s%c", name, 0);
ipsec_add_node (vm, esp6_decrypt_node_name, "ipsec6-input-feature",
&b->esp6_decrypt_node_index, &b->esp6_decrypt_next_index);
+ ipsec_add_feature ("ip4-output", esp4_encrypt_node_tun_name,
+ &b->esp4_encrypt_tun_feature_index);
+ ipsec_add_feature ("ip6-output", esp6_encrypt_node_tun_name,
+ &b->esp6_encrypt_tun_feature_index);
+
b->check_support_cb = esp_check_support_cb;
b->add_del_sa_sess_cb = esp_add_del_sa_sess_cb;
return b - im->esp_backends;
}
-static walk_rc_t
-ipsec_sa_restack (ipsec_sa_t * sa, void *ctx)
+/**
+ * @brief Check whether IPsec crypto resources are currently in use.
+ *
+ * @return NULL when no SA entries and no ipsec tunnel interfaces exist,
+ *         otherwise a clib error naming the resource that blocks a
+ *         backend change.
+ */
+clib_error_t *
+ipsec_rsc_in_use (ipsec_main_t * im)
{
-  ipsec_sa_stack (sa);
-
-  return (WALK_CONTINUE);
+  /* return an error if crypto resources are in use */
+  if (pool_elts (im->sad) > 0)
+    return clib_error_return (0,
+			      "%d SA entries configured",
+			      pool_elts (im->sad));
+
+  if (pool_elts (im->tunnel_interfaces))
+    return clib_error_return (0,
+			      "%d tunnel-interface entries configured",
+			      pool_elts (im->tunnel_interfaces));
+
+  return (NULL);
}
+/*
+ * Select the active AH backend.  Refuses while any crypto resources
+ * (SAs, tunnel interfaces) exist, and validates the backend index;
+ * returns VNET_API_ERROR_* codes instead of a bare -1.
+ */
int
ipsec_select_ah_backend (ipsec_main_t * im, u32 backend_idx)
{
-  if (pool_elts (im->sad) > 0
-      || pool_is_free_index (im->ah_backends, backend_idx))
-    {
-      return -1;
-    }
+  if (ipsec_rsc_in_use (im))
+    return VNET_API_ERROR_RSRC_IN_USE;
+
+  if (pool_is_free_index (im->ah_backends, backend_idx))
+    return VNET_API_ERROR_INVALID_VALUE;
+
ipsec_ah_backend_t *b = pool_elt_at_index (im->ah_backends, backend_idx);
im->ah_current_backend = backend_idx;
im->ah4_encrypt_node_index = b->ah4_encrypt_node_index;
im->ah6_encrypt_next_index = b->ah6_encrypt_next_index;
im->ah6_decrypt_next_index = b->ah6_decrypt_next_index;
-  ipsec_sa_walk (ipsec_sa_restack, NULL);
return 0;
}
+/*
+ * Select the active ESP backend.  Refuses while any crypto resources
+ * (SAs, tunnel interfaces) exist, and validates the backend index;
+ * also snapshots the backend's tunnel encrypt feature indices so
+ * ipsec_tunnel_feature_set enables the right feature nodes.
+ */
int
ipsec_select_esp_backend (ipsec_main_t * im, u32 backend_idx)
{
-  if (pool_elts (im->sad) > 0
-      || pool_is_free_index (im->esp_backends, backend_idx))
-    {
-      return -1;
-    }
+  if (ipsec_rsc_in_use (im))
+    return VNET_API_ERROR_RSRC_IN_USE;
+
+  if (pool_is_free_index (im->esp_backends, backend_idx))
+    return VNET_API_ERROR_INVALID_VALUE;
+
ipsec_esp_backend_t *b = pool_elt_at_index (im->esp_backends, backend_idx);
im->esp_current_backend = backend_idx;
im->esp4_encrypt_node_index = b->esp4_encrypt_node_index;
im->esp6_encrypt_next_index = b->esp6_encrypt_next_index;
im->esp6_decrypt_next_index = b->esp6_decrypt_next_index;
-  ipsec_sa_walk (ipsec_sa_restack, NULL);
+  im->esp4_encrypt_tun_feature_index = b->esp4_encrypt_tun_feature_index;
+  im->esp6_encrypt_tun_feature_index = b->esp6_encrypt_tun_feature_index;
+
return 0;
}
idx = ipsec_register_esp_backend (vm, im, "default openssl backend",
"esp4-encrypt",
+ "esp4-encrypt-tun",
"esp4-decrypt",
"esp6-encrypt",
+ "esp6-encrypt-tun",
"esp6-decrypt",
ipsec_check_esp_support, NULL);
im->esp_default_backend = idx;
u32 esp6_decrypt_node_index;
u32 esp6_encrypt_next_index;
u32 esp6_decrypt_next_index;
+ u32 esp4_encrypt_tun_feature_index;
+ u32 esp6_encrypt_tun_feature_index;
} ipsec_esp_backend_t;
typedef struct
u32 ah6_encrypt_next_index;
u32 ah6_decrypt_next_index;
+ /* tun encrypt arcs and feature nodes */
+ u32 esp4_encrypt_tun_feature_index;
+ u32 esp6_encrypt_tun_feature_index;
+
/* pool of ah backends */
ipsec_ah_backend_t *ah_backends;
/* pool of esp backends */
u32 ipsec_register_esp_backend (vlib_main_t * vm, ipsec_main_t * im,
const char *name,
const char *esp4_encrypt_node_name,
+ const char *esp4_encrypt_tun_node_name,
const char *esp4_decrypt_node_name,
const char *esp6_encrypt_node_name,
+ const char *esp6_encrypt_tun_node_name,
const char *esp6_decrypt_node_name,
check_support_cb_t esp_check_support_cb,
add_del_sa_sess_cb_t esp_add_del_sa_sess_cb);
int ipsec_select_ah_backend (ipsec_main_t * im, u32 ah_backend_idx);
int ipsec_select_esp_backend (ipsec_main_t * im, u32 esp_backend_idx);
+clib_error_t *ipsec_rsc_in_use (ipsec_main_t * im);
+
always_inline ipsec_sa_t *
ipsec_sa_get (u32 sa_index)
{
switch (protocol)
{
case IPSEC_PROTOCOL_ESP:
- if (pool_is_free_index (im->esp_backends, mp->index))
- {
- rv = VNET_API_ERROR_INVALID_VALUE;
- break;
- }
- ipsec_select_esp_backend (im, mp->index);
+ rv = ipsec_select_esp_backend (im, mp->index);
break;
case IPSEC_PROTOCOL_AH:
- if (pool_is_free_index (im->ah_backends, mp->index))
- {
- rv = VNET_API_ERROR_INVALID_VALUE;
- break;
- }
- ipsec_select_ah_backend (im, mp->index);
+ rv = ipsec_select_ah_backend (im, mp->index);
break;
default:
- rv = VNET_API_ERROR_INVALID_VALUE;
+ rv = VNET_API_ERROR_INVALID_PROTOCOL;
break;
}
#else
unformat_input_t * input,
vlib_cli_command_t * cmd)
{
- u32 backend_index;
+ unformat_input_t _line_input, *line_input = &_line_input;
ipsec_main_t *im = &ipsec_main;
+ clib_error_t *error;
+ u32 backend_index;
- if (pool_elts (im->sad) > 0)
- {
- return clib_error_return (0,
- "Cannot change IPsec backend, while %u SA entries are configured",
- pool_elts (im->sad));
- }
+ error = ipsec_rsc_in_use (im);
+
+ if (error)
+ return error;
- unformat_input_t _line_input, *line_input = &_line_input;
/* Get a line of input. */
if (!unformat_user (input, unformat_line_input, line_input))
return 0;
}
+/*
+ * Enable/disable the currently selected backend's tunnel encrypt feature
+ * on the interface's ip4-output and ip6-output arcs, passing the tunnel's
+ * output SA index as the feature's opaque data (read back by the encrypt
+ * node when is_tun is set).
+ */
static void
-ipsec_tunnel_feature_set (ipsec_tunnel_if_t * t, u8 enable)
+ipsec_tunnel_feature_set (ipsec_main_t * im, ipsec_tunnel_if_t * t, u8 enable)
{
-  vnet_feature_enable_disable ("ip4-output",
-			       "esp4-encrypt-tun",
-			       t->sw_if_index, enable,
-			       &t->output_sa_index,
-			       sizeof (t->output_sa_index));
-  vnet_feature_enable_disable ("ip6-output",
-			       "esp6-encrypt-tun",
-			       t->sw_if_index, enable,
-			       &t->output_sa_index,
-			       sizeof (t->output_sa_index));
+  u8 arc;
+
+  arc = vnet_get_feature_arc_index ("ip4-output");
+
+  vnet_feature_enable_disable_with_index (arc,
+					  im->esp4_encrypt_tun_feature_index,
+					  t->sw_if_index, enable,
+					  &t->output_sa_index,
+					  sizeof (t->output_sa_index));
+
+  arc = vnet_get_feature_arc_index ("ip6-output");
+
+  vnet_feature_enable_disable_with_index (arc,
+					  im->esp6_encrypt_tun_feature_index,
+					  t->sw_if_index, enable,
+					  &t->output_sa_index,
+					  sizeof (t->output_sa_index));
}
int
~0);
im->ipsec_if_by_sw_if_index[t->sw_if_index] = dev_instance;
- ipsec_tunnel_feature_set (t, 1);
+ ipsec_tunnel_feature_set (im, t, 1);
/*1st interface, register protocol */
if (pool_elts (im->tunnel_interfaces) == 1)
hi = vnet_get_hw_interface (vnm, t->hw_if_index);
vnet_sw_interface_set_flags (vnm, hi->sw_if_index, 0); /* admin down */
- ipsec_tunnel_feature_set (t, 0);
+ ipsec_tunnel_feature_set (im, t, 0);
vnet_delete_hw_interface (vnm, t->hw_if_index);
if (is_ip6)
* re-enable the feature to get the new SA in
* the workers are stopped so no packets are sent in the clear
*/
- ipsec_tunnel_feature_set (t, 0);
+ ipsec_tunnel_feature_set (im, t, 0);
t->output_sa_index = sa_index;
- ipsec_tunnel_feature_set (t, 1);
+ ipsec_tunnel_feature_set (im, t, 1);
}
/* remove sa_id to sa_index mapping on old SA */
return 0;
}
-
clib_error_t *
ipsec_tunnel_if_init (vlib_main_t * vm)
{
/**
* 'stack' (resolve the recursion for) the SA tunnel destination
*/
-void
+static void
ipsec_sa_stack (ipsec_sa_t * sa)
{
ipsec_main_t *im = &ipsec_main;
const ip46_address_t * tunnel_dst_addr,
u32 * sa_index);
extern u32 ipsec_sa_del (u32 id);
-extern void ipsec_sa_stack (ipsec_sa_t * sa);
extern void ipsec_sa_set_crypto_alg (ipsec_sa_t * sa,
ipsec_crypto_alg_t crypto_alg);
extern void ipsec_sa_set_integ_alg (ipsec_sa_t * sa,