/*
- * decap.c : IPSec tunnel support
+ * ipsec.c : IPSEC module functions
*
* Copyright (c) 2015 Cisco and/or its affiliates.
* Licensed under the Apache License, Version 2.0 (the "License");
#include <vnet/api_errno.h>
#include <vnet/ip/ip.h>
#include <vnet/interface.h>
+#include <vnet/udp/udp_local.h>
#include <vnet/ipsec/ipsec.h>
-#include <vnet/ipsec/ikev2.h>
-
-#if DPDK_CRYPTO==1
-#include <vnet/devices/dpdk/ipsec/esp.h>
-#define ESP_NODE "dpdk-esp-encrypt"
-#else
#include <vnet/ipsec/esp.h>
-#define ESP_NODE "esp-encrypt"
-#endif
+#include <vnet/ipsec/ah.h>
-#if DPDK_CRYPTO==0
-/* dummy function */
-static int
-add_del_sa_sess (u32 sa_index, u8 is_add)
-{
- return 0;
-}
-#endif
+ipsec_main_t ipsec_main;
+esp_async_post_next_t esp_encrypt_async_next;
+esp_async_post_next_t esp_decrypt_async_next;
-u32
-ipsec_get_sa_index_by_sa_id (u32 sa_id)
+static clib_error_t *
+ipsec_check_ah_support (ipsec_sa_t * sa)
{
ipsec_main_t *im = &ipsec_main;
- uword *p = hash_get (im->sa_index_by_sa_id, sa_id);
- if (!p)
- return ~0;
- return p[0];
+ if (sa->integ_alg == IPSEC_INTEG_ALG_NONE)
+ return clib_error_return (0, "unsupported none integ-alg");
+
+ if (!vnet_crypto_is_set_handler (im->integ_algs[sa->integ_alg].alg))
+ return clib_error_return (0, "No crypto engine support for %U",
+ format_ipsec_integ_alg, sa->integ_alg);
+
+ return 0;
}
-int
-ipsec_set_interface_spd (vlib_main_t * vm, u32 sw_if_index, u32 spd_id,
- int is_add)
+static clib_error_t *
+ipsec_check_esp_support (ipsec_sa_t * sa)
{
ipsec_main_t *im = &ipsec_main;
- ip4_ipsec_config_t config;
- u32 spd_index;
- uword *p;
+ if (IPSEC_INTEG_ALG_NONE != sa->integ_alg)
+ {
+ if (!vnet_crypto_is_set_handler (im->integ_algs[sa->integ_alg].alg))
+ return clib_error_return (0, "No crypto engine support for %U",
+ format_ipsec_integ_alg, sa->integ_alg);
+ }
+ if (IPSEC_CRYPTO_ALG_NONE != sa->crypto_alg)
+ {
+ if (!vnet_crypto_is_set_handler (im->crypto_algs[sa->crypto_alg].alg))
+ return clib_error_return (0, "No crypto engine support for %U",
+ format_ipsec_crypto_alg, sa->crypto_alg);
+ }
- p = hash_get (im->spd_index_by_spd_id, spd_id);
- if (!p)
- return VNET_API_ERROR_SYSCALL_ERROR_1; /* no such spd-id */
+ return (0);
+}
- spd_index = p[0];
+clib_error_t *
+ipsec_add_del_sa_sess_cb (ipsec_main_t * im, u32 sa_index, u8 is_add)
+{
+ ipsec_ah_backend_t *ah =
+ pool_elt_at_index (im->ah_backends, im->ah_current_backend);
+ if (ah->add_del_sa_sess_cb)
+ {
+ clib_error_t *err = ah->add_del_sa_sess_cb (sa_index, is_add);
+ if (err)
+ return err;
+ }
+ ipsec_esp_backend_t *esp =
+ pool_elt_at_index (im->esp_backends, im->esp_current_backend);
+ if (esp->add_del_sa_sess_cb)
+ {
+ clib_error_t *err = esp->add_del_sa_sess_cb (sa_index, is_add);
+ if (err)
+ return err;
+ }
+ return 0;
+}
- p = hash_get (im->spd_index_by_sw_if_index, sw_if_index);
- if (p && is_add)
- return VNET_API_ERROR_SYSCALL_ERROR_1; /* spd already assigned */
+clib_error_t *
+ipsec_check_support_cb (ipsec_main_t * im, ipsec_sa_t * sa)
+{
+ clib_error_t *error = 0;
- if (is_add)
+ if (PREDICT_FALSE (sa->protocol == IPSEC_PROTOCOL_AH))
{
- hash_set (im->spd_index_by_sw_if_index, sw_if_index, spd_index);
+ ipsec_ah_backend_t *ah =
+ pool_elt_at_index (im->ah_backends, im->ah_current_backend);
+ ASSERT (ah->check_support_cb);
+ error = ah->check_support_cb (sa);
}
else
{
- hash_unset (im->spd_index_by_sw_if_index, sw_if_index);
+ ipsec_esp_backend_t *esp =
+ pool_elt_at_index (im->esp_backends, im->esp_current_backend);
+ ASSERT (esp->check_support_cb);
+ error = esp->check_support_cb (sa);
}
+ return error;
+}
- clib_warning ("sw_if_index %u spd_id %u spd_index %u",
- sw_if_index, spd_id, spd_index);
- /* enable IPsec on TX */
- vnet_feature_enable_disable ("ip4-output", "ipsec-output-ip4", sw_if_index,
- is_add, 0, 0);
- vnet_feature_enable_disable ("ip6-output", "ipsec-output-ip6", sw_if_index,
- is_add, 0, 0);
+static void
+ipsec_add_node (vlib_main_t * vm, const char *node_name,
+ const char *prev_node_name, u32 * out_node_index,
+ u32 * out_next_index)
+{
+ vlib_node_t *prev_node, *node;
+ prev_node = vlib_get_node_by_name (vm, (u8 *) prev_node_name);
+ ASSERT (prev_node);
+ node = vlib_get_node_by_name (vm, (u8 *) node_name);
+ ASSERT (node);
+ *out_node_index = node->index;
+ *out_next_index = vlib_node_add_next (vm, prev_node->index, node->index);
+}
- /* enable IPsec on RX */
- vnet_feature_enable_disable ("ip4-unicast", "ipsec-input-ip4", sw_if_index,
- is_add, &config, sizeof (config));
- vnet_feature_enable_disable ("ip6-unicast", "ipsec-input-ip6", sw_if_index,
- is_add, &config, sizeof (config));
+void
+ipsec_add_feature (const char *arc_name,
+ const char *node_name, u32 * out_feature_index)
+{
+ u8 arc;
- return 0;
+ arc = vnet_get_feature_arc_index (arc_name);
+ ASSERT (arc != (u8) ~ 0);
+ *out_feature_index = vnet_get_feature_index (arc, node_name);
}
-int
-ipsec_add_del_spd (vlib_main_t * vm, u32 spd_id, int is_add)
+void
+ipsec_unregister_udp_port (u16 port)
{
ipsec_main_t *im = &ipsec_main;
- ipsec_spd_t *spd = 0;
+ u32 n_regs;
uword *p;
- u32 spd_index, k, v;
- p = hash_get (im->spd_index_by_spd_id, spd_id);
- if (p && is_add)
- return VNET_API_ERROR_INVALID_VALUE;
- if (!p && !is_add)
- return VNET_API_ERROR_INVALID_VALUE;
+ p = hash_get (im->udp_port_registrations, port);
+
+ ASSERT (p);
+
+ n_regs = p[0];
- if (!is_add) /* delete */
+ if (0 == --n_regs)
{
- spd_index = p[0];
- spd = pool_elt_at_index (im->spds, spd_index);
- if (!spd)
- return VNET_API_ERROR_INVALID_VALUE;
- /* *INDENT-OFF* */
- hash_foreach (k, v, im->spd_index_by_sw_if_index, ({
- if (v == spd_index)
- ipsec_set_interface_spd(vm, k, spd_id, 0);
- }));
- /* *INDENT-ON* */
- hash_unset (im->spd_index_by_spd_id, spd_id);
- pool_free (spd->policies);
- vec_free (spd->ipv4_outbound_policies);
- vec_free (spd->ipv6_outbound_policies);
- vec_free (spd->ipv4_inbound_protect_policy_indices);
- vec_free (spd->ipv4_inbound_policy_discard_and_bypass_indices);
- pool_put (im->spds, spd);
+ udp_unregister_dst_port (vlib_get_main (), port, 1);
+ hash_unset (im->udp_port_registrations, port);
}
- else /* create new SPD */
+ else
{
- pool_get (im->spds, spd);
- memset (spd, 0, sizeof (*spd));
- spd_index = spd - im->spds;
- spd->id = spd_id;
- hash_set (im->spd_index_by_spd_id, spd_id, spd_index);
+ hash_unset (im->udp_port_registrations, port);
+ hash_set (im->udp_port_registrations, port, n_regs);
}
- return 0;
-}
-
-static int
-ipsec_spd_entry_sort (void *a1, void *a2)
-{
- ipsec_main_t *im = &ipsec_main;
- u32 *id1 = a1;
- u32 *id2 = a2;
- ipsec_spd_t *spd;
- ipsec_policy_t *p1, *p2;
-
- /* *INDENT-OFF* */
- pool_foreach (spd, im->spds, ({
- p1 = pool_elt_at_index(spd->policies, *id1);
- p2 = pool_elt_at_index(spd->policies, *id2);
- if (p1 && p2)
- return p2->priority - p1->priority;
- }));
- /* *INDENT-ON* */
-
- return 0;
}
-int
-ipsec_add_del_policy (vlib_main_t * vm, ipsec_policy_t * policy, int is_add)
+void
+ipsec_register_udp_port (u16 port)
{
ipsec_main_t *im = &ipsec_main;
- ipsec_spd_t *spd = 0;
- ipsec_policy_t *vp;
+ u32 n_regs;
uword *p;
- u32 spd_index;
- clib_warning ("policy-id %u priority %d is_outbound %u", policy->id,
- policy->priority, policy->is_outbound);
+ p = hash_get (im->udp_port_registrations, port);
- if (policy->policy == IPSEC_POLICY_ACTION_PROTECT)
- {
- p = hash_get (im->sa_index_by_sa_id, policy->sa_id);
- if (!p)
- return VNET_API_ERROR_SYSCALL_ERROR_1;
- policy->sa_index = p[0];
- }
-
- p = hash_get (im->spd_index_by_spd_id, policy->id);
-
- if (!p)
- return VNET_API_ERROR_SYSCALL_ERROR_1;
+ n_regs = (p ? p[0] : 0);
- spd_index = p[0];
- spd = pool_elt_at_index (im->spds, spd_index);
- if (!spd)
- return VNET_API_ERROR_SYSCALL_ERROR_1;
+ if (0 == n_regs++)
+ udp_register_dst_port (vlib_get_main (), port,
+ ipsec4_tun_input_node.index, 1);
- if (is_add)
- {
- u32 policy_index;
+ hash_unset (im->udp_port_registrations, port);
+ hash_set (im->udp_port_registrations, port, n_regs);
+}
- pool_get (spd->policies, vp);
- clib_memcpy (vp, policy, sizeof (*vp));
- policy_index = vp - spd->policies;
+u32
+ipsec_register_ah_backend (vlib_main_t * vm, ipsec_main_t * im,
+ const char *name,
+ const char *ah4_encrypt_node_name,
+ const char *ah4_decrypt_node_name,
+ const char *ah6_encrypt_node_name,
+ const char *ah6_decrypt_node_name,
+ check_support_cb_t ah_check_support_cb,
+ add_del_sa_sess_cb_t ah_add_del_sa_sess_cb)
+{
+ ipsec_ah_backend_t *b;
+ pool_get (im->ah_backends, b);
+ b->name = format (0, "%s%c", name, 0);
+
+ ipsec_add_node (vm, ah4_encrypt_node_name, "ipsec4-output-feature",
+ &b->ah4_encrypt_node_index, &b->ah4_encrypt_next_index);
+ ipsec_add_node (vm, ah4_decrypt_node_name, "ipsec4-input-feature",
+ &b->ah4_decrypt_node_index, &b->ah4_decrypt_next_index);
+ ipsec_add_node (vm, ah6_encrypt_node_name, "ipsec6-output-feature",
+ &b->ah6_encrypt_node_index, &b->ah6_encrypt_next_index);
+ ipsec_add_node (vm, ah6_decrypt_node_name, "ipsec6-input-feature",
+ &b->ah6_decrypt_node_index, &b->ah6_decrypt_next_index);
+
+ b->check_support_cb = ah_check_support_cb;
+ b->add_del_sa_sess_cb = ah_add_del_sa_sess_cb;
+ return b - im->ah_backends;
+}
- if (policy->is_outbound)
- {
- if (policy->is_ipv6)
- {
- vec_add1 (spd->ipv6_outbound_policies, policy_index);
- clib_memcpy (vp, policy, sizeof (ipsec_policy_t));
- vec_sort_with_function (spd->ipv6_outbound_policies,
- ipsec_spd_entry_sort);
- }
- else
- {
- vec_add1 (spd->ipv4_outbound_policies, policy_index);
- clib_memcpy (vp, policy, sizeof (ipsec_policy_t));
- vec_sort_with_function (spd->ipv4_outbound_policies,
- ipsec_spd_entry_sort);
- }
- }
- else
- {
- if (policy->is_ipv6)
- {
- if (policy->policy == IPSEC_POLICY_ACTION_PROTECT)
- {
- vec_add1 (spd->ipv6_inbound_protect_policy_indices,
- policy_index);
- clib_memcpy (vp, policy, sizeof (ipsec_policy_t));
- vec_sort_with_function
- (spd->ipv6_inbound_protect_policy_indices,
- ipsec_spd_entry_sort);
- }
- else
- {
- vec_add1
- (spd->ipv6_inbound_policy_discard_and_bypass_indices,
- policy_index);
- clib_memcpy (vp, policy, sizeof (ipsec_policy_t));
- vec_sort_with_function
- (spd->ipv6_inbound_policy_discard_and_bypass_indices,
- ipsec_spd_entry_sort);
- }
- }
- else
- {
- if (policy->policy == IPSEC_POLICY_ACTION_PROTECT)
- {
- vec_add1 (spd->ipv4_inbound_protect_policy_indices,
- policy_index);
- clib_memcpy (vp, policy, sizeof (ipsec_policy_t));
- vec_sort_with_function
- (spd->ipv4_inbound_protect_policy_indices,
- ipsec_spd_entry_sort);
- }
- else
- {
- vec_add1
- (spd->ipv4_inbound_policy_discard_and_bypass_indices,
- policy_index);
- clib_memcpy (vp, policy, sizeof (ipsec_policy_t));
- vec_sort_with_function
- (spd->ipv4_inbound_policy_discard_and_bypass_indices,
- ipsec_spd_entry_sort);
- }
- }
- }
+u32
+ipsec_register_esp_backend (vlib_main_t * vm, ipsec_main_t * im,
+ const char *name,
+ const char *esp4_encrypt_node_name,
+ const char *esp4_encrypt_node_tun_name,
+ const char *esp4_decrypt_node_name,
+ const char *esp4_decrypt_tun_node_name,
+ const char *esp6_encrypt_node_name,
+ const char *esp6_encrypt_node_tun_name,
+ const char *esp6_decrypt_node_name,
+ const char *esp6_decrypt_tun_node_name,
+ check_support_cb_t esp_check_support_cb,
+ add_del_sa_sess_cb_t esp_add_del_sa_sess_cb,
+ enable_disable_cb_t enable_disable_cb)
+{
+ ipsec_esp_backend_t *b;
+
+ pool_get (im->esp_backends, b);
+ b->name = format (0, "%s%c", name, 0);
+
+ ipsec_add_node (vm, esp4_encrypt_node_name, "ipsec4-output-feature",
+ &b->esp4_encrypt_node_index, &b->esp4_encrypt_next_index);
+ ipsec_add_node (vm, esp4_decrypt_node_name, "ipsec4-input-feature",
+ &b->esp4_decrypt_node_index, &b->esp4_decrypt_next_index);
+ ipsec_add_node (vm, esp6_encrypt_node_name, "ipsec6-output-feature",
+ &b->esp6_encrypt_node_index, &b->esp6_encrypt_next_index);
+ ipsec_add_node (vm, esp6_decrypt_node_name, "ipsec6-input-feature",
+ &b->esp6_decrypt_node_index, &b->esp6_decrypt_next_index);
+ ipsec_add_node (vm, esp4_decrypt_tun_node_name, "ipsec4-tun-input",
+ &b->esp4_decrypt_tun_node_index,
+ &b->esp4_decrypt_tun_next_index);
+ ipsec_add_node (vm, esp6_decrypt_tun_node_name, "ipsec6-tun-input",
+ &b->esp6_decrypt_tun_node_index,
+ &b->esp6_decrypt_tun_next_index);
+
+ b->esp6_encrypt_tun_node_index =
+ vlib_get_node_by_name (vm, (u8 *) esp6_encrypt_node_tun_name)->index;
+ b->esp4_encrypt_tun_node_index =
+ vlib_get_node_by_name (vm, (u8 *) esp4_encrypt_node_tun_name)->index;
+
+ b->check_support_cb = esp_check_support_cb;
+ b->add_del_sa_sess_cb = esp_add_del_sa_sess_cb;
+ b->enable_disable_cb = enable_disable_cb;
+
+ return b - im->esp_backends;
+}
- }
- else
- {
- u32 i, j;
- /* *INDENT-OFF* */
- pool_foreach_index(i, spd->policies, ({
- vp = pool_elt_at_index(spd->policies, i);
- if (vp->priority != policy->priority)
- continue;
- if (vp->is_outbound != policy->is_outbound)
- continue;
- if (vp->policy != policy->policy)
- continue;
- if (vp->sa_id != policy->sa_id)
- continue;
- if (vp->protocol != policy->protocol)
- continue;
- if (vp->lport.start != policy->lport.start)
- continue;
- if (vp->lport.stop != policy->lport.stop)
- continue;
- if (vp->rport.start != policy->rport.start)
- continue;
- if (vp->rport.stop != policy->rport.stop)
- continue;
- if (vp->is_ipv6 != policy->is_ipv6)
- continue;
- if (policy->is_ipv6)
- {
- if (vp->laddr.start.ip6.as_u64[0] != policy->laddr.start.ip6.as_u64[0])
- continue;
- if (vp->laddr.start.ip6.as_u64[1] != policy->laddr.start.ip6.as_u64[1])
- continue;
- if (vp->laddr.stop.ip6.as_u64[0] != policy->laddr.stop.ip6.as_u64[0])
- continue;
- if (vp->laddr.stop.ip6.as_u64[1] != policy->laddr.stop.ip6.as_u64[1])
- continue;
- if (vp->raddr.start.ip6.as_u64[0] != policy->raddr.start.ip6.as_u64[0])
- continue;
- if (vp->raddr.start.ip6.as_u64[1] != policy->raddr.start.ip6.as_u64[1])
- continue;
- if (vp->raddr.stop.ip6.as_u64[0] != policy->raddr.stop.ip6.as_u64[0])
- continue;
- if (vp->laddr.stop.ip6.as_u64[1] != policy->laddr.stop.ip6.as_u64[1])
- continue;
- if (policy->is_outbound)
- {
- vec_foreach_index(j, spd->ipv6_outbound_policies) {
- if (vec_elt(spd->ipv6_outbound_policies, j) == i) {
- vec_del1 (spd->ipv6_outbound_policies, j);
- break;
- }
- }
- }
- else
- {
- if (policy->policy == IPSEC_POLICY_ACTION_PROTECT)
- {
- vec_foreach_index(j, spd->ipv6_inbound_protect_policy_indices) {
- if (vec_elt(spd->ipv6_inbound_protect_policy_indices, j) == i) {
- vec_del1 (spd->ipv6_inbound_protect_policy_indices, j);
- break;
- }
- }
- }
- else
- {
- vec_foreach_index(j, spd->ipv6_inbound_policy_discard_and_bypass_indices) {
- if (vec_elt(spd->ipv6_inbound_policy_discard_and_bypass_indices, j) == i) {
- vec_del1 (spd->ipv6_inbound_policy_discard_and_bypass_indices, j);
- break;
- }
- }
- }
- }
- }
- else
- {
- if (vp->laddr.start.ip4.as_u32 != policy->laddr.start.ip4.as_u32)
- continue;
- if (vp->laddr.stop.ip4.as_u32 != policy->laddr.stop.ip4.as_u32)
- continue;
- if (vp->raddr.start.ip4.as_u32 != policy->raddr.start.ip4.as_u32)
- continue;
- if (vp->raddr.stop.ip4.as_u32 != policy->raddr.stop.ip4.as_u32)
- continue;
- if (policy->is_outbound)
- {
- vec_foreach_index(j, spd->ipv4_outbound_policies) {
- if (vec_elt(spd->ipv4_outbound_policies, j) == i) {
- vec_del1 (spd->ipv4_outbound_policies, j);
- break;
- }
- }
- }
- else
- {
- if (policy->policy == IPSEC_POLICY_ACTION_PROTECT)
- {
- vec_foreach_index(j, spd->ipv4_inbound_protect_policy_indices) {
- if (vec_elt(spd->ipv4_inbound_protect_policy_indices, j) == i) {
- vec_del1 (spd->ipv4_inbound_protect_policy_indices, j);
- break;
- }
- }
- }
- else
- {
- vec_foreach_index(j, spd->ipv4_inbound_policy_discard_and_bypass_indices) {
- if (vec_elt(spd->ipv4_inbound_policy_discard_and_bypass_indices, j) == i) {
- vec_del1 (spd->ipv4_inbound_policy_discard_and_bypass_indices, j);
- break;
- }
- }
- }
- }
- pool_put (spd->policies, vp);
- break;
- }
- }));
- /* *INDENT-ON* */
- }
+clib_error_t *
+ipsec_rsc_in_use (ipsec_main_t * im)
+{
+ /* return an error if crypto resources are in use */
+ if (pool_elts (im->sad) > 0)
+ return clib_error_return (0,
+ "%d SA entries configured",
+ pool_elts (im->sad));
- return 0;
+ return (NULL);
}
-static u8
-ipsec_is_sa_used (u32 sa_index)
+int
+ipsec_select_ah_backend (ipsec_main_t * im, u32 backend_idx)
{
- ipsec_main_t *im = &ipsec_main;
- ipsec_spd_t *spd;
- ipsec_policy_t *p;
- ipsec_tunnel_if_t *t;
-
- /* *INDENT-OFF* */
- pool_foreach(spd, im->spds, ({
- pool_foreach(p, spd->policies, ({
- if (p->policy == IPSEC_POLICY_ACTION_PROTECT)
- {
- if (p->sa_index == sa_index)
- return 1;
- }
- }));
- }));
-
- pool_foreach(t, im->tunnel_interfaces, ({
- if (t->input_sa_index == sa_index)
- return 1;
- if (t->output_sa_index == sa_index)
- return 1;
- }));
- /* *INDENT-ON* */
+ if (ipsec_rsc_in_use (im))
+ return VNET_API_ERROR_RSRC_IN_USE;
+
+ if (pool_is_free_index (im->ah_backends, backend_idx))
+ return VNET_API_ERROR_INVALID_VALUE;
+
+ ipsec_ah_backend_t *b = pool_elt_at_index (im->ah_backends, backend_idx);
+ im->ah_current_backend = backend_idx;
+ im->ah4_encrypt_node_index = b->ah4_encrypt_node_index;
+ im->ah4_decrypt_node_index = b->ah4_decrypt_node_index;
+ im->ah4_encrypt_next_index = b->ah4_encrypt_next_index;
+ im->ah4_decrypt_next_index = b->ah4_decrypt_next_index;
+ im->ah6_encrypt_node_index = b->ah6_encrypt_node_index;
+ im->ah6_decrypt_node_index = b->ah6_decrypt_node_index;
+ im->ah6_encrypt_next_index = b->ah6_encrypt_next_index;
+ im->ah6_decrypt_next_index = b->ah6_decrypt_next_index;
return 0;
}
int
-ipsec_add_del_sa (vlib_main_t * vm, ipsec_sa_t * new_sa, int is_add)
+ipsec_select_esp_backend (ipsec_main_t * im, u32 backend_idx)
{
- ipsec_main_t *im = &ipsec_main;
- ipsec_sa_t *sa = 0;
- uword *p;
- u32 sa_index;
-
- clib_warning ("id %u spi %u", new_sa->id, new_sa->spi);
+ if (ipsec_rsc_in_use (im))
+ return VNET_API_ERROR_RSRC_IN_USE;
- p = hash_get (im->sa_index_by_sa_id, new_sa->id);
- if (p && is_add)
- return VNET_API_ERROR_SYSCALL_ERROR_1; /* already exists */
- if (!p && !is_add)
- return VNET_API_ERROR_SYSCALL_ERROR_1;
+ if (pool_is_free_index (im->esp_backends, backend_idx))
+ return VNET_API_ERROR_INVALID_VALUE;
- if (!is_add) /* delete */
+ /* disable current backend */
+ if (im->esp_current_backend != ~0)
{
- sa_index = p[0];
- sa = pool_elt_at_index (im->sad, sa_index);
- if (ipsec_is_sa_used (sa_index))
+ ipsec_esp_backend_t *cb = pool_elt_at_index (im->esp_backends,
+ im->esp_current_backend);
+ if (cb->enable_disable_cb)
{
- clib_warning ("sa_id %u used in policy", sa->id);
- return VNET_API_ERROR_SYSCALL_ERROR_1; /* sa used in policy */
+ if ((cb->enable_disable_cb) (0) != 0)
+ return -1;
}
- hash_unset (im->sa_index_by_sa_id, sa->id);
- add_del_sa_sess (sa_index, is_add);
- pool_put (im->sad, sa);
}
- else /* create new SA */
+
+ ipsec_esp_backend_t *b = pool_elt_at_index (im->esp_backends, backend_idx);
+ im->esp_current_backend = backend_idx;
+ im->esp4_encrypt_node_index = b->esp4_encrypt_node_index;
+ im->esp4_decrypt_node_index = b->esp4_decrypt_node_index;
+ im->esp4_encrypt_next_index = b->esp4_encrypt_next_index;
+ im->esp4_decrypt_next_index = b->esp4_decrypt_next_index;
+ im->esp6_encrypt_node_index = b->esp6_encrypt_node_index;
+ im->esp6_decrypt_node_index = b->esp6_decrypt_node_index;
+ im->esp6_encrypt_next_index = b->esp6_encrypt_next_index;
+ im->esp6_decrypt_next_index = b->esp6_decrypt_next_index;
+ im->esp4_decrypt_tun_node_index = b->esp4_decrypt_tun_node_index;
+ im->esp4_decrypt_tun_next_index = b->esp4_decrypt_tun_next_index;
+ im->esp6_decrypt_tun_node_index = b->esp6_decrypt_tun_node_index;
+ im->esp6_decrypt_tun_next_index = b->esp6_decrypt_tun_next_index;
+ im->esp4_encrypt_tun_node_index = b->esp4_encrypt_tun_node_index;
+ im->esp6_encrypt_tun_node_index = b->esp6_encrypt_tun_node_index;
+
+ if (b->enable_disable_cb)
{
- pool_get (im->sad, sa);
- clib_memcpy (sa, new_sa, sizeof (*sa));
- sa_index = sa - im->sad;
- hash_set (im->sa_index_by_sa_id, sa->id, sa_index);
- if (add_del_sa_sess (sa_index, is_add) < 0)
- return VNET_API_ERROR_SYSCALL_ERROR_1;
+ if ((b->enable_disable_cb) (1) != 0)
+ return -1;
}
return 0;
}
-int
-ipsec_set_sa_key (vlib_main_t * vm, ipsec_sa_t * sa_update)
+void
+ipsec_set_async_mode (u32 is_enabled)
{
ipsec_main_t *im = &ipsec_main;
- uword *p;
- u32 sa_index;
- ipsec_sa_t *sa = 0;
-
- p = hash_get (im->sa_index_by_sa_id, sa_update->id);
- if (!p)
- return VNET_API_ERROR_SYSCALL_ERROR_1; /* no such sa-id */
+ ipsec_sa_t *sa;
- sa_index = p[0];
- sa = pool_elt_at_index (im->sad, sa_index);
-
- /* new crypto key */
- if (0 < sa_update->crypto_key_len)
- {
- clib_memcpy (sa->crypto_key, sa_update->crypto_key,
- sa_update->crypto_key_len);
- sa->crypto_key_len = sa_update->crypto_key_len;
- }
-
- /* new integ key */
- if (0 < sa_update->integ_key_len)
- {
- clib_memcpy (sa->integ_key, sa_update->integ_key,
- sa_update->integ_key_len);
- sa->integ_key_len = sa_update->integ_key_len;
- }
+ /* lock all SAs before change im->async_mode */
+ pool_foreach (sa, im->sad)
+ {
+ fib_node_lock (&sa->node);
+ }
- if (sa->crypto_key_len + sa->integ_key_len > 0)
- {
- if (add_del_sa_sess (sa_index, 0) < 0)
- return VNET_API_ERROR_SYSCALL_ERROR_1;
- }
+ im->async_mode = is_enabled;
- return 0;
+ /* change SA crypto op data before unlock them */
+ pool_foreach (sa, im->sad)
+ {
+ sa->crypto_op_data = is_enabled ?
+ sa->async_op_data.data : sa->sync_op_data.data;
+ fib_node_unlock (&sa->node);
+ }
}
static void
-ipsec_rand_seed (void)
+crypto_engine_backend_register_post_node (vlib_main_t * vm)
{
- struct
- {
- time_t time;
- pid_t pid;
- void *p;
- } seed_data;
-
- seed_data.time = time (NULL);
- seed_data.pid = getpid ();
- seed_data.p = (void *) &seed_data;
-
- RAND_seed ((const void *) &seed_data, sizeof (seed_data));
+ esp_async_post_next_t *eit;
+ esp_async_post_next_t *dit;
+
+ eit = &esp_encrypt_async_next;
+ eit->esp4_post_next =
+ vnet_crypto_register_post_node (vm, "esp4-encrypt-post");
+ eit->esp6_post_next =
+ vnet_crypto_register_post_node (vm, "esp6-encrypt-post");
+ eit->esp4_tun_post_next =
+ vnet_crypto_register_post_node (vm, "esp4-encrypt-tun-post");
+ eit->esp6_tun_post_next =
+ vnet_crypto_register_post_node (vm, "esp6-encrypt-tun-post");
+
+ dit = &esp_decrypt_async_next;
+ dit->esp4_post_next =
+ vnet_crypto_register_post_node (vm, "esp4-decrypt-post");
+ dit->esp6_post_next =
+ vnet_crypto_register_post_node (vm, "esp6-decrypt-post");
+ dit->esp4_tun_post_next =
+ vnet_crypto_register_post_node (vm, "esp4-decrypt-tun-post");
+ dit->esp6_tun_post_next =
+ vnet_crypto_register_post_node (vm, "esp6-decrypt-tun-post");
}
static clib_error_t *
{
clib_error_t *error;
ipsec_main_t *im = &ipsec_main;
- vlib_thread_main_t *tm = vlib_get_thread_main ();
- vlib_node_t *node;
-
- ipsec_rand_seed ();
+ ipsec_main_crypto_alg_t *a;
- memset (im, 0, sizeof (im[0]));
+ /* Backend registration requires the feature arcs to be set up */
+ if ((error = vlib_call_init_function (vm, vnet_feature_init)))
+ return (error);
im->vnet_main = vnet_get_main ();
im->vlib_main = vm;
im->sa_index_by_sa_id = hash_create (0, sizeof (uword));
im->spd_index_by_sw_if_index = hash_create (0, sizeof (uword));
- vec_validate_aligned (im->empty_buffers, tm->n_vlib_mains - 1,
- CLIB_CACHE_LINE_BYTES);
-
- node = vlib_get_node_by_name (vm, (u8 *) "error-drop");
+ vlib_node_t *node = vlib_get_node_by_name (vm, (u8 *) "error-drop");
ASSERT (node);
im->error_drop_node_index = node->index;
- node = vlib_get_node_by_name (vm, (u8 *) ESP_NODE);
-
- ASSERT (node);
- im->esp_encrypt_node_index = node->index;
-
- node = vlib_get_node_by_name (vm, (u8 *) "ip4-lookup");
- ASSERT (node);
- im->ip4_lookup_node_index = node->index;
+ im->ah_current_backend = ~0;
+ im->esp_current_backend = ~0;
+
+ u32 idx = ipsec_register_ah_backend (vm, im, "crypto engine backend",
+ "ah4-encrypt",
+ "ah4-decrypt",
+ "ah6-encrypt",
+ "ah6-decrypt",
+ ipsec_check_ah_support,
+ NULL);
+
+ im->ah_default_backend = idx;
+ int rv = ipsec_select_ah_backend (im, idx);
+ ASSERT (0 == rv);
+ (void) (rv); // avoid warning
+
+ idx = ipsec_register_esp_backend (vm, im, "crypto engine backend",
+ "esp4-encrypt",
+ "esp4-encrypt-tun",
+ "esp4-decrypt",
+ "esp4-decrypt-tun",
+ "esp6-encrypt",
+ "esp6-encrypt-tun",
+ "esp6-decrypt",
+ "esp6-decrypt-tun",
+ ipsec_check_esp_support,
+ NULL, crypto_dispatch_enable_disable);
+ im->esp_default_backend = idx;
+
+ rv = ipsec_select_esp_backend (im, idx);
+ ASSERT (0 == rv);
+ (void) (rv); // avoid warning
if ((error = vlib_call_init_function (vm, ipsec_cli_init)))
return error;
- if ((error = vlib_call_init_function (vm, ipsec_tunnel_if_init)))
- return error;
-
- esp_init ();
-
- if ((error = ikev2_init (vm)))
- return error;
+ vec_validate (im->crypto_algs, IPSEC_CRYPTO_N_ALG - 1);
+
+ a = im->crypto_algs + IPSEC_CRYPTO_ALG_NONE;
+ a->enc_op_id = VNET_CRYPTO_OP_NONE;
+ a->dec_op_id = VNET_CRYPTO_OP_NONE;
+ a->alg = VNET_CRYPTO_ALG_NONE;
+ a->iv_size = 0;
+ a->block_align = 1;
+
+ a = im->crypto_algs + IPSEC_CRYPTO_ALG_DES_CBC;
+ a->enc_op_id = VNET_CRYPTO_OP_DES_CBC_ENC;
+ a->dec_op_id = VNET_CRYPTO_OP_DES_CBC_DEC;
+ a->alg = VNET_CRYPTO_ALG_DES_CBC;
+ a->iv_size = a->block_align = 8;
+
+ a = im->crypto_algs + IPSEC_CRYPTO_ALG_3DES_CBC;
+ a->enc_op_id = VNET_CRYPTO_OP_3DES_CBC_ENC;
+ a->dec_op_id = VNET_CRYPTO_OP_3DES_CBC_DEC;
+ a->alg = VNET_CRYPTO_ALG_3DES_CBC;
+ a->iv_size = a->block_align = 8;
+
+ a = im->crypto_algs + IPSEC_CRYPTO_ALG_AES_CBC_128;
+ a->enc_op_id = VNET_CRYPTO_OP_AES_128_CBC_ENC;
+ a->dec_op_id = VNET_CRYPTO_OP_AES_128_CBC_DEC;
+ a->alg = VNET_CRYPTO_ALG_AES_128_CBC;
+ a->iv_size = a->block_align = 16;
+
+ a = im->crypto_algs + IPSEC_CRYPTO_ALG_AES_CBC_192;
+ a->enc_op_id = VNET_CRYPTO_OP_AES_192_CBC_ENC;
+ a->dec_op_id = VNET_CRYPTO_OP_AES_192_CBC_DEC;
+ a->alg = VNET_CRYPTO_ALG_AES_192_CBC;
+ a->iv_size = a->block_align = 16;
+
+ a = im->crypto_algs + IPSEC_CRYPTO_ALG_AES_CBC_256;
+ a->enc_op_id = VNET_CRYPTO_OP_AES_256_CBC_ENC;
+ a->dec_op_id = VNET_CRYPTO_OP_AES_256_CBC_DEC;
+ a->alg = VNET_CRYPTO_ALG_AES_256_CBC;
+ a->iv_size = a->block_align = 16;
+
+ a = im->crypto_algs + IPSEC_CRYPTO_ALG_AES_GCM_128;
+ a->enc_op_id = VNET_CRYPTO_OP_AES_128_GCM_ENC;
+ a->dec_op_id = VNET_CRYPTO_OP_AES_128_GCM_DEC;
+ a->alg = VNET_CRYPTO_ALG_AES_128_GCM;
+ a->iv_size = 8;
+ a->block_align = 1;
+ a->icv_size = 16;
+
+ a = im->crypto_algs + IPSEC_CRYPTO_ALG_AES_GCM_192;
+ a->enc_op_id = VNET_CRYPTO_OP_AES_192_GCM_ENC;
+ a->dec_op_id = VNET_CRYPTO_OP_AES_192_GCM_DEC;
+ a->alg = VNET_CRYPTO_ALG_AES_192_GCM;
+ a->iv_size = 8;
+ a->block_align = 1;
+ a->icv_size = 16;
+
+ a = im->crypto_algs + IPSEC_CRYPTO_ALG_AES_GCM_256;
+ a->enc_op_id = VNET_CRYPTO_OP_AES_256_GCM_ENC;
+ a->dec_op_id = VNET_CRYPTO_OP_AES_256_GCM_DEC;
+ a->alg = VNET_CRYPTO_ALG_AES_256_GCM;
+ a->iv_size = 8;
+ a->block_align = 1;
+ a->icv_size = 16;
+
+ vec_validate (im->integ_algs, IPSEC_INTEG_N_ALG - 1);
+ ipsec_main_integ_alg_t *i;
+
+ i = &im->integ_algs[IPSEC_INTEG_ALG_MD5_96];
+ i->op_id = VNET_CRYPTO_OP_MD5_HMAC;
+ i->alg = VNET_CRYPTO_ALG_HMAC_MD5;
+ i->icv_size = 12;
+
+ i = &im->integ_algs[IPSEC_INTEG_ALG_SHA1_96];
+ i->op_id = VNET_CRYPTO_OP_SHA1_HMAC;
+ i->alg = VNET_CRYPTO_ALG_HMAC_SHA1;
+ i->icv_size = 12;
+
+ i = &im->integ_algs[IPSEC_INTEG_ALG_SHA_256_96];
+ /* HMAC-SHA-256 truncated to 96 bits: must use the SHA-256 op,
+ * not SHA-1 (alg below is already HMAC_SHA256) */
+ i->op_id = VNET_CRYPTO_OP_SHA256_HMAC;
+ i->alg = VNET_CRYPTO_ALG_HMAC_SHA256;
+ i->icv_size = 12;
+
+ i = &im->integ_algs[IPSEC_INTEG_ALG_SHA_256_128];
+ i->op_id = VNET_CRYPTO_OP_SHA256_HMAC;
+ i->alg = VNET_CRYPTO_ALG_HMAC_SHA256;
+ i->icv_size = 16;
+
+ i = &im->integ_algs[IPSEC_INTEG_ALG_SHA_384_192];
+ i->op_id = VNET_CRYPTO_OP_SHA384_HMAC;
+ i->alg = VNET_CRYPTO_ALG_HMAC_SHA384;
+ i->icv_size = 24;
+
+ i = &im->integ_algs[IPSEC_INTEG_ALG_SHA_512_256];
+ i->op_id = VNET_CRYPTO_OP_SHA512_HMAC;
+ i->alg = VNET_CRYPTO_ALG_HMAC_SHA512;
+ i->icv_size = 32;
+
+ vec_validate_aligned (im->ptd, vlib_num_workers (), CLIB_CACHE_LINE_BYTES);
+
+ im->ah4_enc_fq_index =
+ vlib_frame_queue_main_init (ah4_encrypt_node.index, 0);
+ im->ah4_dec_fq_index =
+ vlib_frame_queue_main_init (ah4_decrypt_node.index, 0);
+ im->ah6_enc_fq_index =
+ vlib_frame_queue_main_init (ah6_encrypt_node.index, 0);
+ im->ah6_dec_fq_index =
+ vlib_frame_queue_main_init (ah6_decrypt_node.index, 0);
+
+ im->esp4_enc_fq_index =
+ vlib_frame_queue_main_init (esp4_encrypt_node.index, 0);
+ im->esp4_dec_fq_index =
+ vlib_frame_queue_main_init (esp4_decrypt_node.index, 0);
+ im->esp6_enc_fq_index =
+ vlib_frame_queue_main_init (esp6_encrypt_node.index, 0);
+ im->esp6_dec_fq_index =
+ vlib_frame_queue_main_init (esp6_decrypt_node.index, 0);
+ im->esp4_enc_tun_fq_index =
+ vlib_frame_queue_main_init (esp4_encrypt_tun_node.index, 0);
+ im->esp6_enc_tun_fq_index =
+ vlib_frame_queue_main_init (esp6_encrypt_tun_node.index, 0);
+ im->esp4_dec_tun_fq_index =
+ vlib_frame_queue_main_init (esp4_decrypt_tun_node.index, 0);
+ im->esp6_dec_tun_fq_index =
+ vlib_frame_queue_main_init (esp6_decrypt_tun_node.index, 0);
+
+ im->async_mode = 0;
+ crypto_engine_backend_register_post_node (vm);
return 0;
}