dpdk: required changes for 17.08
author Sergio Gonzalez Monroy <sergio.gonzalez.monroy@intel.com>
Thu, 24 Aug 2017 13:09:17 +0000 (14:09 +0100)
committer Damjan Marion <dmarion.lists@gmail.com>
Fri, 25 Aug 2017 14:24:53 +0000 (14:24 +0000)
DPDK 17.08 breaks the ethdev and cryptodev APIs.

Address those changes while keeping backwards compatibility with
DPDK 17.02 and 17.05.

Change-Id: Idd6ac264d0d047fe586c41d4c4ca74e8fc778a54
Signed-off-by: Sergio Gonzalez Monroy <sergio.gonzalez.monroy@intel.com>
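
The backwards compatibility is resolved at build time: the src/plugins/dpdk.am hunk below passes -DDPDK_NO_AEAD and -DDPDK_VOID_CALLBACK (1 for DPDK 17.02/17.05, 0 for 17.08), and the C sources branch on those macros. A minimal sketch of that pattern, not taken from the patch itself (the helper name and the fallback define are illustrative only):

/* Illustration of the gating pattern used throughout this change.
 * DPDK_NO_AEAD is normally supplied by dpdk.am; the fallback below
 * exists only so this standalone sketch compiles on its own. */
#ifndef DPDK_NO_AEAD
#define DPDK_NO_AEAD 1          /* assume DPDK 17.02/17.05 when unset */
#endif

static inline const char *
dpdk_gcm_api_flavor (void)
{
#if DPDK_NO_AEAD
  /* 17.02/17.05: AES-GCM is a cipher transform paired with an auth transform */
  return "RTE_CRYPTO_CIPHER_AES_GCM + RTE_CRYPTO_AUTH_AES_GCM";
#else
  /* 17.08: AES-GCM is a single AEAD transform */
  return "RTE_CRYPTO_AEAD_AES_GCM";
#endif
}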
12 files changed:
Makefile
dpdk/Makefile
src/configure.ac
src/plugins/dpdk.am
src/plugins/dpdk/device/common.c
src/plugins/dpdk/device/dpdk.h
src/plugins/dpdk/ipsec/cli.c
src/plugins/dpdk/ipsec/esp.h
src/plugins/dpdk/ipsec/esp_decrypt.c
src/plugins/dpdk/ipsec/esp_encrypt.c
src/plugins/dpdk/ipsec/ipsec.c
src/plugins/dpdk/ipsec/ipsec.h

index c1a7cbb..6ac6f6e 100644
--- a/Makefile
+++ b/Makefile
@@ -60,7 +60,7 @@ endif
 DEB_DEPENDS  = curl build-essential autoconf automake bison libssl-dev ccache
 DEB_DEPENDS += debhelper dkms git libtool libapr1-dev dh-systemd
 DEB_DEPENDS += libconfuse-dev git-review exuberant-ctags cscope pkg-config
-DEB_DEPENDS += lcov chrpath autoconf nasm indent
+DEB_DEPENDS += lcov chrpath autoconf nasm indent libnuma-dev
 DEB_DEPENDS += python-all python-dev python-virtualenv python-pip libffi6
 ifeq ($(OS_VERSION_ID),14.04)
        DEB_DEPENDS += openjdk-8-jdk-headless
@@ -73,7 +73,7 @@ endif
 
 RPM_DEPENDS  = redhat-lsb glibc-static java-1.8.0-openjdk-devel yum-utils
 RPM_DEPENDS += openssl-devel https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm apr-devel
-RPM_DEPENDS += python-devel
+RPM_DEPENDS += python-devel numactl-devel
 ifeq ($(OS_ID)-$(OS_VERSION_ID),fedora-25)
        RPM_DEPENDS += python2-virtualenv
        RPM_DEPENDS_GROUPS = 'C Development Tools and Libraries'
@@ -99,7 +99,7 @@ endif
 
 RPM_SUSE_DEPENDS = autoconf automake bison ccache chrpath distribution-release gcc6 glibc-devel-static
 RPM_SUSE_DEPENDS += java-1_8_0-openjdk-devel libopenssl-devel libtool lsb-release make openssl-devel
-RPM_SUSE_DEPENDS += python-devel python-pip python-rpm-macros shadow nasm
+RPM_SUSE_DEPENDS += python-devel python-pip python-rpm-macros shadow nasm numactl-devel
 
 ifneq ($(wildcard $(STARTUP_DIR)/startup.conf),)
         STARTUP_CONF ?= $(STARTUP_DIR)/startup.conf
index 2e4b0e9..8d5b42e 100644
--- a/dpdk/Makefile
+++ b/dpdk/Makefile
@@ -35,17 +35,27 @@ DPDK_17.08_TARBALL_MD5_CKSUM := 0641f59ea8ea98afefa7cfa2699f6241
 DPDK_SOURCE := $(B)/dpdk-$(DPDK_VERSION)
 MACHINE=$(shell uname -m)
 
+AESNI ?= n
+ISA_L_CRYPTO_LIB := n
+
+IPSEC_MB_VER ?= 0.46
+ISA_L_CRYPTO_VER := 2.18.0
+
 ifeq ($(MACHINE),$(filter $(MACHINE),x86_64))
-AESNI := y
-else
-AESNI := n
+AESNI = y
+# DPDK pre 17.08 depends on ISA-L Crypto library for GCM PMD
+  ifneq ($(firstword $(sort $(DPDK_VERSION), 17.08)), 17.08)
+  ISA_L_CRYPTO_LIB = y
+  IPSEC_MB_VER = 0.45
+  $(info Building ISA-L Crypto $(ISA_L_CRYPTO_VER) library)
+  endif
+$(info Building IPSec-MB $(IPSEC_MB_VER) library)
 endif
 
-IPSEC_MB_VER := 0.45
 AESNIMB_LIB_TARBALL := v$(IPSEC_MB_VER).tar.gz
 AESNIMB_LIB_TARBALL_URL := http://github.com/01org/intel-ipsec-mb/archive/$(AESNIMB_LIB_TARBALL)
 AESNIMB_LIB_SOURCE := $(B)/intel-ipsec-mb-$(IPSEC_MB_VER)
-ISA_L_CRYPTO_VER := 2.18.0
+
 ISA_L_CRYPTO_LIB_TARBALL := v$(ISA_L_CRYPTO_VER).tar.gz
 ISA_L_CRYPTO_LIB_TARBALL_URL := http://github.com/01org/isa-l_crypto/archive/$(ISA_L_CRYPTO_LIB_TARBALL)
 ISA_L_CRYPTO_LIB_SOURCE := $(B)/isa-l_crypto-$(ISA_L_CRYPTO_VER)
@@ -100,8 +110,10 @@ else
 DPDK_EXTRA_CFLAGS := -g -O0
 endif
 
+ifeq ($(ISA_L_CRYPTO_LIB),y)
 DPDK_EXTRA_CFLAGS += -I$(ISA_L_CRYPTO_INSTALL_DIR)/include -Wl,-z,muldefs
 DPDK_EXTRA_LDFLAGS += -L$(I)/lib
+endif
 DPDK_MAKE_EXTRA_ARGS += AESNI_MULTI_BUFFER_LIB_PATH=$(AESNIMB_LIB_SOURCE)
 
 # assemble DPDK make arguments
@@ -185,6 +197,8 @@ $(CURDIR)/$(ISA_L_CRYPTO_LIB_TARBALL):
 DPDK_DOWNLOADS = $(CURDIR)/$(DPDK_TARBALL)
 ifeq ($(AESNI),y)
 DPDK_DOWNLOADS += $(CURDIR)/$(AESNIMB_LIB_TARBALL)
+endif
+ifeq ($(ISA_L_CRYPTO_LIB),y)
 DPDK_DOWNLOADS += $(CURDIR)/$(ISA_L_CRYPTO_LIB_TARBALL)
 endif
 
@@ -205,10 +219,12 @@ $(B)/.extract.ok: $(B)/.download.ok
 ifeq ($(AESNI),y)
        @echo --- extracting $(AESNIMB_LIB_TARBALL) ---
        @tar --directory $(B) --extract --file $(CURDIR)/$(AESNIMB_LIB_TARBALL)
+endif
+ifeq ($(ISA_L_CRYPTO_LIB),y)
        @echo --- extracting $(ISA_L_CRYPTO_LIB_TARBALL) ---
        @tar --directory $(B) --extract --file $(CURDIR)/$(ISA_L_CRYPTO_LIB_TARBALL)
-       @touch $@
 endif
+       @touch $@
 
 .PHONY: extract
 extract: $(B)/.extract.ok
@@ -233,17 +249,11 @@ $(B)/.config.ok: $(B)/.patch.ok $(B)/custom-config
 .PHONY: config
 config: $(B)/.config.ok
 
-# Order matters
-ifeq ($(AESNI),y)
-BUILD_TARGETS += build-ipsec-mb build-isal-crypto build-dpdk
-else
-BUILD_TARGETS += build-dpdk
-endif
-
 .PHONY: build-ipsec-mb
 build-ipsec-mb:
        mkdir -p $(I)/lib/
-       make -C $(AESNIMB_LIB_SOURCE) -j NO_GCM=y
+       # Do not build GCM stuff if we are building ISA_L
+       make -C $(AESNIMB_LIB_SOURCE) -j NO_GCM=$(ISA_L_CRYPTO_LIB)
        cp $(AESNIMB_LIB_SOURCE)/libIPSec_MB.a $(I)/lib/
 
 .PHONY: build-isal-crypto
@@ -260,6 +270,15 @@ build-dpdk:
        @if [ ! -e $(B)/.config.ok ] ; then echo 'Please run "make config" first' && false ; fi
        @make $(DPDK_MAKE_ARGS) install
 
+# Order matters
+ifeq ($(AESNI),y)
+BUILD_TARGETS += build-ipsec-mb
+endif
+ifeq ($(ISA_L_CRYPTO_LIB),y)
+BUILD_TARGETS += build-isal-crypto
+endif
+BUILD_TARGETS += build-dpdk
+
 $(B)/.build.ok: $(BUILD_TARGETS)
        @touch $@
 
index 4c2d3b4..6b6d963 100644
--- a/src/configure.ac
+++ b/src/configure.ac
@@ -97,6 +97,26 @@ AC_DEFUN([DPDK_IS_PMD_ENABLED],
   m4_append_uniq([list_of_with], [$2], [, ])
 ])
 
+AC_DEFUN([DETECT_DPDK_IS_1702_OR_1705],
+[
+  AC_MSG_CHECKING([for RTE_VERSION 17.02/17.05 in rte_version.h])
+  AC_TRY_RUN(
+    [
+    #include <rte_version.h>
+    int main()
+    {
+      return ((RTE_VER_YEAR != 17) ||
+             (RTE_VER_MONTH != 2 && RTE_VER_MONTH != 5));
+    }
+    ],
+    [dpdk_is_1702_or_1705=yes]
+    [AC_MSG_RESULT([yes])],
+    [dpdk_is_1702_or_1705=no]
+    [AC_MSG_RESULT([no])]
+  )
+  AM_CONDITIONAL(DPDK_IS_1702_OR_1705, test "$dpdk_is_1702_or_1705" = "yes")
+])
+
 ###############################################################################
 # configure arguments
 ###############################################################################
@@ -185,34 +205,64 @@ AM_COND_IF([ENABLE_DPDK_SHARED],
     [AC_MSG_ERROR([DPDK shared library not found])],)
 ])
 
+with_aesni_mb_lib=no
+with_isa_l_crypto_lib=no
+
 DPDK_IS_PMD_ENABLED(LIBRTE_PMD_AESNI_MB, dpdk_aesni_mb_pmd)
+DPDK_IS_PMD_ENABLED(LIBRTE_PMD_AESNI_GCM, dpdk_aesni_gcm_pmd)
+
+DETECT_DPDK_IS_1702_OR_1705()
+
 AM_COND_IF([WITH_DPDK_AESNI_MB_PMD],
 [
-  AC_CHECK_LIB([IPSec_MB], [submit_job_sse], [],
+  AC_CHECK_LIB([IPSec_MB], [submit_job_sse],
+              [with_aesni_mb_lib=yes],
               [AC_MSG_ERROR([IPSec_MB library not found])])
 ])
 
-DPDK_IS_PMD_ENABLED(LIBRTE_PMD_AESNI_GCM, dpdk_aesni_gcm_pmd)
 AM_COND_IF([WITH_DPDK_AESNI_GCM_PMD],
 [
-  AC_CHECK_LIB([isal_crypto], [aesni_gcm128_init], [],
-              [AC_MSG_ERROR([isal_crypto library not found])])
+  AM_COND_IF([DPDK_IS_1702_OR_1705],
+  [
+    AC_CHECK_LIB([isal_crypto], [aesni_gcm128_init],
+                [with_isa_l_crypto_lib=yes],
+                [AC_MSG_ERROR([isal_crypto library not found])])
+  ],
+  [
+    AC_CHECK_LIB([IPSec_MB], [submit_job_sse],
+                [with_aesni_mb_lib=yes],
+                [AC_MSG_ERROR([IPSec_MB library not found])])
+  ])
 ])
 
-DPDK_IS_PMD_ENABLED(LIBRTE_MLX5_PMD, dpdk_mlx5_pmd)
-AM_COND_IF([WITH_DPDK_MLX5_PMD],
+m4_append([list_of_with], [aesni_mb_lib], [, ])
+AM_CONDITIONAL(WITH_AESNI_MB_LIB, test "$with_aesni_mb_lib" = "yes")
+
+m4_append([list_of_with], [isa_l_crypto_lib], [, ])
+AM_CONDITIONAL(WITH_ISA_L_CRYPTO_LIB, test "$with_isa_l_crypto_lib" = "yes")
+
+
+with_ibverbs_lib=no
+DPDK_IS_PMD_ENABLED(LIBRTE_MLX4_PMD, dpdk_mlx4_pmd)
+AM_COND_IF([WITH_DPDK_MLX4_PMD],
 [
-  AC_CHECK_LIB([ibverbs], [ibv_fork_init], [],
+  AC_CHECK_LIB([ibverbs], [ibv_fork_init],
+              [with_ibverbs_lib=yes],
               [AC_MSG_ERROR([ibverbs library not found])])
 ])
 
-DPDK_IS_PMD_ENABLED(LIBRTE_MLX4_PMD, dpdk_mlx4_pmd)
-AM_COND_IF([WITH_DPDK_MLX4_PMD],
+DPDK_IS_PMD_ENABLED(LIBRTE_MLX5_PMD, dpdk_mlx5_pmd)
+AM_COND_IF([WITH_DPDK_MLX5_PMD],
 [
-  AC_CHECK_LIB([ibverbs], [ibv_fork_init], [],
+  AC_CHECK_LIB([ibverbs], [ibv_fork_init],
+              [with_ibverbs_lib=yes],
               [AC_MSG_ERROR([ibverbs library not found])])
 ])
 
+m4_append([list_of_with], [ibverbs_lib], [, ])
+AM_CONDITIONAL(WITH_IBVERBS_LIB, test "$with_ibverbs_lib" = "yes")
+
+
 AM_COND_IF([ENABLE_G2],
 [
   PKG_CHECK_MODULES(g2, gtk+-2.0)
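
The DETECT_DPDK_IS_1702_OR_1705 macro added above compiles and runs a small test program against rte_version.h. For reference, an equivalent standalone program (not part of the patch; the printf output is illustrative) showing the version macros the check relies on:

#include <stdio.h>
#include <rte_version.h>

int
main (void)
{
  /* Mirrors the AC_TRY_RUN body above: exit status 0 means the installed
   * DPDK is 17.02 or 17.05, anything else means 17.08 (or newer). */
  int pre_1708 = (RTE_VER_YEAR == 17) &&
    (RTE_VER_MONTH == 2 || RTE_VER_MONTH == 5);

  printf ("detected %s -> DPDK_IS_1702_OR_1705=%s\n",
          rte_version (), pre_1708 ? "yes" : "no");

  return pre_1708 ? 0 : 1;
}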
index 3a1ffee..15195a2 100644
--- a/src/plugins/dpdk.am
+++ b/src/plugins/dpdk.am
@@ -19,20 +19,24 @@ dpdk_plugin_la_LDFLAGS = $(AM_LDFLAGS) -ldpdk
 else
 dpdk_plugin_la_LDFLAGS = $(AM_LDFLAGS) -Wl,--whole-archive,-l:libdpdk.a,--no-whole-archive
 endif
-if WITH_DPDK_AESNI_MB_PMD
+if WITH_AESNI_MB_LIB
 dpdk_plugin_la_LDFLAGS += -Wl,--exclude-libs,libIPSec_MB.a,-l:libIPSec_MB.a
 endif
-if WITH_DPDK_AESNI_GCM_PMD
+if WITH_ISA_L_CRYPTO_LIB
 dpdk_plugin_la_LDFLAGS += -Wl,--exclude-libs,libisal_crypto.a,-l:libisal_crypto.a
 endif
-dpdk_plugin_la_LDFLAGS += -Wl,-lm,-ldl
-if WITH_DPDK_MLX5_PMD
+if WITH_IBVERBS_LIB
 dpdk_plugin_la_LDFLAGS += -Wl,-libverbs
 endif
-if WITH_DPDK_MLX4_PMD
-dpdk_plugin_la_LDFLAGS += -Wl,-libverbs
+if DPDK_IS_1702_OR_1705
+dpdk_plugin_la_CFLAGS = $(AM_CFLAGS) -DDPDK_VOID_CALLBACK=1 -DDPDK_NO_AEAD=1
+else
+dpdk_plugin_la_CFLAGS = $(AM_CFLAGS) -DDPDK_VOID_CALLBACK=0 -DDPDK_NO_AEAD=0
+dpdk_plugin_la_LDFLAGS += -Wl,-lnuma
 endif
 
+dpdk_plugin_la_LDFLAGS += -Wl,-lm,-ldl
+
 dpdk_plugin_la_SOURCES =                                       \
   dpdk/main.c                                                  \
   dpdk/buffer.c                                                        \
index df52c58..2707b4d 100644
--- a/src/plugins/dpdk/device/common.c
+++ b/src/plugins/dpdk/device/common.c
@@ -181,9 +181,9 @@ dpdk_device_stop (dpdk_device_t * xd)
     }
 }
 
-void
-dpdk_port_state_callback (uint8_t port_id,
-                         enum rte_eth_event_type type, void *param)
+always_inline int
+dpdk_port_state_callback_inline (uint8_t port_id,
+                                enum rte_eth_event_type type, void *param)
 {
   struct rte_eth_link link;
   vlib_main_t *vm = vlib_get_main ();
@@ -193,7 +193,7 @@ dpdk_port_state_callback (uint8_t port_id,
   if (type != RTE_ETH_EVENT_INTR_LSC)
     {
       clib_warning ("Unknown event %d received for port %d", type, port_id);
-      return;
+      return -1;
     }
 
   rte_eth_link_get_nowait (port_id, &link);
@@ -238,8 +238,28 @@ dpdk_port_state_callback (uint8_t port_id,
       else
        clib_warning ("Port %d Link Down\n\n", port_id);
     }
+
+  return 0;
+}
+
+#if DPDK_VOID_CALLBACK
+void
+dpdk_port_state_callback (uint8_t port_id,
+                         enum rte_eth_event_type type, void *param)
+{
+  dpdk_port_state_callback_inline (port_id, type, param);
 }
 
+#else
+int
+dpdk_port_state_callback (uint8_t port_id,
+                         enum rte_eth_event_type type,
+                         void *param,
+                         void *ret_param __attribute__ ((unused)))
+{
+  return dpdk_port_state_callback_inline (port_id, type, param);
+}
+#endif
 /*
  * fd.io coding-style-patch-verification: ON
  *
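
DPDK 17.08 changes the ethdev event callback type from returning void to returning int and adds a ret_param argument, hence the shared inline wrapped in two conditionally compiled entry points above. A hedged sketch of a 17.08-style callback and its registration (the registration site is not part of this hunk; the function names here are illustrative):

#include <stdio.h>
#include <rte_ethdev.h>

/* Hedged sketch, not from the patch: a minimal 17.08-style link state
 * change callback and its registration.  On 17.02/17.05 the callback
 * type is void (*) (uint8_t, enum rte_eth_event_type, void *), which is
 * why the patch keeps both prototypes behind DPDK_VOID_CALLBACK. */
static int
example_lsc_callback (uint8_t port_id, enum rte_eth_event_type type,
                      void *param, void *ret_param)
{
  (void) param;
  (void) ret_param;
  if (type == RTE_ETH_EVENT_INTR_LSC)
    printf ("port %u: link state changed\n", port_id);
  return 0;
}

static void
example_register_lsc (uint8_t port_id)
{
  rte_eth_dev_callback_register (port_id, RTE_ETH_EVENT_INTR_LSC,
                                 example_lsc_callback, NULL);
}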
index 29a2c76..1e34e3f 100644
--- a/src/plugins/dpdk/device/dpdk.h
+++ b/src/plugins/dpdk/device/dpdk.h
@@ -418,8 +418,15 @@ typedef struct
 void dpdk_device_setup (dpdk_device_t * xd);
 void dpdk_device_start (dpdk_device_t * xd);
 void dpdk_device_stop (dpdk_device_t * xd);
+
+#if DPDK_VOID_CALLBACK
 void dpdk_port_state_callback (uint8_t port_id,
                               enum rte_eth_event_type type, void *param);
+#else
+int dpdk_port_state_callback (uint8_t port_id,
+                             enum rte_eth_event_type type,
+                             void *param, void *ret_param);
+#endif
 
 #define foreach_dpdk_error                                             \
   _(NONE, "no error")                                                  \
index a931406..a9cf250 100644
--- a/src/plugins/dpdk/ipsec/cli.c
+++ b/src/plugins/dpdk/ipsec/cli.c
@@ -86,13 +86,28 @@ dpdk_ipsec_show_mapping (vlib_main_t * vm, u16 detail_display)
          hash_foreach (key, data, cwm->algo_qp_map,
          ({
            cap.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
+#if DPDK_NO_AEAD
            cap.sym.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER;
            cap.sym.cipher.algo = p_key->cipher_algo;
+#else
+           if (p_key->is_aead)
+             {
+               cap.sym.xform_type = RTE_CRYPTO_SYM_XFORM_AEAD;
+               cap.sym.aead.algo = p_key->cipher_algo;
+             }
+           else
+             {
+               cap.sym.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER;
+               cap.sym.cipher.algo = p_key->cipher_algo;
+             }
+#endif
            check_algo_is_supported (&cap, cipher_str);
+
            cap.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
            cap.sym.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH;
            cap.sym.auth.algo = p_key->auth_algo;
            check_algo_is_supported (&cap, auth_str);
+
            vlib_cli_output (vm, "%u\t%10s\t%15s\t%3s\t%u\t%u\n",
                             vlib_mains[i]->thread_index, cipher_str, auth_str,
                             p_key->is_outbound ? "out" : "in",
index 56f0c75..308a66a 100644
--- a/src/plugins/dpdk/ipsec/esp.h
+++ b/src/plugins/dpdk/ipsec/esp.h
@@ -22,6 +22,9 @@
 typedef struct
 {
   enum rte_crypto_cipher_algorithm algo;
+#if ! DPDK_NO_AEAD
+  enum rte_crypto_aead_algorithm aead_algo;
+#endif
   u8 key_len;
   u8 iv_len;
 } dpdk_esp_crypto_alg_t;
@@ -65,7 +68,11 @@ dpdk_esp_init ()
   c->iv_len = 16;
 
   c = &em->esp_crypto_algs[IPSEC_CRYPTO_ALG_AES_GCM_128];
+#if DPDK_NO_AEAD
   c->algo = RTE_CRYPTO_CIPHER_AES_GCM;
+#else
+  c->aead_algo = RTE_CRYPTO_AEAD_AES_GCM;
+#endif
   c->key_len = 16;
   c->iv_len = 8;
 
@@ -90,42 +97,68 @@ dpdk_esp_init ()
   i = &em->esp_integ_algs[IPSEC_INTEG_ALG_SHA_512_256];
   i->algo = RTE_CRYPTO_AUTH_SHA512_HMAC;
   i->trunc_size = 32;
-
+#if DPDK_NO_AEAD
   i = &em->esp_integ_algs[IPSEC_INTEG_ALG_AES_GCM_128];
   i->algo = RTE_CRYPTO_AUTH_AES_GCM;
   i->trunc_size = 16;
+#endif
 }
 
 static_always_inline int
 translate_crypto_algo (ipsec_crypto_alg_t crypto_algo,
-                      struct rte_crypto_sym_xform *cipher_xform)
+                      struct rte_crypto_sym_xform *xform, u8 use_esn)
 {
+#if ! DPDK_NO_AEAD
+  const u16 iv_off =
+    sizeof (struct rte_crypto_op) + sizeof (struct rte_crypto_sym_op) +
+    offsetof (dpdk_cop_priv_t, cb);
+#endif
+
+  xform->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
+
   switch (crypto_algo)
     {
     case IPSEC_CRYPTO_ALG_NONE:
-      cipher_xform->cipher.algo = RTE_CRYPTO_CIPHER_NULL;
+#if ! DPDK_NO_AEAD
+      xform->cipher.iv.offset = iv_off;
+      xform->cipher.iv.length = 0;
+#endif
+      xform->cipher.algo = RTE_CRYPTO_CIPHER_NULL;
       break;
     case IPSEC_CRYPTO_ALG_AES_CBC_128:
     case IPSEC_CRYPTO_ALG_AES_CBC_192:
     case IPSEC_CRYPTO_ALG_AES_CBC_256:
-      cipher_xform->cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
+#if ! DPDK_NO_AEAD
+      xform->cipher.iv.offset = iv_off;
+      xform->cipher.iv.length = 16;
+#endif
+      xform->cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
       break;
     case IPSEC_CRYPTO_ALG_AES_GCM_128:
-      cipher_xform->cipher.algo = RTE_CRYPTO_CIPHER_AES_GCM;
+#if DPDK_NO_AEAD
+      xform->cipher.algo = RTE_CRYPTO_CIPHER_AES_GCM;
+#else
+      xform->type = RTE_CRYPTO_SYM_XFORM_AEAD;
+      xform->aead.algo = RTE_CRYPTO_AEAD_AES_GCM;
+      xform->aead.iv.offset = iv_off;
+      xform->aead.iv.length = 12;      /* GCM IV, not ESP IV */
+      xform->aead.digest_length = 16;
+      xform->aead.aad_length = use_esn ? 12 : 8;
+#endif
       break;
     default:
       return -1;
     }
 
-  cipher_xform->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
-
   return 0;
 }
 
 static_always_inline int
 translate_integ_algo (ipsec_integ_alg_t integ_alg,
-                     struct rte_crypto_sym_xform *auth_xform, int use_esn)
+                     struct rte_crypto_sym_xform *auth_xform, u8 use_esn)
 {
+  auth_xform->type = RTE_CRYPTO_SYM_XFORM_AUTH;
+
   switch (integ_alg)
     {
     case IPSEC_INTEG_ALG_NONE:
@@ -152,21 +185,21 @@ translate_integ_algo (ipsec_integ_alg_t integ_alg,
       auth_xform->auth.algo = RTE_CRYPTO_AUTH_SHA512_HMAC;
       auth_xform->auth.digest_length = 32;
       break;
+#if DPDK_NO_AEAD
     case IPSEC_INTEG_ALG_AES_GCM_128:
       auth_xform->auth.algo = RTE_CRYPTO_AUTH_AES_GCM;
       auth_xform->auth.digest_length = 16;
       auth_xform->auth.add_auth_data_length = use_esn ? 12 : 8;
       break;
+#endif
     default:
       return -1;
     }
 
-  auth_xform->type = RTE_CRYPTO_SYM_XFORM_AUTH;
-
   return 0;
 }
 
-static_always_inline int
+static_always_inline i32
 create_sym_sess (ipsec_sa_t * sa, crypto_sa_session_t * sa_sess,
                 u8 is_outbound)
 {
@@ -178,6 +211,10 @@ create_sym_sess (ipsec_sa_t * sa, crypto_sa_session_t * sa_sess,
   struct rte_crypto_sym_xform *xfs;
   uword key = 0, *data;
   crypto_worker_qp_key_t *p_key = (crypto_worker_qp_key_t *) & key;
+#if ! DPDK_NO_AEAD
+  i32 socket_id = rte_socket_id ();
+  i32 ret;
+#endif
 
   if (sa->crypto_alg == IPSEC_CRYPTO_ALG_AES_GCM_128)
     {
@@ -190,15 +227,7 @@ create_sym_sess (ipsec_sa_t * sa, crypto_sa_session_t * sa_sess,
       sa->salt = random_u32 (&seed);
     }
 
-  cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
-  cipher_xform.cipher.key.data = sa->crypto_key;
-  cipher_xform.cipher.key.length = sa->crypto_key_len;
-
-  auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
-  auth_xform.auth.key.data = sa->integ_key;
-  auth_xform.auth.key.length = sa->integ_key_len;
-
-  if (translate_crypto_algo (sa->crypto_alg, &cipher_xform) < 0)
+  if (translate_crypto_algo (sa->crypto_alg, &cipher_xform, sa->use_esn) < 0)
     return -1;
   p_key->cipher_algo = cipher_xform.cipher.algo;
 
@@ -206,19 +235,44 @@ create_sym_sess (ipsec_sa_t * sa, crypto_sa_session_t * sa_sess,
     return -1;
   p_key->auth_algo = auth_xform.auth.algo;
 
-  if (is_outbound)
+#if ! DPDK_NO_AEAD
+  if (sa->crypto_alg == IPSEC_CRYPTO_ALG_AES_GCM_128)
     {
-      cipher_xform.cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
-      auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
-      cipher_xform.next = &auth_xform;
+      cipher_xform.aead.key.data = sa->crypto_key;
+      cipher_xform.aead.key.length = sa->crypto_key_len;
+
+      if (is_outbound)
+       cipher_xform.cipher.op = RTE_CRYPTO_AEAD_OP_ENCRYPT;
+      else
+       cipher_xform.cipher.op = RTE_CRYPTO_AEAD_OP_DECRYPT;
+      cipher_xform.next = NULL;
       xfs = &cipher_xform;
+      p_key->is_aead = 1;
     }
-  else
+  else                         /* Cipher + Auth */
+#endif
     {
-      cipher_xform.cipher.op = RTE_CRYPTO_CIPHER_OP_DECRYPT;
-      auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_VERIFY;
-      auth_xform.next = &cipher_xform;
-      xfs = &auth_xform;
+      cipher_xform.cipher.key.data = sa->crypto_key;
+      cipher_xform.cipher.key.length = sa->crypto_key_len;
+
+      auth_xform.auth.key.data = sa->integ_key;
+      auth_xform.auth.key.length = sa->integ_key_len;
+
+      if (is_outbound)
+       {
+         cipher_xform.cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
+         auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
+         cipher_xform.next = &auth_xform;
+         xfs = &cipher_xform;
+       }
+      else
+       {
+         cipher_xform.cipher.op = RTE_CRYPTO_CIPHER_OP_DECRYPT;
+         auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_VERIFY;
+         auth_xform.next = &cipher_xform;
+         xfs = &auth_xform;
+       }
+      p_key->is_aead = 0;
     }
 
   p_key->is_outbound = is_outbound;
@@ -227,17 +281,115 @@ create_sym_sess (ipsec_sa_t * sa, crypto_sa_session_t * sa_sess,
   if (!data)
     return -1;
 
+#if DPDK_NO_AEAD
   sa_sess->sess =
     rte_cryptodev_sym_session_create (cwm->qp_data[*data].dev_id, xfs);
-
   if (!sa_sess->sess)
     return -1;
+#else
+  sa_sess->sess =
+    rte_cryptodev_sym_session_create (dcm->sess_h_pools[socket_id]);
+  if (!sa_sess->sess)
+    return -1;
+
+  ret =
+    rte_cryptodev_sym_session_init (cwm->qp_data[*data].dev_id, sa_sess->sess,
+                                   xfs, dcm->sess_pools[socket_id]);
+  if (ret)
+    return -1;
+#endif
 
   sa_sess->qp_index = (u8) * data;
 
   return 0;
 }
 
+static_always_inline void
+crypto_set_icb (dpdk_gcm_cnt_blk * icb, u32 salt, u32 seq, u32 seq_hi)
+{
+  icb->salt = salt;
+  icb->iv[0] = seq;
+  icb->iv[1] = seq_hi;
+#if DPDK_NO_AEAD
+  icb->cnt = clib_host_to_net_u32 (1);
+#endif
+}
+
+#define __unused __attribute__((unused))
+static_always_inline void
+crypto_op_setup (u8 is_aead, struct rte_mbuf *mb0,
+                struct rte_crypto_op *cop, void *session,
+                u32 cipher_off, u32 cipher_len,
+                u8 * icb __unused, u32 iv_size __unused,
+                u32 auth_off, u32 auth_len,
+                u8 * aad __unused, u32 aad_size __unused,
+                u8 * digest, u64 digest_paddr, u32 digest_size __unused)
+{
+  struct rte_crypto_sym_op *sym_cop;
+
+  sym_cop = (struct rte_crypto_sym_op *) (cop + 1);
+
+  sym_cop->m_src = mb0;
+  rte_crypto_op_attach_sym_session (cop, session);
+
+  if (!digest_paddr)
+    digest_paddr =
+      rte_pktmbuf_mtophys_offset (mb0, (uintptr_t) digest - (uintptr_t) mb0);
+
+#if DPDK_NO_AEAD
+  sym_cop->cipher.data.offset = cipher_off;
+  sym_cop->cipher.data.length = cipher_len;
+
+  sym_cop->cipher.iv.data = icb;
+  sym_cop->cipher.iv.phys_addr =
+    cop->phys_addr + (uintptr_t) icb - (uintptr_t) cop;
+  sym_cop->cipher.iv.length = iv_size;
+
+  if (is_aead)
+    {
+      sym_cop->auth.aad.data = aad;
+      sym_cop->auth.aad.phys_addr =
+       cop->phys_addr + (uintptr_t) aad - (uintptr_t) cop;
+      sym_cop->auth.aad.length = aad_size;
+    }
+  else
+    {
+      sym_cop->auth.data.offset = auth_off;
+      sym_cop->auth.data.length = auth_len;
+    }
+
+  sym_cop->auth.digest.data = digest;
+  sym_cop->auth.digest.phys_addr = digest_paddr;
+  sym_cop->auth.digest.length = digest_size;
+#else /* ! DPDK_NO_AEAD */
+  if (is_aead)
+    {
+      sym_cop->aead.data.offset = cipher_off;
+      sym_cop->aead.data.length = cipher_len;
+
+      sym_cop->aead.aad.data = aad;
+      sym_cop->aead.aad.phys_addr =
+       cop->phys_addr + (uintptr_t) aad - (uintptr_t) cop;
+
+      sym_cop->aead.digest.data = digest;
+      sym_cop->aead.digest.phys_addr = digest_paddr;
+    }
+  else
+    {
+      sym_cop->cipher.data.offset = cipher_off;
+      sym_cop->cipher.data.length = cipher_len;
+
+      sym_cop->auth.data.offset = auth_off;
+      sym_cop->auth.data.length = auth_len;
+
+      sym_cop->auth.digest.data = digest;
+      sym_cop->auth.digest.phys_addr = digest_paddr;
+    }
+#endif /* DPDK_NO_AEAD */
+}
+
+#undef __unused
+
 #endif /* __DPDK_ESP_H__ */
 
 /*
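
With DPDK 17.08 the AES-GCM case is expressed as a single AEAD transform whose IV is read from a fixed offset inside the crypto op, instead of the former cipher+auth transform pair; that is what translate_crypto_algo() and crypto_op_setup() above implement. A hedged sketch of filling such a 17.08 AEAD transform (the function name, key buffer and iv_offset parameter are assumptions of this example; in the patch the IV lives in the per-op private area, dpdk_cop_priv_t):

#include <string.h>
#include <rte_crypto.h>

static void
fill_gcm_aead_xform (struct rte_crypto_sym_xform *xform,
                     uint8_t * key, uint16_t iv_offset, int use_esn)
{
  memset (xform, 0, sizeof (*xform));
  xform->type = RTE_CRYPTO_SYM_XFORM_AEAD;
  xform->next = NULL;

  xform->aead.algo = RTE_CRYPTO_AEAD_AES_GCM;
  xform->aead.op = RTE_CRYPTO_AEAD_OP_ENCRYPT;
  xform->aead.key.data = key;
  xform->aead.key.length = 16;          /* AES-GCM-128 */
  xform->aead.iv.offset = iv_offset;    /* IV is taken from the crypto op */
  xform->aead.iv.length = 12;           /* GCM IV, not the 8-byte ESP IV */
  xform->aead.digest_length = 16;
  xform->aead.aad_length = use_esn ? 12 : 8;    /* SPI + SEQ (+ ESN) */
}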
index 9377970..c4f295d 100644
--- a/src/plugins/dpdk/ipsec/esp_decrypt.c
+++ b/src/plugins/dpdk/ipsec/esp_decrypt.c
@@ -44,8 +44,7 @@ typedef enum {
  _(NOT_IP, "Not IP packet (dropped)")           \
  _(ENQ_FAIL, "Enqueue failed (buffer full)")     \
  _(NO_CRYPTODEV, "Cryptodev not configured")     \
- _(BAD_LEN, "Invalid ciphertext length")         \
- _(UNSUPPORTED, "Cipher/Auth not supported")
+ _(BAD_LEN, "Invalid ciphertext length")
 
 
 typedef enum {
@@ -122,7 +121,7 @@ dpdk_esp_decrypt_node_fn (vlib_main_t * vm,
 
       while (n_left_from > 0 && n_left_to_next > 0)
        {
-         u32 bi0, sa_index0 = ~0, seq, icv_size, iv_size;
+         u32 bi0, sa_index0 = ~0, seq, trunc_size, iv_size;
          vlib_buffer_t * b0;
          esp_header_t * esp0;
          ipsec_sa_t * sa0;
@@ -169,18 +168,6 @@ dpdk_esp_decrypt_node_fn (vlib_main_t * vm,
 
          sa0->total_data_size += b0->current_length;
 
-         if (PREDICT_FALSE(sa0->integ_alg == IPSEC_INTEG_ALG_NONE) ||
-                 PREDICT_FALSE(sa0->crypto_alg == IPSEC_CRYPTO_ALG_NONE))
-           {
-             clib_warning ("SPI %u : only cipher + auth supported", sa0->spi);
-             vlib_node_increment_counter (vm, dpdk_esp_decrypt_node.index,
-                                          ESP_DECRYPT_ERROR_UNSUPPORTED, 1);
-             to_next[0] = bi0;
-             to_next += 1;
-             n_left_to_next -= 1;
-             goto trace;
-           }
-
          sa_sess = pool_elt_at_index(cwm->sa_sess_d[0], sa_index0);
 
          if (PREDICT_FALSE(!sa_sess->sess))
@@ -211,7 +198,10 @@ dpdk_esp_decrypt_node_fn (vlib_main_t * vm,
 
          rte_crypto_op_attach_sym_session(cop, sess);
 
-         icv_size = em->esp_integ_algs[sa0->integ_alg].trunc_size;
+         if (sa0->crypto_alg == IPSEC_CRYPTO_ALG_AES_GCM_128)
+           trunc_size = 16;
+         else
+           trunc_size = em->esp_integ_algs[sa0->integ_alg].trunc_size;
          iv_size = em->esp_crypto_algs[sa0->crypto_alg].iv_len;
 
          /* Convert vlib buffer to mbuf */
@@ -222,7 +212,7 @@ dpdk_esp_decrypt_node_fn (vlib_main_t * vm,
 
          /* Outer IP header has already been stripped */
          u16 payload_len = rte_pktmbuf_pkt_len(mb0) - sizeof (esp_header_t) -
-             iv_size - icv_size;
+             iv_size - trunc_size;
 
          if ((payload_len & (BLOCK_SIZE - 1)) || (payload_len <= 0))
            {
@@ -242,84 +232,64 @@ dpdk_esp_decrypt_node_fn (vlib_main_t * vm,
 
          struct rte_crypto_sym_op *sym_cop = (struct rte_crypto_sym_op *)(cop + 1);
 
-         sym_cop->m_src = mb0;
-         sym_cop->cipher.data.offset = sizeof (esp_header_t) + iv_size;
-         sym_cop->cipher.data.length = payload_len;
+         u8 is_aead = sa0->crypto_alg == IPSEC_CRYPTO_ALG_AES_GCM_128;
+         u32 cipher_off, cipher_len;
+         u32 auth_off = 0, auth_len = 0, aad_size = 0;
+         u8 *aad = NULL, *digest = NULL;
+         u64 digest_paddr = 0;
 
           u8 *iv = rte_pktmbuf_mtod_offset(mb0, void*, sizeof (esp_header_t));
-          dpdk_cop_priv_t * priv = (dpdk_cop_priv_t *)(sym_cop + 1);
+          dpdk_cop_priv_t *priv = (dpdk_cop_priv_t *)(sym_cop + 1);
+          dpdk_gcm_cnt_blk *icb = &priv->cb;
+
+         cipher_off = sizeof (esp_header_t) + iv_size;
+         cipher_len = payload_len;
 
-          if (sa0->crypto_alg == IPSEC_CRYPTO_ALG_AES_GCM_128)
+          digest =
+           vlib_buffer_get_current (b0) + sizeof(esp_header_t) +
+           iv_size + payload_len;
+
+          if (is_aead)
             {
-              dpdk_gcm_cnt_blk *icb = &priv->cb;
-              icb->salt = sa0->salt;
-              clib_memcpy(icb->iv, iv, 8);
-              icb->cnt = clib_host_to_net_u32(1);
-              sym_cop->cipher.iv.data = (u8 *)icb;
-              sym_cop->cipher.iv.phys_addr = cop->phys_addr +
-               (uintptr_t)icb - (uintptr_t)cop;
-              sym_cop->cipher.iv.length = 16;
-
-              u8 *aad = priv->aad;
-              clib_memcpy(aad, iv - sizeof(esp_header_t), 8);
-              sym_cop->auth.aad.data = aad;
-              sym_cop->auth.aad.phys_addr = cop->phys_addr +
-                  (uintptr_t)aad - (uintptr_t)cop;
-              if (sa0->use_esn)
-                {
-                  *((u32*)&aad[8]) = sa0->seq_hi;
-                  sym_cop->auth.aad.length = 12;
-                }
-              else
-                {
-                  sym_cop->auth.aad.length = 8;
-                }
+             u32 *_iv = (u32 *) iv;
 
-              sym_cop->auth.digest.data = rte_pktmbuf_mtod_offset(mb0, void*,
-                       rte_pktmbuf_pkt_len(mb0) - icv_size);
-              sym_cop->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(mb0,
-                       rte_pktmbuf_pkt_len(mb0) - icv_size);
-              sym_cop->auth.digest.length = icv_size;
+             crypto_set_icb (icb, sa0->salt, _iv[0], _iv[1]);
+             iv_size = 16;
 
+              aad = priv->aad;
+              clib_memcpy(aad, esp0, 8);
+             aad_size = 8;
+              if (sa0->use_esn)
+               {
+                 *((u32*)&aad[8]) = sa0->seq_hi;
+                 aad_size = 12;
+               }
             }
           else
             {
-              sym_cop->cipher.iv.data = rte_pktmbuf_mtod_offset(mb0, void*,
-                       sizeof (esp_header_t));
-              sym_cop->cipher.iv.phys_addr = rte_pktmbuf_mtophys_offset(mb0,
-                       sizeof (esp_header_t));
-              sym_cop->cipher.iv.length = iv_size;
+             clib_memcpy(icb, iv, 16);
+
+             auth_off = 0;
+             auth_len = sizeof(esp_header_t) + iv_size + payload_len;
 
               if (sa0->use_esn)
                 {
                   dpdk_cop_priv_t* priv = (dpdk_cop_priv_t*) (sym_cop + 1);
-                  u8* payload_end = rte_pktmbuf_mtod_offset(
-                      mb0, u8*, sizeof(esp_header_t) + iv_size + payload_len);
-
-                  clib_memcpy (priv->icv, payload_end, icv_size);
-                  *((u32*) payload_end) = sa0->seq_hi;
-                  sym_cop->auth.data.offset = 0;
-                  sym_cop->auth.data.length = sizeof(esp_header_t) + iv_size
-                      + payload_len + sizeof(sa0->seq_hi);
-                  sym_cop->auth.digest.data = priv->icv;
-                  sym_cop->auth.digest.phys_addr = cop->phys_addr
-                      + (uintptr_t) priv->icv - (uintptr_t) cop;
-                  sym_cop->auth.digest.length = icv_size;
-                }
-              else
-                {
-                  sym_cop->auth.data.offset = 0;
-                  sym_cop->auth.data.length = sizeof(esp_header_t) +
-                           iv_size + payload_len;
-
-                  sym_cop->auth.digest.data = rte_pktmbuf_mtod_offset(mb0, void*,
-                           rte_pktmbuf_pkt_len(mb0) - icv_size);
-                  sym_cop->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(mb0,
-                           rte_pktmbuf_pkt_len(mb0) - icv_size);
-                  sym_cop->auth.digest.length = icv_size;
+
+                  clib_memcpy (priv->icv, digest, trunc_size);
+                  *((u32*) digest) = sa0->seq_hi;
+                 auth_len += sizeof(sa0->seq_hi);
+
+                  digest = priv->icv;
+                 digest_paddr =
+                   cop->phys_addr + (uintptr_t) priv->icv - (uintptr_t) cop;
                 }
             }
 
+         crypto_op_setup (is_aead, mb0, cop, sess,
+                          cipher_off, cipher_len, (u8 *) icb, iv_size,
+                          auth_off, auth_len, aad, aad_size,
+                          digest, digest_paddr, trunc_size);
 trace:
          if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
            {
@@ -339,6 +309,9 @@ trace:
     {
       u32 enq;
 
+      if (!n_cop_qp[i])
+       continue;
+
       qpd = vec_elt_at_index(cwm->qp_data, i);
       enq = rte_cryptodev_enqueue_burst(qpd->dev_id, qpd->qp_id,
                                        qpd->cops, n_cop_qp[i]);
@@ -433,7 +406,7 @@ dpdk_esp_decrypt_post_node_fn (vlib_main_t * vm,
       while (n_left_from > 0 && n_left_to_next > 0)
        {
          esp_footer_t * f0;
-         u32 bi0, next0, icv_size, iv_size;
+         u32 bi0, next0, trunc_size, iv_size;
          vlib_buffer_t * b0 = 0;
          ip4_header_t *ih4 = 0, *oh4 = 0;
          ip6_header_t *ih6 = 0, *oh6 = 0;
@@ -455,7 +428,10 @@ dpdk_esp_decrypt_post_node_fn (vlib_main_t * vm,
          to_next[0] = bi0;
          to_next += 1;
 
-         icv_size = em->esp_integ_algs[sa0->integ_alg].trunc_size;
+         if (sa0->crypto_alg == IPSEC_CRYPTO_ALG_AES_GCM_128)
+           trunc_size = 16;
+         else
+           trunc_size = em->esp_integ_algs[sa0->integ_alg].trunc_size;
          iv_size = em->esp_crypto_algs[sa0->crypto_alg].iv_len;
 
          if (sa0->use_anti_replay)
@@ -472,7 +448,7 @@ dpdk_esp_decrypt_post_node_fn (vlib_main_t * vm,
          ih4 = (ip4_header_t *) (b0->data + sizeof(ethernet_header_t));
          vlib_buffer_advance (b0, sizeof (esp_header_t) + iv_size);
 
-         b0->current_length -= (icv_size + 2);
+         b0->current_length -= (trunc_size + 2);
          b0->flags |= VLIB_BUFFER_TOTAL_LENGTH_VALID;
          f0 = (esp_footer_t *) ((u8 *) vlib_buffer_get_current (b0) +
                                 b0->current_length);
index ac552f6..6de444f 100644
--- a/src/plugins/dpdk/ipsec/esp_encrypt.c
+++ b/src/plugins/dpdk/ipsec/esp_encrypt.c
@@ -43,8 +43,7 @@ typedef enum
  _(RX_PKTS, "ESP pkts received")                    \
  _(SEQ_CYCLED, "sequence number cycled")            \
  _(ENQ_FAIL, "Enqueue failed (buffer full)")        \
- _(NO_CRYPTODEV, "Cryptodev not configured")        \
- _(UNSUPPORTED, "Cipher/Auth not supported")
+ _(NO_CRYPTODEV, "Cryptodev not configured")
 
 
 typedef enum
@@ -142,6 +141,7 @@ dpdk_esp_encrypt_node_fn (vlib_main_t * vm,
          const int BLOCK_SIZE = 16;
          u32 iv_size;
          u16 orig_sz;
+         u8 trunc_size;
          crypto_sa_session_t *sa_sess;
          void *sess;
          struct rte_crypto_op *cop = 0;
@@ -199,6 +199,11 @@ dpdk_esp_encrypt_node_fn (vlib_main_t * vm,
 
          ssize_t adv;
          iv_size = em->esp_crypto_algs[sa0->crypto_alg].iv_len;
+         if (sa0->crypto_alg == IPSEC_CRYPTO_ALG_AES_GCM_128)
+           trunc_size = 16;
+         else
+           trunc_size = em->esp_integ_algs[sa0->integ_alg].trunc_size;
+
          ih0 = vlib_buffer_get_current (b0);
          orig_sz = b0->current_length;
          is_ipv6 = (ih0->ip4.ip_version_and_header_length & 0xF0) == 0x60;
@@ -314,9 +319,6 @@ dpdk_esp_encrypt_node_fn (vlib_main_t * vm,
              transport_mode = 1;
            }
 
-         ASSERT (sa0->crypto_alg < IPSEC_CRYPTO_N_ALG);
-         ASSERT (sa0->crypto_alg != IPSEC_CRYPTO_ALG_NONE);
-
          int blocks = 1 + (orig_sz + 1) / BLOCK_SIZE;
 
          /* pad packet in input buffer */
@@ -330,8 +332,7 @@ dpdk_esp_encrypt_node_fn (vlib_main_t * vm,
          f0 = vlib_buffer_get_current (b0) + b0->current_length + pad_bytes;
          f0->pad_length = pad_bytes;
          f0->next_header = next_hdr_type;
-         b0->current_length += pad_bytes + 2 +
-           em->esp_integ_algs[sa0->integ_alg].trunc_size;
+         b0->current_length += pad_bytes + 2 + trunc_size;
 
          vnet_buffer (b0)->sw_if_index[VLIB_RX] =
            vnet_buffer (b0)->sw_if_index[VLIB_RX];
@@ -349,88 +350,64 @@ dpdk_esp_encrypt_node_fn (vlib_main_t * vm,
          mb0->pkt_len = b0->current_length;
          mb0->data_off = RTE_PKTMBUF_HEADROOM + b0->current_data;
 
-         rte_crypto_op_attach_sym_session (cop, sess);
+         dpdk_gcm_cnt_blk *icb = &priv->cb;
 
-         sym_cop->m_src = mb0;
+         crypto_set_icb (icb, sa0->salt, sa0->seq, sa0->seq_hi);
 
-         dpdk_gcm_cnt_blk *icb = &priv->cb;
-         icb->salt = sa0->salt;
-         icb->iv[0] = sa0->seq;
-         icb->iv[1] = sa0->seq_hi;
-         icb->cnt = clib_host_to_net_u32 (1);
+         u8 is_aead = sa0->crypto_alg == IPSEC_CRYPTO_ALG_AES_GCM_128;
+         u32 cipher_off, cipher_len;
+         u32 auth_off = 0, auth_len = 0, aad_size = 0;
+         u8 *aad = NULL, *digest = NULL;
 
-         if (sa0->crypto_alg == IPSEC_CRYPTO_ALG_AES_GCM_128)
+         if (is_aead)
            {
              u32 *esp_iv =
                (u32 *) (b0->data + b0->current_data + ip_hdr_size +
                         sizeof (esp_header_t));
              esp_iv[0] = sa0->seq;
              esp_iv[1] = sa0->seq_hi;
-             sym_cop->cipher.data.offset =
-               ip_hdr_size + sizeof (esp_header_t) + iv_size;
-             sym_cop->cipher.data.length = BLOCK_SIZE * blocks;
-             sym_cop->cipher.iv.length = 16;
-           }
-         else
-           {
-             sym_cop->cipher.data.offset =
-               ip_hdr_size + sizeof (esp_header_t);
-             sym_cop->cipher.data.length = BLOCK_SIZE * blocks + iv_size;
-             sym_cop->cipher.iv.length = iv_size;
-           }
 
-         sym_cop->cipher.iv.data = (u8 *) icb;
-         sym_cop->cipher.iv.phys_addr = cop->phys_addr + (uintptr_t) icb
-           - (uintptr_t) cop;
+             cipher_off = ip_hdr_size + sizeof (esp_header_t) + iv_size;
+             cipher_len = BLOCK_SIZE * blocks;
+             iv_size = 16;     /* GCM IV size, not ESP IV size */
 
-
-         ASSERT (sa0->integ_alg < IPSEC_INTEG_N_ALG);
-         ASSERT (sa0->integ_alg != IPSEC_INTEG_ALG_NONE);
-
-         if (PREDICT_FALSE (sa0->integ_alg == IPSEC_INTEG_ALG_AES_GCM_128))
-           {
-             u8 *aad = priv->aad;
+             aad = priv->aad;
              clib_memcpy (aad, vlib_buffer_get_current (b0) + ip_hdr_size,
                           8);
-             sym_cop->auth.aad.data = aad;
-             sym_cop->auth.aad.phys_addr = cop->phys_addr +
-               (uintptr_t) aad - (uintptr_t) cop;
-
+             aad_size = 8;
              if (PREDICT_FALSE (sa0->use_esn))
                {
                  *((u32 *) & aad[8]) = sa0->seq_hi;
-                 sym_cop->auth.aad.length = 12;
-               }
-             else
-               {
-                 sym_cop->auth.aad.length = 8;
+                 aad_size = 12;
                }
+
+             digest =
+               vlib_buffer_get_current (b0) + b0->current_length -
+               trunc_size;
            }
          else
            {
-             sym_cop->auth.data.offset = ip_hdr_size;
-             sym_cop->auth.data.length = b0->current_length - ip_hdr_size
-               - em->esp_integ_algs[sa0->integ_alg].trunc_size;
+             cipher_off = ip_hdr_size + sizeof (esp_header_t);
+             cipher_len = BLOCK_SIZE * blocks + iv_size;
+
+             auth_off = ip_hdr_size;
+             auth_len = b0->current_length - ip_hdr_size - trunc_size;
+
+             digest =
+               vlib_buffer_get_current (b0) + b0->current_length -
+               trunc_size;
 
              if (PREDICT_FALSE (sa0->use_esn))
                {
-                 u8 *payload_end =
-                   vlib_buffer_get_current (b0) + b0->current_length;
-                 *((u32 *) payload_end) = sa0->seq_hi;
-                 sym_cop->auth.data.length += sizeof (sa0->seq_hi);
+                 *((u32 *) digest) = sa0->seq_hi;
+                 auth_len += sizeof (sa0->seq_hi);
                }
            }
-         sym_cop->auth.digest.data = vlib_buffer_get_current (b0) +
-           b0->current_length -
-           em->esp_integ_algs[sa0->integ_alg].trunc_size;
-         sym_cop->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset (mb0,
-                                                                      b0->current_length
-                                                                      -
-                                                                      em->esp_integ_algs
-                                                                      [sa0->integ_alg].trunc_size);
-         sym_cop->auth.digest.length =
-           em->esp_integ_algs[sa0->integ_alg].trunc_size;
 
+         crypto_op_setup (is_aead, mb0, cop, sess,
+                          cipher_off, cipher_len, (u8 *) icb, iv_size,
+                          auth_off, auth_len, aad, aad_size,
+                          digest, 0, trunc_size);
 
          if (PREDICT_FALSE (is_ipv6))
            {
@@ -470,6 +447,9 @@ dpdk_esp_encrypt_node_fn (vlib_main_t * vm,
     {
       u32 enq;
 
+      if (!n_cop_qp[i])
+       continue;
+
       qpd = vec_elt_at_index(cwm->qp_data, i);
       enq = rte_cryptodev_enqueue_burst(qpd->dev_id, qpd->qp_id,
                                        qpd->cops, n_cop_qp[i]);
index 7066564..c922940 100644
--- a/src/plugins/dpdk/ipsec/ipsec.c
+++ b/src/plugins/dpdk/ipsec/ipsec.c
@@ -56,18 +56,23 @@ add_del_sa_sess (u32 sa_index, u8 is_add)
          else
            {
              u8 dev_id;
+             i32 ret;
 
              sa_sess = pool_elt_at_index (cwm->sa_sess_d[is_outbound], sa_index);
              dev_id = cwm->qp_data[sa_sess->qp_index].dev_id;
 
              if (!sa_sess->sess)
                continue;
-
-             if (rte_cryptodev_sym_session_free(dev_id, sa_sess->sess))
-               {
-                 clib_warning("failed to free session");
-                 return -1;
-               }
+#if DPDK_NO_AEAD
+             ret = (rte_cryptodev_sym_session_free(dev_id, sa_sess->sess) == NULL);
+             ASSERT (ret);
+#else
+             ret = rte_cryptodev_sym_session_clear(dev_id, sa_sess->sess);
+             ASSERT (!ret);
+
+             ret = rte_cryptodev_sym_session_free(sa_sess->sess);
+             ASSERT (!ret);
+#endif
              memset(sa_sess, 0, sizeof(sa_sess[0]));
            }
        }
@@ -94,7 +99,7 @@ update_qp_data (crypto_worker_main_t * cwm,
     }
   /* *INDENT-ON* */
 
-  vec_add2 (cwm->qp_data, qpd, 1);
+  vec_add2_aligned (cwm->qp_data, qpd, 1, CLIB_CACHE_LINE_BYTES);
 
   qpd->dev_id = cdev_id;
   qpd->qp_id = qp_id;
@@ -119,6 +124,9 @@ add_mapping (crypto_worker_main_t * cwm,
   p_key->cipher_algo = (u8) cipher_cap->sym.cipher.algo;
   p_key->auth_algo = (u8) auth_cap->sym.auth.algo;
   p_key->is_outbound = is_outbound;
+#if ! DPDK_NO_AEAD
+  p_key->is_aead = cipher_cap->sym.xform_type == RTE_CRYPTO_SYM_XFORM_AEAD;
+#endif
 
   ret = hash_get (cwm->algo_qp_map, key);
   if (ret)
@@ -147,6 +155,20 @@ add_cdev_mapping (crypto_worker_main_t * cwm,
 
   for (i = dev_info->capabilities; i->op != RTE_CRYPTO_OP_TYPE_UNDEFINED; i++)
     {
+#if ! DPDK_NO_AEAD
+      if (i->sym.xform_type == RTE_CRYPTO_SYM_XFORM_AEAD)
+       {
+         struct rte_cryptodev_capabilities none = { 0 };
+
+         if (check_algo_is_supported (i, NULL) != 0)
+           continue;
+
+         none.sym.auth.algo = RTE_CRYPTO_AUTH_NULL;
+
+         mapped |= add_mapping (cwm, cdev_id, qp, is_outbound, i, &none);
+         continue;
+       }
+#endif
       if (i->sym.xform_type != RTE_CRYPTO_SYM_XFORM_CIPHER)
        continue;
 
@@ -205,17 +227,23 @@ dpdk_ipsec_check_support (ipsec_sa_t * sa)
     {
       if (sa->integ_alg != IPSEC_INTEG_ALG_NONE)
        return clib_error_return (0, "unsupported integ-alg %U with "
-                                 "crypto-algo aes-gcm-128",
+                                 "crypto-alg aes-gcm-128",
                                  format_ipsec_integ_alg, sa->integ_alg);
+#if DPDK_NO_AEAD
       sa->integ_alg = IPSEC_INTEG_ALG_AES_GCM_128;
+#endif
     }
-  else
-    {
-      if (sa->integ_alg == IPSEC_INTEG_ALG_NONE ||
-         sa->integ_alg == IPSEC_INTEG_ALG_AES_GCM_128)
-       return clib_error_return (0, "unsupported integ-alg %U",
-                                 format_ipsec_integ_alg, sa->integ_alg);
-    }
+#if DPDK_NO_AEAD
+  else if (sa->crypto_alg == IPSEC_CRYPTO_ALG_NONE ||
+          sa->integ_alg == IPSEC_INTEG_ALG_NONE ||
+          sa->integ_alg == IPSEC_INTEG_ALG_AES_GCM_128)
+#else
+  else if (sa->integ_alg == IPSEC_INTEG_ALG_NONE)
+#endif
+    return clib_error_return (0,
+                             "unsupported integ-alg %U with crypto-alg %U",
+                             format_ipsec_integ_alg, sa->integ_alg,
+                             format_ipsec_crypto_alg, sa->crypto_alg);
 
   return 0;
 }
@@ -233,6 +261,10 @@ dpdk_ipsec_process (vlib_main_t * vm, vlib_node_runtime_t * rt,
   struct rte_mempool *rmp;
   i32 dev_id, ret;
   u32 i, skip_master;
+#if ! DPDK_NO_AEAD
+  u32 max_sess_size = 0, sess_size;
+  i8 socket_id;
+#endif
 
   if (check_cryptodev_queues () < 0)
     {
@@ -297,9 +329,10 @@ dpdk_ipsec_process (vlib_main_t * vm, vlib_node_runtime_t * rt,
 
       dev_conf.socket_id = rte_cryptodev_socket_id (dev_id);
       dev_conf.nb_queue_pairs = cdev_info.max_nb_queue_pairs;
+#if DPDK_NO_AEAD
       dev_conf.session_mp.nb_objs = DPDK_CRYPTO_NB_SESS_OBJS;
       dev_conf.session_mp.cache_size = DPDK_CRYPTO_CACHE_SIZE;
-
+#endif
       ret = rte_cryptodev_configure (dev_id, &dev_conf);
       if (ret < 0)
        {
@@ -310,16 +343,26 @@ dpdk_ipsec_process (vlib_main_t * vm, vlib_node_runtime_t * rt,
       qp_conf.nb_descriptors = DPDK_CRYPTO_N_QUEUE_DESC;
       for (qp = 0; qp < dev_conf.nb_queue_pairs; qp++)
        {
+#if DPDK_NO_AEAD
          ret = rte_cryptodev_queue_pair_setup (dev_id, qp, &qp_conf,
                                                dev_conf.socket_id);
+#else
+         ret = rte_cryptodev_queue_pair_setup (dev_id, qp, &qp_conf,
+                                               dev_conf.socket_id, NULL);
+#endif
          if (ret < 0)
            {
              clib_warning ("cryptodev %u qp %u setup error", dev_id, qp);
              goto error;
            }
        }
-      vec_validate_aligned (dcm->cop_pools, dev_conf.socket_id,
-                           CLIB_CACHE_LINE_BYTES);
+      vec_validate (dcm->cop_pools, dev_conf.socket_id);
+
+#if ! DPDK_NO_AEAD
+      sess_size = rte_cryptodev_get_private_session_size (dev_id);
+      if (sess_size > max_sess_size)
+       max_sess_size = sess_size;
+#endif
 
       if (!vec_elt (dcm->cop_pools, dev_conf.socket_id))
        {
@@ -333,14 +376,14 @@ dpdk_ipsec_process (vlib_main_t * vm, vlib_node_runtime_t * rt,
                                           DPDK_CRYPTO_CACHE_SIZE,
                                           DPDK_CRYPTO_PRIV_SIZE,
                                           dev_conf.socket_id);
-         vec_free (pool_name);
 
          if (!rmp)
            {
-             clib_warning ("failed to allocate mempool on socket %u",
-                           dev_conf.socket_id);
+             clib_warning ("failed to allocate %s", pool_name);
+             vec_free (pool_name);
              goto error;
            }
+         vec_free (pool_name);
          vec_elt (dcm->cop_pools, dev_conf.socket_id) = rmp;
        }
 
@@ -348,6 +391,51 @@ dpdk_ipsec_process (vlib_main_t * vm, vlib_node_runtime_t * rt,
               DPDK_CRYPTO_NB_SESS_OBJS, DPDK_CRYPTO_CACHE_SIZE);
     }
 
+#if ! DPDK_NO_AEAD
+  /* *INDENT-OFF* */
+  vec_foreach_index (socket_id, dcm->cop_pools)
+    {
+      u8 *pool_name;
+
+      if (!vec_elt (dcm->cop_pools, socket_id))
+       continue;
+
+      vec_validate (dcm->sess_h_pools, socket_id);
+      pool_name = format (0, "crypto_sess_h_socket%u%c",
+                             socket_id, 0);
+      rmp =
+       rte_mempool_create((i8 *)pool_name, DPDK_CRYPTO_NB_SESS_OBJS,
+                          rte_cryptodev_get_header_session_size (),
+                          512, 0, NULL, NULL, NULL, NULL,
+                          socket_id, 0);
+      if (!rmp)
+       {
+         clib_warning ("failed to allocate %s", pool_name);
+         vec_free (pool_name);
+         goto error;
+       }
+      vec_free (pool_name);
+      vec_elt (dcm->sess_h_pools, socket_id) = rmp;
+
+      vec_validate (dcm->sess_pools, socket_id);
+      pool_name = format (0, "crypto_sess_socket%u%c",
+                             socket_id, 0);
+      rmp =
+       rte_mempool_create((i8 *)pool_name, DPDK_CRYPTO_NB_SESS_OBJS,
+                          max_sess_size, 512, 0, NULL, NULL, NULL, NULL,
+                          socket_id, 0);
+      if (!rmp)
+       {
+         clib_warning ("failed to allocate %s", pool_name);
+         vec_free (pool_name);
+         goto error;
+       }
+      vec_free (pool_name);
+      vec_elt (dcm->sess_pools, socket_id) = rmp;
+    }
+  /* *INDENT-ON* */
+#endif
+
   dpdk_esp_init ();
 
   /* Add new next node and set as default */
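
The other 17.08 cryptodev break handled in this hunk is session management: sessions are now allocated from a shared header mempool and initialised per device with a private-data mempool, and teardown becomes clear-then-free. A hedged sketch of that lifecycle (dev_id, xforms and the two mempools are assumed to exist; in the patch they come from dcm->sess_h_pools and dcm->sess_pools):

#include <rte_cryptodev.h>

static struct rte_cryptodev_sym_session *
session_create_1708 (uint8_t dev_id, struct rte_crypto_sym_xform *xforms,
                     struct rte_mempool *sess_header_pool,
                     struct rte_mempool *sess_priv_pool)
{
  struct rte_cryptodev_sym_session *sess;

  /* 17.02/17.05 did both steps in one call:
   * rte_cryptodev_sym_session_create (dev_id, xforms). */
  sess = rte_cryptodev_sym_session_create (sess_header_pool);
  if (!sess)
    return NULL;

  if (rte_cryptodev_sym_session_init (dev_id, sess, xforms, sess_priv_pool))
    {
      rte_cryptodev_sym_session_free (sess);
      return NULL;
    }
  return sess;
}

static void
session_destroy_1708 (uint8_t dev_id, struct rte_cryptodev_sym_session *sess)
{
  /* 17.02/17.05 instead used rte_cryptodev_sym_session_free (dev_id, sess). */
  rte_cryptodev_sym_session_clear (dev_id, sess);
  rte_cryptodev_sym_session_free (sess);
}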
index d794034..a94dd68 100644
--- a/src/plugins/dpdk/ipsec/ipsec.h
+++ b/src/plugins/dpdk/ipsec/ipsec.h
@@ -53,6 +53,7 @@ typedef struct
   u8 cipher_algo;
   u8 auth_algo;
   u8 is_outbound;
+  u8 is_aead;
 } crypto_worker_qp_key_t;
 
 typedef struct
@@ -81,6 +82,8 @@ typedef struct
 
 typedef struct
 {
+  struct rte_mempool **sess_h_pools;
+  struct rte_mempool **sess_pools;
   struct rte_mempool **cop_pools;
   crypto_worker_main_t *workers_main;
   u8 enabled;
@@ -146,12 +149,14 @@ check_algo_is_supported (const struct rte_cryptodev_capabilities *cap,
 {
   struct
   {
-    uint8_t cipher_algo;
     enum rte_crypto_sym_xform_type type;
     union
     {
       enum rte_crypto_auth_algorithm auth;
       enum rte_crypto_cipher_algorithm cipher;
+#if ! DPDK_NO_AEAD
+      enum rte_crypto_aead_algorithm aead;
+#endif
     };
     char *name;
   } supported_algo[] =
@@ -162,15 +167,18 @@ check_algo_is_supported (const struct rte_cryptodev_capabilities *cap,
     {
     .type = RTE_CRYPTO_SYM_XFORM_CIPHER,.cipher =
        RTE_CRYPTO_CIPHER_AES_CBC,.name = "AES_CBC"},
+#if DPDK_NO_AEAD
     {
     .type = RTE_CRYPTO_SYM_XFORM_CIPHER,.cipher =
-       RTE_CRYPTO_CIPHER_AES_CTR,.name = "AES_CTR"},
+       RTE_CRYPTO_CIPHER_AES_GCM,.name = "AES-GCM"},
+#else
     {
-    .type = RTE_CRYPTO_SYM_XFORM_CIPHER,.cipher =
-       RTE_CRYPTO_CIPHER_3DES_CBC,.name = "3DES-CBC"},
+    .type = RTE_CRYPTO_SYM_XFORM_AEAD,.aead =
+       RTE_CRYPTO_AEAD_AES_GCM,.name = "AES-GCM"},
+#endif
     {
-    .type = RTE_CRYPTO_SYM_XFORM_CIPHER,.cipher =
-       RTE_CRYPTO_CIPHER_AES_GCM,.name = "AES-GCM"},
+    .type = RTE_CRYPTO_SYM_XFORM_AUTH,.auth =
+       RTE_CRYPTO_AUTH_NULL,.name = "NULL"},
     {
     .type = RTE_CRYPTO_SYM_XFORM_AUTH,.auth =
        RTE_CRYPTO_AUTH_SHA1_HMAC,.name = "HMAC-SHA1"},
@@ -183,15 +191,16 @@ check_algo_is_supported (const struct rte_cryptodev_capabilities *cap,
     {
     .type = RTE_CRYPTO_SYM_XFORM_AUTH,.auth =
        RTE_CRYPTO_AUTH_SHA512_HMAC,.name = "HMAC-SHA512"},
-    {
-    .type = RTE_CRYPTO_SYM_XFORM_AUTH,.auth =
-       RTE_CRYPTO_AUTH_AES_XCBC_MAC,.name = "AES-XCBC-MAC"},
+#if DPDK_NO_AEAD
     {
     .type = RTE_CRYPTO_SYM_XFORM_AUTH,.auth =
        RTE_CRYPTO_AUTH_AES_GCM,.name = "AES-GCM"},
+#endif
     {
       /* tail */
-  .type = RTE_CRYPTO_SYM_XFORM_NOT_SPECIFIED},};
+    .type = RTE_CRYPTO_SYM_XFORM_NOT_SPECIFIED}
+  };
+
   uint32_t i = 0;
 
   if (cap->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
@@ -203,6 +212,10 @@ check_algo_is_supported (const struct rte_cryptodev_capabilities *cap,
        {
          if ((cap->sym.xform_type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
               cap->sym.cipher.algo == supported_algo[i].cipher) ||
+#if ! DPDK_NO_AEAD
+             (cap->sym.xform_type == RTE_CRYPTO_SYM_XFORM_AEAD &&
+              cap->sym.aead.algo == supported_algo[i].aead) ||
+#endif
              (cap->sym.xform_type == RTE_CRYPTO_SYM_XFORM_AUTH &&
               cap->sym.auth.algo == supported_algo[i].auth))
            {