dpdk: required changes for 17.08
[vpp.git] src/plugins/dpdk/ipsec/esp_encrypt.c
index 73d9880..6de444f 100644
@@ -43,8 +43,7 @@ typedef enum
  _(RX_PKTS, "ESP pkts received")                    \
  _(SEQ_CYCLED, "sequence number cycled")            \
  _(ENQ_FAIL, "Enqueue failed (buffer full)")        \
- _(NO_CRYPTODEV, "Cryptodev not configured")        \
- _(UNSUPPORTED, "Cipher/Auth not supported")
+ _(NO_CRYPTODEV, "Cryptodev not configured")
 
 
 typedef enum
@@ -142,6 +141,7 @@ dpdk_esp_encrypt_node_fn (vlib_main_t * vm,
          const int BLOCK_SIZE = 16;
          u32 iv_size;
          u16 orig_sz;
+         u8 trunc_size;
          crypto_sa_session_t *sa_sess;
          void *sess;
          struct rte_crypto_op *cop = 0;
@@ -199,6 +199,11 @@ dpdk_esp_encrypt_node_fn (vlib_main_t * vm,
 
          ssize_t adv;
          iv_size = em->esp_crypto_algs[sa0->crypto_alg].iv_len;
+         if (sa0->crypto_alg == IPSEC_CRYPTO_ALG_AES_GCM_128)
+           trunc_size = 16;
+         else
+           trunc_size = em->esp_integ_algs[sa0->integ_alg].trunc_size;
+
          ih0 = vlib_buffer_get_current (b0);
          orig_sz = b0->current_length;
          is_ipv6 = (ih0->ip4.ip_version_and_header_length & 0xF0) == 0x60;
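
The ICV length is now chosen once per packet: AES-GCM-128 carries its 16-byte GCM tag as the ICV, while every other transform keeps taking the truncated digest length from the integrity-algorithm table. Typical values for the two branches (assumed here, using the customary truncation lengths):

    /* Assumed examples of the trunc_size selection above:
     *   IPSEC_CRYPTO_ALG_AES_GCM_128 -> trunc_size = 16  (GCM tag, fixed)
     *   IPSEC_INTEG_ALG_SHA1_96      -> trunc_size = 12  (96-bit truncated HMAC)
     *   IPSEC_INTEG_ALG_SHA_256_128  -> trunc_size = 16  (128-bit truncated HMAC)
     */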
@@ -314,9 +319,6 @@ dpdk_esp_encrypt_node_fn (vlib_main_t * vm,
              transport_mode = 1;
            }
 
-         ASSERT (sa0->crypto_alg < IPSEC_CRYPTO_N_ALG);
-         ASSERT (sa0->crypto_alg != IPSEC_CRYPTO_ALG_NONE);
-
          int blocks = 1 + (orig_sz + 1) / BLOCK_SIZE;
 
          /* pad packet in input buffer */
@@ -330,8 +332,7 @@ dpdk_esp_encrypt_node_fn (vlib_main_t * vm,
          f0 = vlib_buffer_get_current (b0) + b0->current_length + pad_bytes;
          f0->pad_length = pad_bytes;
          f0->next_header = next_hdr_type;
-         b0->current_length += pad_bytes + 2 +
-           em->esp_integ_algs[sa0->integ_alg].trunc_size;
+         b0->current_length += pad_bytes + 2 + trunc_size;
 
          vnet_buffer (b0)->sw_if_index[VLIB_RX] =
            vnet_buffer (b0)->sw_if_index[VLIB_RX];
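
For reference, a worked example of the padding and trailer arithmetic above (a sketch; pad_bytes is assumed to be chosen so that the payload plus the two esp_footer_t bytes end exactly on a cipher-block boundary, which is what the blocks formula implies):

    /* Assumed example: orig_sz = 60, BLOCK_SIZE = 16, SHA1-96 -> trunc_size = 12
     *   blocks    = 1 + (60 + 1) / 16 = 4       -> 64 bytes get ciphered
     *   pad_bytes = 64 - 60 - 2       = 2       -> footer ends the last block
     *   appended  = pad_bytes + 2 + trunc_size
     *             = 2 + 2 + 12        = 16 bytes -> padding + footer + ICV
     */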
@@ -349,88 +350,64 @@ dpdk_esp_encrypt_node_fn (vlib_main_t * vm,
          mb0->pkt_len = b0->current_length;
          mb0->data_off = RTE_PKTMBUF_HEADROOM + b0->current_data;
 
-         rte_crypto_op_attach_sym_session (cop, sess);
+         dpdk_gcm_cnt_blk *icb = &priv->cb;
 
-         sym_cop->m_src = mb0;
+         crypto_set_icb (icb, sa0->salt, sa0->seq, sa0->seq_hi);
 
-         dpdk_gcm_cnt_blk *icb = &priv->cb;
-         icb->salt = sa0->salt;
-         icb->iv[0] = sa0->seq;
-         icb->iv[1] = sa0->seq_hi;
-         icb->cnt = clib_host_to_net_u32 (1);
+         u8 is_aead = sa0->crypto_alg == IPSEC_CRYPTO_ALG_AES_GCM_128;
+         u32 cipher_off, cipher_len;
+         u32 auth_off = 0, auth_len = 0, aad_size = 0;
+         u8 *aad = NULL, *digest = NULL;
 
-         if (sa0->crypto_alg == IPSEC_CRYPTO_ALG_AES_GCM_128)
+         if (is_aead)
            {
              u32 *esp_iv =
-               (u32 *) vlib_buffer_get_current (b0) + ip_hdr_size +
-               sizeof (esp_header_t);
+               (u32 *) (b0->data + b0->current_data + ip_hdr_size +
+                        sizeof (esp_header_t));
              esp_iv[0] = sa0->seq;
              esp_iv[1] = sa0->seq_hi;
-             sym_cop->cipher.data.offset =
-               ip_hdr_size + sizeof (esp_header_t) + iv_size;
-             sym_cop->cipher.data.length = BLOCK_SIZE * blocks;
-             sym_cop->cipher.iv.length = 16;
-           }
-         else
-           {
-             sym_cop->cipher.data.offset =
-               ip_hdr_size + sizeof (esp_header_t);
-             sym_cop->cipher.data.length = BLOCK_SIZE * blocks + iv_size;
-             sym_cop->cipher.iv.length = iv_size;
-           }
 
-         sym_cop->cipher.iv.data = (u8 *) icb;
-         sym_cop->cipher.iv.phys_addr = cop->phys_addr + (uintptr_t) icb
-           - (uintptr_t) cop;
+             cipher_off = ip_hdr_size + sizeof (esp_header_t) + iv_size;
+             cipher_len = BLOCK_SIZE * blocks;
+             iv_size = 16;     /* GCM IV size, not ESP IV size */
 
-
-         ASSERT (sa0->integ_alg < IPSEC_INTEG_N_ALG);
-         ASSERT (sa0->integ_alg != IPSEC_INTEG_ALG_NONE);
-
-         if (PREDICT_FALSE (sa0->integ_alg == IPSEC_INTEG_ALG_AES_GCM_128))
-           {
-             u8 *aad = priv->aad;
+             aad = priv->aad;
              clib_memcpy (aad, vlib_buffer_get_current (b0) + ip_hdr_size,
                           8);
-             sym_cop->auth.aad.data = aad;
-             sym_cop->auth.aad.phys_addr = cop->phys_addr +
-               (uintptr_t) aad - (uintptr_t) cop;
-
+             aad_size = 8;
              if (PREDICT_FALSE (sa0->use_esn))
                {
                  *((u32 *) & aad[8]) = sa0->seq_hi;
-                 sym_cop->auth.aad.length = 12;
-               }
-             else
-               {
-                 sym_cop->auth.aad.length = 8;
+                 aad_size = 12;
                }
+
+             digest =
+               vlib_buffer_get_current (b0) + b0->current_length -
+               trunc_size;
            }
          else
            {
-             sym_cop->auth.data.offset = ip_hdr_size;
-             sym_cop->auth.data.length = b0->current_length - ip_hdr_size
-               - em->esp_integ_algs[sa0->integ_alg].trunc_size;
+             cipher_off = ip_hdr_size + sizeof (esp_header_t);
+             cipher_len = BLOCK_SIZE * blocks + iv_size;
+
+             auth_off = ip_hdr_size;
+             auth_len = b0->current_length - ip_hdr_size - trunc_size;
+
+             digest =
+               vlib_buffer_get_current (b0) + b0->current_length -
+               trunc_size;
 
              if (PREDICT_FALSE (sa0->use_esn))
                {
-                 u8 *payload_end =
-                   vlib_buffer_get_current (b0) + b0->current_length;
-                 *((u32 *) payload_end) = sa0->seq_hi;
-                 sym_cop->auth.data.length += sizeof (sa0->seq_hi);
+                 *((u32 *) digest) = sa0->seq_hi;
+                 auth_len += sizeof (sa0->seq_hi);
                }
            }
-         sym_cop->auth.digest.data = vlib_buffer_get_current (b0) +
-           b0->current_length -
-           em->esp_integ_algs[sa0->integ_alg].trunc_size;
-         sym_cop->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset (mb0,
-                                                                      b0->current_length
-                                                                      -
-                                                                      em->esp_integ_algs
-                                                                      [sa0->integ_alg].trunc_size);
-         sym_cop->auth.digest.length =
-           em->esp_integ_algs[sa0->integ_alg].trunc_size;
 
+         crypto_op_setup (is_aead, mb0, cop, sess,
+                          cipher_off, cipher_len, (u8 *) icb, iv_size,
+                          auth_off, auth_len, aad, aad_size,
+                          digest, 0, trunc_size);
 
          if (PREDICT_FALSE (is_ipv6))
            {
@@ -470,6 +447,9 @@ dpdk_esp_encrypt_node_fn (vlib_main_t * vm,
     {
       u32 enq;
 
+      if (!n_cop_qp[i])
+       continue;
+
       qpd = vec_elt_at_index(cwm->qp_data, i);
       enq = rte_cryptodev_enqueue_burst(qpd->dev_id, qpd->qp_id,
                                        qpd->cops, n_cop_qp[i]);
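
With the added guard, queue pairs that received no crypto ops in this frame are skipped instead of being passed to rte_cryptodev_enqueue_burst() with a zero count. A minimal sketch of the resulting loop shape (loop bounds and the bookkeeping that follows are assumed; only the names visible at the call site are used):

    /* Sketch: enqueue only the queue pairs that were actually filled. */
    for (i = 0; i < vec_len (cwm->qp_data); i++)
      {
        u32 enq;

        if (!n_cop_qp[i])            /* nothing queued for this qp */
          continue;

        qpd = vec_elt_at_index (cwm->qp_data, i);
        enq = rte_cryptodev_enqueue_burst (qpd->dev_id, qpd->qp_id,
                                           qpd->cops, n_cop_qp[i]);
        /* enq is consumed by the accounting that follows (not shown) */
      }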