New upstream version 16.11.3
examples/l2fwd-crypto/main.c
index 8dc616d..b40c49c 100644
@@ -45,6 +45,8 @@
 #include <ctype.h>
 #include <errno.h>
 #include <getopt.h>
+#include <fcntl.h>
+#include <unistd.h>
 
 #include <rte_atomic.h>
 #include <rte_branch_prediction.h>
@@ -70,7 +72,6 @@
 #include <rte_per_lcore.h>
 #include <rte_prefetch.h>
 #include <rte_random.h>
-#include <rte_ring.h>
 #include <rte_hexdump.h>
 
 enum cdev_type {
@@ -199,7 +200,7 @@ struct lcore_queue_conf {
        unsigned nb_crypto_devs;
        unsigned cryptodev_list[MAX_RX_QUEUE_PER_LCORE];
 
-       struct op_buffer op_buf[RTE_MAX_ETHPORTS];
+       struct op_buffer op_buf[RTE_CRYPTO_MAX_DEVS];
        struct pkt_buffer pkt_buf[RTE_MAX_ETHPORTS];
 } __rte_cache_aligned;
 
@@ -214,7 +215,7 @@ static const struct rte_eth_conf port_conf = {
                .hw_ip_checksum = 0, /**< IP checksum offload disabled */
                .hw_vlan_filter = 0, /**< VLAN filtering disabled */
                .jumbo_frame    = 0, /**< Jumbo Frame Support disabled */
-               .hw_strip_crc   = 0, /**< CRC stripped by hardware */
+               .hw_strip_crc   = 1, /**< CRC stripped by hardware */
        },
        .txmode = {
                .mq_mode = ETH_MQ_TX_NONE,
@@ -243,7 +244,7 @@ struct l2fwd_crypto_statistics {
 } __rte_cache_aligned;
 
 struct l2fwd_port_statistics port_statistics[RTE_MAX_ETHPORTS];
-struct l2fwd_crypto_statistics crypto_statistics[RTE_MAX_ETHPORTS];
+struct l2fwd_crypto_statistics crypto_statistics[RTE_CRYPTO_MAX_DEVS];
 
 /* A tsc-based timer responsible for triggering statistics printout */
 #define TIMER_MILLISECOND 2000000ULL /* around 1ms at 2 Ghz */
@@ -298,7 +299,7 @@ print_stats(void)
 
        for (cdevid = 0; cdevid < RTE_CRYPTO_MAX_DEVS; cdevid++) {
                /* skip disabled ports */
-               if ((l2fwd_enabled_crypto_mask & (1lu << cdevid)) == 0)
+               if ((l2fwd_enabled_crypto_mask & (((uint64_t)1) << cdevid)) == 0)
                        continue;
                printf("\nStatistics for cryptodev %"PRIu64
                                " -------------------------"
@@ -339,16 +340,24 @@ fill_supported_algorithm_tables(void)
                strcpy(supported_auth_algo[i], "NOT_SUPPORTED");
 
        strcpy(supported_auth_algo[RTE_CRYPTO_AUTH_AES_GCM], "AES_GCM");
+       strcpy(supported_auth_algo[RTE_CRYPTO_AUTH_AES_GMAC], "AES_GMAC");
        strcpy(supported_auth_algo[RTE_CRYPTO_AUTH_MD5_HMAC], "MD5_HMAC");
+       strcpy(supported_auth_algo[RTE_CRYPTO_AUTH_MD5], "MD5");
        strcpy(supported_auth_algo[RTE_CRYPTO_AUTH_NULL], "NULL");
        strcpy(supported_auth_algo[RTE_CRYPTO_AUTH_AES_XCBC_MAC],
                "AES_XCBC_MAC");
        strcpy(supported_auth_algo[RTE_CRYPTO_AUTH_SHA1_HMAC], "SHA1_HMAC");
+       strcpy(supported_auth_algo[RTE_CRYPTO_AUTH_SHA1], "SHA1");
        strcpy(supported_auth_algo[RTE_CRYPTO_AUTH_SHA224_HMAC], "SHA224_HMAC");
+       strcpy(supported_auth_algo[RTE_CRYPTO_AUTH_SHA224], "SHA224");
        strcpy(supported_auth_algo[RTE_CRYPTO_AUTH_SHA256_HMAC], "SHA256_HMAC");
+       strcpy(supported_auth_algo[RTE_CRYPTO_AUTH_SHA256], "SHA256");
        strcpy(supported_auth_algo[RTE_CRYPTO_AUTH_SHA384_HMAC], "SHA384_HMAC");
+       strcpy(supported_auth_algo[RTE_CRYPTO_AUTH_SHA384], "SHA384");
        strcpy(supported_auth_algo[RTE_CRYPTO_AUTH_SHA512_HMAC], "SHA512_HMAC");
+       strcpy(supported_auth_algo[RTE_CRYPTO_AUTH_SHA512], "SHA512");
        strcpy(supported_auth_algo[RTE_CRYPTO_AUTH_SNOW3G_UIA2], "SNOW3G_UIA2");
+       strcpy(supported_auth_algo[RTE_CRYPTO_AUTH_ZUC_EIA3], "ZUC_EIA3");
        strcpy(supported_auth_algo[RTE_CRYPTO_AUTH_KASUMI_F9], "KASUMI_F9");
 
        for (i = 0; i < RTE_CRYPTO_CIPHER_LIST_END; i++)
@@ -359,7 +368,10 @@ fill_supported_algorithm_tables(void)
        strcpy(supported_cipher_algo[RTE_CRYPTO_CIPHER_AES_GCM], "AES_GCM");
        strcpy(supported_cipher_algo[RTE_CRYPTO_CIPHER_NULL], "NULL");
        strcpy(supported_cipher_algo[RTE_CRYPTO_CIPHER_SNOW3G_UEA2], "SNOW3G_UEA2");
+       strcpy(supported_cipher_algo[RTE_CRYPTO_CIPHER_ZUC_EEA3], "ZUC_EEA3");
        strcpy(supported_cipher_algo[RTE_CRYPTO_CIPHER_KASUMI_F8], "KASUMI_F8");
+       strcpy(supported_cipher_algo[RTE_CRYPTO_CIPHER_3DES_CTR], "3DES_CTR");
+       strcpy(supported_cipher_algo[RTE_CRYPTO_CIPHER_3DES_CBC], "3DES_CBC");
 }
 
 
@@ -420,7 +432,8 @@ l2fwd_simple_crypto_enqueue(struct rte_mbuf *m,
        struct ether_hdr *eth_hdr;
        struct ipv4_hdr *ip_hdr;
 
-       unsigned ipdata_offset, pad_len, data_len;
+       uint32_t ipdata_offset, data_len;
+       uint32_t pad_len = 0;
        char *padding;
 
        eth_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);
@@ -439,16 +452,36 @@ l2fwd_simple_crypto_enqueue(struct rte_mbuf *m,
 
        /* Zero pad data to be crypto'd so it is block aligned */
        data_len  = rte_pktmbuf_data_len(m) - ipdata_offset;
-       pad_len = data_len % cparams->block_size ? cparams->block_size -
-                       (data_len % cparams->block_size) : 0;
 
-       if (pad_len) {
-               padding = rte_pktmbuf_append(m, pad_len);
-               if (unlikely(!padding))
-                       return -1;
+       if (cparams->do_hash && cparams->hash_verify)
+               data_len -= cparams->digest_length;
+
+       if (cparams->do_cipher) {
+               /*
+                * The following algorithms are block ciphers
+                * and might need padding
+                */
+               switch (cparams->cipher_algo) {
+               case RTE_CRYPTO_CIPHER_AES_CBC:
+               case RTE_CRYPTO_CIPHER_AES_ECB:
+               case RTE_CRYPTO_CIPHER_3DES_CBC:
+               case RTE_CRYPTO_CIPHER_3DES_ECB:
+                       if (data_len % cparams->block_size)
+                               pad_len = cparams->block_size -
+                                       (data_len % cparams->block_size);
+                       break;
+               default:
+                       pad_len = 0;
+               }
+
+               if (pad_len) {
+                       padding = rte_pktmbuf_append(m, pad_len);
+                       if (unlikely(!padding))
+                               return -1;
 
-               data_len += pad_len;
-               memset(padding, 0, pad_len);
+                       data_len += pad_len;
+                       memset(padding, 0, pad_len);
+               }
        }
 
        /* Set crypto operation data parameters */
@@ -460,17 +493,18 @@ l2fwd_simple_crypto_enqueue(struct rte_mbuf *m,
                        op->sym->auth.digest.data = (uint8_t *)rte_pktmbuf_append(m,
                                cparams->digest_length);
                } else {
-                       op->sym->auth.digest.data = (uint8_t *)rte_pktmbuf_append(m,
-                               cparams->digest_length);
+                       op->sym->auth.digest.data = rte_pktmbuf_mtod(m,
+                               uint8_t *) + ipdata_offset + data_len;
                }
 
                op->sym->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(m,
                                rte_pktmbuf_pkt_len(m) - cparams->digest_length);
                op->sym->auth.digest.length = cparams->digest_length;
 
-               /* For SNOW3G/KASUMI algorithms, offset/length must be in bits */
+               /* For wireless algorithms, offset/length must be in bits */
                if (cparams->auth_algo == RTE_CRYPTO_AUTH_SNOW3G_UIA2 ||
-                               cparams->auth_algo == RTE_CRYPTO_AUTH_KASUMI_F9) {
+                               cparams->auth_algo == RTE_CRYPTO_AUTH_KASUMI_F9 ||
+                               cparams->auth_algo == RTE_CRYPTO_AUTH_ZUC_EIA3) {
                        op->sym->auth.data.offset = ipdata_offset << 3;
                        op->sym->auth.data.length = data_len << 3;
                } else {
@@ -482,6 +516,10 @@ l2fwd_simple_crypto_enqueue(struct rte_mbuf *m,
                        op->sym->auth.aad.data = cparams->aad.data;
                        op->sym->auth.aad.phys_addr = cparams->aad.phys_addr;
                        op->sym->auth.aad.length = cparams->aad.length;
+               } else {
+                       op->sym->auth.aad.data = NULL;
+                       op->sym->auth.aad.phys_addr = 0;
+                       op->sym->auth.aad.length = 0;
                }
        }
 
@@ -490,25 +528,15 @@ l2fwd_simple_crypto_enqueue(struct rte_mbuf *m,
                op->sym->cipher.iv.phys_addr = cparams->iv.phys_addr;
                op->sym->cipher.iv.length = cparams->iv.length;
 
-               /* For SNOW3G algorithms, offset/length must be in bits */
+               /* For wireless algorithms, offset/length must be in bits */
                if (cparams->cipher_algo == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
-                               cparams->cipher_algo == RTE_CRYPTO_CIPHER_KASUMI_F8) {
+                               cparams->cipher_algo == RTE_CRYPTO_CIPHER_KASUMI_F8 ||
+                               cparams->cipher_algo == RTE_CRYPTO_CIPHER_ZUC_EEA3) {
                        op->sym->cipher.data.offset = ipdata_offset << 3;
-                       if (cparams->do_hash && cparams->hash_verify)
-                               /* Do not cipher the hash tag */
-                               op->sym->cipher.data.length = (data_len -
-                                       cparams->digest_length) << 3;
-                       else
-                               op->sym->cipher.data.length = data_len << 3;
-
+                       op->sym->cipher.data.length = data_len << 3;
                } else {
                        op->sym->cipher.data.offset = ipdata_offset;
-                       if (cparams->do_hash && cparams->hash_verify)
-                               /* Do not cipher the hash tag */
-                               op->sym->cipher.data.length = data_len -
-                                       cparams->digest_length;
-                       else
-                               op->sym->cipher.data.length = data_len;
+                       op->sym->cipher.data.length = data_len;
                }
        }
 
@@ -588,10 +616,18 @@ l2fwd_simple_forward(struct rte_mbuf *m, unsigned portid)
 static void
 generate_random_key(uint8_t *key, unsigned length)
 {
-       unsigned i;
+       int fd;
+       int ret;
 
-       for (i = 0; i < length; i++)
-               key[i] = rand() % 0xff;
+       fd = open("/dev/urandom", O_RDONLY);
+       if (fd < 0)
+               rte_exit(EXIT_FAILURE, "Failed to generate random key\n");
+
+       ret = read(fd, key, length);
+       close(fd);
+
+       if (ret != (signed)length)
+               rte_exit(EXIT_FAILURE, "Failed to generate random key\n");
 }
 
 static struct rte_cryptodev_sym_session *
@@ -628,7 +664,7 @@ l2fwd_main_loop(struct l2fwd_crypto_options *options)
 
        unsigned lcore_id = rte_lcore_id();
        uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
-       unsigned i, j, portid, nb_rx;
+       unsigned i, j, portid, nb_rx, len;
        struct lcore_queue_conf *qconf = &lcore_queue_conf[lcore_id];
        const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1) /
                        US_PER_S * BURST_TX_DRAIN_US;
@@ -684,7 +720,8 @@ l2fwd_main_loop(struct l2fwd_crypto_options *options)
                                        generate_random_key(port_cparams[i].aad.data,
                                                port_cparams[i].aad.length);
 
-                       }
+                       } else
+                               port_cparams[i].aad.length = 0;
 
                        if (options->auth_xform.auth.op == RTE_CRYPTO_AUTH_OP_VERIFY)
                                port_cparams[i].hash_verify = 1;
@@ -727,10 +764,18 @@ l2fwd_main_loop(struct l2fwd_crypto_options *options)
                cur_tsc = rte_rdtsc();
 
                /*
-                * TX burst queue drain
+                * Crypto device/TX burst queue drain
                 */
                diff_tsc = cur_tsc - prev_tsc;
                if (unlikely(diff_tsc > drain_tsc)) {
+                       /* Enqueue all crypto ops remaining in buffers */
+                       for (i = 0; i < qconf->nb_crypto_devs; i++) {
+                               cparams = &port_cparams[i];
+                               len = qconf->op_buf[cparams->dev_id].len;
+                               l2fwd_crypto_send_burst(qconf, len, cparams);
+                               qconf->op_buf[cparams->dev_id].len = 0;
+                       }
+                       /* Transmit all packets remaining in buffers */
                        for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++) {
                                if (qconf->pkt_buf[portid].len == 0)
                                        continue;
@@ -787,7 +832,7 @@ l2fwd_main_loop(struct l2fwd_crypto_options *options)
                                                ops_burst, nb_rx) !=
                                                                nb_rx) {
                                        for (j = 0; j < nb_rx; j++)
-                                               rte_pktmbuf_free(pkts_burst[i]);
+                                               rte_pktmbuf_free(pkts_burst[j]);
 
                                        nb_rx = 0;
                                }
@@ -841,7 +886,8 @@ l2fwd_crypto_usage(const char *prgname)
                " (0 to disable, 10 default, 86400 maximum)\n"
 
                "  --cdev_type HW / SW / ANY\n"
-               "  --chain HASH_CIPHER / CIPHER_HASH\n"
+               "  --chain HASH_CIPHER / CIPHER_HASH / CIPHER_ONLY /"
+               " HASH_ONLY\n"
 
                "  --cipher_algo ALGO\n"
                "  --cipher_op ENCRYPT / DECRYPT\n"
@@ -1187,8 +1233,6 @@ l2fwd_crypto_parse_timer_period(struct l2fwd_crypto_options *options,
 static void
 l2fwd_crypto_default_options(struct l2fwd_crypto_options *options)
 {
-       srand(time(NULL));
-
        options->portmask = 0xffffffff;
        options->nb_ports_per_lcore = 1;
        options->refresh_period = 10000;
@@ -1353,7 +1397,7 @@ l2fwd_crypto_parse_args(struct l2fwd_crypto_options *options,
 
        l2fwd_crypto_default_options(options);
 
-       while ((opt = getopt_long(argc, argvopt, "p:q:st:", lgopts,
+       while ((opt = getopt_long(argc, argvopt, "p:q:sT:", lgopts,
                        &option_index)) != EOF) {
                switch (opt) {
                /* long options */
@@ -1672,7 +1716,6 @@ initialize_cryptodevs(struct l2fwd_crypto_options *options, unsigned nb_ports,
                                continue;
                        }
 
-                       options->block_size = cap->sym.auth.block_size;
                        /*
                         * Check if length of provided AAD is supported
                         * by the algorithm chosen.
@@ -1780,7 +1823,14 @@ initialize_cryptodevs(struct l2fwd_crypto_options *options, unsigned nb_ports,
                        return -1;
                }
 
-               l2fwd_enabled_crypto_mask |= (1 << cdev_id);
+               retval = rte_cryptodev_start(cdev_id);
+               if (retval < 0) {
+                       printf("Failed to start device %u: error %d\n",
+                                       cdev_id, retval);
+                       return -1;
+               }
+
+               l2fwd_enabled_crypto_mask |= (((uint64_t)1) << cdev_id);
 
                enabled_cdevs[cdev_id] = 1;
                enabled_cdev_count++;