X-Git-Url: https://gerrit.fd.io/r/gitweb?a=blobdiff_plain;f=examples%2Fl2fwd-crypto%2Fmain.c;h=07282317f1f2f60e5484dd4917ef50f66436fd71;hb=c921f906b1367468d45b28ec5b7d4650fd1d4f8f;hp=8dc616d433e68c150cd789c03815b7cbe0da4838;hpb=809f08006d56e7ba4ce190b0a63d44acf62d8044;p=deb_dpdk.git

diff --git a/examples/l2fwd-crypto/main.c b/examples/l2fwd-crypto/main.c
index 8dc616d4..07282317 100644
--- a/examples/l2fwd-crypto/main.c
+++ b/examples/l2fwd-crypto/main.c
@@ -45,6 +45,8 @@
 #include 
 #include 
 #include 
+#include <fcntl.h>
+#include <unistd.h>
 #include 
 #include 
@@ -70,7 +72,6 @@
 #include 
 #include 
 #include 
-#include 
 #include 
 
 enum cdev_type {
@@ -199,7 +200,7 @@ struct lcore_queue_conf {
 	unsigned nb_crypto_devs;
 	unsigned cryptodev_list[MAX_RX_QUEUE_PER_LCORE];
 
-	struct op_buffer op_buf[RTE_MAX_ETHPORTS];
+	struct op_buffer op_buf[RTE_CRYPTO_MAX_DEVS];
 	struct pkt_buffer pkt_buf[RTE_MAX_ETHPORTS];
 } __rte_cache_aligned;
 
@@ -214,7 +215,7 @@ static const struct rte_eth_conf port_conf = {
 		.hw_ip_checksum = 0, /**< IP checksum offload disabled */
 		.hw_vlan_filter = 0, /**< VLAN filtering disabled */
 		.jumbo_frame = 0, /**< Jumbo Frame Support disabled */
-		.hw_strip_crc = 0, /**< CRC stripped by hardware */
+		.hw_strip_crc = 1, /**< CRC stripped by hardware */
 	},
 	.txmode = {
 		.mq_mode = ETH_MQ_TX_NONE,
@@ -243,7 +244,7 @@ struct l2fwd_crypto_statistics {
 } __rte_cache_aligned;
 
 struct l2fwd_port_statistics port_statistics[RTE_MAX_ETHPORTS];
-struct l2fwd_crypto_statistics crypto_statistics[RTE_MAX_ETHPORTS];
+struct l2fwd_crypto_statistics crypto_statistics[RTE_CRYPTO_MAX_DEVS];
 
 /* A tsc-based timer responsible for triggering statistics printout */
 #define TIMER_MILLISECOND 2000000ULL /* around 1ms at 2 Ghz */
@@ -298,7 +299,7 @@ print_stats(void)
 
 	for (cdevid = 0; cdevid < RTE_CRYPTO_MAX_DEVS; cdevid++) {
 		/* skip disabled ports */
-		if ((l2fwd_enabled_crypto_mask & (1lu << cdevid)) == 0)
+		if ((l2fwd_enabled_crypto_mask & (((uint64_t)1) << cdevid)) == 0)
 			continue;
 		printf("\nStatistics for cryptodev %"PRIu64
 				" -------------------------"
@@ -339,16 +340,24 @@ fill_supported_algorithm_tables(void)
 		strcpy(supported_auth_algo[i], "NOT_SUPPORTED");
 
 	strcpy(supported_auth_algo[RTE_CRYPTO_AUTH_AES_GCM], "AES_GCM");
+	strcpy(supported_auth_algo[RTE_CRYPTO_AUTH_AES_GMAC], "AES_GMAC");
 	strcpy(supported_auth_algo[RTE_CRYPTO_AUTH_MD5_HMAC], "MD5_HMAC");
+	strcpy(supported_auth_algo[RTE_CRYPTO_AUTH_MD5], "MD5");
 	strcpy(supported_auth_algo[RTE_CRYPTO_AUTH_NULL], "NULL");
 	strcpy(supported_auth_algo[RTE_CRYPTO_AUTH_AES_XCBC_MAC], "AES_XCBC_MAC");
 	strcpy(supported_auth_algo[RTE_CRYPTO_AUTH_SHA1_HMAC], "SHA1_HMAC");
+	strcpy(supported_auth_algo[RTE_CRYPTO_AUTH_SHA1], "SHA1");
 	strcpy(supported_auth_algo[RTE_CRYPTO_AUTH_SHA224_HMAC], "SHA224_HMAC");
+	strcpy(supported_auth_algo[RTE_CRYPTO_AUTH_SHA224], "SHA224");
 	strcpy(supported_auth_algo[RTE_CRYPTO_AUTH_SHA256_HMAC], "SHA256_HMAC");
+	strcpy(supported_auth_algo[RTE_CRYPTO_AUTH_SHA256], "SHA256");
 	strcpy(supported_auth_algo[RTE_CRYPTO_AUTH_SHA384_HMAC], "SHA384_HMAC");
+	strcpy(supported_auth_algo[RTE_CRYPTO_AUTH_SHA384], "SHA384");
 	strcpy(supported_auth_algo[RTE_CRYPTO_AUTH_SHA512_HMAC], "SHA512_HMAC");
+	strcpy(supported_auth_algo[RTE_CRYPTO_AUTH_SHA512], "SHA512");
 	strcpy(supported_auth_algo[RTE_CRYPTO_AUTH_SNOW3G_UIA2], "SNOW3G_UIA2");
+	strcpy(supported_auth_algo[RTE_CRYPTO_AUTH_ZUC_EIA3], "ZUC_EIA3");
 	strcpy(supported_auth_algo[RTE_CRYPTO_AUTH_KASUMI_F9], "KASUMI_F9");
 
 	for (i = 0; i < RTE_CRYPTO_CIPHER_LIST_END; i++)
@@ -359,7 +368,10 @@ fill_supported_algorithm_tables(void)
 	strcpy(supported_cipher_algo[RTE_CRYPTO_CIPHER_AES_GCM], "AES_GCM");
 	strcpy(supported_cipher_algo[RTE_CRYPTO_CIPHER_NULL], "NULL");
 	strcpy(supported_cipher_algo[RTE_CRYPTO_CIPHER_SNOW3G_UEA2], "SNOW3G_UEA2");
+	strcpy(supported_cipher_algo[RTE_CRYPTO_CIPHER_ZUC_EEA3], "ZUC_EEA3");
 	strcpy(supported_cipher_algo[RTE_CRYPTO_CIPHER_KASUMI_F8], "KASUMI_F8");
+	strcpy(supported_cipher_algo[RTE_CRYPTO_CIPHER_3DES_CTR], "3DES_CTR");
+	strcpy(supported_cipher_algo[RTE_CRYPTO_CIPHER_3DES_CBC], "3DES_CBC");
 }
 
@@ -439,6 +451,10 @@ l2fwd_simple_crypto_enqueue(struct rte_mbuf *m,
 
 	/* Zero pad data to be crypto'd so it is block aligned */
 	data_len = rte_pktmbuf_data_len(m) - ipdata_offset;
+
+	if (cparams->do_hash && cparams->hash_verify)
+		data_len -= cparams->digest_length;
+
 	pad_len = data_len % cparams->block_size ? cparams->block_size -
 			(data_len % cparams->block_size) : 0;
 
@@ -460,17 +476,18 @@ l2fwd_simple_crypto_enqueue(struct rte_mbuf *m,
 			op->sym->auth.digest.data = (uint8_t *)rte_pktmbuf_append(m,
 				cparams->digest_length);
 		} else {
-			op->sym->auth.digest.data = (uint8_t *)rte_pktmbuf_append(m,
-				cparams->digest_length);
+			op->sym->auth.digest.data = rte_pktmbuf_mtod(m,
+					uint8_t *) + ipdata_offset + data_len;
 		}
 
 		op->sym->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(m,
				rte_pktmbuf_pkt_len(m) - cparams->digest_length);
 		op->sym->auth.digest.length = cparams->digest_length;
 
-		/* For SNOW3G/KASUMI algorithms, offset/length must be in bits */
+		/* For wireless algorithms, offset/length must be in bits */
 		if (cparams->auth_algo == RTE_CRYPTO_AUTH_SNOW3G_UIA2 ||
-				cparams->auth_algo == RTE_CRYPTO_AUTH_KASUMI_F9) {
+				cparams->auth_algo == RTE_CRYPTO_AUTH_KASUMI_F9 ||
+				cparams->auth_algo == RTE_CRYPTO_AUTH_ZUC_EIA3) {
 			op->sym->auth.data.offset = ipdata_offset << 3;
 			op->sym->auth.data.length = data_len << 3;
 		} else {
@@ -482,6 +499,10 @@ l2fwd_simple_crypto_enqueue(struct rte_mbuf *m,
 			op->sym->auth.aad.data = cparams->aad.data;
 			op->sym->auth.aad.phys_addr = cparams->aad.phys_addr;
 			op->sym->auth.aad.length = cparams->aad.length;
+		} else {
+			op->sym->auth.aad.data = NULL;
+			op->sym->auth.aad.phys_addr = 0;
+			op->sym->auth.aad.length = 0;
 		}
 	}
 
@@ -490,25 +511,15 @@ l2fwd_simple_crypto_enqueue(struct rte_mbuf *m,
 		op->sym->cipher.iv.phys_addr = cparams->iv.phys_addr;
 		op->sym->cipher.iv.length = cparams->iv.length;
 
-		/* For SNOW3G algorithms, offset/length must be in bits */
+		/* For wireless algorithms, offset/length must be in bits */
 		if (cparams->cipher_algo == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
-				cparams->cipher_algo == RTE_CRYPTO_CIPHER_KASUMI_F8) {
+				cparams->cipher_algo == RTE_CRYPTO_CIPHER_KASUMI_F8 ||
+				cparams->cipher_algo == RTE_CRYPTO_CIPHER_ZUC_EEA3) {
 			op->sym->cipher.data.offset = ipdata_offset << 3;
-			if (cparams->do_hash && cparams->hash_verify)
-				/* Do not cipher the hash tag */
-				op->sym->cipher.data.length = (data_len -
-					cparams->digest_length) << 3;
-			else
-				op->sym->cipher.data.length = data_len << 3;
-
+			op->sym->cipher.data.length = data_len << 3;
 		} else {
 			op->sym->cipher.data.offset = ipdata_offset;
-			if (cparams->do_hash && cparams->hash_verify)
-				/* Do not cipher the hash tag */
-				op->sym->cipher.data.length = data_len -
-					cparams->digest_length;
-			else
-				op->sym->cipher.data.length = data_len;
+			op->sym->cipher.data.length = data_len;
 		}
 	}
 
@@ -588,10 +599,18 @@ l2fwd_simple_forward(struct rte_mbuf *m, unsigned portid)
 static void
 generate_random_key(uint8_t *key, unsigned length)
 {
-	unsigned i;
+	int fd;
+	int ret;
+
+	fd = open("/dev/urandom", O_RDONLY);
+	if (fd < 0)
+		rte_exit(EXIT_FAILURE, "Failed to generate random key\n");
 
-	for (i = 0; i < length; i++)
-		key[i] = rand() % 0xff;
+	ret = read(fd, key, length);
+	close(fd);
+
+	if (ret != (signed)length)
+		rte_exit(EXIT_FAILURE, "Failed to generate random key\n");
 }
 
 static struct rte_cryptodev_sym_session *
@@ -628,7 +647,7 @@ l2fwd_main_loop(struct l2fwd_crypto_options *options)
 	unsigned lcore_id = rte_lcore_id();
 	uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
-	unsigned i, j, portid, nb_rx;
+	unsigned i, j, portid, nb_rx, len;
 	struct lcore_queue_conf *qconf = &lcore_queue_conf[lcore_id];
 	const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1) /
 			US_PER_S * BURST_TX_DRAIN_US;
@@ -684,7 +703,8 @@ l2fwd_main_loop(struct l2fwd_crypto_options *options)
 				generate_random_key(port_cparams[i].aad.data,
 						port_cparams[i].aad.length);
 
-		}
+		} else
+			port_cparams[i].aad.length = 0;
 
 		if (options->auth_xform.auth.op == RTE_CRYPTO_AUTH_OP_VERIFY)
 			port_cparams[i].hash_verify = 1;
@@ -727,10 +747,18 @@ l2fwd_main_loop(struct l2fwd_crypto_options *options)
 		cur_tsc = rte_rdtsc();
 
 		/*
-		 * TX burst queue drain
+		 * Crypto device/TX burst queue drain
 		 */
 		diff_tsc = cur_tsc - prev_tsc;
 		if (unlikely(diff_tsc > drain_tsc)) {
+			/* Enqueue all crypto ops remaining in buffers */
+			for (i = 0; i < qconf->nb_crypto_devs; i++) {
+				cparams = &port_cparams[i];
+				len = qconf->op_buf[cparams->dev_id].len;
+				l2fwd_crypto_send_burst(qconf, len, cparams);
+				qconf->op_buf[cparams->dev_id].len = 0;
+			}
+			/* Transmit all packets remaining in buffers */
 			for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++) {
 				if (qconf->pkt_buf[portid].len == 0)
 					continue;
@@ -787,7 +815,7 @@ l2fwd_main_loop(struct l2fwd_crypto_options *options)
 						ops_burst, nb_rx) != nb_rx) {
 					for (j = 0; j < nb_rx; j++)
-						rte_pktmbuf_free(pkts_burst[i]);
+						rte_pktmbuf_free(pkts_burst[j]);
 
 					nb_rx = 0;
 				}
@@ -1187,8 +1215,6 @@ l2fwd_crypto_parse_timer_period(struct l2fwd_crypto_options *options,
 static void
 l2fwd_crypto_default_options(struct l2fwd_crypto_options *options)
 {
-	srand(time(NULL));
-
 	options->portmask = 0xffffffff;
 	options->nb_ports_per_lcore = 1;
 	options->refresh_period = 10000;
@@ -1672,7 +1698,6 @@ initialize_cryptodevs(struct l2fwd_crypto_options *options, unsigned nb_ports,
 				continue;
 			}
-			options->block_size = cap->sym.auth.block_size;
 			/*
 			 * Check if length of provided AAD is supported
 			 * by the algorithm chosen.
 			 */
@@ -1780,7 +1805,14 @@ initialize_cryptodevs(struct l2fwd_crypto_options *options, unsigned nb_ports,
 			return -1;
 		}
 
-		l2fwd_enabled_crypto_mask |= (1 << cdev_id);
+		retval = rte_cryptodev_start(cdev_id);
+		if (retval < 0) {
+			printf("Failed to start device %u: error %d\n",
+					cdev_id, retval);
+			return -1;
+		}
+
+		l2fwd_enabled_crypto_mask |= (((uint64_t)1) << cdev_id);
 
 		enabled_cdevs[cdev_id] = 1;
 		enabled_cdev_count++;