1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2016 Intel Corporation
10 #include <netinet/in.h>
11 #include <netinet/ip.h>
12 #include <netinet/ip6.h>
14 #include <sys/queue.h>
19 #include <rte_common.h>
20 #include <rte_byteorder.h>
23 #include <rte_launch.h>
24 #include <rte_atomic.h>
25 #include <rte_cycles.h>
26 #include <rte_prefetch.h>
27 #include <rte_lcore.h>
28 #include <rte_per_lcore.h>
29 #include <rte_branch_prediction.h>
30 #include <rte_interrupts.h>
31 #include <rte_random.h>
32 #include <rte_debug.h>
33 #include <rte_ether.h>
34 #include <rte_ethdev.h>
35 #include <rte_mempool.h>
41 #include <rte_jhash.h>
42 #include <rte_cryptodev.h>
43 #include <rte_security.h>
48 #define RTE_LOGTYPE_IPSEC RTE_LOGTYPE_USER1
50 #define MAX_JUMBO_PKT_LEN 9600
52 #define MEMPOOL_CACHE_SIZE 256
54 #define NB_MBUF (32000)
56 #define CDEV_QUEUE_DESC 2048
57 #define CDEV_MAP_ENTRIES 16384
58 #define CDEV_MP_NB_OBJS 2048
59 #define CDEV_MP_CACHE_SZ 64
60 #define MAX_QUEUE_PAIRS 1
62 #define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */
66 /* Configure how many packets ahead to prefetch, when reading packets */
67 #define PREFETCH_OFFSET 3
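/*
 * The RX/TX preparation loops below use a simple software pipeline: while
 * packet i is being parsed, the header of packet i + PREFETCH_OFFSET is
 * prefetched, and the last PREFETCH_OFFSET packets of the burst are then
 * handled without issuing further prefetches.
 */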
69 #define MAX_RX_QUEUE_PER_LCORE 16
71 #define MAX_LCORE_PARAMS 1024
73 #define UNPROTECTED_PORT(portid) (unprotected_port_mask & (1 << portid))
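/*
 * Ports set in unprotected_port_mask face the untrusted (encrypted) network:
 * traffic received on them goes through inbound IPsec processing, while
 * traffic from all other enabled ("protected") ports is matched against the
 * outbound policies and encrypted before transmission; see process_pkts().
 */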
76 /* Configurable number of RX/TX ring descriptors */
78 #define IPSEC_SECGW_RX_DESC_DEFAULT 1024
79 #define IPSEC_SECGW_TX_DESC_DEFAULT 1024
80 static uint16_t nb_rxd = IPSEC_SECGW_RX_DESC_DEFAULT;
81 static uint16_t nb_txd = IPSEC_SECGW_TX_DESC_DEFAULT;
83 #if RTE_BYTE_ORDER != RTE_LITTLE_ENDIAN
84 #define __BYTES_TO_UINT64(a, b, c, d, e, f, g, h) \
85 (((uint64_t)((a) & 0xff) << 56) | \
86 ((uint64_t)((b) & 0xff) << 48) | \
87 ((uint64_t)((c) & 0xff) << 40) | \
88 ((uint64_t)((d) & 0xff) << 32) | \
89 ((uint64_t)((e) & 0xff) << 24) | \
90 ((uint64_t)((f) & 0xff) << 16) | \
91 ((uint64_t)((g) & 0xff) << 8) | \
92 ((uint64_t)(h) & 0xff))
93 #else
94 #define __BYTES_TO_UINT64(a, b, c, d, e, f, g, h) \
95 (((uint64_t)((h) & 0xff) << 56) | \
96 ((uint64_t)((g) & 0xff) << 48) | \
97 ((uint64_t)((f) & 0xff) << 40) | \
98 ((uint64_t)((e) & 0xff) << 32) | \
99 ((uint64_t)((d) & 0xff) << 24) | \
100 ((uint64_t)((c) & 0xff) << 16) | \
101 ((uint64_t)((b) & 0xff) << 8) | \
102 ((uint64_t)(a) & 0xff))
103 #endif
104 #define ETHADDR(a, b, c, d, e, f) (__BYTES_TO_UINT64(a, b, c, d, e, f, 0, 0))
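/*
 * Ethernet addresses are kept as 64-bit integers laid out so that the six
 * MAC bytes are the first six bytes in memory regardless of host byte order,
 * which lets prepare_tx_pkt() memcpy() them straight into the ether_hdr
 * source/destination fields.
 */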
106 #define ETHADDR_TO_UINT64(addr) __BYTES_TO_UINT64( \
107 addr.addr_bytes[0], addr.addr_bytes[1], \
108 addr.addr_bytes[2], addr.addr_bytes[3], \
109 addr.addr_bytes[4], addr.addr_bytes[5], \
112 /* port/source ethernet addr and destination ethernet addr */
113 struct ethaddr_info {
114 uint64_t src, dst;
115 };
117 struct ethaddr_info ethaddr_tbl[RTE_MAX_ETHPORTS] = {
118 { 0, ETHADDR(0x00, 0x16, 0x3e, 0x7e, 0x94, 0x9a) },
119 { 0, ETHADDR(0x00, 0x16, 0x3e, 0x22, 0xa1, 0xd9) },
120 { 0, ETHADDR(0x00, 0x16, 0x3e, 0x08, 0x69, 0x26) },
121 { 0, ETHADDR(0x00, 0x16, 0x3e, 0x49, 0x9e, 0xdd) }
124 #define CMD_LINE_OPT_CONFIG "config"
125 #define CMD_LINE_OPT_SINGLE_SA "single-sa"
126 #define CMD_LINE_OPT_CRYPTODEV_MASK "cryptodev_mask"
129 /* long options mapped to a short option */
131 /* first long only option value must be >= 256, so that we won't
132 * conflict with short options
134 CMD_LINE_OPT_MIN_NUM = 256,
135 CMD_LINE_OPT_CONFIG_NUM,
136 CMD_LINE_OPT_SINGLE_SA_NUM,
137 CMD_LINE_OPT_CRYPTODEV_MASK_NUM,
140 static const struct option lgopts[] = {
141 {CMD_LINE_OPT_CONFIG, 1, 0, CMD_LINE_OPT_CONFIG_NUM},
142 {CMD_LINE_OPT_SINGLE_SA, 1, 0, CMD_LINE_OPT_SINGLE_SA_NUM},
143 {CMD_LINE_OPT_CRYPTODEV_MASK, 1, 0, CMD_LINE_OPT_CRYPTODEV_MASK_NUM},
147 /* mask of enabled ports */
148 static uint32_t enabled_port_mask;
149 static uint64_t enabled_cryptodev_mask = UINT64_MAX;
150 static uint32_t unprotected_port_mask;
151 static int32_t promiscuous_on = 1;
152 static int32_t numa_on = 1; /**< NUMA is enabled by default. */
153 static uint32_t nb_lcores;
154 static uint32_t single_sa;
155 static uint32_t single_sa_idx;
156 static uint32_t frame_size;
158 struct lcore_rx_queue {
161 } __rte_cache_aligned;
163 struct lcore_params {
167 } __rte_cache_aligned;
169 static struct lcore_params lcore_params_array[MAX_LCORE_PARAMS];
171 static struct lcore_params *lcore_params;
172 static uint16_t nb_lcore_params;
174 static struct rte_hash *cdev_map_in;
175 static struct rte_hash *cdev_map_out;
177 struct buffer {
178 uint16_t len;
179 struct rte_mbuf *m_table[MAX_PKT_BURST] __rte_aligned(sizeof(void *));
180 };
182 struct lcore_conf {
183 uint16_t nb_rx_queue;
184 struct lcore_rx_queue rx_queue_list[MAX_RX_QUEUE_PER_LCORE];
185 uint16_t tx_queue_id[RTE_MAX_ETHPORTS];
186 struct buffer tx_mbufs[RTE_MAX_ETHPORTS];
187 struct ipsec_ctx inbound;
188 struct ipsec_ctx outbound;
189 struct rt_ctx *rt4_ctx;
190 struct rt_ctx *rt6_ctx;
191 } __rte_cache_aligned;
193 static struct lcore_conf lcore_conf[RTE_MAX_LCORE];
195 static struct rte_eth_conf port_conf = {
197 .mq_mode = ETH_MQ_RX_RSS,
198 .max_rx_pkt_len = ETHER_MAX_LEN,
200 .offloads = DEV_RX_OFFLOAD_CHECKSUM,
205 .rss_hf = ETH_RSS_IP | ETH_RSS_UDP |
206 ETH_RSS_TCP | ETH_RSS_SCTP,
210 .mq_mode = ETH_MQ_TX_NONE,
211 .offloads = (DEV_TX_OFFLOAD_IPV4_CKSUM |
212 DEV_TX_OFFLOAD_MULTI_SEGS),
216 static struct socket_ctx socket_ctx[NB_SOCKETS];
218 struct traffic_type {
219 const uint8_t *data[MAX_PKT_BURST * 2];
220 struct rte_mbuf *pkts[MAX_PKT_BURST * 2];
221 uint32_t res[MAX_PKT_BURST * 2];
222 uint32_t num;
223 };
225 struct ipsec_traffic {
226 struct traffic_type ipsec;
227 struct traffic_type ip4;
228 struct traffic_type ip6;
229 };
232 prepare_one_packet(struct rte_mbuf *pkt, struct ipsec_traffic *t)
235 struct ether_hdr *eth;
237 eth = rte_pktmbuf_mtod(pkt, struct ether_hdr *);
238 if (eth->ether_type == rte_cpu_to_be_16(ETHER_TYPE_IPv4)) {
239 nlp = (uint8_t *)rte_pktmbuf_adj(pkt, ETHER_HDR_LEN);
240 nlp = RTE_PTR_ADD(nlp, offsetof(struct ip, ip_p));
241 if (*nlp == IPPROTO_ESP)
242 t->ipsec.pkts[(t->ipsec.num)++] = pkt;
244 t->ip4.data[t->ip4.num] = nlp;
245 t->ip4.pkts[(t->ip4.num)++] = pkt;
247 } else if (eth->ether_type == rte_cpu_to_be_16(ETHER_TYPE_IPv6)) {
248 nlp = (uint8_t *)rte_pktmbuf_adj(pkt, ETHER_HDR_LEN);
249 nlp = RTE_PTR_ADD(nlp, offsetof(struct ip6_hdr, ip6_nxt));
250 if (*nlp == IPPROTO_ESP)
251 t->ipsec.pkts[(t->ipsec.num)++] = pkt;
253 t->ip6.data[t->ip6.num] = nlp;
254 t->ip6.pkts[(t->ip6.num)++] = pkt;
257 /* Unknown/Unsupported type, drop the packet */
258 RTE_LOG(ERR, IPSEC, "Unsupported packet type\n");
259 rte_pktmbuf_free(pkt);
262 /* Check if the packet has been processed inline. For inline protocol
263 * processed packets, the metadata in the mbuf can be used to identify
264 * the security processing done on the packet. The metadata will be
265 * used to retrieve the application registered userdata associated
266 * with the security session.
269 if (pkt->ol_flags & PKT_RX_SEC_OFFLOAD) {
271 struct ipsec_mbuf_metadata *priv;
272 struct rte_security_ctx *ctx = (struct rte_security_ctx *)
273 rte_eth_dev_get_sec_ctx(
276 /* Retrieve the userdata registered. Here, the userdata
277 * registered is the SA pointer.
280 sa = (struct ipsec_sa *)
281 rte_security_get_userdata(ctx, pkt->udata64);
284 /* userdata could not be retrieved */
288 /* Save SA as priv member in mbuf. This will be used in the
289 * IPsec selector(SP-SA) check.
292 priv = get_priv(pkt);
298 prepare_traffic(struct rte_mbuf **pkts, struct ipsec_traffic *t,
307 for (i = 0; i < (nb_pkts - PREFETCH_OFFSET); i++) {
308 rte_prefetch0(rte_pktmbuf_mtod(pkts[i + PREFETCH_OFFSET],
310 prepare_one_packet(pkts[i], t);
312 /* Process left packets */
313 for (; i < nb_pkts; i++)
314 prepare_one_packet(pkts[i], t);
318 prepare_tx_pkt(struct rte_mbuf *pkt, uint16_t port)
321 struct ether_hdr *ethhdr;
323 ip = rte_pktmbuf_mtod(pkt, struct ip *);
325 ethhdr = (struct ether_hdr *)rte_pktmbuf_prepend(pkt, ETHER_HDR_LEN);
327 if (ip->ip_v == IPVERSION) {
328 pkt->ol_flags |= PKT_TX_IP_CKSUM | PKT_TX_IPV4;
329 pkt->l3_len = sizeof(struct ip);
330 pkt->l2_len = ETHER_HDR_LEN;
333 ethhdr->ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
335 pkt->ol_flags |= PKT_TX_IPV6;
336 pkt->l3_len = sizeof(struct ip6_hdr);
337 pkt->l2_len = ETHER_HDR_LEN;
339 ethhdr->ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv6);
342 memcpy(&ethhdr->s_addr, &ethaddr_tbl[port].src,
343 sizeof(struct ether_addr));
344 memcpy(&ethhdr->d_addr, &ethaddr_tbl[port].dst,
345 sizeof(struct ether_addr));
349 prepare_tx_burst(struct rte_mbuf *pkts[], uint16_t nb_pkts, uint16_t port)
352 const int32_t prefetch_offset = 2;
354 for (i = 0; i < (nb_pkts - prefetch_offset); i++) {
355 rte_mbuf_prefetch_part2(pkts[i + prefetch_offset]);
356 prepare_tx_pkt(pkts[i], port);
358 /* Process left packets */
359 for (; i < nb_pkts; i++)
360 prepare_tx_pkt(pkts[i], port);
363 /* Send burst of packets on an output interface */
364 static inline int32_t
365 send_burst(struct lcore_conf *qconf, uint16_t n, uint16_t port)
367 struct rte_mbuf **m_table;
371 queueid = qconf->tx_queue_id[port];
372 m_table = (struct rte_mbuf **)qconf->tx_mbufs[port].m_table;
374 prepare_tx_burst(m_table, n, port);
376 ret = rte_eth_tx_burst(port, queueid, m_table, n);
377 if (unlikely(ret < n)) {
379 rte_pktmbuf_free(m_table[ret]);
386 /* Enqueue a single packet, and send burst if queue is filled */
387 static inline int32_t
388 send_single_packet(struct rte_mbuf *m, uint16_t port)
392 struct lcore_conf *qconf;
394 lcore_id = rte_lcore_id();
396 qconf = &lcore_conf[lcore_id];
397 len = qconf->tx_mbufs[port].len;
398 qconf->tx_mbufs[port].m_table[len] = m;
401 /* enough pkts to be sent */
402 if (unlikely(len == MAX_PKT_BURST)) {
403 send_burst(qconf, MAX_PKT_BURST, port);
407 qconf->tx_mbufs[port].len = len;
412 inbound_sp_sa(struct sp_ctx *sp, struct sa_ctx *sa, struct traffic_type *ip,
416 uint32_t i, j, res, sa_idx;
418 if (ip->num == 0 || sp == NULL)
421 rte_acl_classify((struct rte_acl_ctx *)sp, ip->data, ip->res,
422 ip->num, DEFAULT_MAX_CATEGORIES);
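/*
 * The inbound SP database is an rte_acl context; each classification result
 * encodes DISCARD, BYPASS or PROTECT together with an SA index, which is
 * recovered below with PROTECT_MASK and validated against the SA that
 * actually processed the packet.
 */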
425 for (i = 0; i < ip->num; i++) {
437 /* Only check SPI match for processed IPSec packets */
438 if (i < lim && ((m->ol_flags & PKT_RX_SEC_OFFLOAD) == 0)) {
443 sa_idx = ip->res[i] & PROTECT_MASK;
444 if (sa_idx >= IPSEC_SA_MAX_ENTRIES ||
445 !inbound_sa_check(sa, m, sa_idx)) {
455 split46_traffic(struct ipsec_traffic *trf, struct rte_mbuf *mb[], uint32_t num)
464 for (i = 0; i < num; i++) {
467 ip = rte_pktmbuf_mtod(m, struct ip *);
469 if (ip->ip_v == IPVERSION) {
470 trf->ip4.pkts[n4] = m;
471 trf->ip4.data[n4] = rte_pktmbuf_mtod_offset(m,
472 uint8_t *, offsetof(struct ip, ip_p));
474 } else if (ip->ip_v == IP6_VERSION) {
475 trf->ip6.pkts[n6] = m;
476 trf->ip6.data[n6] = rte_pktmbuf_mtod_offset(m,
478 offsetof(struct ip6_hdr, ip6_nxt));
490 process_pkts_inbound(struct ipsec_ctx *ipsec_ctx,
491 struct ipsec_traffic *traffic)
493 uint16_t nb_pkts_in, n_ip4, n_ip6;
495 n_ip4 = traffic->ip4.num;
496 n_ip6 = traffic->ip6.num;
498 nb_pkts_in = ipsec_inbound(ipsec_ctx, traffic->ipsec.pkts,
499 traffic->ipsec.num, MAX_PKT_BURST);
501 split46_traffic(traffic, traffic->ipsec.pkts, nb_pkts_in);
503 inbound_sp_sa(ipsec_ctx->sp4_ctx, ipsec_ctx->sa_ctx, &traffic->ip4,
506 inbound_sp_sa(ipsec_ctx->sp6_ctx, ipsec_ctx->sa_ctx, &traffic->ip6,
511 outbound_sp(struct sp_ctx *sp, struct traffic_type *ip,
512 struct traffic_type *ipsec)
515 uint32_t i, j, sa_idx;
517 if (ip->num == 0 || sp == NULL)
520 rte_acl_classify((struct rte_acl_ctx *)sp, ip->data, ip->res,
521 ip->num, DEFAULT_MAX_CATEGORIES);
524 for (i = 0; i < ip->num; i++) {
526 sa_idx = ip->res[i] & PROTECT_MASK;
527 if (ip->res[i] & DISCARD)
529 else if (ip->res[i] & BYPASS)
531 else if (sa_idx < IPSEC_SA_MAX_ENTRIES) {
532 ipsec->res[ipsec->num] = sa_idx;
533 ipsec->pkts[ipsec->num++] = m;
534 } else /* invalid SA idx */
541 process_pkts_outbound(struct ipsec_ctx *ipsec_ctx,
542 struct ipsec_traffic *traffic)
545 uint16_t idx, nb_pkts_out, i;
547 /* Drop any IPsec traffic from protected ports */
548 for (i = 0; i < traffic->ipsec.num; i++)
549 rte_pktmbuf_free(traffic->ipsec.pkts[i]);
551 traffic->ipsec.num = 0;
553 outbound_sp(ipsec_ctx->sp4_ctx, &traffic->ip4, &traffic->ipsec);
555 outbound_sp(ipsec_ctx->sp6_ctx, &traffic->ip6, &traffic->ipsec);
557 nb_pkts_out = ipsec_outbound(ipsec_ctx, traffic->ipsec.pkts,
558 traffic->ipsec.res, traffic->ipsec.num,
561 for (i = 0; i < nb_pkts_out; i++) {
562 m = traffic->ipsec.pkts[i];
563 struct ip *ip = rte_pktmbuf_mtod(m, struct ip *);
564 if (ip->ip_v == IPVERSION) {
565 idx = traffic->ip4.num++;
566 traffic->ip4.pkts[idx] = m;
568 idx = traffic->ip6.num++;
569 traffic->ip6.pkts[idx] = m;
575 process_pkts_inbound_nosp(struct ipsec_ctx *ipsec_ctx,
576 struct ipsec_traffic *traffic)
579 uint32_t nb_pkts_in, i, idx;
581 /* Drop any IPv4 traffic from unprotected ports */
582 for (i = 0; i < traffic->ip4.num; i++)
583 rte_pktmbuf_free(traffic->ip4.pkts[i]);
585 traffic->ip4.num = 0;
587 /* Drop any IPv6 traffic from unprotected ports */
588 for (i = 0; i < traffic->ip6.num; i++)
589 rte_pktmbuf_free(traffic->ip6.pkts[i]);
591 traffic->ip6.num = 0;
593 nb_pkts_in = ipsec_inbound(ipsec_ctx, traffic->ipsec.pkts,
594 traffic->ipsec.num, MAX_PKT_BURST);
596 for (i = 0; i < nb_pkts_in; i++) {
597 m = traffic->ipsec.pkts[i];
598 struct ip *ip = rte_pktmbuf_mtod(m, struct ip *);
599 if (ip->ip_v == IPVERSION) {
600 idx = traffic->ip4.num++;
601 traffic->ip4.pkts[idx] = m;
603 idx = traffic->ip6.num++;
604 traffic->ip6.pkts[idx] = m;
610 process_pkts_outbound_nosp(struct ipsec_ctx *ipsec_ctx,
611 struct ipsec_traffic *traffic)
614 uint32_t nb_pkts_out, i, n;
617 /* Drop any IPsec traffic from protected ports */
618 for (i = 0; i < traffic->ipsec.num; i++)
619 rte_pktmbuf_free(traffic->ipsec.pkts[i]);
623 for (i = 0; i < traffic->ip4.num; i++) {
624 traffic->ipsec.pkts[n] = traffic->ip4.pkts[i];
625 traffic->ipsec.res[n++] = single_sa_idx;
628 for (i = 0; i < traffic->ip6.num; i++) {
629 traffic->ipsec.pkts[n] = traffic->ip6.pkts[i];
630 traffic->ipsec.res[n++] = single_sa_idx;
633 traffic->ip4.num = 0;
634 traffic->ip6.num = 0;
635 traffic->ipsec.num = n;
637 nb_pkts_out = ipsec_outbound(ipsec_ctx, traffic->ipsec.pkts,
638 traffic->ipsec.res, traffic->ipsec.num,
641 /* They all use the same SA (ip4 or ip6 tunnel) */
642 m = traffic->ipsec.pkts[i];
643 ip = rte_pktmbuf_mtod(m, struct ip *);
644 if (ip->ip_v == IPVERSION) {
645 traffic->ip4.num = nb_pkts_out;
646 for (i = 0; i < nb_pkts_out; i++)
647 traffic->ip4.pkts[i] = traffic->ipsec.pkts[i];
649 traffic->ip6.num = nb_pkts_out;
650 for (i = 0; i < nb_pkts_out; i++)
651 traffic->ip6.pkts[i] = traffic->ipsec.pkts[i];
655 static inline int32_t
656 get_hop_for_offload_pkt(struct rte_mbuf *pkt, int is_ipv6)
658 struct ipsec_mbuf_metadata *priv;
661 priv = get_priv(pkt);
664 if (unlikely(sa == NULL)) {
665 RTE_LOG(ERR, IPSEC, "SA not saved in private data\n");
673 return (sa->portid | RTE_LPM_LOOKUP_SUCCESS);
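/*
 * For IPv4 the hop reuses the rte_lpm result encoding: the low bits hold the
 * output port and RTE_LPM_LOOKUP_SUCCESS marks it valid, so route4_pkts() can
 * handle SA-derived and LPM-derived hops with the same check.
 */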
684 route4_pkts(struct rt_ctx *rt_ctx, struct rte_mbuf *pkts[], uint8_t nb_pkts)
686 uint32_t hop[MAX_PKT_BURST * 2];
687 uint32_t dst_ip[MAX_PKT_BURST * 2];
690 uint16_t lpm_pkts = 0;
695 /* Need to do an LPM lookup for non-inline packets. Inline packets will
696 * have port ID in the SA
699 for (i = 0; i < nb_pkts; i++) {
700 if (!(pkts[i]->ol_flags & PKT_TX_SEC_OFFLOAD)) {
701 /* Security offload not enabled. So an LPM lookup is
702 * required to get the hop
704 offset = offsetof(struct ip, ip_dst);
705 dst_ip[lpm_pkts] = *rte_pktmbuf_mtod_offset(pkts[i],
707 dst_ip[lpm_pkts] = rte_be_to_cpu_32(dst_ip[lpm_pkts]);
712 rte_lpm_lookup_bulk((struct rte_lpm *)rt_ctx, dst_ip, hop, lpm_pkts);
716 for (i = 0; i < nb_pkts; i++) {
717 if (pkts[i]->ol_flags & PKT_TX_SEC_OFFLOAD) {
718 /* Read hop from the SA */
719 pkt_hop = get_hop_for_offload_pkt(pkts[i], 0);
721 /* Need to use hop returned by lookup */
722 pkt_hop = hop[lpm_pkts++];
725 if ((pkt_hop & RTE_LPM_LOOKUP_SUCCESS) == 0) {
726 rte_pktmbuf_free(pkts[i]);
729 send_single_packet(pkts[i], pkt_hop & 0xff);
734 route6_pkts(struct rt_ctx *rt_ctx, struct rte_mbuf *pkts[], uint8_t nb_pkts)
736 int32_t hop[MAX_PKT_BURST * 2];
737 uint8_t dst_ip[MAX_PKT_BURST * 2][16];
741 uint16_t lpm_pkts = 0;
746 /* Need to do an LPM lookup for non-inline packets. Inline packets will
747 * have port ID in the SA
750 for (i = 0; i < nb_pkts; i++) {
751 if (!(pkts[i]->ol_flags & PKT_TX_SEC_OFFLOAD)) {
752 /* Security offload not enabled. So an LPM lookup is
753 * required to get the hop
755 offset = offsetof(struct ip6_hdr, ip6_dst);
756 ip6_dst = rte_pktmbuf_mtod_offset(pkts[i], uint8_t *,
758 memcpy(&dst_ip[lpm_pkts][0], ip6_dst, 16);
763 rte_lpm6_lookup_bulk_func((struct rte_lpm6 *)rt_ctx, dst_ip, hop,
768 for (i = 0; i < nb_pkts; i++) {
769 if (pkts[i]->ol_flags & PKT_TX_SEC_OFFLOAD) {
770 /* Read hop from the SA */
771 pkt_hop = get_hop_for_offload_pkt(pkts[i], 1);
773 /* Need to use hop returned by lookup */
774 pkt_hop = hop[lpm_pkts++];
778 rte_pktmbuf_free(pkts[i]);
781 send_single_packet(pkts[i], pkt_hop & 0xff);
786 process_pkts(struct lcore_conf *qconf, struct rte_mbuf **pkts,
787 uint8_t nb_pkts, uint16_t portid)
789 struct ipsec_traffic traffic;
791 prepare_traffic(pkts, &traffic, nb_pkts);
793 if (unlikely(single_sa)) {
794 if (UNPROTECTED_PORT(portid))
795 process_pkts_inbound_nosp(&qconf->inbound, &traffic);
797 process_pkts_outbound_nosp(&qconf->outbound, &traffic);
799 if (UNPROTECTED_PORT(portid))
800 process_pkts_inbound(&qconf->inbound, &traffic);
802 process_pkts_outbound(&qconf->outbound, &traffic);
805 route4_pkts(qconf->rt4_ctx, traffic.ip4.pkts, traffic.ip4.num);
806 route6_pkts(qconf->rt6_ctx, traffic.ip6.pkts, traffic.ip6.num);
810 drain_tx_buffers(struct lcore_conf *qconf)
815 for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++) {
816 buf = &qconf->tx_mbufs[portid];
819 send_burst(qconf, buf->len, portid);
825 drain_crypto_buffers(struct lcore_conf *qconf)
828 struct ipsec_ctx *ctx;
830 /* drain inbound buffers*/
831 ctx = &qconf->inbound;
832 for (i = 0; i != ctx->nb_qps; i++) {
833 if (ctx->tbl[i].len != 0)
834 enqueue_cop_burst(ctx->tbl + i);
837 /* drain outbound buffers*/
838 ctx = &qconf->outbound;
839 for (i = 0; i != ctx->nb_qps; i++) {
840 if (ctx->tbl[i].len != 0)
841 enqueue_cop_burst(ctx->tbl + i);
846 drain_inbound_crypto_queues(const struct lcore_conf *qconf,
847 struct ipsec_ctx *ctx)
850 struct ipsec_traffic trf;
852 /* dequeue packets from crypto-queue */
853 n = ipsec_inbound_cqp_dequeue(ctx, trf.ipsec.pkts,
854 RTE_DIM(trf.ipsec.pkts));
861 /* split traffic by ipv4-ipv6 */
862 split46_traffic(&trf, trf.ipsec.pkts, n);
864 /* process ipv4 packets */
865 inbound_sp_sa(ctx->sp4_ctx, ctx->sa_ctx, &trf.ip4, 0);
866 route4_pkts(qconf->rt4_ctx, trf.ip4.pkts, trf.ip4.num);
868 /* process ipv6 packets */
869 inbound_sp_sa(ctx->sp6_ctx, ctx->sa_ctx, &trf.ip6, 0);
870 route6_pkts(qconf->rt6_ctx, trf.ip6.pkts, trf.ip6.num);
874 drain_outbound_crypto_queues(const struct lcore_conf *qconf,
875 struct ipsec_ctx *ctx)
878 struct ipsec_traffic trf;
880 /* dequeue packets from crypto-queue */
881 n = ipsec_outbound_cqp_dequeue(ctx, trf.ipsec.pkts,
882 RTE_DIM(trf.ipsec.pkts));
889 /* split traffic by ipv4-ipv6 */
890 split46_traffic(&trf, trf.ipsec.pkts, n);
892 /* process ipv4 packets */
893 route4_pkts(qconf->rt4_ctx, trf.ip4.pkts, trf.ip4.num);
895 /* process ipv6 packets */
896 route6_pkts(qconf->rt6_ctx, trf.ip6.pkts, trf.ip6.num);
899 /* main processing loop */
901 main_loop(__attribute__((unused)) void *dummy)
903 struct rte_mbuf *pkts[MAX_PKT_BURST];
905 uint64_t prev_tsc, diff_tsc, cur_tsc;
909 struct lcore_conf *qconf;
911 const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1)
912 / US_PER_S * BURST_TX_DRAIN_US;
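/*
 * drain_tsc converts BURST_TX_DRAIN_US into TSC cycles: TSC ticks per second
 * divided by US_PER_S (rounded up) gives ticks per microsecond, multiplied by
 * the drain interval, so partially filled TX/crypto buffers are flushed about
 * every 100 us.
 */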
913 struct lcore_rx_queue *rxql;
916 lcore_id = rte_lcore_id();
917 qconf = &lcore_conf[lcore_id];
918 rxql = qconf->rx_queue_list;
919 socket_id = rte_lcore_to_socket_id(lcore_id);
921 qconf->rt4_ctx = socket_ctx[socket_id].rt_ip4;
922 qconf->rt6_ctx = socket_ctx[socket_id].rt_ip6;
923 qconf->inbound.sp4_ctx = socket_ctx[socket_id].sp_ip4_in;
924 qconf->inbound.sp6_ctx = socket_ctx[socket_id].sp_ip6_in;
925 qconf->inbound.sa_ctx = socket_ctx[socket_id].sa_in;
926 qconf->inbound.cdev_map = cdev_map_in;
927 qconf->inbound.session_pool = socket_ctx[socket_id].session_pool;
928 qconf->outbound.sp4_ctx = socket_ctx[socket_id].sp_ip4_out;
929 qconf->outbound.sp6_ctx = socket_ctx[socket_id].sp_ip6_out;
930 qconf->outbound.sa_ctx = socket_ctx[socket_id].sa_out;
931 qconf->outbound.cdev_map = cdev_map_out;
932 qconf->outbound.session_pool = socket_ctx[socket_id].session_pool;
934 if (qconf->nb_rx_queue == 0) {
935 RTE_LOG(INFO, IPSEC, "lcore %u has nothing to do\n", lcore_id);
939 RTE_LOG(INFO, IPSEC, "entering main loop on lcore %u\n", lcore_id);
941 for (i = 0; i < qconf->nb_rx_queue; i++) {
942 portid = rxql[i].port_id;
943 queueid = rxql[i].queue_id;
945 " -- lcoreid=%u portid=%u rxqueueid=%hhu\n",
946 lcore_id, portid, queueid);
950 cur_tsc = rte_rdtsc();
952 /* TX queue buffer drain */
953 diff_tsc = cur_tsc - prev_tsc;
955 if (unlikely(diff_tsc > drain_tsc)) {
956 drain_tx_buffers(qconf);
957 drain_crypto_buffers(qconf);
961 for (i = 0; i < qconf->nb_rx_queue; ++i) {
963 /* Read packets from RX queues */
964 portid = rxql[i].port_id;
965 queueid = rxql[i].queue_id;
966 nb_rx = rte_eth_rx_burst(portid, queueid,
967 pkts, MAX_PKT_BURST);
970 process_pkts(qconf, pkts, nb_rx, portid);
972 /* dequeue and process completed crypto-ops */
973 if (UNPROTECTED_PORT(portid))
974 drain_inbound_crypto_queues(qconf,
977 drain_outbound_crypto_queues(qconf,
991 if (lcore_params == NULL) {
992 printf("Error: No port/queue/core mappings\n");
996 for (i = 0; i < nb_lcore_params; ++i) {
997 lcore = lcore_params[i].lcore_id;
998 if (!rte_lcore_is_enabled(lcore)) {
999 printf("error: lcore %hhu is not enabled in "
1000 "lcore mask\n", lcore);
1003 socket_id = rte_lcore_to_socket_id(lcore);
1004 if (socket_id != 0 && numa_on == 0) {
1005 printf("warning: lcore %hhu is on socket %d "
1009 portid = lcore_params[i].port_id;
1010 if ((enabled_port_mask & (1 << portid)) == 0) {
1011 printf("port %u is not enabled in port mask\n", portid);
1014 if (!rte_eth_dev_is_valid_port(portid)) {
1015 printf("port %u is not present on the board\n", portid);
1023 get_port_nb_rx_queues(const uint16_t port)
1028 for (i = 0; i < nb_lcore_params; ++i) {
1029 if (lcore_params[i].port_id == port &&
1030 lcore_params[i].queue_id > queue)
1031 queue = lcore_params[i].queue_id;
1033 return (uint8_t)(++queue);
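/*
 * The number of RX queues a port needs is derived from the --config mapping:
 * the highest queue index referenced for that port, plus one.
 */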
1037 init_lcore_rx_queues(void)
1039 uint16_t i, nb_rx_queue;
1042 for (i = 0; i < nb_lcore_params; ++i) {
1043 lcore = lcore_params[i].lcore_id;
1044 nb_rx_queue = lcore_conf[lcore].nb_rx_queue;
1045 if (nb_rx_queue >= MAX_RX_QUEUE_PER_LCORE) {
1046 printf("error: too many queues (%u) for lcore: %u\n",
1047 nb_rx_queue + 1, lcore);
1050 lcore_conf[lcore].rx_queue_list[nb_rx_queue].port_id =
1051 lcore_params[i].port_id;
1052 lcore_conf[lcore].rx_queue_list[nb_rx_queue].queue_id =
1053 lcore_params[i].queue_id;
1054 lcore_conf[lcore].nb_rx_queue++;
1061 print_usage(const char *prgname)
1063 fprintf(stderr, "%s [EAL options] --"
1069 " --config (port,queue,lcore)[,(port,queue,lcore)]"
1070 " [--single-sa SAIDX]"
1071 " [--cryptodev_mask MASK]"
1073 " -p PORTMASK: Hexadecimal bitmask of ports to configure\n"
1074 " -P : Enable promiscuous mode\n"
1075 " -u PORTMASK: Hexadecimal bitmask of unprotected ports\n"
1076 " -j FRAMESIZE: Enable jumbo frame with 'FRAMESIZE' as maximum\n"
1078 " -f CONFIG_FILE: Configuration file\n"
1079 " --config (port,queue,lcore): Rx queue configuration\n"
1080 " --single-sa SAIDX: Use single SA index for outbound traffic,\n"
1081 " bypassing the SP\n"
1082 " --cryptodev_mask MASK: Hexadecimal bitmask of the crypto\n"
1083 " devices to configure\n"
1089 parse_portmask(const char *portmask)
1094 /* parse hexadecimal string */
1095 pm = strtoul(portmask, &end, 16);
1096 if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0'))
1099 if ((pm == 0) && errno)
1106 parse_decimal(const char *str)
1111 num = strtoul(str, &end, 10);
1112 if ((str[0] == '\0') || (end == NULL) || (*end != '\0'))
1119 parse_config(const char *q_arg)
1122 const char *p, *p0 = q_arg;
1130 unsigned long int_fld[_NUM_FLD];
1131 char *str_fld[_NUM_FLD];
1135 nb_lcore_params = 0;
1137 while ((p = strchr(p0, '(')) != NULL) {
1139 p0 = strchr(p, ')');
1144 if (size >= sizeof(s))
1147 snprintf(s, sizeof(s), "%.*s", size, p);
1148 if (rte_strsplit(s, sizeof(s), str_fld, _NUM_FLD, ',') !=
1151 for (i = 0; i < _NUM_FLD; i++) {
1153 int_fld[i] = strtoul(str_fld[i], &end, 0);
1154 if (errno != 0 || end == str_fld[i] || int_fld[i] > 255)
1157 if (nb_lcore_params >= MAX_LCORE_PARAMS) {
1158 printf("exceeded max number of lcore params: %hu\n",
1162 lcore_params_array[nb_lcore_params].port_id =
1163 (uint8_t)int_fld[FLD_PORT];
1164 lcore_params_array[nb_lcore_params].queue_id =
1165 (uint8_t)int_fld[FLD_QUEUE];
1166 lcore_params_array[nb_lcore_params].lcore_id =
1167 (uint8_t)int_fld[FLD_LCORE];
1170 lcore_params = lcore_params_array;
1175 parse_args(int32_t argc, char **argv)
1179 int32_t option_index;
1180 char *prgname = argv[0];
1181 int32_t f_present = 0;
1185 while ((opt = getopt_long(argc, argvopt, "p:Pu:f:j:",
1186 lgopts, &option_index)) != EOF) {
1190 enabled_port_mask = parse_portmask(optarg);
1191 if (enabled_port_mask == 0) {
1192 printf("invalid portmask\n");
1193 print_usage(prgname);
1198 printf("Promiscuous mode selected\n");
1202 unprotected_port_mask = parse_portmask(optarg);
1203 if (unprotected_port_mask == 0) {
1204 printf("invalid unprotected portmask\n");
1205 print_usage(prgname);
1210 if (f_present == 1) {
1211 printf("\"-f\" option present more than "
1213 print_usage(prgname);
1216 if (parse_cfg_file(optarg) < 0) {
1217 printf("parsing file \"%s\" failed\n",
1219 print_usage(prgname);
1226 int32_t size = parse_decimal(optarg);
1228 printf("Invalid jumbo frame size\n");
1230 print_usage(prgname);
1233 printf("Using default value 9000\n");
1239 printf("Enabled jumbo frames size %u\n", frame_size);
1241 case CMD_LINE_OPT_CONFIG_NUM:
1242 ret = parse_config(optarg);
1244 printf("Invalid config\n");
1245 print_usage(prgname);
1249 case CMD_LINE_OPT_SINGLE_SA_NUM:
1250 ret = parse_decimal(optarg);
1252 printf("Invalid argument[sa_idx]\n");
1253 print_usage(prgname);
1259 single_sa_idx = ret;
1260 printf("Configured with single SA index %u\n",
1263 case CMD_LINE_OPT_CRYPTODEV_MASK_NUM:
1264 ret = parse_portmask(optarg);
1266 printf("Invalid argument[portmask]\n");
1267 print_usage(prgname);
1272 enabled_cryptodev_mask = ret;
1275 print_usage(prgname);
1280 if (f_present == 0) {
1281 printf("Mandatory option \"-f\" not present\n");
1286 argv[optind-1] = prgname;
1289 optind = 1; /* reset getopt lib */
1294 print_ethaddr(const char *name, const struct ether_addr *eth_addr)
1296 char buf[ETHER_ADDR_FMT_SIZE];
1297 ether_format_addr(buf, ETHER_ADDR_FMT_SIZE, eth_addr);
1298 printf("%s%s", name, buf);
1301 /* Check the link status of all ports in up to 9s, and print them finally */
1303 check_all_ports_link_status(uint32_t port_mask)
1305 #define CHECK_INTERVAL 100 /* 100ms */
1306 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
1308 uint8_t count, all_ports_up, print_flag = 0;
1309 struct rte_eth_link link;
1311 printf("\nChecking link status");
1313 for (count = 0; count <= MAX_CHECK_TIME; count++) {
1315 RTE_ETH_FOREACH_DEV(portid) {
1316 if ((port_mask & (1 << portid)) == 0)
1318 memset(&link, 0, sizeof(link));
1319 rte_eth_link_get_nowait(portid, &link);
1320 /* print link status if flag set */
1321 if (print_flag == 1) {
1322 if (link.link_status)
1324 "Port%d Link Up - speed %u Mbps -%s\n",
1325 portid, link.link_speed,
1326 (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
1327 ("full-duplex") : ("half-duplex"));
1329 printf("Port %d Link Down\n", portid);
1332 /* clear all_ports_up flag if any link down */
1333 if (link.link_status == ETH_LINK_DOWN) {
1338 /* after finally printing all link status, get out */
1339 if (print_flag == 1)
1342 if (all_ports_up == 0) {
1345 rte_delay_ms(CHECK_INTERVAL);
1348 /* set the print_flag if all ports up or timeout */
1349 if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
1357 add_mapping(struct rte_hash *map, const char *str, uint16_t cdev_id,
1358 uint16_t qp, struct lcore_params *params,
1359 struct ipsec_ctx *ipsec_ctx,
1360 const struct rte_cryptodev_capabilities *cipher,
1361 const struct rte_cryptodev_capabilities *auth,
1362 const struct rte_cryptodev_capabilities *aead)
1366 struct cdev_key key = { 0 };
1368 key.lcore_id = params->lcore_id;
1370 key.cipher_algo = cipher->sym.cipher.algo;
1372 key.auth_algo = auth->sym.auth.algo;
1374 key.aead_algo = aead->sym.aead.algo;
1376 ret = rte_hash_lookup(map, &key);
1380 for (i = 0; i < ipsec_ctx->nb_qps; i++)
1381 if (ipsec_ctx->tbl[i].id == cdev_id)
1384 if (i == ipsec_ctx->nb_qps) {
1385 if (ipsec_ctx->nb_qps == MAX_QP_PER_LCORE) {
1386 printf("Maximum number of crypto devices assigned to "
1387 "a core, increase MAX_QP_PER_LCORE value\n");
1390 ipsec_ctx->tbl[i].id = cdev_id;
1391 ipsec_ctx->tbl[i].qp = qp;
1392 ipsec_ctx->nb_qps++;
1393 printf("%s cdev mapping: lcore %u using cdev %u qp %u "
1394 "(cdev_id_qp %lu)\n", str, key.lcore_id,
1398 ret = rte_hash_add_key_data(map, &key, (void *)i);
1400 printf("Faled to insert cdev mapping for (lcore %u, "
1401 "cdev %u, qp %u), errno %d\n",
1402 key.lcore_id, ipsec_ctx->tbl[i].id,
1403 ipsec_ctx->tbl[i].qp, ret);
1411 add_cdev_mapping(struct rte_cryptodev_info *dev_info, uint16_t cdev_id,
1412 uint16_t qp, struct lcore_params *params)
1415 const struct rte_cryptodev_capabilities *i, *j;
1416 struct rte_hash *map;
1417 struct lcore_conf *qconf;
1418 struct ipsec_ctx *ipsec_ctx;
1421 qconf = &lcore_conf[params->lcore_id];
1423 if ((unprotected_port_mask & (1 << params->port_id)) == 0) {
1425 ipsec_ctx = &qconf->outbound;
1429 ipsec_ctx = &qconf->inbound;
1433 /* Require cryptodevs with operation chaining */
1434 if (!(dev_info->feature_flags &
1435 RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING))
1438 for (i = dev_info->capabilities;
1439 i->op != RTE_CRYPTO_OP_TYPE_UNDEFINED; i++) {
1440 if (i->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
1443 if (i->sym.xform_type == RTE_CRYPTO_SYM_XFORM_AEAD) {
1444 ret |= add_mapping(map, str, cdev_id, qp, params,
1445 ipsec_ctx, NULL, NULL, i);
1449 if (i->sym.xform_type != RTE_CRYPTO_SYM_XFORM_CIPHER)
1452 for (j = dev_info->capabilities;
1453 j->op != RTE_CRYPTO_OP_TYPE_UNDEFINED; j++) {
1454 if (j->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
1457 if (j->sym.xform_type != RTE_CRYPTO_SYM_XFORM_AUTH)
1460 ret |= add_mapping(map, str, cdev_id, qp, params,
1461 ipsec_ctx, i, j, NULL);
1468 /* Check if the device is enabled by cryptodev_mask */
1470 check_cryptodev_mask(uint8_t cdev_id)
1472 if (enabled_cryptodev_mask & (1 << cdev_id))
1479 cryptodevs_init(void)
1481 struct rte_cryptodev_config dev_conf;
1482 struct rte_cryptodev_qp_conf qp_conf;
1483 uint16_t idx, max_nb_qps, qp, i;
1484 int16_t cdev_id, port_id;
1485 struct rte_hash_parameters params = { 0 };
1487 params.entries = CDEV_MAP_ENTRIES;
1488 params.key_len = sizeof(struct cdev_key);
1489 params.hash_func = rte_jhash;
1490 params.hash_func_init_val = 0;
1491 params.socket_id = rte_socket_id();
1493 params.name = "cdev_map_in";
1494 cdev_map_in = rte_hash_create(&params);
1495 if (cdev_map_in == NULL)
1496 rte_panic("Failed to create cdev_map hash table, errno = %d\n",
1499 params.name = "cdev_map_out";
1500 cdev_map_out = rte_hash_create(&params);
1501 if (cdev_map_out == NULL)
1502 rte_panic("Failed to create cdev_map hash table, errno = %d\n",
1505 printf("lcore/cryptodev/qp mappings:\n");
1507 uint32_t max_sess_sz = 0, sess_sz;
1508 for (cdev_id = 0; cdev_id < rte_cryptodev_count(); cdev_id++) {
1511 /* Get crypto priv session size */
1512 sess_sz = rte_cryptodev_sym_get_private_session_size(cdev_id);
1513 if (sess_sz > max_sess_sz)
1514 max_sess_sz = sess_sz;
1517 * If crypto device is security capable, need to check the
1518 * size of security session as well.
1521 /* Get security context of the crypto device */
1522 sec_ctx = rte_cryptodev_get_sec_ctx(cdev_id);
1523 if (sec_ctx == NULL)
1526 /* Get size of security session */
1527 sess_sz = rte_security_session_get_size(sec_ctx);
1528 if (sess_sz > max_sess_sz)
1529 max_sess_sz = sess_sz;
1531 RTE_ETH_FOREACH_DEV(port_id) {
1534 if ((enabled_port_mask & (1 << port_id)) == 0)
1537 sec_ctx = rte_eth_dev_get_sec_ctx(port_id);
1538 if (sec_ctx == NULL)
1541 sess_sz = rte_security_session_get_size(sec_ctx);
1542 if (sess_sz > max_sess_sz)
1543 max_sess_sz = sess_sz;
1547 for (cdev_id = 0; cdev_id < rte_cryptodev_count(); cdev_id++) {
1548 struct rte_cryptodev_info cdev_info;
1550 if (check_cryptodev_mask((uint8_t)cdev_id))
1553 rte_cryptodev_info_get(cdev_id, &cdev_info);
1555 if (nb_lcore_params > cdev_info.max_nb_queue_pairs)
1556 max_nb_qps = cdev_info.max_nb_queue_pairs;
1558 max_nb_qps = nb_lcore_params;
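/*
 * Queue pairs of this crypto device are assigned round-robin to the
 * (port, queue, lcore) entries from --config; add_cdev_mapping() records
 * each (lcore, algorithm) -> (cdev, qp) binding in the inbound/outbound
 * hash maps used by the data path.
 */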
1562 while (qp < max_nb_qps && i < nb_lcore_params) {
1563 if (add_cdev_mapping(&cdev_info, cdev_id, qp,
1564 &lcore_params[idx]))
1567 idx = idx % nb_lcore_params;
1574 dev_conf.socket_id = rte_cryptodev_socket_id(cdev_id);
1575 dev_conf.nb_queue_pairs = qp;
1577 uint32_t dev_max_sess = cdev_info.sym.max_nb_sessions;
1578 if (dev_max_sess != 0 && dev_max_sess < (CDEV_MP_NB_OBJS / 2))
1579 rte_exit(EXIT_FAILURE,
1580 "Device does not support at least %u "
1581 "sessions", CDEV_MP_NB_OBJS / 2);
1583 if (!socket_ctx[dev_conf.socket_id].session_pool) {
1584 char mp_name[RTE_MEMPOOL_NAMESIZE];
1585 struct rte_mempool *sess_mp;
1587 snprintf(mp_name, RTE_MEMPOOL_NAMESIZE,
1588 "sess_mp_%u", dev_conf.socket_id);
1589 sess_mp = rte_mempool_create(mp_name,
1593 0, NULL, NULL, NULL,
1594 NULL, dev_conf.socket_id,
1596 if (sess_mp == NULL)
1597 rte_exit(EXIT_FAILURE,
1598 "Cannot create session pool on socket %d\n",
1599 dev_conf.socket_id);
1601 printf("Allocated session pool on socket %d\n",
1602 dev_conf.socket_id);
1603 socket_ctx[dev_conf.socket_id].session_pool = sess_mp;
1606 if (rte_cryptodev_configure(cdev_id, &dev_conf))
1607 rte_panic("Failed to initialize cryptodev %u\n",
1610 qp_conf.nb_descriptors = CDEV_QUEUE_DESC;
1611 for (qp = 0; qp < dev_conf.nb_queue_pairs; qp++)
1612 if (rte_cryptodev_queue_pair_setup(cdev_id, qp,
1613 &qp_conf, dev_conf.socket_id,
1614 socket_ctx[dev_conf.socket_id].session_pool))
1615 rte_panic("Failed to setup queue %u for "
1616 "cdev_id %u\n", 0, cdev_id);
1618 if (rte_cryptodev_start(cdev_id))
1619 rte_panic("Failed to start cryptodev %u\n",
1623 /* create session pools for eth devices that implement security */
1624 RTE_ETH_FOREACH_DEV(port_id) {
1625 if ((enabled_port_mask & (1 << port_id)) &&
1626 rte_eth_dev_get_sec_ctx(port_id)) {
1627 int socket_id = rte_eth_dev_socket_id(port_id);
1629 if (!socket_ctx[socket_id].session_pool) {
1630 char mp_name[RTE_MEMPOOL_NAMESIZE];
1631 struct rte_mempool *sess_mp;
1633 snprintf(mp_name, RTE_MEMPOOL_NAMESIZE,
1634 "sess_mp_%u", socket_id);
1635 sess_mp = rte_mempool_create(mp_name,
1639 0, NULL, NULL, NULL,
1642 if (sess_mp == NULL)
1643 rte_exit(EXIT_FAILURE,
1644 "Cannot create session pool "
1645 "on socket %d\n", socket_id);
1647 printf("Allocated session pool "
1648 "on socket %d\n", socket_id);
1649 socket_ctx[socket_id].session_pool = sess_mp;
1661 port_init(uint16_t portid)
1663 struct rte_eth_dev_info dev_info;
1664 struct rte_eth_txconf *txconf;
1665 uint16_t nb_tx_queue, nb_rx_queue;
1666 uint16_t tx_queueid, rx_queueid, queue, lcore_id;
1667 int32_t ret, socket_id;
1668 struct lcore_conf *qconf;
1669 struct ether_addr ethaddr;
1670 struct rte_eth_conf local_port_conf = port_conf;
1672 rte_eth_dev_info_get(portid, &dev_info);
1674 printf("Configuring device port %u:\n", portid);
1676 rte_eth_macaddr_get(portid, &ethaddr);
1677 ethaddr_tbl[portid].src = ETHADDR_TO_UINT64(ethaddr);
1678 print_ethaddr("Address: ", &ethaddr);
1681 nb_rx_queue = get_port_nb_rx_queues(portid);
1682 nb_tx_queue = nb_lcores;
1684 if (nb_rx_queue > dev_info.max_rx_queues)
1685 rte_exit(EXIT_FAILURE, "Error: queue %u not available "
1686 "(max rx queue is %u)\n",
1687 nb_rx_queue, dev_info.max_rx_queues);
1689 if (nb_tx_queue > dev_info.max_tx_queues)
1690 rte_exit(EXIT_FAILURE, "Error: queue %u not available "
1691 "(max tx queue is %u)\n",
1692 nb_tx_queue, dev_info.max_tx_queues);
1694 printf("Creating queues: nb_rx_queue=%d nb_tx_queue=%u...\n",
1695 nb_rx_queue, nb_tx_queue);
1698 local_port_conf.rxmode.max_rx_pkt_len = frame_size;
1699 local_port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
1702 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_SECURITY)
1703 local_port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_SECURITY;
1704 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_SECURITY)
1705 local_port_conf.txmode.offloads |= DEV_TX_OFFLOAD_SECURITY;
1706 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
1707 local_port_conf.txmode.offloads |=
1708 DEV_TX_OFFLOAD_MBUF_FAST_FREE;
1710 local_port_conf.rx_adv_conf.rss_conf.rss_hf &=
1711 dev_info.flow_type_rss_offloads;
1712 if (local_port_conf.rx_adv_conf.rss_conf.rss_hf !=
1713 port_conf.rx_adv_conf.rss_conf.rss_hf) {
1714 printf("Port %u modified RSS hash function based on hardware support,"
1715 "requested:%#"PRIx64" configured:%#"PRIx64"\n",
1717 port_conf.rx_adv_conf.rss_conf.rss_hf,
1718 local_port_conf.rx_adv_conf.rss_conf.rss_hf);
1721 ret = rte_eth_dev_configure(portid, nb_rx_queue, nb_tx_queue,
1724 rte_exit(EXIT_FAILURE, "Cannot configure device: "
1725 "err=%d, port=%d\n", ret, portid);
1727 ret = rte_eth_dev_adjust_nb_rx_tx_desc(portid, &nb_rxd, &nb_txd);
1729 rte_exit(EXIT_FAILURE, "Cannot adjust number of descriptors: "
1730 "err=%d, port=%d\n", ret, portid);
1732 /* init one TX queue per lcore */
1734 for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
1735 if (rte_lcore_is_enabled(lcore_id) == 0)
1739 socket_id = (uint8_t)rte_lcore_to_socket_id(lcore_id);
1744 printf("Setup txq=%u,%d,%d\n", lcore_id, tx_queueid, socket_id);
1746 txconf = &dev_info.default_txconf;
1747 txconf->offloads = local_port_conf.txmode.offloads;
1749 ret = rte_eth_tx_queue_setup(portid, tx_queueid, nb_txd,
1752 rte_exit(EXIT_FAILURE, "rte_eth_tx_queue_setup: "
1753 "err=%d, port=%d\n", ret, portid);
1755 qconf = &lcore_conf[lcore_id];
1756 qconf->tx_queue_id[portid] = tx_queueid;
1759 /* init RX queues */
1760 for (queue = 0; queue < qconf->nb_rx_queue; ++queue) {
1761 struct rte_eth_rxconf rxq_conf;
1763 if (portid != qconf->rx_queue_list[queue].port_id)
1766 rx_queueid = qconf->rx_queue_list[queue].queue_id;
1768 printf("Setup rxq=%d,%d,%d\n", portid, rx_queueid,
1771 rxq_conf = dev_info.default_rxconf;
1772 rxq_conf.offloads = local_port_conf.rxmode.offloads;
1773 ret = rte_eth_rx_queue_setup(portid, rx_queueid,
1774 nb_rxd, socket_id, &rxq_conf,
1775 socket_ctx[socket_id].mbuf_pool);
1777 rte_exit(EXIT_FAILURE,
1778 "rte_eth_rx_queue_setup: err=%d, "
1779 "port=%d\n", ret, portid);
1786 pool_init(struct socket_ctx *ctx, int32_t socket_id, uint32_t nb_mbuf)
1789 uint32_t buff_size = frame_size ? (frame_size + RTE_PKTMBUF_HEADROOM) :
1790 RTE_MBUF_DEFAULT_BUF_SIZE;
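/*
 * Each mbuf data room must fit the largest expected frame plus headroom:
 * with -j the pool is sized for the configured jumbo frame size, otherwise
 * the library default buffer size is used.
 */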
1793 snprintf(s, sizeof(s), "mbuf_pool_%d", socket_id);
1794 ctx->mbuf_pool = rte_pktmbuf_pool_create(s, nb_mbuf,
1795 MEMPOOL_CACHE_SIZE, ipsec_metadata_size(),
1798 if (ctx->mbuf_pool == NULL)
1799 rte_exit(EXIT_FAILURE, "Cannot init mbuf pool on socket %d\n",
1802 printf("Allocated mbuf pool on socket %d\n", socket_id);
1806 inline_ipsec_event_esn_overflow(struct rte_security_ctx *ctx, uint64_t md)
1808 struct ipsec_sa *sa;
1810 /* For inline protocol processing, the metadata in the event will
1811 * uniquely identify the security session which raised the event.
1812 * The application then needs the userdata it had registered with the
1813 * security session to process the event.
1816 sa = (struct ipsec_sa *)rte_security_get_userdata(ctx, md);
1819 /* userdata could not be retrieved */
1823 /* Sequence number overflow. The SA needs to be re-established */
1829 inline_ipsec_event_callback(uint16_t port_id, enum rte_eth_event_type type,
1830 void *param, void *ret_param)
1833 struct rte_eth_event_ipsec_desc *event_desc = NULL;
1834 struct rte_security_ctx *ctx = (struct rte_security_ctx *)
1835 rte_eth_dev_get_sec_ctx(port_id);
1837 RTE_SET_USED(param);
1839 if (type != RTE_ETH_EVENT_IPSEC)
1842 event_desc = ret_param;
1843 if (event_desc == NULL) {
1844 printf("Event descriptor not set\n");
1848 md = event_desc->metadata;
1850 if (event_desc->subtype == RTE_ETH_EVENT_IPSEC_ESN_OVERFLOW)
1851 return inline_ipsec_event_esn_overflow(ctx, md);
1852 else if (event_desc->subtype >= RTE_ETH_EVENT_IPSEC_MAX) {
1853 printf("Invalid IPsec event reported\n");
1861 main(int32_t argc, char **argv)
1869 ret = rte_eal_init(argc, argv);
1871 rte_exit(EXIT_FAILURE, "Invalid EAL parameters\n");
1875 /* parse application arguments (after the EAL ones) */
1876 ret = parse_args(argc, argv);
1878 rte_exit(EXIT_FAILURE, "Invalid parameters\n");
1880 if ((unprotected_port_mask & enabled_port_mask) !=
1881 unprotected_port_mask)
1882 rte_exit(EXIT_FAILURE, "Invalid unprotected portmask 0x%x\n",
1883 unprotected_port_mask);
1885 if (check_params() < 0)
1886 rte_exit(EXIT_FAILURE, "check_params failed\n");
1888 ret = init_lcore_rx_queues();
1890 rte_exit(EXIT_FAILURE, "init_lcore_rx_queues failed\n");
1892 nb_lcores = rte_lcore_count();
1894 /* Replicate each context per socket */
1895 for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
1896 if (rte_lcore_is_enabled(lcore_id) == 0)
1900 socket_id = (uint8_t)rte_lcore_to_socket_id(lcore_id);
1904 if (socket_ctx[socket_id].mbuf_pool)
1907 sa_init(&socket_ctx[socket_id], socket_id);
1909 sp4_init(&socket_ctx[socket_id], socket_id);
1911 sp6_init(&socket_ctx[socket_id], socket_id);
1913 rt_init(&socket_ctx[socket_id], socket_id);
1915 pool_init(&socket_ctx[socket_id], socket_id, NB_MBUF);
1918 RTE_ETH_FOREACH_DEV(portid) {
1919 if ((enabled_port_mask & (1 << portid)) == 0)
1928 RTE_ETH_FOREACH_DEV(portid) {
1929 if ((enabled_port_mask & (1 << portid)) == 0)
1933 ret = rte_eth_dev_start(portid);
1935 rte_exit(EXIT_FAILURE, "rte_eth_dev_start: "
1936 "err=%d, port=%d\n", ret, portid);
1938 * If enabled, put device in promiscuous mode.
1939 * This allows IO forwarding mode to forward packets
1940 * to itself through 2 cross-connected ports of the
1941 * target machine.
1944 rte_eth_promiscuous_enable(portid);
1946 rte_eth_dev_callback_register(portid,
1947 RTE_ETH_EVENT_IPSEC, inline_ipsec_event_callback, NULL);
1950 check_all_ports_link_status(enabled_port_mask);
1952 /* launch per-lcore init on every lcore */
1953 rte_eal_mp_remote_launch(main_loop, NULL, CALL_MASTER);
1954 RTE_LCORE_FOREACH_SLAVE(lcore_id) {
1955 if (rte_eal_wait_lcore(lcore_id) < 0)