/*
 * esp_encrypt.c : IPSec ESP encrypt node using DPDK Cryptodev
 *
 * Copyright (c) 2017 Intel and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <vnet/vnet.h>
#include <vnet/api_errno.h>
#include <vnet/ip/ip.h>

#include <vnet/ipsec/ipsec.h>
#include <vnet/ipsec/esp.h>
#include <vnet/udp/udp.h>
#include <dpdk/buffer.h>
#include <dpdk/ipsec/ipsec.h>
#include <dpdk/device/dpdk.h>
#include <dpdk/device/dpdk_priv.h>
#define foreach_esp_encrypt_next        \
_(DROP, "error-drop")                   \
_(IP4_LOOKUP, "ip4-lookup")             \
_(IP6_LOOKUP, "ip6-lookup")             \
_(INTERFACE_OUTPUT, "interface-output")

#define _(v, s) ESP_ENCRYPT_NEXT_##v,
typedef enum
{
  foreach_esp_encrypt_next
#undef _
    ESP_ENCRYPT_N_NEXT,
} esp_encrypt_next_t;
#define foreach_esp_encrypt_error                              \
 _(RX_PKTS, "ESP pkts received")                               \
 _(SEQ_CYCLED, "Sequence number cycled")                       \
 _(ENQ_FAIL, "Enqueue failed to crypto device")                \
 _(DISCARD, "Not enough crypto operations, discarding frame")  \
 _(SESSION, "Failed to get crypto session")                    \
 _(NOSUP, "Cipher/Auth not supported")

typedef enum
{
#define _(sym,str) ESP_ENCRYPT_ERROR_##sym,
  foreach_esp_encrypt_error
#undef _
    ESP_ENCRYPT_N_ERROR,
} esp_encrypt_error_t;

static char *esp_encrypt_error_strings[] = {
#define _(sym,string) string,
  foreach_esp_encrypt_error
#undef _
};
extern vlib_node_registration_t dpdk_esp4_encrypt_node;
extern vlib_node_registration_t dpdk_esp6_encrypt_node;
typedef struct
{
  ipsec_crypto_alg_t crypto_alg;
  ipsec_integ_alg_t integ_alg;
  u8 packet_data[64];
} esp_encrypt_trace_t;
/* packet trace format function */
static u8 *
format_esp_encrypt_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  esp_encrypt_trace_t *t = va_arg (*args, esp_encrypt_trace_t *);
  ip4_header_t *ih4 = (ip4_header_t *) t->packet_data;
  u32 indent = format_get_indent (s), offset;

  s = format (s, "cipher %U auth %U\n",
	      format_ipsec_crypto_alg, t->crypto_alg,
	      format_ipsec_integ_alg, t->integ_alg);

  if ((ih4->ip_version_and_header_length & 0xF0) == 0x60)
    {
      s = format (s, "%U%U", format_white_space, indent,
		  format_ip6_header, ih4);
      offset = sizeof (ip6_header_t);
    }
  else
    {
      s = format (s, "%U%U", format_white_space, indent,
		  format_ip4_header, ih4);
      offset = ip4_header_bytes (ih4);
    }

  s = format (s, "\n%U%U", format_white_space, indent,
	      format_esp_header, t->packet_data + offset);

  return s;
}
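
/*
 * Encrypt path shared by the ip4/ip6 and tunnel-feature variants of the
 * node.  For each buffer it builds the outbound ESP (and optional UDP)
 * encapsulation, appends padding and the ESP footer, and fills in a DPDK
 * crypto operation that is enqueued to a cryptodev at the end of the
 * frame; completed packets are re-injected by the crypto dequeue node.
 * Only packets that fail (unsupported SA, no session, cycled sequence
 * number) are sent to this node's error-drop next.
 */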
always_inline uword
dpdk_esp_encrypt_inline (vlib_main_t * vm,
			 vlib_node_runtime_t * node,
			 vlib_frame_t * from_frame, int is_ip6, int is_tun)
{
  u32 n_left_from, *from, *to_next, next_index, thread_index;
  ipsec_main_t *im = &ipsec_main;
  vnet_main_t *vnm = im->vnet_main;
  vnet_interface_main_t *vim = &vnm->interface_main;
  u32 thread_idx = vlib_get_thread_index ();
  dpdk_crypto_main_t *dcm = &dpdk_crypto_main;
  crypto_resource_t *res = 0;
  ipsec_sa_t *sa0 = 0;
  crypto_alg_t *cipher_alg = 0, *auth_alg = 0;
  struct rte_cryptodev_sym_session *session = 0;
  u32 ret, last_sa_index = ~0;
  u8 numa = rte_socket_id ();
  u8 is_aead = 0;
  crypto_worker_main_t *cwm =
    vec_elt_at_index (dcm->workers_main, thread_idx);
  struct rte_crypto_op **ops = cwm->ops;

  from = vlib_frame_vector_args (from_frame);
  n_left_from = from_frame->n_vectors;
  thread_index = vm->thread_index;
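
  /*
   * Reserve one rte_crypto_op per packet in the frame up front; if the
   * crypto op pool cannot satisfy the whole frame, the frame is counted
   * as discarded and dropped in one shot rather than partially processed.
   */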
  ret = crypto_alloc_ops (numa, ops, n_left_from);
  if (ret)
    {
      if (is_ip6)
	vlib_node_increment_counter (vm, dpdk_esp6_encrypt_node.index,
				     ESP_ENCRYPT_ERROR_DISCARD, 1);
      else
	vlib_node_increment_counter (vm, dpdk_esp4_encrypt_node.index,
				     ESP_ENCRYPT_ERROR_DISCARD, 1);
      /* Discard whole frame */
      vlib_buffer_free (vm, from, n_left_from);
      return n_left_from;
    }

  next_index = ESP_ENCRYPT_NEXT_DROP;
  while (n_left_from > 0)
    {
      u32 n_left_to_next;

      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      while (n_left_from > 0 && n_left_to_next > 0)
	{
	  clib_error_t *error;
	  u32 bi0, bi1;
	  vlib_buffer_t *b0, *b1;
	  u32 sa_index0;
	  ip4_and_esp_header_t *ih0, *oh0 = 0;
	  ip6_and_esp_header_t *ih6_0, *oh6_0 = 0;
	  ip4_and_udp_and_esp_header_t *ouh0 = 0;
	  esp_header_t *esp0;
	  esp_footer_t *f0;
	  u8 next_hdr_type;
	  u32 iv_size;
	  u16 orig_sz;
	  u8 trunc_size;
	  u16 rewrite_len;
	  u16 udp_encap_adv = 0;
	  struct rte_mbuf *mb0;
	  struct rte_crypto_op *op;
	  u16 res_idx;

	  bi0 = from[0];
	  from += 1;
	  n_left_from -= 1;

	  b0 = vlib_get_buffer (vm, bi0);
	  ih0 = vlib_buffer_get_current (b0);
	  mb0 = rte_mbuf_from_vlib_buffer (b0);

	  /* ih0/ih6_0 */
	  CLIB_PREFETCH (ih0, sizeof (ih6_0[0]), LOAD);
	  /* f0 */
	  CLIB_PREFETCH (vlib_buffer_get_tail (b0), 20, STORE);
	  /* mb0 */
	  CLIB_PREFETCH (mb0, CLIB_CACHE_LINE_BYTES, STORE);

	  if (n_left_from > 1)
	    {
	      bi1 = from[0];
	      b1 = vlib_get_buffer (vm, bi1);

	      CLIB_PREFETCH (b1, CLIB_CACHE_LINE_BYTES, LOAD);
	      CLIB_PREFETCH (b1->data - CLIB_CACHE_LINE_BYTES,
			     CLIB_CACHE_LINE_BYTES, STORE);
	    }

	  op = ops[0];
	  ops += 1;
	  ASSERT (op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED);

	  dpdk_op_priv_t *priv = crypto_op_get_priv (op);
	  /* store bi in op private */
	  priv->bi = bi0;
	  priv->encrypt = 1;

	  u16 op_len =
	    sizeof (op[0]) + sizeof (op[0].sym[0]) + sizeof (priv[0]);
	  CLIB_PREFETCH (op, op_len, STORE);

	  if (is_tun)
	    {
	      u32 tmp;
	      /* we are on a ipsec tunnel's feature arc */
	      sa_index0 = *(u32 *) vnet_feature_next_with_data (&tmp, b0,
								sizeof
								(sa_index0));
	    }
	  else
	    sa_index0 = vnet_buffer (b0)->ipsec.sad_index;
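
	  /*
	   * SA, algorithm and cryptodev resource lookups are cached across
	   * consecutive packets: they are refreshed only when the SA index
	   * differs from the previous packet's (last_sa_index).
	   */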
	  if (sa_index0 != last_sa_index)
	    {
	      sa0 = pool_elt_at_index (im->sad, sa_index0);

	      cipher_alg =
		vec_elt_at_index (dcm->cipher_algs, sa0->crypto_alg);
	      auth_alg = vec_elt_at_index (dcm->auth_algs, sa0->integ_alg);

	      is_aead = (cipher_alg->type == RTE_CRYPTO_SYM_XFORM_AEAD);

	      if (is_aead)
		auth_alg = cipher_alg;

	      res_idx = get_resource (cwm, sa0);

	      if (PREDICT_FALSE (res_idx == (u16) ~ 0))
		{
		  clib_warning ("unsupported SA by thread index %u",
				thread_idx);
		  if (is_ip6)
		    vlib_node_increment_counter (vm,
						 dpdk_esp6_encrypt_node.index,
						 ESP_ENCRYPT_ERROR_NOSUP, 1);
		  else
		    vlib_node_increment_counter (vm,
						 dpdk_esp4_encrypt_node.index,
						 ESP_ENCRYPT_ERROR_NOSUP, 1);
		  to_next[0] = bi0;
		  to_next += 1;
		  n_left_to_next -= 1;
		  goto trace;
		}
	      res = vec_elt_at_index (dcm->resource, res_idx);

	      error = crypto_get_session (&session, sa_index0, res, cwm, 1);
	      if (PREDICT_FALSE (error || !session))
		{
		  clib_warning ("failed to get crypto session");
		  if (is_ip6)
		    vlib_node_increment_counter (vm,
						 dpdk_esp6_encrypt_node.index,
						 ESP_ENCRYPT_ERROR_SESSION,
						 1);
		  else
		    vlib_node_increment_counter (vm,
						 dpdk_esp4_encrypt_node.index,
						 ESP_ENCRYPT_ERROR_SESSION,
						 1);
		  to_next[0] = bi0;
		  to_next += 1;
		  n_left_to_next -= 1;
		  goto trace;
		}

	      last_sa_index = sa_index0;
	    }
	  if (PREDICT_FALSE (esp_seq_advance (sa0)))
	    {
	      clib_warning
		("sequence number counter has cycled SPI %u (0x%08x)",
		 sa0->spi, sa0->spi);
	      if (is_ip6)
		vlib_node_increment_counter (vm,
					     dpdk_esp6_encrypt_node.index,
					     ESP_ENCRYPT_ERROR_SEQ_CYCLED, 1);
	      else
		vlib_node_increment_counter (vm,
					     dpdk_esp4_encrypt_node.index,
					     ESP_ENCRYPT_ERROR_SEQ_CYCLED, 1);
	      to_next[0] = bi0;
	      to_next += 1;
	      n_left_to_next -= 1;
	      goto trace;
	    }

	  orig_sz = b0->current_length;

	  /* TODO multi-seg support - total_length_not_including_first_buffer */
	  vlib_increment_combined_counter
	    (&ipsec_sa_counters, thread_index, sa_index0,
	     1, b0->current_length);

	  /* Update tunnel interface tx counters */
	  if (is_tun)
	    vlib_increment_combined_counter
	      (vim->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX,
	       thread_index, vnet_buffer (b0)->sw_if_index[VLIB_TX],
	       1, b0->current_length);

	  res->ops[res->n_ops] = op;
	  res->bi[res->n_ops] = bi0;
	  res->n_ops += 1;
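
	  /*
	   * The initial counter block (icb) is built from the SA salt and
	   * the (extended) sequence number; it supplies the per-packet
	   * IV/nonce material handed to the cryptodev.
	   */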
	  dpdk_gcm_cnt_blk *icb = &priv->cb;

	  crypto_set_icb (icb, sa0->salt, sa0->seq, sa0->seq_hi);

	  iv_size = cipher_alg->iv_len;
	  trunc_size = auth_alg->trunc_size;

	  /* if UDP encapsulation is used adjust the address of the IP header */
	  if (ipsec_sa_is_set_UDP_ENCAP (sa0) && !is_ip6)
	    udp_encap_adv = sizeof (udp_header_t);
	  if (ipsec_sa_is_set_IS_TUNNEL (sa0))
	    {
	      if (!is_ip6 && !ipsec_sa_is_set_IS_TUNNEL_V6 (sa0))	/* ip4inip4 */
		{
		  /* in tunnel mode send it back to FIB */
		  priv->next = DPDK_CRYPTO_INPUT_NEXT_IP4_LOOKUP;
		  u8 adv = sizeof (ip4_header_t) + udp_encap_adv +
		    sizeof (esp_header_t) + iv_size;
		  vlib_buffer_advance (b0, -adv);
		  oh0 = vlib_buffer_get_current (b0);
		  ouh0 = vlib_buffer_get_current (b0);
		  next_hdr_type = IP_PROTOCOL_IP_IN_IP;
		  /*
		   * oh0->ip4.ip_version_and_header_length = 0x45;
		   * oh0->ip4.tos = ih0->ip4.tos;
		   * oh0->ip4.fragment_id = 0;
		   * oh0->ip4.flags_and_fragment_offset = 0;
		   */
		  oh0->ip4.checksum_data_64[0] =
		    clib_host_to_net_u64 (0x45ULL << 56);
		  /*
		   * oh0->ip4.ttl = 254;
		   * oh0->ip4.protocol = IP_PROTOCOL_IPSEC_ESP;
		   */
		  oh0->ip4.checksum_data_32[2] =
		    clib_host_to_net_u32 (0xfe320000);
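
		  /*
		   * The two stores above initialise several contiguous IPv4
		   * fields at once through the checksum_data_* union views:
		   * version/IHL = 0x45 with tos/length/id/flags zeroed, then
		   * ttl = 254, protocol = 50 (ESP) and checksum = 0.
		   */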
		  oh0->ip4.src_address.as_u32 =
		    sa0->tunnel_src_addr.ip4.as_u32;
		  oh0->ip4.dst_address.as_u32 =
		    sa0->tunnel_dst_addr.ip4.as_u32;

		  if (ipsec_sa_is_set_UDP_ENCAP (sa0))
		    {
		      oh0->ip4.protocol = IP_PROTOCOL_UDP;
		      esp0 = &ouh0->esp;
		    }
		  else
		    esp0 = &oh0->esp;

		  esp0->spi = clib_host_to_net_u32 (sa0->spi);
		  esp0->seq = clib_host_to_net_u32 (sa0->seq);
		}
	      else if (is_ip6 && ipsec_sa_is_set_IS_TUNNEL_V6 (sa0))
		{
		  /* in tunnel mode send it back to FIB */
		  priv->next = DPDK_CRYPTO_INPUT_NEXT_IP6_LOOKUP;

		  u8 adv =
		    sizeof (ip6_header_t) + sizeof (esp_header_t) + iv_size;
		  vlib_buffer_advance (b0, -adv);
		  ih6_0 = (ip6_and_esp_header_t *) ih0;
		  oh6_0 = vlib_buffer_get_current (b0);

		  next_hdr_type = IP_PROTOCOL_IPV6;

		  oh6_0->ip6.ip_version_traffic_class_and_flow_label =
		    ih6_0->ip6.ip_version_traffic_class_and_flow_label;

		  oh6_0->ip6.protocol = IP_PROTOCOL_IPSEC_ESP;
		  oh6_0->ip6.hop_limit = 254;
		  oh6_0->ip6.src_address.as_u64[0] =
		    sa0->tunnel_src_addr.ip6.as_u64[0];
		  oh6_0->ip6.src_address.as_u64[1] =
		    sa0->tunnel_src_addr.ip6.as_u64[1];
		  oh6_0->ip6.dst_address.as_u64[0] =
		    sa0->tunnel_dst_addr.ip6.as_u64[0];
		  oh6_0->ip6.dst_address.as_u64[1] =
		    sa0->tunnel_dst_addr.ip6.as_u64[1];

		  oh6_0->esp.spi = clib_host_to_net_u32 (sa0->spi);
		  oh6_0->esp.seq = clib_host_to_net_u32 (sa0->seq);
		}
	      else		/* unsupported ip4inip6, ip6inip4 */
		{
		  if (is_ip6)
		    vlib_node_increment_counter (vm,
						 dpdk_esp6_encrypt_node.index,
						 ESP_ENCRYPT_ERROR_NOSUP, 1);
		  else
		    vlib_node_increment_counter (vm,
						 dpdk_esp4_encrypt_node.index,
						 ESP_ENCRYPT_ERROR_NOSUP, 1);
		  to_next[0] = bi0;
		  to_next += 1;
		  n_left_to_next -= 1;
		  goto trace;
		}

	      vnet_buffer (b0)->sw_if_index[VLIB_TX] = (u32) ~ 0;
	    }
	  else			/* transport mode */
	    {
	      priv->next = DPDK_CRYPTO_INPUT_NEXT_INTERFACE_OUTPUT;
	      rewrite_len = vnet_buffer (b0)->ip.save_rewrite_length;
	      u16 adv = sizeof (esp_header_t) + iv_size + udp_encap_adv;
	      vlib_buffer_advance (b0, -adv - rewrite_len);
	      u8 *src = ((u8 *) ih0) - rewrite_len;
	      u8 *dst = vlib_buffer_get_current (b0);
	      oh0 = vlib_buffer_get_current (b0) + rewrite_len;
	      ouh0 = vlib_buffer_get_current (b0) + rewrite_len;
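
	      /*
	       * In transport mode the saved L2 rewrite and the original IP
	       * header are shifted back (memmove below) to open a gap for
	       * the ESP header, IV and optional UDP header directly after
	       * the IP header.
	       */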
	      if (is_ip6)
		{
		  orig_sz -= sizeof (ip6_header_t);
		  ih6_0 = (ip6_and_esp_header_t *) ih0;
		  next_hdr_type = ih6_0->ip6.protocol;
		  memmove (dst, src, rewrite_len + sizeof (ip6_header_t));
		  oh6_0 = (ip6_and_esp_header_t *) oh0;
		  oh6_0->ip6.protocol = IP_PROTOCOL_IPSEC_ESP;
		  esp0 = &oh6_0->esp;
		}
	      else		/* ipv4 */
		{
		  u16 ip_size = ip4_header_bytes (&ih0->ip4);
		  orig_sz -= ip_size;
		  next_hdr_type = ih0->ip4.protocol;
		  memmove (dst, src, rewrite_len + ip_size);
		  oh0->ip4.protocol = IP_PROTOCOL_IPSEC_ESP;
		  esp0 = (esp_header_t *) (((u8 *) oh0) + ip_size);
		  if (ipsec_sa_is_set_UDP_ENCAP (sa0))
		    {
		      oh0->ip4.protocol = IP_PROTOCOL_UDP;
		      esp0 = (esp_header_t *)
			(((u8 *) oh0) + ip_size + udp_encap_adv);
		    }
		  else
		    {
		      oh0->ip4.protocol = IP_PROTOCOL_IPSEC_ESP;
		      esp0 = (esp_header_t *) (((u8 *) oh0) + ip_size);
		    }
		}

	      esp0->spi = clib_host_to_net_u32 (sa0->spi);
	      esp0->seq = clib_host_to_net_u32 (sa0->seq);
	    }
	  if (ipsec_sa_is_set_UDP_ENCAP (sa0) && ouh0)
	    {
	      ouh0->udp.src_port = clib_host_to_net_u16 (UDP_DST_PORT_ipsec);
	      ouh0->udp.dst_port = clib_host_to_net_u16 (UDP_DST_PORT_ipsec);
	      ouh0->udp.checksum = 0;
	    }
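
	  /*
	   * ESP trailer: the payload is padded up to the cipher block
	   * boundary, 2 bytes are reserved for the pad_length/next_header
	   * footer, and trunc_size bytes are reserved at the tail for the
	   * integrity check value (digest).
	   */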
	  ASSERT (is_pow2 (cipher_alg->boundary));
	  u16 mask = cipher_alg->boundary - 1;
	  u16 pad_payload_len = ((orig_sz + 2) + mask) & ~mask;
	  u8 pad_bytes = pad_payload_len - 2 - orig_sz;

	  u8 *padding =
	    vlib_buffer_put_uninit (b0, pad_bytes + 2 + trunc_size);

	  /* The extra pad bytes would be overwritten by the digest */
	  if (pad_bytes)
	    clib_memcpy_fast (padding, pad_data, 16);

	  f0 = (esp_footer_t *) (padding + pad_bytes);
	  f0->pad_length = pad_bytes;
	  f0->next_header = next_hdr_type;
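
	  /*
	   * With padding, footer and digest space now reflected in
	   * current_length, rewrite the outer IP (and UDP) length fields
	   * and the IPv4 header checksum.
	   */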
	  if (is_ip6)
	    {
	      u16 len = b0->current_length - sizeof (ip6_header_t);
	      oh6_0->ip6.payload_length =
		clib_host_to_net_u16 (len - rewrite_len);
	    }
	  else
	    {
	      oh0->ip4.length =
		clib_host_to_net_u16 (b0->current_length - rewrite_len);
	      oh0->ip4.checksum = ip4_header_checksum (&oh0->ip4);
	      if (ipsec_sa_is_set_UDP_ENCAP (sa0) && ouh0)
		{
		  ouh0->udp.length =
		    clib_host_to_net_u16 (clib_net_to_host_u16
					  (ouh0->ip4.length) -
					  ip4_header_bytes (&ouh0->ip4));
		}
	    }
	  b0->flags |= VLIB_BUFFER_TOTAL_LENGTH_VALID;

	  /* mbuf packet starts at ESP header */
	  mb0->data_len = vlib_buffer_get_tail (b0) - ((u8 *) esp0);
	  mb0->pkt_len = vlib_buffer_get_tail (b0) - ((u8 *) esp0);
	  mb0->data_off = ((void *) esp0) - mb0->buf_addr;

	  u32 cipher_off, cipher_len, auth_len = 0;
	  u32 *aad = NULL;

	  u8 *digest = vlib_buffer_get_tail (b0) - trunc_size;
	  u64 digest_paddr =
	    mb0->buf_physaddr + digest - ((u8 *) mb0->buf_addr);
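
	  /*
	   * Cipher region layout differs per mode: for AES-CBC the region
	   * begins right after the ESP header and includes the IV field,
	   * while for CTR/GCM the IV field is filled with the (extended)
	   * sequence number and the region starts after it.
	   */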
	  if (!is_aead && cipher_alg->alg == RTE_CRYPTO_CIPHER_AES_CBC)
	    {
	      cipher_off = sizeof (esp_header_t);
	      cipher_len = iv_size + pad_payload_len;
	    }
	  else			/* CTR/GCM */
	    {
	      u32 *esp_iv = (u32 *) (esp0 + 1);
	      esp_iv[0] = sa0->seq;
	      esp_iv[1] = sa0->seq_hi;

	      cipher_off = sizeof (esp_header_t) + iv_size;
	      cipher_len = pad_payload_len;
	    }

	  if (is_aead)
	    {
	      aad = (u32 *) priv->aad;
	      aad[0] = clib_host_to_net_u32 (sa0->spi);
	      aad[1] = clib_host_to_net_u32 (sa0->seq);

	      /* aad[3] should always be 0 */
	      if (PREDICT_FALSE (ipsec_sa_is_set_USE_ESN (sa0)))
		aad[2] = clib_host_to_net_u32 (sa0->seq_hi);
	      else
		aad[2] = 0;
	    }
	  else
	    {
	      auth_len =
		vlib_buffer_get_tail (b0) - ((u8 *) esp0) - trunc_size;
	      if (ipsec_sa_is_set_USE_ESN (sa0))
		{
		  u32 *_digest = (u32 *) digest;
		  _digest[0] = clib_host_to_net_u32 (sa0->seq_hi);
		  auth_len += 4;
		}
	    }

	  crypto_op_setup (is_aead, mb0, op, session, cipher_off, cipher_len,
			   0, auth_len, (u8 *) aad, digest, digest_paddr);

	trace:
	  if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
	    {
	      esp_encrypt_trace_t *tr =
		vlib_add_trace (vm, node, b0, sizeof (*tr));
	      tr->crypto_alg = sa0->crypto_alg;
	      tr->integ_alg = sa0->integ_alg;
	      u8 *p = vlib_buffer_get_current (b0);
	      if (!ipsec_sa_is_set_IS_TUNNEL (sa0))
		p += vnet_buffer (b0)->ip.save_rewrite_length;
	      clib_memcpy_fast (tr->packet_data, p, sizeof (tr->packet_data));
	    }
	}
      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

  if (is_ip6)
    {
      vlib_node_increment_counter (vm, dpdk_esp6_encrypt_node.index,
				   ESP_ENCRYPT_ERROR_RX_PKTS,
				   from_frame->n_vectors);

      crypto_enqueue_ops (vm, cwm, dpdk_esp6_encrypt_node.index,
			  ESP_ENCRYPT_ERROR_ENQ_FAIL, numa, 1 /* encrypt */ );
    }
  else
    {
      vlib_node_increment_counter (vm, dpdk_esp4_encrypt_node.index,
				   ESP_ENCRYPT_ERROR_RX_PKTS,
				   from_frame->n_vectors);

      crypto_enqueue_ops (vm, cwm, dpdk_esp4_encrypt_node.index,
			  ESP_ENCRYPT_ERROR_ENQ_FAIL, numa, 1 /* encrypt */ );
    }

  crypto_free_ops (numa, ops, cwm->ops + from_frame->n_vectors - ops);

  return from_frame->n_vectors;
}

VLIB_NODE_FN (dpdk_esp4_encrypt_node) (vlib_main_t * vm,
				       vlib_node_runtime_t * node,
				       vlib_frame_t * from_frame)
{
  return dpdk_esp_encrypt_inline (vm, node, from_frame, 0 /*is_ip6 */ , 0);
}
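
/*
 * Each encrypt node advertises only the error-drop next: packets that are
 * successfully prepared here are handed to the cryptodev and re-injected
 * into the graph by the crypto dequeue node (via priv->next), so only
 * failed packets ever use this node's next frame.
 */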
VLIB_REGISTER_NODE (dpdk_esp4_encrypt_node) = {
  .name = "dpdk-esp4-encrypt",
  .flags = VLIB_NODE_FLAG_IS_OUTPUT,
  .vector_size = sizeof (u32),
  .format_trace = format_esp_encrypt_trace,
  .n_errors = ARRAY_LEN (esp_encrypt_error_strings),
  .error_strings = esp_encrypt_error_strings,
  .n_next_nodes = 1,
  .next_nodes =
    {
      [ESP_ENCRYPT_NEXT_DROP] = "error-drop",
    }
};

VLIB_NODE_FN (dpdk_esp6_encrypt_node) (vlib_main_t * vm,
				       vlib_node_runtime_t * node,
				       vlib_frame_t * from_frame)
{
  return dpdk_esp_encrypt_inline (vm, node, from_frame, 1 /*is_ip6 */ , 0);
}
VLIB_REGISTER_NODE (dpdk_esp6_encrypt_node) = {
  .name = "dpdk-esp6-encrypt",
  .flags = VLIB_NODE_FLAG_IS_OUTPUT,
  .vector_size = sizeof (u32),
  .format_trace = format_esp_encrypt_trace,
  .n_errors = ARRAY_LEN (esp_encrypt_error_strings),
  .error_strings = esp_encrypt_error_strings,
  .n_next_nodes = 1,
  .next_nodes =
    {
      [ESP_ENCRYPT_NEXT_DROP] = "error-drop",
    }
};

VLIB_NODE_FN (dpdk_esp4_encrypt_tun_node) (vlib_main_t * vm,
					   vlib_node_runtime_t * node,
					   vlib_frame_t * from_frame)
{
  return dpdk_esp_encrypt_inline (vm, node, from_frame, 0 /*is_ip6 */ , 1);
}
VLIB_REGISTER_NODE (dpdk_esp4_encrypt_tun_node) = {
  .name = "dpdk-esp4-encrypt-tun",
  .flags = VLIB_NODE_FLAG_IS_OUTPUT,
  .vector_size = sizeof (u32),
  .format_trace = format_esp_encrypt_trace,
  .n_errors = ARRAY_LEN (esp_encrypt_error_strings),
  .error_strings = esp_encrypt_error_strings,
  .n_next_nodes = 1,
  .next_nodes =
    {
      [ESP_ENCRYPT_NEXT_DROP] = "error-drop",
    }
};

VNET_FEATURE_INIT (dpdk_esp4_encrypt_tun_feat_node, static) =
{
  .arc_name = "ip4-output",
  .node_name = "dpdk-esp4-encrypt-tun",
  .runs_before = VNET_FEATURES ("adj-midchain-tx"),
};

VLIB_NODE_FN (dpdk_esp6_encrypt_tun_node) (vlib_main_t * vm,
					   vlib_node_runtime_t * node,
					   vlib_frame_t * from_frame)
{
  return dpdk_esp_encrypt_inline (vm, node, from_frame, 1 /*is_ip6 */ , 1);
}
VLIB_REGISTER_NODE (dpdk_esp6_encrypt_tun_node) = {
  .name = "dpdk-esp6-encrypt-tun",
  .flags = VLIB_NODE_FLAG_IS_OUTPUT,
  .vector_size = sizeof (u32),
  .format_trace = format_esp_encrypt_trace,
  .n_errors = ARRAY_LEN (esp_encrypt_error_strings),
  .error_strings = esp_encrypt_error_strings,
  .n_next_nodes = 1,
  .next_nodes =
    {
      [ESP_ENCRYPT_NEXT_DROP] = "error-drop",
    }
};

VNET_FEATURE_INIT (dpdk_esp6_encrypt_tun_feat_node, static) =
{
  .arc_name = "ip6-output",
  .node_name = "dpdk-esp6-encrypt-tun",
  .runs_before = VNET_FEATURES ("adj-midchain-tx"),
};

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */