/*
 * esp_decrypt.c : IPSec ESP decrypt node
 *
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <vnet/vnet.h>
#include <vnet/api_errno.h>
#include <vnet/ip/ip.h>

#include <vnet/ipsec/ipsec.h>
#include <vnet/ipsec/esp.h>
#include <vnet/ipsec/ipsec_io.h>
#include <vnet/ipsec/ipsec_tun.h>

#include <vnet/gre/gre.h>
#define foreach_esp_decrypt_next                \
_(DROP, "error-drop")                           \
_(IP4_INPUT, "ip4-input-no-checksum")           \
_(IP6_INPUT, "ip6-input")                       \
_(L2_INPUT, "l2-input")                         \
_(HANDOFF, "handoff")

#define _(v, s) ESP_DECRYPT_NEXT_##v,
typedef enum
{
  foreach_esp_decrypt_next
#undef _
    ESP_DECRYPT_N_NEXT,
} esp_decrypt_next_t;
#define foreach_esp_decrypt_error                               \
 _(RX_PKTS, "ESP pkts received")                                \
 _(DECRYPTION_FAILED, "ESP decryption failed")                  \
 _(INTEG_ERROR, "Integrity check failed")                       \
 _(CRYPTO_ENGINE_ERROR, "crypto engine error (packet dropped)") \
 _(REPLAY, "SA replayed packet")                                \
 _(RUNT, "undersized packet")                                   \
 _(CHAINED_BUFFER, "chained buffers (packet dropped)")          \
 _(OVERSIZED_HEADER, "buffer with oversized header (dropped)")  \
 _(NO_TAIL_SPACE, "not enough buffer tail space (dropped)")     \
 _(TUN_NO_PROTO, "no tunnel protocol")

typedef enum
{
#define _(sym,str) ESP_DECRYPT_ERROR_##sym,
  foreach_esp_decrypt_error
#undef _
    ESP_DECRYPT_N_ERROR,
} esp_decrypt_error_t;
static char *esp_decrypt_error_strings[] = {
#define _(sym,string) string,
  foreach_esp_decrypt_error
#undef _
};
typedef struct
{
  u32 seq;
  u32 sa_seq;
  u32 sa_seq_hi;
  ipsec_crypto_alg_t crypto_alg;
  ipsec_integ_alg_t integ_alg;
} esp_decrypt_trace_t;
/* packet trace format function */
static u8 *
format_esp_decrypt_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  esp_decrypt_trace_t *t = va_arg (*args, esp_decrypt_trace_t *);

  s = format (s,
              "esp: crypto %U integrity %U pkt-seq %d sa-seq %u sa-seq-hi %u",
              format_ipsec_crypto_alg, t->crypto_alg, format_ipsec_integ_alg,
              t->integ_alg, t->seq, t->sa_seq, t->sa_seq_hi);
  return s;
}
typedef struct
{
  union
  {
    struct
    {
      u8 icv_sz;
      u8 iv_sz;
      ipsec_sa_flags_t flags;
      u32 sa_index;
    };
    u64 sa_data;
  };

  u32 seq;
  i16 current_data;
  i16 current_length;
  u16 hdr_sz;
} esp_decrypt_packet_data_t;

STATIC_ASSERT_SIZEOF (esp_decrypt_packet_data_t, 3 * sizeof (u64));
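/*
 * The size assertion above matters for performance rather than
 * correctness: pd entries are filled in the first pass over the frame
 * and walked again in the second, so keeping each entry at 24 bytes
 * keeps the pkt_data array compact and prefetch-friendly.
 */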
#define ESP_ENCRYPT_PD_F_FD_TRANSPORT (1 << 2)
always_inline uword
esp_decrypt_inline (vlib_main_t * vm,
                    vlib_node_runtime_t * node, vlib_frame_t * from_frame,
                    int is_ip6, int is_tun)
{
  ipsec_main_t *im = &ipsec_main;
  u32 thread_index = vm->thread_index;
  u16 buffer_data_size = vlib_buffer_get_default_data_size (vm);
  u16 len;
  ipsec_per_thread_data_t *ptd = vec_elt_at_index (im->ptd, thread_index);
  u32 *from = vlib_frame_vector_args (from_frame);
  u32 n, n_left = from_frame->n_vectors;
  vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;
  u16 nexts[VLIB_FRAME_SIZE], *next = nexts;
  esp_decrypt_packet_data_t pkt_data[VLIB_FRAME_SIZE], *pd = pkt_data;
  esp_decrypt_packet_data_t cpd = { };
  u32 current_sa_index = ~0, current_sa_bytes = 0, current_sa_pkts = 0;
  const u8 esp_sz = sizeof (esp_header_t);
  ipsec_sa_t *sa0 = 0;

  vlib_get_buffers (vm, from, b, n_left);
  vec_reset_length (ptd->crypto_ops);
  vec_reset_length (ptd->integ_ops);
  clib_memset_u16 (nexts, -1, n_left);
  while (n_left > 0)
    {
      u8 *payload;

      if (n_left > 2)
        {
          u8 *p;
          vlib_prefetch_buffer_header (b[2], LOAD);
          p = vlib_buffer_get_current (b[1]);
          CLIB_PREFETCH (p, CLIB_CACHE_LINE_BYTES, LOAD);
          p -= CLIB_CACHE_LINE_BYTES;
          CLIB_PREFETCH (p, CLIB_CACHE_LINE_BYTES, LOAD);
        }
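      /*
       * The prefetches above run ahead of the packet being processed:
       * b[2]'s buffer metadata and b[1]'s packet data - the current
       * cache line plus the line in front of it, which the AEAD path
       * later reuses as scratch space for the AAD and nonce.
       */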
      if (vlib_buffer_chain_linearize (vm, b[0]) != 1)
        {
          b[0]->error = node->errors[ESP_DECRYPT_ERROR_CHAINED_BUFFER];
          next[0] = ESP_DECRYPT_NEXT_DROP;
          goto next;
        }
      if (vnet_buffer (b[0])->ipsec.sad_index != current_sa_index)
        {
          if (current_sa_pkts)
            vlib_increment_combined_counter (&ipsec_sa_counters, thread_index,
                                             current_sa_index,
                                             current_sa_pkts,
                                             current_sa_bytes);
          current_sa_bytes = current_sa_pkts = 0;

          current_sa_index = vnet_buffer (b[0])->ipsec.sad_index;
          sa0 = pool_elt_at_index (im->sad, current_sa_index);
          cpd.icv_sz = sa0->integ_icv_size;
          cpd.iv_sz = sa0->crypto_iv_size;
          cpd.flags = sa0->flags;
          cpd.sa_index = current_sa_index;
        }
      if (PREDICT_FALSE (~0 == sa0->decrypt_thread_index))
        {
          /* this is the first packet to use this SA, claim the SA
           * for this thread. this could happen simultaneously on
           * another thread */
          clib_atomic_cmp_and_swap (&sa0->decrypt_thread_index, ~0,
                                    ipsec_sa_assign_thread (thread_index));
        }
      if (PREDICT_TRUE (thread_index != sa0->decrypt_thread_index))
        {
          next[0] = ESP_DECRYPT_NEXT_HANDOFF;
          goto next;
        }
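      /*
       * Handing the packet to the SA's owning thread keeps the
       * anti-replay window and sequence state of each SA updated by a
       * single thread, without per-packet locking.
       */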
      /* store packet data for next round for easier prefetch */
      pd->sa_data = cpd.sa_data;
      pd->current_data = b[0]->current_data;
      pd->current_length = b[0]->current_length;
      pd->hdr_sz = pd->current_data - vnet_buffer (b[0])->l3_hdr_offset;
      payload = b[0]->data + pd->current_data;
      pd->seq = clib_host_to_net_u32 (((esp_header_t *) payload)->seq);
      /* we need 4 extra bytes for HMAC calculation when ESN is in use */
      if (ipsec_sa_is_set_USE_ESN (sa0) && pd->icv_sz &&
          (pd->current_data + pd->current_length + 4 > buffer_data_size))
        {
          b[0]->error = node->errors[ESP_DECRYPT_ERROR_NO_TAIL_SPACE];
          next[0] = ESP_DECRYPT_NEXT_DROP;
          goto next;
        }
      /* anti-replay check */
      if (ipsec_sa_anti_replay_check (sa0, pd->seq))
        {
          b[0]->error = node->errors[ESP_DECRYPT_ERROR_REPLAY];
          next[0] = ESP_DECRYPT_NEXT_DROP;
          goto next;
        }
      if (pd->current_length < cpd.icv_sz + esp_sz + cpd.iv_sz)
        {
          b[0]->error = node->errors[ESP_DECRYPT_ERROR_RUNT];
          next[0] = ESP_DECRYPT_NEXT_DROP;
          goto next;
        }

      len = pd->current_length - cpd.icv_sz;
      current_sa_pkts += 1;
      current_sa_bytes += pd->current_length;
      if (PREDICT_TRUE (sa0->integ_op_id != VNET_CRYPTO_OP_NONE))
        {
          vnet_crypto_op_t *op;
          vec_add2_aligned (ptd->integ_ops, op, 1, CLIB_CACHE_LINE_BYTES);

          vnet_crypto_op_init (op, sa0->integ_op_id);
          op->key_index = sa0->integ_key_index;
          op->src = payload;
          op->flags = VNET_CRYPTO_OP_FLAG_HMAC_CHECK;
          op->user_data = b - bufs;
          op->digest = payload + len;
          op->digest_len = cpd.icv_sz;
          op->len = len;
          if (ipsec_sa_is_set_USE_ESN (sa0))
            {
              /* shift ICV by 4 bytes to insert ESN */
              u32 seq_hi = clib_host_to_net_u32 (sa0->seq_hi);
              u8 tmp[ESP_MAX_ICV_SIZE], sz = sizeof (sa0->seq_hi);
              clib_memcpy_fast (tmp, payload + len, ESP_MAX_ICV_SIZE);
              clib_memcpy_fast (payload + len, &seq_hi, sz);
              clib_memcpy_fast (payload + len + sz, tmp, ESP_MAX_ICV_SIZE);
              op->len += sz;
              op->digest += sz;
            }
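          /*
           * After the shift the packet tail reads
           *    ... | ciphertext | seq-hi (4 bytes) | ICV |
           * so the MAC is computed over the high-order sequence bits as
           * RFC 4303 requires; op->digest and op->len above are bumped
           * by those 4 bytes accordingly.
           */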
        }

      payload += esp_sz;
      len -= esp_sz;

      if (sa0->crypto_dec_op_id != VNET_CRYPTO_OP_NONE)
        {
          vnet_crypto_op_t *op;
          vec_add2_aligned (ptd->crypto_ops, op, 1, CLIB_CACHE_LINE_BYTES);
          vnet_crypto_op_init (op, sa0->crypto_dec_op_id);
          op->key_index = sa0->crypto_key_index;
          op->iv = payload;
          if (ipsec_sa_is_set_IS_AEAD (sa0))
            {
              esp_header_t *esp0;
              esp_aead_t *aad;
              u8 *scratch;

              /*
               * construct the AAD and the nonce (Salt || IV) in a scratch
               * space in front of the IP header.
               */
              scratch = payload - esp_sz;
              esp0 = (esp_header_t *) (scratch);

              scratch -= (sizeof (*aad) + pd->hdr_sz);
              op->aad = scratch;

              esp_aad_fill (op, esp0, sa0);
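              /*
               * For AEAD ciphers the AAD is taken from the ESP header:
               * the SPI and sequence number, plus the high-order ESN
               * bits when extended sequence numbers are in use
               * (RFC 4106 / RFC 4303).
               */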
              /*
               * we don't need to refer to the ESP header anymore so we
               * can overwrite it with the salt and use the IV where it is
               * to form the nonce = (Salt || IV)
               */
              op->iv -= sizeof (sa0->salt);
              clib_memcpy_fast (op->iv, &sa0->salt, sizeof (sa0->salt));

              op->tag = payload + len;
              op->tag_len = 16;
            }

          op->src = op->dst = payload += cpd.iv_sz;
          op->len = len - cpd.iv_sz;
          op->user_data = b - bufs;
        }

      /* next */
    next:
      n_left -= 1;
      next += 1;
      pd += 1;
      b += 1;
    }
  vlib_increment_combined_counter (&ipsec_sa_counters, thread_index,
                                   current_sa_index, current_sa_pkts,
                                   current_sa_bytes);
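  /*
   * All integrity and decrypt operations queued in the loop above are
   * now handed to the crypto infra in one batch; op->user_data carries
   * the buffer index so failures can be mapped back to their packets.
   */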
  if ((n = vec_len (ptd->integ_ops)))
    {
      vnet_crypto_op_t *op = ptd->integ_ops;
      n -= vnet_crypto_process_ops (vm, op, n);

      while (n)
        {
          ASSERT (op - ptd->integ_ops < vec_len (ptd->integ_ops));
          if (op->status != VNET_CRYPTO_OP_STATUS_COMPLETED)
            {
              u32 err, bi = op->user_data;
              if (op->status == VNET_CRYPTO_OP_STATUS_FAIL_BAD_HMAC)
                err = ESP_DECRYPT_ERROR_INTEG_ERROR;
              else
                err = ESP_DECRYPT_ERROR_CRYPTO_ENGINE_ERROR;
              bufs[bi]->error = node->errors[err];
              nexts[bi] = ESP_DECRYPT_NEXT_DROP;
              n--;
            }
          op++;
        }
    }
  if ((n = vec_len (ptd->crypto_ops)))
    {
      vnet_crypto_op_t *op = ptd->crypto_ops;
      n -= vnet_crypto_process_ops (vm, op, n);

      while (n)
        {
          ASSERT (op - ptd->crypto_ops < vec_len (ptd->crypto_ops));
          if (op->status != VNET_CRYPTO_OP_STATUS_COMPLETED)
            {
              u32 err, bi = op->user_data;

              if (op->status == VNET_CRYPTO_OP_STATUS_FAIL_BAD_HMAC)
                err = ESP_DECRYPT_ERROR_DECRYPTION_FAILED;
              else
                err = ESP_DECRYPT_ERROR_CRYPTO_ENGINE_ERROR;
              bufs[bi]->error = node->errors[err];
              nexts[bi] = ESP_DECRYPT_NEXT_DROP;
              n--;
            }
          op++;
        }
    }
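  /*
   * Note: AEAD engines report a tag mismatch as FAIL_BAD_HMAC, which is
   * why a decrypt op with that status is counted above as a decryption
   * failure rather than an integrity error.
   */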
  /* Post decryption round - adjust packet data start and length and
     next node */
  n_left = from_frame->n_vectors;
  next = nexts;
  pd = pkt_data;
  b = bufs;

  while (n_left)
    {
      const u8 tun_flags = IPSEC_SA_FLAG_IS_TUNNEL |
        IPSEC_SA_FLAG_IS_TUNNEL_V6;
      if (n_left > 1)
        {
          void *data = b[1]->data + pd[1].current_data;

          /* buffer metadata */
          vlib_prefetch_buffer_header (b[1], LOAD);

          /* esp trailer */
          CLIB_PREFETCH (data + pd[1].current_length - pd[1].icv_sz - 2,
                         CLIB_CACHE_LINE_BYTES, LOAD);

          /* packet headers */
          CLIB_PREFETCH (data - CLIB_CACHE_LINE_BYTES,
                         CLIB_CACHE_LINE_BYTES * 2, LOAD);
        }
      /* packets that already have a valid next (drop/handoff) skip the
       * post-processing */
      if (next[0] < ESP_DECRYPT_N_NEXT)
        goto trace;

      sa0 = vec_elt_at_index (im->sad, pd->sa_index);
      /*
       * redo the anti-replay check
       *  in this frame say we have sequence numbers, s, s+1, s+1, s+1
       *  and s and s+1 are in the window. When we did the anti-replay
       *  check above we did so against the state of the window (W),
       *  after packet s-1. So each of the packets in the sequence will
       *  be accepted.
       * This time s will be checked against Ws-1, s+1 checked against Ws
       *  (i.e. the window state is updated/advanced)
       *  so this time the successive s+1 packet will be dropped.
       * This is a consequence of batching the decrypts. If the
       *  check-decrypt-advance process was done for each packet it would
       *  be fine. But we batch the decrypts because it's much more
       *  efficient to do so in SW and if we offload to HW the process
       *  is async.
       *
       * You're probably thinking, but this means an attacker can send the
       * above sequence and cause VPP to perform decrypts that will fail,
       * and that's true. But if the attacker can determine s (a valid
       * sequence number in the window) which is non-trivial, it can
       * generate a sequence s, s+1, s+2, s+3, ... s+n and nothing will
       * prevent any implementation, sequential or batching, from
       * decrypting these.
       */
      if (ipsec_sa_anti_replay_check (sa0, pd->seq))
        {
          b[0]->error = node->errors[ESP_DECRYPT_ERROR_REPLAY];
          next[0] = ESP_DECRYPT_NEXT_DROP;
          goto trace;
        }

      ipsec_sa_anti_replay_advance (sa0, pd->seq);
      esp_footer_t *f = (esp_footer_t *) (b[0]->data + pd->current_data +
                                          pd->current_length - sizeof (*f) -
                                          pd->icv_sz);
      u16 adv = pd->iv_sz + esp_sz;
      u16 tail = sizeof (esp_footer_t) + f->pad_length + pd->icv_sz;
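      /*
       * A decrypted packet is laid out as
       *   | IP (and opt. UDP) | ESP | IV | inner packet | padding |
       *   | pad_length | next_header | ICV |
       * "adv" strips the ESP header and IV from the front, "tail"
       * strips the ESP trailer and ICV from the back.
       */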
      if ((pd->flags & tun_flags) == 0 && !is_tun)	/* transport mode */
        {
          u8 udp_sz = (is_ip6 == 0 && pd->flags & IPSEC_SA_FLAG_UDP_ENCAP) ?
            sizeof (udp_header_t) : 0;
          u16 ip_hdr_sz = pd->hdr_sz - udp_sz;
          u8 *old_ip = b[0]->data + pd->current_data - ip_hdr_sz - udp_sz;
          u8 *ip = old_ip + adv + udp_sz;

          if (is_ip6 && ip_hdr_sz > 64)
            memmove (ip, old_ip, ip_hdr_sz);
          else
            clib_memcpy_le64 (ip, old_ip, ip_hdr_sz);

          b[0]->current_data = pd->current_data + adv - ip_hdr_sz;
          b[0]->current_length = pd->current_length + ip_hdr_sz - tail - adv;
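          /*
           * Fix up the L3 header for the bytes that were removed; for
           * ip4 the checksum is updated incrementally (RFC 1624) rather
           * than recomputed from scratch.
           */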
          if (is_ip6)
            {
              ip6_header_t *ip6 = (ip6_header_t *) ip;
              u16 len = clib_net_to_host_u16 (ip6->payload_length);
              len -= adv + tail;
              ip6->payload_length = clib_host_to_net_u16 (len);
              ip6->protocol = f->next_header;
              next[0] = ESP_DECRYPT_NEXT_IP6_INPUT;
            }
          else
            {
              ip4_header_t *ip4 = (ip4_header_t *) ip;
              ip_csum_t sum = ip4->checksum;
              u16 len = clib_net_to_host_u16 (ip4->length);
              len = clib_host_to_net_u16 (len - adv - tail - udp_sz);
              sum = ip_csum_update (sum, ip4->protocol, f->next_header,
                                    ip4_header_t, protocol);
              sum = ip_csum_update (sum, ip4->length, len,
                                    ip4_header_t, length);
              ip4->checksum = ip_csum_fold (sum);
              ip4->protocol = f->next_header;
              ip4->length = len;
              next[0] = ESP_DECRYPT_NEXT_IP4_INPUT;
            }
        }
      else
        {
          if (PREDICT_TRUE (f->next_header == IP_PROTOCOL_IP_IN_IP))
            {
              next[0] = ESP_DECRYPT_NEXT_IP4_INPUT;
              b[0]->current_data = pd->current_data + adv;
              b[0]->current_length = pd->current_length - adv - tail;
            }
          else if (f->next_header == IP_PROTOCOL_IPV6)
            {
              next[0] = ESP_DECRYPT_NEXT_IP6_INPUT;
              b[0]->current_data = pd->current_data + adv;
              b[0]->current_length = pd->current_length - adv - tail;
            }
          else
            {
              if (is_tun && f->next_header == IP_PROTOCOL_GRE)
                {
                  gre_header_t *gre;

                  b[0]->current_data = pd->current_data + adv;
                  b[0]->current_length = pd->current_length - adv - tail;

                  gre = vlib_buffer_get_current (b[0]);

                  vlib_buffer_advance (b[0], sizeof (*gre));

                  switch (clib_net_to_host_u16 (gre->protocol))
                    {
                    case GRE_PROTOCOL_teb:
                      next[0] = ESP_DECRYPT_NEXT_L2_INPUT;
                      break;
                    case GRE_PROTOCOL_ip4:
                      next[0] = ESP_DECRYPT_NEXT_IP4_INPUT;
                      break;
                    case GRE_PROTOCOL_ip6:
                      next[0] = ESP_DECRYPT_NEXT_IP6_INPUT;
                      break;
                    default:
                      next[0] = ESP_DECRYPT_NEXT_DROP;
                      break;
                    }
                }
              else
                {
                  next[0] = ESP_DECRYPT_NEXT_DROP;
                  b[0]->error =
                    node->errors[ESP_DECRYPT_ERROR_DECRYPTION_FAILED];
                }
            }
        }
      if (ipsec_sa_is_set_IS_PROTECT (sa0))
        {
          /*
           * Check that the revealed IP header matches that
           * of the tunnel we are protecting
           */
          const ipsec_tun_protect_t *itp;

          itp = ipsec_tun_protect_get
            (vnet_buffer (b[0])->ipsec.protect_index);
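          /*
           * Note the comparison below is deliberately crossed: on
           * receive, the inner packet's source address should be the
           * protected tunnel's destination and vice versa.
           */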
          if (PREDICT_TRUE (f->next_header == IP_PROTOCOL_IP_IN_IP))
            {
              const ip4_header_t *ip4;

              ip4 = vlib_buffer_get_current (b[0]);

              if (!ip46_address_is_equal_v4 (&itp->itp_tun.src,
                                             &ip4->dst_address) ||
                  !ip46_address_is_equal_v4 (&itp->itp_tun.dst,
                                             &ip4->src_address))
                {
                  next[0] = ESP_DECRYPT_NEXT_DROP;
                  b[0]->error =
                    node->errors[ESP_DECRYPT_ERROR_TUN_NO_PROTO];
                }
            }
          else if (f->next_header == IP_PROTOCOL_IPV6)
            {
              const ip6_header_t *ip6;

              ip6 = vlib_buffer_get_current (b[0]);

              if (!ip46_address_is_equal_v6 (&itp->itp_tun.src,
                                             &ip6->dst_address) ||
                  !ip46_address_is_equal_v6 (&itp->itp_tun.dst,
                                             &ip6->src_address))
                {
                  next[0] = ESP_DECRYPT_NEXT_DROP;
                  b[0]->error =
                    node->errors[ESP_DECRYPT_ERROR_TUN_NO_PROTO];
                }
            }
        }
    trace:
      if (PREDICT_FALSE (b[0]->flags & VLIB_BUFFER_IS_TRACED))
        {
          esp_decrypt_trace_t *tr;
          tr = vlib_add_trace (vm, node, b[0], sizeof (*tr));
          sa0 = pool_elt_at_index (im->sad,
                                   vnet_buffer (b[0])->ipsec.sad_index);
          tr->crypto_alg = sa0->crypto_alg;
          tr->integ_alg = sa0->integ_alg;
          tr->seq = pd->seq;
          tr->sa_seq = sa0->last_seq;
          tr->sa_seq_hi = sa0->seq_hi;
        }

      /* next */
      n_left -= 1;
      next += 1;
      pd += 1;
      b += 1;
    }
  n_left = from_frame->n_vectors;
  vlib_node_increment_counter (vm, node->node_index,
                               ESP_DECRYPT_ERROR_RX_PKTS, n_left);

  vlib_buffer_enqueue_to_next (vm, node, from, nexts, n_left);

  return n_left;
}
VLIB_NODE_FN (esp4_decrypt_node) (vlib_main_t * vm,
                                  vlib_node_runtime_t * node,
                                  vlib_frame_t * from_frame)
{
  return esp_decrypt_inline (vm, node, from_frame, 0, 0);
}

VLIB_NODE_FN (esp4_decrypt_tun_node) (vlib_main_t * vm,
                                      vlib_node_runtime_t * node,
                                      vlib_frame_t * from_frame)
{
  return esp_decrypt_inline (vm, node, from_frame, 0, 1);
}

VLIB_NODE_FN (esp6_decrypt_node) (vlib_main_t * vm,
                                  vlib_node_runtime_t * node,
                                  vlib_frame_t * from_frame)
{
  return esp_decrypt_inline (vm, node, from_frame, 1, 0);
}

VLIB_NODE_FN (esp6_decrypt_tun_node) (vlib_main_t * vm,
                                      vlib_node_runtime_t * node,
                                      vlib_frame_t * from_frame)
{
  return esp_decrypt_inline (vm, node, from_frame, 1, 1);
}
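/*
 * The four entry points above differ only in the compile-time constants
 * (is_ip6, is_tun) passed to esp_decrypt_inline, so the compiler emits
 * a specialised version of the function for each node and the branches
 * on those flags disappear.
 */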
VLIB_REGISTER_NODE (esp4_decrypt_node) = {
  .name = "esp4-decrypt",
  .vector_size = sizeof (u32),
  .format_trace = format_esp_decrypt_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = ARRAY_LEN(esp_decrypt_error_strings),
  .error_strings = esp_decrypt_error_strings,

  .n_next_nodes = ESP_DECRYPT_N_NEXT,
  .next_nodes = {
    [ESP_DECRYPT_NEXT_DROP] = "ip4-drop",
    [ESP_DECRYPT_NEXT_IP4_INPUT] = "ip4-input-no-checksum",
    [ESP_DECRYPT_NEXT_IP6_INPUT] = "ip6-input",
    [ESP_DECRYPT_NEXT_L2_INPUT] = "l2-input",
    [ESP_DECRYPT_NEXT_HANDOFF] = "esp4-decrypt-handoff",
  },
};
VLIB_REGISTER_NODE (esp6_decrypt_node) = {
  .name = "esp6-decrypt",
  .vector_size = sizeof (u32),
  .format_trace = format_esp_decrypt_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = ARRAY_LEN(esp_decrypt_error_strings),
  .error_strings = esp_decrypt_error_strings,

  .n_next_nodes = ESP_DECRYPT_N_NEXT,
  .next_nodes = {
    [ESP_DECRYPT_NEXT_DROP] = "ip6-drop",
    [ESP_DECRYPT_NEXT_IP4_INPUT] = "ip4-input-no-checksum",
    [ESP_DECRYPT_NEXT_IP6_INPUT] = "ip6-input",
    [ESP_DECRYPT_NEXT_L2_INPUT] = "l2-input",
    [ESP_DECRYPT_NEXT_HANDOFF] = "esp6-decrypt-handoff",
  },
};
VLIB_REGISTER_NODE (esp4_decrypt_tun_node) = {
  .name = "esp4-decrypt-tun",
  .vector_size = sizeof (u32),
  .format_trace = format_esp_decrypt_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .n_errors = ARRAY_LEN(esp_decrypt_error_strings),
  .error_strings = esp_decrypt_error_strings,
  .n_next_nodes = ESP_DECRYPT_N_NEXT,
  .next_nodes = {
    [ESP_DECRYPT_NEXT_DROP] = "ip4-drop",
    [ESP_DECRYPT_NEXT_IP4_INPUT] = "ip4-input-no-checksum",
    [ESP_DECRYPT_NEXT_IP6_INPUT] = "ip6-input",
    [ESP_DECRYPT_NEXT_L2_INPUT] = "l2-input",
    [ESP_DECRYPT_NEXT_HANDOFF] = "esp4-decrypt-handoff",
  },
};
VLIB_REGISTER_NODE (esp6_decrypt_tun_node) = {
  .name = "esp6-decrypt-tun",
  .vector_size = sizeof (u32),
  .format_trace = format_esp_decrypt_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .n_errors = ARRAY_LEN(esp_decrypt_error_strings),
  .error_strings = esp_decrypt_error_strings,
  .n_next_nodes = ESP_DECRYPT_N_NEXT,
  .next_nodes = {
    [ESP_DECRYPT_NEXT_DROP] = "ip6-drop",
    [ESP_DECRYPT_NEXT_IP4_INPUT] = "ip4-input-no-checksum",
    [ESP_DECRYPT_NEXT_IP6_INPUT] = "ip6-input",
    [ESP_DECRYPT_NEXT_L2_INPUT] = "l2-input",
    [ESP_DECRYPT_NEXT_HANDOFF] = "esp6-decrypt-handoff",
  },
};
/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */