src/vnet/devices/dpdk/ipsec/esp_encrypt.c
/*
 * esp_encrypt.c : IPSec ESP encrypt node using DPDK Cryptodev
 *
 * Copyright (c) 2016 Intel and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <vnet/vnet.h>
#include <vnet/api_errno.h>
#include <vnet/ip/ip.h>

#include <vnet/ipsec/ipsec.h>
#include <vnet/devices/dpdk/ipsec/ipsec.h>
#include <vnet/devices/dpdk/ipsec/esp.h>
#include <vnet/devices/dpdk/dpdk.h>
#include <vnet/devices/dpdk/dpdk_priv.h>

#define foreach_esp_encrypt_next                   \
_(DROP, "error-drop")                              \
_(IP4_LOOKUP, "ip4-lookup")                        \
_(IP6_LOOKUP, "ip6-lookup")                        \
_(INTERFACE_OUTPUT, "interface-output")

#define _(v, s) ESP_ENCRYPT_NEXT_##v,
typedef enum
{
  foreach_esp_encrypt_next
#undef _
    ESP_ENCRYPT_N_NEXT,
} esp_encrypt_next_t;

#define foreach_esp_encrypt_error                   \
 _(RX_PKTS, "ESP pkts received")                    \
 _(SEQ_CYCLED, "sequence number cycled")            \
 _(ENQ_FAIL, "Enqueue failed (buffer full)")        \
 _(NO_CRYPTODEV, "Cryptodev not configured")        \
 _(UNSUPPORTED, "Cipher/Auth not supported")


typedef enum
{
#define _(sym,str) ESP_ENCRYPT_ERROR_##sym,
  foreach_esp_encrypt_error
#undef _
    ESP_ENCRYPT_N_ERROR,
} esp_encrypt_error_t;

static char *esp_encrypt_error_strings[] = {
#define _(sym,string) string,
  foreach_esp_encrypt_error
#undef _
};

vlib_node_registration_t dpdk_esp_encrypt_node;

typedef struct
{
  u32 spi;
  u32 seq;
  ipsec_crypto_alg_t crypto_alg;
  ipsec_integ_alg_t integ_alg;
} esp_encrypt_trace_t;

/* packet trace format function */
static u8 *
format_esp_encrypt_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  esp_encrypt_trace_t *t = va_arg (*args, esp_encrypt_trace_t *);

  s = format (s, "esp: spi %u seq %u crypto %U integrity %U",
              t->spi, t->seq,
              format_ipsec_crypto_alg, t->crypto_alg,
              format_ipsec_integ_alg, t->integ_alg);
  return s;
}

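/*
 * Main encrypt node: for each packet, advance the SA sequence number,
 * look up (or lazily create) the outbound cryptodev session, build the
 * outer IP/ESP headers, pad the payload and append the ESP trailer, and
 * prepare a DPDK crypto operation.  The operations are enqueued to the
 * cryptodev queue pairs in bursts once the whole frame has been walked.
 */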
static uword
dpdk_esp_encrypt_node_fn (vlib_main_t * vm,
                          vlib_node_runtime_t * node,
                          vlib_frame_t * from_frame)
{
  u32 n_left_from, *from, *to_next, next_index;
  ipsec_main_t *im = &ipsec_main;
  u32 cpu_index = os_get_cpu_number ();
  dpdk_crypto_main_t *dcm = &dpdk_crypto_main;
  dpdk_esp_main_t *em = &dpdk_esp_main;
  u32 i;

  from = vlib_frame_vector_args (from_frame);
  n_left_from = from_frame->n_vectors;

  if (PREDICT_FALSE (!dcm->workers_main))
    {
      /* Likely there are not enough cryptodevs, so drop frame */
      vlib_node_increment_counter (vm, dpdk_esp_encrypt_node.index,
                                   ESP_ENCRYPT_ERROR_NO_CRYPTODEV,
                                   n_left_from);
      vlib_buffer_free (vm, from, n_left_from);
      return n_left_from;
    }

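  /*
   * Per queue-pair staging for this worker: crypto ops and the matching
   * buffer indices are collected here and enqueued to the cryptodev in a
   * single burst at the end of the frame.
   */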
  crypto_worker_main_t *cwm = vec_elt_at_index (dcm->workers_main, cpu_index);
  u32 n_qps = vec_len (cwm->qp_data);
  struct rte_crypto_op **cops_to_enq[n_qps];
  u32 n_cop_qp[n_qps], *bi_to_enq[n_qps];

  for (i = 0; i < n_qps; i++)
    {
      bi_to_enq[i] = cwm->qp_data[i].bi;
      cops_to_enq[i] = cwm->qp_data[i].cops;
    }

  memset (n_cop_qp, 0, n_qps * sizeof (u32));

  crypto_alloc_cops ();

  next_index = ESP_ENCRYPT_NEXT_DROP;

  while (n_left_from > 0)
    {
      u32 n_left_to_next;

      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      while (n_left_from > 0 && n_left_to_next > 0)
        {
          u32 bi0, next0;
          vlib_buffer_t *b0 = 0;
          u32 sa_index0;
          ipsec_sa_t *sa0;
          ip4_and_esp_header_t *ih0, *oh0 = 0;
          ip6_and_esp_header_t *ih6_0, *oh6_0 = 0;
          struct rte_mbuf *mb0 = 0;
          esp_footer_t *f0;
          u8 is_ipv6;
          u8 ip_hdr_size;
          u8 next_hdr_type;
          u8 transport_mode = 0;
          const int BLOCK_SIZE = 16;
          u32 iv_size;
          u16 orig_sz;
          crypto_sa_session_t *sa_sess;
          void *sess;
          struct rte_crypto_op *cop = 0;
          u16 qp_index;

          bi0 = from[0];
          from += 1;
          n_left_from -= 1;

          b0 = vlib_get_buffer (vm, bi0);
          sa_index0 = vnet_buffer (b0)->ipsec.sad_index;
          sa0 = pool_elt_at_index (im->sad, sa_index0);

          if (PREDICT_FALSE (esp_seq_advance (sa0)))
            {
              clib_warning ("sequence number counter has cycled SPI %u",
                            sa0->spi);
              vlib_node_increment_counter (vm, dpdk_esp_encrypt_node.index,
                                           ESP_ENCRYPT_ERROR_SEQ_CYCLED, 1);
              //TODO: rekey SA
              to_next[0] = bi0;
              to_next += 1;
              n_left_to_next -= 1;
              goto trace;
            }

          sa0->total_data_size += b0->current_length;

          sa_sess = pool_elt_at_index (cwm->sa_sess_d[1], sa_index0);
          if (PREDICT_FALSE (!sa_sess->sess))
            {
              int ret = create_sym_sess (sa0, sa_sess, 1);

              if (PREDICT_FALSE (ret))
                {
                  to_next[0] = bi0;
                  to_next += 1;
                  n_left_to_next -= 1;
                  goto trace;
                }
            }

          qp_index = sa_sess->qp_index;
          sess = sa_sess->sess;

          ASSERT (vec_len (vec_elt (cwm->qp_data, qp_index).free_cops) > 0);
          cop = vec_pop (vec_elt (cwm->qp_data, qp_index).free_cops);
          ASSERT (cop->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED);

          cops_to_enq[qp_index][0] = cop;
          cops_to_enq[qp_index] += 1;
          n_cop_qp[qp_index] += 1;
          bi_to_enq[qp_index][0] = bi0;
          bi_to_enq[qp_index] += 1;

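          /*
           * Work out how far to advance the buffer: in tunnel mode a fresh
           * outer IP + ESP header (plus IV) is prepended, while in transport
           * mode only an ESP header and IV are inserted and the existing IP
           * header is kept, so its length is excluded from the payload to be
           * encrypted.
           */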
          ssize_t adv;
          iv_size = em->esp_crypto_algs[sa0->crypto_alg].iv_len;
          ih0 = vlib_buffer_get_current (b0);
          orig_sz = b0->current_length;
          is_ipv6 = (ih0->ip4.ip_version_and_header_length & 0xF0) == 0x60;
          /* is ipv6 */
          if (PREDICT_TRUE (sa0->is_tunnel))
            {
              if (PREDICT_TRUE (!is_ipv6))
                adv = -sizeof (ip4_and_esp_header_t);
              else
                adv = -sizeof (ip6_and_esp_header_t);
            }
          else
            {
              adv = -sizeof (esp_header_t);
              if (PREDICT_TRUE (!is_ipv6))
                orig_sz -= sizeof (ip4_header_t);
              else
                orig_sz -= sizeof (ip6_header_t);
            }

          /*transport mode save the eth header before it is overwritten */
          if (PREDICT_FALSE (!sa0->is_tunnel))
            {
              ethernet_header_t *ieh0 = (ethernet_header_t *)
                ((u8 *) vlib_buffer_get_current (b0) -
                 sizeof (ethernet_header_t));
              ethernet_header_t *oeh0 =
                (ethernet_header_t *) ((u8 *) ieh0 + (adv - iv_size));
              clib_memcpy (oeh0, ieh0, sizeof (ethernet_header_t));
            }

          vlib_buffer_advance (b0, adv - iv_size);

          /* XXX IP6/ip4 and IP4/IP6 not supported, only IP4/IP4 and IP6/IP6 */

          /* is ipv6 */
          if (PREDICT_FALSE (is_ipv6))
            {
              ih6_0 = (ip6_and_esp_header_t *) ih0;
              ip_hdr_size = sizeof (ip6_header_t);
              oh6_0 = vlib_buffer_get_current (b0);

              if (PREDICT_TRUE (sa0->is_tunnel))
                {
                  next_hdr_type = IP_PROTOCOL_IPV6;
                  oh6_0->ip6.ip_version_traffic_class_and_flow_label =
                    ih6_0->ip6.ip_version_traffic_class_and_flow_label;
                }
              else
                {
                  next_hdr_type = ih6_0->ip6.protocol;
                  memmove (oh6_0, ih6_0, sizeof (ip6_header_t));
                }

              oh6_0->ip6.protocol = IP_PROTOCOL_IPSEC_ESP;
              oh6_0->ip6.hop_limit = 254;
              oh6_0->esp.spi = clib_net_to_host_u32 (sa0->spi);
              oh6_0->esp.seq = clib_net_to_host_u32 (sa0->seq);
            }
          else
            {
              ip_hdr_size = sizeof (ip4_header_t);
              oh0 = vlib_buffer_get_current (b0);

              if (PREDICT_TRUE (sa0->is_tunnel))
                {
                  next_hdr_type = IP_PROTOCOL_IP_IN_IP;
                  oh0->ip4.tos = ih0->ip4.tos;
                }
              else
                {
                  next_hdr_type = ih0->ip4.protocol;
                  memmove (oh0, ih0, sizeof (ip4_header_t));
                }

              oh0->ip4.ip_version_and_header_length = 0x45;
              oh0->ip4.fragment_id = 0;
              oh0->ip4.flags_and_fragment_offset = 0;
              oh0->ip4.ttl = 254;
              oh0->ip4.protocol = IP_PROTOCOL_IPSEC_ESP;
              oh0->esp.spi = clib_net_to_host_u32 (sa0->spi);
              oh0->esp.seq = clib_net_to_host_u32 (sa0->seq);
            }

          if (PREDICT_TRUE (sa0->is_tunnel && !sa0->is_tunnel_ip6))
            {
              oh0->ip4.src_address.as_u32 = sa0->tunnel_src_addr.ip4.as_u32;
              oh0->ip4.dst_address.as_u32 = sa0->tunnel_dst_addr.ip4.as_u32;

              /* in tunnel mode send it back to FIB */
              next0 = ESP_ENCRYPT_NEXT_IP4_LOOKUP;
              vnet_buffer (b0)->sw_if_index[VLIB_TX] = (u32) ~ 0;
            }
          else if (sa0->is_tunnel && sa0->is_tunnel_ip6)
            {
              oh6_0->ip6.src_address.as_u64[0] =
                sa0->tunnel_src_addr.ip6.as_u64[0];
              oh6_0->ip6.src_address.as_u64[1] =
                sa0->tunnel_src_addr.ip6.as_u64[1];
              oh6_0->ip6.dst_address.as_u64[0] =
                sa0->tunnel_dst_addr.ip6.as_u64[0];
              oh6_0->ip6.dst_address.as_u64[1] =
                sa0->tunnel_dst_addr.ip6.as_u64[1];

              /* in tunnel mode send it back to FIB */
              next0 = ESP_ENCRYPT_NEXT_IP6_LOOKUP;
              vnet_buffer (b0)->sw_if_index[VLIB_TX] = (u32) ~ 0;
            }
          else
            {
              next0 = ESP_ENCRYPT_NEXT_INTERFACE_OUTPUT;
              transport_mode = 1;
            }

          ASSERT (sa0->crypto_alg < IPSEC_CRYPTO_N_ALG);
          ASSERT (sa0->crypto_alg != IPSEC_CRYPTO_ALG_NONE);

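          /*
           * Pad the payload to a whole number of 16-byte cipher blocks.
           * Pad bytes take the values 1, 2, 3, ... as required by RFC 4303,
           * and are followed by the 2-byte ESP trailer (pad_length,
           * next_header) plus room for the truncated ICV.
           */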
          int blocks = 1 + (orig_sz + 1) / BLOCK_SIZE;

          /* pad packet in input buffer */
          u8 pad_bytes = BLOCK_SIZE * blocks - 2 - orig_sz;
          u8 i;
          u8 *padding = vlib_buffer_get_current (b0) + b0->current_length;

          for (i = 0; i < pad_bytes; ++i)
            padding[i] = i + 1;

          f0 = vlib_buffer_get_current (b0) + b0->current_length + pad_bytes;
          f0->pad_length = pad_bytes;
          f0->next_header = next_hdr_type;
          b0->current_length += pad_bytes + 2 +
            em->esp_integ_algs[sa0->integ_alg].trunc_size;

          vnet_buffer (b0)->sw_if_index[VLIB_RX] =
            vnet_buffer (b0)->sw_if_index[VLIB_RX];
          b0->flags |= VLIB_BUFFER_TOTAL_LENGTH_VALID;

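          /*
           * Set up the DPDK crypto operation: the symmetric op immediately
           * follows the rte_crypto_op header in the same mempool element,
           * and the per-op private area (dpdk_cop_priv_t) carries the IV /
           * counter block and the AAD.  The chosen next node is stashed in
           * the buffer metadata for the post node to pick up once the
           * crypto result is back.
           */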
          struct rte_crypto_sym_op *sym_cop;
          sym_cop = (struct rte_crypto_sym_op *) (cop + 1);

          dpdk_cop_priv_t *priv = (dpdk_cop_priv_t *) (sym_cop + 1);

          vnet_buffer (b0)->unused[0] = next0;

          mb0 = rte_mbuf_from_vlib_buffer (b0);
          mb0->data_len = b0->current_length;
          mb0->pkt_len = b0->current_length;
          mb0->data_off = RTE_PKTMBUF_HEADROOM + b0->current_data;

          rte_crypto_op_attach_sym_session (cop, sess);

          sym_cop->m_src = mb0;

          dpdk_gcm_cnt_blk *icb = &priv->cb;
          icb->salt = sa0->salt;
          icb->iv[0] = sa0->seq;
          icb->iv[1] = sa0->seq_hi;

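          /*
           * Cipher setup: for AES-GCM the counter block is built from the SA
           * salt and the (extended) sequence number, an 8-byte IV is copied
           * into the packet right after the ESP header, and the cipher range
           * starts after that IV.  For the other (CBC-style) ciphers the IV
           * area in the packet is included in the cipher range and the
           * counter block is passed as the operation IV.
           */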
          if (sa0->crypto_alg == IPSEC_CRYPTO_ALG_AES_GCM_128)
            {
              icb->cnt = clib_host_to_net_u32 (1);
              clib_memcpy (vlib_buffer_get_current (b0) + ip_hdr_size +
                           sizeof (esp_header_t), icb->iv, 8);
              sym_cop->cipher.data.offset =
                ip_hdr_size + sizeof (esp_header_t) + iv_size;
              sym_cop->cipher.data.length = BLOCK_SIZE * blocks;
              sym_cop->cipher.iv.length = 16;
            }
          else
            {
              sym_cop->cipher.data.offset =
                ip_hdr_size + sizeof (esp_header_t);
              sym_cop->cipher.data.length = BLOCK_SIZE * blocks + iv_size;
              sym_cop->cipher.iv.length = iv_size;
            }

          sym_cop->cipher.iv.data = (u8 *) icb;
          sym_cop->cipher.iv.phys_addr = cop->phys_addr + (uintptr_t) icb
            - (uintptr_t) cop;


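          /*
           * Integrity setup: AES-GCM authenticates via AAD built from the
           * ESP header (SPI + low sequence number), extended to 12 bytes
           * with seq_hi when ESN is enabled.  For the other algorithms the
           * hash covers the ESP header, IV, payload and trailer (everything
           * except the ICV), with the high sequence number folded in as
           * well when ESN is in use.
           */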
          ASSERT (sa0->integ_alg < IPSEC_INTEG_N_ALG);
          ASSERT (sa0->integ_alg != IPSEC_INTEG_ALG_NONE);

          if (PREDICT_FALSE (sa0->integ_alg == IPSEC_INTEG_ALG_AES_GCM_128))
            {
              u8 *aad = priv->aad;
              clib_memcpy (aad, vlib_buffer_get_current (b0) + ip_hdr_size,
                           8);
              sym_cop->auth.aad.data = aad;
              sym_cop->auth.aad.phys_addr = cop->phys_addr +
                (uintptr_t) aad - (uintptr_t) cop;

              if (PREDICT_FALSE (sa0->use_esn))
                {
                  *((u32 *) & aad[8]) = sa0->seq_hi;
                  sym_cop->auth.aad.length = 12;
                }
              else
                {
                  sym_cop->auth.aad.length = 8;
                }
            }
          else
            {
              sym_cop->auth.data.offset = ip_hdr_size;
              sym_cop->auth.data.length = b0->current_length - ip_hdr_size
                - em->esp_integ_algs[sa0->integ_alg].trunc_size;

              if (PREDICT_FALSE (sa0->use_esn))
                {
                  u8 *payload_end =
                    vlib_buffer_get_current (b0) + b0->current_length;
                  *((u32 *) payload_end) = sa0->seq_hi;
                  sym_cop->auth.data.length += sizeof (sa0->seq_hi);
                }
            }
          sym_cop->auth.digest.data = vlib_buffer_get_current (b0) +
            b0->current_length -
            em->esp_integ_algs[sa0->integ_alg].trunc_size;
          sym_cop->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset (mb0,
                                                                       b0->current_length
                                                                       -
                                                                       em->esp_integ_algs
                                                                       [sa0->integ_alg].trunc_size);
          sym_cop->auth.digest.length =
            em->esp_integ_algs[sa0->integ_alg].trunc_size;


          if (PREDICT_FALSE (is_ipv6))
            {
              oh6_0->ip6.payload_length =
                clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b0) -
                                      sizeof (ip6_header_t));
            }
          else
            {
              oh0->ip4.length =
                clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b0));
              oh0->ip4.checksum = ip4_header_checksum (&oh0->ip4);
            }

          if (transport_mode)
            vlib_buffer_advance (b0, -sizeof (ethernet_header_t));

        trace:
          if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
            {
              esp_encrypt_trace_t *tr =
                vlib_add_trace (vm, node, b0, sizeof (*tr));
              tr->spi = sa0->spi;
              tr->seq = sa0->seq - 1;
              tr->crypto_alg = sa0->crypto_alg;
              tr->integ_alg = sa0->integ_alg;
            }
        }
      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }
  vlib_node_increment_counter (vm, dpdk_esp_encrypt_node.index,
                               ESP_ENCRYPT_ERROR_RX_PKTS,
                               from_frame->n_vectors);
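  /*
   * Enqueue the staged crypto operations to each cryptodev queue pair in
   * one burst.  Operations (and their buffers) that do not fit are freed
   * and counted as enqueue failures.
   */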
  crypto_qp_data_t *qpd;
  /* *INDENT-OFF* */
  vec_foreach_index (i, cwm->qp_data)
    {
      u32 enq;

      qpd = vec_elt_at_index(cwm->qp_data, i);
      enq = rte_cryptodev_enqueue_burst(qpd->dev_id, qpd->qp_id,
                                        qpd->cops, n_cop_qp[i]);
      qpd->inflights += enq;

      if (PREDICT_FALSE(enq < n_cop_qp[i]))
        {
          crypto_free_cop (qpd, &qpd->cops[enq], n_cop_qp[i] - enq);
          vlib_buffer_free (vm, &qpd->bi[enq], n_cop_qp[i] - enq);

          vlib_node_increment_counter (vm, dpdk_esp_encrypt_node.index,
                                       ESP_ENCRYPT_ERROR_ENQ_FAIL,
                                       n_cop_qp[i] - enq);
        }
    }
  /* *INDENT-ON* */

  return from_frame->n_vectors;
}

/* *INDENT-OFF* */
VLIB_REGISTER_NODE (dpdk_esp_encrypt_node) = {
  .function = dpdk_esp_encrypt_node_fn,
  .name = "dpdk-esp-encrypt",
  .flags = VLIB_NODE_FLAG_IS_OUTPUT,
  .vector_size = sizeof (u32),
  .format_trace = format_esp_encrypt_trace,
  .n_errors = ARRAY_LEN (esp_encrypt_error_strings),
  .error_strings = esp_encrypt_error_strings,
  .n_next_nodes = 1,
  .next_nodes =
  {
    [ESP_ENCRYPT_NEXT_DROP] = "error-drop",
  },
};
/* *INDENT-ON* */

VLIB_NODE_FUNCTION_MULTIARCH (dpdk_esp_encrypt_node, dpdk_esp_encrypt_node_fn)
/*
 * ESP Encrypt Post Node
 */
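/*
 * Buffers arrive at this node after the cryptodev has completed the
 * corresponding operations; it simply forwards each buffer to the next
 * index the encrypt node stored in vnet_buffer()->unused[0].
 */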
#define foreach_esp_encrypt_post_error              \
 _(PKTS, "ESP post pkts")

typedef enum
{
#define _(sym,str) ESP_ENCRYPT_POST_ERROR_##sym,
  foreach_esp_encrypt_post_error
#undef _
    ESP_ENCRYPT_POST_N_ERROR,
} esp_encrypt_post_error_t;

static char *esp_encrypt_post_error_strings[] = {
#define _(sym,string) string,
  foreach_esp_encrypt_post_error
#undef _
};

vlib_node_registration_t dpdk_esp_encrypt_post_node;

static u8 *
format_esp_encrypt_post_trace (u8 * s, va_list * args)
{
  return s;
}

static uword
dpdk_esp_encrypt_post_node_fn (vlib_main_t * vm,
                               vlib_node_runtime_t * node,
                               vlib_frame_t * from_frame)
{
  u32 n_left_from, *from, *to_next = 0, next_index;

  from = vlib_frame_vector_args (from_frame);
  n_left_from = from_frame->n_vectors;

  next_index = node->cached_next_index;

  while (n_left_from > 0)
    {
      u32 n_left_to_next;

      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      while (n_left_from > 0 && n_left_to_next > 0)
        {
          u32 bi0, next0;
          vlib_buffer_t *b0 = 0;

          bi0 = from[0];
          from += 1;
          n_left_from -= 1;
          n_left_to_next -= 1;

          b0 = vlib_get_buffer (vm, bi0);

          to_next[0] = bi0;
          to_next += 1;

          next0 = vnet_buffer (b0)->unused[0];

          vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                           to_next, n_left_to_next, bi0,
                                           next0);
        }
      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

  vlib_node_increment_counter (vm, dpdk_esp_encrypt_post_node.index,
                               ESP_ENCRYPT_POST_ERROR_PKTS,
                               from_frame->n_vectors);

  return from_frame->n_vectors;
}

/* *INDENT-OFF* */
VLIB_REGISTER_NODE (dpdk_esp_encrypt_post_node) = {
  .function = dpdk_esp_encrypt_post_node_fn,
  .name = "dpdk-esp-encrypt-post",
  .vector_size = sizeof (u32),
  .format_trace = format_esp_encrypt_post_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .n_errors = ARRAY_LEN (esp_encrypt_post_error_strings),
  .error_strings = esp_encrypt_post_error_strings,
  .n_next_nodes = ESP_ENCRYPT_N_NEXT,
  .next_nodes =
  {
#define _(s,n) [ESP_ENCRYPT_NEXT_##s] = n,
    foreach_esp_encrypt_next
#undef _
  },
};
/* *INDENT-ON* */

VLIB_NODE_FUNCTION_MULTIARCH (dpdk_esp_encrypt_post_node,
                              dpdk_esp_encrypt_post_node_fn)
/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */