dpdk: fix ipsec coverity warnings
vpp.git: src/plugins/dpdk/ipsec/esp_encrypt.c
/*
 * esp_encrypt.c : IPSec ESP encrypt node using DPDK Cryptodev
 *
 * Copyright (c) 2016 Intel and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <vnet/vnet.h>
#include <vnet/api_errno.h>
#include <vnet/ip/ip.h>

#include <vnet/ipsec/ipsec.h>
#include <dpdk/ipsec/ipsec.h>
#include <dpdk/ipsec/esp.h>
#include <dpdk/device/dpdk.h>
#include <dpdk/device/dpdk_priv.h>

#define foreach_esp_encrypt_next                   \
_(DROP, "error-drop")                              \
_(IP4_LOOKUP, "ip4-lookup")                        \
_(IP6_LOOKUP, "ip6-lookup")                        \
_(INTERFACE_OUTPUT, "interface-output")

#define _(v, s) ESP_ENCRYPT_NEXT_##v,
typedef enum
{
  foreach_esp_encrypt_next
#undef _
    ESP_ENCRYPT_N_NEXT,
} esp_encrypt_next_t;

#define foreach_esp_encrypt_error                   \
 _(RX_PKTS, "ESP pkts received")                    \
 _(SEQ_CYCLED, "sequence number cycled")            \
 _(ENQ_FAIL, "Enqueue failed (buffer full)")        \
 _(NO_CRYPTODEV, "Cryptodev not configured")        \
 _(UNSUPPORTED, "Cipher/Auth not supported")

typedef enum
{
#define _(sym,str) ESP_ENCRYPT_ERROR_##sym,
  foreach_esp_encrypt_error
#undef _
    ESP_ENCRYPT_N_ERROR,
} esp_encrypt_error_t;

static char *esp_encrypt_error_strings[] = {
#define _(sym,string) string,
  foreach_esp_encrypt_error
#undef _
};

vlib_node_registration_t dpdk_esp_encrypt_node;

typedef struct
{
  u32 spi;
  u32 seq;
  ipsec_crypto_alg_t crypto_alg;
  ipsec_integ_alg_t integ_alg;
} esp_encrypt_trace_t;

/* packet trace format function */
static u8 *
format_esp_encrypt_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  esp_encrypt_trace_t *t = va_arg (*args, esp_encrypt_trace_t *);

  s = format (s, "esp: spi %u seq %u crypto %U integrity %U",
              t->spi, t->seq,
              format_ipsec_crypto_alg, t->crypto_alg,
              format_ipsec_integ_alg, t->integ_alg);
  return s;
}

static uword
dpdk_esp_encrypt_node_fn (vlib_main_t * vm,
                          vlib_node_runtime_t * node,
                          vlib_frame_t * from_frame)
{
  u32 n_left_from, *from, *to_next, next_index;
  ipsec_main_t *im = &ipsec_main;
  u32 thread_index = vlib_get_thread_index ();
  dpdk_crypto_main_t *dcm = &dpdk_crypto_main;
  dpdk_esp_main_t *em = &dpdk_esp_main;
  u32 i;

  from = vlib_frame_vector_args (from_frame);
  n_left_from = from_frame->n_vectors;

  if (PREDICT_FALSE (!dcm->workers_main))
    {
      /* Likely there are not enough cryptodevs, so drop frame */
      vlib_node_increment_counter (vm, dpdk_esp_encrypt_node.index,
                                   ESP_ENCRYPT_ERROR_NO_CRYPTODEV,
                                   n_left_from);
      vlib_buffer_free (vm, from, n_left_from);
      return n_left_from;
    }

  crypto_worker_main_t *cwm =
    vec_elt_at_index (dcm->workers_main, thread_index);
  u32 n_qps = vec_len (cwm->qp_data);
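  /* Per-queue-pair staging: crypto ops and buffer indices are collected
   * here per qp and enqueued to the cryptodev in one burst at the end of
   * the frame; n_cop_qp[] counts how many ops each qp received. */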
  struct rte_crypto_op **cops_to_enq[n_qps];
  u32 n_cop_qp[n_qps], *bi_to_enq[n_qps];

  for (i = 0; i < n_qps; i++)
    {
      bi_to_enq[i] = cwm->qp_data[i].bi;
      cops_to_enq[i] = cwm->qp_data[i].cops;
    }

  memset (n_cop_qp, 0, n_qps * sizeof (u32));

  crypto_alloc_cops ();

  next_index = ESP_ENCRYPT_NEXT_DROP;
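  /* Note: only errored packets are enqueued to a next node here (drop).
   * Successfully prepared packets are handed to the cryptodev; when the
   * crypto operation completes they re-enter the graph via
   * dpdk-esp-encrypt-post, using the next index stashed in
   * vnet_buffer (b0)->unused[0] below. */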

  while (n_left_from > 0)
    {
      u32 n_left_to_next;

      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      while (n_left_from > 0 && n_left_to_next > 0)
        {
          u32 bi0, next0;
          vlib_buffer_t *b0 = 0;
          u32 sa_index0;
          ipsec_sa_t *sa0;
          ip4_and_esp_header_t *ih0, *oh0 = 0;
          ip6_and_esp_header_t *ih6_0, *oh6_0 = 0;
          struct rte_mbuf *mb0 = 0;
          esp_footer_t *f0;
          u8 is_ipv6;
          u8 ip_hdr_size;
          u8 next_hdr_type;
          u8 transport_mode = 0;
          const int BLOCK_SIZE = 16;
          u32 iv_size;
          u16 orig_sz;
          crypto_sa_session_t *sa_sess;
          void *sess;
          struct rte_crypto_op *cop = 0;
          u16 qp_index;

          bi0 = from[0];
          from += 1;
          n_left_from -= 1;

          b0 = vlib_get_buffer (vm, bi0);
          sa_index0 = vnet_buffer (b0)->ipsec.sad_index;
          sa0 = pool_elt_at_index (im->sad, sa_index0);

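          /* Bump the outbound sequence number; if the sequence number
           * space is exhausted the packet is dropped and counted as
           * SEQ_CYCLED (re-keying the SA is left as a TODO). */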
          if (PREDICT_FALSE (esp_seq_advance (sa0)))
            {
              clib_warning ("sequence number counter has cycled SPI %u",
                            sa0->spi);
              vlib_node_increment_counter (vm, dpdk_esp_encrypt_node.index,
                                           ESP_ENCRYPT_ERROR_SEQ_CYCLED, 1);
              /* TODO: rekey SA */
              to_next[0] = bi0;
              to_next += 1;
              n_left_to_next -= 1;
              goto trace;
            }

          sa0->total_data_size += b0->current_length;

          sa_sess = pool_elt_at_index (cwm->sa_sess_d[1], sa_index0);
          if (PREDICT_FALSE (!sa_sess->sess))
            {
              int ret = create_sym_sess (sa0, sa_sess, 1);

              if (PREDICT_FALSE (ret))
                {
                  to_next[0] = bi0;
                  to_next += 1;
                  n_left_to_next -= 1;
                  goto trace;
                }
            }

          qp_index = sa_sess->qp_index;
          sess = sa_sess->sess;

          ASSERT (vec_len (vec_elt (cwm->qp_data, qp_index).free_cops) > 0);
          cop = vec_pop (vec_elt (cwm->qp_data, qp_index).free_cops);
          ASSERT (cop->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED);

          cops_to_enq[qp_index][0] = cop;
          cops_to_enq[qp_index] += 1;
          n_cop_qp[qp_index] += 1;
          bi_to_enq[qp_index][0] = bi0;
          bi_to_enq[qp_index] += 1;

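          /* Compute how far the buffer start must move back to make room
           * for the new headers: a full outer IP + ESP header in tunnel
           * mode, or just an ESP header in transport mode, where the
           * existing IP header is excluded from the size to be encrypted. */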
          ssize_t adv;
          iv_size = em->esp_crypto_algs[sa0->crypto_alg].iv_len;
          ih0 = vlib_buffer_get_current (b0);
          orig_sz = b0->current_length;
          is_ipv6 = (ih0->ip4.ip_version_and_header_length & 0xF0) == 0x60;

          if (PREDICT_TRUE (sa0->is_tunnel))
            {
              if (PREDICT_TRUE (!is_ipv6))
                adv = -sizeof (ip4_and_esp_header_t);
              else
                adv = -sizeof (ip6_and_esp_header_t);
            }
          else
            {
              adv = -sizeof (esp_header_t);
              if (PREDICT_TRUE (!is_ipv6))
                orig_sz -= sizeof (ip4_header_t);
              else
                orig_sz -= sizeof (ip6_header_t);
            }

          /* In transport mode, save the ethernet header before it is
             overwritten */
          if (PREDICT_FALSE (!sa0->is_tunnel))
            {
              ethernet_header_t *ieh0 = (ethernet_header_t *)
                ((u8 *) vlib_buffer_get_current (b0) -
                 sizeof (ethernet_header_t));
              ethernet_header_t *oeh0 =
                (ethernet_header_t *) ((u8 *) ieh0 + (adv - iv_size));
              clib_memcpy (oeh0, ieh0, sizeof (ethernet_header_t));
            }

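          /* Move the buffer start back by the new header size plus the
           * per-packet IV that will sit right after the ESP header. */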
          vlib_buffer_advance (b0, adv - iv_size);

          /* XXX IP6/IP4 and IP4/IP6 not supported, only IP4/IP4 and IP6/IP6 */

          /* is ipv6 */
          if (PREDICT_FALSE (is_ipv6))
            {
              ih6_0 = (ip6_and_esp_header_t *) ih0;
              ip_hdr_size = sizeof (ip6_header_t);
              oh6_0 = vlib_buffer_get_current (b0);

              if (PREDICT_TRUE (sa0->is_tunnel))
                {
                  next_hdr_type = IP_PROTOCOL_IPV6;
                  oh6_0->ip6.ip_version_traffic_class_and_flow_label =
                    ih6_0->ip6.ip_version_traffic_class_and_flow_label;
                }
              else
                {
                  next_hdr_type = ih6_0->ip6.protocol;
                  memmove (oh6_0, ih6_0, sizeof (ip6_header_t));
                }

              oh6_0->ip6.protocol = IP_PROTOCOL_IPSEC_ESP;
              oh6_0->ip6.hop_limit = 254;
              oh6_0->esp.spi = clib_net_to_host_u32 (sa0->spi);
              oh6_0->esp.seq = clib_net_to_host_u32 (sa0->seq);
            }
          else
            {
              ip_hdr_size = sizeof (ip4_header_t);
              oh0 = vlib_buffer_get_current (b0);

              if (PREDICT_TRUE (sa0->is_tunnel))
                {
                  next_hdr_type = IP_PROTOCOL_IP_IN_IP;
                  oh0->ip4.tos = ih0->ip4.tos;
                }
              else
                {
                  next_hdr_type = ih0->ip4.protocol;
                  memmove (oh0, ih0, sizeof (ip4_header_t));
                }

              oh0->ip4.ip_version_and_header_length = 0x45;
              oh0->ip4.fragment_id = 0;
              oh0->ip4.flags_and_fragment_offset = 0;
              oh0->ip4.ttl = 254;
              oh0->ip4.protocol = IP_PROTOCOL_IPSEC_ESP;
              oh0->esp.spi = clib_net_to_host_u32 (sa0->spi);
              oh0->esp.seq = clib_net_to_host_u32 (sa0->seq);
            }

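          /* Pick the next node used once encryption completes: tunnel-mode
           * packets get the SA's tunnel addresses and go back through IP
           * lookup; transport-mode packets keep their addresses and go
           * straight to interface output. */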
          if (PREDICT_TRUE
              (!is_ipv6 && sa0->is_tunnel && !sa0->is_tunnel_ip6))
            {
              oh0->ip4.src_address.as_u32 = sa0->tunnel_src_addr.ip4.as_u32;
              oh0->ip4.dst_address.as_u32 = sa0->tunnel_dst_addr.ip4.as_u32;

              /* in tunnel mode send it back to FIB */
              next0 = ESP_ENCRYPT_NEXT_IP4_LOOKUP;
              vnet_buffer (b0)->sw_if_index[VLIB_TX] = (u32) ~ 0;
            }
          else if (is_ipv6 && sa0->is_tunnel && sa0->is_tunnel_ip6)
            {
              oh6_0->ip6.src_address.as_u64[0] =
                sa0->tunnel_src_addr.ip6.as_u64[0];
              oh6_0->ip6.src_address.as_u64[1] =
                sa0->tunnel_src_addr.ip6.as_u64[1];
              oh6_0->ip6.dst_address.as_u64[0] =
                sa0->tunnel_dst_addr.ip6.as_u64[0];
              oh6_0->ip6.dst_address.as_u64[1] =
                sa0->tunnel_dst_addr.ip6.as_u64[1];

              /* in tunnel mode send it back to FIB */
              next0 = ESP_ENCRYPT_NEXT_IP6_LOOKUP;
              vnet_buffer (b0)->sw_if_index[VLIB_TX] = (u32) ~ 0;
            }
          else
            {
              next0 = ESP_ENCRYPT_NEXT_INTERFACE_OUTPUT;
              transport_mode = 1;
            }

          ASSERT (sa0->crypto_alg < IPSEC_CRYPTO_N_ALG);
          ASSERT (sa0->crypto_alg != IPSEC_CRYPTO_ALG_NONE);

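          /* ESP padding: round the payload up to a whole number of cipher
           * blocks, leaving 2 bytes for the pad-length/next-header footer.
           * E.g. with orig_sz = 60: blocks = 1 + 61 / 16 = 4, so
           * pad_bytes = 64 - 2 - 60 = 2, and 60 + 2 + 2 = 64 = 4 blocks. */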
          int blocks = 1 + (orig_sz + 1) / BLOCK_SIZE;

          /* pad packet in input buffer */
          u8 pad_bytes = BLOCK_SIZE * blocks - 2 - orig_sz;
          u8 i;
          u8 *padding = vlib_buffer_get_current (b0) + b0->current_length;

          for (i = 0; i < pad_bytes; ++i)
            padding[i] = i + 1;

          f0 = vlib_buffer_get_current (b0) + b0->current_length + pad_bytes;
          f0->pad_length = pad_bytes;
          f0->next_header = next_hdr_type;
          b0->current_length += pad_bytes + 2 +
            em->esp_integ_algs[sa0->integ_alg].trunc_size;

          vnet_buffer (b0)->sw_if_index[VLIB_RX] =
            vnet_buffer (b0)->sw_if_index[VLIB_RX];
          b0->flags |= VLIB_BUFFER_TOTAL_LENGTH_VALID;

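          /* The symmetric op sits immediately after the rte_crypto_op, and
           * the per-op private area (IV counter block and AAD scratch used
           * below) follows the symmetric op, matching the layout of the ops
           * handed out by crypto_alloc_cops (). */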
          struct rte_crypto_sym_op *sym_cop;
          sym_cop = (struct rte_crypto_sym_op *) (cop + 1);

          dpdk_cop_priv_t *priv = (dpdk_cop_priv_t *) (sym_cop + 1);

          vnet_buffer (b0)->unused[0] = next0;

          mb0 = rte_mbuf_from_vlib_buffer (b0);
          mb0->data_len = b0->current_length;
          mb0->pkt_len = b0->current_length;
          mb0->data_off = RTE_PKTMBUF_HEADROOM + b0->current_data;

          rte_crypto_op_attach_sym_session (cop, sess);

          sym_cop->m_src = mb0;

          dpdk_gcm_cnt_blk *icb = &priv->cb;
          icb->salt = sa0->salt;
          icb->iv[0] = sa0->seq;
          icb->iv[1] = sa0->seq_hi;

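          /* AES-GCM: the cipher IV is the 16-byte counter block
           * (salt | seq | seq_hi | counter = 1), and the 8-byte IV is also
           * written into the packet after the ESP header; the cipher range
           * then starts after that IV. Other ciphers use a plain IV of
           * iv_size bytes and include the in-packet IV in the cipher range. */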
          if (sa0->crypto_alg == IPSEC_CRYPTO_ALG_AES_GCM_128)
            {
              icb->cnt = clib_host_to_net_u32 (1);
              clib_memcpy (vlib_buffer_get_current (b0) + ip_hdr_size +
                           sizeof (esp_header_t), icb->iv, 8);
              sym_cop->cipher.data.offset =
                ip_hdr_size + sizeof (esp_header_t) + iv_size;
              sym_cop->cipher.data.length = BLOCK_SIZE * blocks;
              sym_cop->cipher.iv.length = 16;
            }
          else
            {
              sym_cop->cipher.data.offset =
                ip_hdr_size + sizeof (esp_header_t);
              sym_cop->cipher.data.length = BLOCK_SIZE * blocks + iv_size;
              sym_cop->cipher.iv.length = iv_size;
            }

          sym_cop->cipher.iv.data = (u8 *) icb;
          sym_cop->cipher.iv.phys_addr = cop->phys_addr + (uintptr_t) icb
            - (uintptr_t) cop;

          ASSERT (sa0->integ_alg < IPSEC_INTEG_N_ALG);
          ASSERT (sa0->integ_alg != IPSEC_INTEG_ALG_NONE);

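          /* Authentication: for GCM the AAD is the ESP header (SPI + seq),
           * extended to 12 bytes with seq_hi when extended sequence numbers
           * are in use. For other integrity algorithms the auth range covers
           * the ESP header and payload up to (but not including) the ICV,
           * with seq_hi written past the payload and included in the
           * authenticated length when ESN is enabled. */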
          if (PREDICT_FALSE (sa0->integ_alg == IPSEC_INTEG_ALG_AES_GCM_128))
            {
              u8 *aad = priv->aad;
              clib_memcpy (aad, vlib_buffer_get_current (b0) + ip_hdr_size,
                           8);
              sym_cop->auth.aad.data = aad;
              sym_cop->auth.aad.phys_addr = cop->phys_addr +
                (uintptr_t) aad - (uintptr_t) cop;

              if (PREDICT_FALSE (sa0->use_esn))
                {
                  *((u32 *) & aad[8]) = sa0->seq_hi;
                  sym_cop->auth.aad.length = 12;
                }
              else
                {
                  sym_cop->auth.aad.length = 8;
                }
            }
          else
            {
              sym_cop->auth.data.offset = ip_hdr_size;
              sym_cop->auth.data.length = b0->current_length - ip_hdr_size
                - em->esp_integ_algs[sa0->integ_alg].trunc_size;

              if (PREDICT_FALSE (sa0->use_esn))
                {
                  u8 *payload_end =
                    vlib_buffer_get_current (b0) + b0->current_length;
                  *((u32 *) payload_end) = sa0->seq_hi;
                  sym_cop->auth.data.length += sizeof (sa0->seq_hi);
                }
            }
          sym_cop->auth.digest.data = vlib_buffer_get_current (b0) +
            b0->current_length -
            em->esp_integ_algs[sa0->integ_alg].trunc_size;
          sym_cop->auth.digest.phys_addr =
            rte_pktmbuf_mtophys_offset (mb0, b0->current_length -
                                        em->esp_integ_algs
                                        [sa0->integ_alg].trunc_size);
          sym_cop->auth.digest.length =
            em->esp_integ_algs[sa0->integ_alg].trunc_size;

          if (PREDICT_FALSE (is_ipv6))
            {
              oh6_0->ip6.payload_length =
                clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b0) -
                                      sizeof (ip6_header_t));
            }
          else
            {
              oh0->ip4.length =
                clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b0));
              oh0->ip4.checksum = ip4_header_checksum (&oh0->ip4);
            }

          if (transport_mode)
            vlib_buffer_advance (b0, -sizeof (ethernet_header_t));

        trace:
          if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
            {
              esp_encrypt_trace_t *tr =
                vlib_add_trace (vm, node, b0, sizeof (*tr));
              tr->spi = sa0->spi;
              tr->seq = sa0->seq - 1;
              tr->crypto_alg = sa0->crypto_alg;
              tr->integ_alg = sa0->integ_alg;
            }
        }
      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }
  vlib_node_increment_counter (vm, dpdk_esp_encrypt_node.index,
                               ESP_ENCRYPT_ERROR_RX_PKTS,
                               from_frame->n_vectors);
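
  /* Burst-enqueue the staged ops to each queue pair. Ops the device could
   * not accept are returned to the free list, their buffers are freed, and
   * the failures are counted as ESP_ENCRYPT_ERROR_ENQ_FAIL. */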
  crypto_qp_data_t *qpd;
  /* *INDENT-OFF* */
  vec_foreach_index (i, cwm->qp_data)
    {
      u32 enq;

      qpd = vec_elt_at_index(cwm->qp_data, i);
      enq = rte_cryptodev_enqueue_burst(qpd->dev_id, qpd->qp_id,
                                        qpd->cops, n_cop_qp[i]);
      qpd->inflights += enq;

      if (PREDICT_FALSE(enq < n_cop_qp[i]))
        {
          crypto_free_cop (qpd, &qpd->cops[enq], n_cop_qp[i] - enq);
          vlib_buffer_free (vm, &qpd->bi[enq], n_cop_qp[i] - enq);

          vlib_node_increment_counter (vm, dpdk_esp_encrypt_node.index,
                                       ESP_ENCRYPT_ERROR_ENQ_FAIL,
                                       n_cop_qp[i] - enq);
        }
    }
  /* *INDENT-ON* */

  return from_frame->n_vectors;
}

VLIB_REGISTER_NODE (dpdk_esp_encrypt_node) = {
  .function = dpdk_esp_encrypt_node_fn,
  .name = "dpdk-esp-encrypt",
  .flags = VLIB_NODE_FLAG_IS_OUTPUT,
  .vector_size = sizeof (u32),
  .format_trace = format_esp_encrypt_trace,
  .n_errors = ARRAY_LEN (esp_encrypt_error_strings),
  .error_strings = esp_encrypt_error_strings,
  .n_next_nodes = 1,
  .next_nodes = {
    [ESP_ENCRYPT_NEXT_DROP] = "error-drop",
  },
};

VLIB_NODE_FUNCTION_MULTIARCH (dpdk_esp_encrypt_node, dpdk_esp_encrypt_node_fn)
/*
 * ESP Encrypt Post Node
 */
#define foreach_esp_encrypt_post_error              \
 _(PKTS, "ESP post pkts")

typedef enum
{
#define _(sym,str) ESP_ENCRYPT_POST_ERROR_##sym,
  foreach_esp_encrypt_post_error
#undef _
    ESP_ENCRYPT_POST_N_ERROR,
} esp_encrypt_post_error_t;

static char *esp_encrypt_post_error_strings[] = {
#define _(sym,string) string,
  foreach_esp_encrypt_post_error
#undef _
};

vlib_node_registration_t dpdk_esp_encrypt_post_node;

static u8 *
format_esp_encrypt_post_trace (u8 * s, va_list * args)
{
  return s;
}

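/* The post node runs once the crypto result has been collected from the
 * cryptodev; it simply forwards each buffer to the next index the encrypt
 * node stashed in vnet_buffer (b0)->unused[0]. */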
static uword
dpdk_esp_encrypt_post_node_fn (vlib_main_t * vm,
                               vlib_node_runtime_t * node,
                               vlib_frame_t * from_frame)
{
  u32 n_left_from, *from, *to_next = 0, next_index;

  from = vlib_frame_vector_args (from_frame);
  n_left_from = from_frame->n_vectors;

  next_index = node->cached_next_index;

  while (n_left_from > 0)
    {
      u32 n_left_to_next;

      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      while (n_left_from > 0 && n_left_to_next > 0)
        {
          u32 bi0, next0;
          vlib_buffer_t *b0 = 0;

          bi0 = from[0];
          from += 1;
          n_left_from -= 1;
          n_left_to_next -= 1;

          b0 = vlib_get_buffer (vm, bi0);

          to_next[0] = bi0;
          to_next += 1;

          next0 = vnet_buffer (b0)->unused[0];

          vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                           to_next, n_left_to_next, bi0,
                                           next0);
        }
      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

  vlib_node_increment_counter (vm, dpdk_esp_encrypt_post_node.index,
                               ESP_ENCRYPT_POST_ERROR_PKTS,
                               from_frame->n_vectors);

  return from_frame->n_vectors;
}

VLIB_REGISTER_NODE (dpdk_esp_encrypt_post_node) = {
  .function = dpdk_esp_encrypt_post_node_fn,
  .name = "dpdk-esp-encrypt-post",
  .vector_size = sizeof (u32),
  .format_trace = format_esp_encrypt_post_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .n_errors = ARRAY_LEN (esp_encrypt_post_error_strings),
  .error_strings = esp_encrypt_post_error_strings,
  .n_next_nodes = ESP_ENCRYPT_N_NEXT,
  .next_nodes = {
#define _(s,n) [ESP_ENCRYPT_NEXT_##s] = n,
    foreach_esp_encrypt_next
#undef _
  },
};

VLIB_NODE_FUNCTION_MULTIARCH (dpdk_esp_encrypt_post_node,
                              dpdk_esp_encrypt_post_node_fn)
/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */