dpdk: rework cryptodev ipsec build and setup
src/vnet/devices/dpdk/ipsec/esp_decrypt.c
/*
 * esp_decrypt.c : IPSec ESP Decrypt node using DPDK Cryptodev
 *
 * Copyright (c) 2016 Intel and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <vnet/vnet.h>
#include <vnet/api_errno.h>
#include <vnet/ip/ip.h>

#include <vnet/ipsec/ipsec.h>
#include <vnet/devices/dpdk/ipsec/ipsec.h>
#include <vnet/devices/dpdk/ipsec/esp.h>
#include <vnet/devices/dpdk/dpdk.h>
#include <vnet/devices/dpdk/dpdk_priv.h>

#define foreach_esp_decrypt_next               \
_(DROP, "error-drop")                          \
_(IP4_INPUT, "ip4-input")                      \
_(IP6_INPUT, "ip6-input")

#define _(v, s) ESP_DECRYPT_NEXT_##v,
typedef enum {
  foreach_esp_decrypt_next
#undef _
  ESP_DECRYPT_N_NEXT,
} esp_decrypt_next_t;

#define foreach_esp_decrypt_error                \
 _(RX_PKTS, "ESP pkts received")                 \
 _(DECRYPTION_FAILED, "ESP decryption failed")   \
 _(REPLAY, "SA replayed packet")                 \
 _(NOT_IP, "Not IP packet (dropped)")            \
 _(ENQ_FAIL, "Enqueue failed (buffer full)")     \
 _(NO_CRYPTODEV, "Cryptodev not configured")     \
 _(BAD_LEN, "Invalid ciphertext length")         \
 _(UNSUPPORTED, "Cipher/Auth not supported")

typedef enum {
#define _(sym,str) ESP_DECRYPT_ERROR_##sym,
  foreach_esp_decrypt_error
#undef _
  ESP_DECRYPT_N_ERROR,
} esp_decrypt_error_t;

static char * esp_decrypt_error_strings[] = {
#define _(sym,string) string,
  foreach_esp_decrypt_error
#undef _
};

vlib_node_registration_t dpdk_esp_decrypt_node;

typedef struct {
  ipsec_crypto_alg_t crypto_alg;
  ipsec_integ_alg_t integ_alg;
} esp_decrypt_trace_t;

/* packet trace format function */
static u8 * format_esp_decrypt_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  esp_decrypt_trace_t * t = va_arg (*args, esp_decrypt_trace_t *);

  s = format (s, "esp: crypto %U integrity %U",
              format_ipsec_crypto_alg, t->crypto_alg,
              format_ipsec_integ_alg, t->integ_alg);
  return s;
}

static uword
dpdk_esp_decrypt_node_fn (vlib_main_t * vm,
             vlib_node_runtime_t * node,
             vlib_frame_t * from_frame)
{
  u32 n_left_from, *from, *to_next, next_index;
  ipsec_main_t *im = &ipsec_main;
  u32 cpu_index = os_get_cpu_number();
  dpdk_crypto_main_t * dcm = &dpdk_crypto_main;
  dpdk_esp_main_t * em = &dpdk_esp_main;
  u32 i;

  from = vlib_frame_vector_args (from_frame);
  n_left_from = from_frame->n_vectors;

  if (PREDICT_FALSE(!dcm->workers_main))
    {
      vlib_node_increment_counter (vm, dpdk_esp_decrypt_node.index,
              ESP_DECRYPT_ERROR_NO_CRYPTODEV, n_left_from);
      vlib_buffer_free(vm, from, n_left_from);
      return n_left_from;
    }

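  /* Per queue-pair staging: write cursors into each qp's crypto-op and
   * buffer-index arrays, plus a count of ops queued per qp in this frame. */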
  crypto_worker_main_t *cwm = vec_elt_at_index(dcm->workers_main, cpu_index);
  u32 n_qps = vec_len(cwm->qp_data);
  struct rte_crypto_op ** cops_to_enq[n_qps];
  u32 n_cop_qp[n_qps], * bi_to_enq[n_qps];

  for (i = 0; i < n_qps; i++)
    {
      bi_to_enq[i] = cwm->qp_data[i].bi;
      cops_to_enq[i] = cwm->qp_data[i].cops;
    }

  memset(n_cop_qp, 0, n_qps * sizeof(u32));

  crypto_alloc_cops();

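  /* Only packets that fail before the cryptodev enqueue are forwarded from
   * this node, straight to error-drop; successfully prepared packets are
   * handed to the crypto device and are picked up later by the
   * dpdk-esp-decrypt-post node below. */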
  next_index = ESP_DECRYPT_NEXT_DROP;

  while (n_left_from > 0)
    {
      u32 n_left_to_next;

      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      while (n_left_from > 0 && n_left_to_next > 0)
        {
          u32 bi0, sa_index0 = ~0, seq, icv_size, iv_size;
          vlib_buffer_t * b0;
          esp_header_t * esp0;
          ipsec_sa_t * sa0;
          struct rte_mbuf * mb0 = 0;
          const int BLOCK_SIZE = 16;
          crypto_sa_session_t * sa_sess;
          void * sess;
          u16 qp_index;
          struct rte_crypto_op * cop = 0;

          bi0 = from[0];
          from += 1;
          n_left_from -= 1;

          b0 = vlib_get_buffer (vm, bi0);
          esp0 = vlib_buffer_get_current (b0);

          sa_index0 = vnet_buffer(b0)->ipsec.sad_index;
          sa0 = pool_elt_at_index (im->sad, sa_index0);

          seq = clib_host_to_net_u32(esp0->seq);

          /* anti-replay check */
          if (sa0->use_anti_replay)
            {
              int rv = 0;

              if (PREDICT_TRUE(sa0->use_esn))
                rv = esp_replay_check_esn(sa0, seq);
              else
                rv = esp_replay_check(sa0, seq);

              if (PREDICT_FALSE(rv))
                {
                  clib_warning ("anti-replay SPI %u seq %u", sa0->spi, seq);
                  vlib_node_increment_counter (vm, dpdk_esp_decrypt_node.index,
                                               ESP_DECRYPT_ERROR_REPLAY, 1);
                  to_next[0] = bi0;
                  to_next += 1;
                  n_left_to_next -= 1;
                  goto trace;
                }
            }

          if (PREDICT_FALSE(sa0->integ_alg == IPSEC_INTEG_ALG_NONE) ||
                  PREDICT_FALSE(sa0->crypto_alg == IPSEC_CRYPTO_ALG_NONE))
            {
              clib_warning ("SPI %u : only cipher + auth supported", sa0->spi);
              vlib_node_increment_counter (vm, dpdk_esp_decrypt_node.index,
                                           ESP_DECRYPT_ERROR_UNSUPPORTED, 1);
              to_next[0] = bi0;
              to_next += 1;
              n_left_to_next -= 1;
              goto trace;
            }

          sa_sess = pool_elt_at_index(cwm->sa_sess_d[0], sa_index0);

          if (PREDICT_FALSE(!sa_sess->sess))
            {
              int ret = create_sym_sess(sa0, sa_sess, 0);

              if (PREDICT_FALSE (ret))
                {
                  to_next[0] = bi0;
                  to_next += 1;
                  n_left_to_next -= 1;
                  goto trace;
                }
            }

          sess = sa_sess->sess;
          qp_index = sa_sess->qp_index;

          ASSERT (vec_len (vec_elt (cwm->qp_data, qp_index).free_cops) > 0);
          cop = vec_pop (vec_elt (cwm->qp_data, qp_index).free_cops);
          ASSERT (cop->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED);

          cops_to_enq[qp_index][0] = cop;
          cops_to_enq[qp_index] += 1;
          n_cop_qp[qp_index] += 1;
          bi_to_enq[qp_index][0] = bi0;
          bi_to_enq[qp_index] += 1;

          rte_crypto_op_attach_sym_session(cop, sess);

          icv_size = em->esp_integ_algs[sa0->integ_alg].trunc_size;
          iv_size = em->esp_crypto_algs[sa0->crypto_alg].iv_len;

          /* Convert vlib buffer to mbuf */
          mb0 = rte_mbuf_from_vlib_buffer(b0);
          mb0->data_len = b0->current_length;
          mb0->pkt_len = b0->current_length;
          mb0->data_off = RTE_PKTMBUF_HEADROOM + b0->current_data;

          /* Outer IP header has already been stripped */
          u16 payload_len = rte_pktmbuf_pkt_len(mb0) - sizeof (esp_header_t) -
              iv_size - icv_size;

          if ((payload_len & (BLOCK_SIZE - 1)) || (payload_len <= 0))
            {
              clib_warning ("payload %u not multiple of %d\n",
                            payload_len, BLOCK_SIZE);
              vlib_node_increment_counter (vm, dpdk_esp_decrypt_node.index,
                                           ESP_DECRYPT_ERROR_BAD_LEN, 1);
              vec_add (vec_elt (cwm->qp_data, qp_index).free_cops, &cop, 1);
              bi_to_enq[qp_index] -= 1;
              cops_to_enq[qp_index] -= 1;
              n_cop_qp[qp_index] -= 1;
              to_next[0] = bi0;
              to_next += 1;
              n_left_to_next -= 1;
              goto trace;
            }

          struct rte_crypto_sym_op *sym_cop = (struct rte_crypto_sym_op *)(cop + 1);

          sym_cop->m_src = mb0;
          sym_cop->cipher.data.offset = sizeof (esp_header_t) + iv_size;
          sym_cop->cipher.data.length = payload_len;

          u8 *iv = rte_pktmbuf_mtod_offset(mb0, void*, sizeof (esp_header_t));
          dpdk_cop_priv_t * priv = (dpdk_cop_priv_t *)(sym_cop + 1);

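          /* AES-GCM: build a 16-byte counter block (salt | 8-byte IV |
           * counter = 1) in the op's private area and pass the ESP header
           * (SPI + seq, plus seq_hi when ESN is in use) as AAD. */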
          if (sa0->crypto_alg == IPSEC_CRYPTO_ALG_AES_GCM_128)
            {
              dpdk_gcm_cnt_blk *icb = &priv->cb;
              icb->salt = sa0->salt;
              clib_memcpy(icb->iv, iv, 8);
              icb->cnt = clib_host_to_net_u32(1);
              sym_cop->cipher.iv.data = (u8 *)icb;
              sym_cop->cipher.iv.phys_addr = cop->phys_addr +
                (uintptr_t)icb - (uintptr_t)cop;
              sym_cop->cipher.iv.length = 16;

              u8 *aad = priv->aad;
              clib_memcpy(aad, iv - sizeof(esp_header_t), 8);
              sym_cop->auth.aad.data = aad;
              sym_cop->auth.aad.phys_addr = cop->phys_addr +
                  (uintptr_t)aad - (uintptr_t)cop;
              if (sa0->use_esn)
                {
                  *((u32*)&aad[8]) = sa0->seq_hi;
                  sym_cop->auth.aad.length = 12;
                }
              else
                {
                  sym_cop->auth.aad.length = 8;
                }

              sym_cop->auth.digest.data = rte_pktmbuf_mtod_offset(mb0, void*,
                       rte_pktmbuf_pkt_len(mb0) - icv_size);
              sym_cop->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(mb0,
                       rte_pktmbuf_pkt_len(mb0) - icv_size);
              sym_cop->auth.digest.length = icv_size;

            }
          else
            {
              sym_cop->cipher.iv.data = rte_pktmbuf_mtod_offset(mb0, void*,
                       sizeof (esp_header_t));
              sym_cop->cipher.iv.phys_addr = rte_pktmbuf_mtophys_offset(mb0,
                       sizeof (esp_header_t));
              sym_cop->cipher.iv.length = iv_size;

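              /* ESN: stash the ICV in the op's private area and overwrite
               * its place in the packet with seq_hi, so the high sequence
               * bits are covered by the integrity check; the digest is then
               * verified against the stashed copy. */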
              if (sa0->use_esn)
                {
                  dpdk_cop_priv_t* priv = (dpdk_cop_priv_t*) (sym_cop + 1);
                  u8* payload_end = rte_pktmbuf_mtod_offset(
                      mb0, u8*, sizeof(esp_header_t) + iv_size + payload_len);

                  clib_memcpy (priv->icv, payload_end, icv_size);
                  *((u32*) payload_end) = sa0->seq_hi;
                  sym_cop->auth.data.offset = 0;
                  sym_cop->auth.data.length = sizeof(esp_header_t) + iv_size
                      + payload_len + sizeof(sa0->seq_hi);
                  sym_cop->auth.digest.data = priv->icv;
                  sym_cop->auth.digest.phys_addr = cop->phys_addr
                      + (uintptr_t) priv->icv - (uintptr_t) cop;
                  sym_cop->auth.digest.length = icv_size;
                }
              else
                {
                  sym_cop->auth.data.offset = 0;
                  sym_cop->auth.data.length = sizeof(esp_header_t) +
                           iv_size + payload_len;

                  sym_cop->auth.digest.data = rte_pktmbuf_mtod_offset(mb0, void*,
                           rte_pktmbuf_pkt_len(mb0) - icv_size);
                  sym_cop->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(mb0,
                           rte_pktmbuf_pkt_len(mb0) - icv_size);
                  sym_cop->auth.digest.length = icv_size;
                }
            }

trace:
          if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
            {
              esp_decrypt_trace_t *tr = vlib_add_trace (vm, node, b0, sizeof (*tr));
              tr->crypto_alg = sa0->crypto_alg;
              tr->integ_alg = sa0->integ_alg;
            }
        }
      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }
  vlib_node_increment_counter (vm, dpdk_esp_decrypt_node.index,
                               ESP_DECRYPT_ERROR_RX_PKTS,
                               from_frame->n_vectors);
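  /* Flush: enqueue the staged ops to each cryptodev queue pair; if a qp
   * cannot accept them all, reclaim the leftover ops and drop their buffers. */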
  crypto_qp_data_t *qpd;
  /* *INDENT-OFF* */
  vec_foreach_index (i, cwm->qp_data)
    {
      u32 enq;

      qpd = vec_elt_at_index(cwm->qp_data, i);
      enq = rte_cryptodev_enqueue_burst(qpd->dev_id, qpd->qp_id,
                                        qpd->cops, n_cop_qp[i]);
      qpd->inflights += enq;

      if (PREDICT_FALSE(enq < n_cop_qp[i]))
        {
          crypto_free_cop (qpd, &qpd->cops[enq], n_cop_qp[i] - enq);
          vlib_buffer_free (vm, &qpd->bi[enq], n_cop_qp[i] - enq);

          vlib_node_increment_counter (vm, dpdk_esp_decrypt_node.index,
                                       ESP_DECRYPT_ERROR_ENQ_FAIL,
                                       n_cop_qp[i] - enq);
        }
    }
  /* *INDENT-ON* */

  return from_frame->n_vectors;
}

VLIB_REGISTER_NODE (dpdk_esp_decrypt_node) = {
  .function = dpdk_esp_decrypt_node_fn,
  .name = "dpdk-esp-decrypt",
  .vector_size = sizeof (u32),
  .format_trace = format_esp_decrypt_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = ARRAY_LEN(esp_decrypt_error_strings),
  .error_strings = esp_decrypt_error_strings,

  .n_next_nodes = ESP_DECRYPT_N_NEXT,
  .next_nodes = {
#define _(s,n) [ESP_DECRYPT_NEXT_##s] = n,
    foreach_esp_decrypt_next
#undef _
  },
};

VLIB_NODE_FUNCTION_MULTIARCH (dpdk_esp_decrypt_node, dpdk_esp_decrypt_node_fn)

/*
 * Decrypt Post Node
 */

#define foreach_esp_decrypt_post_error        \
 _(PKTS, "ESP post pkts")

typedef enum {
#define _(sym,str) ESP_DECRYPT_POST_ERROR_##sym,
  foreach_esp_decrypt_post_error
#undef _
  ESP_DECRYPT_POST_N_ERROR,
} esp_decrypt_post_error_t;

static char * esp_decrypt_post_error_strings[] = {
#define _(sym,string) string,
  foreach_esp_decrypt_post_error
#undef _
};

vlib_node_registration_t dpdk_esp_decrypt_post_node;

static u8 * format_esp_decrypt_post_trace (u8 * s, va_list * args)
{
  return s;
}

static uword
dpdk_esp_decrypt_post_node_fn (vlib_main_t * vm,
             vlib_node_runtime_t * node,
             vlib_frame_t * from_frame)
{
  u32 n_left_from, *from, *to_next = 0, next_index;
  ipsec_sa_t * sa0;
  u32 sa_index0 = ~0;
  ipsec_main_t *im = &ipsec_main;
  dpdk_esp_main_t *em = &dpdk_esp_main;

  from = vlib_frame_vector_args (from_frame);
  n_left_from = from_frame->n_vectors;

  next_index = node->cached_next_index;

  while (n_left_from > 0)
    {
      u32 n_left_to_next;

      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      while (n_left_from > 0 && n_left_to_next > 0)
        {
          esp_footer_t * f0;
          u32 bi0, next0, icv_size, iv_size;
          vlib_buffer_t * b0 = 0;
          ip4_header_t *ih4 = 0, *oh4 = 0;
          ip6_header_t *ih6 = 0, *oh6 = 0;
          u8 tunnel_mode = 1;
          u8 transport_ip6 = 0;

          next0 = ESP_DECRYPT_NEXT_DROP;

          bi0 = from[0];
          from += 1;
          n_left_from -= 1;
          n_left_to_next -= 1;

          b0 = vlib_get_buffer (vm, bi0);

          sa_index0 = vnet_buffer(b0)->ipsec.sad_index;
          sa0 = pool_elt_at_index (im->sad, sa_index0);

          to_next[0] = bi0;
          to_next += 1;

          icv_size = em->esp_integ_algs[sa0->integ_alg].trunc_size;
          iv_size = em->esp_crypto_algs[sa0->crypto_alg].iv_len;

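          /* Advance the anti-replay window for the received sequence number. */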
          if (sa0->use_anti_replay)
            {
              esp_header_t * esp0 = vlib_buffer_get_current (b0);
              u32 seq;
              seq = clib_host_to_net_u32(esp0->seq);
              if (PREDICT_TRUE(sa0->use_esn))
                esp_replay_advance_esn(sa0, seq);
              else
                esp_replay_advance(sa0, seq);
            }

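          /* Keep a pointer to the received IP header (it still precedes the
           * ESP header in the buffer), then strip the ESP header and IV from
           * the front, and the ICV, footer (pad_length, next_header) and
           * padding from the tail. */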
          ih4 = (ip4_header_t *) (b0->data + sizeof(ethernet_header_t));
          vlib_buffer_advance (b0, sizeof (esp_header_t) + iv_size);

          b0->current_length -= (icv_size + 2);
          b0->flags = VLIB_BUFFER_TOTAL_LENGTH_VALID;
          f0 = (esp_footer_t *) ((u8 *) vlib_buffer_get_current (b0) +
                                 b0->current_length);
          b0->current_length -= f0->pad_length;

          /* transport mode */
          if (PREDICT_FALSE(!sa0->is_tunnel && !sa0->is_tunnel_ip6))
            {
              tunnel_mode = 0;

              if (PREDICT_TRUE((ih4->ip_version_and_header_length & 0xF0) != 0x40))
                {
                  if (PREDICT_TRUE((ih4->ip_version_and_header_length & 0xF0) == 0x60))
                    transport_ip6 = 1;
                  else
                    {
                      clib_warning("next header: 0x%x", f0->next_header);
                      vlib_node_increment_counter (vm, dpdk_esp_decrypt_node.index,
                                                   ESP_DECRYPT_ERROR_NOT_IP, 1);
                      goto trace;
                    }
                }
            }

          if (PREDICT_TRUE (tunnel_mode))
            {
              if (PREDICT_TRUE(f0->next_header == IP_PROTOCOL_IP_IN_IP))
                next0 = ESP_DECRYPT_NEXT_IP4_INPUT;
              else if (f0->next_header == IP_PROTOCOL_IPV6)
                next0 = ESP_DECRYPT_NEXT_IP6_INPUT;
              else
                {
                  clib_warning("next header: 0x%x", f0->next_header);
                  vlib_node_increment_counter (vm, dpdk_esp_decrypt_node.index,
                                               ESP_DECRYPT_ERROR_DECRYPTION_FAILED,
                                               1);
                  goto trace;
                }
            }
          /* transport mode */
          else
            {
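              /* Transport mode: move the original IP header down so it
               * directly precedes the decrypted payload, then patch the
               * protocol and length (and, for IPv4, checksum) fields. */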
              if (PREDICT_FALSE(transport_ip6))
                {
                  ih6 = (ip6_header_t *) (b0->data + sizeof(ethernet_header_t));
                  vlib_buffer_advance (b0, -sizeof(ip6_header_t));
                  oh6 = vlib_buffer_get_current (b0);
                  memmove(oh6, ih6, sizeof(ip6_header_t));

                  next0 = ESP_DECRYPT_NEXT_IP6_INPUT;
                  oh6->protocol = f0->next_header;
                  oh6->payload_length =
                      clib_host_to_net_u16 (
                          vlib_buffer_length_in_chain(vm, b0) -
                          sizeof (ip6_header_t));
                }
              else
                {
                  vlib_buffer_advance (b0, -sizeof(ip4_header_t));
                  oh4 = vlib_buffer_get_current (b0);
                  memmove(oh4, ih4, sizeof(ip4_header_t));

                  next0 = ESP_DECRYPT_NEXT_IP4_INPUT;
                  oh4->ip_version_and_header_length = 0x45;
                  oh4->fragment_id = 0;
                  oh4->flags_and_fragment_offset = 0;
                  oh4->protocol = f0->next_header;
                  oh4->length = clib_host_to_net_u16 (
                      vlib_buffer_length_in_chain (vm, b0));
                  oh4->checksum = ip4_header_checksum (oh4);
                }
            }

          vnet_buffer (b0)->sw_if_index[VLIB_TX] = (u32)~0;

trace:
          if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
            {
              esp_decrypt_trace_t *tr = vlib_add_trace (vm, node, b0, sizeof (*tr));
              tr->crypto_alg = sa0->crypto_alg;
              tr->integ_alg = sa0->integ_alg;
            }

          vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                           to_next, n_left_to_next, bi0, next0);
        }
      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }
  vlib_node_increment_counter (vm, dpdk_esp_decrypt_post_node.index,
                               ESP_DECRYPT_POST_ERROR_PKTS,
                               from_frame->n_vectors);

  return from_frame->n_vectors;
}

VLIB_REGISTER_NODE (dpdk_esp_decrypt_post_node) = {
  .function = dpdk_esp_decrypt_post_node_fn,
  .name = "dpdk-esp-decrypt-post",
  .vector_size = sizeof (u32),
  .format_trace = format_esp_decrypt_post_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = ARRAY_LEN(esp_decrypt_post_error_strings),
  .error_strings = esp_decrypt_post_error_strings,

  .n_next_nodes = ESP_DECRYPT_N_NEXT,
  .next_nodes = {
#define _(s,n) [ESP_DECRYPT_NEXT_##s] = n,
    foreach_esp_decrypt_next
#undef _
  },
};

VLIB_NODE_FUNCTION_MULTIARCH (dpdk_esp_decrypt_post_node, dpdk_esp_decrypt_post_node_fn)