Use thread local storage for thread index
vpp.git: src/plugins/dpdk/ipsec/esp_decrypt.c
/*
 * esp_decrypt.c : IPSec ESP Decrypt node using DPDK Cryptodev
 *
 * Copyright (c) 2016 Intel and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <vnet/vnet.h>
#include <vnet/api_errno.h>
#include <vnet/ip/ip.h>

#include <vnet/ipsec/ipsec.h>
#include <dpdk/ipsec/ipsec.h>
#include <dpdk/ipsec/esp.h>
#include <dpdk/device/dpdk.h>
#include <dpdk/device/dpdk_priv.h>

#define foreach_esp_decrypt_next               \
_(DROP, "error-drop")                          \
_(IP4_INPUT, "ip4-input")                      \
_(IP6_INPUT, "ip6-input")

#define _(v, s) ESP_DECRYPT_NEXT_##v,
typedef enum {
  foreach_esp_decrypt_next
#undef _
  ESP_DECRYPT_N_NEXT,
} esp_decrypt_next_t;

#define foreach_esp_decrypt_error                \
 _(RX_PKTS, "ESP pkts received")                 \
 _(DECRYPTION_FAILED, "ESP decryption failed")   \
 _(REPLAY, "SA replayed packet")                 \
 _(NOT_IP, "Not IP packet (dropped)")            \
 _(ENQ_FAIL, "Enqueue failed (buffer full)")     \
 _(NO_CRYPTODEV, "Cryptodev not configured")     \
 _(BAD_LEN, "Invalid ciphertext length")         \
 _(UNSUPPORTED, "Cipher/Auth not supported")

typedef enum {
#define _(sym,str) ESP_DECRYPT_ERROR_##sym,
  foreach_esp_decrypt_error
#undef _
  ESP_DECRYPT_N_ERROR,
} esp_decrypt_error_t;

static char * esp_decrypt_error_strings[] = {
#define _(sym,string) string,
  foreach_esp_decrypt_error
#undef _
};

63
64 vlib_node_registration_t dpdk_esp_decrypt_node;
65
66 typedef struct {
67   ipsec_crypto_alg_t crypto_alg;
68   ipsec_integ_alg_t integ_alg;
69 } esp_decrypt_trace_t;
70
71 /* packet trace format function */
72 static u8 * format_esp_decrypt_trace (u8 * s, va_list * args)
73 {
74   CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
75   CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
76   esp_decrypt_trace_t * t = va_arg (*args, esp_decrypt_trace_t *);
77
78   s = format (s, "esp: crypto %U integrity %U",
79               format_ipsec_crypto_alg, t->crypto_alg,
80               format_ipsec_integ_alg, t->integ_alg);
81   return s;
82 }
83
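/*
 * First pass: for each packet, validate it against the SA, build a DPDK
 * crypto op and stage it on the queue pair bound to that SA.  Completed
 * ops re-enter the graph via the dpdk-esp-decrypt-post node below; only
 * error packets are forwarded (to error-drop) from this node.
 */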
static uword
dpdk_esp_decrypt_node_fn (vlib_main_t * vm,
             vlib_node_runtime_t * node,
             vlib_frame_t * from_frame)
{
  u32 n_left_from, *from, *to_next, next_index;
  ipsec_main_t *im = &ipsec_main;
  u32 thread_index = vlib_get_thread_index();
  dpdk_crypto_main_t * dcm = &dpdk_crypto_main;
  dpdk_esp_main_t * em = &dpdk_esp_main;
  u32 i;

  from = vlib_frame_vector_args (from_frame);
  n_left_from = from_frame->n_vectors;

  if (PREDICT_FALSE(!dcm->workers_main))
    {
      vlib_node_increment_counter (vm, dpdk_esp_decrypt_node.index,
              ESP_DECRYPT_ERROR_NO_CRYPTODEV, n_left_from);
      vlib_buffer_free(vm, from, n_left_from);
      return n_left_from;
    }

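  /*
   * Per-queue-pair staging: crypto ops and the matching buffer indices are
   * collected here and enqueued to each cryptodev in a single burst at the
   * end of the frame.
   */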
  crypto_worker_main_t *cwm = vec_elt_at_index(dcm->workers_main, thread_index);
  u32 n_qps = vec_len(cwm->qp_data);
  struct rte_crypto_op ** cops_to_enq[n_qps];
  u32 n_cop_qp[n_qps], * bi_to_enq[n_qps];

  for (i = 0; i < n_qps; i++)
    {
      bi_to_enq[i] = cwm->qp_data[i].bi;
      cops_to_enq[i] = cwm->qp_data[i].cops;
    }

  memset(n_cop_qp, 0, n_qps * sizeof(u32));

  crypto_alloc_cops();

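  /*
   * Successfully prepared packets are not forwarded by this node; they are
   * handed to the cryptodev instead.  The only next node used here is
   * error-drop.
   */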
  next_index = ESP_DECRYPT_NEXT_DROP;

  while (n_left_from > 0)
    {
      u32 n_left_to_next;

      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      while (n_left_from > 0 && n_left_to_next > 0)
        {
          u32 bi0, sa_index0 = ~0, seq, icv_size, iv_size;
          vlib_buffer_t * b0;
          esp_header_t * esp0;
          ipsec_sa_t * sa0;
          struct rte_mbuf * mb0 = 0;
          const int BLOCK_SIZE = 16;
          crypto_sa_session_t * sa_sess;
          void * sess;
          u16 qp_index;
          struct rte_crypto_op * cop = 0;

          bi0 = from[0];
          from += 1;
          n_left_from -= 1;

          b0 = vlib_get_buffer (vm, bi0);
          esp0 = vlib_buffer_get_current (b0);

          sa_index0 = vnet_buffer(b0)->ipsec.sad_index;
          sa0 = pool_elt_at_index (im->sad, sa_index0);

          seq = clib_net_to_host_u32(esp0->seq);
          /* anti-replay check */
          if (sa0->use_anti_replay)
            {
              int rv = 0;

              if (PREDICT_TRUE(sa0->use_esn))
                rv = esp_replay_check_esn(sa0, seq);
              else
                rv = esp_replay_check(sa0, seq);

              if (PREDICT_FALSE(rv))
                {
                  clib_warning ("anti-replay SPI %u seq %u", sa0->spi, seq);
                  vlib_node_increment_counter (vm, dpdk_esp_decrypt_node.index,
                                               ESP_DECRYPT_ERROR_REPLAY, 1);
                  to_next[0] = bi0;
                  to_next += 1;
                  n_left_to_next -= 1;
                  goto trace;
                }
            }

          sa0->total_data_size += b0->current_length;

          if (PREDICT_FALSE(sa0->integ_alg == IPSEC_INTEG_ALG_NONE) ||
                  PREDICT_FALSE(sa0->crypto_alg == IPSEC_CRYPTO_ALG_NONE))
            {
              clib_warning ("SPI %u : only cipher + auth supported", sa0->spi);
              vlib_node_increment_counter (vm, dpdk_esp_decrypt_node.index,
                                           ESP_DECRYPT_ERROR_UNSUPPORTED, 1);
              to_next[0] = bi0;
              to_next += 1;
              n_left_to_next -= 1;
              goto trace;
            }

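          /* Look up the per-SA cryptodev session, creating it lazily on
           * first use; the packet is dropped if session setup fails. */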
          sa_sess = pool_elt_at_index(cwm->sa_sess_d[0], sa_index0);

          if (PREDICT_FALSE(!sa_sess->sess))
            {
              int ret = create_sym_sess(sa0, sa_sess, 0);

              if (PREDICT_FALSE (ret))
                {
                  to_next[0] = bi0;
                  to_next += 1;
                  n_left_to_next -= 1;
                  goto trace;
                }
            }

          sess = sa_sess->sess;
          qp_index = sa_sess->qp_index;

          ASSERT (vec_len (vec_elt (cwm->qp_data, qp_index).free_cops) > 0);
          cop = vec_pop (vec_elt (cwm->qp_data, qp_index).free_cops);
          ASSERT (cop->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED);

          cops_to_enq[qp_index][0] = cop;
          cops_to_enq[qp_index] += 1;
          n_cop_qp[qp_index] += 1;
          bi_to_enq[qp_index][0] = bi0;
          bi_to_enq[qp_index] += 1;

          rte_crypto_op_attach_sym_session(cop, sess);

          icv_size = em->esp_integ_algs[sa0->integ_alg].trunc_size;
          iv_size = em->esp_crypto_algs[sa0->crypto_alg].iv_len;

          /* Convert vlib buffer to mbuf */
          mb0 = rte_mbuf_from_vlib_buffer(b0);
          mb0->data_len = b0->current_length;
          mb0->pkt_len = b0->current_length;
          mb0->data_off = RTE_PKTMBUF_HEADROOM + b0->current_data;

          /* Outer IP header has already been stripped; use a signed type so
           * a truncated packet shows up as a negative length. */
          i32 payload_len = rte_pktmbuf_pkt_len(mb0) - sizeof (esp_header_t) -
              iv_size - icv_size;

          if ((payload_len & (BLOCK_SIZE - 1)) || (payload_len <= 0))
            {
              clib_warning ("invalid payload length %d (block size %d)",
                            payload_len, BLOCK_SIZE);
              vlib_node_increment_counter (vm, dpdk_esp_decrypt_node.index,
                                           ESP_DECRYPT_ERROR_BAD_LEN, 1);
              /* return the op and unwind the staging done above */
              vec_add (vec_elt (cwm->qp_data, qp_index).free_cops, &cop, 1);
              bi_to_enq[qp_index] -= 1;
              cops_to_enq[qp_index] -= 1;
              n_cop_qp[qp_index] -= 1;
              to_next[0] = bi0;
              to_next += 1;
              n_left_to_next -= 1;
              goto trace;
            }

249
250           struct rte_crypto_sym_op *sym_cop = (struct rte_crypto_sym_op *)(cop + 1);
251
252           sym_cop->m_src = mb0;
253           sym_cop->cipher.data.offset = sizeof (esp_header_t) + iv_size;
254           sym_cop->cipher.data.length = payload_len;
255
256           u8 *iv = rte_pktmbuf_mtod_offset(mb0, void*, sizeof (esp_header_t));
257           dpdk_cop_priv_t * priv = (dpdk_cop_priv_t *)(sym_cop + 1);
258
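          /*
           * AES-GCM (combined mode): the 16-byte initial counter block is
           * built from the 4-byte SA salt, the 8-byte IV carried in the
           * packet and a counter of 1; the AAD is the SPI + sequence number
           * from the ESP header (plus seq_hi when ESN is in use).
           */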
          if (sa0->crypto_alg == IPSEC_CRYPTO_ALG_AES_GCM_128)
            {
              dpdk_gcm_cnt_blk *icb = &priv->cb;
              icb->salt = sa0->salt;
              clib_memcpy(icb->iv, iv, 8);
              icb->cnt = clib_host_to_net_u32(1);
              sym_cop->cipher.iv.data = (u8 *)icb;
              sym_cop->cipher.iv.phys_addr = cop->phys_addr +
                (uintptr_t)icb - (uintptr_t)cop;
              sym_cop->cipher.iv.length = 16;

              u8 *aad = priv->aad;
              clib_memcpy(aad, iv - sizeof(esp_header_t), 8);
              sym_cop->auth.aad.data = aad;
              sym_cop->auth.aad.phys_addr = cop->phys_addr +
                  (uintptr_t)aad - (uintptr_t)cop;
              if (sa0->use_esn)
                {
                  /* ESN: the high-order sequence bits are part of the AAD,
                   * in network byte order */
                  *((u32*)&aad[8]) = clib_host_to_net_u32(sa0->seq_hi);
                  sym_cop->auth.aad.length = 12;
                }
              else
                {
                  sym_cop->auth.aad.length = 8;
                }

              sym_cop->auth.digest.data = rte_pktmbuf_mtod_offset(mb0, void*,
                       rte_pktmbuf_pkt_len(mb0) - icv_size);
              sym_cop->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(mb0,
                       rte_pktmbuf_pkt_len(mb0) - icv_size);
              sym_cop->auth.digest.length = icv_size;
            }
          else
            {
              sym_cop->cipher.iv.data = rte_pktmbuf_mtod_offset(mb0, void*,
                       sizeof (esp_header_t));
              sym_cop->cipher.iv.phys_addr = rte_pktmbuf_mtophys_offset(mb0,
                       sizeof (esp_header_t));
              sym_cop->cipher.iv.length = iv_size;

              if (sa0->use_esn)
                {
                  u8* payload_end = rte_pktmbuf_mtod_offset(
                      mb0, u8*, sizeof(esp_header_t) + iv_size + payload_len);

                  /* ESN: the high-order sequence bits must be covered by the
                   * ICV.  Save the received ICV aside, overwrite its place in
                   * the packet with seq_hi (network byte order) and point the
                   * op's digest at the saved copy for verification. */
                  clib_memcpy (priv->icv, payload_end, icv_size);
                  *((u32*) payload_end) = clib_host_to_net_u32(sa0->seq_hi);
                  sym_cop->auth.data.offset = 0;
                  sym_cop->auth.data.length = sizeof(esp_header_t) + iv_size
                      + payload_len + sizeof(sa0->seq_hi);
                  sym_cop->auth.digest.data = priv->icv;
                  sym_cop->auth.digest.phys_addr = cop->phys_addr
                      + (uintptr_t) priv->icv - (uintptr_t) cop;
                  sym_cop->auth.digest.length = icv_size;
                }
              else
                {
                  sym_cop->auth.data.offset = 0;
                  sym_cop->auth.data.length = sizeof(esp_header_t) +
                           iv_size + payload_len;

                  sym_cop->auth.digest.data = rte_pktmbuf_mtod_offset(mb0, void*,
                           rte_pktmbuf_pkt_len(mb0) - icv_size);
                  sym_cop->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(mb0,
                           rte_pktmbuf_pkt_len(mb0) - icv_size);
                  sym_cop->auth.digest.length = icv_size;
                }
            }

trace:
          if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
            {
              esp_decrypt_trace_t *tr = vlib_add_trace (vm, node, b0, sizeof (*tr));
              tr->crypto_alg = sa0->crypto_alg;
              tr->integ_alg = sa0->integ_alg;
            }
        }
      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }
  vlib_node_increment_counter (vm, dpdk_esp_decrypt_node.index,
                               ESP_DECRYPT_ERROR_RX_PKTS,
                               from_frame->n_vectors);
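
  /* Flush the staged ops: one enqueue burst per cryptodev queue pair.  Ops
   * that do not fit in the device queue are returned to the free list and
   * their buffers dropped. */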
  crypto_qp_data_t *qpd;
  /* *INDENT-OFF* */
  vec_foreach_index (i, cwm->qp_data)
    {
      u32 enq;

      qpd = vec_elt_at_index(cwm->qp_data, i);
      enq = rte_cryptodev_enqueue_burst(qpd->dev_id, qpd->qp_id,
                                        qpd->cops, n_cop_qp[i]);
      qpd->inflights += enq;

      if (PREDICT_FALSE(enq < n_cop_qp[i]))
        {
          crypto_free_cop (qpd, &qpd->cops[enq], n_cop_qp[i] - enq);
          vlib_buffer_free (vm, &qpd->bi[enq], n_cop_qp[i] - enq);

          vlib_node_increment_counter (vm, dpdk_esp_decrypt_node.index,
                                       ESP_DECRYPT_ERROR_ENQ_FAIL,
                                       n_cop_qp[i] - enq);
        }
    }
  /* *INDENT-ON* */

  return from_frame->n_vectors;
}

368
369 VLIB_REGISTER_NODE (dpdk_esp_decrypt_node) = {
370   .function = dpdk_esp_decrypt_node_fn,
371   .name = "dpdk-esp-decrypt",
372   .vector_size = sizeof (u32),
373   .format_trace = format_esp_decrypt_trace,
374   .type = VLIB_NODE_TYPE_INTERNAL,
375
376   .n_errors = ARRAY_LEN(esp_decrypt_error_strings),
377   .error_strings = esp_decrypt_error_strings,
378
379   .n_next_nodes = ESP_DECRYPT_N_NEXT,
380   .next_nodes = {
381 #define _(s,n) [ESP_DECRYPT_NEXT_##s] = n,
382     foreach_esp_decrypt_next
383 #undef _
384   },
385 };
386
387 VLIB_NODE_FUNCTION_MULTIARCH (dpdk_esp_decrypt_node, dpdk_esp_decrypt_node_fn)
388
/*
 * Decrypt Post Node: runs once the cryptodev has completed the ops.  It
 * strips the ESP header, IV, padding and ICV, restores or rebuilds the IP
 * header, and hands the packet to ip4-input / ip6-input.
 */

#define foreach_esp_decrypt_post_error        \
 _(PKTS, "ESP post pkts")

typedef enum {
#define _(sym,str) ESP_DECRYPT_POST_ERROR_##sym,
  foreach_esp_decrypt_post_error
#undef _
  ESP_DECRYPT_POST_N_ERROR,
} esp_decrypt_post_error_t;

static char * esp_decrypt_post_error_strings[] = {
#define _(sym,string) string,
  foreach_esp_decrypt_post_error
#undef _
};

vlib_node_registration_t dpdk_esp_decrypt_post_node;

static u8 * format_esp_decrypt_post_trace (u8 * s, va_list * args)
{
  return s;
}

415
416 static uword
417 dpdk_esp_decrypt_post_node_fn (vlib_main_t * vm,
418              vlib_node_runtime_t * node,
419              vlib_frame_t * from_frame)
420 {
421   u32 n_left_from, *from, *to_next = 0, next_index;
422   ipsec_sa_t * sa0;
423   u32 sa_index0 = ~0;
424   ipsec_main_t *im = &ipsec_main;
425   dpdk_esp_main_t *em = &dpdk_esp_main;
426
427   from = vlib_frame_vector_args (from_frame);
428   n_left_from = from_frame->n_vectors;
429
430   next_index = node->cached_next_index;
431
  while (n_left_from > 0)
    {
      u32 n_left_to_next;

      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      while (n_left_from > 0 && n_left_to_next > 0)
        {
          esp_footer_t * f0;
          u32 bi0, next0, icv_size, iv_size;
          vlib_buffer_t * b0 = 0;
          ip4_header_t *ih4 = 0, *oh4 = 0;
          ip6_header_t *ih6 = 0, *oh6 = 0;
          u8 tunnel_mode = 1;
          u8 transport_ip6 = 0;

          next0 = ESP_DECRYPT_NEXT_DROP;

          bi0 = from[0];
          from += 1;
          n_left_from -= 1;
          n_left_to_next -= 1;

          b0 = vlib_get_buffer (vm, bi0);

          sa_index0 = vnet_buffer(b0)->ipsec.sad_index;
          sa0 = pool_elt_at_index (im->sad, sa_index0);

          to_next[0] = bi0;
          to_next += 1;

          icv_size = em->esp_integ_algs[sa0->integ_alg].trunc_size;
          iv_size = em->esp_crypto_algs[sa0->crypto_alg].iv_len;

          if (sa0->use_anti_replay)
            {
              esp_header_t * esp0 = vlib_buffer_get_current (b0);
              u32 seq;
              seq = clib_net_to_host_u32(esp0->seq);
              if (PREDICT_TRUE(sa0->use_esn))
                esp_replay_advance_esn(sa0, seq);
              else
                esp_replay_advance(sa0, seq);
            }

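          /*
           * Trim the packet: drop the ESP header + IV from the front and
           * the ICV, the 2-byte ESP footer (pad length, next header) and
           * any padding from the tail.
           */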
          ih4 = (ip4_header_t *) (b0->data + sizeof(ethernet_header_t));
          vlib_buffer_advance (b0, sizeof (esp_header_t) + iv_size);

          b0->current_length -= (icv_size + 2);
          /* use |= so the trace flag set on the buffer is not wiped out */
          b0->flags |= VLIB_BUFFER_TOTAL_LENGTH_VALID;
          f0 = (esp_footer_t *) ((u8 *) vlib_buffer_get_current (b0) +
                                 b0->current_length);
          b0->current_length -= f0->pad_length;

          /* transport mode */
          if (PREDICT_FALSE(!sa0->is_tunnel && !sa0->is_tunnel_ip6))
            {
              tunnel_mode = 0;

              if (PREDICT_TRUE((ih4->ip_version_and_header_length & 0xF0) != 0x40))
                {
                  if (PREDICT_TRUE((ih4->ip_version_and_header_length & 0xF0) == 0x60))
                    transport_ip6 = 1;
                  else
                    {
                      clib_warning("next header: 0x%x", f0->next_header);
                      vlib_node_increment_counter (vm, dpdk_esp_decrypt_node.index,
                                                   ESP_DECRYPT_ERROR_NOT_IP, 1);
                      goto trace;
                    }
                }
            }

          if (PREDICT_TRUE (tunnel_mode))
            {
              if (PREDICT_TRUE(f0->next_header == IP_PROTOCOL_IP_IN_IP))
                next0 = ESP_DECRYPT_NEXT_IP4_INPUT;
              else if (f0->next_header == IP_PROTOCOL_IPV6)
                next0 = ESP_DECRYPT_NEXT_IP6_INPUT;
              else
                {
                  clib_warning("next header: 0x%x", f0->next_header);
                  vlib_node_increment_counter (vm, dpdk_esp_decrypt_node.index,
                                               ESP_DECRYPT_ERROR_DECRYPTION_FAILED,
                                               1);
                  goto trace;
                }
            }
          /* transport mode */
          else
            {
              if (PREDICT_FALSE(transport_ip6))
                {
                  ih6 = (ip6_header_t *) (b0->data + sizeof(ethernet_header_t));
                  vlib_buffer_advance (b0, -sizeof(ip6_header_t));
                  oh6 = vlib_buffer_get_current (b0);
                  memmove(oh6, ih6, sizeof(ip6_header_t));

                  next0 = ESP_DECRYPT_NEXT_IP6_INPUT;
                  oh6->protocol = f0->next_header;
                  oh6->payload_length =
                      clib_host_to_net_u16 (
                          vlib_buffer_length_in_chain(vm, b0) -
                          sizeof (ip6_header_t));
                }
              else
                {
                  vlib_buffer_advance (b0, -sizeof(ip4_header_t));
                  oh4 = vlib_buffer_get_current (b0);
                  memmove(oh4, ih4, sizeof(ip4_header_t));

                  next0 = ESP_DECRYPT_NEXT_IP4_INPUT;
                  oh4->ip_version_and_header_length = 0x45;
                  oh4->fragment_id = 0;
                  oh4->flags_and_fragment_offset = 0;
                  oh4->protocol = f0->next_header;
                  oh4->length = clib_host_to_net_u16 (
                      vlib_buffer_length_in_chain (vm, b0));
                  oh4->checksum = ip4_header_checksum (oh4);
                }
            }

          vnet_buffer (b0)->sw_if_index[VLIB_TX] = (u32)~0;

trace:
          if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
            {
              esp_decrypt_trace_t *tr = vlib_add_trace (vm, node, b0, sizeof (*tr));
              tr->crypto_alg = sa0->crypto_alg;
              tr->integ_alg = sa0->integ_alg;
            }

          vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                           to_next, n_left_to_next, bi0, next0);
        }
      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }
  vlib_node_increment_counter (vm, dpdk_esp_decrypt_post_node.index,
                               ESP_DECRYPT_POST_ERROR_PKTS,
                               from_frame->n_vectors);

  return from_frame->n_vectors;
}

VLIB_REGISTER_NODE (dpdk_esp_decrypt_post_node) = {
  .function = dpdk_esp_decrypt_post_node_fn,
  .name = "dpdk-esp-decrypt-post",
  .vector_size = sizeof (u32),
  .format_trace = format_esp_decrypt_post_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = ARRAY_LEN(esp_decrypt_post_error_strings),
  .error_strings = esp_decrypt_post_error_strings,

  .n_next_nodes = ESP_DECRYPT_N_NEXT,
  .next_nodes = {
#define _(s,n) [ESP_DECRYPT_NEXT_##s] = n,
    foreach_esp_decrypt_next
#undef _
  },
};

VLIB_NODE_FUNCTION_MULTIARCH (dpdk_esp_decrypt_post_node, dpdk_esp_decrypt_post_node_fn)