/*
 * esp_encrypt.c : IPSec ESP encrypt node using DPDK Cryptodev
 *
 * Copyright (c) 2017 Intel and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <vnet/vnet.h>
#include <vnet/api_errno.h>
#include <vnet/ip/ip.h>

#include <vnet/ipsec/ipsec.h>
#include <vnet/ipsec/esp.h>
#include <vnet/udp/udp.h>
#include <dpdk/buffer.h>
#include <dpdk/ipsec/ipsec.h>
#include <dpdk/device/dpdk.h>
#include <dpdk/device/dpdk_priv.h>

#define foreach_esp_encrypt_next                   \
_(DROP, "error-drop")                              \
_(IP4_LOOKUP, "ip4-lookup")                        \
_(IP6_LOOKUP, "ip6-lookup")                        \
_(INTERFACE_OUTPUT, "interface-output")

#define _(v, s) ESP_ENCRYPT_NEXT_##v,
typedef enum
{
  foreach_esp_encrypt_next
#undef _
    ESP_ENCRYPT_N_NEXT,
} esp_encrypt_next_t;

#define foreach_esp_encrypt_error                   \
 _(RX_PKTS, "ESP pkts received")                    \
 _(SEQ_CYCLED, "Sequence number cycled")            \
 _(ENQ_FAIL, "Enqueue failed to crypto device")     \
 _(DISCARD, "Not enough crypto operations, discarding frame")  \
 _(SESSION, "Failed to get crypto session")         \
 _(NOSUP, "Cipher/Auth not supported")


typedef enum
{
#define _(sym,str) ESP_ENCRYPT_ERROR_##sym,
  foreach_esp_encrypt_error
#undef _
    ESP_ENCRYPT_N_ERROR,
} esp_encrypt_error_t;

static char *esp_encrypt_error_strings[] = {
#define _(sym,string) string,
  foreach_esp_encrypt_error
#undef _
};

extern vlib_node_registration_t dpdk_esp4_encrypt_node;
extern vlib_node_registration_t dpdk_esp6_encrypt_node;

typedef struct
{
  ipsec_crypto_alg_t crypto_alg;
  ipsec_integ_alg_t integ_alg;
  u8 packet_data[64];
} esp_encrypt_trace_t;

/* packet trace format function */
static u8 *
format_esp_encrypt_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  esp_encrypt_trace_t *t = va_arg (*args, esp_encrypt_trace_t *);
  ip4_header_t *ih4 = (ip4_header_t *) t->packet_data;
  u32 indent = format_get_indent (s), offset;

  s = format (s, "cipher %U auth %U\n",
              format_ipsec_crypto_alg, t->crypto_alg,
              format_ipsec_integ_alg, t->integ_alg);

  if ((ih4->ip_version_and_header_length & 0xF0) == 0x60)
    {
      s = format (s, "%U%U", format_white_space, indent,
                  format_ip6_header, ih4);
      offset = sizeof (ip6_header_t);
    }
  else
    {
      s = format (s, "%U%U", format_white_space, indent,
                  format_ip4_header, ih4);
      offset = ip4_header_bytes (ih4);
    }

  s = format (s, "\n%U%U", format_white_space, indent,
              format_esp_header, t->packet_data + offset);

  return s;
}

always_inline uword
dpdk_esp_encrypt_inline (vlib_main_t * vm,
                         vlib_node_runtime_t * node,
                         vlib_frame_t * from_frame, int is_ip6, int is_tun)
{
  u32 n_left_from, *from, *to_next, next_index, thread_index;
  ipsec_main_t *im = &ipsec_main;
  vnet_main_t *vnm = im->vnet_main;
  vnet_interface_main_t *vim = &vnm->interface_main;
  u32 thread_idx = vlib_get_thread_index ();
  dpdk_crypto_main_t *dcm = &dpdk_crypto_main;
  crypto_resource_t *res = 0;
  ipsec_sa_t *sa0 = 0;
  crypto_alg_t *cipher_alg = 0, *auth_alg = 0;
  struct rte_cryptodev_sym_session *session = 0;
  u32 ret, last_sa_index = ~0;
  u8 numa = rte_socket_id ();
  u8 is_aead = 0;
  crypto_worker_main_t *cwm =
    vec_elt_at_index (dcm->workers_main, thread_idx);
  struct rte_crypto_op **ops = cwm->ops;

  from = vlib_frame_vector_args (from_frame);
  n_left_from = from_frame->n_vectors;
  thread_index = vm->thread_index;

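  /* Pre-allocate one crypto operation per packet in the frame; if the op
   * pool on this NUMA node cannot satisfy the request, the whole frame is
   * discarded below. */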
  ret = crypto_alloc_ops (numa, ops, n_left_from);
  if (ret)
    {
      if (is_ip6)
        vlib_node_increment_counter (vm, dpdk_esp6_encrypt_node.index,
                                     ESP_ENCRYPT_ERROR_DISCARD, 1);
      else
        vlib_node_increment_counter (vm, dpdk_esp4_encrypt_node.index,
                                     ESP_ENCRYPT_ERROR_DISCARD, 1);
      /* Discard whole frame */
      return n_left_from;
    }

  next_index = ESP_ENCRYPT_NEXT_DROP;

  while (n_left_from > 0)
    {
      u32 n_left_to_next;

      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      while (n_left_from > 0 && n_left_to_next > 0)
        {
          clib_error_t *error;
          u32 bi0, bi1;
          vlib_buffer_t *b0, *b1;
          u32 sa_index0;
          ip4_and_esp_header_t *ih0, *oh0 = 0;
          ip6_and_esp_header_t *ih6_0, *oh6_0 = 0;
          ip4_and_udp_and_esp_header_t *ouh0 = 0;
          esp_header_t *esp0;
          esp_footer_t *f0;
          u8 next_hdr_type;
          u32 iv_size;
          u16 orig_sz;
          u8 trunc_size;
          u16 rewrite_len;
          u16 udp_encap_adv = 0;
          struct rte_mbuf *mb0;
          struct rte_crypto_op *op;
          u16 res_idx;

          bi0 = from[0];
          from += 1;
          n_left_from -= 1;

          b0 = vlib_get_buffer (vm, bi0);
          ih0 = vlib_buffer_get_current (b0);
          mb0 = rte_mbuf_from_vlib_buffer (b0);

          /* ih0/ih6_0 */
          CLIB_PREFETCH (ih0, sizeof (ih6_0[0]), LOAD);
          /* f0 */
          CLIB_PREFETCH (vlib_buffer_get_tail (b0), 20, STORE);
          /* mb0 */
          CLIB_PREFETCH (mb0, CLIB_CACHE_LINE_BYTES, STORE);

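          /* Prefetch the next buffer's metadata and its pre-data (header
           * rewrite) area to hide memory latency while the current packet
           * is being processed. */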
          if (n_left_from > 1)
            {
              bi1 = from[1];
              b1 = vlib_get_buffer (vm, bi1);

              CLIB_PREFETCH (b1, CLIB_CACHE_LINE_BYTES, LOAD);
              CLIB_PREFETCH (b1->data - CLIB_CACHE_LINE_BYTES,
                             CLIB_CACHE_LINE_BYTES, STORE);
            }

          op = ops[0];
          ops += 1;
          ASSERT (op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED);

          dpdk_op_priv_t *priv = crypto_op_get_priv (op);
          /* store bi in op private */
          priv->bi = bi0;

          u16 op_len =
            sizeof (op[0]) + sizeof (op[0].sym[0]) + sizeof (priv[0]);
          CLIB_PREFETCH (op, op_len, STORE);

          if (is_tun)
            {
              u32 tmp;
              /* we are on an ipsec tunnel's feature arc */
              sa_index0 = *(u32 *) vnet_feature_next_with_data (&tmp, b0,
                                                                sizeof
                                                                (sa_index0));
            }
          else
            sa_index0 = vnet_buffer (b0)->ipsec.sad_index;

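          /* Refresh the cached SA, algorithm and session state only when the
           * SA changes from one packet to the next. */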
          if (sa_index0 != last_sa_index)
            {
              sa0 = pool_elt_at_index (im->sad, sa_index0);

              cipher_alg =
                vec_elt_at_index (dcm->cipher_algs, sa0->crypto_alg);
              auth_alg = vec_elt_at_index (dcm->auth_algs, sa0->integ_alg);

              is_aead = (cipher_alg->type == RTE_CRYPTO_SYM_XFORM_AEAD);

              if (is_aead)
                auth_alg = cipher_alg;

              res_idx = get_resource (cwm, sa0);

              if (PREDICT_FALSE (res_idx == (u16) ~ 0))
                {
                  clib_warning ("unsupported SA by thread index %u",
                                thread_idx);
                  if (is_ip6)
                    vlib_node_increment_counter (vm,
                                                 dpdk_esp6_encrypt_node.index,
                                                 ESP_ENCRYPT_ERROR_NOSUP, 1);
                  else
                    vlib_node_increment_counter (vm,
                                                 dpdk_esp4_encrypt_node.index,
                                                 ESP_ENCRYPT_ERROR_NOSUP, 1);
                  to_next[0] = bi0;
                  to_next += 1;
                  n_left_to_next -= 1;
                  goto trace;
                }
              res = vec_elt_at_index (dcm->resource, res_idx);

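              /* Look up the cryptodev session bound to this SA on the
               * selected crypto resource. */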
              error = crypto_get_session (&session, sa_index0, res, cwm, 1);
              if (PREDICT_FALSE (error || !session))
                {
                  clib_warning ("failed to get crypto session");
                  if (is_ip6)
                    vlib_node_increment_counter (vm,
                                                 dpdk_esp6_encrypt_node.index,
                                                 ESP_ENCRYPT_ERROR_SESSION,
                                                 1);
                  else
                    vlib_node_increment_counter (vm,
                                                 dpdk_esp4_encrypt_node.index,
                                                 ESP_ENCRYPT_ERROR_SESSION,
                                                 1);
                  to_next[0] = bi0;
                  to_next += 1;
                  n_left_to_next -= 1;
                  goto trace;
                }

              last_sa_index = sa_index0;
            }

          if (PREDICT_FALSE (esp_seq_advance (sa0)))
            {
              clib_warning ("sequence number counter has cycled SPI %u",
                            sa0->spi);
              if (is_ip6)
                vlib_node_increment_counter (vm,
                                             dpdk_esp6_encrypt_node.index,
                                             ESP_ENCRYPT_ERROR_SEQ_CYCLED, 1);
              else
                vlib_node_increment_counter (vm,
                                             dpdk_esp4_encrypt_node.index,
                                             ESP_ENCRYPT_ERROR_SEQ_CYCLED, 1);
              //TODO: rekey SA
              to_next[0] = bi0;
              to_next += 1;
              n_left_to_next -= 1;
              goto trace;
            }

          orig_sz = b0->current_length;

          /* TODO multi-seg support - total_length_not_including_first_buffer */
          vlib_increment_combined_counter
            (&ipsec_sa_counters, thread_index, sa_index0,
             1, b0->current_length);

          /* Update tunnel interface tx counters */
          if (is_tun)
            vlib_increment_combined_counter
              (vim->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX,
               thread_index, vnet_buffer (b0)->sw_if_index[VLIB_TX],
               1, b0->current_length);

          res->ops[res->n_ops] = op;
          res->bi[res->n_ops] = bi0;
          res->n_ops += 1;

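          /* Fill the per-op initial counter block from the SA salt and
           * sequence numbers; it provides the IV / counter material for the
           * cipher. */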
          dpdk_gcm_cnt_blk *icb = &priv->cb;

          crypto_set_icb (icb, sa0->salt, sa0->seq, sa0->seq_hi);

          iv_size = cipher_alg->iv_len;
          trunc_size = auth_alg->trunc_size;

          /* when UDP encapsulation is used, reserve room for the UDP header
           * between the IP and ESP headers (IPv4 only) */
          if (ipsec_sa_is_set_UDP_ENCAP (sa0) && !is_ip6)
            udp_encap_adv = sizeof (udp_header_t);

          if (ipsec_sa_is_set_IS_TUNNEL (sa0))
            {
              rewrite_len = 0;
              if (!is_ip6 && !ipsec_sa_is_set_IS_TUNNEL_V6 (sa0))       /* ip4inip4 */
                {
                  /* in tunnel mode send it back to FIB */
                  priv->next = DPDK_CRYPTO_INPUT_NEXT_IP4_LOOKUP;
                  u8 adv = sizeof (ip4_header_t) + udp_encap_adv +
                    sizeof (esp_header_t) + iv_size;
                  vlib_buffer_advance (b0, -adv);
                  oh0 = vlib_buffer_get_current (b0);
                  ouh0 = vlib_buffer_get_current (b0);
                  next_hdr_type = IP_PROTOCOL_IP_IN_IP;
                  /*
                   * oh0->ip4.ip_version_and_header_length = 0x45;
                   * oh0->ip4.tos = ih0->ip4.tos;
                   * oh0->ip4.fragment_id = 0;
                   * oh0->ip4.flags_and_fragment_offset = 0;
                   */
                  oh0->ip4.checksum_data_64[0] =
                    clib_host_to_net_u64 (0x45ULL << 56);
                  /*
                   * oh0->ip4.ttl = 254;
                   * oh0->ip4.protocol = IP_PROTOCOL_IPSEC_ESP;
                   */
                  oh0->ip4.checksum_data_32[2] =
                    clib_host_to_net_u32 (0xfe320000);

                  oh0->ip4.src_address.as_u32 =
                    sa0->tunnel_src_addr.ip4.as_u32;
                  oh0->ip4.dst_address.as_u32 =
                    sa0->tunnel_dst_addr.ip4.as_u32;

                  if (ipsec_sa_is_set_UDP_ENCAP (sa0))
                    {
                      oh0->ip4.protocol = IP_PROTOCOL_UDP;
                      esp0 = &ouh0->esp;
                    }
                  else
                    esp0 = &oh0->esp;
                  esp0->spi = clib_host_to_net_u32 (sa0->spi);
                  esp0->seq = clib_host_to_net_u32 (sa0->seq);
                }
              else if (is_ip6 && ipsec_sa_is_set_IS_TUNNEL_V6 (sa0))
                {
                  /* ip6inip6 */
                  /* in tunnel mode send it back to FIB */
                  priv->next = DPDK_CRYPTO_INPUT_NEXT_IP6_LOOKUP;

                  u8 adv =
                    sizeof (ip6_header_t) + sizeof (esp_header_t) + iv_size;
                  vlib_buffer_advance (b0, -adv);
                  ih6_0 = (ip6_and_esp_header_t *) ih0;
                  oh6_0 = vlib_buffer_get_current (b0);

                  next_hdr_type = IP_PROTOCOL_IPV6;

                  oh6_0->ip6.ip_version_traffic_class_and_flow_label =
                    ih6_0->ip6.ip_version_traffic_class_and_flow_label;

                  oh6_0->ip6.protocol = IP_PROTOCOL_IPSEC_ESP;
                  oh6_0->ip6.hop_limit = 254;
                  oh6_0->ip6.src_address.as_u64[0] =
                    sa0->tunnel_src_addr.ip6.as_u64[0];
                  oh6_0->ip6.src_address.as_u64[1] =
                    sa0->tunnel_src_addr.ip6.as_u64[1];
                  oh6_0->ip6.dst_address.as_u64[0] =
                    sa0->tunnel_dst_addr.ip6.as_u64[0];
                  oh6_0->ip6.dst_address.as_u64[1] =
                    sa0->tunnel_dst_addr.ip6.as_u64[1];
                  esp0 = &oh6_0->esp;
                  oh6_0->esp.spi = clib_host_to_net_u32 (sa0->spi);
                  oh6_0->esp.seq = clib_host_to_net_u32 (sa0->seq);
                }
              else              /* unsupported ip4inip6, ip6inip4 */
                {
                  if (is_ip6)
                    vlib_node_increment_counter (vm,
                                                 dpdk_esp6_encrypt_node.index,
                                                 ESP_ENCRYPT_ERROR_NOSUP, 1);
                  else
                    vlib_node_increment_counter (vm,
                                                 dpdk_esp4_encrypt_node.index,
                                                 ESP_ENCRYPT_ERROR_NOSUP, 1);
                  to_next[0] = bi0;
                  to_next += 1;
                  n_left_to_next -= 1;
                  goto trace;
                }
              vnet_buffer (b0)->sw_if_index[VLIB_TX] = (u32) ~ 0;
            }
          else                  /* transport mode */
            {
              priv->next = DPDK_CRYPTO_INPUT_NEXT_INTERFACE_OUTPUT;
              rewrite_len = vnet_buffer (b0)->ip.save_rewrite_length;
              u16 adv = sizeof (esp_header_t) + iv_size + udp_encap_adv;
              vlib_buffer_advance (b0, -adv - rewrite_len);
              u8 *src = ((u8 *) ih0) - rewrite_len;
              u8 *dst = vlib_buffer_get_current (b0);
              oh0 = vlib_buffer_get_current (b0) + rewrite_len;

              if (is_ip6)
                {
                  orig_sz -= sizeof (ip6_header_t);
                  ih6_0 = (ip6_and_esp_header_t *) ih0;
                  next_hdr_type = ih6_0->ip6.protocol;
                  memmove (dst, src, rewrite_len + sizeof (ip6_header_t));
                  oh6_0 = (ip6_and_esp_header_t *) oh0;
                  oh6_0->ip6.protocol = IP_PROTOCOL_IPSEC_ESP;
                  esp0 = &oh6_0->esp;
                }
              else              /* ipv4 */
                {
                  u16 ip_size = ip4_header_bytes (&ih0->ip4);
                  orig_sz -= ip_size;
                  next_hdr_type = ih0->ip4.protocol;
                  memmove (dst, src, rewrite_len + ip_size);
                  oh0->ip4.protocol = IP_PROTOCOL_IPSEC_ESP;
                  esp0 = (esp_header_t *) (((u8 *) oh0) + ip_size);
                  if (ipsec_sa_is_set_UDP_ENCAP (sa0))
                    {
                      oh0->ip4.protocol = IP_PROTOCOL_UDP;
                      esp0 = (esp_header_t *)
                        (((u8 *) oh0) + ip_size + udp_encap_adv);
                    }
                  else
                    {
                      oh0->ip4.protocol = IP_PROTOCOL_IPSEC_ESP;
                      esp0 = (esp_header_t *) (((u8 *) oh0) + ip_size);
                    }
                }
              esp0->spi = clib_host_to_net_u32 (sa0->spi);
              esp0->seq = clib_host_to_net_u32 (sa0->seq);
            }

          if (ipsec_sa_is_set_UDP_ENCAP (sa0) && ouh0)
            {
              ouh0->udp.src_port = clib_host_to_net_u16 (UDP_DST_PORT_ipsec);
              ouh0->udp.dst_port = clib_host_to_net_u16 (UDP_DST_PORT_ipsec);
              ouh0->udp.checksum = 0;
            }
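          /* ESP trailer (RFC 4303): pad the payload plus the two trailer
           * bytes (pad length, next header) up to the cipher block boundary,
           * then leave room for an ICV of trunc_size bytes. */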
          ASSERT (is_pow2 (cipher_alg->boundary));
          u16 mask = cipher_alg->boundary - 1;
          u16 pad_payload_len = ((orig_sz + 2) + mask) & ~mask;
          u8 pad_bytes = pad_payload_len - 2 - orig_sz;

          u8 *padding =
            vlib_buffer_put_uninit (b0, pad_bytes + 2 + trunc_size);

          /* The extra pad bytes would be overwritten by the digest */
          if (pad_bytes)
            clib_memcpy_fast (padding, pad_data, 16);

          f0 = (esp_footer_t *) (padding + pad_bytes);
          f0->pad_length = pad_bytes;
          f0->next_header = next_hdr_type;

          if (is_ip6)
            {
              u16 len = b0->current_length - sizeof (ip6_header_t);
              oh6_0->ip6.payload_length =
                clib_host_to_net_u16 (len - rewrite_len);
            }
          else
            {
              oh0->ip4.length =
                clib_host_to_net_u16 (b0->current_length - rewrite_len);
              oh0->ip4.checksum = ip4_header_checksum (&oh0->ip4);
              if (ipsec_sa_is_set_UDP_ENCAP (sa0) && ouh0)
                {
                  ouh0->udp.length =
                    clib_host_to_net_u16 (clib_net_to_host_u16
                                          (ouh0->ip4.length) -
                                          ip4_header_bytes (&ouh0->ip4));
                }
            }

          b0->flags |= VLIB_BUFFER_TOTAL_LENGTH_VALID;

          /* mbuf packet starts at ESP header */
          mb0->data_len = vlib_buffer_get_tail (b0) - ((u8 *) esp0);
          mb0->pkt_len = vlib_buffer_get_tail (b0) - ((u8 *) esp0);
          mb0->data_off = ((void *) esp0) - mb0->buf_addr;

          u32 cipher_off, cipher_len, auth_len = 0;
          u32 *aad = NULL;

          u8 *digest = vlib_buffer_get_tail (b0) - trunc_size;
          u64 digest_paddr =
            mb0->buf_physaddr + digest - ((u8 *) mb0->buf_addr);

          if (!is_aead && cipher_alg->alg == RTE_CRYPTO_CIPHER_AES_CBC)
            {
              cipher_off = sizeof (esp_header_t);
              cipher_len = iv_size + pad_payload_len;
            }
          else                  /* CTR/GCM */
            {
              u32 *esp_iv = (u32 *) (esp0 + 1);
              esp_iv[0] = sa0->seq;
              esp_iv[1] = sa0->seq_hi;

              cipher_off = sizeof (esp_header_t) + iv_size;
              cipher_len = pad_payload_len;
            }

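          /* For AEAD modes the SPI and sequence number are not encrypted;
           * they are handed to the device as additional authenticated data. */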
          if (is_aead)
            {
              aad = (u32 *) priv->aad;
              aad[0] = clib_host_to_net_u32 (sa0->spi);
              aad[1] = clib_host_to_net_u32 (sa0->seq);

              /* aad[3] should always be 0 */
              if (PREDICT_FALSE (ipsec_sa_is_set_USE_ESN (sa0)))
                aad[2] = clib_host_to_net_u32 (sa0->seq_hi);
              else
                aad[2] = 0;
            }
          else
            {
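              /* Non-AEAD: the ICV covers the ESP header through the trailer.
               * With ESN, the high-order sequence bits are written at the
               * digest location so they are included in the ICV computation;
               * the device then overwrites them with the computed digest. */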
              auth_len =
                vlib_buffer_get_tail (b0) - ((u8 *) esp0) - trunc_size;
              if (ipsec_sa_is_set_USE_ESN (sa0))
                {
                  u32 *_digest = (u32 *) digest;
                  _digest[0] = clib_host_to_net_u32 (sa0->seq_hi);
                  auth_len += 4;
                }
            }

          crypto_op_setup (is_aead, mb0, op, session, cipher_off, cipher_len,
                           0, auth_len, (u8 *) aad, digest, digest_paddr);

        trace:
          if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
            {
              esp_encrypt_trace_t *tr =
                vlib_add_trace (vm, node, b0, sizeof (*tr));
              tr->crypto_alg = sa0->crypto_alg;
              tr->integ_alg = sa0->integ_alg;
              u8 *p = vlib_buffer_get_current (b0);
              if (!ipsec_sa_is_set_IS_TUNNEL (sa0))
                p += vnet_buffer (b0)->ip.save_rewrite_length;
              clib_memcpy_fast (tr->packet_data, p, sizeof (tr->packet_data));
            }
        }
      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }
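  /* Account the received packets and hand the assembled operations to the
   * crypto device queues; completed packets re-enter the graph through the
   * crypto input node and follow the next node recorded in priv->next. */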
  if (is_ip6)
    {
      vlib_node_increment_counter (vm, dpdk_esp6_encrypt_node.index,
                                   ESP_ENCRYPT_ERROR_RX_PKTS,
                                   from_frame->n_vectors);

      crypto_enqueue_ops (vm, cwm, dpdk_esp6_encrypt_node.index,
                          ESP_ENCRYPT_ERROR_ENQ_FAIL, numa);
    }
  else
    {
      vlib_node_increment_counter (vm, dpdk_esp4_encrypt_node.index,
                                   ESP_ENCRYPT_ERROR_RX_PKTS,
                                   from_frame->n_vectors);

      crypto_enqueue_ops (vm, cwm, dpdk_esp4_encrypt_node.index,
                          ESP_ENCRYPT_ERROR_ENQ_FAIL, numa);
    }

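  /* Return any pre-allocated crypto ops that were not consumed above. */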
  crypto_free_ops (numa, ops, cwm->ops + from_frame->n_vectors - ops);

  return from_frame->n_vectors;
}

VLIB_NODE_FN (dpdk_esp4_encrypt_node) (vlib_main_t * vm,
                                       vlib_node_runtime_t * node,
                                       vlib_frame_t * from_frame)
{
  return dpdk_esp_encrypt_inline (vm, node, from_frame, 0 /*is_ip6 */ , 0);
}

/* *INDENT-OFF* */
VLIB_REGISTER_NODE (dpdk_esp4_encrypt_node) = {
  .name = "dpdk-esp4-encrypt",
  .flags = VLIB_NODE_FLAG_IS_OUTPUT,
  .vector_size = sizeof (u32),
  .format_trace = format_esp_encrypt_trace,
  .n_errors = ARRAY_LEN (esp_encrypt_error_strings),
  .error_strings = esp_encrypt_error_strings,
  .n_next_nodes = 1,
  .next_nodes =
    {
      [ESP_ENCRYPT_NEXT_DROP] = "error-drop",
    }
};
/* *INDENT-ON* */

VLIB_NODE_FN (dpdk_esp6_encrypt_node) (vlib_main_t * vm,
                                       vlib_node_runtime_t * node,
                                       vlib_frame_t * from_frame)
{
  return dpdk_esp_encrypt_inline (vm, node, from_frame, 1 /*is_ip6 */ , 0);
}

/* *INDENT-OFF* */
VLIB_REGISTER_NODE (dpdk_esp6_encrypt_node) = {
  .name = "dpdk-esp6-encrypt",
  .flags = VLIB_NODE_FLAG_IS_OUTPUT,
  .vector_size = sizeof (u32),
  .format_trace = format_esp_encrypt_trace,
  .n_errors = ARRAY_LEN (esp_encrypt_error_strings),
  .error_strings = esp_encrypt_error_strings,
  .n_next_nodes = 1,
  .next_nodes =
    {
      [ESP_ENCRYPT_NEXT_DROP] = "error-drop",
    }
};
/* *INDENT-ON* */

VLIB_NODE_FN (dpdk_esp4_encrypt_tun_node) (vlib_main_t * vm,
                                           vlib_node_runtime_t * node,
                                           vlib_frame_t * from_frame)
{
  return dpdk_esp_encrypt_inline (vm, node, from_frame, 0 /*is_ip6 */ , 1);
}

/* *INDENT-OFF* */
VLIB_REGISTER_NODE (dpdk_esp4_encrypt_tun_node) = {
  .name = "dpdk-esp4-encrypt-tun",
  .flags = VLIB_NODE_FLAG_IS_OUTPUT,
  .vector_size = sizeof (u32),
  .format_trace = format_esp_encrypt_trace,
  .n_errors = ARRAY_LEN (esp_encrypt_error_strings),
  .error_strings = esp_encrypt_error_strings,
  .n_next_nodes = 1,
  .next_nodes =
    {
      [ESP_ENCRYPT_NEXT_DROP] = "error-drop",
    }
};

VNET_FEATURE_INIT (dpdk_esp4_encrypt_tun_feat_node, static) =
{
  .arc_name = "ip4-output",
  .node_name = "dpdk-esp4-encrypt-tun",
  .runs_before = VNET_FEATURES ("adj-midchain-tx"),
};
/* *INDENT-ON* */

VLIB_NODE_FN (dpdk_esp6_encrypt_tun_node) (vlib_main_t * vm,
                                           vlib_node_runtime_t * node,
                                           vlib_frame_t * from_frame)
{
  return dpdk_esp_encrypt_inline (vm, node, from_frame, 1 /*is_ip6 */ , 1);
}

/* *INDENT-OFF* */
VLIB_REGISTER_NODE (dpdk_esp6_encrypt_tun_node) = {
  .name = "dpdk-esp6-encrypt-tun",
  .flags = VLIB_NODE_FLAG_IS_OUTPUT,
  .vector_size = sizeof (u32),
  .format_trace = format_esp_encrypt_trace,
  .n_errors = ARRAY_LEN (esp_encrypt_error_strings),
  .error_strings = esp_encrypt_error_strings,
  .n_next_nodes = 1,
  .next_nodes =
    {
      [ESP_ENCRYPT_NEXT_DROP] = "error-drop",
    }
};

VNET_FEATURE_INIT (dpdk_esp6_encrypt_tun_feat_node, static) =
{
  .arc_name = "ip6-output",
  .node_name = "dpdk-esp6-encrypt-tun",
  .runs_before = VNET_FEATURES ("adj-midchain-tx"),
};
/* *INDENT-ON* */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */