/*
 * esp_decrypt.c : IPSec ESP Decrypt node using DPDK Cryptodev
 *
 * Copyright (c) 2017 Intel and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <vnet/vnet.h>
#include <vnet/api_errno.h>
#include <vnet/ip/ip.h>

#include <vnet/ipsec/ipsec.h>
#include <vnet/ipsec/esp.h>
#include <dpdk/ipsec/ipsec.h>
#include <dpdk/device/dpdk.h>
#include <dpdk/device/dpdk_priv.h>
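/*
 * Dispositions a packet can take when it leaves these nodes: drop on error,
 * or hand the decrypted packet to the IPv4/IPv6 input nodes once the post
 * node has restored the inner header.
 */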
#define foreach_esp_decrypt_next                \
_(DROP, "error-drop")                           \
_(IP4_INPUT, "ip4-input-no-checksum")           \
_(IP6_INPUT, "ip6-input")

#define _(v, s) ESP_DECRYPT_NEXT_##v,
typedef enum
{
  foreach_esp_decrypt_next
#undef _
    ESP_DECRYPT_N_NEXT,
} esp_decrypt_next_t;
#define foreach_esp_decrypt_error                                \
 _(RX_PKTS, "ESP pkts received")                                 \
 _(DECRYPTION_FAILED, "ESP decryption failed")                   \
 _(REPLAY, "SA replayed packet")                                 \
 _(NOT_IP, "Not IP packet (dropped)")                            \
 _(ENQ_FAIL, "Enqueue failed (buffer full)")                     \
 _(DISCARD, "Not enough crypto operations, discarding frame")    \
 _(BAD_LEN, "Invalid ciphertext length")                         \
 _(SESSION, "Failed to get crypto session")                      \
 _(NOSUP, "Cipher/Auth not supported")

typedef enum
{
#define _(sym,str) ESP_DECRYPT_ERROR_##sym,
  foreach_esp_decrypt_error
#undef _
    ESP_DECRYPT_N_ERROR,
} esp_decrypt_error_t;
static char *esp_decrypt_error_strings[] = {
#define _(sym,string) string,
  foreach_esp_decrypt_error
#undef _
};
extern vlib_node_registration_t dpdk_esp4_decrypt_node;
extern vlib_node_registration_t dpdk_esp6_decrypt_node;
typedef struct
{
  ipsec_crypto_alg_t crypto_alg;
  ipsec_integ_alg_t integ_alg;
  u8 packet_data[64];
} esp_decrypt_trace_t;
/* packet trace format function */
static u8 *
format_esp_decrypt_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  esp_decrypt_trace_t *t = va_arg (*args, esp_decrypt_trace_t *);
  u32 indent = format_get_indent (s);

  s = format (s, "cipher %U auth %U\n",
              format_ipsec_crypto_alg, t->crypto_alg,
              format_ipsec_integ_alg, t->integ_alg);
  s = format (s, "%U%U",
              format_white_space, indent, format_esp_header, t->packet_data);
  return s;
}
always_inline uword
dpdk_esp_decrypt_inline (vlib_main_t * vm,
                         vlib_node_runtime_t * node,
                         vlib_frame_t * from_frame, int is_ip6)
{
  u32 n_left_from, *from, *to_next, next_index;
  ipsec_main_t *im = &ipsec_main;
  u32 thread_idx = vlib_get_thread_index ();
  dpdk_crypto_main_t *dcm = &dpdk_crypto_main;
  crypto_resource_t *res = 0;
  ipsec_sa_t *sa0 = 0;
  crypto_alg_t *cipher_alg = 0, *auth_alg = 0;
  struct rte_cryptodev_sym_session *session = 0;
  u32 ret, last_sa_index = ~0;
  u8 numa = rte_socket_id ();
  u8 is_aead = 0;
  crypto_worker_main_t *cwm =
    vec_elt_at_index (dcm->workers_main, thread_idx);
  struct rte_crypto_op **ops = cwm->ops;

  from = vlib_frame_vector_args (from_frame);
  n_left_from = from_frame->n_vectors;
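  /* Reserve one crypto op per packet from the NUMA-local op pool; if the
   * pool is exhausted the whole frame is counted as DISCARD and dropped. */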
  ret = crypto_alloc_ops (numa, ops, n_left_from);
  if (ret)
    {
      if (is_ip6)
        vlib_node_increment_counter (vm, dpdk_esp6_decrypt_node.index,
                                     ESP_DECRYPT_ERROR_DISCARD, 1);
      else
        vlib_node_increment_counter (vm, dpdk_esp4_decrypt_node.index,
                                     ESP_DECRYPT_ERROR_DISCARD, 1);
      /* Discard whole frame */
      vlib_buffer_free (vm, from, n_left_from);
      return n_left_from;
    }

  next_index = ESP_DECRYPT_NEXT_DROP;
  while (n_left_from > 0)
    {
      u32 n_left_to_next;

      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      while (n_left_from > 0 && n_left_to_next > 0)
        {
          clib_error_t *error;
          u32 bi0, sa_index0, seq, iv_size;
          u8 trunc_size;
          vlib_buffer_t *b0;
          esp_header_t *esp0;
          struct rte_mbuf *mb0;
          struct rte_crypto_op *op;
          u16 res_idx;

          bi0 = from[0];
          from += 1;
          n_left_from -= 1;

          b0 = vlib_get_buffer (vm, bi0);
          mb0 = rte_mbuf_from_vlib_buffer (b0);
          esp0 = vlib_buffer_get_current (b0);

          CLIB_PREFETCH (esp0, sizeof (esp0[0]) + 16, LOAD);
          CLIB_PREFETCH (mb0, CLIB_CACHE_LINE_BYTES, STORE);

          op = ops[0];
          ops += 1;
          ASSERT (op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED);

          dpdk_op_priv_t *priv = crypto_op_get_priv (op);
          u16 op_len =
            sizeof (op[0]) + sizeof (op[0].sym[0]) + sizeof (priv[0]);
          CLIB_PREFETCH (op, op_len, STORE);

          sa_index0 = vnet_buffer (b0)->ipsec.sad_index;
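          /* Cipher/auth algorithm info, the crypto resource and the cryptodev
           * session are looked up only when the SA changes from the previous
           * packet in the frame. */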
          if (sa_index0 != last_sa_index)
            {
              sa0 = pool_elt_at_index (im->sad, sa_index0);

              cipher_alg =
                vec_elt_at_index (dcm->cipher_algs, sa0->crypto_alg);
              auth_alg = vec_elt_at_index (dcm->auth_algs, sa0->integ_alg);

              is_aead = (cipher_alg->type == RTE_CRYPTO_SYM_XFORM_AEAD);
              if (is_aead)
                auth_alg = cipher_alg;

              res_idx = get_resource (cwm, sa0);

              if (PREDICT_FALSE (res_idx == (u16) ~ 0))
                {
                  clib_warning ("unsupported SA by thread index %u",
                                thread_idx);
                  if (is_ip6)
                    vlib_node_increment_counter (vm,
                                                 dpdk_esp6_decrypt_node.index,
                                                 ESP_DECRYPT_ERROR_NOSUP, 1);
                  else
                    vlib_node_increment_counter (vm,
                                                 dpdk_esp4_decrypt_node.index,
                                                 ESP_DECRYPT_ERROR_NOSUP, 1);
                  to_next[0] = bi0;
                  to_next += 1;
                  n_left_to_next -= 1;
                  goto trace;
                }
              res = vec_elt_at_index (dcm->resource, res_idx);

              error = crypto_get_session (&session, sa_index0, res, cwm, 0);
              if (PREDICT_FALSE (error || !session))
                {
                  clib_warning ("failed to get crypto session");
                  if (is_ip6)
                    vlib_node_increment_counter (vm,
                                                 dpdk_esp6_decrypt_node.index,
                                                 ESP_DECRYPT_ERROR_SESSION,
                                                 1);
                  else
                    vlib_node_increment_counter (vm,
                                                 dpdk_esp4_decrypt_node.index,
                                                 ESP_DECRYPT_ERROR_SESSION,
                                                 1);
                  to_next[0] = bi0;
                  to_next += 1;
                  n_left_to_next -= 1;
                  goto trace;
                }

              last_sa_index = sa_index0;
            }
          /* anti-replay check */
          if (sa0->use_anti_replay)
            {
              int rv = 0;

              seq = clib_net_to_host_u32 (esp0->seq);

              if (PREDICT_TRUE (sa0->use_esn))
                rv = esp_replay_check_esn (sa0, seq);
              else
                rv = esp_replay_check (sa0, seq);

              if (PREDICT_FALSE (rv))
                {
                  clib_warning ("failed anti-replay check");
                  if (is_ip6)
                    vlib_node_increment_counter (vm,
                                                 dpdk_esp6_decrypt_node.index,
                                                 ESP_DECRYPT_ERROR_REPLAY, 1);
                  else
                    vlib_node_increment_counter (vm,
                                                 dpdk_esp4_decrypt_node.index,
                                                 ESP_DECRYPT_ERROR_REPLAY, 1);
                  to_next[0] = bi0;
                  to_next += 1;
                  n_left_to_next -= 1;
                  goto trace;
                }
            }
          if (is_ip6)
            priv->next = DPDK_CRYPTO_INPUT_NEXT_DECRYPT6_POST;
          else
            priv->next = DPDK_CRYPTO_INPUT_NEXT_DECRYPT4_POST;

          /* FIXME multi-seg */
          sa0->total_data_size += b0->current_length;

          res->ops[res->n_ops] = op;
          res->bi[res->n_ops] = bi0;
          res->n_ops += 1;

          /* Convert vlib buffer to mbuf */
          mb0->data_len = b0->current_length;
          mb0->pkt_len = b0->current_length;
          mb0->data_off = RTE_PKTMBUF_HEADROOM + b0->current_data;

          trunc_size = auth_alg->trunc_size;
          iv_size = cipher_alg->iv_len;

          /* Outer IP header has already been stripped */
          u16 payload_len =
            b0->current_length - sizeof (esp_header_t) - iv_size - trunc_size;

          ASSERT (payload_len >= 4);
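          /* payload_len covers the encrypted payload plus ESP trailer only:
           * the ESP header, IV and ICV have been subtracted above.  It must
           * be a multiple of the cipher block boundary. */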
          if (payload_len & (cipher_alg->boundary - 1))
            {
              clib_warning ("payload %u not multiple of %d\n",
                            payload_len, cipher_alg->boundary);
              if (is_ip6)
                vlib_node_increment_counter (vm, dpdk_esp6_decrypt_node.index,
                                             ESP_DECRYPT_ERROR_BAD_LEN, 1);
              else
                vlib_node_increment_counter (vm, dpdk_esp4_decrypt_node.index,
                                             ESP_DECRYPT_ERROR_BAD_LEN, 1);
              res->n_ops -= 1;
              to_next[0] = bi0;
              to_next += 1;
              n_left_to_next -= 1;
              goto trace;
            }
          u32 cipher_off, cipher_len;
          u32 auth_len = 0;
          u8 *aad = NULL;

          u8 *iv = (u8 *) (esp0 + 1);

          dpdk_gcm_cnt_blk *icb = &priv->cb;

          cipher_off = sizeof (esp_header_t) + iv_size;
          cipher_len = payload_len;

          u8 *digest = vlib_buffer_get_tail (b0) - trunc_size;
          u64 digest_paddr =
            mb0->buf_physaddr + digest - ((u8 *) mb0->buf_addr);
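          /* AES-CBC carries a full 16-byte IV in the packet; CTR/GCM modes
           * instead build the counter block from the SA salt plus the 8-byte
           * explicit IV taken from the packet. */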
          if (!is_aead && cipher_alg->alg == RTE_CRYPTO_CIPHER_AES_CBC)
            clib_memcpy_fast (icb, iv, 16);
          else                  /* CTR/GCM */
            {
              u32 *_iv = (u32 *) iv;

              crypto_set_icb (icb, sa0->salt, _iv[0], _iv[1]);
            }
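          /* For AEAD (GCM) the AAD is the ESP header itself: SPI plus 32-bit
           * sequence number, extended with seq_hi when ESN is enabled.  For
           * plain cipher+auth, the ESP header, IV and payload are
           * authenticated instead, and with ESN the ICV is saved aside in the
           * op private data so seq_hi can be appended to the authenticated
           * data. */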
          if (is_aead)
            {
              aad = priv->aad;
              u32 *_aad = (u32 *) aad;
              clib_memcpy_fast (aad, esp0, 8);

              /* _aad[3] should always be 0 */
              if (PREDICT_FALSE (sa0->use_esn))
                _aad[2] = clib_host_to_net_u32 (sa0->seq_hi);
              else
                _aad[2] = 0;
            }
          else
            {
              auth_len = sizeof (esp_header_t) + iv_size + payload_len;

              if (sa0->use_esn)
                {
                  clib_memcpy_fast (priv->icv, digest, trunc_size);
                  u32 *_digest = (u32 *) digest;
                  _digest[0] = clib_host_to_net_u32 (sa0->seq_hi);
                  auth_len += sizeof (sa0->seq_hi);

                  digest = priv->icv;
                  digest_paddr =
                    op->phys_addr + (uintptr_t) priv->icv - (uintptr_t) op;
                }
            }
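          /* Fill in the rte_crypto_op: cipher and auth regions are expressed
           * as offsets into the mbuf.  The op is handed to the cryptodev by
           * crypto_enqueue_ops() once the whole frame has been walked. */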
          crypto_op_setup (is_aead, mb0, op, session, cipher_off, cipher_len,
                           0, auth_len, aad, digest, digest_paddr);
        trace:
          if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
            {
              esp_decrypt_trace_t *tr =
                vlib_add_trace (vm, node, b0, sizeof (*tr));
              tr->crypto_alg = sa0->crypto_alg;
              tr->integ_alg = sa0->integ_alg;
              clib_memcpy_fast (tr->packet_data, vlib_buffer_get_current (b0),
                                sizeof (esp_header_t));
            }
        }
      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }
  if (is_ip6)
    {
      vlib_node_increment_counter (vm, dpdk_esp6_decrypt_node.index,
                                   ESP_DECRYPT_ERROR_RX_PKTS,
                                   from_frame->n_vectors);

      crypto_enqueue_ops (vm, cwm, 0, dpdk_esp6_decrypt_node.index,
                          ESP_DECRYPT_ERROR_ENQ_FAIL, numa);
    }
  else
    {
      vlib_node_increment_counter (vm, dpdk_esp4_decrypt_node.index,
                                   ESP_DECRYPT_ERROR_RX_PKTS,
                                   from_frame->n_vectors);

      crypto_enqueue_ops (vm, cwm, 0, dpdk_esp4_decrypt_node.index,
                          ESP_DECRYPT_ERROR_ENQ_FAIL, numa);
    }

  crypto_free_ops (numa, ops, cwm->ops + from_frame->n_vectors - ops);

  return from_frame->n_vectors;
}
VLIB_NODE_FN (dpdk_esp4_decrypt_node) (vlib_main_t * vm,
                                       vlib_node_runtime_t * node,
                                       vlib_frame_t * from_frame)
{
  return dpdk_esp_decrypt_inline (vm, node, from_frame, 0 /*is_ip6 */ );
}
VLIB_REGISTER_NODE (dpdk_esp4_decrypt_node) = {
  .name = "dpdk-esp4-decrypt",
  .vector_size = sizeof (u32),
  .format_trace = format_esp_decrypt_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = ARRAY_LEN(esp_decrypt_error_strings),
  .error_strings = esp_decrypt_error_strings,

  .n_next_nodes = ESP_DECRYPT_N_NEXT,
  .next_nodes = {
#define _(s,n) [ESP_DECRYPT_NEXT_##s] = n,
    foreach_esp_decrypt_next
#undef _
  },
};
VLIB_NODE_FN (dpdk_esp6_decrypt_node) (vlib_main_t * vm,
                                       vlib_node_runtime_t * node,
                                       vlib_frame_t * from_frame)
{
  return dpdk_esp_decrypt_inline (vm, node, from_frame, 1 /*is_ip6 */ );
}
VLIB_REGISTER_NODE (dpdk_esp6_decrypt_node) = {
  .name = "dpdk-esp6-decrypt",
  .vector_size = sizeof (u32),
  .format_trace = format_esp_decrypt_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = ARRAY_LEN(esp_decrypt_error_strings),
  .error_strings = esp_decrypt_error_strings,

  .n_next_nodes = ESP_DECRYPT_N_NEXT,
  .next_nodes = {
#define _(s,n) [ESP_DECRYPT_NEXT_##s] = n,
    foreach_esp_decrypt_next
#undef _
  },
};
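/*
 * Post-decrypt nodes: dpdk-crypto-input hands packets back to these nodes
 * after the cryptodev has decrypted and verified them.  They advance the
 * replay window, strip the ESP header/trailer and restore (transport mode)
 * or expose (tunnel mode) the inner IP header.
 */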
#define foreach_esp_decrypt_post_error        \
 _(PKTS, "ESP post pkts")

typedef enum
{
#define _(sym,str) ESP_DECRYPT_POST_ERROR_##sym,
  foreach_esp_decrypt_post_error
#undef _
    ESP_DECRYPT_POST_N_ERROR,
} esp_decrypt_post_error_t;
static char *esp_decrypt_post_error_strings[] = {
#define _(sym,string) string,
  foreach_esp_decrypt_post_error
#undef _
};
extern vlib_node_registration_t dpdk_esp4_decrypt_post_node;
extern vlib_node_registration_t dpdk_esp6_decrypt_post_node;
static u8 *
format_esp_decrypt_post_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  esp_decrypt_trace_t *t = va_arg (*args, esp_decrypt_trace_t *);
  u32 indent = format_get_indent (s);

  s = format (s, "cipher %U auth %U\n",
              format_ipsec_crypto_alg, t->crypto_alg,
              format_ipsec_integ_alg, t->integ_alg);

  ip4_header_t *ih4 = (ip4_header_t *) t->packet_data;
  if ((ih4->ip_version_and_header_length & 0xF0) == 0x60)
    s =
      format (s, "%U%U", format_white_space, indent, format_ip6_header, ih4);
  else
    s =
      format (s, "%U%U", format_white_space, indent, format_ip4_header, ih4);
  return s;
}
always_inline uword
dpdk_esp_decrypt_post_inline (vlib_main_t * vm,
                              vlib_node_runtime_t * node,
                              vlib_frame_t * from_frame, int is_ip6)
{
  u32 n_left_from, *from, *to_next = 0, next_index;
  ipsec_sa_t *sa0;
  u32 sa_index0 = ~0;
  ipsec_main_t *im = &ipsec_main;
  dpdk_crypto_main_t *dcm = &dpdk_crypto_main;

  from = vlib_frame_vector_args (from_frame);
  n_left_from = from_frame->n_vectors;

  next_index = node->cached_next_index;
  while (n_left_from > 0)
    {
      u32 n_left_to_next;

      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      while (n_left_from > 0 && n_left_to_next > 0)
        {
          esp_footer_t *f0;
          u32 bi0, iv_size, next0;
          vlib_buffer_t *b0 = 0;
          ip4_header_t *ih4 = 0, *oh4 = 0;
          ip6_header_t *ih6 = 0, *oh6 = 0;
          crypto_alg_t *cipher_alg, *auth_alg;
          esp_header_t *esp0;
          u8 trunc_size, is_aead;
          u16 udp_encap_adv = 0;

          next0 = ESP_DECRYPT_NEXT_DROP;

          bi0 = from[0];
          from += 1;
          n_left_from -= 1;
          n_left_to_next -= 1;

          b0 = vlib_get_buffer (vm, bi0);
          esp0 = vlib_buffer_get_current (b0);

          sa_index0 = vnet_buffer (b0)->ipsec.sad_index;
          sa0 = pool_elt_at_index (im->sad, sa_index0);

          to_next[0] = bi0;
          to_next += 1;

          cipher_alg = vec_elt_at_index (dcm->cipher_algs, sa0->crypto_alg);
          auth_alg = vec_elt_at_index (dcm->auth_algs, sa0->integ_alg);
          is_aead = cipher_alg->type == RTE_CRYPTO_SYM_XFORM_AEAD;
          if (is_aead)
            auth_alg = cipher_alg;

          trunc_size = auth_alg->trunc_size;

          iv_size = cipher_alg->iv_len;
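          /* Decryption and integrity verification have already succeeded in
           * the cryptodev, so it is now safe to advance the anti-replay
           * window for this sequence number. */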
          if (sa0->use_anti_replay)
            {
              u32 seq;
              seq = clib_host_to_net_u32 (esp0->seq);
              if (PREDICT_TRUE (sa0->use_esn))
                esp_replay_advance_esn (sa0, seq);
              else
                esp_replay_advance (sa0, seq);
            }
          /* if UDP encapsulation is used adjust the address of the IP header */
          if (sa0->udp_encap && (b0->flags & VNET_BUFFER_F_IS_IP4))
            udp_encap_adv = sizeof (udp_header_t);

          if (b0->flags & VNET_BUFFER_F_IS_IP4)
            ih4 = (ip4_header_t *)
              ((u8 *) esp0 - udp_encap_adv - sizeof (ip4_header_t));
          else
            ih4 = (ip4_header_t *) ((u8 *) esp0 - sizeof (ip6_header_t));
          vlib_buffer_advance (b0, sizeof (esp_header_t) + iv_size);
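          /* The ESP trailer sits at the end of the decrypted payload:
           * padding, pad_length and next_header, followed by the ICV
           * (trunc_size bytes).  Trim all of it from the buffer length. */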
          b0->flags |= VLIB_BUFFER_TOTAL_LENGTH_VALID;
          f0 = (esp_footer_t *) (vlib_buffer_get_tail (b0) - trunc_size - 2);
          b0->current_length -= (f0->pad_length + trunc_size + 2);
#if 0
          /* check padding */
          const u8 *padding = vlib_buffer_get_tail (b0);
          if (PREDICT_FALSE (memcmp (padding, pad_data, f0->pad_length)))
            {
              clib_warning ("bad padding");
              vlib_node_increment_counter (vm, dpdk_esp_decrypt_node.index,
                                           ESP_DECRYPT_ERROR_DECRYPTION_FAILED,
                                           1);
              goto trace;
            }
#endif
          if (sa0->is_tunnel)
            {
              if (f0->next_header == IP_PROTOCOL_IP_IN_IP)
                next0 = ESP_DECRYPT_NEXT_IP4_INPUT;
              else if (sa0->is_tunnel_ip6
                       && f0->next_header == IP_PROTOCOL_IPV6)
                next0 = ESP_DECRYPT_NEXT_IP6_INPUT;
              else
                {
                  clib_warning ("next header: 0x%x", f0->next_header);
                  if (is_ip6)
                    vlib_node_increment_counter (vm,
                                                 dpdk_esp6_decrypt_node.index,
                                                 ESP_DECRYPT_ERROR_DECRYPTION_FAILED,
                                                 1);
                  else
                    vlib_node_increment_counter (vm,
                                                 dpdk_esp4_decrypt_node.index,
                                                 ESP_DECRYPT_ERROR_DECRYPTION_FAILED,
                                                 1);
                  goto trace;
                }
            }
          else                  /* transport mode */
            {
              if ((ih4->ip_version_and_header_length & 0xF0) == 0x40)
                {
                  u16 ih4_len = ip4_header_bytes (ih4);
                  vlib_buffer_advance (b0, -ih4_len - udp_encap_adv);
                  next0 = ESP_DECRYPT_NEXT_IP4_INPUT;

                  oh4 = vlib_buffer_get_current (b0);
                  memmove (oh4, ih4, ih4_len);
                  oh4->protocol = f0->next_header;
                  oh4->length = clib_host_to_net_u16 (b0->current_length);
                  oh4->checksum = ip4_header_checksum (oh4);
                }
              else if ((ih4->ip_version_and_header_length & 0xF0) == 0x60)
                {
                  ih6 = (ip6_header_t *) ih4;
                  vlib_buffer_advance (b0, -sizeof (ip6_header_t));
                  oh6 = vlib_buffer_get_current (b0);
                  memmove (oh6, ih6, sizeof (ip6_header_t));

                  next0 = ESP_DECRYPT_NEXT_IP6_INPUT;
                  oh6->protocol = f0->next_header;
                  u16 len = b0->current_length - sizeof (ip6_header_t);
                  oh6->payload_length = clib_host_to_net_u16 (len);
                }
              else
                {
                  clib_warning ("next header: 0x%x", f0->next_header);
                  if (is_ip6)
                    vlib_node_increment_counter (vm,
                                                 dpdk_esp6_decrypt_node.index,
                                                 ESP_DECRYPT_ERROR_DECRYPTION_FAILED,
                                                 1);
                  else
                    vlib_node_increment_counter (vm,
                                                 dpdk_esp4_decrypt_node.index,
                                                 ESP_DECRYPT_ERROR_DECRYPTION_FAILED,
                                                 1);
                  goto trace;
                }
            }
          vnet_buffer (b0)->sw_if_index[VLIB_TX] = (u32) ~ 0;

        trace:
          if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
            {
              esp_decrypt_trace_t *tr =
                vlib_add_trace (vm, node, b0, sizeof (*tr));
              tr->crypto_alg = sa0->crypto_alg;
              tr->integ_alg = sa0->integ_alg;
              ih4 = vlib_buffer_get_current (b0);
              clib_memcpy_fast (tr->packet_data, ih4, sizeof (ip6_header_t));
            }
          vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                           to_next, n_left_to_next, bi0,
                                           next0);
        }
      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }
  if (is_ip6)
    vlib_node_increment_counter (vm, dpdk_esp6_decrypt_post_node.index,
                                 ESP_DECRYPT_POST_ERROR_PKTS,
                                 from_frame->n_vectors);
  else
    vlib_node_increment_counter (vm, dpdk_esp4_decrypt_post_node.index,
                                 ESP_DECRYPT_POST_ERROR_PKTS,
                                 from_frame->n_vectors);

  return from_frame->n_vectors;
}
VLIB_NODE_FN (dpdk_esp4_decrypt_post_node) (vlib_main_t * vm,
                                            vlib_node_runtime_t * node,
                                            vlib_frame_t * from_frame)
{
  return dpdk_esp_decrypt_post_inline (vm, node, from_frame, 0 /*is_ip6 */ );
}
VLIB_REGISTER_NODE (dpdk_esp4_decrypt_post_node) = {
  .name = "dpdk-esp4-decrypt-post",
  .vector_size = sizeof (u32),
  .format_trace = format_esp_decrypt_post_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = ARRAY_LEN(esp_decrypt_post_error_strings),
  .error_strings = esp_decrypt_post_error_strings,

  .n_next_nodes = ESP_DECRYPT_N_NEXT,
  .next_nodes = {
#define _(s,n) [ESP_DECRYPT_NEXT_##s] = n,
    foreach_esp_decrypt_next
#undef _
  },
};
VLIB_NODE_FN (dpdk_esp6_decrypt_post_node) (vlib_main_t * vm,
                                            vlib_node_runtime_t * node,
                                            vlib_frame_t * from_frame)
{
  return dpdk_esp_decrypt_post_inline (vm, node, from_frame, 1 /*is_ip6 */ );
}
VLIB_REGISTER_NODE (dpdk_esp6_decrypt_post_node) = {
  .name = "dpdk-esp6-decrypt-post",
  .vector_size = sizeof (u32),
  .format_trace = format_esp_decrypt_post_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = ARRAY_LEN(esp_decrypt_post_error_strings),
  .error_strings = esp_decrypt_post_error_strings,

  .n_next_nodes = ESP_DECRYPT_N_NEXT,
  .next_nodes = {
#define _(s,n) [ESP_DECRYPT_NEXT_##s] = n,
    foreach_esp_decrypt_next
#undef _
  },
};
/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */