/*
 * esp_encrypt.c : IPSec ESP encrypt node using DPDK Cryptodev
 *
 * Copyright (c) 2016 Intel and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <vnet/vnet.h>
#include <vnet/api_errno.h>
#include <vnet/ip/ip.h>

#include <vnet/ipsec/ipsec.h>
#include <dpdk/ipsec/ipsec.h>
#include <dpdk/ipsec/esp.h>
#include <dpdk/device/dpdk.h>
#include <dpdk/device/dpdk_priv.h>
#define foreach_esp_encrypt_next \
_(DROP, "error-drop") \
_(IP4_LOOKUP, "ip4-lookup") \
_(IP6_LOOKUP, "ip6-lookup") \
_(INTERFACE_OUTPUT, "interface-output")

#define _(v, s) ESP_ENCRYPT_NEXT_##v,
typedef enum
{
  foreach_esp_encrypt_next
#undef _
    ESP_ENCRYPT_N_NEXT,
} esp_encrypt_next_t;

#define foreach_esp_encrypt_error \
 _(RX_PKTS, "ESP pkts received") \
 _(SEQ_CYCLED, "sequence number cycled") \
 _(ENQ_FAIL, "Enqueue failed (buffer full)") \
 _(NO_CRYPTODEV, "Cryptodev not configured") \
 _(UNSUPPORTED, "Cipher/Auth not supported")

typedef enum
{
#define _(sym,str) ESP_ENCRYPT_ERROR_##sym,
  foreach_esp_encrypt_error
#undef _
    ESP_ENCRYPT_N_ERROR,
} esp_encrypt_error_t;

static char *esp_encrypt_error_strings[] = {
#define _(sym,string) string,
  foreach_esp_encrypt_error
#undef _
};
vlib_node_registration_t dpdk_esp_encrypt_node;
typedef struct
{
  u32 spi;
  u32 seq;
  ipsec_crypto_alg_t crypto_alg;
  ipsec_integ_alg_t integ_alg;
} esp_encrypt_trace_t;
/* packet trace format function */
static u8 *
format_esp_encrypt_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  esp_encrypt_trace_t *t = va_arg (*args, esp_encrypt_trace_t *);

  s = format (s, "esp: spi %u seq %u crypto %U integrity %U",
              t->spi, t->seq,
              format_ipsec_crypto_alg, t->crypto_alg,
              format_ipsec_integ_alg, t->integ_alg);
  return s;
}
static uword
dpdk_esp_encrypt_node_fn (vlib_main_t * vm,
                          vlib_node_runtime_t * node,
                          vlib_frame_t * from_frame)
{
  u32 n_left_from, *from, *to_next, next_index;
  ipsec_main_t *im = &ipsec_main;
  u32 thread_index = vlib_get_thread_index ();
  dpdk_crypto_main_t *dcm = &dpdk_crypto_main;
  dpdk_esp_main_t *em = &dpdk_esp_main;
  u32 i;

  from = vlib_frame_vector_args (from_frame);
  n_left_from = from_frame->n_vectors;
  if (PREDICT_FALSE (!dcm->workers_main))
    {
      /* Likely there are not enough cryptodevs, so drop frame */
      vlib_node_increment_counter (vm, dpdk_esp_encrypt_node.index,
                                   ESP_ENCRYPT_ERROR_NO_CRYPTODEV,
                                   n_left_from);
      vlib_buffer_free (vm, from, n_left_from);
      return n_left_from;
    }
  crypto_worker_main_t *cwm =
    vec_elt_at_index (dcm->workers_main, thread_index);
  u32 n_qps = vec_len (cwm->qp_data);
  struct rte_crypto_op **cops_to_enq[n_qps];
  u32 n_cop_qp[n_qps], *bi_to_enq[n_qps];

  for (i = 0; i < n_qps; i++)
    {
      bi_to_enq[i] = cwm->qp_data[i].bi;
      cops_to_enq[i] = cwm->qp_data[i].cops;
    }

  memset (n_cop_qp, 0, n_qps * sizeof (u32));

  crypto_alloc_cops ();

  next_index = ESP_ENCRYPT_NEXT_DROP;
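  /*
   * Packets are not handed to the cryptodev one at a time: crypto ops and
   * buffer indices are staged per queue pair in cops_to_enq[] / bi_to_enq[]
   * while the frame is walked, then pushed to the hardware in a single
   * rte_cryptodev_enqueue_burst () per queue pair after the main loop.
   */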
  while (n_left_from > 0)
    {
      u32 n_left_to_next;

      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      while (n_left_from > 0 && n_left_to_next > 0)
        {
          u32 bi0, next0;
          vlib_buffer_t *b0 = 0;
          u32 sa_index0;
          ipsec_sa_t *sa0;
          ip4_and_esp_header_t *ih0, *oh0 = 0;
          ip6_and_esp_header_t *ih6_0, *oh6_0 = 0;
          struct rte_mbuf *mb0 = 0;
          esp_footer_t *f0;
          u8 is_ipv6;
          u8 ip_hdr_size;
          u8 next_hdr_type;
          u8 transport_mode = 0;
          const int BLOCK_SIZE = 16;
          u32 iv_size;
          u16 orig_sz;
          crypto_sa_session_t *sa_sess;
          void *sess;
          struct rte_crypto_op *cop = 0;
          u16 qp_index;
          bi0 = from[0];
          from += 1;
          n_left_from -= 1;

          b0 = vlib_get_buffer (vm, bi0);
          sa_index0 = vnet_buffer (b0)->ipsec.sad_index;
          sa0 = pool_elt_at_index (im->sad, sa_index0);
          if (PREDICT_FALSE (esp_seq_advance (sa0)))
            {
              clib_warning ("sequence number counter has cycled SPI %u",
                            sa0->spi);
              vlib_node_increment_counter (vm, dpdk_esp_encrypt_node.index,
                                           ESP_ENCRYPT_ERROR_SEQ_CYCLED, 1);
              /* TODO: rekey SA; next_index still points at error-drop */
              to_next[0] = bi0;
              to_next += 1;
              n_left_to_next -= 1;
              goto trace;
            }
          sa0->total_data_size += b0->current_length;

          sa_sess = pool_elt_at_index (cwm->sa_sess_d[1], sa_index0);
          if (PREDICT_FALSE (!sa_sess->sess))
            {
              int ret = create_sym_sess (sa0, sa_sess, 1);

              if (PREDICT_FALSE (ret))
                {
                  to_next[0] = bi0;
                  to_next += 1;
                  n_left_to_next -= 1;
                  goto trace;
                }
            }
          qp_index = sa_sess->qp_index;
          sess = sa_sess->sess;

          ASSERT (vec_len (vec_elt (cwm->qp_data, qp_index).free_cops) > 0);
          cop = vec_pop (vec_elt (cwm->qp_data, qp_index).free_cops);
          ASSERT (cop->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED);

          cops_to_enq[qp_index][0] = cop;
          cops_to_enq[qp_index] += 1;
          n_cop_qp[qp_index] += 1;
          bi_to_enq[qp_index][0] = bi0;
          bi_to_enq[qp_index] += 1;
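          /*
           * adv is the (negative) buffer advance that opens room in front
           * of the current header for what this node prepends: a full
           * outer IP + ESP header in tunnel mode, or just an ESP header in
           * transport mode.  The IV is accounted for separately below.
           */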
          ssize_t adv;
          iv_size = em->esp_crypto_algs[sa0->crypto_alg].iv_len;
          ih0 = vlib_buffer_get_current (b0);
          orig_sz = b0->current_length;
          is_ipv6 = (ih0->ip4.ip_version_and_header_length & 0xF0) == 0x60;

          if (PREDICT_TRUE (sa0->is_tunnel))
            {
              if (PREDICT_TRUE (!is_ipv6))
                adv = -sizeof (ip4_and_esp_header_t);
              else
                adv = -sizeof (ip6_and_esp_header_t);
            }
          else
            {
              adv = -sizeof (esp_header_t);
              if (PREDICT_TRUE (!is_ipv6))
                orig_sz -= sizeof (ip4_header_t);
              else
                orig_sz -= sizeof (ip6_header_t);
            }
          /* transport mode: save the eth header before it is overwritten */
          if (PREDICT_FALSE (!sa0->is_tunnel))
            {
              ethernet_header_t *ieh0 = (ethernet_header_t *)
                ((u8 *) vlib_buffer_get_current (b0) -
                 sizeof (ethernet_header_t));
              ethernet_header_t *oeh0 =
                (ethernet_header_t *) ((u8 *) ieh0 + (adv - iv_size));
              clib_memcpy (oeh0, ieh0, sizeof (ethernet_header_t));
            }

          vlib_buffer_advance (b0, adv - iv_size);
          /* XXX IP6/ip4 and IP4/IP6 not supported, only IP4/IP4 and IP6/IP6 */
          if (PREDICT_FALSE (is_ipv6))
            {
              ih6_0 = (ip6_and_esp_header_t *) ih0;
              ip_hdr_size = sizeof (ip6_header_t);
              oh6_0 = vlib_buffer_get_current (b0);

              if (PREDICT_TRUE (sa0->is_tunnel))
                {
                  next_hdr_type = IP_PROTOCOL_IPV6;
                  oh6_0->ip6.ip_version_traffic_class_and_flow_label =
                    ih6_0->ip6.ip_version_traffic_class_and_flow_label;
                }
              else
                {
                  next_hdr_type = ih6_0->ip6.protocol;
                  memmove (oh6_0, ih6_0, sizeof (ip6_header_t));
                }

              oh6_0->ip6.protocol = IP_PROTOCOL_IPSEC_ESP;
              oh6_0->ip6.hop_limit = 254;
              oh6_0->esp.spi = clib_net_to_host_u32 (sa0->spi);
              oh6_0->esp.seq = clib_net_to_host_u32 (sa0->seq);
            }
          else
            {
              ip_hdr_size = sizeof (ip4_header_t);
              oh0 = vlib_buffer_get_current (b0);

              if (PREDICT_TRUE (sa0->is_tunnel))
                {
                  next_hdr_type = IP_PROTOCOL_IP_IN_IP;
                  oh0->ip4.tos = ih0->ip4.tos;
                }
              else
                {
                  next_hdr_type = ih0->ip4.protocol;
                  memmove (oh0, ih0, sizeof (ip4_header_t));
                }

              oh0->ip4.ip_version_and_header_length = 0x45;
              oh0->ip4.fragment_id = 0;
              oh0->ip4.flags_and_fragment_offset = 0;
              oh0->ip4.ttl = 254;
              oh0->ip4.protocol = IP_PROTOCOL_IPSEC_ESP;
              oh0->esp.spi = clib_net_to_host_u32 (sa0->spi);
              oh0->esp.seq = clib_net_to_host_u32 (sa0->seq);
            }
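          /*
           * The outer header is rebuilt from scratch in tunnel mode and
           * copied from the inner header in transport mode; ip4.length and
           * the header checksum cannot be filled in yet, so they are fixed
           * up below once the final padded length is known.
           */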
          if (PREDICT_TRUE
              (!is_ipv6 && sa0->is_tunnel && !sa0->is_tunnel_ip6))
            {
              oh0->ip4.src_address.as_u32 = sa0->tunnel_src_addr.ip4.as_u32;
              oh0->ip4.dst_address.as_u32 = sa0->tunnel_dst_addr.ip4.as_u32;

              /* in tunnel mode send it back to FIB */
              next0 = ESP_ENCRYPT_NEXT_IP4_LOOKUP;
              vnet_buffer (b0)->sw_if_index[VLIB_TX] = (u32) ~ 0;
            }
          else if (is_ipv6 && sa0->is_tunnel && sa0->is_tunnel_ip6)
            {
              oh6_0->ip6.src_address.as_u64[0] =
                sa0->tunnel_src_addr.ip6.as_u64[0];
              oh6_0->ip6.src_address.as_u64[1] =
                sa0->tunnel_src_addr.ip6.as_u64[1];
              oh6_0->ip6.dst_address.as_u64[0] =
                sa0->tunnel_dst_addr.ip6.as_u64[0];
              oh6_0->ip6.dst_address.as_u64[1] =
                sa0->tunnel_dst_addr.ip6.as_u64[1];

              /* in tunnel mode send it back to FIB */
              next0 = ESP_ENCRYPT_NEXT_IP6_LOOKUP;
              vnet_buffer (b0)->sw_if_index[VLIB_TX] = (u32) ~ 0;
            }
          else
            {
              next0 = ESP_ENCRYPT_NEXT_INTERFACE_OUTPUT;
              transport_mode = 1;
            }
          ASSERT (sa0->crypto_alg < IPSEC_CRYPTO_N_ALG);
          ASSERT (sa0->crypto_alg != IPSEC_CRYPTO_ALG_NONE);

          int blocks = 1 + (orig_sz + 1) / BLOCK_SIZE;
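          /*
           * blocks = ceil ((orig_sz + 2) / BLOCK_SIZE): room for the
           * payload plus the 2-byte ESP trailer, rounded up to whole
           * cipher blocks.  E.g. orig_sz = 60 gives blocks = 1 + 61/16 = 4
           * and pad_bytes = 4 * 16 - 2 - 60 = 2, filling 64 bytes exactly.
           */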
          /* pad packet in input buffer */
          u8 pad_bytes = BLOCK_SIZE * blocks - 2 - orig_sz;
          u8 *padding = vlib_buffer_get_current (b0) + b0->current_length;

          for (i = 0; i < pad_bytes; ++i)
            padding[i] = i + 1;

          f0 = vlib_buffer_get_current (b0) + b0->current_length + pad_bytes;
          f0->pad_length = pad_bytes;
          f0->next_header = next_hdr_type;
          b0->current_length += pad_bytes + 2 +
            em->esp_integ_algs[sa0->integ_alg].trunc_size;
          vnet_buffer (b0)->sw_if_index[VLIB_RX] =
            vnet_buffer (b0)->sw_if_index[VLIB_RX];
          b0->flags |= VLIB_BUFFER_TOTAL_LENGTH_VALID;
          struct rte_crypto_sym_op *sym_cop;
          sym_cop = (struct rte_crypto_sym_op *) (cop + 1);

          dpdk_cop_priv_t *priv = (dpdk_cop_priv_t *) (sym_cop + 1);

          vnet_buffer (b0)->unused[0] = next0;
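          /*
           * The vlib_buffer_t and the rte_mbuf overlay the same packet
           * buffer, so the mbuf length/offset fields below must be
           * re-synchronized from the vlib view before the mbuf is handed
           * to the cryptodev.
           */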
          mb0 = rte_mbuf_from_vlib_buffer (b0);
          mb0->data_len = b0->current_length;
          mb0->pkt_len = b0->current_length;
          mb0->data_off = RTE_PKTMBUF_HEADROOM + b0->current_data;

          rte_crypto_op_attach_sym_session (cop, sess);

          sym_cop->m_src = mb0;
          dpdk_gcm_cnt_blk *icb = &priv->cb;
          icb->salt = sa0->salt;
          icb->iv[0] = sa0->seq;
          icb->iv[1] = sa0->seq_hi;

          if (sa0->crypto_alg == IPSEC_CRYPTO_ALG_AES_GCM_128)
            {
              icb->cnt = clib_host_to_net_u32 (1);
              clib_memcpy (vlib_buffer_get_current (b0) + ip_hdr_size +
                           sizeof (esp_header_t), icb->iv, 8);
              sym_cop->cipher.data.offset =
                ip_hdr_size + sizeof (esp_header_t) + iv_size;
              sym_cop->cipher.data.length = BLOCK_SIZE * blocks;
              sym_cop->cipher.iv.length = 16;
            }
          else
            {
              sym_cop->cipher.data.offset =
                ip_hdr_size + sizeof (esp_header_t);
              sym_cop->cipher.data.length = BLOCK_SIZE * blocks + iv_size;
              sym_cop->cipher.iv.length = iv_size;
            }
          sym_cop->cipher.iv.data = (u8 *) icb;
          sym_cop->cipher.iv.phys_addr = cop->phys_addr + (uintptr_t) icb
            - (uintptr_t) cop;
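          /*
           * icb lives inside the crypto op's private area, so its physical
           * address can be derived from the op's phys_addr plus the same
           * offset the virtual pointers have.
           */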
          ASSERT (sa0->integ_alg < IPSEC_INTEG_N_ALG);
          ASSERT (sa0->integ_alg != IPSEC_INTEG_ALG_NONE);
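          /*
           * For AES-GCM the ICV comes from the cipher itself; the
           * "integrity" input is the AAD, which per RFC 4106 is the ESP
           * header: SPI plus 32-bit sequence number (8 bytes), or SPI plus
           * the 64-bit extended sequence number (12 bytes) with ESN.
           */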
          if (PREDICT_FALSE (sa0->integ_alg == IPSEC_INTEG_ALG_AES_GCM_128))
            {
              u8 *aad = priv->aad;
              clib_memcpy (aad, vlib_buffer_get_current (b0) + ip_hdr_size,
                           8);
              sym_cop->auth.aad.data = aad;
              sym_cop->auth.aad.phys_addr = cop->phys_addr +
                (uintptr_t) aad - (uintptr_t) cop;

              if (PREDICT_FALSE (sa0->use_esn))
                {
                  *((u32 *) & aad[8]) = sa0->seq_hi;
                  sym_cop->auth.aad.length = 12;
                }
              else
                {
                  sym_cop->auth.aad.length = 8;
                }
            }
          else
            {
              sym_cop->auth.data.offset = ip_hdr_size;
              sym_cop->auth.data.length = b0->current_length - ip_hdr_size
                - em->esp_integ_algs[sa0->integ_alg].trunc_size;

              if (PREDICT_FALSE (sa0->use_esn))
                {
                  u8 *payload_end =
                    vlib_buffer_get_current (b0) + b0->current_length;
                  *((u32 *) payload_end) = sa0->seq_hi;
                  sym_cop->auth.data.length += sizeof (sa0->seq_hi);
                }
            }
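          /*
           * With ESN and a classic HMAC integrity algorithm, the
           * high-order sequence word is written past the payload so it can
           * be covered by the ICV computation; current_length is not
           * increased, so it is never transmitted.
           */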
          sym_cop->auth.digest.data = vlib_buffer_get_current (b0) +
            b0->current_length -
            em->esp_integ_algs[sa0->integ_alg].trunc_size;
          sym_cop->auth.digest.phys_addr =
            rte_pktmbuf_mtophys_offset (mb0,
                                        b0->current_length -
                                        em->esp_integ_algs
                                        [sa0->integ_alg].trunc_size);
          sym_cop->auth.digest.length =
            em->esp_integ_algs[sa0->integ_alg].trunc_size;
          if (PREDICT_FALSE (is_ipv6))
            {
              oh6_0->ip6.payload_length =
                clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b0) -
                                      sizeof (ip6_header_t));
            }
          else
            {
              oh0->ip4.length =
                clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b0));
              oh0->ip4.checksum = ip4_header_checksum (&oh0->ip4);
            }
          if (transport_mode)
            vlib_buffer_advance (b0, -sizeof (ethernet_header_t));

        trace:
          if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
            {
              esp_encrypt_trace_t *tr =
                vlib_add_trace (vm, node, b0, sizeof (*tr));
              tr->spi = sa0->spi;
              tr->seq = sa0->seq - 1;
              tr->crypto_alg = sa0->crypto_alg;
              tr->integ_alg = sa0->integ_alg;
            }
        }
      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }
  vlib_node_increment_counter (vm, dpdk_esp_encrypt_node.index,
                               ESP_ENCRYPT_ERROR_RX_PKTS,
                               from_frame->n_vectors);
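  /*
   * Burst-enqueue the staged crypto ops, one burst per queue pair.  Ops
   * the device did not accept are recycled and their buffers freed
   * (dropped), counted against ENQ_FAIL.
   */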
  crypto_qp_data_t *qpd;
  vec_foreach_index (i, cwm->qp_data)
    {
      u32 enq;

      qpd = vec_elt_at_index (cwm->qp_data, i);
      enq = rte_cryptodev_enqueue_burst (qpd->dev_id, qpd->qp_id,
                                         qpd->cops, n_cop_qp[i]);
      qpd->inflights += enq;

      if (PREDICT_FALSE (enq < n_cop_qp[i]))
        {
          crypto_free_cop (qpd, &qpd->cops[enq], n_cop_qp[i] - enq);
          vlib_buffer_free (vm, &qpd->bi[enq], n_cop_qp[i] - enq);

          vlib_node_increment_counter (vm, dpdk_esp_encrypt_node.index,
                                       ESP_ENCRYPT_ERROR_ENQ_FAIL,
                                       n_cop_qp[i] - enq);
        }
    }

  return from_frame->n_vectors;
}
VLIB_REGISTER_NODE (dpdk_esp_encrypt_node) = {
  .function = dpdk_esp_encrypt_node_fn,
  .name = "dpdk-esp-encrypt",
  .flags = VLIB_NODE_FLAG_IS_OUTPUT,
  .vector_size = sizeof (u32),
  .format_trace = format_esp_encrypt_trace,
  .n_errors = ARRAY_LEN (esp_encrypt_error_strings),
  .error_strings = esp_encrypt_error_strings,
  .n_next_nodes = 1,
  .next_nodes = {
    [ESP_ENCRYPT_NEXT_DROP] = "error-drop",
  },
};

VLIB_NODE_FUNCTION_MULTIARCH (dpdk_esp_encrypt_node, dpdk_esp_encrypt_node_fn)
/*
 * ESP Encrypt Post Node
 */
#define foreach_esp_encrypt_post_error \
 _(PKTS, "ESP post pkts")

typedef enum
{
#define _(sym,str) ESP_ENCRYPT_POST_ERROR_##sym,
  foreach_esp_encrypt_post_error
#undef _
    ESP_ENCRYPT_POST_N_ERROR,
} esp_encrypt_post_error_t;

static char *esp_encrypt_post_error_strings[] = {
#define _(sym,string) string,
  foreach_esp_encrypt_post_error
#undef _
};
vlib_node_registration_t dpdk_esp_encrypt_post_node;
static u8 *
format_esp_encrypt_post_trace (u8 * s, va_list * args)
{
  return s;
}
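/*
 * dpdk-esp-encrypt-post runs once the crypto ops for a packet have
 * completed.  It only forwards each buffer to the next index that
 * dpdk-esp-encrypt stashed in vnet_buffer (b0)->unused[0] before the
 * burst enqueue.
 */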
static uword
dpdk_esp_encrypt_post_node_fn (vlib_main_t * vm,
                               vlib_node_runtime_t * node,
                               vlib_frame_t * from_frame)
{
  u32 n_left_from, *from, *to_next = 0, next_index;

  from = vlib_frame_vector_args (from_frame);
  n_left_from = from_frame->n_vectors;

  next_index = node->cached_next_index;
  while (n_left_from > 0)
    {
      u32 n_left_to_next;

      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      while (n_left_from > 0 && n_left_to_next > 0)
        {
          u32 bi0, next0;
          vlib_buffer_t *b0 = 0;

          bi0 = from[0];
          from += 1;
          n_left_from -= 1;

          b0 = vlib_get_buffer (vm, bi0);

          to_next[0] = bi0;
          to_next += 1;
          n_left_to_next -= 1;

          next0 = vnet_buffer (b0)->unused[0];

          vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                           to_next, n_left_to_next, bi0,
                                           next0);
        }
      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }
  vlib_node_increment_counter (vm, dpdk_esp_encrypt_post_node.index,
                               ESP_ENCRYPT_POST_ERROR_PKTS,
                               from_frame->n_vectors);

  return from_frame->n_vectors;
}
VLIB_REGISTER_NODE (dpdk_esp_encrypt_post_node) = {
  .function = dpdk_esp_encrypt_post_node_fn,
  .name = "dpdk-esp-encrypt-post",
  .vector_size = sizeof (u32),
  .format_trace = format_esp_encrypt_post_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .n_errors = ARRAY_LEN (esp_encrypt_post_error_strings),
  .error_strings = esp_encrypt_post_error_strings,
  .n_next_nodes = ESP_ENCRYPT_N_NEXT,
  .next_nodes = {
#define _(s,n) [ESP_ENCRYPT_NEXT_##s] = n,
    foreach_esp_encrypt_next
#undef _
  },
};

VLIB_NODE_FUNCTION_MULTIARCH (dpdk_esp_encrypt_post_node,
                              dpdk_esp_encrypt_post_node_fn)
/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */