/*
 * esp_encrypt.c : IPSec ESP encrypt node using DPDK Cryptodev
 *
 * Copyright (c) 2016 Intel and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <vnet/vnet.h>
#include <vnet/api_errno.h>
#include <vnet/ip/ip.h>

#include <vnet/ipsec/ipsec.h>
#include <dpdk/ipsec/ipsec.h>
#include <dpdk/ipsec/esp.h>
#include <dpdk/device/dpdk.h>
#include <dpdk/device/dpdk_priv.h>
#define foreach_esp_encrypt_next \
_(DROP, "error-drop") \
_(IP4_LOOKUP, "ip4-lookup") \
_(IP6_LOOKUP, "ip6-lookup") \
_(INTERFACE_OUTPUT, "interface-output")
#define _(v, s) ESP_ENCRYPT_NEXT_##v,
typedef enum
{
  foreach_esp_encrypt_next
#undef _
    ESP_ENCRYPT_N_NEXT,
} esp_encrypt_next_t;
#define foreach_esp_encrypt_error \
 _(RX_PKTS, "ESP pkts received") \
 _(SEQ_CYCLED, "sequence number cycled") \
 _(ENQ_FAIL, "Enqueue failed (buffer full)") \
 _(NO_CRYPTODEV, "Cryptodev not configured")
typedef enum
{
#define _(sym,str) ESP_ENCRYPT_ERROR_##sym,
  foreach_esp_encrypt_error
#undef _
    ESP_ENCRYPT_N_ERROR,
} esp_encrypt_error_t;
static char *esp_encrypt_error_strings[] = {
#define _(sym,string) string,
  foreach_esp_encrypt_error
#undef _
};
vlib_node_registration_t dpdk_esp_encrypt_node;
typedef struct
{
  u32 spi;
  u32 seq;
  ipsec_crypto_alg_t crypto_alg;
  ipsec_integ_alg_t integ_alg;
} esp_encrypt_trace_t;
/* packet trace format function */
static u8 *
format_esp_encrypt_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  esp_encrypt_trace_t *t = va_arg (*args, esp_encrypt_trace_t *);

  s = format (s, "esp: spi %u seq %u crypto %U integrity %U",
              t->spi, t->seq,
              format_ipsec_crypto_alg, t->crypto_alg,
              format_ipsec_integ_alg, t->integ_alg);
  return s;
}
static uword
dpdk_esp_encrypt_node_fn (vlib_main_t * vm,
                          vlib_node_runtime_t * node,
                          vlib_frame_t * from_frame)
{
  u32 n_left_from, *from, *to_next, next_index;
  ipsec_main_t *im = &ipsec_main;
  u32 thread_index = vlib_get_thread_index ();
  dpdk_crypto_main_t *dcm = &dpdk_crypto_main;
  dpdk_esp_main_t *em = &dpdk_esp_main;
  u32 i;
  from = vlib_frame_vector_args (from_frame);
  n_left_from = from_frame->n_vectors;

  crypto_worker_main_t *cwm =
    vec_elt_at_index (dcm->workers_main, thread_index);
  u32 n_qps = vec_len (cwm->qp_data);
  struct rte_crypto_op **cops_to_enq[n_qps];
  u32 n_cop_qp[n_qps], *bi_to_enq[n_qps];
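
  /*
   * Ops are batched per cryptodev queue pair: each packet appends its
   * crypto op and buffer index to the arrays of its SA's queue pair, and
   * the whole batch is pushed with one rte_cryptodev_enqueue_burst per
   * queue pair once the frame has been walked.
   */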
  for (i = 0; i < n_qps; i++)
    {
      bi_to_enq[i] = cwm->qp_data[i].bi;
      cops_to_enq[i] = cwm->qp_data[i].cops;
    }

  memset (n_cop_qp, 0, n_qps * sizeof (u32));
  crypto_alloc_cops ();

  next_index = ESP_ENCRYPT_NEXT_DROP;

  while (n_left_from > 0)
    {
      u32 n_left_to_next;

      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      while (n_left_from > 0 && n_left_to_next > 0)
        {
          u32 bi0, next0, sa_index0;
          vlib_buffer_t *b0 = 0;
          ipsec_sa_t *sa0;
          ip4_and_esp_header_t *ih0, *oh0 = 0;
          ip6_and_esp_header_t *ih6_0, *oh6_0 = 0;
          struct rte_mbuf *mb0 = 0;
          esp_footer_t *f0;
          u8 is_ipv6, ip_hdr_size, next_hdr_type, trunc_size;
          u8 transport_mode = 0;
          const int BLOCK_SIZE = 16;
          u32 iv_size;
          u16 orig_sz, qp_index;
          crypto_sa_session_t *sa_sess;
          void *sess;
          struct rte_crypto_op *cop = 0;
          bi0 = from[0];
          from += 1;
          n_left_from -= 1;

          b0 = vlib_get_buffer (vm, bi0);
          sa_index0 = vnet_buffer (b0)->ipsec.sad_index;
          sa0 = pool_elt_at_index (im->sad, sa_index0);
          if (PREDICT_FALSE (esp_seq_advance (sa0)))
            {
              clib_warning ("sequence number counter has cycled SPI %u",
                            sa0->spi);
              vlib_node_increment_counter (vm, dpdk_esp_encrypt_node.index,
                                           ESP_ENCRYPT_ERROR_SEQ_CYCLED, 1);
              /* drop the packet; the SA would need a rekey to continue */
              to_next[0] = bi0;
              to_next += 1;
              n_left_to_next -= 1;
              goto trace;
            }

          sa0->total_data_size += b0->current_length;
          sa_sess = pool_elt_at_index (cwm->sa_sess_d[1], sa_index0);
          if (PREDICT_FALSE (!sa_sess->sess))
            {
              int ret = create_sym_sess (sa0, sa_sess, 1);

              if (PREDICT_FALSE (ret))
                {
                  to_next[0] = bi0;
                  to_next += 1;
                  n_left_to_next -= 1;
                  goto trace;
                }
            }
          qp_index = sa_sess->qp_index;
          sess = sa_sess->sess;

          ASSERT (vec_len (vec_elt (cwm->qp_data, qp_index).free_cops) > 0);
          cop = vec_pop (vec_elt (cwm->qp_data, qp_index).free_cops);
          ASSERT (cop->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED);

          cops_to_enq[qp_index][0] = cop;
          cops_to_enq[qp_index] += 1;
          n_cop_qp[qp_index] += 1;
          bi_to_enq[qp_index][0] = bi0;
          bi_to_enq[qp_index] += 1;
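
          /* ops and buffer indices are staged in lockstep per queue pair;
             if the burst enqueue at the end of the frame comes up short,
             the unsent tails of both arrays are reclaimed together */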
          ssize_t adv;
          iv_size = em->esp_crypto_algs[sa0->crypto_alg].iv_len;
          if (sa0->crypto_alg == IPSEC_CRYPTO_ALG_AES_GCM_128)
            trunc_size = 16;    /* GCM ICV */
          else
            trunc_size = em->esp_integ_algs[sa0->integ_alg].trunc_size;
          ih0 = vlib_buffer_get_current (b0);
          orig_sz = b0->current_length;
          is_ipv6 = (ih0->ip4.ip_version_and_header_length & 0xF0) == 0x60;
          if (PREDICT_TRUE (sa0->is_tunnel))
            {
              if (PREDICT_TRUE (!is_ipv6))
                adv = -sizeof (ip4_and_esp_header_t);
              else
                adv = -sizeof (ip6_and_esp_header_t);
            }
          else
            {
              adv = -sizeof (esp_header_t);
              if (PREDICT_TRUE (!is_ipv6))
                orig_sz -= sizeof (ip4_header_t);
              else
                orig_sz -= sizeof (ip6_header_t);
            }
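
          /*
           * adv is negative: the vlib_buffer_advance (b0, adv - iv_size)
           * below moves current_data backwards to open room in front of
           * the payload for the ESP header and IV (plus a fresh outer IP
           * header in tunnel mode).
           */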
          /* transport mode: save the ethernet header before it is
             overwritten */
          if (PREDICT_FALSE (!sa0->is_tunnel))
            {
              ethernet_header_t *ieh0 = (ethernet_header_t *)
                ((u8 *) vlib_buffer_get_current (b0) -
                 sizeof (ethernet_header_t));
              ethernet_header_t *oeh0 =
                (ethernet_header_t *) ((u8 *) ieh0 + (adv - iv_size));
              clib_memcpy (oeh0, ieh0, sizeof (ethernet_header_t));
            }
          vlib_buffer_advance (b0, adv - iv_size);

          /* XXX IP6/IP4 and IP4/IP6 tunnels not supported, only IP4/IP4
             and IP6/IP6 */
          if (PREDICT_FALSE (is_ipv6))
            {
              ih6_0 = (ip6_and_esp_header_t *) ih0;
              ip_hdr_size = sizeof (ip6_header_t);
              oh6_0 = vlib_buffer_get_current (b0);

              if (PREDICT_TRUE (sa0->is_tunnel))
                {
                  next_hdr_type = IP_PROTOCOL_IPV6;
                  oh6_0->ip6.ip_version_traffic_class_and_flow_label =
                    ih6_0->ip6.ip_version_traffic_class_and_flow_label;
                }
              else
                {
                  next_hdr_type = ih6_0->ip6.protocol;
                  memmove (oh6_0, ih6_0, sizeof (ip6_header_t));
                }

              oh6_0->ip6.protocol = IP_PROTOCOL_IPSEC_ESP;
              oh6_0->ip6.hop_limit = 254;
              oh6_0->esp.spi = clib_net_to_host_u32 (sa0->spi);
              oh6_0->esp.seq = clib_net_to_host_u32 (sa0->seq);
            }
          else
            {
              ip_hdr_size = sizeof (ip4_header_t);
              oh0 = vlib_buffer_get_current (b0);

              if (PREDICT_TRUE (sa0->is_tunnel))
                {
                  next_hdr_type = IP_PROTOCOL_IP_IN_IP;
                  oh0->ip4.tos = ih0->ip4.tos;
                }
              else
                {
                  next_hdr_type = ih0->ip4.protocol;
                  memmove (oh0, ih0, sizeof (ip4_header_t));
                }

              oh0->ip4.ip_version_and_header_length = 0x45;
              oh0->ip4.fragment_id = 0;
              oh0->ip4.flags_and_fragment_offset = 0;
              oh0->ip4.ttl = 254;
              oh0->ip4.protocol = IP_PROTOCOL_IPSEC_ESP;
              oh0->esp.spi = clib_net_to_host_u32 (sa0->spi);
              oh0->esp.seq = clib_net_to_host_u32 (sa0->seq);
            }
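
          /* pick outer addresses and the next node: tunnel mode goes back
             through the FIB lookup nodes, transport mode keeps the
             original addresses and is sent straight to interface-output */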
          if (PREDICT_TRUE
              (!is_ipv6 && sa0->is_tunnel && !sa0->is_tunnel_ip6))
            {
              oh0->ip4.src_address.as_u32 = sa0->tunnel_src_addr.ip4.as_u32;
              oh0->ip4.dst_address.as_u32 = sa0->tunnel_dst_addr.ip4.as_u32;

              /* in tunnel mode send it back to FIB */
              next0 = ESP_ENCRYPT_NEXT_IP4_LOOKUP;
              vnet_buffer (b0)->sw_if_index[VLIB_TX] = (u32) ~ 0;
            }
          else if (is_ipv6 && sa0->is_tunnel && sa0->is_tunnel_ip6)
            {
              oh6_0->ip6.src_address.as_u64[0] =
                sa0->tunnel_src_addr.ip6.as_u64[0];
              oh6_0->ip6.src_address.as_u64[1] =
                sa0->tunnel_src_addr.ip6.as_u64[1];
              oh6_0->ip6.dst_address.as_u64[0] =
                sa0->tunnel_dst_addr.ip6.as_u64[0];
              oh6_0->ip6.dst_address.as_u64[1] =
                sa0->tunnel_dst_addr.ip6.as_u64[1];

              /* in tunnel mode send it back to FIB */
              next0 = ESP_ENCRYPT_NEXT_IP6_LOOKUP;
              vnet_buffer (b0)->sw_if_index[VLIB_TX] = (u32) ~ 0;
            }
          else
            {
              next0 = ESP_ENCRYPT_NEXT_INTERFACE_OUTPUT;
              transport_mode = 1;
            }
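
          /*
           * Pad so that payload + 2-byte ESP trailer fill a whole number
           * of cipher blocks, e.g. orig_sz = 100: blocks = 1 + 101 / 16
           * = 7, pad_bytes = 112 - 2 - 100 = 10.  Pad bytes take the
           * values 1, 2, 3, ... as required by RFC 4303.
           */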
          int blocks = 1 + (orig_sz + 1) / BLOCK_SIZE;

          /* pad packet in input buffer */
          u8 pad_bytes = BLOCK_SIZE * blocks - 2 - orig_sz;
          u8 *padding = vlib_buffer_get_current (b0) + b0->current_length;

          for (i = 0; i < pad_bytes; ++i)
            padding[i] = i + 1;

          f0 = vlib_buffer_get_current (b0) + b0->current_length + pad_bytes;
          f0->pad_length = pad_bytes;
          f0->next_header = next_hdr_type;
          b0->current_length += pad_bytes + 2 + trunc_size;
          /* effectively a no-op: encryption happens in place, so the RX
             sw_if_index is already the right one */
          vnet_buffer (b0)->sw_if_index[VLIB_RX] =
            vnet_buffer (b0)->sw_if_index[VLIB_RX];
          b0->flags |= VLIB_BUFFER_TOTAL_LENGTH_VALID;
          struct rte_crypto_sym_op *sym_cop;
          sym_cop = (struct rte_crypto_sym_op *) (cop + 1);

          dpdk_cop_priv_t *priv = (dpdk_cop_priv_t *) (sym_cop + 1);

          /* stash the post-crypto next node for dpdk-esp-encrypt-post */
          vnet_buffer (b0)->unused[0] = next0;

          mb0 = rte_mbuf_from_vlib_buffer (b0);
          mb0->data_len = b0->current_length;
          mb0->pkt_len = b0->current_length;
          mb0->data_off = RTE_PKTMBUF_HEADROOM + b0->current_data;
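
          /* mb0 overlays the same packet memory as b0; the length and
             data_off updates above keep the cryptodev PMD's view of the
             packet in sync with vlib's */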
          dpdk_gcm_cnt_blk *icb = &priv->cb;

          crypto_set_icb (icb, sa0->salt, sa0->seq, sa0->seq_hi);

          u8 is_aead = sa0->crypto_alg == IPSEC_CRYPTO_ALG_AES_GCM_128;
          u32 cipher_off, cipher_len;
          u32 auth_off = 0, auth_len = 0, aad_size = 0;
          u8 *aad = NULL, *digest = NULL;
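
          /*
           * crypto_set_icb above packs salt | IV (seq, seq_hi) | counter
           * into a 16-byte block: GCM consumes it as the initial counter
           * block, CBC modes as the IV.  Note the asymmetric ranges below:
           * AEAD ciphers only the padded payload and authenticates the ESP
           * header via AAD, while cipher+auth chains cipher the on-wire IV
           * too and authenticate the whole ESP datagram up to the ICV.
           */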
          if (is_aead)
            {
              u32 *esp_iv =
                (u32 *) (b0->data + b0->current_data + ip_hdr_size +
                         sizeof (esp_header_t));
              esp_iv[0] = sa0->seq;
              esp_iv[1] = sa0->seq_hi;

              cipher_off = ip_hdr_size + sizeof (esp_header_t) + iv_size;
              cipher_len = BLOCK_SIZE * blocks;
              iv_size = 16;     /* GCM IV size, not ESP IV size */

              aad = priv->aad;
              clib_memcpy (aad, vlib_buffer_get_current (b0) + ip_hdr_size,
                           8);
              aad_size = 8;
              if (PREDICT_FALSE (sa0->use_esn))
                {
                  *((u32 *) &aad[8]) = sa0->seq_hi;
                  aad_size = 12;
                }

              digest =
                vlib_buffer_get_current (b0) + b0->current_length -
                trunc_size;
            }
          else
            {
              cipher_off = ip_hdr_size + sizeof (esp_header_t);
              cipher_len = BLOCK_SIZE * blocks + iv_size;

              auth_off = ip_hdr_size;
              auth_len = b0->current_length - ip_hdr_size - trunc_size;

              digest =
                vlib_buffer_get_current (b0) + b0->current_length -
                trunc_size;

              if (PREDICT_FALSE (sa0->use_esn))
                {
                  /* for ESN, hash the high sequence bits as if they
                     trailed the packet; the computed ICV overwrites
                     them afterwards */
                  *((u32 *) digest) = sa0->seq_hi;
                  auth_len += sizeof (sa0->seq_hi);
                }
            }
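
          /*
           * A single op describes the whole operation to the cryptodev:
           * cipher range, auth range, AAD and digest location.  For AEAD
           * the device computes the ICV over AAD and ciphertext; for
           * cipher+auth chains both transforms run in one pass.
           */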
          crypto_op_setup (is_aead, mb0, cop, sess,
                           cipher_off, cipher_len, (u8 *) icb, iv_size,
                           auth_off, auth_len, aad, aad_size,
                           digest, 0, trunc_size);
          if (PREDICT_FALSE (is_ipv6))
            {
              oh6_0->ip6.payload_length =
                clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b0) -
                                      sizeof (ip6_header_t));
            }
          else
            {
              oh0->ip4.length =
                clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b0));
              oh0->ip4.checksum = ip4_header_checksum (&oh0->ip4);
            }
          if (transport_mode)
            vlib_buffer_advance (b0, -sizeof (ethernet_header_t));

        trace:
          if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
            {
              esp_encrypt_trace_t *tr =
                vlib_add_trace (vm, node, b0, sizeof (*tr));
              tr->spi = sa0->spi;
              tr->seq = sa0->seq - 1;
              tr->crypto_alg = sa0->crypto_alg;
              tr->integ_alg = sa0->integ_alg;
            }
        }
      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }
  vlib_node_increment_counter (vm, dpdk_esp_encrypt_node.index,
                               ESP_ENCRYPT_ERROR_RX_PKTS,
                               from_frame->n_vectors);
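
  /*
   * Flush one burst per queue pair.  The device may accept fewer ops
   * than offered; any leftover ops go back to the free pool and their
   * buffers are freed and counted as ENQ_FAIL drops.
   */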
  crypto_qp_data_t *qpd;

  vec_foreach_index (i, cwm->qp_data)
    {
      u32 enq;

      qpd = vec_elt_at_index (cwm->qp_data, i);
      enq = rte_cryptodev_enqueue_burst (qpd->dev_id, qpd->qp_id,
                                         qpd->cops, n_cop_qp[i]);
      qpd->inflights += enq;

      if (PREDICT_FALSE (enq < n_cop_qp[i]))
        {
          crypto_free_cop (qpd, &qpd->cops[enq], n_cop_qp[i] - enq);
          vlib_buffer_free (vm, &qpd->bi[enq], n_cop_qp[i] - enq);

          vlib_node_increment_counter (vm, dpdk_esp_encrypt_node.index,
                                       ESP_ENCRYPT_ERROR_ENQ_FAIL,
                                       n_cop_qp[i] - enq);
        }
    }

  return from_frame->n_vectors;
}
VLIB_REGISTER_NODE (dpdk_esp_encrypt_node) = {
  .function = dpdk_esp_encrypt_node_fn,
  .name = "dpdk-esp-encrypt",
  .flags = VLIB_NODE_FLAG_IS_OUTPUT,
  .vector_size = sizeof (u32),
  .format_trace = format_esp_encrypt_trace,
  .n_errors = ARRAY_LEN (esp_encrypt_error_strings),
  .error_strings = esp_encrypt_error_strings,
  .n_next_nodes = 1,
  .next_nodes =
    {
      [ESP_ENCRYPT_NEXT_DROP] = "error-drop",
    }
};
VLIB_NODE_FUNCTION_MULTIARCH (dpdk_esp_encrypt_node, dpdk_esp_encrypt_node_fn)
/*
 * ESP Encrypt Post Node
 */
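/*
 * Encryption is asynchronous: once the cryptodev finishes an op, the
 * polling crypto dequeue node hands the buffer to this post node, which
 * forwards it to the next index that dpdk-esp-encrypt stashed in
 * vnet_buffer (b0)->unused[0].
 */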
#define foreach_esp_encrypt_post_error \
 _(PKTS, "ESP post pkts")
typedef enum
{
#define _(sym,str) ESP_ENCRYPT_POST_ERROR_##sym,
  foreach_esp_encrypt_post_error
#undef _
    ESP_ENCRYPT_POST_N_ERROR,
} esp_encrypt_post_error_t;
static char *esp_encrypt_post_error_strings[] = {
#define _(sym,string) string,
  foreach_esp_encrypt_post_error
#undef _
};
vlib_node_registration_t dpdk_esp_encrypt_post_node;
static u8 *
format_esp_encrypt_post_trace (u8 * s, va_list * args)
{
  return s;
}
static uword
dpdk_esp_encrypt_post_node_fn (vlib_main_t * vm,
                               vlib_node_runtime_t * node,
                               vlib_frame_t * from_frame)
{
  u32 n_left_from, *from, *to_next = 0, next_index;

  from = vlib_frame_vector_args (from_frame);
  n_left_from = from_frame->n_vectors;

  next_index = node->cached_next_index;
  while (n_left_from > 0)
    {
      u32 n_left_to_next;

      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      while (n_left_from > 0 && n_left_to_next > 0)
        {
          u32 bi0, next0;
          vlib_buffer_t *b0 = 0;

          bi0 = from[0];
          from += 1;
          n_left_from -= 1;

          b0 = vlib_get_buffer (vm, bi0);

          to_next[0] = bi0;
          to_next += 1;
          n_left_to_next -= 1;
          /* forward to the next index stashed by dpdk-esp-encrypt */
          next0 = vnet_buffer (b0)->unused[0];

          vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                           to_next, n_left_to_next, bi0,
                                           next0);
        }
      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

  vlib_node_increment_counter (vm, dpdk_esp_encrypt_post_node.index,
                               ESP_ENCRYPT_POST_ERROR_PKTS,
                               from_frame->n_vectors);

  return from_frame->n_vectors;
}
VLIB_REGISTER_NODE (dpdk_esp_encrypt_post_node) = {
  .function = dpdk_esp_encrypt_post_node_fn,
  .name = "dpdk-esp-encrypt-post",
  .vector_size = sizeof (u32),
  .format_trace = format_esp_encrypt_post_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .n_errors = ARRAY_LEN (esp_encrypt_post_error_strings),
  .error_strings = esp_encrypt_post_error_strings,
  .n_next_nodes = ESP_ENCRYPT_N_NEXT,
  .next_nodes =
    {
#define _(s,n) [ESP_ENCRYPT_NEXT_##s] = n,
      foreach_esp_encrypt_next
#undef _
    }
};
VLIB_NODE_FUNCTION_MULTIARCH (dpdk_esp_encrypt_post_node,
                              dpdk_esp_encrypt_post_node_fn)
/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */