/*
 * Copyright (c) 2020 Doc.ai and/or its affiliates.
 * Copyright (c) 2020 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <vlib/vlib.h>
#include <vnet/vnet.h>
#include <vppinfra/error.h>

#include <wireguard/wireguard.h>
#include <wireguard/wireguard_send.h>

#define foreach_wg_output_error                                               \
  _ (NONE, "No error")                                                        \
  _ (PEER, "Peer error")                                                      \
  _ (KEYPAIR, "Keypair error")                                                \
  _ (TOO_BIG, "packet too big")                                               \
  _ (CRYPTO_ENGINE_ERROR, "crypto engine error (packet dropped)")

typedef enum
{
#define _(sym, str) WG_OUTPUT_ERROR_##sym,
  foreach_wg_output_error
#undef _
    WG_OUTPUT_N_ERROR,
} wg_output_error_t;

static char *wg_output_error_strings[] = {
#define _(sym, string) string,
  foreach_wg_output_error
#undef _
};

typedef enum
{
  WG_OUTPUT_NEXT_ERROR,
  WG_OUTPUT_NEXT_HANDOFF,
  WG_OUTPUT_NEXT_INTERFACE_OUTPUT,
  WG_OUTPUT_N_NEXT,
} wg_output_next_t;

typedef struct
{
  index_t peer;
  u8 header[sizeof (ip6_udp_header_t)];
  u8 is_ip4;
} wg_output_tun_trace_t;

typedef struct
{
  index_t peer;
  u16 next_index;
} wg_output_tun_post_trace_t;

static u8 *
format_ip4_udp_header (u8 *s, va_list *args)
{
  ip4_udp_header_t *hdr4 = va_arg (*args, ip4_udp_header_t *);

  s = format (s, "%U:%U", format_ip4_header, &hdr4->ip4, format_udp_header,
              &hdr4->udp);
  return s;
}

static u8 *
format_ip6_udp_header (u8 *s, va_list *args)
{
  ip6_udp_header_t *hdr6 = va_arg (*args, ip6_udp_header_t *);

  s = format (s, "%U:%U", format_ip6_header, &hdr6->ip6, format_udp_header,
              &hdr6->udp);
  return s;
}

/* packet trace format function */
static u8 *
format_wg_output_tun_trace (u8 *s, va_list *args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);

  wg_output_tun_trace_t *t = va_arg (*args, wg_output_tun_trace_t *);

  s = format (s, "peer: %d\n", t->peer);
  s = format (s, "  Encrypted packet: ");

  s = t->is_ip4 ? format (s, "%U", format_ip4_udp_header, t->header) :
                  format (s, "%U", format_ip6_udp_header, t->header);
  return s;
}

/* post node - packet trace format function */
static u8 *
format_wg_output_tun_post_trace (u8 *s, va_list *args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);

  wg_output_tun_post_trace_t *t = va_arg (*args, wg_output_tun_post_trace_t *);

  s = format (s, "peer: %d\n", t->peer);
  s = format (s, "  wg-post: next node index %u", t->next_index);
  return s;
}
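
/*
 * WireGuard data packets are sealed with ChaCha20-Poly1305; the helper below
 * builds the 96-bit IV by zeroing the first four bytes and copying the
 * 64-bit send counter behind them (the protocol's nonce layout), then queues
 * a synchronous crypto operation for the buffer.
 */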
static_always_inline void
wg_prepare_sync_enc_op (vlib_main_t *vm, vnet_crypto_op_t **crypto_ops,
                        u8 *src, u32 src_len, u8 *dst, u8 *aad, u32 aad_len,
                        u64 nonce, vnet_crypto_key_index_t key_index, u32 bi,
                        u8 *iv)
{
  vnet_crypto_op_t _op, *op = &_op;
  u8 src_[] = {};

  clib_memset (iv, 0, 4);
  clib_memcpy (iv + 4, &nonce, sizeof (nonce));

  vec_add2_aligned (crypto_ops[0], op, 1, CLIB_CACHE_LINE_BYTES);
  vnet_crypto_op_init (op, VNET_CRYPTO_OP_CHACHA20_POLY1305_ENC);

  op->tag_len = NOISE_AUTHTAG_LEN;
  op->tag = dst + src_len;
  op->src = !src ? src_ : src;
  op->len = src_len;
  op->dst = dst;
  op->key_index = key_index;
  op->aad = aad;
  op->aad_len = aad_len;
  op->iv = iv;
  op->user_data = bi;
}
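
/*
 * Run the queued synchronous crypto operations as one batch; any operation
 * that did not complete successfully gets its buffer's error counter set and
 * its next index rewritten to the drop node.
 */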
static_always_inline void
wg_output_process_ops (vlib_main_t *vm, vlib_node_runtime_t *node,
                       vnet_crypto_op_t *ops, vlib_buffer_t *b[], u16 *nexts,
                       u16 drop_next)
{
  u32 n_fail, n_ops = vec_len (ops);
  vnet_crypto_op_t *op = ops;

  if (n_ops == 0)
    return;

  n_fail = n_ops - vnet_crypto_process_ops (vm, op, n_ops);

  while (n_fail)
    {
      ASSERT (op - ops < n_ops);
      if (op->status != VNET_CRYPTO_OP_STATUS_COMPLETED)
        {
          u32 bi = op->user_data;
          b[bi]->error = node->errors[WG_OUTPUT_ERROR_CRYPTO_ENGINE_ERROR];
          nexts[bi] = drop_next;
          n_fail--;
        }
      op++;
    }
}
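
/*
 * Append one buffer to an async crypto frame: the frame element records the
 * key index, the plaintext offset/length within the buffer and the IV/tag
 * pointers, while the frame itself remembers the buffer index and the node
 * the buffer is handed to once the crypto engine completes.
 */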
static_always_inline void
wg_output_tun_add_to_frame (vlib_main_t *vm, vnet_crypto_async_frame_t *f,
                            u32 key_index, u32 crypto_len,
                            i16 crypto_start_offset, u32 buffer_index,
                            u16 next_node, u8 *iv, u8 *tag, u8 flags)
{
  vnet_crypto_async_frame_elt_t *fe;
  u16 index;

  ASSERT (f->n_elts < VNET_CRYPTO_FRAME_SIZE);

  index = f->n_elts;
  fe = &f->elts[index];
  f->n_elts++;
  fe->key_index = key_index;
  fe->crypto_total_length = crypto_len;
  fe->crypto_start_offset = crypto_start_offset;
  fe->iv = iv;
  fe->tag = tag;
  fe->flags = flags;
  f->buffer_indices[index] = buffer_index;
  f->next_node_index[index] = next_node;
}
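
/*
 * Noise transport-data encryption for the synchronous path: pick the peer's
 * current keypair, enforce the REJECT_AFTER_TIME / REJECT_AFTER_MESSAGES
 * limits, queue the encrypt operation and return SC_OK, SC_KEEP_KEY_FRESH
 * (a fresh handshake should be initiated) or SC_FAILED.
 */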
static_always_inline enum noise_state_crypt
wq_output_tun_process (vlib_main_t *vm, vnet_crypto_op_t **crypto_ops,
                       noise_remote_t *r, uint32_t *r_idx, uint64_t *nonce,
                       uint8_t *src, size_t srclen, uint8_t *dst, u32 bi,
                       u8 *iv, f64 time)
{
  noise_keypair_t *kp;
  enum noise_state_crypt ret = SC_FAILED;

  if ((kp = r->r_current) == NULL)
    goto error;

  /* We confirm that our values are within our tolerances. We want:
   *  - a valid keypair
   *  - our keypair to be less than REJECT_AFTER_TIME seconds old
   *  - our receive counter to be less than REJECT_AFTER_MESSAGES
   *  - our send counter to be less than REJECT_AFTER_MESSAGES
   */
  if (PREDICT_FALSE (
        wg_birthdate_has_expired_opt (kp->kp_birthdate, REJECT_AFTER_TIME,
                                      time) ||
        kp->kp_ctr.c_recv >= REJECT_AFTER_MESSAGES ||
        ((*nonce = noise_counter_send (&kp->kp_ctr)) > REJECT_AFTER_MESSAGES)))
    goto error;

  /* We encrypt into the same buffer, so the caller must ensure that buf
   * has NOISE_AUTHTAG_LEN bytes to store the MAC. The nonce and index
   * are passed back out to the caller through the provided data pointer. */
  *r_idx = kp->kp_remote_index;

  wg_prepare_sync_enc_op (vm, crypto_ops, src, srclen, dst, NULL, 0, *nonce,
                          kp->kp_send_index, bi, iv);

  /* If our values are still within tolerances, but we are approaching
   * the tolerances, we notify the caller with ESTALE that they should
   * establish a new keypair. The current keypair can continue to be used
   * until the tolerances are hit. We notify if:
   *  - our keypair is valid and our send counter is not less than
   *    REKEY_AFTER_MESSAGES
   *  - we're the initiator and our keypair is older than
   *    REKEY_AFTER_TIME seconds */
  ret = SC_KEEP_KEY_FRESH;
  if ((kp->kp_valid && *nonce >= REKEY_AFTER_MESSAGES) ||
      (kp->kp_is_initiator && wg_birthdate_has_expired_opt (
                                kp->kp_birthdate, REKEY_AFTER_TIME, time)))
    goto error;

  ret = SC_OK;

error:
  return ret;
}
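
/*
 * Async counterpart of the routine above: the post-crypto next index is
 * stashed in the buffer's wg_post_data so the wg*-output-tun-post node can
 * forward the packet once the crypto engine completes, and the buffer is
 * appended to the currently open async frame instead of a synchronous op.
 */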
static_always_inline enum noise_state_crypt
wg_add_to_async_frame (vlib_main_t *vm, wg_per_thread_data_t *ptd,
                       vnet_crypto_async_frame_t *async_frame,
                       vlib_buffer_t *b, u8 *payload, u32 payload_len, u32 bi,
                       u16 next, u16 async_next, noise_remote_t *r,
                       uint32_t *r_idx, uint64_t *nonce, u8 *iv, f64 time)
{
  wg_post_data_t *post = wg_post_data (b);
  u8 flag = 0;
  noise_keypair_t *kp;

  post->next_index = next;

  enum noise_state_crypt ret = SC_FAILED;

  if ((kp = r->r_current) == NULL)
    goto error;

  /* We confirm that our values are within our tolerances. We want:
   *  - a valid keypair
   *  - our keypair to be less than REJECT_AFTER_TIME seconds old
   *  - our receive counter to be less than REJECT_AFTER_MESSAGES
   *  - our send counter to be less than REJECT_AFTER_MESSAGES
   */
  if (PREDICT_FALSE (
        wg_birthdate_has_expired_opt (kp->kp_birthdate, REJECT_AFTER_TIME,
                                      time) ||
        kp->kp_ctr.c_recv >= REJECT_AFTER_MESSAGES ||
        ((*nonce = noise_counter_send (&kp->kp_ctr)) > REJECT_AFTER_MESSAGES)))
    goto error;

  /* We encrypt into the same buffer, so the caller must ensure that buf
   * has NOISE_AUTHTAG_LEN bytes to store the MAC. The nonce and index
   * are passed back out to the caller through the provided data pointer. */
  *r_idx = kp->kp_remote_index;

  clib_memset (iv, 0, 4);
  clib_memcpy (iv + 4, nonce, sizeof (*nonce));

  /* this always succeeds because we know the frame is not full */
  wg_output_tun_add_to_frame (vm, async_frame, kp->kp_send_index, payload_len,
                              payload - b->data, bi, async_next, iv,
                              payload + payload_len, flag);

  /* If our values are still within tolerances, but we are approaching
   * the tolerances, we notify the caller with ESTALE that they should
   * establish a new keypair. The current keypair can continue to be used
   * until the tolerances are hit. We notify if:
   *  - our keypair is valid and our send counter is not less than
   *    REKEY_AFTER_MESSAGES
   *  - we're the initiator and our keypair is older than
   *    REKEY_AFTER_TIME seconds */
  ret = SC_KEEP_KEY_FRESH;
  if ((kp->kp_valid && *nonce >= REKEY_AFTER_MESSAGES) ||
      (kp->kp_is_initiator && wg_birthdate_has_expired_opt (
                                kp->kp_birthdate, REKEY_AFTER_TIME, time)))
    goto error;

  ret = SC_OK;

error:
  return ret;
}
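
/*
 * Main encrypt path for the wg4/wg6-output-tun nodes. Each packet has its
 * peer resolved from the TX adjacency, is handed off if the peer is owned by
 * another worker, gets the outer (IPv4 or IPv6) UDP/WireGuard data header
 * finalised in front of the inner packet and is then encrypted in place,
 * either synchronously or through an async crypto frame. Per the WireGuard
 * protocol a data message is the message header, the 4-byte receiver index
 * and the 8-byte counter followed by the ciphertext and its 16-byte
 * authentication tag; message_data_len() sizes that complete message from
 * the plaintext length.
 */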
/* is_ip4 - inner header flag */
static_always_inline uword
wg_output_tun_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
                      vlib_frame_t *frame, u8 is_ip4, u16 async_next_node)
{
  wg_main_t *wmp = &wg_main;
  wg_per_thread_data_t *ptd =
    vec_elt_at_index (wmp->per_thread_data, vm->thread_index);
  u32 *from = vlib_frame_vector_args (frame);
  u32 n_left_from = frame->n_vectors;
  ip4_udp_wg_header_t *hdr4_out = NULL;
  ip6_udp_wg_header_t *hdr6_out = NULL;
  message_data_t *message_data_wg = NULL;
  vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;
  vnet_crypto_op_t **crypto_ops = &ptd->crypto_ops;
  u16 nexts[VLIB_FRAME_SIZE], *next = nexts;
  vlib_buffer_t *sync_bufs[VLIB_FRAME_SIZE];
  u32 thread_index = vm->thread_index;
  u16 n_sync = 0;
  const u16 drop_next = WG_OUTPUT_NEXT_ERROR;
  const u8 is_async = wg_op_mode_is_set_ASYNC ();
  vnet_crypto_async_frame_t *async_frame = NULL;
  u16 noop_nexts[VLIB_FRAME_SIZE], *noop_next = noop_nexts, n_noop = 0;
  u16 err = !0;
  u32 sync_bi[VLIB_FRAME_SIZE];
  u32 noop_bi[VLIB_FRAME_SIZE];

  vlib_get_buffers (vm, from, bufs, n_left_from);
  vec_reset_length (ptd->crypto_ops);
  vec_reset_length (ptd->async_frames);

  wg_peer_t *peer = NULL;
  u32 adj_index = 0;
  u32 last_adj_index = ~0;
  index_t peeri = INDEX_INVALID;

  f64 time = clib_time_now (&vm->clib_time) + vm->time_offset;
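
  /*
   * Per-packet bookkeeping: drops and handoffs are collected in
   * noop_bi/noop_nexts, packets encrypted synchronously go through
   * sync_bi/sync_bufs and the crypto op vector, and async packets are
   * accumulated into crypto frames that are submitted after the loop.
   */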
  while (n_left_from > 0)
    {
      vlib_prefetch_buffer_header (b[2], LOAD);
      p = vlib_buffer_get_current (b[1]);
      CLIB_PREFETCH (p, CLIB_CACHE_LINE_BYTES, LOAD);
      CLIB_PREFETCH (vlib_buffer_get_tail (b[1]), CLIB_CACHE_LINE_BYTES,
                     LOAD);

      noop_next[0] = WG_OUTPUT_NEXT_ERROR;
      err = WG_OUTPUT_NEXT_ERROR;

      adj_index = vnet_buffer (b[0])->ip.adj_index[VLIB_TX];

      if (PREDICT_FALSE (last_adj_index != adj_index))
          peeri = wg_peer_get_by_adj_index (adj_index);
          if (peeri == INDEX_INVALID)
              b[0]->error = node->errors[WG_OUTPUT_ERROR_PEER];
          peer = wg_peer_get (peeri);

      if (!peer || wg_peer_is_dead (peer))
          b[0]->error = node->errors[WG_OUTPUT_ERROR_PEER];

      if (PREDICT_FALSE (~0 == peer->output_thread_index))
        /* this is the first packet to use this peer, claim the peer
         * for this thread */
        clib_atomic_cmp_and_swap (&peer->output_thread_index, ~0,
                                  wg_peer_assign_thread (thread_index));

      if (PREDICT_FALSE (thread_index != peer->output_thread_index))
          noop_next[0] = WG_OUTPUT_NEXT_HANDOFF;
          err = WG_OUTPUT_NEXT_HANDOFF;

      if (PREDICT_FALSE (!peer->remote.r_current))
          wg_send_handshake_from_mt (peeri, false);
          b[0]->error = node->errors[WG_OUTPUT_ERROR_KEYPAIR];

      is_ip4_out = ip46_address_is_ip4 (&peer->src.addr);
      if (is_ip4_out)
        {
          hdr4_out = vlib_buffer_get_current (b[0]);
          message_data_wg = &hdr4_out->wg;
        }
      else
        {
          hdr6_out = vlib_buffer_get_current (b[0]);
          message_data_wg = &hdr6_out->wg;
        }

      iph_offset = vnet_buffer (b[0])->ip.save_rewrite_length;
      plain_data = vlib_buffer_get_current (b[0]) + iph_offset;
      plain_data_len = vlib_buffer_length_in_chain (vm, b[0]) - iph_offset;
      u8 *iv_data = b[0]->pre_data;

      size_t encrypted_packet_len = message_data_len (plain_data_len);

      /*
       * Ensure there is enough space to write the encrypted data
       * into the packet buffer
       */
      if (PREDICT_FALSE (encrypted_packet_len >= WG_DEFAULT_DATA_SIZE) ||
          PREDICT_FALSE ((b[0]->current_data + encrypted_packet_len) >=
                         vlib_buffer_get_default_data_size (vm)))
          b[0]->error = node->errors[WG_OUTPUT_ERROR_TOO_BIG];

      if (PREDICT_FALSE (last_adj_index != adj_index))
        {
          wg_timers_any_authenticated_packet_sent_opt (peer, time);
          wg_timers_data_sent_opt (peer, time);
          wg_timers_any_authenticated_packet_traversal (peer);
          last_adj_index = adj_index;
        }

      /* Here we are sure that we can send the packet to the next node */
      next[0] = WG_OUTPUT_NEXT_INTERFACE_OUTPUT;

      enum noise_state_crypt state;
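
      /*
       * Dispatch the encrypt: in async mode the packet is appended to an
       * open crypto frame (allocating a new frame when the current one is
       * full), otherwise a synchronous crypto op is queued and processed
       * after the loop.
       */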
      if (is_async)
        {
          /* get a frame for this op if we don't yet have one or it's full */
          if (NULL == async_frame ||
              vnet_crypto_async_frame_is_full (async_frame))
            {
              async_frame = vnet_crypto_async_get_frame (
                vm, VNET_CRYPTO_OP_CHACHA20_POLY1305_TAG16_AAD0_ENC);
              /* Save the frame to the list we'll submit at the end */
              vec_add1 (ptd->async_frames, async_frame);
            }

          state = wg_add_to_async_frame (
            vm, ptd, async_frame, b[0], plain_data, plain_data_len,
            from[b - bufs], next[0], async_next_node, &peer->remote,
            &message_data_wg->receiver_index, &message_data_wg->counter,
            iv_data, time);
        }
      else
        {
          state = wq_output_tun_process (
            vm, crypto_ops, &peer->remote, &message_data_wg->receiver_index,
            &message_data_wg->counter, plain_data, plain_data_len, plain_data,
            n_sync, iv_data, time);
        }

      if (PREDICT_FALSE (state == SC_KEEP_KEY_FRESH))
          wg_send_handshake_from_mt (peeri, false);
      else if (PREDICT_FALSE (state == SC_FAILED))
          wg_send_handshake_from_mt (peeri, false);
          wg_peer_update_flags (peeri, WG_PEER_ESTABLISHED, false);
          noop_next[0] = WG_OUTPUT_NEXT_ERROR;

      err = WG_OUTPUT_NEXT_INTERFACE_OUTPUT;
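
      /*
       * Patch the pre-built outer headers: the UDP length and the IPv4
       * total length / IPv6 payload length must now cover the WireGuard
       * data message (the IPv4 checksum is updated along with the length).
       */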
      if (is_ip4_out)
        {
          hdr4_out->wg.header.type = MESSAGE_DATA;
          hdr4_out->udp.length = clib_host_to_net_u16 (encrypted_packet_len +
                                                       sizeof (udp_header_t));
          b[0]->current_length =
            (encrypted_packet_len + sizeof (ip4_udp_header_t));
          ip4_header_set_len_w_chksum (
            &hdr4_out->ip4, clib_host_to_net_u16 (b[0]->current_length));
        }
      else
        {
          hdr6_out->wg.header.type = MESSAGE_DATA;
          hdr6_out->udp.length = clib_host_to_net_u16 (encrypted_packet_len +
                                                       sizeof (udp_header_t));
          b[0]->current_length =
            (encrypted_packet_len + sizeof (ip6_udp_header_t));
          hdr6_out->ip6.payload_length =
            clib_host_to_net_u16 (b[0]->current_length);
        }

      if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)
                         && (b[0]->flags & VLIB_BUFFER_IS_TRACED)))
        {
          wg_output_tun_trace_t *t =
            vlib_add_trace (vm, node, b[0], sizeof (*t));

          t->peer = peeri;
          t->is_ip4 = is_ip4_out;
          if (is_ip4_out)
            clib_memcpy (t->header, hdr4_out, sizeof (ip4_udp_header_t));
          else
            clib_memcpy (t->header, hdr6_out, sizeof (ip6_udp_header_t));
        }

      if (PREDICT_FALSE (err != WG_OUTPUT_NEXT_INTERFACE_OUTPUT))
          noop_bi[n_noop] = from[b - bufs];

      sync_bi[n_sync] = from[b - bufs];
      sync_bufs[n_sync] = b[0];

  /* wg-output-process-ops */
  wg_output_process_ops (vm, node, ptd->crypto_ops, sync_bufs, nexts,
                         drop_next);
  vlib_buffer_enqueue_to_next (vm, node, sync_bi, nexts, n_sync);

  /* submit all of the open frames */
  vnet_crypto_async_frame_t **async_frame;

  vec_foreach (async_frame, ptd->async_frames)
      if (PREDICT_FALSE (
            vnet_crypto_async_submit_open_frame (vm, *async_frame) < 0))
          u32 n_drop = (*async_frame)->n_elts;
          u32 *bi = (*async_frame)->buffer_indices;

          noop_bi[index] = bi[0];
          vlib_buffer_t *b = vlib_get_buffer (vm, bi[0]);
          noop_nexts[index] = drop_next;
          b->error = node->errors[WG_OUTPUT_ERROR_CRYPTO_ENGINE_ERROR];

          n_noop += (*async_frame)->n_elts;

          vnet_crypto_async_reset_frame (*async_frame);
          vnet_crypto_async_free_frame (vm, *async_frame);

  vlib_buffer_enqueue_to_next (vm, node, noop_bi, noop_nexts, n_noop);

  return frame->n_vectors;
}
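
/*
 * wg4/wg6-output-tun-post: runs once the async crypto engine has encrypted a
 * packet; it forwards each buffer to the next index that was recorded in the
 * buffer's wg_post_data before the crypto frame was submitted.
 */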
static_always_inline uword
wg_output_tun_post (vlib_main_t *vm, vlib_node_runtime_t *node,
                    vlib_frame_t *frame)
{
  vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;
  u16 nexts[VLIB_FRAME_SIZE], *next = nexts;
  u32 *from = vlib_frame_vector_args (frame);
  u32 n_left = frame->n_vectors;
  index_t peeri = ~0;

  vlib_get_buffers (vm, from, b, n_left);

      vlib_prefetch_buffer_header (b[0], LOAD);
      vlib_prefetch_buffer_header (b[1], LOAD);
      vlib_prefetch_buffer_header (b[2], LOAD);
      vlib_prefetch_buffer_header (b[3], LOAD);

      vlib_prefetch_buffer_header (b[4], LOAD);
      vlib_prefetch_buffer_header (b[5], LOAD);
      vlib_prefetch_buffer_header (b[6], LOAD);
      vlib_prefetch_buffer_header (b[7], LOAD);

      next[0] = (wg_post_data (b[0]))->next_index;
      next[1] = (wg_post_data (b[1]))->next_index;
      next[2] = (wg_post_data (b[2]))->next_index;
      next[3] = (wg_post_data (b[3]))->next_index;

      if (PREDICT_FALSE (node->flags & VLIB_NODE_FLAG_TRACE))
        {
          if (b[0]->flags & VLIB_BUFFER_IS_TRACED)
            {
              wg_output_tun_post_trace_t *tr =
                vlib_add_trace (vm, node, b[0], sizeof (*tr));
              peeri = wg_peer_get_by_adj_index (
                vnet_buffer (b[0])->ip.adj_index[VLIB_TX]);
              tr->next_index = next[0];
            }
          if (b[1]->flags & VLIB_BUFFER_IS_TRACED)
            {
              wg_output_tun_post_trace_t *tr =
                vlib_add_trace (vm, node, b[1], sizeof (*tr));
              peeri = wg_peer_get_by_adj_index (
                vnet_buffer (b[1])->ip.adj_index[VLIB_TX]);
              tr->next_index = next[1];
            }
          if (b[2]->flags & VLIB_BUFFER_IS_TRACED)
            {
              wg_output_tun_post_trace_t *tr =
                vlib_add_trace (vm, node, b[2], sizeof (*tr));
              peeri = wg_peer_get_by_adj_index (
                vnet_buffer (b[2])->ip.adj_index[VLIB_TX]);
              tr->next_index = next[2];
            }
          if (b[3]->flags & VLIB_BUFFER_IS_TRACED)
            {
              wg_output_tun_post_trace_t *tr =
                vlib_add_trace (vm, node, b[3], sizeof (*tr));
              peeri = wg_peer_get_by_adj_index (
                vnet_buffer (b[3])->ip.adj_index[VLIB_TX]);
              tr->next_index = next[3];
            }
        }

      next[0] = (wg_post_data (b[0]))->next_index;
      if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE) &&
                         (b[0]->flags & VLIB_BUFFER_IS_TRACED)))
        {
          wg_output_tun_post_trace_t *tr =
            vlib_add_trace (vm, node, b[0], sizeof (*tr));
          peeri = wg_peer_get_by_adj_index (
            vnet_buffer (b[0])->ip.adj_index[VLIB_TX]);
          tr->next_index = next[0];
        }

  vlib_buffer_enqueue_to_next (vm, node, from, nexts, frame->n_vectors);
  return frame->n_vectors;
}
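
/*
 * Node registrations: the post nodes are siblings of the corresponding
 * output-tun nodes so they share the same next-node arcs, while the
 * output-tun nodes wire up handoff, adjacency midchain TX and error-drop
 * as their next nodes.
 */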
VLIB_REGISTER_NODE (wg4_output_tun_post_node) = {
  .name = "wg4-output-tun-post-node",
  .vector_size = sizeof (u32),
  .format_trace = format_wg_output_tun_post_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .sibling_of = "wg4-output-tun",
  .n_errors = ARRAY_LEN (wg_output_error_strings),
  .error_strings = wg_output_error_strings,
};

VLIB_REGISTER_NODE (wg6_output_tun_post_node) = {
  .name = "wg6-output-tun-post-node",
  .vector_size = sizeof (u32),
  .format_trace = format_wg_output_tun_post_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .sibling_of = "wg6-output-tun",
  .n_errors = ARRAY_LEN (wg_output_error_strings),
  .error_strings = wg_output_error_strings,
};

VLIB_NODE_FN (wg4_output_tun_post_node)
(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *from_frame)
{
  return wg_output_tun_post (vm, node, from_frame);
}

VLIB_NODE_FN (wg6_output_tun_post_node)
(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *from_frame)
{
  return wg_output_tun_post (vm, node, from_frame);
}

VLIB_NODE_FN (wg4_output_tun_node)
(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame)
{
  return wg_output_tun_inline (vm, node, frame, /* is_ip4 */ 1,
                               wg_encrypt_async_next.wg4_post_next);
}

VLIB_NODE_FN (wg6_output_tun_node)
(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame)
{
  return wg_output_tun_inline (vm, node, frame, /* is_ip4 */ 0,
                               wg_encrypt_async_next.wg6_post_next);
}

VLIB_REGISTER_NODE (wg4_output_tun_node) =
{
  .name = "wg4-output-tun",
  .vector_size = sizeof (u32),
  .format_trace = format_wg_output_tun_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .n_errors = ARRAY_LEN (wg_output_error_strings),
  .error_strings = wg_output_error_strings,
  .n_next_nodes = WG_OUTPUT_N_NEXT,
  .next_nodes = {
    [WG_OUTPUT_NEXT_HANDOFF] = "wg4-output-tun-handoff",
    [WG_OUTPUT_NEXT_INTERFACE_OUTPUT] = "adj-midchain-tx",
    [WG_OUTPUT_NEXT_ERROR] = "error-drop",
  },
};

VLIB_REGISTER_NODE (wg6_output_tun_node) =
{
  .name = "wg6-output-tun",
  .vector_size = sizeof (u32),
  .format_trace = format_wg_output_tun_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .n_errors = ARRAY_LEN (wg_output_error_strings),
  .error_strings = wg_output_error_strings,
  .n_next_nodes = WG_OUTPUT_N_NEXT,
  .next_nodes = {
    [WG_OUTPUT_NEXT_HANDOFF] = "wg6-output-tun-handoff",
    [WG_OUTPUT_NEXT_INTERFACE_OUTPUT] = "adj-midchain-tx",
    [WG_OUTPUT_NEXT_ERROR] = "error-drop",
  },
};

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */