+
+ /* decrypt packets */
+ wg_input_process_ops (vm, node, ptd->crypto_ops, data_bufs, data_nexts,
+ drop_next);
+
+ /* Post-decryption pass: validate each data packet, strip the wireguard
+ * transport header, update peer timers, and apply the allowed-ips
+ * (cryptokey routing) check before handing the inner packet to IP input. */
+ b = data_bufs;
+ n_left_from = n_data;
+ n_data = 0;
+ last_rec_idx = ~0;
+ last_peer_time_idx = NULL;
+ while (n_left_from > 0)
+ {
+ bool is_keepalive = false;
+ u32 *peer_idx = NULL;
+
+ /* A next of PUNT already set by an earlier stage means this buffer
+ * failed before this point (e.g. decryption error) -- skip straight
+ * to tracing. */
+ if (data_next[n_data] == WG_INPUT_NEXT_PUNT)
+ {
+ goto trace;
+ }
+ else
+ {
+ /* Default to PUNT; overwritten below only if the packet passes
+ * every check and its inner source address is allowed. */
+ data_next[n_data] = WG_INPUT_NEXT_PUNT;
+ }
+
+ message_data_t *data = vlib_buffer_get_current (b[0]);
+
+ /* Cache the peer lookup across consecutive packets carrying the
+ * same receiver index. */
+ if (data->receiver_index != last_rec_idx)
+ {
+ peer_idx =
+ wg_index_table_lookup (&wmp->index_table, data->receiver_index);
+ /* already checked and existing */
+ peer = wg_peer_get (*peer_idx);
+ last_rec_idx = data->receiver_index;
+ }
+
+ /* NOTE(review): peer_idx is reset to NULL on every iteration, so when
+ * receiver_index matches last_rec_idx the lookup above is skipped,
+ * the peer_idx-guarded timer update below never fires for this packet,
+ * and the trace records INDEX_INVALID -- confirm this is intended. */
+
+ noise_keypair_t *kp =
+ wg_get_active_keypair (&peer->remote, data->receiver_index);
+
+ /* Anti-replay: reject packets whose counter falls outside the
+ * keypair's sliding receive window. */
+ if (!noise_counter_recv (&kp->kp_ctr, data->counter))
+ {
+ goto trace;
+ }
+
+ /* Strip the transport-data header and trailing auth tag so the
+ * buffer now starts at the decrypted inner IP packet. */
+ u16 encr_len = b[0]->current_length - sizeof (message_data_t);
+ u16 decr_len = encr_len - NOISE_AUTHTAG_LEN;
+
+ vlib_buffer_advance (b[0], sizeof (message_data_t));
+ b[0]->current_length = decr_len;
+ vnet_buffer_offload_flags_clear (b[0], VNET_BUFFER_OFFLOAD_F_UDP_CKSUM);
+
+ /* Touch the "authenticated packet received" timers at most once per
+ * distinct peer in this frame. */
+ if (PREDICT_FALSE (peer_idx && (last_peer_time_idx != peer_idx)))
+ {
+ wg_timers_any_authenticated_packet_received_opt (peer, time);
+ wg_timers_any_authenticated_packet_traversal (peer);
+ last_peer_time_idx = peer_idx;
+ }
+
+ /* Keepalive packet has zero length */
+ if (decr_len == 0)
+ {
+ is_keepalive = true;
+ goto trace;
+ }
+
+ wg_timers_data_received (peer);
+
+ /* Extract the inner source address for the allowed-ips check. */
+ ip46_address_t src_ip;
+ u8 is_ip4_inner = is_ip4_header (vlib_buffer_get_current (b[0]));
+ if (is_ip4_inner)
+ {
+ ip46_address_set_ip4 (
+ &src_ip,
+ &((ip4_header_t *) vlib_buffer_get_current (b[0]))->src_address);
+ }
+ else
+ {
+ ip46_address_set_ip6 (
+ &src_ip,
+ &((ip6_header_t *) vlib_buffer_get_current (b[0]))->src_address);
+ }
+
+ const fib_prefix_t *allowed_ip;
+ bool allowed = false;
+
+ /*
+ * we could make this into an ACL, but the expectation
+ * is that there aren't many allowed IPs and thus a linear
+ * walk is faster than an ACL
+ */
+
+ vec_foreach (allowed_ip, peer->allowed_ips)
+ {
+ if (fib_prefix_is_cover_addr_46 (allowed_ip, &src_ip))
+ {
+ allowed = true;
+ break;
+ }
+ }
+ if (allowed)
+ {
+ /* Packet passed cryptokey routing: attribute it to the wg
+ * interface and forward to the matching inner IP input node. */
+ vnet_buffer (b[0])->sw_if_index[VLIB_RX] = peer->wg_sw_if_index;
+ data_next[n_data] =
+ is_ip4_inner ? WG_INPUT_NEXT_IP4_INPUT : WG_INPUT_NEXT_IP6_INPUT;
+ }
+ trace:
+ if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE) &&
+ (b[0]->flags & VLIB_BUFFER_IS_TRACED)))
+ {
+ wg_input_trace_t *t = vlib_add_trace (vm, node, b[0], sizeof (*t));
+ t->type = header_type;
+ t->current_length = b[0]->current_length;
+ t->is_keepalive = is_keepalive;
+ t->peer = peer_idx ? *peer_idx : INDEX_INVALID;
+ }
+
+ b += 1;
+ n_left_from -= 1;
+ n_data += 1;
+ }
+ /* enqueue other bufs */
+ vlib_buffer_enqueue_to_next (vm, node, other_bi, other_next, n_other);
+
+ /* enqueue data bufs */
+ vlib_buffer_enqueue_to_next (vm, node, data_bi, data_nexts, n_data);