/*
 * decap.c: vxlan tunnel decap packet processing
 *
 * Copyright (c) 2013 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <vlib/vlib.h>
#include <vnet/pg/pg.h>
#include <vnet/vxlan/vxlan.h>
#ifndef CLIB_MARCH_VARIANT
vlib_node_registration_t vxlan4_input_node;
vlib_node_registration_t vxlan6_input_node;
#endif
typedef struct
{
  u32 next_index;
  u32 tunnel_index;
  u32 error;
  u32 vni;
} vxlan_rx_trace_t;

static u8 *
format_vxlan_rx_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  vxlan_rx_trace_t *t = va_arg (*args, vxlan_rx_trace_t *);

  if (t->tunnel_index == ~0)
    return format (s, "VXLAN decap error - tunnel for vni %d does not exist",
                   t->vni);
  return format (s, "VXLAN decap from vxlan_tunnel%d vni %d next %d error %d",
                 t->tunnel_index, t->vni, t->next_index, t->error);
}
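
/* Editor's illustration (not from the source): a successful decap traces as
 *   "VXLAN decap from vxlan_tunnel3 vni 13 next 1 error 0"
 * while a tunnel-lookup miss traces as
 *   "VXLAN decap error - tunnel for vni 13 does not exist"
 */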
static u32
buf_fib_index (vlib_buffer_t * b, u32 is_ip4)
{
  u32 sw_if_index = vnet_buffer (b)->sw_if_index[VLIB_TX];
  if (sw_if_index != (u32) ~0)
    return sw_if_index;

  u32 *fib_index_by_sw_if_index = is_ip4 ?
    ip4_main.fib_index_by_sw_if_index : ip6_main.fib_index_by_sw_if_index;
  sw_if_index = vnet_buffer (b)->sw_if_index[VLIB_RX];

  return vec_elt (fib_index_by_sw_if_index, sw_if_index);
}
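
/* Editor's note (hedged): by VPP convention sw_if_index[VLIB_TX] doubles as
 * a FIB index when an earlier feature has already pinned one; (u32) ~0 means
 * "unset". A minimal sketch of the two paths through buf_fib_index():
 *
 *   vnet_buffer (b)->sw_if_index[VLIB_TX] = 5;   // pinned: returns 5
 *   vnet_buffer (b)->sw_if_index[VLIB_TX] = ~0;  // unset: falls back to the
 *                                                // RX interface's FIB
 */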
typedef vxlan4_tunnel_key_t last_tunnel_cache4;
always_inline vxlan_tunnel_t *
vxlan4_find_tunnel (vxlan_main_t * vxm, last_tunnel_cache4 * cache,
                    u32 fib_index, ip4_header_t * ip4_0,
                    vxlan_header_t * vxlan0, vxlan_tunnel_t ** stats_t0)
{
  /* Make sure the VXLAN tunnel exists according to packet SIP and VNI */
  vxlan4_tunnel_key_t key4;
  key4.key[1] = ((u64) fib_index << 32) | vxlan0->vni_reserved;

  if (PREDICT_FALSE (key4.key[1] != cache->key[1] ||
                     ip4_0->src_address.as_u32 != (u32) cache->key[0]))
    {
      key4.key[0] = ip4_0->src_address.as_u32;
      int rv =
        clib_bihash_search_inline_16_8 (&vxm->vxlan4_tunnel_by_key, &key4);
      if (PREDICT_FALSE (rv != 0))
        return 0;

      *cache = key4;
    }
  vxlan_tunnel_t *t0 = pool_elt_at_index (vxm->tunnels, cache->value);

  /* Validate VXLAN tunnel SIP against packet DIP */
  if (PREDICT_TRUE (ip4_0->dst_address.as_u32 == t0->src.ip4.as_u32))
    *stats_t0 = t0;
  else
    {
      /* Try a multicast tunnel only for a multicast DIP */
      if (PREDICT_TRUE (!ip4_address_is_multicast (&ip4_0->dst_address)))
        return 0;

      key4.key[0] = ip4_0->dst_address.as_u32;
      /* Make sure the mcast VXLAN tunnel exists by packet DIP and VNI */
      int rv =
        clib_bihash_search_inline_16_8 (&vxm->vxlan4_tunnel_by_key, &key4);
      if (PREDICT_FALSE (rv != 0))
        return 0;

      *stats_t0 = pool_elt_at_index (vxm->tunnels, key4.value);
    }

  return t0;
}
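
/* Editor's sketch (hedged): layout of the 16-byte bihash key used above.
 *
 *   key4.key[0] = IPv4 address, packet SIP (or DIP for the mcast lookup)
 *   key4.key[1] = ((u64) encap fib_index << 32) | vni_reserved
 *
 * vni_reserved is the VNI word exactly as it appears on the wire, so the
 * lookup path never needs a byte swap.
 */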
typedef vxlan6_tunnel_key_t last_tunnel_cache6;
always_inline vxlan_tunnel_t *
vxlan6_find_tunnel (vxlan_main_t * vxm, last_tunnel_cache6 * cache,
                    u32 fib_index, ip6_header_t * ip6_0,
                    vxlan_header_t * vxlan0, vxlan_tunnel_t ** stats_t0)
{
  /* Make sure the VXLAN tunnel exists according to packet SIP and VNI */
  vxlan6_tunnel_key_t key6 = {
    .key = {
      [0] = ip6_0->src_address.as_u64[0],
      [1] = ip6_0->src_address.as_u64[1],
      [2] = (((u64) fib_index) << 32) | vxlan0->vni_reserved,
    }
  };

  if (PREDICT_FALSE
      (clib_bihash_key_compare_24_8 (key6.key, cache->key) == 0))
    {
      int rv =
        clib_bihash_search_inline_24_8 (&vxm->vxlan6_tunnel_by_key, &key6);
      if (PREDICT_FALSE (rv != 0))
        return 0;

      *cache = key6;
    }
  vxlan_tunnel_t *t0 = pool_elt_at_index (vxm->tunnels, cache->value);

  /* Validate VXLAN tunnel SIP against packet DIP */
  if (PREDICT_TRUE (ip6_address_is_equal (&ip6_0->dst_address, &t0->src.ip6)))
    *stats_t0 = t0;
  else
    {
      /* Try a multicast tunnel only for a multicast DIP */
      if (PREDICT_TRUE (!ip6_address_is_multicast (&ip6_0->dst_address)))
        return 0;

      /* Make sure the mcast VXLAN tunnel exists by packet DIP and VNI */
      key6.key[0] = ip6_0->dst_address.as_u64[0];
      key6.key[1] = ip6_0->dst_address.as_u64[1];
      int rv =
        clib_bihash_search_inline_24_8 (&vxm->vxlan6_tunnel_by_key, &key6);
      if (PREDICT_FALSE (rv != 0))
        return 0;

      *stats_t0 = pool_elt_at_index (vxm->tunnels, key6.value);
    }

  return t0;
}
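
/* Editor's sketch (hedged): the IPv6 variant widens the key to 24 bytes.
 *
 *   key6.key[0..1] = IPv6 address, packet SIP (or DIP for the mcast lookup)
 *   key6.key[2]    = ((u64) encap fib_index << 32) | vni_reserved
 */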
always_inline uword
vxlan_input (vlib_main_t * vm,
             vlib_node_runtime_t * node,
             vlib_frame_t * from_frame, u32 is_ip4)
{
  vxlan_main_t *vxm = &vxlan_main;
  vnet_main_t *vnm = vxm->vnet_main;
  vnet_interface_main_t *im = &vnm->interface_main;
  vlib_combined_counter_main_t *rx_counter =
    im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX;
  vlib_combined_counter_main_t *drop_counter =
    im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_DROP;
  last_tunnel_cache4 last4;
  last_tunnel_cache6 last6;
  u32 pkts_dropped = 0;
  u32 thread_index = vlib_get_thread_index ();

  if (is_ip4)
    memset (&last4, 0xff, sizeof last4);
  else
    memset (&last6, 0xff, sizeof last6);
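
  /* Editor's note (hedged): seeding the cache with 0xff yields a key that
   * cannot match any real (SIP, fib_index, VNI) tuple, so the first packet
   * of every frame always takes the bihash lookup path. */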
  u32 *from = vlib_frame_vector_args (from_frame);
  u32 n_left_from = from_frame->n_vectors;

  vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;
  vlib_get_buffers (vm, from, bufs, n_left_from);

  u16 nexts[VLIB_FRAME_SIZE], *next = nexts;
  while (n_left_from >= 4)
    {
      /* Prefetch next iteration. */
      vlib_prefetch_buffer_header (b[2], LOAD);
      vlib_prefetch_buffer_header (b[3], LOAD);

      /* udp leaves current_data pointing at the vxlan header */
      void *cur0 = vlib_buffer_get_current (b[0]);
      void *cur1 = vlib_buffer_get_current (b[1]);
      vxlan_header_t *vxlan0 = cur0;
      vxlan_header_t *vxlan1 = cur1;

      u8 error0 = vxlan0->flags != VXLAN_FLAGS_I ? VXLAN_ERROR_BAD_FLAGS : 0;
      u8 error1 = vxlan1->flags != VXLAN_FLAGS_I ? VXLAN_ERROR_BAD_FLAGS : 0;

      ip4_header_t *ip4_0, *ip4_1;
      ip6_header_t *ip6_0, *ip6_1;
      if (is_ip4)
        {
          ip4_0 = cur0 - sizeof (udp_header_t) - sizeof (ip4_header_t);
          ip4_1 = cur1 - sizeof (udp_header_t) - sizeof (ip4_header_t);
        }
      else
        {
          ip6_0 = cur0 - sizeof (udp_header_t) - sizeof (ip6_header_t);
          ip6_1 = cur1 - sizeof (udp_header_t) - sizeof (ip6_header_t);
        }

      /* pop vxlan header */
      vlib_buffer_advance (b[0], sizeof *vxlan0);
      vlib_buffer_advance (b[1], sizeof *vxlan1);

      u32 fi0 = buf_fib_index (b[0], is_ip4);
      u32 fi1 = buf_fib_index (b[1], is_ip4);

      vxlan_tunnel_t *t0, *stats_t0;
      vxlan_tunnel_t *t1, *stats_t1;
      if (is_ip4)
        {
          t0 =
            vxlan4_find_tunnel (vxm, &last4, fi0, ip4_0, vxlan0, &stats_t0);
          t1 =
            vxlan4_find_tunnel (vxm, &last4, fi1, ip4_1, vxlan1, &stats_t1);
        }
      else
        {
          t0 =
            vxlan6_find_tunnel (vxm, &last6, fi0, ip6_0, vxlan0, &stats_t0);
          t1 =
            vxlan6_find_tunnel (vxm, &last6, fi1, ip6_1, vxlan1, &stats_t1);
        }

      error0 = t0 == 0 ? VXLAN_ERROR_NO_SUCH_TUNNEL : error0;
      error1 = t1 == 0 ? VXLAN_ERROR_NO_SUCH_TUNNEL : error1;

      /* Prefetch next iteration. */
      CLIB_PREFETCH (b[2]->data, CLIB_CACHE_LINE_BYTES, LOAD);
      CLIB_PREFETCH (b[3]->data, CLIB_CACHE_LINE_BYTES, LOAD);

      u32 len0 = vlib_buffer_length_in_chain (vm, b[0]);
      u32 len1 = vlib_buffer_length_in_chain (vm, b[1]);

      /* Drop on error, else hand off to the tunnel's decap next node */
      if (PREDICT_FALSE (error0 != 0))
        {
          next[0] = VXLAN_INPUT_NEXT_DROP;

          if (error0 == VXLAN_ERROR_BAD_FLAGS)
            vlib_increment_combined_counter
              (drop_counter, thread_index, stats_t0->sw_if_index, 1, len0);
          b[0]->error = node->errors[error0];
          pkts_dropped++;
        }
      else
        {
          next[0] = t0->decap_next_index;

          /* Required to make the l2 tag push / pop code work on l2 subifs */
          if (PREDICT_TRUE (next[0] == VXLAN_INPUT_NEXT_L2_INPUT))
            vnet_update_l2_len (b[0]);

          /* Set packet input sw_if_index to unicast VXLAN tunnel for learning */
          vnet_buffer (b[0])->sw_if_index[VLIB_RX] = t0->sw_if_index;
          vlib_increment_combined_counter
            (rx_counter, thread_index, stats_t0->sw_if_index, 1, len0);
        }

      /* Drop on error, else hand off to the tunnel's decap next node */
      if (PREDICT_FALSE (error1 != 0))
        {
          next[1] = VXLAN_INPUT_NEXT_DROP;

          if (error1 == VXLAN_ERROR_BAD_FLAGS)
            vlib_increment_combined_counter
              (drop_counter, thread_index, stats_t1->sw_if_index, 1, len1);
          b[1]->error = node->errors[error1];
          pkts_dropped++;
        }
      else
        {
          next[1] = t1->decap_next_index;

          /* Required to make the l2 tag push / pop code work on l2 subifs */
          if (PREDICT_TRUE (next[1] == VXLAN_INPUT_NEXT_L2_INPUT))
            vnet_update_l2_len (b[1]);

          /* Set packet input sw_if_index to unicast VXLAN tunnel for learning */
          vnet_buffer (b[1])->sw_if_index[VLIB_RX] = t1->sw_if_index;

          vlib_increment_combined_counter
            (rx_counter, thread_index, stats_t1->sw_if_index, 1, len1);
        }

      if (PREDICT_FALSE (b[0]->flags & VLIB_BUFFER_IS_TRACED))
        {
          vxlan_rx_trace_t *tr =
            vlib_add_trace (vm, node, b[0], sizeof (*tr));
          tr->next_index = next[0];
          tr->error = error0;
          tr->tunnel_index = t0 == 0 ? ~0 : t0 - vxm->tunnels;
          tr->vni = vnet_get_vni (vxlan0);
        }
      if (PREDICT_FALSE (b[1]->flags & VLIB_BUFFER_IS_TRACED))
        {
          vxlan_rx_trace_t *tr =
            vlib_add_trace (vm, node, b[1], sizeof (*tr));
          tr->next_index = next[1];
          tr->error = error1;
          tr->tunnel_index = t1 == 0 ? ~0 : t1 - vxm->tunnels;
          tr->vni = vnet_get_vni (vxlan1);
        }

      /* next */
      b += 2;
      next += 2;
      n_left_from -= 2;
    }
  while (n_left_from > 0)
    {
      /* udp leaves current_data pointing at the vxlan header */
      void *cur0 = vlib_buffer_get_current (b[0]);
      vxlan_header_t *vxlan0 = cur0;
      u8 error0 = vxlan0->flags != VXLAN_FLAGS_I ? VXLAN_ERROR_BAD_FLAGS : 0;

      ip4_header_t *ip4_0;
      ip6_header_t *ip6_0;
      if (is_ip4)
        ip4_0 = cur0 - sizeof (udp_header_t) - sizeof (ip4_header_t);
      else
        ip6_0 = cur0 - sizeof (udp_header_t) - sizeof (ip6_header_t);

      /* pop vxlan header (ip and udp were already consumed) */
      vlib_buffer_advance (b[0], sizeof (*vxlan0));

      u32 fi0 = buf_fib_index (b[0], is_ip4);

      vxlan_tunnel_t *t0, *stats_t0;
      if (is_ip4)
        t0 = vxlan4_find_tunnel (vxm, &last4, fi0, ip4_0, vxlan0, &stats_t0);
      else
        t0 = vxlan6_find_tunnel (vxm, &last6, fi0, ip6_0, vxlan0, &stats_t0);

      error0 = t0 == 0 ? VXLAN_ERROR_NO_SUCH_TUNNEL : error0;
      uword len0 = vlib_buffer_length_in_chain (vm, b[0]);

      /* Drop on error, else hand off to the tunnel's decap next node */
      if (PREDICT_FALSE (error0 != 0))
        {
          next[0] = VXLAN_INPUT_NEXT_DROP;

          if (error0 == VXLAN_ERROR_BAD_FLAGS)
            vlib_increment_combined_counter
              (drop_counter, thread_index, stats_t0->sw_if_index, 1, len0);
          b[0]->error = node->errors[error0];
          pkts_dropped++;
        }
      else
        {
          next[0] = t0->decap_next_index;

          /* Required to make the l2 tag push / pop code work on l2 subifs */
          if (PREDICT_TRUE (next[0] == VXLAN_INPUT_NEXT_L2_INPUT))
            vnet_update_l2_len (b[0]);

          /* Set packet input sw_if_index to unicast VXLAN tunnel for learning */
          vnet_buffer (b[0])->sw_if_index[VLIB_RX] = t0->sw_if_index;

          vlib_increment_combined_counter
            (rx_counter, thread_index, stats_t0->sw_if_index, 1, len0);
        }

      if (PREDICT_FALSE (b[0]->flags & VLIB_BUFFER_IS_TRACED))
        {
          vxlan_rx_trace_t *tr
            = vlib_add_trace (vm, node, b[0], sizeof (*tr));
          tr->next_index = next[0];
          tr->error = error0;
          tr->tunnel_index = t0 == 0 ? ~0 : t0 - vxm->tunnels;
          tr->vni = vnet_get_vni (vxlan0);
        }

      /* next */
      b += 1;
      next += 1;
      n_left_from -= 1;
    }

  vlib_buffer_enqueue_to_next (vm, node, from, nexts, from_frame->n_vectors);

  /* Do we still need this now that tunnel rx stats are kept? */
  u32 node_idx = is_ip4 ? vxlan4_input_node.index : vxlan6_input_node.index;
  vlib_node_increment_counter (vm, node_idx, VXLAN_ERROR_DECAPSULATED,
                               from_frame->n_vectors - pkts_dropped);

  return from_frame->n_vectors;
}
VLIB_NODE_FN (vxlan4_input_node) (vlib_main_t * vm,
                                  vlib_node_runtime_t * node,
                                  vlib_frame_t * from_frame)
{
  return vxlan_input (vm, node, from_frame, /* is_ip4 */ 1);
}

VLIB_NODE_FN (vxlan6_input_node) (vlib_main_t * vm,
                                  vlib_node_runtime_t * node,
                                  vlib_frame_t * from_frame)
{
  return vxlan_input (vm, node, from_frame, /* is_ip4 */ 0);
}
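
/* Editor's note (hedged): vxlan_error.def is expected to hold entries of the
 * form vxlan_error (DECAPSULATED, "good packets decapsulated"); the macro
 * below keeps only the description string for the error-string table. */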
static char *vxlan_error_strings[] = {
#define vxlan_error(n,s) s,
#include <vnet/vxlan/vxlan_error.def>
#undef vxlan_error
};
VLIB_REGISTER_NODE (vxlan4_input_node) =
{
  .name = "vxlan4-input",
  .vector_size = sizeof (u32),
  .n_errors = VXLAN_N_ERROR,
  .error_strings = vxlan_error_strings,
  .n_next_nodes = VXLAN_INPUT_N_NEXT,
  .format_trace = format_vxlan_rx_trace,
  .next_nodes = {
#define _(s,n) [VXLAN_INPUT_NEXT_##s] = n,
    foreach_vxlan_input_next
#undef _
  },
};
VLIB_REGISTER_NODE (vxlan6_input_node) =
{
  .name = "vxlan6-input",
  .vector_size = sizeof (u32),
  .n_errors = VXLAN_N_ERROR,
  .error_strings = vxlan_error_strings,
  .n_next_nodes = VXLAN_INPUT_N_NEXT,
  .next_nodes = {
#define _(s,n) [VXLAN_INPUT_NEXT_##s] = n,
    foreach_vxlan_input_next
#undef _
  },
  .format_trace = format_vxlan_rx_trace,
};
typedef enum
{
  IP_VXLAN_BYPASS_NEXT_DROP,
  IP_VXLAN_BYPASS_NEXT_VXLAN,
  IP_VXLAN_BYPASS_N_NEXT,
} ip_vxlan_bypass_next_t;
always_inline uword
ip_vxlan_bypass_inline (vlib_main_t * vm,
                        vlib_node_runtime_t * node,
                        vlib_frame_t * frame, u32 is_ip4)
{
  vxlan_main_t *vxm = &vxlan_main;
  u32 *from, *to_next, n_left_from, n_left_to_next, next_index;
  vlib_node_runtime_t *error_node =
    vlib_node_get_runtime (vm, ip4_input_node.index);
  ip4_address_t addr4;          /* last IPv4 address matching a local VTEP address */
  ip6_address_t addr6;          /* last IPv6 address matching a local VTEP address */

  from = vlib_frame_vector_args (frame);
  n_left_from = frame->n_vectors;
  next_index = node->cached_next_index;

  if (node->flags & VLIB_NODE_FLAG_TRACE)
    ip4_forward_next_trace (vm, node, frame, VLIB_TX);

  if (is_ip4)
    ip4_address_set_zero (&addr4);
  else
    ip6_address_set_zero (&addr6);
  while (n_left_from > 0)
    {
      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      while (n_left_from >= 4 && n_left_to_next >= 2)
        {
          vlib_buffer_t *b0, *b1;
          ip4_header_t *ip40, *ip41;
          ip6_header_t *ip60, *ip61;
          udp_header_t *udp0, *udp1;
          u32 bi0, ip_len0, udp_len0, flags0, next0;
          u32 bi1, ip_len1, udp_len1, flags1, next1;
          i32 len_diff0, len_diff1;
          u8 error0, good_udp0, proto0;
          u8 error1, good_udp1, proto1;

          /* Prefetch next iteration. */
          {
            vlib_buffer_t *p2, *p3;

            p2 = vlib_get_buffer (vm, from[2]);
            p3 = vlib_get_buffer (vm, from[3]);

            vlib_prefetch_buffer_header (p2, LOAD);
            vlib_prefetch_buffer_header (p3, LOAD);

            CLIB_PREFETCH (p2->data, 2 * CLIB_CACHE_LINE_BYTES, LOAD);
            CLIB_PREFETCH (p3->data, 2 * CLIB_CACHE_LINE_BYTES, LOAD);
          }

          bi0 = to_next[0] = from[0];
          bi1 = to_next[1] = from[1];
          from += 2;
          n_left_from -= 2;
          to_next += 2;
          n_left_to_next -= 2;

          b0 = vlib_get_buffer (vm, bi0);
          b1 = vlib_get_buffer (vm, bi1);
          if (is_ip4)
            {
              ip40 = vlib_buffer_get_current (b0);
              ip41 = vlib_buffer_get_current (b1);
            }
          else
            {
              ip60 = vlib_buffer_get_current (b0);
              ip61 = vlib_buffer_get_current (b1);
            }

          /* Setup packet for next IP feature */
          vnet_feature_next (&next0, b0);
          vnet_feature_next (&next1, b1);

          if (is_ip4)
            {
              /* Treat IP frag packets as "experimental" protocol for now
                 until IP frag reassembly is supported */
              proto0 = ip4_is_fragment (ip40) ? 0xfe : ip40->protocol;
              proto1 = ip4_is_fragment (ip41) ? 0xfe : ip41->protocol;
            }
          else
            {
              proto0 = ip60->protocol;
              proto1 = ip61->protocol;
            }

          /* Process packet 0 */
          if (proto0 != IP_PROTOCOL_UDP)
            goto exit0;         /* not UDP packet */

          if (is_ip4)
            udp0 = ip4_next_header (ip40);
          else
            udp0 = ip6_next_header (ip60);

          if (udp0->dst_port != clib_host_to_net_u16 (UDP_DST_PORT_vxlan))
            goto exit0;         /* not VXLAN packet */

          /* Validate DIP against VTEPs */
          if (is_ip4)
            {
              if (addr4.as_u32 != ip40->dst_address.as_u32)
                {
                  if (!hash_get (vxm->vtep4, ip40->dst_address.as_u32))
                    goto exit0; /* no local VTEP for VXLAN packet */
                  addr4 = ip40->dst_address;
                }
            }
          else
            {
              if (!ip6_address_is_equal (&addr6, &ip60->dst_address))
                {
                  if (!hash_get_mem (vxm->vtep6, &ip60->dst_address))
                    goto exit0; /* no local VTEP for VXLAN packet */
                  addr6 = ip60->dst_address;
                }
            }

          flags0 = b0->flags;
          good_udp0 = (flags0 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;

          /* Don't verify UDP checksum for packets with explicit zero checksum. */
          good_udp0 |= udp0->checksum == 0;
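
          /* Editor's note (hedged): RFC 7348 permits VXLAN-over-IPv4 senders
           * to emit a zero UDP checksum and requires receivers to accept it,
           * hence the explicit-zero exemption above. */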
          /* Verify UDP length */
          if (is_ip4)
            ip_len0 = clib_net_to_host_u16 (ip40->length);
          else
            ip_len0 = clib_net_to_host_u16 (ip60->payload_length);
          udp_len0 = clib_net_to_host_u16 (udp0->length);
          len_diff0 = ip_len0 - udp_len0;

          /* Verify UDP checksum */
          if (PREDICT_FALSE (!good_udp0))
            {
              if ((flags0 & VNET_BUFFER_F_L4_CHECKSUM_COMPUTED) == 0)
                {
                  if (is_ip4)
                    flags0 = ip4_tcp_udp_validate_checksum (vm, b0);
                  else
                    flags0 = ip6_tcp_udp_icmp_validate_checksum (vm, b0);
                  good_udp0 =
                    (flags0 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;
                }
            }

          if (is_ip4)
            {
              error0 = good_udp0 ? 0 : IP4_ERROR_UDP_CHECKSUM;
              error0 = (len_diff0 >= 0) ? error0 : IP4_ERROR_UDP_LENGTH;
            }
          else
            {
              error0 = good_udp0 ? 0 : IP6_ERROR_UDP_CHECKSUM;
              error0 = (len_diff0 >= 0) ? error0 : IP6_ERROR_UDP_LENGTH;
            }

          next0 = error0 ?
            IP_VXLAN_BYPASS_NEXT_DROP : IP_VXLAN_BYPASS_NEXT_VXLAN;
          b0->error = error0 ? error_node->errors[error0] : 0;

          /* vxlan-input nodes expect current_data at the VXLAN header */
          if (is_ip4)
            vlib_buffer_advance (b0, sizeof (ip4_header_t) +
                                 sizeof (udp_header_t));
          else
            vlib_buffer_advance (b0, sizeof (ip6_header_t) +
                                 sizeof (udp_header_t));

        exit0:
          /* Process packet 1 */
          if (proto1 != IP_PROTOCOL_UDP)
            goto exit1;         /* not UDP packet */

          if (is_ip4)
            udp1 = ip4_next_header (ip41);
          else
            udp1 = ip6_next_header (ip61);

          if (udp1->dst_port != clib_host_to_net_u16 (UDP_DST_PORT_vxlan))
            goto exit1;         /* not VXLAN packet */

          /* Validate DIP against VTEPs */
          if (is_ip4)
            {
              if (addr4.as_u32 != ip41->dst_address.as_u32)
                {
                  if (!hash_get (vxm->vtep4, ip41->dst_address.as_u32))
                    goto exit1; /* no local VTEP for VXLAN packet */
                  addr4 = ip41->dst_address;
                }
            }
          else
            {
              if (!ip6_address_is_equal (&addr6, &ip61->dst_address))
                {
                  if (!hash_get_mem (vxm->vtep6, &ip61->dst_address))
                    goto exit1; /* no local VTEP for VXLAN packet */
                  addr6 = ip61->dst_address;
                }
            }

          flags1 = b1->flags;
          good_udp1 = (flags1 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;

          /* Don't verify UDP checksum for packets with explicit zero checksum. */
          good_udp1 |= udp1->checksum == 0;

          /* Verify UDP length */
          if (is_ip4)
            ip_len1 = clib_net_to_host_u16 (ip41->length);
          else
            ip_len1 = clib_net_to_host_u16 (ip61->payload_length);
          udp_len1 = clib_net_to_host_u16 (udp1->length);
          len_diff1 = ip_len1 - udp_len1;

          /* Verify UDP checksum */
          if (PREDICT_FALSE (!good_udp1))
            {
              if ((flags1 & VNET_BUFFER_F_L4_CHECKSUM_COMPUTED) == 0)
                {
                  if (is_ip4)
                    flags1 = ip4_tcp_udp_validate_checksum (vm, b1);
                  else
                    flags1 = ip6_tcp_udp_icmp_validate_checksum (vm, b1);
                  good_udp1 =
                    (flags1 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;
                }
            }

          if (is_ip4)
            {
              error1 = good_udp1 ? 0 : IP4_ERROR_UDP_CHECKSUM;
              error1 = (len_diff1 >= 0) ? error1 : IP4_ERROR_UDP_LENGTH;
            }
          else
            {
              error1 = good_udp1 ? 0 : IP6_ERROR_UDP_CHECKSUM;
              error1 = (len_diff1 >= 0) ? error1 : IP6_ERROR_UDP_LENGTH;
            }

          next1 = error1 ?
            IP_VXLAN_BYPASS_NEXT_DROP : IP_VXLAN_BYPASS_NEXT_VXLAN;
          b1->error = error1 ? error_node->errors[error1] : 0;

          /* vxlan-input nodes expect current_data at the VXLAN header */
          if (is_ip4)
            vlib_buffer_advance (b1, sizeof (ip4_header_t) +
                                 sizeof (udp_header_t));
          else
            vlib_buffer_advance (b1, sizeof (ip6_header_t) +
                                 sizeof (udp_header_t));

        exit1:
          vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, bi1, next0, next1);
        }
      while (n_left_from > 0 && n_left_to_next > 0)
        {
          vlib_buffer_t *b0;
          ip4_header_t *ip40;
          ip6_header_t *ip60;
          udp_header_t *udp0;
          u32 bi0, ip_len0, udp_len0, flags0, next0;
          i32 len_diff0;
          u8 error0, good_udp0, proto0;

          bi0 = to_next[0] = from[0];
          from += 1;
          n_left_from -= 1;
          to_next += 1;
          n_left_to_next -= 1;

          b0 = vlib_get_buffer (vm, bi0);
          if (is_ip4)
            ip40 = vlib_buffer_get_current (b0);
          else
            ip60 = vlib_buffer_get_current (b0);

          /* Setup packet for next IP feature */
          vnet_feature_next (&next0, b0);

          if (is_ip4)
            /* Treat IP4 frag packets as "experimental" protocol for now
               until IP frag reassembly is supported */
            proto0 = ip4_is_fragment (ip40) ? 0xfe : ip40->protocol;
          else
            proto0 = ip60->protocol;

          if (proto0 != IP_PROTOCOL_UDP)
            goto exit;          /* not UDP packet */

          if (is_ip4)
            udp0 = ip4_next_header (ip40);
          else
            udp0 = ip6_next_header (ip60);

          if (udp0->dst_port != clib_host_to_net_u16 (UDP_DST_PORT_vxlan))
            goto exit;          /* not VXLAN packet */

          /* Validate DIP against VTEPs */
          if (is_ip4)
            {
              if (addr4.as_u32 != ip40->dst_address.as_u32)
                {
                  if (!hash_get (vxm->vtep4, ip40->dst_address.as_u32))
                    goto exit;  /* no local VTEP for VXLAN packet */
                  addr4 = ip40->dst_address;
                }
            }
          else
            {
              if (!ip6_address_is_equal (&addr6, &ip60->dst_address))
                {
                  if (!hash_get_mem (vxm->vtep6, &ip60->dst_address))
                    goto exit;  /* no local VTEP for VXLAN packet */
                  addr6 = ip60->dst_address;
                }
            }

          flags0 = b0->flags;
          good_udp0 = (flags0 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;

          /* Don't verify UDP checksum for packets with explicit zero checksum. */
          good_udp0 |= udp0->checksum == 0;

          /* Verify UDP length */
          if (is_ip4)
            ip_len0 = clib_net_to_host_u16 (ip40->length);
          else
            ip_len0 = clib_net_to_host_u16 (ip60->payload_length);
          udp_len0 = clib_net_to_host_u16 (udp0->length);
          len_diff0 = ip_len0 - udp_len0;

          /* Verify UDP checksum */
          if (PREDICT_FALSE (!good_udp0))
            {
              if ((flags0 & VNET_BUFFER_F_L4_CHECKSUM_COMPUTED) == 0)
                {
                  if (is_ip4)
                    flags0 = ip4_tcp_udp_validate_checksum (vm, b0);
                  else
                    flags0 = ip6_tcp_udp_icmp_validate_checksum (vm, b0);
                  good_udp0 =
                    (flags0 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;
                }
            }

          if (is_ip4)
            {
              error0 = good_udp0 ? 0 : IP4_ERROR_UDP_CHECKSUM;
              error0 = (len_diff0 >= 0) ? error0 : IP4_ERROR_UDP_LENGTH;
            }
          else
            {
              error0 = good_udp0 ? 0 : IP6_ERROR_UDP_CHECKSUM;
              error0 = (len_diff0 >= 0) ? error0 : IP6_ERROR_UDP_LENGTH;
            }

          next0 = error0 ?
            IP_VXLAN_BYPASS_NEXT_DROP : IP_VXLAN_BYPASS_NEXT_VXLAN;
          b0->error = error0 ? error_node->errors[error0] : 0;

          /* vxlan-input nodes expect current_data at the VXLAN header */
          if (is_ip4)
            vlib_buffer_advance (b0, sizeof (ip4_header_t) +
                                 sizeof (udp_header_t));
          else
            vlib_buffer_advance (b0, sizeof (ip6_header_t) +
                                 sizeof (udp_header_t));

        exit:
          vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, next0);
        }

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

  return frame->n_vectors;
}
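
/* Editor's note (hedged): this node is typically enabled per interface with
 * the "set interface ip vxlan-bypass" (resp. "set interface ip6
 * vxlan-bypass") debug CLI; once enabled, received VXLAN packets skip the
 * generic ip4/ip6 local processing and jump straight to vxlan*-input. */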
VLIB_NODE_FN (ip4_vxlan_bypass_node) (vlib_main_t * vm,
                                      vlib_node_runtime_t * node,
                                      vlib_frame_t * frame)
{
  return ip_vxlan_bypass_inline (vm, node, frame, /* is_ip4 */ 1);
}

VLIB_REGISTER_NODE (ip4_vxlan_bypass_node) =
{
  .name = "ip4-vxlan-bypass",
  .vector_size = sizeof (u32),
  .n_next_nodes = IP_VXLAN_BYPASS_N_NEXT,
  .next_nodes = {
    [IP_VXLAN_BYPASS_NEXT_DROP] = "error-drop",
    [IP_VXLAN_BYPASS_NEXT_VXLAN] = "vxlan4-input",
  },
  .format_buffer = format_ip4_header,
  .format_trace = format_ip4_forward_next_trace,
};
/* Dummy init function to get us linked in. */
static clib_error_t *
ip4_vxlan_bypass_init (vlib_main_t * vm)
{
  return 0;
}

VLIB_INIT_FUNCTION (ip4_vxlan_bypass_init);
VLIB_NODE_FN (ip6_vxlan_bypass_node) (vlib_main_t * vm,
                                      vlib_node_runtime_t * node,
                                      vlib_frame_t * frame)
{
  return ip_vxlan_bypass_inline (vm, node, frame, /* is_ip4 */ 0);
}

VLIB_REGISTER_NODE (ip6_vxlan_bypass_node) =
{
  .name = "ip6-vxlan-bypass",
  .vector_size = sizeof (u32),
  .n_next_nodes = IP_VXLAN_BYPASS_N_NEXT,
  .next_nodes = {
    [IP_VXLAN_BYPASS_NEXT_DROP] = "error-drop",
    [IP_VXLAN_BYPASS_NEXT_VXLAN] = "vxlan6-input",
  },
  .format_buffer = format_ip6_header,
  .format_trace = format_ip6_forward_next_trace,
};

/* Dummy init function to get us linked in. */
static clib_error_t *
ip6_vxlan_bypass_init (vlib_main_t * vm)
{
  return 0;
}

VLIB_INIT_FUNCTION (ip6_vxlan_bypass_init);
#define foreach_vxlan_flow_input_next \
  _(DROP, "error-drop") \
  _(L2_INPUT, "l2-input")

typedef enum
{
#define _(s,n) VXLAN_FLOW_NEXT_##s,
  foreach_vxlan_flow_input_next
#undef _
  VXLAN_FLOW_N_NEXT,
} vxlan_flow_input_next_t;
#define foreach_vxlan_flow_error \
  _(NONE, "no error") \
  _(IP_CHECKSUM_ERROR, "Rx ip checksum errors") \
  _(IP_HEADER_ERROR, "Rx ip header errors") \
  _(UDP_CHECKSUM_ERROR, "Rx udp checksum errors") \
  _(UDP_LENGTH_ERROR, "Rx udp length errors")

typedef enum
{
#define _(f,s) VXLAN_FLOW_ERROR_##f,
  foreach_vxlan_flow_error
#undef _
  VXLAN_FLOW_N_ERROR,
} vxlan_flow_error_t;

static char *vxlan_flow_error_strings[] = {
#define _(n,s) s,
  foreach_vxlan_flow_error
#undef _
};
static_always_inline u8
vxlan_validate_udp_csum (vlib_main_t * vm, vlib_buffer_t * b)
{
  u32 flags = b->flags;
  enum
  { offset =
      sizeof (ip4_header_t) + sizeof (udp_header_t) + sizeof (vxlan_header_t),
  };

  /* Verify UDP checksum */
  if ((flags & VNET_BUFFER_F_L4_CHECKSUM_COMPUTED) == 0)
    {
      vlib_buffer_advance (b, -offset);
      flags = ip4_tcp_udp_validate_checksum (vm, b);
      vlib_buffer_advance (b, offset);
    }

  return (flags & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;
}
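
/* Editor's note (hedged): current_data points at the inner L2 frame by the
 * time this runs, so the buffer is temporarily rewound by the outer
 * ip4+udp+vxlan header size for the generic checksum validator, then
 * restored. */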
static_always_inline u8
vxlan_check_udp_csum (vlib_main_t * vm, vlib_buffer_t * b)
{
  ip4_vxlan_header_t *hdr = vlib_buffer_get_current (b) - sizeof *hdr;
  udp_header_t *udp = &hdr->udp;
  /* Don't verify UDP checksum for packets with explicit zero checksum. */
  u8 good_csum = (b->flags & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0 ||
    udp->checksum == 0;

  return !good_csum;
}
static_always_inline u8
vxlan_check_ip (vlib_buffer_t * b, u16 payload_len)
{
  ip4_vxlan_header_t *hdr = vlib_buffer_get_current (b) - sizeof *hdr;
  u16 ip_len = clib_net_to_host_u16 (hdr->ip4.length);
  u16 expected = payload_len + sizeof *hdr;
  return ip_len > expected || hdr->ip4.ttl == 0
    || hdr->ip4.ip_version_and_header_length != 0x45;
}
static_always_inline u8
vxlan_check_ip_udp_len (vlib_buffer_t * b)
{
  ip4_vxlan_header_t *hdr = vlib_buffer_get_current (b) - sizeof *hdr;
  u16 ip_len = clib_net_to_host_u16 (hdr->ip4.length);
  u16 udp_len = clib_net_to_host_u16 (hdr->udp.length);
  return udp_len > ip_len;
}
static_always_inline u8
vxlan_err_code (u8 ip_err0, u8 udp_err0, u8 csum_err0)
{
  u8 error0 = VXLAN_FLOW_ERROR_NONE;
  if (ip_err0)
    error0 = VXLAN_FLOW_ERROR_IP_HEADER_ERROR;
  if (udp_err0)
    error0 = VXLAN_FLOW_ERROR_UDP_LENGTH_ERROR;
  if (csum_err0)
    error0 = VXLAN_FLOW_ERROR_UDP_CHECKSUM_ERROR;
  return error0;
}
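
/* Editor's note (hedged): when several checks fail at once the later
 * assignments win, so the reported error priority is
 * UDP checksum > UDP length > IP header. */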
VLIB_NODE_FN (vxlan4_flow_input_node) (vlib_main_t * vm,
                                       vlib_node_runtime_t * node,
                                       vlib_frame_t * f)
{
  enum
  { payload_offset = sizeof (ip4_vxlan_header_t) };

  vxlan_main_t *vxm = &vxlan_main;
  vnet_interface_main_t *im = &vnet_main.interface_main;
  vlib_combined_counter_main_t *rx_counter[VXLAN_FLOW_N_NEXT] = {
    [VXLAN_FLOW_NEXT_DROP] =
      im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_DROP,
    [VXLAN_FLOW_NEXT_L2_INPUT] =
      im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX,
  };
  u32 thread_index = vlib_get_thread_index ();

  u32 *from = vlib_frame_vector_args (f);
  u32 n_left_from = f->n_vectors;
  u32 next_index = VXLAN_FLOW_NEXT_L2_INPUT;
  while (n_left_from > 0)
    {
      u32 n_left_to_next, *to_next;

      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      while (n_left_from > 3 && n_left_to_next > 3)
        {
          u32 bi0 = to_next[0] = from[0];
          u32 bi1 = to_next[1] = from[1];
          u32 bi2 = to_next[2] = from[2];
          u32 bi3 = to_next[3] = from[3];
          from += 4;
          to_next += 4;
          n_left_from -= 4;
          n_left_to_next -= 4;

          vlib_buffer_t *b0 = vlib_get_buffer (vm, bi0);
          vlib_buffer_t *b1 = vlib_get_buffer (vm, bi1);
          vlib_buffer_t *b2 = vlib_get_buffer (vm, bi2);
          vlib_buffer_t *b3 = vlib_get_buffer (vm, bi3);

          vlib_buffer_advance (b0, payload_offset);
          vlib_buffer_advance (b1, payload_offset);
          vlib_buffer_advance (b2, payload_offset);
          vlib_buffer_advance (b3, payload_offset);

          u16 len0 = vlib_buffer_length_in_chain (vm, b0);
          u16 len1 = vlib_buffer_length_in_chain (vm, b1);
          u16 len2 = vlib_buffer_length_in_chain (vm, b2);
          u16 len3 = vlib_buffer_length_in_chain (vm, b3);

          u32 next0 = VXLAN_FLOW_NEXT_L2_INPUT;
          u32 next1 = VXLAN_FLOW_NEXT_L2_INPUT;
          u32 next2 = VXLAN_FLOW_NEXT_L2_INPUT;
          u32 next3 = VXLAN_FLOW_NEXT_L2_INPUT;

          u8 ip_err0 = vxlan_check_ip (b0, len0);
          u8 ip_err1 = vxlan_check_ip (b1, len1);
          u8 ip_err2 = vxlan_check_ip (b2, len2);
          u8 ip_err3 = vxlan_check_ip (b3, len3);
          u8 ip_err = ip_err0 | ip_err1 | ip_err2 | ip_err3;

          u8 udp_err0 = vxlan_check_ip_udp_len (b0);
          u8 udp_err1 = vxlan_check_ip_udp_len (b1);
          u8 udp_err2 = vxlan_check_ip_udp_len (b2);
          u8 udp_err3 = vxlan_check_ip_udp_len (b3);
          u8 udp_err = udp_err0 | udp_err1 | udp_err2 | udp_err3;

          u8 csum_err0 = vxlan_check_udp_csum (vm, b0);
          u8 csum_err1 = vxlan_check_udp_csum (vm, b1);
          u8 csum_err2 = vxlan_check_udp_csum (vm, b2);
          u8 csum_err3 = vxlan_check_udp_csum (vm, b3);
          u8 csum_err = csum_err0 | csum_err1 | csum_err2 | csum_err3;

          if (PREDICT_FALSE (csum_err))
            {
              if (csum_err0)
                csum_err0 = !vxlan_validate_udp_csum (vm, b0);
              if (csum_err1)
                csum_err1 = !vxlan_validate_udp_csum (vm, b1);
              if (csum_err2)
                csum_err2 = !vxlan_validate_udp_csum (vm, b2);
              if (csum_err3)
                csum_err3 = !vxlan_validate_udp_csum (vm, b3);
              csum_err = csum_err0 | csum_err1 | csum_err2 | csum_err3;
            }

          if (PREDICT_FALSE (ip_err || udp_err || csum_err))
            {
              if (ip_err0 || udp_err0 || csum_err0)
                {
                  next0 = VXLAN_FLOW_NEXT_DROP;
                  u8 error0 = vxlan_err_code (ip_err0, udp_err0, csum_err0);
                  b0->error = node->errors[error0];
                }
              if (ip_err1 || udp_err1 || csum_err1)
                {
                  next1 = VXLAN_FLOW_NEXT_DROP;
                  u8 error1 = vxlan_err_code (ip_err1, udp_err1, csum_err1);
                  b1->error = node->errors[error1];
                }
              if (ip_err2 || udp_err2 || csum_err2)
                {
                  next2 = VXLAN_FLOW_NEXT_DROP;
                  u8 error2 = vxlan_err_code (ip_err2, udp_err2, csum_err2);
                  b2->error = node->errors[error2];
                }
              if (ip_err3 || udp_err3 || csum_err3)
                {
                  next3 = VXLAN_FLOW_NEXT_DROP;
                  u8 error3 = vxlan_err_code (ip_err3, udp_err3, csum_err3);
                  b3->error = node->errors[error3];
                }
            }

          vnet_update_l2_len (b0);
          vnet_update_l2_len (b1);
          vnet_update_l2_len (b2);
          vnet_update_l2_len (b3);

          ASSERT (b0->flow_id != 0);
          ASSERT (b1->flow_id != 0);
          ASSERT (b2->flow_id != 0);
          ASSERT (b3->flow_id != 0);

          u32 t_index0 = b0->flow_id - vxm->flow_id_start;
          u32 t_index1 = b1->flow_id - vxm->flow_id_start;
          u32 t_index2 = b2->flow_id - vxm->flow_id_start;
          u32 t_index3 = b3->flow_id - vxm->flow_id_start;

          vxlan_tunnel_t *t0 = &vxm->tunnels[t_index0];
          vxlan_tunnel_t *t1 = &vxm->tunnels[t_index1];
          vxlan_tunnel_t *t2 = &vxm->tunnels[t_index2];
          vxlan_tunnel_t *t3 = &vxm->tunnels[t_index3];

          /* flow id consumed */
          b0->flow_id = 0;
          b1->flow_id = 0;
          b2->flow_id = 0;
          b3->flow_id = 0;

          u32 sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX] =
            t0->sw_if_index;
          u32 sw_if_index1 = vnet_buffer (b1)->sw_if_index[VLIB_RX] =
            t1->sw_if_index;
          u32 sw_if_index2 = vnet_buffer (b2)->sw_if_index[VLIB_RX] =
            t2->sw_if_index;
          u32 sw_if_index3 = vnet_buffer (b3)->sw_if_index[VLIB_RX] =
            t3->sw_if_index;

          vlib_increment_combined_counter (rx_counter[next0], thread_index,
                                           sw_if_index0, 1, len0);
          vlib_increment_combined_counter (rx_counter[next1], thread_index,
                                           sw_if_index1, 1, len1);
          vlib_increment_combined_counter (rx_counter[next2], thread_index,
                                           sw_if_index2, 1, len2);
          vlib_increment_combined_counter (rx_counter[next3], thread_index,
                                           sw_if_index3, 1, len3);

          u32 flags = b0->flags | b1->flags | b2->flags | b3->flags;

          if (PREDICT_FALSE (flags & VLIB_BUFFER_IS_TRACED))
            {
              if (b0->flags & VLIB_BUFFER_IS_TRACED)
                {
                  vxlan_rx_trace_t *tr =
                    vlib_add_trace (vm, node, b0, sizeof *tr);
                  u8 error0 = vxlan_err_code (ip_err0, udp_err0, csum_err0);
                  tr->next_index = next0;
                  tr->error = error0;
                  tr->tunnel_index = t_index0;
                  tr->vni = t0->vni;
                }
              if (b1->flags & VLIB_BUFFER_IS_TRACED)
                {
                  vxlan_rx_trace_t *tr =
                    vlib_add_trace (vm, node, b1, sizeof *tr);
                  u8 error1 = vxlan_err_code (ip_err1, udp_err1, csum_err1);
                  tr->next_index = next1;
                  tr->error = error1;
                  tr->tunnel_index = t_index1;
                  tr->vni = t1->vni;
                }
              if (b2->flags & VLIB_BUFFER_IS_TRACED)
                {
                  vxlan_rx_trace_t *tr =
                    vlib_add_trace (vm, node, b2, sizeof *tr);
                  u8 error2 = vxlan_err_code (ip_err2, udp_err2, csum_err2);
                  tr->next_index = next2;
                  tr->error = error2;
                  tr->tunnel_index = t_index2;
                  tr->vni = t2->vni;
                }
              if (b3->flags & VLIB_BUFFER_IS_TRACED)
                {
                  vxlan_rx_trace_t *tr =
                    vlib_add_trace (vm, node, b3, sizeof *tr);
                  u8 error3 = vxlan_err_code (ip_err3, udp_err3, csum_err3);
                  tr->next_index = next3;
                  tr->error = error3;
                  tr->tunnel_index = t_index3;
                  tr->vni = t3->vni;
                }
            }

          vlib_validate_buffer_enqueue_x4
            (vm, node, next_index, to_next, n_left_to_next,
             bi0, bi1, bi2, bi3, next0, next1, next2, next3);
        }
      while (n_left_from > 0 && n_left_to_next > 0)
        {
          u32 bi0 = to_next[0] = from[0];
          from += 1;
          to_next += 1;
          n_left_from -= 1;
          n_left_to_next -= 1;

          vlib_buffer_t *b0 = vlib_get_buffer (vm, bi0);
          vlib_buffer_advance (b0, payload_offset);

          u16 len0 = vlib_buffer_length_in_chain (vm, b0);
          u32 next0 = VXLAN_FLOW_NEXT_L2_INPUT;

          u8 ip_err0 = vxlan_check_ip (b0, len0);
          u8 udp_err0 = vxlan_check_ip_udp_len (b0);
          u8 csum_err0 = vxlan_check_udp_csum (vm, b0);

          if (csum_err0)
            csum_err0 = !vxlan_validate_udp_csum (vm, b0);
          if (ip_err0 || udp_err0 || csum_err0)
            {
              next0 = VXLAN_FLOW_NEXT_DROP;
              u8 error0 = vxlan_err_code (ip_err0, udp_err0, csum_err0);
              b0->error = node->errors[error0];
            }

          vnet_update_l2_len (b0);

          ASSERT (b0->flow_id != 0);
          u32 t_index0 = b0->flow_id - vxm->flow_id_start;
          vxlan_tunnel_t *t0 = &vxm->tunnels[t_index0];
          b0->flow_id = 0;

          u32 sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX] =
            t0->sw_if_index;
          vlib_increment_combined_counter (rx_counter[next0], thread_index,
                                           sw_if_index0, 1, len0);

          if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
            {
              vxlan_rx_trace_t *tr =
                vlib_add_trace (vm, node, b0, sizeof *tr);
              u8 error0 = vxlan_err_code (ip_err0, udp_err0, csum_err0);
              tr->next_index = next0;
              tr->error = error0;
              tr->tunnel_index = t_index0;
              tr->vni = t0->vni;
            }
          vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, next0);
        }

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

  return f->n_vectors;
}
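
/* Editor's note (hedged): buffers reach this node via NIC flow (RX offload)
 * steering; the driver stamps b->flow_id from the matched flow, so the
 * tunnel is recovered by index arithmetic instead of a bihash lookup. */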
#ifndef CLIB_MARCH_VARIANT
VLIB_REGISTER_NODE (vxlan4_flow_input_node) = {
  .name = "vxlan-flow-input",
  .type = VLIB_NODE_TYPE_INTERNAL,
  .vector_size = sizeof (u32),

  .format_trace = format_vxlan_rx_trace,

  .n_errors = VXLAN_FLOW_N_ERROR,
  .error_strings = vxlan_flow_error_strings,

  .n_next_nodes = VXLAN_FLOW_N_NEXT,
  .next_nodes = {
#define _(s,n) [VXLAN_FLOW_NEXT_##s] = n,
    foreach_vxlan_flow_input_next
#undef _
  },
};
#endif
/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */