/*
 * decap.c: vxlan tunnel decap packet processing
 *
 * Copyright (c) 2013 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <vlib/vlib.h>
#include <vnet/pg/pg.h>
#include <vnet/vxlan/vxlan.h>

vlib_node_registration_t vxlan4_input_node;
vlib_node_registration_t vxlan6_input_node;
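
/* The RX trace record (vxlan_rx_trace_t) carries the tunnel index, VNI,
 * chosen next node and error code captured at decap time;
 * format_vxlan_rx_trace below renders it for packet traces. */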
static u8 * format_vxlan_rx_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  vxlan_rx_trace_t * t = va_arg (*args, vxlan_rx_trace_t *);

  if (t->tunnel_index != ~0)
    s = format (s, "VXLAN decap from vxlan_tunnel%d vni %d next %d error %d",
                t->tunnel_index, t->vni, t->next_index, t->error);
  else
    s = format (s, "VXLAN decap error - tunnel for vni %d does not exist",
                t->vni);
  return s;
}

static uword
vxlan_input (vlib_main_t * vm,
             vlib_node_runtime_t * node,
             vlib_frame_t * from_frame,
             u32 is_ip4)
{
  u32 n_left_from, next_index, * from, * to_next;
  vxlan_main_t * vxm = &vxlan_main;
  vnet_main_t * vnm = vxm->vnet_main;
  vnet_interface_main_t * im = &vnm->interface_main;
  u32 last_tunnel_index = ~0;
  vxlan4_tunnel_key_t last_key4;
  vxlan6_tunnel_key_t last_key6;
  u32 pkts_decapsulated = 0;
  u32 cpu_index = os_get_cpu_number();
  u32 stats_sw_if_index, stats_n_packets, stats_n_bytes;

  last_key4.as_u64 = ~0;
  memset (&last_key6, 0xff, sizeof (last_key6));
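
  /* last_key4/last_key6 together with last_tunnel_index form a one-entry
   * lookup cache: back-to-back packets from the same source VTEP and VNI
   * reuse the previous tunnel index instead of hitting the hash table. */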
  from = vlib_frame_vector_args (from_frame);
  n_left_from = from_frame->n_vectors;

  next_index = node->cached_next_index;
  stats_sw_if_index = node->runtime_data[0];
  stats_n_packets = stats_n_bytes = 0;
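
  /* RX counters are batched per tunnel sw_if_index; the sw_if_index of the
   * current batch is carried across frames in runtime_data[0] and written
   * back at the end of this function. */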
  while (n_left_from > 0)
    {
      vlib_get_next_frame (vm, node, next_index,
                           to_next, n_left_to_next);

      while (n_left_from >= 4 && n_left_to_next >= 2)
        {
          vlib_buffer_t * b0, * b1;
          ip4_header_t * ip4_0, * ip4_1;
          ip6_header_t * ip6_0, * ip6_1;
          vxlan_header_t * vxlan0, * vxlan1;
          u32 tunnel_index0, tunnel_index1;
          vxlan_tunnel_t * t0, * t1;
          vxlan4_tunnel_key_t key4_0, key4_1;
          vxlan6_tunnel_key_t key6_0, key6_1;
          u32 sw_if_index0, sw_if_index1, len0, len1;

          /* Prefetch next iteration. */
          {
            vlib_buffer_t * p2, * p3;

            p2 = vlib_get_buffer (vm, from[2]);
            p3 = vlib_get_buffer (vm, from[3]);

            vlib_prefetch_buffer_header (p2, LOAD);
            vlib_prefetch_buffer_header (p3, LOAD);

            CLIB_PREFETCH (p2->data, 2*CLIB_CACHE_LINE_BYTES, LOAD);
            CLIB_PREFETCH (p3->data, 2*CLIB_CACHE_LINE_BYTES, LOAD);
          }

          b0 = vlib_get_buffer (vm, bi0);
          b1 = vlib_get_buffer (vm, bi1);

          /* udp leaves current_data pointing at the vxlan header */
          vxlan0 = vlib_buffer_get_current (b0);
          vxlan1 = vlib_buffer_get_current (b1);

          next0 = next1 = VXLAN_INPUT_NEXT_L2_INPUT;
          if (is_ip4) {
            vlib_buffer_advance (b0, -(word)(sizeof(udp_header_t)+sizeof(ip4_header_t)));
            vlib_buffer_advance (b1, -(word)(sizeof(udp_header_t)+sizeof(ip4_header_t)));
            ip4_0 = vlib_buffer_get_current (b0);
            ip4_1 = vlib_buffer_get_current (b1);
          } else /* !is_ip4 */ {
            vlib_buffer_advance (b0, -(word)(sizeof(udp_header_t)+sizeof(ip6_header_t)));
            vlib_buffer_advance (b1, -(word)(sizeof(udp_header_t)+sizeof(ip6_header_t)));
            ip6_0 = vlib_buffer_get_current (b0);
            ip6_1 = vlib_buffer_get_current (b1);
          }

          /* pop (ip, udp, vxlan) */
          if (is_ip4) {
            vlib_buffer_advance (b0, sizeof(*ip4_0)+sizeof(udp_header_t)+sizeof(*vxlan0));
            vlib_buffer_advance (b1, sizeof(*ip4_1)+sizeof(udp_header_t)+sizeof(*vxlan1));
          } else /* !is_ip4 */ {
            vlib_buffer_advance (b0, sizeof(*ip6_0)+sizeof(udp_header_t)+sizeof(*vxlan0));
            vlib_buffer_advance (b1, sizeof(*ip6_1)+sizeof(udp_header_t)+sizeof(*vxlan1));
          }
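
          /* RFC 7348 requires the I (valid VNI) flag to be set and all
           * reserved flag bits to be zero; anything else is counted as
           * VXLAN_ERROR_BAD_FLAGS and dropped. */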
          if (PREDICT_FALSE (vxlan0->flags != VXLAN_FLAGS_I))
            {
              error0 = VXLAN_ERROR_BAD_FLAGS;
              next0 = VXLAN_INPUT_NEXT_DROP;
            }

          if (is_ip4) {
            key4_0.src = ip4_0->src_address.as_u32;
            key4_0.vni = vxlan0->vni_reserved;
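
            /* Note: vni_reserved is the 24-bit VNI plus reserved byte exactly
             * as received; the tunnel hash is assumed to be keyed with the
             * same on-the-wire encoding, so no per-packet byte swap is needed. */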
            if (PREDICT_FALSE (key4_0.as_u64 != last_key4.as_u64))
              p0 = hash_get (vxm->vxlan4_tunnel_by_key, key4_0.as_u64);
              error0 = VXLAN_ERROR_NO_SUCH_TUNNEL;
              next0 = VXLAN_INPUT_NEXT_DROP;
              last_key4.as_u64 = key4_0.as_u64;
              tunnel_index0 = last_tunnel_index = p0[0];
            tunnel_index0 = last_tunnel_index;
          } else /* !is_ip4 */ {
            key6_0.src.as_u64[0] = ip6_0->src_address.as_u64[0];
            key6_0.src.as_u64[1] = ip6_0->src_address.as_u64[1];
            key6_0.vni = vxlan0->vni_reserved;

            if (PREDICT_FALSE (memcmp(&key6_0, &last_key6, sizeof(last_key6)) != 0))
              p0 = hash_get_mem (vxm->vxlan6_tunnel_by_key, &key6_0);
              error0 = VXLAN_ERROR_NO_SUCH_TUNNEL;
              next0 = VXLAN_INPUT_NEXT_DROP;
              clib_memcpy (&last_key6, &key6_0, sizeof(key6_0));
              tunnel_index0 = last_tunnel_index = p0[0];
            tunnel_index0 = last_tunnel_index;
          }

          t0 = pool_elt_at_index (vxm->tunnels, tunnel_index0);
          next0 = t0->decap_next_index;
          sw_if_index0 = t0->sw_if_index;
          len0 = vlib_buffer_length_in_chain (vm, b0);

          /* Required to make the l2 tag push / pop code work on l2 subifs */
          if (PREDICT_TRUE(next0 == VXLAN_INPUT_NEXT_L2_INPUT))
            vnet_update_l2_len (b0);

          /* Set input sw_if_index to VXLAN tunnel for learning */
          vnet_buffer(b0)->sw_if_index[VLIB_RX] = sw_if_index0;
          pkts_decapsulated ++;
          stats_n_packets += 1;
          stats_n_bytes += len0;

          /* Batch stats increment on the same vxlan tunnel so the counter
             is not incremented per packet */
          if (PREDICT_FALSE (sw_if_index0 != stats_sw_if_index))
            {
              stats_n_packets -= 1;
              stats_n_bytes -= len0;
              vlib_increment_combined_counter
                (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX,
                 cpu_index, stats_sw_if_index,
                 stats_n_packets, stats_n_bytes);
              stats_n_bytes = len0;
              stats_sw_if_index = sw_if_index0;
            }

          b0->error = error0 ? node->errors[error0] : 0;

          if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
            {
              vxlan_rx_trace_t *tr
                = vlib_add_trace (vm, node, b0, sizeof (*tr));
              tr->next_index = next0;
              tr->tunnel_index = tunnel_index0;
              tr->vni = vnet_get_vni (vxlan0);
            }
          if (PREDICT_FALSE (vxlan1->flags != VXLAN_FLAGS_I))
            {
              error1 = VXLAN_ERROR_BAD_FLAGS;
              next1 = VXLAN_INPUT_NEXT_DROP;
            }

          if (is_ip4) {
            key4_1.src = ip4_1->src_address.as_u32;
            key4_1.vni = vxlan1->vni_reserved;

            if (PREDICT_FALSE (key4_1.as_u64 != last_key4.as_u64))
              p1 = hash_get (vxm->vxlan4_tunnel_by_key, key4_1.as_u64);
              error1 = VXLAN_ERROR_NO_SUCH_TUNNEL;
              next1 = VXLAN_INPUT_NEXT_DROP;
              last_key4.as_u64 = key4_1.as_u64;
              tunnel_index1 = last_tunnel_index = p1[0];
            tunnel_index1 = last_tunnel_index;
          } else /* !is_ip4 */ {
            key6_1.src.as_u64[0] = ip6_1->src_address.as_u64[0];
            key6_1.src.as_u64[1] = ip6_1->src_address.as_u64[1];
            key6_1.vni = vxlan1->vni_reserved;

            if (PREDICT_FALSE (memcmp(&key6_1, &last_key6, sizeof(last_key6)) != 0))
              p1 = hash_get_mem (vxm->vxlan6_tunnel_by_key, &key6_1);
              error1 = VXLAN_ERROR_NO_SUCH_TUNNEL;
              next1 = VXLAN_INPUT_NEXT_DROP;
              clib_memcpy (&last_key6, &key6_1, sizeof(key6_1));
              tunnel_index1 = last_tunnel_index = p1[0];
            tunnel_index1 = last_tunnel_index;
          }

          t1 = pool_elt_at_index (vxm->tunnels, tunnel_index1);
          next1 = t1->decap_next_index;
          sw_if_index1 = t1->sw_if_index;
          len1 = vlib_buffer_length_in_chain (vm, b1);

          /* Required to make the l2 tag push / pop code work on l2 subifs */
          if (PREDICT_TRUE(next1 == VXLAN_INPUT_NEXT_L2_INPUT))
            vnet_update_l2_len (b1);

          /* Set input sw_if_index to VXLAN tunnel for learning */
          vnet_buffer(b1)->sw_if_index[VLIB_RX] = sw_if_index1;
          pkts_decapsulated ++;
          stats_n_packets += 1;
          stats_n_bytes += len1;

          /* Batch stats increment on the same vxlan tunnel so the counter
             is not incremented per packet */
          if (PREDICT_FALSE (sw_if_index1 != stats_sw_if_index))
            {
              stats_n_packets -= 1;
              stats_n_bytes -= len1;
              vlib_increment_combined_counter
                (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX,
                 cpu_index, stats_sw_if_index,
                 stats_n_packets, stats_n_bytes);
              stats_n_bytes = len1;
              stats_sw_if_index = sw_if_index1;
            }

          b1->error = error1 ? node->errors[error1] : 0;

          if (PREDICT_FALSE(b1->flags & VLIB_BUFFER_IS_TRACED))
            {
              vxlan_rx_trace_t *tr
                = vlib_add_trace (vm, node, b1, sizeof (*tr));
              tr->next_index = next1;
              tr->tunnel_index = tunnel_index1;
              tr->vni = vnet_get_vni (vxlan1);
            }
          vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, bi1, next0, next1);
        }

      while (n_left_from > 0 && n_left_to_next > 0)
        {
          ip4_header_t * ip4_0;
          ip6_header_t * ip6_0;
          vxlan_header_t * vxlan0;
          vxlan4_tunnel_key_t key4_0;
          vxlan6_tunnel_key_t key6_0;
          u32 sw_if_index0, len0;

          b0 = vlib_get_buffer (vm, bi0);

          /* udp leaves current_data pointing at the vxlan header */
          vxlan0 = vlib_buffer_get_current (b0);

          next0 = VXLAN_INPUT_NEXT_L2_INPUT;
          if (is_ip4) {
            vlib_buffer_advance (b0, -(word)(sizeof(udp_header_t)+sizeof(ip4_header_t)));
            ip4_0 = vlib_buffer_get_current (b0);
          } else /* !is_ip4 */ {
            vlib_buffer_advance (b0, -(word)(sizeof(udp_header_t)+sizeof(ip6_header_t)));
            ip6_0 = vlib_buffer_get_current (b0);
          }

          /* pop (ip, udp, vxlan) */
          if (is_ip4)
            vlib_buffer_advance (b0, sizeof(*ip4_0)+sizeof(udp_header_t)+sizeof(*vxlan0));
          else
            vlib_buffer_advance (b0, sizeof(*ip6_0)+sizeof(udp_header_t)+sizeof(*vxlan0));
          if (PREDICT_FALSE (vxlan0->flags != VXLAN_FLAGS_I))
            {
              error0 = VXLAN_ERROR_BAD_FLAGS;
              next0 = VXLAN_INPUT_NEXT_DROP;
            }

          if (is_ip4) {
            key4_0.src = ip4_0->src_address.as_u32;
            key4_0.vni = vxlan0->vni_reserved;

            if (PREDICT_FALSE (key4_0.as_u64 != last_key4.as_u64))
              p0 = hash_get (vxm->vxlan4_tunnel_by_key, key4_0.as_u64);
              error0 = VXLAN_ERROR_NO_SUCH_TUNNEL;
              next0 = VXLAN_INPUT_NEXT_DROP;
              last_key4.as_u64 = key4_0.as_u64;
              tunnel_index0 = last_tunnel_index = p0[0];
            tunnel_index0 = last_tunnel_index;
          } else /* !is_ip4 */ {
            key6_0.src.as_u64[0] = ip6_0->src_address.as_u64[0];
            key6_0.src.as_u64[1] = ip6_0->src_address.as_u64[1];
            key6_0.vni = vxlan0->vni_reserved;

            if (PREDICT_FALSE (memcmp(&key6_0, &last_key6, sizeof(last_key6)) != 0))
              p0 = hash_get_mem (vxm->vxlan6_tunnel_by_key, &key6_0);
              error0 = VXLAN_ERROR_NO_SUCH_TUNNEL;
              next0 = VXLAN_INPUT_NEXT_DROP;
              clib_memcpy (&last_key6, &key6_0, sizeof(key6_0));
              tunnel_index0 = last_tunnel_index = p0[0];
            tunnel_index0 = last_tunnel_index;
          }

          t0 = pool_elt_at_index (vxm->tunnels, tunnel_index0);
          next0 = t0->decap_next_index;
          sw_if_index0 = t0->sw_if_index;
          len0 = vlib_buffer_length_in_chain (vm, b0);

          /* Required to make the l2 tag push / pop code work on l2 subifs */
          if (PREDICT_TRUE(next0 == VXLAN_INPUT_NEXT_L2_INPUT))
            vnet_update_l2_len (b0);

          /* Set input sw_if_index to VXLAN tunnel for learning */
          vnet_buffer(b0)->sw_if_index[VLIB_RX] = sw_if_index0;

          pkts_decapsulated ++;
          stats_n_packets += 1;
          stats_n_bytes += len0;
          /* Batch stats increment on the same vxlan tunnel so the counter
             is not incremented per packet */
          if (PREDICT_FALSE (sw_if_index0 != stats_sw_if_index))
            {
              stats_n_packets -= 1;
              stats_n_bytes -= len0;
              vlib_increment_combined_counter
                (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX,
                 cpu_index, stats_sw_if_index,
                 stats_n_packets, stats_n_bytes);
              stats_n_bytes = len0;
              stats_sw_if_index = sw_if_index0;
            }

          b0->error = error0 ? node->errors[error0] : 0;

          if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
            {
              vxlan_rx_trace_t *tr
                = vlib_add_trace (vm, node, b0, sizeof (*tr));
              tr->next_index = next0;
              tr->tunnel_index = tunnel_index0;
              tr->vni = vnet_get_vni (vxlan0);
            }
          vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, next0);
        }

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

  /* Do we still need this now that tunnel tx stats is kept? */
  vlib_node_increment_counter (vm, is_ip4?
                               vxlan4_input_node.index:vxlan6_input_node.index,
                               VXLAN_ERROR_DECAPSULATED,
                               pkts_decapsulated);

  /* Increment any remaining batch stats */
  vlib_increment_combined_counter
    (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX,
     cpu_index, stats_sw_if_index, stats_n_packets, stats_n_bytes);
  node->runtime_data[0] = stats_sw_if_index;
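  /* Saving stats_sw_if_index in runtime_data[0] lets the per-tunnel
   * batching pick up where it left off when the next frame arrives. */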
  return from_frame->n_vectors;
}

static uword
vxlan4_input (vlib_main_t * vm,
              vlib_node_runtime_t * node,
              vlib_frame_t * from_frame)
{
  return vxlan_input(vm, node, from_frame, /* is_ip4 */ 1);
}

static uword
vxlan6_input (vlib_main_t * vm,
              vlib_node_runtime_t * node,
              vlib_frame_t * from_frame)
{
  return vxlan_input(vm, node, from_frame, /* is_ip4 */ 0);
}

static char * vxlan_error_strings[] = {
#define vxlan_error(n,s) s,
#include <vnet/vxlan/vxlan_error.def>
#undef vxlan_error
};
VLIB_REGISTER_NODE (vxlan4_input_node) = {
  .function = vxlan4_input,
  .name = "vxlan4-input",
  /* Takes a vector of packets. */
  .vector_size = sizeof (u32),

  .n_errors = VXLAN_N_ERROR,
  .error_strings = vxlan_error_strings,

  .n_next_nodes = VXLAN_INPUT_N_NEXT,
  .next_nodes = {
#define _(s,n) [VXLAN_INPUT_NEXT_##s] = n,
    foreach_vxlan_input_next
#undef _
  },

  //temp  .format_buffer = format_vxlan_header,
  .format_trace = format_vxlan_rx_trace,
  // $$$$ .unformat_buffer = unformat_vxlan_header,
};

VLIB_NODE_FUNCTION_MULTIARCH (vxlan4_input_node, vxlan4_input)

VLIB_REGISTER_NODE (vxlan6_input_node) = {
  .function = vxlan6_input,
  .name = "vxlan6-input",
  /* Takes a vector of packets. */
  .vector_size = sizeof (u32),

  .n_errors = VXLAN_N_ERROR,
  .error_strings = vxlan_error_strings,

  .n_next_nodes = VXLAN_INPUT_N_NEXT,
  .next_nodes = {
#define _(s,n) [VXLAN_INPUT_NEXT_##s] = n,
    foreach_vxlan_input_next
#undef _
  },

  //temp  .format_buffer = format_vxlan_header,
  .format_trace = format_vxlan_rx_trace,
  // $$$$ .unformat_buffer = unformat_vxlan_header,
};

VLIB_NODE_FUNCTION_MULTIARCH (vxlan6_input_node, vxlan6_input)
typedef enum {
  IP_VXLAN_BYPASS_NEXT_DROP,
  IP_VXLAN_BYPASS_NEXT_VXLAN,
  IP_VXLAN_BYPASS_N_NEXT,
} ip_vxlan_bypass_next_t;
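
/* Packets that are valid, locally-terminated VXLAN are handed straight to
 * vxlan4-input via IP_VXLAN_BYPASS_NEXT_VXLAN; everything else keeps the
 * next node chosen by vnet_feature_next and continues down the ip4-unicast
 * feature arc. */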
static uword
ip_vxlan_bypass_inline (vlib_main_t * vm,
                        vlib_node_runtime_t * node,
                        vlib_frame_t * frame,
                        u32 is_ip4)
{
  vxlan_main_t * vxm = &vxlan_main;
  u32 * from, * to_next, n_left_from, n_left_to_next, next_index;
  vlib_node_runtime_t * error_node = vlib_node_get_runtime (vm, ip4_input_node.index);
  ip4_address_t addr4; /* last IPv4 address matching a local VTEP address */
  ip6_address_t addr6; /* last IPv6 address matching a local VTEP address */

  from = vlib_frame_vector_args (frame);
  n_left_from = frame->n_vectors;
  next_index = node->cached_next_index;

  if (node->flags & VLIB_NODE_FLAG_TRACE)
    ip4_forward_next_trace (vm, node, frame, VLIB_TX);

  if (is_ip4) addr4.data_u32 = ~0;
  else ip6_address_set_zero (&addr6);
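
  /* addr4/addr6 cache the last destination address that matched a local
   * VTEP, so consecutive packets towards the same VTEP skip the vtep4
   * hash lookup. */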
  while (n_left_from > 0)
    {
      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      while (n_left_from >= 4 && n_left_to_next >= 2)
        {
          vlib_buffer_t * b0, * b1;
          ip4_header_t * ip0, * ip1;
          udp_header_t * udp0, * udp1;
          u32 bi0, ip_len0, udp_len0, flags0, next0;
          u32 bi1, ip_len1, udp_len1, flags1, next1;
          i32 len_diff0, len_diff1;
          u8 error0, good_udp0, proto0;
          u8 error1, good_udp1, proto1;

          /* Prefetch next iteration. */
          {
            vlib_buffer_t * p2, * p3;

            p2 = vlib_get_buffer (vm, from[2]);
            p3 = vlib_get_buffer (vm, from[3]);

            vlib_prefetch_buffer_header (p2, LOAD);
            vlib_prefetch_buffer_header (p3, LOAD);

            CLIB_PREFETCH (p2->data, 2*CLIB_CACHE_LINE_BYTES, LOAD);
            CLIB_PREFETCH (p3->data, 2*CLIB_CACHE_LINE_BYTES, LOAD);
          }

          bi0 = to_next[0] = from[0];
          bi1 = to_next[1] = from[1];

          b0 = vlib_get_buffer (vm, bi0);
          b1 = vlib_get_buffer (vm, bi1);
          ip0 = vlib_buffer_get_current (b0);
          ip1 = vlib_buffer_get_current (b1);

          /* Setup packet for next IP feature */
          vnet_feature_next(vnet_buffer(b0)->sw_if_index[VLIB_RX], &next0, b0);
          vnet_feature_next(vnet_buffer(b1)->sw_if_index[VLIB_RX], &next1, b1);

          /* Treat IP frag packets as "experimental" protocol for now
             until support for IP fragment reassembly is implemented */
          proto0 = ip4_is_fragment(ip0) ? 0xfe : ip0->protocol;
          proto1 = ip4_is_fragment(ip1) ? 0xfe : ip1->protocol;

          /* Process packet 0 */
          if (proto0 != IP_PROTOCOL_UDP)
            goto exit0; /* not UDP packet */

          udp0 = ip4_next_header (ip0);
          if (udp0->dst_port != clib_host_to_net_u16 (UDP_DST_PORT_vxlan))
            goto exit0; /* not VXLAN packet */

          if (is_ip4) {
            if (addr4.as_u32 != ip0->dst_address.as_u32)
              {
                if (!hash_get (vxm->vtep4, ip0->dst_address.as_u32))
                  goto exit0; /* no local VTEP for VXLAN packet */
                addr4 = ip0->dst_address;
              }
          }
          else goto exit0; /* IP6 VXLAN bypass not yet supported */

          /* vxlan-input node expects current_data to point at the VXLAN header */
          vlib_buffer_advance (b0, sizeof(ip4_header_t)+sizeof(udp_header_t));
          good_udp0 = (flags0 & IP_BUFFER_L4_CHECKSUM_CORRECT) != 0;

          /* Don't verify UDP checksum for packets with explicit zero checksum. */
          good_udp0 |= udp0->checksum == 0;
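
          /* RFC 768 allows a zero UDP checksum over IPv4 (checksum not
           * computed by the sender), so such packets are accepted without
           * verification. */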
          /* Verify UDP length */
          ip_len0 = clib_net_to_host_u16 (ip0->length);
          udp_len0 = clib_net_to_host_u16 (udp0->length);
          len_diff0 = ip_len0 - udp_len0;
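
          /* A negative difference means the UDP header claims more payload
           * than the IP datagram actually carries; such packets are flagged
           * with IP4_ERROR_UDP_LENGTH below. */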
          /* Verify UDP checksum */
          if (PREDICT_FALSE (!good_udp0))
            {
              if (!(flags0 & IP_BUFFER_L4_CHECKSUM_COMPUTED))
                flags0 = ip4_tcp_udp_validate_checksum (vm, b0);
              good_udp0 = (flags0 & IP_BUFFER_L4_CHECKSUM_CORRECT) != 0;
            }

          error0 = good_udp0 ? 0 : IP4_ERROR_UDP_CHECKSUM;
          error0 = (len_diff0 >= 0) ? error0 : IP4_ERROR_UDP_LENGTH;

          next0 = error0 ?
            IP_VXLAN_BYPASS_NEXT_DROP : IP_VXLAN_BYPASS_NEXT_VXLAN;
          b0->error = error0 ? error_node->errors[error0] : 0;

        exit0:
          /* Process packet 1 */
          if (proto1 != IP_PROTOCOL_UDP)
            goto exit1; /* not UDP packet */

          udp1 = ip4_next_header (ip1);
          if (udp1->dst_port != clib_host_to_net_u16 (UDP_DST_PORT_vxlan))
            goto exit1; /* not VXLAN packet */

          if (is_ip4) {
            if (addr4.as_u32 != ip1->dst_address.as_u32)
              {
                if (!hash_get (vxm->vtep4, ip1->dst_address.as_u32))
                  goto exit1; /* no local VTEP for VXLAN packet */
                addr4 = ip1->dst_address;
              }
          }
          else goto exit1; /* IP6 VXLAN bypass not yet supported */

          /* vxlan-input node expects current_data to point at the VXLAN header */
          vlib_buffer_advance (b1, sizeof(ip4_header_t)+sizeof(udp_header_t));

          good_udp1 = (flags1 & IP_BUFFER_L4_CHECKSUM_CORRECT) != 0;

          /* Don't verify UDP checksum for packets with explicit zero checksum. */
          good_udp1 |= udp1->checksum == 0;

          /* Verify UDP length */
          ip_len1 = clib_net_to_host_u16 (ip1->length);
          udp_len1 = clib_net_to_host_u16 (udp1->length);
          len_diff1 = ip_len1 - udp_len1;
          /* Verify UDP checksum */
          if (PREDICT_FALSE (!good_udp1))
            {
              if (!(flags1 & IP_BUFFER_L4_CHECKSUM_COMPUTED))
                flags1 = ip4_tcp_udp_validate_checksum (vm, b1);
              good_udp1 = (flags1 & IP_BUFFER_L4_CHECKSUM_CORRECT) != 0;
            }

          error1 = good_udp1 ? 0 : IP4_ERROR_UDP_CHECKSUM;
          error1 = (len_diff1 >= 0) ? error1 : IP4_ERROR_UDP_LENGTH;

          next1 = error1 ?
            IP_VXLAN_BYPASS_NEXT_DROP : IP_VXLAN_BYPASS_NEXT_VXLAN;
          b1->error = error1 ? error_node->errors[error1] : 0;

        exit1:
          vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, bi1, next0, next1);
        }
      while (n_left_from > 0 && n_left_to_next > 0)
        {
          u32 bi0, ip_len0, udp_len0, flags0, next0;
          u8 error0, good_udp0, proto0;

          bi0 = to_next[0] = from[0];

          b0 = vlib_get_buffer (vm, bi0);
          ip0 = vlib_buffer_get_current (b0);

          /* Setup packet for next IP feature */
          vnet_feature_next(vnet_buffer(b0)->sw_if_index[VLIB_RX], &next0, b0);

          /* Treat IP frag packets as "experimental" protocol for now
             until support for IP fragment reassembly is implemented */
          proto0 = ip4_is_fragment(ip0) ? 0xfe : ip0->protocol;

          if (proto0 != IP_PROTOCOL_UDP)
            goto exit; /* not UDP packet */

          udp0 = ip4_next_header (ip0);
          if (udp0->dst_port != clib_host_to_net_u16 (UDP_DST_PORT_vxlan))
            goto exit; /* not VXLAN packet */

          if (is_ip4) {
            if (addr4.as_u32 != ip0->dst_address.as_u32)
              {
                if (!hash_get (vxm->vtep4, ip0->dst_address.as_u32))
                  goto exit; /* no local VTEP for VXLAN packet */
                addr4 = ip0->dst_address;
              }
          }
          else goto exit; /* IP6 VXLAN bypass not yet supported */

          /* vxlan-input node expects current_data to point at the VXLAN header */
          vlib_buffer_advance (b0, sizeof(ip4_header_t)+sizeof(udp_header_t));

          good_udp0 = (flags0 & IP_BUFFER_L4_CHECKSUM_CORRECT) != 0;

          /* Don't verify UDP checksum for packets with explicit zero checksum. */
          good_udp0 |= udp0->checksum == 0;

          /* Verify UDP length */
          ip_len0 = clib_net_to_host_u16 (ip0->length);
          udp_len0 = clib_net_to_host_u16 (udp0->length);
          len_diff0 = ip_len0 - udp_len0;
          /* Verify UDP checksum */
          if (PREDICT_FALSE (!good_udp0))
            {
              if (!(flags0 & IP_BUFFER_L4_CHECKSUM_COMPUTED))
                flags0 = ip4_tcp_udp_validate_checksum (vm, b0);
              good_udp0 = (flags0 & IP_BUFFER_L4_CHECKSUM_CORRECT) != 0;
            }

          error0 = good_udp0 ? 0 : IP4_ERROR_UDP_CHECKSUM;
          error0 = (len_diff0 >= 0) ? error0 : IP4_ERROR_UDP_LENGTH;

          next0 = error0 ?
            IP_VXLAN_BYPASS_NEXT_DROP : IP_VXLAN_BYPASS_NEXT_VXLAN;
          b0->error = error0 ? error_node->errors[error0] : 0;

        exit:
          vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, next0);
        }

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

  return frame->n_vectors;
}

static uword
ip4_vxlan_bypass (vlib_main_t * vm,
                  vlib_node_runtime_t * node,
                  vlib_frame_t * frame)
{
  return ip_vxlan_bypass_inline (vm, node, frame, /* is_ip4 */ 1);
}
VLIB_REGISTER_NODE (ip4_vxlan_bypass_node) = {
  .function = ip4_vxlan_bypass,
  .name = "ip4-vxlan-bypass",
  .vector_size = sizeof (u32),

  .n_next_nodes = IP_VXLAN_BYPASS_N_NEXT,
  .next_nodes = {
    [IP_VXLAN_BYPASS_NEXT_DROP] = "error-drop",
    [IP_VXLAN_BYPASS_NEXT_VXLAN] = "vxlan4-input",
  },

  .format_buffer = format_ip4_header,
  .format_trace = format_ip4_forward_next_trace,
};

VLIB_NODE_FUNCTION_MULTIARCH (ip4_vxlan_bypass_node,ip4_vxlan_bypass)

static clib_error_t *
set_ip_vxlan_bypass (vlib_main_t * vm,
                     unformat_input_t * input,
                     vlib_cli_command_t * cmd)
{
  unformat_input_t _line_input, * line_input = &_line_input;
  vnet_main_t * vnm = vnet_get_main();
  clib_error_t * error = 0;
  u32 sw_if_index, is_del;

  if (! unformat_user (input, unformat_line_input, line_input))
    return 0;

  while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
    {
      if (unformat_user (line_input, unformat_vnet_sw_interface, vnm, &sw_if_index))
        ;
      else if (unformat (line_input, "del"))
        is_del = 1;
      else
        error = unformat_parse_error (line_input);
    }

  if (~0 == sw_if_index)
    {
      error = clib_error_return (0, "unknown interface `%U'",
                                 format_unformat_error, line_input);
    }

  vnet_feature_enable_disable ("ip4-unicast", "ip4-vxlan-bypass", sw_if_index,
                               is_del == 0, 0, 0);
/*?
 * This command adds the 'ip4-vxlan-bypass' graph node for a given interface.
 * By adding the IPv4 vxlan-bypass graph node to an interface, the node checks
 * for and validates incoming VXLAN packets and bypasses the ip4-lookup,
 * ip4-local and ip4-udp-lookup nodes to speed up VXLAN packet forwarding.
 * The node adds a small amount of extra overhead for non-VXLAN packets,
 * which is kept to a minimum.
 *
 * @cliexpar
 * Example of graph node before ip4-vxlan-bypass is enabled:
 * @cliexstart{show vlib graph ip4-vxlan-bypass}
 * ip4-vxlan-bypass error-drop [0]
 * @cliexend
 *
 * Example of how to enable ip4-vxlan-bypass on an interface:
 * @cliexcmd{set interface ip vxlan-bypass GigabitEthernet2/0/0}
 *
 * Example of graph node after ip4-vxlan-bypass is enabled:
 * @cliexstart{show vlib graph ip4-vxlan-bypass}
 * ip4-vxlan-bypass error-drop [0] ip4-input
 * vxlan4-input [1] ip4-input-no-checksum
 * @cliexend
 *
 * Example of how to display the features enabled on an interface:
 * @cliexstart{show ip interface features GigabitEthernet2/0/0}
 * IP feature paths configured on GigabitEthernet2/0/0...
 * ip4-lookup-multicast
 * @cliexend
 *
 * Example of how to disable ip4-vxlan-bypass on an interface:
 * @cliexcmd{set interface ip vxlan-bypass GigabitEthernet2/0/0 del}
 ?*/
VLIB_CLI_COMMAND (set_interface_ip_vxlan_bypass_command, static) = {
  .path = "set interface ip vxlan-bypass",
  .function = set_ip_vxlan_bypass,
  .short_help = "set interface ip vxlan-bypass <interface> [del]",
};

/* Dummy init function to get us linked in. */
clib_error_t * ip4_vxlan_bypass_init (vlib_main_t * vm)
{ return 0; }

VLIB_INIT_FUNCTION (ip4_vxlan_bypass_init);