2 * decap.c: vxlan tunnel decap packet processing
4 * Copyright (c) 2013 Cisco and/or its affiliates.
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at:
9 * http://www.apache.org/licenses/LICENSE-2.0
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
18 #include <vlib/vlib.h>
19 #include <vnet/pg/pg.h>
20 #include <vnet/vxlan/vxlan.h>
22 vlib_node_registration_t vxlan4_input_node;
23 vlib_node_registration_t vxlan6_input_node;
32 static u8 * format_vxlan_rx_trace (u8 * s, va_list * args)
34 CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
35 CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
36 vxlan_rx_trace_t * t = va_arg (*args, vxlan_rx_trace_t *);
38 if (t->tunnel_index != ~0)
40 s = format (s, "VXLAN decap from vxlan_tunnel%d vni %d next %d error %d",
41 t->tunnel_index, t->vni, t->next_index, t->error);
45 s = format (s, "VXLAN decap error - tunnel for vni %d does not exist", t->vni);
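47 /* validate_vxlan_fib: check that the packet arrived in the tunnel's encap FIB.
48    Use the buffer's TX FIB index if a prior feature set one, otherwise the FIB
49    bound to the RX interface, and compare it with t->encap_fib_index. */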
52 validate_vxlan_fib (vlib_buffer_t *b, vxlan_tunnel_t *t, u32 is_ip4)
54 u32 sw_if_index = vnet_buffer (b)->sw_if_index[VLIB_RX];
56 u32 * fib_index_by_sw_if_index = is_ip4 ?
57 ip4_main.fib_index_by_sw_if_index : ip6_main.fib_index_by_sw_if_index;
58 u32 tx_sw_if_index = vnet_buffer (b)->sw_if_index[VLIB_TX];
59 u32 fib_index = (tx_sw_if_index == (u32) ~ 0) ?
60 vec_elt (fib_index_by_sw_if_index, sw_if_index) : tx_sw_if_index;
62 return (fib_index == t->encap_fib_index);
66 vxlan_input (vlib_main_t * vm,
67 vlib_node_runtime_t * node,
68 vlib_frame_t * from_frame,
71 vxlan_main_t * vxm = &vxlan_main;
72 vnet_main_t * vnm = vxm->vnet_main;
73 vnet_interface_main_t * im = &vnm->interface_main;
74 u32 last_tunnel_index = ~0;
75 vxlan4_tunnel_key_t last_key4;
76 vxlan6_tunnel_key_t last_key6;
77 u32 pkts_decapsulated = 0;
78 u32 thread_index = vlib_get_thread_index();
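79 /* Cache the last tunnel lookup so back-to-back packets from the same
80    remote VTEP and VNI skip the hash lookup */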
81 last_key4.as_u64 = ~0;
83 memset (&last_key6, 0xff, sizeof (last_key6));
85 u32 next_index = node->cached_next_index;
86 u32 stats_sw_if_index = node->runtime_data[0];
87 u32 stats_n_packets = 0, stats_n_bytes = 0;
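88 /* stats_sw_if_index is kept in runtime_data[0] so stats batching can continue across frames */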
89 u32 * from = vlib_frame_vector_args (from_frame);
90 u32 n_left_from = from_frame->n_vectors;
92 while (n_left_from > 0)
94 u32 * to_next, n_left_to_next;
95 vlib_get_next_frame (vm, node, next_index,
96 to_next, n_left_to_next);
98 while (n_left_from >= 4 && n_left_to_next >= 2)
101 vlib_buffer_t * b0, * b1;
103 vxlan_tunnel_t * t0, * t1, * stats_t0, * stats_t1;
105 /* Prefetch next iteration. */
107 vlib_buffer_t * p2, * p3;
109 p2 = vlib_get_buffer (vm, from[2]);
110 p3 = vlib_get_buffer (vm, from[3]);
112 vlib_prefetch_buffer_header (p2, LOAD);
113 vlib_prefetch_buffer_header (p3, LOAD);
115 CLIB_PREFETCH (p2->data, 2*CLIB_CACHE_LINE_BYTES, LOAD);
116 CLIB_PREFETCH (p3->data, 2*CLIB_CACHE_LINE_BYTES, LOAD);
128 b0 = vlib_get_buffer (vm, bi0);
129 b1 = vlib_get_buffer (vm, bi1);
131 /* udp leaves current_data pointing at the vxlan header */
132 void * cur0 = vlib_buffer_get_current (b0);
133 void * cur1 = vlib_buffer_get_current (b1);
134 vxlan_header_t * vxlan0 = cur0;
135 vxlan_header_t * vxlan1 = cur1;
137 ip4_header_t * ip4_0, * ip4_1;
138 ip6_header_t * ip6_0, * ip6_1;
140 ip4_0 = cur0 - sizeof(udp_header_t) - sizeof(ip4_header_t);
141 ip4_1 = cur1 - sizeof(udp_header_t) - sizeof(ip4_header_t);
143 ip6_0 = cur0 - sizeof(udp_header_t) - sizeof(ip6_header_t);
144 ip6_1 = cur1 - sizeof(udp_header_t) - sizeof(ip6_header_t);
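145 /* Locate the encapsulating IP header one UDP + one IP header before the
146    VXLAN header; only the variant selected by is_ip4 is ever dereferenced */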
147 /* pop (ip, udp, vxlan) */
148 vlib_buffer_advance (b0, sizeof *vxlan0);
149 vlib_buffer_advance (b1, sizeof *vxlan1);
151 u32 tunnel_index0 = ~0, tunnel_index1 = ~0;
152 u32 error0 = 0, error1 = 0;
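153 /* Only the I flag is expected to be set (RFC 7348); packets with any other flag bits are dropped */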
154 if (PREDICT_FALSE (vxlan0->flags != VXLAN_FLAGS_I))
156 error0 = VXLAN_ERROR_BAD_FLAGS;
157 next0 = VXLAN_INPUT_NEXT_DROP;
163 /* Make sure a VXLAN tunnel exists for the packet SIP and VNI */
164 vxlan4_tunnel_key_t key4_0 = {
165 .src = ip4_0->src_address.as_u32,
166 .vni = vxlan0->vni_reserved,
169 if (PREDICT_FALSE (key4_0.as_u64 != last_key4.as_u64))
171 uword * p = hash_get (vxm->vxlan4_tunnel_by_key, key4_0.as_u64);
172 if (PREDICT_FALSE (p == NULL))
174 error0 = VXLAN_ERROR_NO_SUCH_TUNNEL;
175 next0 = VXLAN_INPUT_NEXT_DROP;
179 last_tunnel_index = p[0];
181 tunnel_index0 = last_tunnel_index;
182 stats_t0 = t0 = pool_elt_at_index (vxm->tunnels, tunnel_index0);
184 /* Validate VXLAN tunnel encap-fib index against packet */
185 if (PREDICT_FALSE (validate_vxlan_fib (b0, t0, is_ip4) == 0))
187 error0 = VXLAN_ERROR_NO_SUCH_TUNNEL;
188 next0 = VXLAN_INPUT_NEXT_DROP;
192 /* Validate VXLAN tunnel SIP against packet DIP */
193 if (PREDICT_TRUE (ip4_0->dst_address.as_u32 == t0->src.ip4.as_u32))
194 goto next0; /* valid packet */
195 if (PREDICT_FALSE (ip4_address_is_multicast (&ip4_0->dst_address)))
197 key4_0.src = ip4_0->dst_address.as_u32;
198 /* Make sure a mcast VXLAN tunnel exists for the packet DIP and VNI */
199 uword * p = hash_get (vxm->vxlan4_tunnel_by_key, key4_0.as_u64);
200 if (PREDICT_TRUE (p != NULL))
202 stats_t0 = pool_elt_at_index (vxm->tunnels, p[0]);
203 goto next0; /* valid packet */
206 error0 = VXLAN_ERROR_NO_SUCH_TUNNEL;
207 next0 = VXLAN_INPUT_NEXT_DROP;
210 } else /* !is_ip4 */ {
211 /* Make sure a VXLAN tunnel exists for the packet SIP and VNI */
212 vxlan6_tunnel_key_t key6_0 = {
213 .src = ip6_0->src_address,
214 .vni = vxlan0->vni_reserved,
217 if (PREDICT_FALSE (memcmp(&key6_0, &last_key6, sizeof(last_key6)) != 0))
219 uword * p = hash_get_mem (vxm->vxlan6_tunnel_by_key, &key6_0);
220 if (PREDICT_FALSE (p == NULL))
222 error0 = VXLAN_ERROR_NO_SUCH_TUNNEL;
223 next0 = VXLAN_INPUT_NEXT_DROP;
226 clib_memcpy (&last_key6, &key6_0, sizeof(key6_0));
227 last_tunnel_index = p[0];
229 tunnel_index0 = last_tunnel_index;
230 stats_t0 = t0 = pool_elt_at_index (vxm->tunnels, tunnel_index0);
232 /* Validate VXLAN tunnel encap-fib index against packet */
233 if (PREDICT_FALSE (validate_vxlan_fib (b0, t0, is_ip4) == 0))
235 error0 = VXLAN_ERROR_NO_SUCH_TUNNEL;
236 next0 = VXLAN_INPUT_NEXT_DROP;
240 /* Validate VXLAN tunnel SIP against packet DIP */
241 if (PREDICT_TRUE (ip6_address_is_equal (&ip6_0->dst_address, &t0->src.ip6)))
243 goto next0; /* valid packet */
244 if (PREDICT_FALSE (ip6_address_is_multicast (&ip6_0->dst_address)))
246 key6_0.src = ip6_0->dst_address;
247 uword * p = hash_get_mem (vxm->vxlan6_tunnel_by_key, &key6_0);
248 if (PREDICT_TRUE (p != NULL))
250 stats_t0 = pool_elt_at_index (vxm->tunnels, p[0]);
251 goto next0; /* valid packet */
254 error0 = VXLAN_ERROR_NO_SUCH_TUNNEL;
255 next0 = VXLAN_INPUT_NEXT_DROP;
260 next0 = t0->decap_next_index;
261 u32 len0 = vlib_buffer_length_in_chain (vm, b0);
263 /* Required to make the l2 tag push / pop code work on l2 subifs */
264 if (PREDICT_TRUE(next0 == VXLAN_INPUT_NEXT_L2_INPUT))
265 vnet_update_l2_len (b0);
267 /* Set packet input sw_if_index to unicast VXLAN tunnel for learning */
268 vnet_buffer(b0)->sw_if_index[VLIB_RX] = t0->sw_if_index;
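269 /* stats_t0 may point at a multicast tunnel matched by DIP, while t0 is the unicast tunnel used for learning above */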
270 /* Batch stats increment on the same vxlan tunnel so the counter
271 is not incremented per packet */
272 u32 sw_if_index0 = stats_t0->sw_if_index;
273 if (PREDICT_FALSE (sw_if_index0 != stats_sw_if_index))
277 vlib_increment_combined_counter
278 (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX,
279 thread_index, stats_sw_if_index,
280 stats_n_packets, stats_n_bytes);
281 pkts_decapsulated += stats_n_packets;
282 stats_n_packets = stats_n_bytes = 0;
284 stats_sw_if_index = sw_if_index0;
286 stats_n_packets += 1;
287 stats_n_bytes += len0;
290 b0->error = error0 ? node->errors[error0] : 0;
292 if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
295 vxlan_rx_trace_t *tr = vlib_add_trace (vm, node, b0, sizeof (*tr));
296 tr->next_index = next0;
298 tr->tunnel_index = tunnel_index0;
299 tr->vni = vnet_get_vni (vxlan0);
302 if (PREDICT_FALSE (vxlan1->flags != VXLAN_FLAGS_I))
304 error1 = VXLAN_ERROR_BAD_FLAGS;
305 next1 = VXLAN_INPUT_NEXT_DROP;
310 /* Make sure a VXLAN tunnel exists for the packet SIP and VNI */
311 vxlan4_tunnel_key_t key4_1 = {
312 .src = ip4_1->src_address.as_u32,
313 .vni = vxlan1->vni_reserved
316 if (PREDICT_FALSE (key4_1.as_u64 != last_key4.as_u64))
318 uword * p = hash_get (vxm->vxlan4_tunnel_by_key, key4_1.as_u64);
319 if (PREDICT_FALSE (p == NULL))
321 error1 = VXLAN_ERROR_NO_SUCH_TUNNEL;
322 next1 = VXLAN_INPUT_NEXT_DROP;
326 last_tunnel_index = p[0];
328 tunnel_index1 = last_tunnel_index;
329 stats_t1 = t1 = pool_elt_at_index (vxm->tunnels, tunnel_index1);
331 /* Validate VXLAN tunnel encap-fib index against packet */
332 if (PREDICT_FALSE (validate_vxlan_fib (b1, t1, is_ip4) == 0))
334 error1 = VXLAN_ERROR_NO_SUCH_TUNNEL;
335 next1 = VXLAN_INPUT_NEXT_DROP;
339 /* Validate VXLAN tunnel SIP against packet DIP */
340 if (PREDICT_TRUE (ip4_1->dst_address.as_u32 == t1->src.ip4.as_u32))
341 goto next1; /* valid packet */
342 if (PREDICT_FALSE (ip4_address_is_multicast (&ip4_1->dst_address)))
344 /* Make sure a mcast VXLAN tunnel exists for the packet DIP and VNI */
345 key4_1.src = ip4_1->dst_address.as_u32;
347 uword * p = hash_get (vxm->vxlan4_tunnel_by_key, key4_1.as_u64);
348 if (PREDICT_TRUE (p != NULL))
350 stats_t1 = pool_elt_at_index (vxm->tunnels, p[0]);
351 goto next1; /* valid packet */
354 error1 = VXLAN_ERROR_NO_SUCH_TUNNEL;
355 next1 = VXLAN_INPUT_NEXT_DROP;
358 } else /* !is_ip4 */ {
359 /* Make sure a VXLAN tunnel exists for the packet SIP and VNI */
360 vxlan6_tunnel_key_t key6_1 = {
361 .src = ip6_1->src_address,
362 .vni = vxlan1->vni_reserved,
365 if (PREDICT_FALSE (memcmp(&key6_1, &last_key6, sizeof(last_key6)) != 0))
367 uword * p = hash_get_mem (vxm->vxlan6_tunnel_by_key, &key6_1);
368 if (PREDICT_FALSE (p == NULL))
370 error1 = VXLAN_ERROR_NO_SUCH_TUNNEL;
371 next1 = VXLAN_INPUT_NEXT_DROP;
375 clib_memcpy (&last_key6, &key6_1, sizeof(key6_1));
376 last_tunnel_index = p[0];
378 tunnel_index1 = last_tunnel_index;
379 stats_t1 = t1 = pool_elt_at_index (vxm->tunnels, tunnel_index1);
381 /* Validate VXLAN tunnel encap-fib index against packet */
382 if (PREDICT_FALSE (validate_vxlan_fib (b1, t1, is_ip4) == 0))
384 error1 = VXLAN_ERROR_NO_SUCH_TUNNEL;
385 next1 = VXLAN_INPUT_NEXT_DROP;
389 /* Validate VXLAN tunnel SIP against packet DIP */
390 if (PREDICT_TRUE (ip6_address_is_equal (&ip6_1->dst_address, &t1->src.ip6)))
392 goto next1; /* valid packet */
393 if (PREDICT_FALSE (ip6_address_is_multicast (&ip6_1->dst_address)))
395 key6_1.src = ip6_1->dst_address;
397 uword * p = hash_get_mem (vxm->vxlan6_tunnel_by_key, &key6_1);
398 if (PREDICT_TRUE (p != NULL))
400 stats_t1 = pool_elt_at_index (vxm->tunnels, p[0]);
401 goto next1; /* valid packet */
404 error1 = VXLAN_ERROR_NO_SUCH_TUNNEL;
405 next1 = VXLAN_INPUT_NEXT_DROP;
410 next1 = t1->decap_next_index;
411 u32 len1 = vlib_buffer_length_in_chain (vm, b1);
413 /* Required to make the l2 tag push / pop code work on l2 subifs */
414 if (PREDICT_TRUE(next1 == VXLAN_INPUT_NEXT_L2_INPUT))
415 vnet_update_l2_len (b1);
417 /* Set packet input sw_if_index to unicast VXLAN tunnel for learning */
418 vnet_buffer(b1)->sw_if_index[VLIB_RX] = t1->sw_if_index;
420 /* Batch stats increment on the same vxlan tunnel so the counter
421 is not incremented per packet */
422 u32 sw_if_index1 = stats_t1->sw_if_index;
423 if (PREDICT_FALSE (sw_if_index1 != stats_sw_if_index))
427 vlib_increment_combined_counter
428 (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX,
429 thread_index, stats_sw_if_index,
430 stats_n_packets, stats_n_bytes);
431 pkts_decapsulated += stats_n_packets;
432 stats_n_packets = stats_n_bytes = 0;
434 stats_sw_if_index = sw_if_index1;
436 stats_n_packets += 1;
437 stats_n_bytes += len1;
440 b1->error = error1 ? node->errors[error1] : 0;
442 if (PREDICT_FALSE(b1->flags & VLIB_BUFFER_IS_TRACED))
445 vxlan_rx_trace_t *tr = vlib_add_trace (vm, node, b1, sizeof (*tr));
446 tr->next_index = next1;
448 tr->tunnel_index = tunnel_index1;
449 tr->vni = vnet_get_vni (vxlan1);
452 vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
453 to_next, n_left_to_next,
454 bi0, bi1, next0, next1);
457 while (n_left_from > 0 && n_left_to_next > 0)
462 ip4_header_t * ip4_0;
463 ip6_header_t * ip6_0;
464 vxlan_tunnel_t * t0, * stats_t0;
473 b0 = vlib_get_buffer (vm, bi0);
475 /* udp leaves current_data pointing at the vxlan header */
476 void * cur0 = vlib_buffer_get_current (b0);
477 vxlan_header_t * vxlan0 = cur0;
479 ip4_0 = cur0 - sizeof(udp_header_t) - sizeof(ip4_header_t);
481 ip6_0 = cur0 - sizeof(udp_header_t) - sizeof(ip6_header_t);
483 /* pop (ip, udp, vxlan) */
484 vlib_buffer_advance (b0, sizeof(*vxlan0));
486 u32 tunnel_index0 = ~0;
489 if (PREDICT_FALSE (vxlan0->flags != VXLAN_FLAGS_I))
491 error0 = VXLAN_ERROR_BAD_FLAGS;
492 next0 = VXLAN_INPUT_NEXT_DROP;
497 vxlan4_tunnel_key_t key4_0 = {
498 .src = ip4_0->src_address.as_u32,
499 .vni = vxlan0->vni_reserved,
502 /* Make sure a unicast VXLAN tunnel exists for the packet SIP and VNI */
503 if (PREDICT_FALSE (key4_0.as_u64 != last_key4.as_u64))
505 uword * p = hash_get (vxm->vxlan4_tunnel_by_key, key4_0.as_u64);
506 if (PREDICT_FALSE (p == NULL))
508 error0 = VXLAN_ERROR_NO_SUCH_TUNNEL;
509 next0 = VXLAN_INPUT_NEXT_DROP;
513 last_tunnel_index = p[0];
515 tunnel_index0 = last_tunnel_index;
516 stats_t0 = t0 = pool_elt_at_index (vxm->tunnels, tunnel_index0);
518 /* Validate VXLAN tunnel encap-fib index against packet */
519 if (PREDICT_FALSE (validate_vxlan_fib (b0, t0, is_ip4) == 0))
521 error0 = VXLAN_ERROR_NO_SUCH_TUNNEL;
522 next0 = VXLAN_INPUT_NEXT_DROP;
526 /* Validate VXLAN tunnel SIP against packet DIP */
527 if (PREDICT_TRUE (ip4_0->dst_address.as_u32 == t0->src.ip4.as_u32))
528 goto next00; /* valid packet */
529 if (PREDICT_FALSE (ip4_address_is_multicast (&ip4_0->dst_address)))
531 /* Make sure a mcast VXLAN tunnel exists for the packet DIP and VNI */
532 key4_0.src = ip4_0->dst_address.as_u32;
533 uword * p = hash_get (vxm->vxlan4_tunnel_by_key, key4_0.as_u64);
534 if (PREDICT_TRUE (p != NULL))
536 stats_t0 = pool_elt_at_index (vxm->tunnels, p[0]);
537 goto next00; /* valid packet */
540 error0 = VXLAN_ERROR_NO_SUCH_TUNNEL;
541 next0 = VXLAN_INPUT_NEXT_DROP;
544 } else /* !is_ip4 */ {
545 /* Make sure a VXLAN tunnel exists for the packet SIP and VNI */
546 vxlan6_tunnel_key_t key6_0 = {
547 .src = ip6_0->src_address,
548 .vni = vxlan0->vni_reserved,
551 if (PREDICT_FALSE (memcmp(&key6_0, &last_key6, sizeof(last_key6)) != 0))
553 uword * p = hash_get_mem (vxm->vxlan6_tunnel_by_key, &key6_0);
554 if (PREDICT_FALSE (p == NULL))
556 error0 = VXLAN_ERROR_NO_SUCH_TUNNEL;
557 next0 = VXLAN_INPUT_NEXT_DROP;
560 clib_memcpy (&last_key6, &key6_0, sizeof(key6_0));
561 last_tunnel_index = p[0];
563 tunnel_index0 = last_tunnel_index;
564 stats_t0 = t0 = pool_elt_at_index (vxm->tunnels, tunnel_index0);
566 /* Validate VXLAN tunnel encap-fib index against packet */
567 if (PREDICT_FALSE (validate_vxlan_fib (b0, t0, is_ip4) == 0))
569 error0 = VXLAN_ERROR_NO_SUCH_TUNNEL;
570 next0 = VXLAN_INPUT_NEXT_DROP;
574 /* Validate VXLAN tunnel SIP against packet DIP */
575 if (PREDICT_TRUE (ip6_address_is_equal (&ip6_0->dst_address, &t0->src.ip6)))
577 goto next00; /* valid packet */
578 if (PREDICT_FALSE (ip6_address_is_multicast (&ip6_0->dst_address)))
580 key6_0.src = ip6_0->dst_address;
581 uword * p = hash_get_mem (vxm->vxlan6_tunnel_by_key, &key6_0);
582 if (PREDICT_TRUE (p != NULL))
584 stats_t0 = pool_elt_at_index (vxm->tunnels, p[0]);
585 goto next00; /* valid packet */
588 error0 = VXLAN_ERROR_NO_SUCH_TUNNEL;
589 next0 = VXLAN_INPUT_NEXT_DROP;
594 next0 = t0->decap_next_index;
595 u32 len0 = vlib_buffer_length_in_chain (vm, b0);
597 /* Required to make the l2 tag push / pop code work on l2 subifs */
598 if (PREDICT_TRUE(next0 == VXLAN_INPUT_NEXT_L2_INPUT))
599 vnet_update_l2_len (b0);
601 /* Set packet input sw_if_index to unicast VXLAN tunnel for learning */
602 vnet_buffer(b0)->sw_if_index[VLIB_RX] = t0->sw_if_index;
604 /* Batch stats increment on the same vxlan tunnel so the counter
605 is not incremented per packet */
606 u32 sw_if_index0 = stats_t0->sw_if_index;
607 if (PREDICT_FALSE (sw_if_index0 != stats_sw_if_index))
611 vlib_increment_combined_counter
612 (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX,
613 thread_index, stats_sw_if_index,
614 stats_n_packets, stats_n_bytes);
615 pkts_decapsulated += stats_n_packets;
616 stats_n_packets = stats_n_bytes = 0;
618 stats_sw_if_index = sw_if_index0;
620 stats_n_packets += 1;
621 stats_n_bytes += len0;
625 b0->error = error0 ? node->errors[error0] : 0;
627 if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
630 vxlan_rx_trace_t *tr = vlib_add_trace (vm, node, b0, sizeof (*tr));
631 tr->next_index = next0;
633 tr->tunnel_index = tunnel_index0;
634 tr->vni = vnet_get_vni (vxlan0);
636 vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
637 to_next, n_left_to_next, bi0, next0);
641 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
643 /* Increment any remaining batch stats */
646 pkts_decapsulated += stats_n_packets;
647 vlib_increment_combined_counter
648 (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX,
649 thread_index, stats_sw_if_index, stats_n_packets, stats_n_bytes);
650 node->runtime_data[0] = stats_sw_if_index;
653 /* Do we still need this now that tunnel rx stats are kept? */
654 u32 node_idx = is_ip4 ? vxlan4_input_node.index : vxlan6_input_node.index;
655 vlib_node_increment_counter (vm, node_idx, VXLAN_ERROR_DECAPSULATED, pkts_decapsulated);
658 return from_frame->n_vectors;
662 vxlan4_input (vlib_main_t * vm,
663 vlib_node_runtime_t * node,
664 vlib_frame_t * from_frame)
666 return vxlan_input(vm, node, from_frame, /* is_ip4 */ 1);
670 vxlan6_input (vlib_main_t * vm,
671 vlib_node_runtime_t * node,
672 vlib_frame_t * from_frame)
674 return vxlan_input(vm, node, from_frame, /* is_ip4 */ 0);
677 static char * vxlan_error_strings[] = {
678 #define vxlan_error(n,s) s,
679 #include <vnet/vxlan/vxlan_error.def>
680 #undef vxlan_error
681 };
684 VLIB_REGISTER_NODE (vxlan4_input_node) = {
685 .function = vxlan4_input,
686 .name = "vxlan4-input",
687 /* Takes a vector of packets. */
688 .vector_size = sizeof (u32),
690 .n_errors = VXLAN_N_ERROR,
691 .error_strings = vxlan_error_strings,
693 .n_next_nodes = VXLAN_INPUT_N_NEXT,
695 #define _(s,n) [VXLAN_INPUT_NEXT_##s] = n,
696 foreach_vxlan_input_next
700 //temp .format_buffer = format_vxlan_header,
701 .format_trace = format_vxlan_rx_trace,
702 // $$$$ .unformat_buffer = unformat_vxlan_header,
705 VLIB_NODE_FUNCTION_MULTIARCH (vxlan4_input_node, vxlan4_input)
707 VLIB_REGISTER_NODE (vxlan6_input_node) = {
708 .function = vxlan6_input,
709 .name = "vxlan6-input",
710 /* Takes a vector of packets. */
711 .vector_size = sizeof (u32),
713 .n_errors = VXLAN_N_ERROR,
714 .error_strings = vxlan_error_strings,
716 .n_next_nodes = VXLAN_INPUT_N_NEXT,
718 #define _(s,n) [VXLAN_INPUT_NEXT_##s] = n,
719 foreach_vxlan_input_next
723 //temp .format_buffer = format_vxlan_header,
724 .format_trace = format_vxlan_rx_trace,
725 // $$$$ .unformat_buffer = unformat_vxlan_header,
728 VLIB_NODE_FUNCTION_MULTIARCH (vxlan6_input_node, vxlan6_input)
732 IP_VXLAN_BYPASS_NEXT_DROP,
733 IP_VXLAN_BYPASS_NEXT_VXLAN,
734 IP_VXLAN_BYPASS_N_NEXT,
735 } ip_vxlan_bypass_next_t;
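736 /* ip[46]-vxlan-bypass: steer UDP packets destined to the VXLAN port of a local
737    VTEP straight to vxlan[46]-input, bypassing the normal ip-local/udp-local path */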
738 ip_vxlan_bypass_inline (vlib_main_t * vm,
739 vlib_node_runtime_t * node,
740 vlib_frame_t * frame,
743 vxlan_main_t * vxm = &vxlan_main;
744 u32 * from, * to_next, n_left_from, n_left_to_next, next_index;
745 vlib_node_runtime_t * error_node = vlib_node_get_runtime (vm, ip4_input_node.index);
746 ip4_address_t addr4; /* last IPv4 address matching a local VTEP address */
747 ip6_address_t addr6; /* last IPv6 address matching a local VTEP address */
749 from = vlib_frame_vector_args (frame);
750 n_left_from = frame->n_vectors;
751 next_index = node->cached_next_index;
753 if (node->flags & VLIB_NODE_FLAG_TRACE)
754 ip4_forward_next_trace (vm, node, frame, VLIB_TX);
756 if (is_ip4) addr4.data_u32 = ~0;
757 else ip6_address_set_zero (&addr6);
759 while (n_left_from > 0)
761 vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
763 while (n_left_from >= 4 && n_left_to_next >= 2)
765 vlib_buffer_t * b0, * b1;
766 ip4_header_t * ip40, * ip41;
767 ip6_header_t * ip60, * ip61;
768 udp_header_t * udp0, * udp1;
769 u32 bi0, ip_len0, udp_len0, flags0, next0;
770 u32 bi1, ip_len1, udp_len1, flags1, next1;
771 i32 len_diff0, len_diff1;
772 u8 error0, good_udp0, proto0;
773 u8 error1, good_udp1, proto1;
775 /* Prefetch next iteration. */
777 vlib_buffer_t * p2, * p3;
779 p2 = vlib_get_buffer (vm, from[2]);
780 p3 = vlib_get_buffer (vm, from[3]);
782 vlib_prefetch_buffer_header (p2, LOAD);
783 vlib_prefetch_buffer_header (p3, LOAD);
785 CLIB_PREFETCH (p2->data, 2*CLIB_CACHE_LINE_BYTES, LOAD);
786 CLIB_PREFETCH (p3->data, 2*CLIB_CACHE_LINE_BYTES, LOAD);
789 bi0 = to_next[0] = from[0];
790 bi1 = to_next[1] = from[1];
796 b0 = vlib_get_buffer (vm, bi0);
797 b1 = vlib_get_buffer (vm, bi1);
800 ip40 = vlib_buffer_get_current (b0);
801 ip41 = vlib_buffer_get_current (b1);
805 ip60 = vlib_buffer_get_current (b0);
806 ip61 = vlib_buffer_get_current (b1);
809 /* Set up the packet for the next IP feature */
810 vnet_feature_next(vnet_buffer(b0)->sw_if_index[VLIB_RX], &next0, b0);
811 vnet_feature_next(vnet_buffer(b1)->sw_if_index[VLIB_RX], &next1, b1);
815 /* Treat IP fragment packets as "experimental" protocol for now,
816    until support for IP fragment reassembly is implemented */
817 proto0 = ip4_is_fragment(ip40) ? 0xfe : ip40->protocol;
818 proto1 = ip4_is_fragment(ip41) ? 0xfe : ip41->protocol;
822 proto0 = ip60->protocol;
823 proto1 = ip61->protocol;
826 /* Process packet 0 */
827 if (proto0 != IP_PROTOCOL_UDP)
828 goto exit0; /* not UDP packet */
831 udp0 = ip4_next_header (ip40);
833 udp0 = ip6_next_header (ip60);
835 if (udp0->dst_port != clib_host_to_net_u16 (UDP_DST_PORT_vxlan))
836 goto exit0; /* not VXLAN packet */
838 /* Validate DIP against VTEPs */
841 if (addr4.as_u32 != ip40->dst_address.as_u32)
843 if (!hash_get (vxm->vtep4, ip40->dst_address.as_u32))
844 goto exit0; /* no local VTEP for VXLAN packet */
845 addr4 = ip40->dst_address;
850 if (!ip6_address_is_equal (&addr6, &ip60->dst_address))
852 if (!hash_get_mem (vxm->vtep6, &ip60->dst_address))
853 goto exit0; /* no local VTEP for VXLAN packet */
854 addr6 = ip60->dst_address;
859 good_udp0 = (flags0 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;
861 /* Don't verify UDP checksum for packets with explicit zero checksum. */
862 good_udp0 |= udp0->checksum == 0;
864 /* Verify UDP length */
866 ip_len0 = clib_net_to_host_u16 (ip40->length);
868 ip_len0 = clib_net_to_host_u16 (ip60->payload_length);
869 udp_len0 = clib_net_to_host_u16 (udp0->length);
870 len_diff0 = ip_len0 - udp_len0;
872 /* Verify UDP checksum */
873 if (PREDICT_FALSE (!good_udp0))
875 if ((flags0 & VNET_BUFFER_F_L4_CHECKSUM_COMPUTED) == 0)
878 flags0 = ip4_tcp_udp_validate_checksum (vm, b0);
880 flags0 = ip6_tcp_udp_icmp_validate_checksum (vm, b0);
882 good_udp0 = (flags0 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;
888 error0 = good_udp0 ? 0 : IP4_ERROR_UDP_CHECKSUM;
889 error0 = (len_diff0 >= 0) ? error0 : IP4_ERROR_UDP_LENGTH;
893 error0 = good_udp0 ? 0 : IP6_ERROR_UDP_CHECKSUM;
894 error0 = (len_diff0 >= 0) ? error0 : IP6_ERROR_UDP_LENGTH;
898 next0 = error0 ? IP_VXLAN_BYPASS_NEXT_DROP : IP_VXLAN_BYPASS_NEXT_VXLAN;
899 b0->error = error0 ? error_node->errors[error0] : 0;
901 /* vxlan-input node expects current_data to point at the VXLAN header */
903 vlib_buffer_advance (b0, sizeof(ip4_header_t)+sizeof(udp_header_t));
905 vlib_buffer_advance (b0, sizeof(ip6_header_t)+sizeof(udp_header_t));
908 /* Process packet 1 */
909 if (proto1 != IP_PROTOCOL_UDP)
910 goto exit1; /* not UDP packet */
913 udp1 = ip4_next_header (ip41);
915 udp1 = ip6_next_header (ip61);
917 if (udp1->dst_port != clib_host_to_net_u16 (UDP_DST_PORT_vxlan))
918 goto exit1; /* not VXLAN packet */
920 /* Validate DIP against VTEPs */
923 if (addr4.as_u32 != ip41->dst_address.as_u32)
925 if (!hash_get (vxm->vtep4, ip41->dst_address.as_u32))
926 goto exit1; /* no local VTEP for VXLAN packet */
927 addr4 = ip41->dst_address;
932 if (!ip6_address_is_equal (&addr6, &ip61->dst_address))
934 if (!hash_get_mem (vxm->vtep6, &ip61->dst_address))
935 goto exit1; /* no local VTEP for VXLAN packet */
936 addr6 = ip61->dst_address;
941 good_udp1 = (flags1 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;
943 /* Don't verify UDP checksum for packets with explicit zero checksum. */
944 good_udp1 |= udp1->checksum == 0;
946 /* Verify UDP length */
948 ip_len1 = clib_net_to_host_u16 (ip41->length);
950 ip_len1 = clib_net_to_host_u16 (ip61->payload_length);
951 udp_len1 = clib_net_to_host_u16 (udp1->length);
952 len_diff1 = ip_len1 - udp_len1;
954 /* Verify UDP checksum */
955 if (PREDICT_FALSE (!good_udp1))
957 if ((flags1 & VNET_BUFFER_F_L4_CHECKSUM_COMPUTED) == 0)
960 flags1 = ip4_tcp_udp_validate_checksum (vm, b1);
962 flags1 = ip6_tcp_udp_icmp_validate_checksum (vm, b1);
964 good_udp1 = (flags1 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;
970 error1 = good_udp1 ? 0 : IP4_ERROR_UDP_CHECKSUM;
971 error1 = (len_diff1 >= 0) ? error1 : IP4_ERROR_UDP_LENGTH;
975 error1 = good_udp1 ? 0 : IP6_ERROR_UDP_CHECKSUM;
976 error1 = (len_diff1 >= 0) ? error1 : IP6_ERROR_UDP_LENGTH;
980 next1 = error1 ? IP_VXLAN_BYPASS_NEXT_DROP : IP_VXLAN_BYPASS_NEXT_VXLAN;
981 b1->error = error1 ? error_node->errors[error1] : 0;
983 /* vxlan-input node expects current_data to point at the VXLAN header */
985 vlib_buffer_advance (b1, sizeof(ip4_header_t)+sizeof(udp_header_t));
987 vlib_buffer_advance (b1, sizeof(ip6_header_t)+sizeof(udp_header_t));
990 vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
991 to_next, n_left_to_next,
992 bi0, bi1, next0, next1);
995 while (n_left_from > 0 && n_left_to_next > 0)
1000 udp_header_t * udp0;
1001 u32 bi0, ip_len0, udp_len0, flags0, next0;
1003 u8 error0, good_udp0, proto0;
1005 bi0 = to_next[0] = from[0];
1009 n_left_to_next -= 1;
1011 b0 = vlib_get_buffer (vm, bi0);
1013 ip40 = vlib_buffer_get_current (b0);
1015 ip60 = vlib_buffer_get_current (b0);
1017 /* Set up the packet for the next IP feature */
1018 vnet_feature_next(vnet_buffer(b0)->sw_if_index[VLIB_RX], &next0, b0);
1021 /* Treat IP4 fragment packets as "experimental" protocol for now,
1022    until support for IP fragment reassembly is implemented */
1023 proto0 = ip4_is_fragment(ip40) ? 0xfe : ip40->protocol;
1025 proto0 = ip60->protocol;
1027 if (proto0 != IP_PROTOCOL_UDP)
1028 goto exit; /* not UDP packet */
1031 udp0 = ip4_next_header (ip40);
1033 udp0 = ip6_next_header (ip60);
1035 if (udp0->dst_port != clib_host_to_net_u16 (UDP_DST_PORT_vxlan))
1036 goto exit; /* not VXLAN packet */
1038 /* Validate DIP against VTEPs */
1041 if (addr4.as_u32 != ip40->dst_address.as_u32)
1043 if (!hash_get (vxm->vtep4, ip40->dst_address.as_u32))
1044 goto exit; /* no local VTEP for VXLAN packet */
1045 addr4 = ip40->dst_address;
1050 if (!ip6_address_is_equal (&addr6, &ip60->dst_address))
1052 if (!hash_get_mem (vxm->vtep6, &ip60->dst_address))
1053 goto exit; /* no local VTEP for VXLAN packet */
1054 addr6 = ip60->dst_address;
1059 good_udp0 = (flags0 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;
1061 /* Don't verify UDP checksum for packets with explicit zero checksum. */
1062 good_udp0 |= udp0->checksum == 0;
1064 /* Verify UDP length */
1066 ip_len0 = clib_net_to_host_u16 (ip40->length);
1068 ip_len0 = clib_net_to_host_u16 (ip60->payload_length);
1069 udp_len0 = clib_net_to_host_u16 (udp0->length);
1070 len_diff0 = ip_len0 - udp_len0;
1072 /* Verify UDP checksum */
1073 if (PREDICT_FALSE (!good_udp0))
1075 if ((flags0 & VNET_BUFFER_F_L4_CHECKSUM_COMPUTED) == 0)
1078 flags0 = ip4_tcp_udp_validate_checksum (vm, b0);
1080 flags0 = ip6_tcp_udp_icmp_validate_checksum (vm, b0);
1082 good_udp0 = (flags0 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;
1088 error0 = good_udp0 ? 0 : IP4_ERROR_UDP_CHECKSUM;
1089 error0 = (len_diff0 >= 0) ? error0 : IP4_ERROR_UDP_LENGTH;
1093 error0 = good_udp0 ? 0 : IP6_ERROR_UDP_CHECKSUM;
1094 error0 = (len_diff0 >= 0) ? error0 : IP6_ERROR_UDP_LENGTH;
1098 next0 = error0 ? IP_VXLAN_BYPASS_NEXT_DROP : IP_VXLAN_BYPASS_NEXT_VXLAN;
1099 b0->error = error0 ? error_node->errors[error0] : 0;
1101 /* vxlan-input node expects current_data to point at the VXLAN header */
1103 vlib_buffer_advance (b0, sizeof(ip4_header_t)+sizeof(udp_header_t));
1105 vlib_buffer_advance (b0, sizeof(ip6_header_t)+sizeof(udp_header_t));
1108 vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
1109 to_next, n_left_to_next, bi0, next0);
1113 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
1116 return frame->n_vectors;
1120 ip4_vxlan_bypass (vlib_main_t * vm,
1121 vlib_node_runtime_t * node,
1122 vlib_frame_t * frame)
1124 return ip_vxlan_bypass_inline (vm, node, frame, /* is_ip4 */ 1);
1127 VLIB_REGISTER_NODE (ip4_vxlan_bypass_node) = {
1128 .function = ip4_vxlan_bypass,
1129 .name = "ip4-vxlan-bypass",
1130 .vector_size = sizeof (u32),
1132 .n_next_nodes = IP_VXLAN_BYPASS_N_NEXT,
1134 [IP_VXLAN_BYPASS_NEXT_DROP] = "error-drop",
1135 [IP_VXLAN_BYPASS_NEXT_VXLAN] = "vxlan4-input",
1138 .format_buffer = format_ip4_header,
1139 .format_trace = format_ip4_forward_next_trace,
1142 VLIB_NODE_FUNCTION_MULTIARCH (ip4_vxlan_bypass_node,ip4_vxlan_bypass)
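1143 /* The bypass feature is enabled per RX interface, e.g. via the "set interface ip vxlan-bypass" CLI (see vxlan.c); exact command syntax may vary by VPP version */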
1144 /* Dummy init function to get us linked in. */
1145 clib_error_t * ip4_vxlan_bypass_init (vlib_main_t * vm)
1148 VLIB_INIT_FUNCTION (ip4_vxlan_bypass_init);
1151 ip6_vxlan_bypass (vlib_main_t * vm,
1152 vlib_node_runtime_t * node,
1153 vlib_frame_t * frame)
1155 return ip_vxlan_bypass_inline (vm, node, frame, /* is_ip4 */ 0);
1158 VLIB_REGISTER_NODE (ip6_vxlan_bypass_node) = {
1159 .function = ip6_vxlan_bypass,
1160 .name = "ip6-vxlan-bypass",
1161 .vector_size = sizeof (u32),
1163 .n_next_nodes = IP_VXLAN_BYPASS_N_NEXT,
1165 [IP_VXLAN_BYPASS_NEXT_DROP] = "error-drop",
1166 [IP_VXLAN_BYPASS_NEXT_VXLAN] = "vxlan6-input",
1169 .format_buffer = format_ip6_header,
1170 .format_trace = format_ip6_forward_next_trace,
1173 VLIB_NODE_FUNCTION_MULTIARCH (ip6_vxlan_bypass_node,ip6_vxlan_bypass)
1175 /* Dummy init function to get us linked in. */
1176 clib_error_t * ip6_vxlan_bypass_init (vlib_main_t * vm)
1179 VLIB_INIT_FUNCTION (ip6_vxlan_bypass_init);