/*
 * decap.c: vxlan tunnel decap packet processing
 *
 * Copyright (c) 2013 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <vlib/vlib.h>
#include <vnet/pg/pg.h>
#include <vnet/vxlan/vxlan.h>

vlib_node_registration_t vxlan4_input_node;
vlib_node_registration_t vxlan6_input_node;
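/*
 * Decap path in brief: find the tunnel from the outer source address and
 * VNI, validate the encap FIB and destination address, strip the outer
 * ip/udp/vxlan headers, and hand the inner frame to the tunnel's
 * configured decap-next node (normally l2-input).
 *
 * For reference, the VXLAN header per RFC 7348 (a sketch only; the real
 * definition lives in the vxlan packet header file):
 *
 *    |R|R|R|R|I|R|R|R|    Reserved (24 bits)     |
 *    |          VNI (24 bits)            | Rsvd  |
 *
 * Only the I flag (0x08) may be set, which is exactly what the
 * "vxlan->flags != VXLAN_FLAGS_I" checks below enforce.
 */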
typedef struct {
  u32 next_index;
  u32 tunnel_index;
  u32 error;
  u32 vni;
} vxlan_rx_trace_t;

static u8 * format_vxlan_rx_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  vxlan_rx_trace_t * t = va_arg (*args, vxlan_rx_trace_t *);

  if (t->tunnel_index != ~0)
    s = format (s, "VXLAN decap from vxlan_tunnel%d vni %d next %d error %d",
                t->tunnel_index, t->vni, t->next_index, t->error);
  else
    s = format (s, "VXLAN decap error - tunnel for vni %d does not exist",
                t->vni);
  return s;
}
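/*
 * Illustrative trace output (values made up for the example):
 *   VXLAN decap from vxlan_tunnel0 vni 13 next 1 error 0
 *   VXLAN decap error - tunnel for vni 99 does not exist
 */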
always_inline u32
validate_vxlan_fib (vlib_buffer_t *b, vxlan_tunnel_t *t, u32 is_ip4)
{
  u32 fib_index, sw_if_index;

  sw_if_index = vnet_buffer (b)->sw_if_index[VLIB_RX];

  if (is_ip4)
    fib_index = (vnet_buffer (b)->sw_if_index[VLIB_TX] == (u32) ~ 0) ?
      vec_elt (ip4_main.fib_index_by_sw_if_index, sw_if_index) :
      vnet_buffer (b)->sw_if_index[VLIB_TX];
  else
    fib_index = (vnet_buffer (b)->sw_if_index[VLIB_TX] == (u32) ~ 0) ?
      vec_elt (ip6_main.fib_index_by_sw_if_index, sw_if_index) :
      vnet_buffer (b)->sw_if_index[VLIB_TX];

  return (fib_index == t->encap_fib_index);
}
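/*
 * Example of what this guards: a tunnel created with encap-vrf-id 1
 * resolves encap_fib_index from that VRF, so a VXLAN packet arriving on
 * an interface bound to VRF 0 fails the comparison and is dropped as
 * "no such tunnel". This keeps identical (SIP, VNI) pairs in different
 * VRFs from aliasing each other.
 */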
always_inline uword
vxlan_input (vlib_main_t * vm,
             vlib_node_runtime_t * node,
             vlib_frame_t * from_frame,
             u32 is_ip4)
{
  u32 n_left_from, next_index, * from, * to_next;
  vxlan_main_t * vxm = &vxlan_main;
  vnet_main_t * vnm = vxm->vnet_main;
  vnet_interface_main_t * im = &vnm->interface_main;
  u32 last_tunnel_index = ~0;
  vxlan4_tunnel_key_t last_key4;
  vxlan6_tunnel_key_t last_key6;
  u32 pkts_decapsulated = 0;
  u32 thread_index = vlib_get_thread_index();
  u32 stats_sw_if_index, stats_n_packets, stats_n_bytes;

  if (is_ip4)
    last_key4.as_u64 = ~0;
  else
    memset (&last_key6, 0xff, sizeof (last_key6));

  from = vlib_frame_vector_args (from_frame);
  n_left_from = from_frame->n_vectors;

  next_index = node->cached_next_index;
  stats_sw_if_index = node->runtime_data[0];
  stats_n_packets = stats_n_bytes = 0;
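  /*
   * last_key4/last_key6 and last_tunnel_index act as a one-entry lookup
   * cache. Tunnel traffic tends to arrive in bursts from the same peer,
   * so consecutive packets carrying the same (SIP, VNI) key skip the hash
   * lookup and reuse the previously resolved tunnel index.
   */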
  while (n_left_from > 0)
    {
      u32 n_left_to_next;

      vlib_get_next_frame (vm, node, next_index,
                           to_next, n_left_to_next);
      while (n_left_from >= 4 && n_left_to_next >= 2)
        {
          u32 bi0, bi1;
          vlib_buffer_t * b0, * b1;
          u32 next0, next1;
          ip4_header_t * ip4_0, * ip4_1;
          ip6_header_t * ip6_0, * ip6_1;
          vxlan_header_t * vxlan0, * vxlan1;
          uword * p0, * p1;
          u32 tunnel_index0, tunnel_index1;
          vxlan_tunnel_t * t0, * t1, * mt0 = NULL, * mt1 = NULL;
          vxlan4_tunnel_key_t key4_0, key4_1;
          vxlan6_tunnel_key_t key6_0, key6_1;
          u32 error0, error1;
          u32 sw_if_index0, sw_if_index1, len0, len1;

          /* Prefetch next iteration. */
          {
            vlib_buffer_t * p2, * p3;

            p2 = vlib_get_buffer (vm, from[2]);
            p3 = vlib_get_buffer (vm, from[3]);

            vlib_prefetch_buffer_header (p2, LOAD);
            vlib_prefetch_buffer_header (p3, LOAD);

            CLIB_PREFETCH (p2->data, 2*CLIB_CACHE_LINE_BYTES, LOAD);
            CLIB_PREFETCH (p3->data, 2*CLIB_CACHE_LINE_BYTES, LOAD);
          }
          bi0 = from[0];
          bi1 = from[1];
          to_next[0] = bi0;
          to_next[1] = bi1;
          from += 2;
          to_next += 2;
          n_left_to_next -= 2;
          n_left_from -= 2;

          b0 = vlib_get_buffer (vm, bi0);
          b1 = vlib_get_buffer (vm, bi1);

          /* udp-local dispatch leaves current_data pointing at the vxlan header */
          vxlan0 = vlib_buffer_get_current (b0);
          vxlan1 = vlib_buffer_get_current (b1);
          if (is_ip4) {
            vlib_buffer_advance
              (b0, -(word)(sizeof(udp_header_t)+sizeof(ip4_header_t)));
            vlib_buffer_advance
              (b1, -(word)(sizeof(udp_header_t)+sizeof(ip4_header_t)));
            ip4_0 = vlib_buffer_get_current (b0);
            ip4_1 = vlib_buffer_get_current (b1);
          } else {
            vlib_buffer_advance
              (b0, -(word)(sizeof(udp_header_t)+sizeof(ip6_header_t)));
            vlib_buffer_advance
              (b1, -(word)(sizeof(udp_header_t)+sizeof(ip6_header_t)));
            ip6_0 = vlib_buffer_get_current (b0);
            ip6_1 = vlib_buffer_get_current (b1);
          }

          /* pop (ip, udp, vxlan) */
          if (is_ip4) {
            vlib_buffer_advance
              (b0, sizeof(*ip4_0)+sizeof(udp_header_t)+sizeof(*vxlan0));
            vlib_buffer_advance
              (b1, sizeof(*ip4_1)+sizeof(udp_header_t)+sizeof(*vxlan1));
          } else {
            vlib_buffer_advance
              (b0, sizeof(*ip6_0)+sizeof(udp_header_t)+sizeof(*vxlan0));
            vlib_buffer_advance
              (b1, sizeof(*ip6_1)+sizeof(udp_header_t)+sizeof(*vxlan1));
          }
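          /*
           * Note the two-step pointer dance above: the buffer arrived with
           * current_data at the VXLAN header; we rewound to the outer IP
           * header so the checks below can read SIP/DIP, then advanced
           * past ip+udp+vxlan so the buffer now points at the inner
           * Ethernet frame. The saved ip/vxlan pointers remain valid
           * because the packet data itself is never moved.
           */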
          tunnel_index0 = ~0;
          error0 = 0;

          tunnel_index1 = ~0;
          error1 = 0;

          if (PREDICT_FALSE (vxlan0->flags != VXLAN_FLAGS_I))
            {
              error0 = VXLAN_ERROR_BAD_FLAGS;
              next0 = VXLAN_INPUT_NEXT_DROP;
              goto trace0;
            }

          if (is_ip4) {
            key4_0.src = ip4_0->src_address.as_u32;
            key4_0.vni = vxlan0->vni_reserved;

            /* Make sure a VXLAN tunnel exists for the packet's SIP and VNI */
            if (PREDICT_FALSE (key4_0.as_u64 != last_key4.as_u64))
              {
                p0 = hash_get (vxm->vxlan4_tunnel_by_key, key4_0.as_u64);
                if (PREDICT_FALSE (p0 == NULL))
                  {
                    error0 = VXLAN_ERROR_NO_SUCH_TUNNEL;
                    next0 = VXLAN_INPUT_NEXT_DROP;
                    goto trace0;
                  }
                last_key4.as_u64 = key4_0.as_u64;
                tunnel_index0 = last_tunnel_index = p0[0];
              }
            else
              tunnel_index0 = last_tunnel_index;
            t0 = pool_elt_at_index (vxm->tunnels, tunnel_index0);

            /* Validate VXLAN tunnel encap-fib index against packet */
            if (PREDICT_FALSE (validate_vxlan_fib (b0, t0, is_ip4) == 0))
              {
                error0 = VXLAN_ERROR_NO_SUCH_TUNNEL;
                next0 = VXLAN_INPUT_NEXT_DROP;
                goto trace0;
              }

            /* Validate VXLAN tunnel SIP against packet DIP */
            if (PREDICT_TRUE (ip4_0->dst_address.as_u32 == t0->src.ip4.as_u32))
              goto next0; /* valid packet */
            if (PREDICT_FALSE (ip4_address_is_multicast (&ip4_0->dst_address)))
              {
                key4_0.src = ip4_0->dst_address.as_u32;
                key4_0.vni = vxlan0->vni_reserved;
                /* Make sure an mcast VXLAN tunnel exists for the packet's DIP and VNI */
                p0 = hash_get (vxm->vxlan4_tunnel_by_key, key4_0.as_u64);
                if (PREDICT_TRUE (p0 != NULL))
                  {
                    mt0 = pool_elt_at_index (vxm->tunnels, p0[0]);
                    goto next0; /* valid packet */
                  }
              }
            error0 = VXLAN_ERROR_NO_SUCH_TUNNEL;
            next0 = VXLAN_INPUT_NEXT_DROP;
            goto trace0;

          } else /* !is_ip4 */ {
            key6_0.src.as_u64[0] = ip6_0->src_address.as_u64[0];
            key6_0.src.as_u64[1] = ip6_0->src_address.as_u64[1];
            key6_0.vni = vxlan0->vni_reserved;

            /* Make sure a VXLAN tunnel exists for the packet's SIP and VNI */
            if (PREDICT_FALSE (memcmp(&key6_0, &last_key6, sizeof(last_key6)) != 0))
              {
                p0 = hash_get_mem (vxm->vxlan6_tunnel_by_key, &key6_0);
                if (PREDICT_FALSE (p0 == NULL))
                  {
                    error0 = VXLAN_ERROR_NO_SUCH_TUNNEL;
                    next0 = VXLAN_INPUT_NEXT_DROP;
                    goto trace0;
                  }
                clib_memcpy (&last_key6, &key6_0, sizeof(key6_0));
                tunnel_index0 = last_tunnel_index = p0[0];
              }
            else
              tunnel_index0 = last_tunnel_index;
            t0 = pool_elt_at_index (vxm->tunnels, tunnel_index0);

            /* Validate VXLAN tunnel encap-fib index against packet */
            if (PREDICT_FALSE (validate_vxlan_fib (b0, t0, is_ip4) == 0))
              {
                error0 = VXLAN_ERROR_NO_SUCH_TUNNEL;
                next0 = VXLAN_INPUT_NEXT_DROP;
                goto trace0;
              }

            /* Validate VXLAN tunnel SIP against packet DIP */
            if (PREDICT_TRUE (ip6_address_is_equal (&ip6_0->dst_address,
                                                    &t0->src.ip6)))
              goto next0; /* valid packet */
            if (PREDICT_FALSE (ip6_address_is_multicast (&ip6_0->dst_address)))
              {
                key6_0.src.as_u64[0] = ip6_0->dst_address.as_u64[0];
                key6_0.src.as_u64[1] = ip6_0->dst_address.as_u64[1];
                key6_0.vni = vxlan0->vni_reserved;
                p0 = hash_get_mem (vxm->vxlan6_tunnel_by_key, &key6_0);
                if (PREDICT_TRUE (p0 != NULL))
                  {
                    mt0 = pool_elt_at_index (vxm->tunnels, p0[0]);
                    goto next0; /* valid packet */
                  }
              }
            error0 = VXLAN_ERROR_NO_SUCH_TUNNEL;
            next0 = VXLAN_INPUT_NEXT_DROP;
            goto trace0;
          }
        next0:
          next0 = t0->decap_next_index;
          sw_if_index0 = t0->sw_if_index;
          len0 = vlib_buffer_length_in_chain (vm, b0);

          /* Required to make the l2 tag push / pop code work on l2 subifs */
          if (PREDICT_TRUE(next0 == VXLAN_INPUT_NEXT_L2_INPUT))
            vnet_update_l2_len (b0);

          /* Set packet input sw_if_index to unicast VXLAN tunnel for learning */
          vnet_buffer(b0)->sw_if_index[VLIB_RX] = sw_if_index0;
          sw_if_index0 = (mt0) ? mt0->sw_if_index : sw_if_index0;

          pkts_decapsulated ++;
          stats_n_packets += 1;
          stats_n_bytes += len0;

          /* Batch stats increments on the same vxlan tunnel so the counter
             is not incremented per packet */
          if (PREDICT_FALSE (sw_if_index0 != stats_sw_if_index))
            {
              stats_n_packets -= 1;
              stats_n_bytes -= len0;
              if (stats_n_packets)
                vlib_increment_combined_counter
                  (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX,
                   thread_index, stats_sw_if_index,
                   stats_n_packets, stats_n_bytes);
              stats_n_packets = 1;
              stats_n_bytes = len0;
              stats_sw_if_index = sw_if_index0;
            }

        trace0:
          b0->error = error0 ? node->errors[error0] : 0;

          if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
            {
              vxlan_rx_trace_t *tr
                = vlib_add_trace (vm, node, b0, sizeof (*tr));
              tr->next_index = next0;
              tr->error = error0;
              tr->tunnel_index = tunnel_index0;
              tr->vni = vnet_get_vni (vxlan0);
            }
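          /* Packet 1 gets exactly the same treatment as packet 0 above. */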
          if (PREDICT_FALSE (vxlan1->flags != VXLAN_FLAGS_I))
            {
              error1 = VXLAN_ERROR_BAD_FLAGS;
              next1 = VXLAN_INPUT_NEXT_DROP;
              goto trace1;
            }

          if (is_ip4) {
            key4_1.src = ip4_1->src_address.as_u32;
            key4_1.vni = vxlan1->vni_reserved;

            /* Make sure a unicast VXLAN tunnel exists for the packet's SIP and VNI */
            if (PREDICT_FALSE (key4_1.as_u64 != last_key4.as_u64))
              {
                p1 = hash_get (vxm->vxlan4_tunnel_by_key, key4_1.as_u64);
                if (PREDICT_FALSE (p1 == NULL))
                  {
                    error1 = VXLAN_ERROR_NO_SUCH_TUNNEL;
                    next1 = VXLAN_INPUT_NEXT_DROP;
                    goto trace1;
                  }
                last_key4.as_u64 = key4_1.as_u64;
                tunnel_index1 = last_tunnel_index = p1[0];
              }
            else
              tunnel_index1 = last_tunnel_index;
            t1 = pool_elt_at_index (vxm->tunnels, tunnel_index1);

            /* Validate VXLAN tunnel encap-fib index against packet */
            if (PREDICT_FALSE (validate_vxlan_fib (b1, t1, is_ip4) == 0))
              {
                error1 = VXLAN_ERROR_NO_SUCH_TUNNEL;
                next1 = VXLAN_INPUT_NEXT_DROP;
                goto trace1;
              }

            /* Validate VXLAN tunnel SIP against packet DIP */
            if (PREDICT_TRUE (ip4_1->dst_address.as_u32 == t1->src.ip4.as_u32))
              goto next1; /* valid packet */
            if (PREDICT_FALSE (ip4_address_is_multicast (&ip4_1->dst_address)))
              {
                key4_1.src = ip4_1->dst_address.as_u32;
                key4_1.vni = vxlan1->vni_reserved;
                /* Make sure an mcast VXLAN tunnel exists for the packet's DIP and VNI */
                p1 = hash_get (vxm->vxlan4_tunnel_by_key, key4_1.as_u64);
                if (PREDICT_TRUE (p1 != NULL))
                  {
                    mt1 = pool_elt_at_index (vxm->tunnels, p1[0]);
                    goto next1; /* valid packet */
                  }
              }
            error1 = VXLAN_ERROR_NO_SUCH_TUNNEL;
            next1 = VXLAN_INPUT_NEXT_DROP;
            goto trace1;

          } else /* !is_ip4 */ {
            key6_1.src.as_u64[0] = ip6_1->src_address.as_u64[0];
            key6_1.src.as_u64[1] = ip6_1->src_address.as_u64[1];
            key6_1.vni = vxlan1->vni_reserved;

            /* Make sure a VXLAN tunnel exists for the packet's SIP and VNI */
            if (PREDICT_FALSE (memcmp(&key6_1, &last_key6, sizeof(last_key6)) != 0))
              {
                p1 = hash_get_mem (vxm->vxlan6_tunnel_by_key, &key6_1);
                if (PREDICT_FALSE (p1 == NULL))
                  {
                    error1 = VXLAN_ERROR_NO_SUCH_TUNNEL;
                    next1 = VXLAN_INPUT_NEXT_DROP;
                    goto trace1;
                  }
                clib_memcpy (&last_key6, &key6_1, sizeof(key6_1));
                tunnel_index1 = last_tunnel_index = p1[0];
              }
            else
              tunnel_index1 = last_tunnel_index;
            t1 = pool_elt_at_index (vxm->tunnels, tunnel_index1);

            /* Validate VXLAN tunnel encap-fib index against packet */
            if (PREDICT_FALSE (validate_vxlan_fib (b1, t1, is_ip4) == 0))
              {
                error1 = VXLAN_ERROR_NO_SUCH_TUNNEL;
                next1 = VXLAN_INPUT_NEXT_DROP;
                goto trace1;
              }

            /* Validate VXLAN tunnel SIP against packet DIP */
            if (PREDICT_TRUE (ip6_address_is_equal (&ip6_1->dst_address,
                                                    &t1->src.ip6)))
              goto next1; /* valid packet */
            if (PREDICT_FALSE (ip6_address_is_multicast (&ip6_1->dst_address)))
              {
                key6_1.src.as_u64[0] = ip6_1->dst_address.as_u64[0];
                key6_1.src.as_u64[1] = ip6_1->dst_address.as_u64[1];
                key6_1.vni = vxlan1->vni_reserved;
                p1 = hash_get_mem (vxm->vxlan6_tunnel_by_key, &key6_1);
                if (PREDICT_TRUE (p1 != NULL))
                  {
                    mt1 = pool_elt_at_index (vxm->tunnels, p1[0]);
                    goto next1; /* valid packet */
                  }
              }
            error1 = VXLAN_ERROR_NO_SUCH_TUNNEL;
            next1 = VXLAN_INPUT_NEXT_DROP;
            goto trace1;
          }
        next1:
          next1 = t1->decap_next_index;
          sw_if_index1 = t1->sw_if_index;
          len1 = vlib_buffer_length_in_chain (vm, b1);

          /* Required to make the l2 tag push / pop code work on l2 subifs */
          if (PREDICT_TRUE(next1 == VXLAN_INPUT_NEXT_L2_INPUT))
            vnet_update_l2_len (b1);

          /* Set packet input sw_if_index to unicast VXLAN tunnel for learning */
          vnet_buffer(b1)->sw_if_index[VLIB_RX] = sw_if_index1;
          sw_if_index1 = (mt1) ? mt1->sw_if_index : sw_if_index1;

          pkts_decapsulated ++;
          stats_n_packets += 1;
          stats_n_bytes += len1;

          /* Batch stats increments on the same vxlan tunnel so the counter
             is not incremented per packet */
          if (PREDICT_FALSE (sw_if_index1 != stats_sw_if_index))
            {
              stats_n_packets -= 1;
              stats_n_bytes -= len1;
              if (stats_n_packets)
                vlib_increment_combined_counter
                  (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX,
                   thread_index, stats_sw_if_index,
                   stats_n_packets, stats_n_bytes);
              stats_n_packets = 1;
              stats_n_bytes = len1;
              stats_sw_if_index = sw_if_index1;
            }

        trace1:
          b1->error = error1 ? node->errors[error1] : 0;

          if (PREDICT_FALSE(b1->flags & VLIB_BUFFER_IS_TRACED))
            {
              vxlan_rx_trace_t *tr
                = vlib_add_trace (vm, node, b1, sizeof (*tr));
              tr->next_index = next1;
              tr->error = error1;
              tr->tunnel_index = tunnel_index1;
              tr->vni = vnet_get_vni (vxlan1);
            }
          vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, bi1, next0, next1);
        }
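      /*
       * vlib_validate_buffer_enqueue_x2 covers the slow path where next0
       * or next1 differs from the frame's cached next node. The
       * single-buffer loop below handles whatever the quad-pump loop
       * above could not take (fewer than 4 buffers in, or 2 slots out).
       */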
      while (n_left_from > 0 && n_left_to_next > 0)
        {
          u32 bi0;
          vlib_buffer_t * b0;
          u32 next0;
          ip4_header_t * ip4_0;
          ip6_header_t * ip6_0;
          vxlan_header_t * vxlan0;
          uword * p0;
          u32 tunnel_index0;
          vxlan_tunnel_t * t0, * mt0 = NULL;
          vxlan4_tunnel_key_t key4_0;
          vxlan6_tunnel_key_t key6_0;
          u32 error0;
          u32 sw_if_index0, len0;

          bi0 = from[0];
          to_next[0] = bi0;
          from += 1;
          to_next += 1;
          n_left_from -= 1;
          n_left_to_next -= 1;

          b0 = vlib_get_buffer (vm, bi0);

          /* udp-local dispatch leaves current_data pointing at the vxlan header */
          vxlan0 = vlib_buffer_get_current (b0);
          if (is_ip4) {
            vlib_buffer_advance
              (b0, -(word)(sizeof(udp_header_t)+sizeof(ip4_header_t)));
            ip4_0 = vlib_buffer_get_current (b0);
          } else {
            vlib_buffer_advance
              (b0, -(word)(sizeof(udp_header_t)+sizeof(ip6_header_t)));
            ip6_0 = vlib_buffer_get_current (b0);
          }

          /* pop (ip, udp, vxlan) */
          if (is_ip4)
            vlib_buffer_advance
              (b0, sizeof(*ip4_0)+sizeof(udp_header_t)+sizeof(*vxlan0));
          else
            vlib_buffer_advance
              (b0, sizeof(*ip6_0)+sizeof(udp_header_t)+sizeof(*vxlan0));
          tunnel_index0 = ~0;
          error0 = 0;

          if (PREDICT_FALSE (vxlan0->flags != VXLAN_FLAGS_I))
            {
              error0 = VXLAN_ERROR_BAD_FLAGS;
              next0 = VXLAN_INPUT_NEXT_DROP;
              goto trace00;
            }

          if (is_ip4) {
            key4_0.src = ip4_0->src_address.as_u32;
            key4_0.vni = vxlan0->vni_reserved;

            /* Make sure a unicast VXLAN tunnel exists for the packet's SIP and VNI */
            if (PREDICT_FALSE (key4_0.as_u64 != last_key4.as_u64))
              {
                p0 = hash_get (vxm->vxlan4_tunnel_by_key, key4_0.as_u64);
                if (PREDICT_FALSE (p0 == NULL))
                  {
                    error0 = VXLAN_ERROR_NO_SUCH_TUNNEL;
                    next0 = VXLAN_INPUT_NEXT_DROP;
                    goto trace00;
                  }
                last_key4.as_u64 = key4_0.as_u64;
                tunnel_index0 = last_tunnel_index = p0[0];
              }
            else
              tunnel_index0 = last_tunnel_index;
            t0 = pool_elt_at_index (vxm->tunnels, tunnel_index0);

            /* Validate VXLAN tunnel encap-fib index against packet */
            if (PREDICT_FALSE (validate_vxlan_fib (b0, t0, is_ip4) == 0))
              {
                error0 = VXLAN_ERROR_NO_SUCH_TUNNEL;
                next0 = VXLAN_INPUT_NEXT_DROP;
                goto trace00;
              }

            /* Validate VXLAN tunnel SIP against packet DIP */
            if (PREDICT_TRUE (ip4_0->dst_address.as_u32 == t0->src.ip4.as_u32))
              goto next00; /* valid packet */
            if (PREDICT_FALSE (ip4_address_is_multicast (&ip4_0->dst_address)))
              {
                key4_0.src = ip4_0->dst_address.as_u32;
                key4_0.vni = vxlan0->vni_reserved;
                /* Make sure an mcast VXLAN tunnel exists for the packet's DIP and VNI */
                p0 = hash_get (vxm->vxlan4_tunnel_by_key, key4_0.as_u64);
                if (PREDICT_TRUE (p0 != NULL))
                  {
                    mt0 = pool_elt_at_index (vxm->tunnels, p0[0]);
                    goto next00; /* valid packet */
                  }
              }
            error0 = VXLAN_ERROR_NO_SUCH_TUNNEL;
            next0 = VXLAN_INPUT_NEXT_DROP;
            goto trace00;

          } else /* !is_ip4 */ {
            key6_0.src.as_u64[0] = ip6_0->src_address.as_u64[0];
            key6_0.src.as_u64[1] = ip6_0->src_address.as_u64[1];
            key6_0.vni = vxlan0->vni_reserved;

            /* Make sure a VXLAN tunnel exists for the packet's SIP and VNI */
            if (PREDICT_FALSE (memcmp(&key6_0, &last_key6, sizeof(last_key6)) != 0))
              {
                p0 = hash_get_mem (vxm->vxlan6_tunnel_by_key, &key6_0);
                if (PREDICT_FALSE (p0 == NULL))
                  {
                    error0 = VXLAN_ERROR_NO_SUCH_TUNNEL;
                    next0 = VXLAN_INPUT_NEXT_DROP;
                    goto trace00;
                  }
                clib_memcpy (&last_key6, &key6_0, sizeof(key6_0));
                tunnel_index0 = last_tunnel_index = p0[0];
              }
            else
              tunnel_index0 = last_tunnel_index;
            t0 = pool_elt_at_index (vxm->tunnels, tunnel_index0);

            /* Validate VXLAN tunnel encap-fib index against packet */
            if (PREDICT_FALSE (validate_vxlan_fib (b0, t0, is_ip4) == 0))
              {
                error0 = VXLAN_ERROR_NO_SUCH_TUNNEL;
                next0 = VXLAN_INPUT_NEXT_DROP;
                goto trace00;
              }

            /* Validate VXLAN tunnel SIP against packet DIP */
            if (PREDICT_TRUE (ip6_address_is_equal (&ip6_0->dst_address,
                                                    &t0->src.ip6)))
              goto next00; /* valid packet */
            if (PREDICT_FALSE (ip6_address_is_multicast (&ip6_0->dst_address)))
              {
                key6_0.src.as_u64[0] = ip6_0->dst_address.as_u64[0];
                key6_0.src.as_u64[1] = ip6_0->dst_address.as_u64[1];
                key6_0.vni = vxlan0->vni_reserved;
                p0 = hash_get_mem (vxm->vxlan6_tunnel_by_key, &key6_0);
                if (PREDICT_TRUE (p0 != NULL))
                  {
                    mt0 = pool_elt_at_index (vxm->tunnels, p0[0]);
                    goto next00; /* valid packet */
                  }
              }
            error0 = VXLAN_ERROR_NO_SUCH_TUNNEL;
            next0 = VXLAN_INPUT_NEXT_DROP;
            goto trace00;
          }
        next00:
          next0 = t0->decap_next_index;
          sw_if_index0 = t0->sw_if_index;
          len0 = vlib_buffer_length_in_chain (vm, b0);

          /* Required to make the l2 tag push / pop code work on l2 subifs */
          if (PREDICT_TRUE(next0 == VXLAN_INPUT_NEXT_L2_INPUT))
            vnet_update_l2_len (b0);

          /* Set packet input sw_if_index to unicast VXLAN tunnel for learning */
          vnet_buffer(b0)->sw_if_index[VLIB_RX] = sw_if_index0;
          sw_if_index0 = (mt0) ? mt0->sw_if_index : sw_if_index0;

          pkts_decapsulated ++;
          stats_n_packets += 1;
          stats_n_bytes += len0;

          /* Batch stats increments on the same vxlan tunnel so the counter
             is not incremented per packet */
          if (PREDICT_FALSE (sw_if_index0 != stats_sw_if_index))
            {
              stats_n_packets -= 1;
              stats_n_bytes -= len0;
              if (stats_n_packets)
                vlib_increment_combined_counter
                  (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX,
                   thread_index, stats_sw_if_index,
                   stats_n_packets, stats_n_bytes);
              stats_n_packets = 1;
              stats_n_bytes = len0;
              stats_sw_if_index = sw_if_index0;
            }

        trace00:
          b0->error = error0 ? node->errors[error0] : 0;

          if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
            {
              vxlan_rx_trace_t *tr
                = vlib_add_trace (vm, node, b0, sizeof (*tr));
              tr->next_index = next0;
              tr->error = error0;
              tr->tunnel_index = tunnel_index0;
              tr->vni = vnet_get_vni (vxlan0);
            }
          vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, next0);
        }

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

  /* Do we still need this now that tunnel tx stats is kept? */
  vlib_node_increment_counter (vm, is_ip4?
                               vxlan4_input_node.index:vxlan6_input_node.index,
                               VXLAN_ERROR_DECAPSULATED,
                               pkts_decapsulated);

  /* Increment any remaining batch stats */
  if (stats_n_packets)
    {
      vlib_increment_combined_counter
        (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX,
         thread_index, stats_sw_if_index, stats_n_packets, stats_n_bytes);
      node->runtime_data[0] = stats_sw_if_index;
    }

  return from_frame->n_vectors;
}
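/*
 * The thin wrappers below specialize the shared inline with a
 * compile-time constant is_ip4, so each node carries only its own
 * address-family code; the dead branches are discarded by the compiler.
 */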
static uword
vxlan4_input (vlib_main_t * vm,
              vlib_node_runtime_t * node,
              vlib_frame_t * from_frame)
{
  return vxlan_input(vm, node, from_frame, /* is_ip4 */ 1);
}

static uword
vxlan6_input (vlib_main_t * vm,
              vlib_node_runtime_t * node,
              vlib_frame_t * from_frame)
{
  return vxlan_input(vm, node, from_frame, /* is_ip4 */ 0);
}
static char * vxlan_error_strings[] = {
#define vxlan_error(n,s) s,
#include <vnet/vxlan/vxlan_error.def>
#undef vxlan_error
#undef _
};
VLIB_REGISTER_NODE (vxlan4_input_node) = {
  .function = vxlan4_input,
  .name = "vxlan4-input",
  /* Takes a vector of packets. */
  .vector_size = sizeof (u32),

  .n_errors = VXLAN_N_ERROR,
  .error_strings = vxlan_error_strings,

  .n_next_nodes = VXLAN_INPUT_N_NEXT,
  .next_nodes = {
#define _(s,n) [VXLAN_INPUT_NEXT_##s] = n,
    foreach_vxlan_input_next
#undef _
  },

//temp  .format_buffer = format_vxlan_header,
  .format_trace = format_vxlan_rx_trace,
  // $$$$ .unformat_buffer = unformat_vxlan_header,
};

VLIB_NODE_FUNCTION_MULTIARCH (vxlan4_input_node, vxlan4_input)
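/*
 * Illustrative use from the CLI (assumed syntax; values are examples
 * only):
 *
 *   create vxlan tunnel src 10.0.3.1 dst 10.0.3.3 vni 13
 *   set interface l2 bridge vxlan_tunnel0 1
 *
 * Decapsulated packets for that tunnel then appear in "show trace"
 * formatted by format_vxlan_rx_trace above.
 */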
VLIB_REGISTER_NODE (vxlan6_input_node) = {
  .function = vxlan6_input,
  .name = "vxlan6-input",
  /* Takes a vector of packets. */
  .vector_size = sizeof (u32),

  .n_errors = VXLAN_N_ERROR,
  .error_strings = vxlan_error_strings,

  .n_next_nodes = VXLAN_INPUT_N_NEXT,
  .next_nodes = {
#define _(s,n) [VXLAN_INPUT_NEXT_##s] = n,
    foreach_vxlan_input_next
#undef _
  },

//temp  .format_buffer = format_vxlan_header,
  .format_trace = format_vxlan_rx_trace,
  // $$$$ .unformat_buffer = unformat_vxlan_header,
};

VLIB_NODE_FUNCTION_MULTIARCH (vxlan6_input_node, vxlan6_input)
typedef enum {
  IP_VXLAN_BYPASS_NEXT_DROP,
  IP_VXLAN_BYPASS_NEXT_VXLAN,
  IP_VXLAN_BYPASS_N_NEXT,
} ip_vxlan_bypass_next_t;
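/*
 * The bypass nodes below run as an IP input feature: when a packet is UDP
 * destined to the VXLAN port and its DIP matches a local VTEP, they
 * validate UDP length and checksum and jump straight to vxlan4-input /
 * vxlan6-input, skipping the generic ip-lookup/ip-local path. Enabled per
 * interface (assumed CLI syntax):
 *
 *   set interface ip vxlan-bypass GigabitEthernet0/8/0
 */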
always_inline uword
ip_vxlan_bypass_inline (vlib_main_t * vm,
                        vlib_node_runtime_t * node,
                        vlib_frame_t * frame,
                        u32 is_ip4)
{
  vxlan_main_t * vxm = &vxlan_main;
  u32 * from, * to_next, n_left_from, n_left_to_next, next_index;
  vlib_node_runtime_t * error_node = vlib_node_get_runtime (vm, ip4_input_node.index);
  ip4_address_t addr4; /* last IPv4 address matching a local VTEP address */
  ip6_address_t addr6; /* last IPv6 address matching a local VTEP address */

  from = vlib_frame_vector_args (frame);
  n_left_from = frame->n_vectors;
  next_index = node->cached_next_index;

  if (node->flags & VLIB_NODE_FLAG_TRACE)
    ip4_forward_next_trace (vm, node, frame, VLIB_TX);

  if (is_ip4) addr4.data_u32 = ~0;
  else ip6_address_set_zero (&addr6);
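  /*
   * addr4/addr6 cache the most recent DIP that matched a local VTEP,
   * mirroring the one-entry tunnel-key cache in vxlan_input: back-to-back
   * packets to the same VTEP skip the vtep4/vtep6 hash lookup.
   */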
  while (n_left_from > 0)
    {
      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      while (n_left_from >= 4 && n_left_to_next >= 2)
        {
          vlib_buffer_t * b0, * b1;
          ip4_header_t * ip40, * ip41;
          ip6_header_t * ip60, * ip61;
          udp_header_t * udp0, * udp1;
          u32 bi0, ip_len0, udp_len0, flags0, next0;
          u32 bi1, ip_len1, udp_len1, flags1, next1;
          i32 len_diff0, len_diff1;
          u8 error0, good_udp0, proto0;
          u8 error1, good_udp1, proto1;

          /* Prefetch next iteration. */
          {
            vlib_buffer_t * p2, * p3;

            p2 = vlib_get_buffer (vm, from[2]);
            p3 = vlib_get_buffer (vm, from[3]);

            vlib_prefetch_buffer_header (p2, LOAD);
            vlib_prefetch_buffer_header (p3, LOAD);

            CLIB_PREFETCH (p2->data, 2*CLIB_CACHE_LINE_BYTES, LOAD);
            CLIB_PREFETCH (p3->data, 2*CLIB_CACHE_LINE_BYTES, LOAD);
          }
          bi0 = to_next[0] = from[0];
          bi1 = to_next[1] = from[1];
          from += 2;
          n_left_from -= 2;
          to_next += 2;
          n_left_to_next -= 2;

          b0 = vlib_get_buffer (vm, bi0);
          b1 = vlib_get_buffer (vm, bi1);
          if (is_ip4)
            {
              ip40 = vlib_buffer_get_current (b0);
              ip41 = vlib_buffer_get_current (b1);
            }
          else
            {
              ip60 = vlib_buffer_get_current (b0);
              ip61 = vlib_buffer_get_current (b1);
            }
          /* Setup packet for next IP feature */
          vnet_feature_next(vnet_buffer(b0)->sw_if_index[VLIB_RX], &next0, b0);
          vnet_feature_next(vnet_buffer(b1)->sw_if_index[VLIB_RX], &next1, b1);

          if (is_ip4)
            {
              /* Treat IP fragments as an "experimental" protocol for now
                 until IP fragment reassembly is supported */
              proto0 = ip4_is_fragment(ip40) ? 0xfe : ip40->protocol;
              proto1 = ip4_is_fragment(ip41) ? 0xfe : ip41->protocol;
            }
          else
            {
              proto0 = ip60->protocol;
              proto1 = ip61->protocol;
            }

          /* Process packet 0 */
          if (proto0 != IP_PROTOCOL_UDP)
            goto exit0; /* not a UDP packet */

          if (is_ip4)
            udp0 = ip4_next_header (ip40);
          else
            udp0 = ip6_next_header (ip60);

          if (udp0->dst_port != clib_host_to_net_u16 (UDP_DST_PORT_vxlan))
            goto exit0; /* not a VXLAN packet */

          /* Validate DIP against VTEPs */
          if (is_ip4)
            {
              if (addr4.as_u32 != ip40->dst_address.as_u32)
                {
                  if (!hash_get (vxm->vtep4, ip40->dst_address.as_u32))
                    goto exit0; /* no local VTEP for VXLAN packet */
                  addr4 = ip40->dst_address;
                }
            }
          else
            {
              if (!ip6_address_is_equal (&addr6, &ip60->dst_address))
                {
                  if (!hash_get_mem (vxm->vtep6, &ip60->dst_address))
                    goto exit0; /* no local VTEP for VXLAN packet */
                  addr6 = ip60->dst_address;
                }
            }

          flags0 = b0->flags;
          good_udp0 = (flags0 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;

          /* Don't verify UDP checksum for packets with explicit zero checksum. */
          good_udp0 |= udp0->checksum == 0;

          /* Verify UDP length */
          if (is_ip4)
            ip_len0 = clib_net_to_host_u16 (ip40->length);
          else
            ip_len0 = clib_net_to_host_u16 (ip60->payload_length);
          udp_len0 = clib_net_to_host_u16 (udp0->length);
          len_diff0 = ip_len0 - udp_len0;

          /* Verify UDP checksum */
          if (PREDICT_FALSE (!good_udp0))
            {
              if ((flags0 & VNET_BUFFER_F_L4_CHECKSUM_COMPUTED) == 0)
                {
                  if (is_ip4)
                    flags0 = ip4_tcp_udp_validate_checksum (vm, b0);
                  else
                    flags0 = ip6_tcp_udp_icmp_validate_checksum (vm, b0);
                  good_udp0 =
                    (flags0 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;
                }
            }

          if (is_ip4)
            {
              error0 = good_udp0 ? 0 : IP4_ERROR_UDP_CHECKSUM;
              error0 = (len_diff0 >= 0) ? error0 : IP4_ERROR_UDP_LENGTH;
            }
          else
            {
              error0 = good_udp0 ? 0 : IP6_ERROR_UDP_CHECKSUM;
              error0 = (len_diff0 >= 0) ? error0 : IP6_ERROR_UDP_LENGTH;
            }

          next0 = error0 ?
            IP_VXLAN_BYPASS_NEXT_DROP : IP_VXLAN_BYPASS_NEXT_VXLAN;
          b0->error = error0 ? error_node->errors[error0] : 0;

          /* vxlan-input node expects current_data at the VXLAN header */
          if (is_ip4)
            vlib_buffer_advance (b0, sizeof(ip4_header_t)+sizeof(udp_header_t));
          else
            vlib_buffer_advance (b0, sizeof(ip6_header_t)+sizeof(udp_header_t));

        exit0:
          /* Process packet 1 */
          if (proto1 != IP_PROTOCOL_UDP)
            goto exit1; /* not a UDP packet */

          if (is_ip4)
            udp1 = ip4_next_header (ip41);
          else
            udp1 = ip6_next_header (ip61);

          if (udp1->dst_port != clib_host_to_net_u16 (UDP_DST_PORT_vxlan))
            goto exit1; /* not a VXLAN packet */

          /* Validate DIP against VTEPs */
          if (is_ip4)
            {
              if (addr4.as_u32 != ip41->dst_address.as_u32)
                {
                  if (!hash_get (vxm->vtep4, ip41->dst_address.as_u32))
                    goto exit1; /* no local VTEP for VXLAN packet */
                  addr4 = ip41->dst_address;
                }
            }
          else
            {
              if (!ip6_address_is_equal (&addr6, &ip61->dst_address))
                {
                  if (!hash_get_mem (vxm->vtep6, &ip61->dst_address))
                    goto exit1; /* no local VTEP for VXLAN packet */
                  addr6 = ip61->dst_address;
                }
            }

          flags1 = b1->flags;
          good_udp1 = (flags1 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;

          /* Don't verify UDP checksum for packets with explicit zero checksum. */
          good_udp1 |= udp1->checksum == 0;

          /* Verify UDP length */
          if (is_ip4)
            ip_len1 = clib_net_to_host_u16 (ip41->length);
          else
            ip_len1 = clib_net_to_host_u16 (ip61->payload_length);
          udp_len1 = clib_net_to_host_u16 (udp1->length);
          len_diff1 = ip_len1 - udp_len1;

          /* Verify UDP checksum */
          if (PREDICT_FALSE (!good_udp1))
            {
              if ((flags1 & VNET_BUFFER_F_L4_CHECKSUM_COMPUTED) == 0)
                {
                  if (is_ip4)
                    flags1 = ip4_tcp_udp_validate_checksum (vm, b1);
                  else
                    flags1 = ip6_tcp_udp_icmp_validate_checksum (vm, b1);
                  good_udp1 =
                    (flags1 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;
                }
            }

          if (is_ip4)
            {
              error1 = good_udp1 ? 0 : IP4_ERROR_UDP_CHECKSUM;
              error1 = (len_diff1 >= 0) ? error1 : IP4_ERROR_UDP_LENGTH;
            }
          else
            {
              error1 = good_udp1 ? 0 : IP6_ERROR_UDP_CHECKSUM;
              error1 = (len_diff1 >= 0) ? error1 : IP6_ERROR_UDP_LENGTH;
            }

          next1 = error1 ?
            IP_VXLAN_BYPASS_NEXT_DROP : IP_VXLAN_BYPASS_NEXT_VXLAN;
          b1->error = error1 ? error_node->errors[error1] : 0;

          /* vxlan-input node expects current_data at the VXLAN header */
          if (is_ip4)
            vlib_buffer_advance (b1, sizeof(ip4_header_t)+sizeof(udp_header_t));
          else
            vlib_buffer_advance (b1, sizeof(ip6_header_t)+sizeof(udp_header_t));

        exit1:
          vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, bi1, next0, next1);
        }
      while (n_left_from > 0 && n_left_to_next > 0)
        {
          vlib_buffer_t * b0;
          ip4_header_t * ip40;
          ip6_header_t * ip60;
          udp_header_t * udp0;
          u32 bi0, ip_len0, udp_len0, flags0, next0;
          i32 len_diff0;
          u8 error0, good_udp0, proto0;

          bi0 = to_next[0] = from[0];
          from += 1;
          n_left_from -= 1;
          to_next += 1;
          n_left_to_next -= 1;

          b0 = vlib_get_buffer (vm, bi0);
          if (is_ip4)
            ip40 = vlib_buffer_get_current (b0);
          else
            ip60 = vlib_buffer_get_current (b0);

          /* Setup packet for next IP feature */
          vnet_feature_next(vnet_buffer(b0)->sw_if_index[VLIB_RX], &next0, b0);

          if (is_ip4)
            /* Treat IP4 fragments as an "experimental" protocol for now
               until IP fragment reassembly is supported */
            proto0 = ip4_is_fragment(ip40) ? 0xfe : ip40->protocol;
          else
            proto0 = ip60->protocol;

          if (proto0 != IP_PROTOCOL_UDP)
            goto exit; /* not a UDP packet */

          if (is_ip4)
            udp0 = ip4_next_header (ip40);
          else
            udp0 = ip6_next_header (ip60);

          if (udp0->dst_port != clib_host_to_net_u16 (UDP_DST_PORT_vxlan))
            goto exit; /* not a VXLAN packet */

          /* Validate DIP against VTEPs */
          if (is_ip4)
            {
              if (addr4.as_u32 != ip40->dst_address.as_u32)
                {
                  if (!hash_get (vxm->vtep4, ip40->dst_address.as_u32))
                    goto exit; /* no local VTEP for VXLAN packet */
                  addr4 = ip40->dst_address;
                }
            }
          else
            {
              if (!ip6_address_is_equal (&addr6, &ip60->dst_address))
                {
                  if (!hash_get_mem (vxm->vtep6, &ip60->dst_address))
                    goto exit; /* no local VTEP for VXLAN packet */
                  addr6 = ip60->dst_address;
                }
            }

          flags0 = b0->flags;
          good_udp0 = (flags0 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;

          /* Don't verify UDP checksum for packets with explicit zero checksum. */
          good_udp0 |= udp0->checksum == 0;

          /* Verify UDP length */
          if (is_ip4)
            ip_len0 = clib_net_to_host_u16 (ip40->length);
          else
            ip_len0 = clib_net_to_host_u16 (ip60->payload_length);
          udp_len0 = clib_net_to_host_u16 (udp0->length);
          len_diff0 = ip_len0 - udp_len0;

          /* Verify UDP checksum */
          if (PREDICT_FALSE (!good_udp0))
            {
              if ((flags0 & VNET_BUFFER_F_L4_CHECKSUM_COMPUTED) == 0)
                {
                  if (is_ip4)
                    flags0 = ip4_tcp_udp_validate_checksum (vm, b0);
                  else
                    flags0 = ip6_tcp_udp_icmp_validate_checksum (vm, b0);
                  good_udp0 =
                    (flags0 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;
                }
            }

          if (is_ip4)
            {
              error0 = good_udp0 ? 0 : IP4_ERROR_UDP_CHECKSUM;
              error0 = (len_diff0 >= 0) ? error0 : IP4_ERROR_UDP_LENGTH;
            }
          else
            {
              error0 = good_udp0 ? 0 : IP6_ERROR_UDP_CHECKSUM;
              error0 = (len_diff0 >= 0) ? error0 : IP6_ERROR_UDP_LENGTH;
            }

          next0 = error0 ?
            IP_VXLAN_BYPASS_NEXT_DROP : IP_VXLAN_BYPASS_NEXT_VXLAN;
          b0->error = error0 ? error_node->errors[error0] : 0;

          /* vxlan-input node expects current_data at the VXLAN header */
          if (is_ip4)
            vlib_buffer_advance (b0, sizeof(ip4_header_t)+sizeof(udp_header_t));
          else
            vlib_buffer_advance (b0, sizeof(ip6_header_t)+sizeof(udp_header_t));

        exit:
          vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, next0);
        }

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

  return frame->n_vectors;
}
static uword
ip4_vxlan_bypass (vlib_main_t * vm,
                  vlib_node_runtime_t * node,
                  vlib_frame_t * frame)
{
  return ip_vxlan_bypass_inline (vm, node, frame, /* is_ip4 */ 1);
}
VLIB_REGISTER_NODE (ip4_vxlan_bypass_node) = {
  .function = ip4_vxlan_bypass,
  .name = "ip4-vxlan-bypass",
  .vector_size = sizeof (u32),

  .n_next_nodes = IP_VXLAN_BYPASS_N_NEXT,
  .next_nodes = {
    [IP_VXLAN_BYPASS_NEXT_DROP] = "error-drop",
    [IP_VXLAN_BYPASS_NEXT_VXLAN] = "vxlan4-input",
  },

  .format_buffer = format_ip4_header,
  .format_trace = format_ip4_forward_next_trace,
};

VLIB_NODE_FUNCTION_MULTIARCH (ip4_vxlan_bypass_node,ip4_vxlan_bypass)

/* Dummy init function to get us linked in. */
clib_error_t * ip4_vxlan_bypass_init (vlib_main_t * vm)
{ return 0; }

VLIB_INIT_FUNCTION (ip4_vxlan_bypass_init);
static uword
ip6_vxlan_bypass (vlib_main_t * vm,
                  vlib_node_runtime_t * node,
                  vlib_frame_t * frame)
{
  return ip_vxlan_bypass_inline (vm, node, frame, /* is_ip4 */ 0);
}

VLIB_REGISTER_NODE (ip6_vxlan_bypass_node) = {
  .function = ip6_vxlan_bypass,
  .name = "ip6-vxlan-bypass",
  .vector_size = sizeof (u32),

  .n_next_nodes = IP_VXLAN_BYPASS_N_NEXT,
  .next_nodes = {
    [IP_VXLAN_BYPASS_NEXT_DROP] = "error-drop",
    [IP_VXLAN_BYPASS_NEXT_VXLAN] = "vxlan6-input",
  },

  .format_buffer = format_ip6_header,
  .format_trace = format_ip6_forward_next_trace,
};

VLIB_NODE_FUNCTION_MULTIARCH (ip6_vxlan_bypass_node,ip6_vxlan_bypass)

/* Dummy init function to get us linked in. */
clib_error_t * ip6_vxlan_bypass_init (vlib_main_t * vm)
{ return 0; }

VLIB_INIT_FUNCTION (ip6_vxlan_bypass_init);