/*
 * decap.c: vxlan gbp tunnel decap packet processing
 *
 * Copyright (c) 2018 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <vlib/vlib.h>
#include <vnet/pg/pg.h>
#include <vnet/vxlan-gbp/vxlan_gbp.h>
vlib_node_registration_t vxlan4_gbp_input_node;
vlib_node_registration_t vxlan6_gbp_input_node;

typedef struct
{
  u32 next_index;
  u32 tunnel_index;
  u32 error;
  u32 vni;
  u16 sclass;
  u8 flags;
} vxlan_gbp_rx_trace_t;

static u8 *
format_vxlan_gbp_rx_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  vxlan_gbp_rx_trace_t *t = va_arg (*args, vxlan_gbp_rx_trace_t *);

  if (t->tunnel_index == ~0)
    return format (s,
                   "VXLAN_GBP decap error - tunnel for vni %d does not exist",
                   t->vni);
  return format (s,
                 "VXLAN_GBP decap from vxlan_gbp_tunnel%d vni %d sclass %d"
                 " flags %U next %d error %d",
                 t->tunnel_index, t->vni, t->sclass,
                 format_vxlan_gbp_header_gpflags, t->flags,
                 t->next_index, t->error);
}

always_inline u32
buf_fib_index (vlib_buffer_t * b, u32 is_ip4)
{
  u32 sw_if_index = vnet_buffer (b)->sw_if_index[VLIB_TX];
  if (sw_if_index != (u32) ~ 0)
    return sw_if_index;

  u32 *fib_index_by_sw_if_index = is_ip4 ?
    ip4_main.fib_index_by_sw_if_index : ip6_main.fib_index_by_sw_if_index;
  sw_if_index = vnet_buffer (b)->sw_if_index[VLIB_RX];

  return vec_elt (fib_index_by_sw_if_index, sw_if_index);
}

typedef vxlan4_gbp_tunnel_key_t last_tunnel_cache4;
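
/*
 * One-entry cache of the most recent successful lookup: decap traffic
 * tends to arrive in runs from the same remote VTEP, so comparing against
 * the previous key usually lets us skip the bihash search. The cache is a
 * stack variable in the input routine, so it only persists for one frame.
 */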
always_inline vxlan_gbp_tunnel_t *
vxlan4_gbp_find_tunnel (vxlan_gbp_main_t * vxm, last_tunnel_cache4 * cache,
                        u32 fib_index, ip4_header_t * ip4_0,
                        vxlan_gbp_header_t * vxlan_gbp0)
{
  /*
   * Check unicast first, since that's where most of the traffic comes from.
   * Make sure the VXLAN_GBP tunnel exists according to the packet SIP, DIP
   * and VNI.
   */
  vxlan4_gbp_tunnel_key_t key4;
  int rv;

  key4.key[1] = ((u64) fib_index << 32) | vxlan_gbp0->vni_reserved;
  key4.key[0] = (((u64) ip4_0->dst_address.as_u32 << 32) |
                 ip4_0->src_address.as_u32);

  if (PREDICT_FALSE (key4.key[0] != cache->key[0] ||
                     key4.key[1] != cache->key[1]))
    {
      rv = clib_bihash_search_inline_16_8 (&vxm->vxlan4_gbp_tunnel_by_key,
                                           &key4);
      if (PREDICT_FALSE (rv == 0))
        {
          *cache = key4;
          return (pool_elt_at_index (vxm->tunnels, cache->value));
        }
    }
  else
    {
      return (pool_elt_at_index (vxm->tunnels, cache->value));
    }

  /* No unicast match - try multicast */
  if (PREDICT_TRUE (!ip4_address_is_multicast (&ip4_0->dst_address)))
    return NULL;

  key4.key[0] = ip4_0->dst_address.as_u32;
  /* Make sure the mcast VXLAN_GBP tunnel exists by packet DIP and VNI */
  rv = clib_bihash_search_inline_16_8 (&vxm->vxlan4_gbp_tunnel_by_key, &key4);
  if (PREDICT_FALSE (rv != 0))
    return NULL;

  return (pool_elt_at_index (vxm->tunnels, key4.value));
}

typedef vxlan6_gbp_tunnel_key_t last_tunnel_cache6;
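
/*
 * The IPv6 key is three u64s: the 128-bit source address plus a word
 * holding the fib index and VNI. There is no room for the destination
 * address in the 24-byte key, so the DIP is validated separately against
 * the tunnel's configured source address in the function below.
 */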
always_inline vxlan_gbp_tunnel_t *
vxlan6_gbp_find_tunnel (vxlan_gbp_main_t * vxm, last_tunnel_cache6 * cache,
                        u32 fib_index, ip6_header_t * ip6_0,
                        vxlan_gbp_header_t * vxlan_gbp0)
{
  /* Make sure the VXLAN_GBP tunnel exists according to packet SIP and VNI */
  vxlan6_gbp_tunnel_key_t key6 = {
    .key = {
      [0] = ip6_0->src_address.as_u64[0],
      [1] = ip6_0->src_address.as_u64[1],
      [2] = (((u64) fib_index) << 32) | vxlan_gbp0->vni_reserved,
    },
  };
  int rv;

  if (PREDICT_FALSE
      (clib_bihash_key_compare_24_8 (key6.key, cache->key) == 0))
    {
      rv = clib_bihash_search_inline_24_8 (&vxm->vxlan6_gbp_tunnel_by_key,
                                           &key6);
      if (PREDICT_FALSE (rv != 0))
        return NULL;
      *cache = key6;
    }
  vxlan_gbp_tunnel_t *t0 = pool_elt_at_index (vxm->tunnels, cache->value);

  /* Validate the VXLAN_GBP tunnel SIP against the packet DIP */
  if (PREDICT_FALSE
      (!ip6_address_is_equal (&ip6_0->dst_address, &t0->src.ip6)))
    {
      /* No unicast match - try multicast */
      if (PREDICT_TRUE (!ip6_address_is_multicast (&ip6_0->dst_address)))
        return NULL;

      /* Make sure the mcast VXLAN_GBP tunnel exists by packet DIP and VNI */
      key6.key[0] = ip6_0->dst_address.as_u64[0];
      key6.key[1] = ip6_0->dst_address.as_u64[1];
      rv = clib_bihash_search_inline_24_8 (&vxm->vxlan6_gbp_tunnel_by_key,
                                           &key6);
      if (PREDICT_FALSE (rv != 0))
        return NULL;
      t0 = pool_elt_at_index (vxm->tunnels, key6.value);
    }

  return t0;
}

always_inline vxlan_gbp_input_next_t
vxlan_gbp_tunnel_get_next (const vxlan_gbp_tunnel_t * t, vlib_buffer_t * b0)
{
  if (VXLAN_GBP_TUNNEL_MODE_L2 == t->mode)
    return (VXLAN_GBP_INPUT_NEXT_L2_INPUT);
  else
    {
      ethernet_header_t *e0;
      u16 type0;

      e0 = vlib_buffer_get_current (b0);
      vlib_buffer_advance (b0, sizeof (*e0));
      type0 = clib_net_to_host_u16 (e0->type);
      switch (type0)
        {
        case ETHERNET_TYPE_IP4:
          return (VXLAN_GBP_INPUT_NEXT_IP4_INPUT);
        case ETHERNET_TYPE_IP6:
          return (VXLAN_GBP_INPUT_NEXT_IP6_INPUT);
        }
    }
  return (VXLAN_GBP_INPUT_NEXT_DROP);
}
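
/*
 * Shared worker for the vxlan4-gbp-input and vxlan6-gbp-input nodes.
 * The UDP layer hands buffers over with current_data already pointing at
 * the VXLAN-GBP header. The usual VPP pattern follows: a dual-buffer loop
 * with prefetch for the common case, then a single-buffer cleanup loop.
 */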
always_inline uword
vxlan_gbp_input (vlib_main_t * vm,
                 vlib_node_runtime_t * node,
                 vlib_frame_t * from_frame, u8 is_ip4)
{
  vxlan_gbp_main_t *vxm = &vxlan_gbp_main;
  vnet_main_t *vnm = vxm->vnet_main;
  vnet_interface_main_t *im = &vnm->interface_main;
  vlib_combined_counter_main_t *rx_counter =
    im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX;
  vlib_combined_counter_main_t *drop_counter =
    im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_DROP;
  last_tunnel_cache4 last4;
  last_tunnel_cache6 last6;
  u32 pkts_decapsulated = 0;
  u32 thread_index = vlib_get_thread_index ();

  if (is_ip4)
    clib_memset (&last4, 0xff, sizeof last4);
  else
    clib_memset (&last6, 0xff, sizeof last6);
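
  /*
   * The caches start out all-ones, which can never match a real key, so
   * the first packet of the frame always takes the bihash lookup path.
   */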

  u32 next_index = node->cached_next_index;

  u32 *from = vlib_frame_vector_args (from_frame);
  u32 n_left_from = from_frame->n_vectors;

  while (n_left_from > 0)
    {
      u32 *to_next, n_left_to_next;
      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      while (n_left_from >= 4 && n_left_to_next >= 2)
        {
          /* Prefetch next iteration. */
          {
            vlib_buffer_t *p2, *p3;

            p2 = vlib_get_buffer (vm, from[2]);
            p3 = vlib_get_buffer (vm, from[3]);

            vlib_prefetch_buffer_header (p2, LOAD);
            vlib_prefetch_buffer_header (p3, LOAD);

            CLIB_PREFETCH (p2->data, 2 * CLIB_CACHE_LINE_BYTES, LOAD);
            CLIB_PREFETCH (p3->data, 2 * CLIB_CACHE_LINE_BYTES, LOAD);
          }

          u32 bi0 = to_next[0] = from[0];
          u32 bi1 = to_next[1] = from[1];
          from += 2;
          to_next += 2;
          n_left_to_next -= 2;
          n_left_from -= 2;

          vlib_buffer_t *b0, *b1;
          b0 = vlib_get_buffer (vm, bi0);
          b1 = vlib_get_buffer (vm, bi1);

          /* udp leaves current_data pointing at the vxlan_gbp header */
          void *cur0 = vlib_buffer_get_current (b0);
          void *cur1 = vlib_buffer_get_current (b1);
          vxlan_gbp_header_t *vxlan_gbp0 = cur0;
          vxlan_gbp_header_t *vxlan_gbp1 = cur1;

          ip4_header_t *ip4_0, *ip4_1;
          ip6_header_t *ip6_0, *ip6_1;
          if (is_ip4)
            {
              ip4_0 = cur0 - sizeof (udp_header_t) - sizeof (ip4_header_t);
              ip4_1 = cur1 - sizeof (udp_header_t) - sizeof (ip4_header_t);
            }
          else
            {
              ip6_0 = cur0 - sizeof (udp_header_t) - sizeof (ip6_header_t);
              ip6_1 = cur1 - sizeof (udp_header_t) - sizeof (ip6_header_t);
            }

          u32 fi0 = buf_fib_index (b0, is_ip4);
          u32 fi1 = buf_fib_index (b1, is_ip4);

          vxlan_gbp_tunnel_t *t0, *t1;
          if (is_ip4)
            {
              t0 = vxlan4_gbp_find_tunnel (vxm, &last4, fi0, ip4_0,
                                           vxlan_gbp0);
              t1 = vxlan4_gbp_find_tunnel (vxm, &last4, fi1, ip4_1,
                                           vxlan_gbp1);
            }
          else
            {
              t0 = vxlan6_gbp_find_tunnel (vxm, &last6, fi0, ip6_0,
                                           vxlan_gbp0);
              t1 = vxlan6_gbp_find_tunnel (vxm, &last6, fi1, ip6_1,
                                           vxlan_gbp1);
            }

          u32 len0 = vlib_buffer_length_in_chain (vm, b0);
          u32 len1 = vlib_buffer_length_in_chain (vm, b1);

          vxlan_gbp_input_next_t next0, next1;
          u8 error0 = 0, error1 = 0;
          u8 flags0 = vxlan_gbp_get_flags (vxlan_gbp0);
          u8 flags1 = vxlan_gbp_get_flags (vxlan_gbp1);

          /* Pop the vxlan_gbp header; required to make the l2 tag push /
             pop code work on l2 subifs */
          vlib_buffer_advance (b0, sizeof *vxlan_gbp0);
          vlib_buffer_advance (b1, sizeof *vxlan_gbp1);
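
          /*
           * A conforming VXLAN-GBP header has exactly the I bit (VNI
           * present) and the G bit (group policy present) set; any other
           * flag combination is counted and dropped as BAD_FLAGS below.
           */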
          /* Drop if no tunnel was found (this also covers an encap-fib
             mismatch, which makes the lookup fail) or if the flags are bad */
          if (PREDICT_FALSE
              (t0 == 0 || flags0 != (VXLAN_GBP_FLAGS_I | VXLAN_GBP_FLAGS_G)))
            {
              if (t0 != 0
                  && flags0 != (VXLAN_GBP_FLAGS_I | VXLAN_GBP_FLAGS_G))
                {
                  error0 = VXLAN_GBP_ERROR_BAD_FLAGS;
                  vlib_increment_combined_counter
                    (drop_counter, thread_index, t0->sw_if_index, 1, len0);
                  next0 = VXLAN_GBP_INPUT_NEXT_DROP;
                }
              else
                {
                  error0 = VXLAN_GBP_ERROR_NO_SUCH_TUNNEL;
                  next0 = VXLAN_GBP_INPUT_NEXT_NO_TUNNEL;
                }
              b0->error = node->errors[error0];
            }
          else
            {
              next0 = vxlan_gbp_tunnel_get_next (t0, b0);

              /* Set packet input sw_if_index to unicast VXLAN_GBP tunnel
                 for learning */
              vnet_buffer (b0)->sw_if_index[VLIB_RX] = t0->sw_if_index;
              vlib_increment_combined_counter
                (rx_counter, thread_index, t0->sw_if_index, 1, len0);
              pkts_decapsulated++;
            }
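
          /*
           * Carry the group-policy flags and source class (sclass) from
           * the header in buffer metadata; the GBP nodes further down the
           * graph use these to apply policy to the decapsulated packet.
           */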
          vnet_buffer2 (b0)->gbp.flags = vxlan_gbp_get_gpflags (vxlan_gbp0);
          vnet_buffer2 (b0)->gbp.sclass = vxlan_gbp_get_sclass (vxlan_gbp0);

          if (PREDICT_FALSE
              (t1 == 0 || flags1 != (VXLAN_GBP_FLAGS_I | VXLAN_GBP_FLAGS_G)))
            {
              if (t1 != 0
                  && flags1 != (VXLAN_GBP_FLAGS_I | VXLAN_GBP_FLAGS_G))
                {
                  error1 = VXLAN_GBP_ERROR_BAD_FLAGS;
                  vlib_increment_combined_counter
                    (drop_counter, thread_index, t1->sw_if_index, 1, len1);
                  next1 = VXLAN_GBP_INPUT_NEXT_DROP;
                }
              else
                {
                  error1 = VXLAN_GBP_ERROR_NO_SUCH_TUNNEL;
                  next1 = VXLAN_GBP_INPUT_NEXT_NO_TUNNEL;
                }
              b1->error = node->errors[error1];
            }
          else
            {
              next1 = vxlan_gbp_tunnel_get_next (t1, b1);

              /* Set packet input sw_if_index to unicast VXLAN_GBP tunnel
                 for learning */
              vnet_buffer (b1)->sw_if_index[VLIB_RX] = t1->sw_if_index;
              vlib_increment_combined_counter
                (rx_counter, thread_index, t1->sw_if_index, 1, len1);
              pkts_decapsulated++;
            }

          vnet_buffer2 (b1)->gbp.flags = vxlan_gbp_get_gpflags (vxlan_gbp1);
          vnet_buffer2 (b1)->gbp.sclass = vxlan_gbp_get_sclass (vxlan_gbp1);

          /* Required to make the l2 tag push / pop code work on l2 subifs */
          vnet_update_l2_len (b0);
          vnet_update_l2_len (b1);

          if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
            {
              vxlan_gbp_rx_trace_t *tr =
                vlib_add_trace (vm, node, b0, sizeof (*tr));
              tr->next_index = next0;
              tr->error = error0;
              tr->tunnel_index = t0 == 0 ? ~0 : t0 - vxm->tunnels;
              tr->vni = vxlan_gbp_get_vni (vxlan_gbp0);
              tr->sclass = vxlan_gbp_get_sclass (vxlan_gbp0);
              tr->flags = vxlan_gbp_get_gpflags (vxlan_gbp0);
            }
          if (PREDICT_FALSE (b1->flags & VLIB_BUFFER_IS_TRACED))
            {
              vxlan_gbp_rx_trace_t *tr =
                vlib_add_trace (vm, node, b1, sizeof (*tr));
              tr->next_index = next1;
              tr->error = error1;
              tr->tunnel_index = t1 == 0 ? ~0 : t1 - vxm->tunnels;
              tr->vni = vxlan_gbp_get_vni (vxlan_gbp1);
              tr->sclass = vxlan_gbp_get_sclass (vxlan_gbp1);
              tr->flags = vxlan_gbp_get_gpflags (vxlan_gbp1);
            }

          vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, bi1, next0, next1);
        }

      while (n_left_from > 0 && n_left_to_next > 0)
        {
          u32 bi0 = to_next[0] = from[0];
          from += 1;
          to_next += 1;
          n_left_from -= 1;
          n_left_to_next -= 1;

          vlib_buffer_t *b0 = vlib_get_buffer (vm, bi0);

          /* udp leaves current_data pointing at the vxlan_gbp header */
          void *cur0 = vlib_buffer_get_current (b0);
          vxlan_gbp_header_t *vxlan_gbp0 = cur0;

          ip4_header_t *ip4_0;
          ip6_header_t *ip6_0;
          if (is_ip4)
            ip4_0 = cur0 - sizeof (udp_header_t) - sizeof (ip4_header_t);
          else
            ip6_0 = cur0 - sizeof (udp_header_t) - sizeof (ip6_header_t);

          u32 fi0 = buf_fib_index (b0, is_ip4);

          vxlan_gbp_tunnel_t *t0;
          if (is_ip4)
            t0 = vxlan4_gbp_find_tunnel (vxm, &last4, fi0, ip4_0, vxlan_gbp0);
          else
            t0 = vxlan6_gbp_find_tunnel (vxm, &last6, fi0, ip6_0, vxlan_gbp0);

          uword len0 = vlib_buffer_length_in_chain (vm, b0);

          vxlan_gbp_input_next_t next0;
          u8 error0 = 0;
          u8 flags0 = vxlan_gbp_get_flags (vxlan_gbp0);

          /* Pop the vxlan_gbp header */
          vlib_buffer_advance (b0, sizeof (*vxlan_gbp0));

          /* Drop if no tunnel was found (this also covers an encap-fib
             mismatch, which makes the lookup fail) or if the flags are bad */
          if (PREDICT_FALSE
              (t0 == 0 || flags0 != (VXLAN_GBP_FLAGS_I | VXLAN_GBP_FLAGS_G)))
            {
              if (t0 != 0
                  && flags0 != (VXLAN_GBP_FLAGS_I | VXLAN_GBP_FLAGS_G))
                {
                  error0 = VXLAN_GBP_ERROR_BAD_FLAGS;
                  vlib_increment_combined_counter
                    (drop_counter, thread_index, t0->sw_if_index, 1, len0);
                  next0 = VXLAN_GBP_INPUT_NEXT_DROP;
                }
              else
                {
                  error0 = VXLAN_GBP_ERROR_NO_SUCH_TUNNEL;
                  next0 = VXLAN_GBP_INPUT_NEXT_NO_TUNNEL;
                }
              b0->error = node->errors[error0];
            }
          else
            {
              next0 = vxlan_gbp_tunnel_get_next (t0, b0);

              /* Set packet input sw_if_index to unicast VXLAN_GBP tunnel
                 for learning */
              vnet_buffer (b0)->sw_if_index[VLIB_RX] = t0->sw_if_index;
              vlib_increment_combined_counter
                (rx_counter, thread_index, t0->sw_if_index, 1, len0);
              pkts_decapsulated++;
            }

          vnet_buffer2 (b0)->gbp.flags = vxlan_gbp_get_gpflags (vxlan_gbp0);
          vnet_buffer2 (b0)->gbp.sclass = vxlan_gbp_get_sclass (vxlan_gbp0);

          /* Required to make the l2 tag push / pop code work on l2 subifs */
          vnet_update_l2_len (b0);

          if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
            {
              vxlan_gbp_rx_trace_t *tr
                = vlib_add_trace (vm, node, b0, sizeof (*tr));
              tr->next_index = next0;
              tr->error = error0;
              tr->tunnel_index = t0 == 0 ? ~0 : t0 - vxm->tunnels;
              tr->vni = vxlan_gbp_get_vni (vxlan_gbp0);
              tr->sclass = vxlan_gbp_get_sclass (vxlan_gbp0);
              tr->flags = vxlan_gbp_get_gpflags (vxlan_gbp0);
            }

          vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, next0);
        }

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

  /* Do we still need this now that tunnel tx stats is kept? */
  u32 node_idx =
    is_ip4 ? vxlan4_gbp_input_node.index : vxlan6_gbp_input_node.index;
  vlib_node_increment_counter (vm, node_idx, VXLAN_GBP_ERROR_DECAPSULATED,
                               pkts_decapsulated);

  return from_frame->n_vectors;
}

static uword
vxlan4_gbp_input (vlib_main_t * vm,
                  vlib_node_runtime_t * node, vlib_frame_t * from_frame)
{
  return vxlan_gbp_input (vm, node, from_frame, /* is_ip4 */ 1);
}

static uword
vxlan6_gbp_input (vlib_main_t * vm,
                  vlib_node_runtime_t * node, vlib_frame_t * from_frame)
{
  return vxlan_gbp_input (vm, node, from_frame, /* is_ip4 */ 0);
}

static char *vxlan_gbp_error_strings[] = {
#define vxlan_gbp_error(n,s) s,
#include <vnet/vxlan-gbp/vxlan_gbp_error.def>
#undef vxlan_gbp_error
};

VLIB_REGISTER_NODE (vxlan4_gbp_input_node) =
{
  .function = vxlan4_gbp_input,
  .name = "vxlan4-gbp-input",
  .vector_size = sizeof (u32),
  .n_errors = VXLAN_GBP_N_ERROR,
  .error_strings = vxlan_gbp_error_strings,
  .n_next_nodes = VXLAN_GBP_INPUT_N_NEXT,
  .format_trace = format_vxlan_gbp_rx_trace,
  .next_nodes = {
#define _(s,n) [VXLAN_GBP_INPUT_NEXT_##s] = n,
    foreach_vxlan_gbp_input_next
#undef _
  },
};

VLIB_NODE_FUNCTION_MULTIARCH (vxlan4_gbp_input_node, vxlan4_gbp_input)

VLIB_REGISTER_NODE (vxlan6_gbp_input_node) =
{
  .function = vxlan6_gbp_input,
  .name = "vxlan6-gbp-input",
  .vector_size = sizeof (u32),
  .n_errors = VXLAN_GBP_N_ERROR,
  .error_strings = vxlan_gbp_error_strings,
  .n_next_nodes = VXLAN_GBP_INPUT_N_NEXT,
  .next_nodes = {
#define _(s,n) [VXLAN_GBP_INPUT_NEXT_##s] = n,
    foreach_vxlan_gbp_input_next
#undef _
  },
  .format_trace = format_vxlan_gbp_rx_trace,
};

VLIB_NODE_FUNCTION_MULTIARCH (vxlan6_gbp_input_node, vxlan6_gbp_input)

typedef enum
{
  IP_VXLAN_GBP_BYPASS_NEXT_DROP,
  IP_VXLAN_GBP_BYPASS_NEXT_VXLAN_GBP,
  IP_VXLAN_GBP_BYPASS_N_NEXT,
} ip_vxlan_gbp_bypass_next_t;
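
/*
 * The bypass nodes below are meant to be enabled as input features: they
 * let UDP packets addressed to a local VTEP jump straight to
 * vxlan-gbp-input, skipping the regular FIB lookup, after checking that
 * the packet is a plausible VXLAN-GBP datagram (UDP port, local VTEP
 * address, UDP length and checksum). All other traffic falls through to
 * the next feature unchanged.
 */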
always_inline uword
ip_vxlan_gbp_bypass_inline (vlib_main_t * vm,
                            vlib_node_runtime_t * node,
                            vlib_frame_t * frame, u32 is_ip4)
{
  vxlan_gbp_main_t *vxm = &vxlan_gbp_main;
  u32 *from, *to_next, n_left_from, n_left_to_next, next_index;
  vlib_node_runtime_t *error_node =
    vlib_node_get_runtime (vm, ip4_input_node.index);
  ip4_address_t addr4;  /* last IPv4 address matching a local VTEP address */
  ip6_address_t addr6;  /* last IPv6 address matching a local VTEP address */

  from = vlib_frame_vector_args (frame);
  n_left_from = frame->n_vectors;
  next_index = node->cached_next_index;

  if (node->flags & VLIB_NODE_FLAG_TRACE)
    ip4_forward_next_trace (vm, node, frame, VLIB_TX);

  if (is_ip4)
    addr4.data_u32 = ~0;
  else
    ip6_address_set_zero (&addr6);
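
  /*
   * addr4/addr6 cache the last destination address that matched a local
   * VTEP, so a run of packets to the same VTEP costs one compare instead
   * of a hash probe each. They start out as ~0 / :: so the first packet
   * always takes the hash lookup.
   */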

  while (n_left_from > 0)
    {
      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      while (n_left_from >= 4 && n_left_to_next >= 2)
        {
          vlib_buffer_t *b0, *b1;
          ip4_header_t *ip40, *ip41;
          ip6_header_t *ip60, *ip61;
          udp_header_t *udp0, *udp1;
          u32 bi0, ip_len0, udp_len0, flags0, next0;
          u32 bi1, ip_len1, udp_len1, flags1, next1;
          i32 len_diff0, len_diff1;
          u8 error0, good_udp0, proto0;
          u8 error1, good_udp1, proto1;

          /* Prefetch next iteration. */
          {
            vlib_buffer_t *p2, *p3;

            p2 = vlib_get_buffer (vm, from[2]);
            p3 = vlib_get_buffer (vm, from[3]);

            vlib_prefetch_buffer_header (p2, LOAD);
            vlib_prefetch_buffer_header (p3, LOAD);

            CLIB_PREFETCH (p2->data, 2 * CLIB_CACHE_LINE_BYTES, LOAD);
            CLIB_PREFETCH (p3->data, 2 * CLIB_CACHE_LINE_BYTES, LOAD);
          }

          bi0 = to_next[0] = from[0];
          bi1 = to_next[1] = from[1];
          from += 2;
          to_next += 2;
          n_left_from -= 2;
          n_left_to_next -= 2;

          b0 = vlib_get_buffer (vm, bi0);
          b1 = vlib_get_buffer (vm, bi1);
          if (is_ip4)
            {
              ip40 = vlib_buffer_get_current (b0);
              ip41 = vlib_buffer_get_current (b1);
            }
          else
            {
              ip60 = vlib_buffer_get_current (b0);
              ip61 = vlib_buffer_get_current (b1);
            }

          /* Setup packet for next IP feature */
          vnet_feature_next (&next0, b0);
          vnet_feature_next (&next1, b1);

          if (is_ip4)
            {
              /* Treat IP frag packets as "experimental" protocol for now
                 until support of IP frag reassembly is implemented */
              proto0 = ip4_is_fragment (ip40) ? 0xfe : ip40->protocol;
              proto1 = ip4_is_fragment (ip41) ? 0xfe : ip41->protocol;
            }
          else
            {
              proto0 = ip60->protocol;
              proto1 = ip61->protocol;
            }

          /* Process packet 0 */
          if (proto0 != IP_PROTOCOL_UDP)
            goto exit0;         /* not UDP packet */

          if (is_ip4)
            udp0 = ip4_next_header (ip40);
          else
            udp0 = ip6_next_header (ip60);

          if (udp0->dst_port != clib_host_to_net_u16 (UDP_DST_PORT_vxlan_gbp))
            goto exit0;         /* not VXLAN_GBP packet */

          /* Validate DIP against VTEPs */
          if (is_ip4)
            {
              if (addr4.as_u32 != ip40->dst_address.as_u32)
                {
                  if (!hash_get (vxm->vtep4, ip40->dst_address.as_u32))
                    goto exit0; /* no local VTEP for VXLAN_GBP packet */
                  addr4 = ip40->dst_address;
                }
            }
          else
            {
              if (!ip6_address_is_equal (&addr6, &ip60->dst_address))
                {
                  if (!hash_get_mem (vxm->vtep6, &ip60->dst_address))
                    goto exit0; /* no local VTEP for VXLAN_GBP packet */
                  addr6 = ip60->dst_address;
                }
            }

          flags0 = b0->flags;
          good_udp0 = (flags0 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;

          /* Don't verify UDP checksum for packets with explicit zero checksum. */
          good_udp0 |= udp0->checksum == 0;

          /* Verify UDP length */
          if (is_ip4)
            ip_len0 = clib_net_to_host_u16 (ip40->length);
          else
            ip_len0 = clib_net_to_host_u16 (ip60->payload_length);
          udp_len0 = clib_net_to_host_u16 (udp0->length);
          len_diff0 = ip_len0 - udp_len0;

          /* Verify UDP checksum */
          if (PREDICT_FALSE (!good_udp0))
            {
              if ((flags0 & VNET_BUFFER_F_L4_CHECKSUM_COMPUTED) == 0)
                {
                  if (is_ip4)
                    flags0 = ip4_tcp_udp_validate_checksum (vm, b0);
                  else
                    flags0 = ip6_tcp_udp_icmp_validate_checksum (vm, b0);
                  good_udp0 =
                    (flags0 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;
                }
            }

          if (is_ip4)
            {
              error0 = good_udp0 ? 0 : IP4_ERROR_UDP_CHECKSUM;
              error0 = (len_diff0 >= 0) ? error0 : IP4_ERROR_UDP_LENGTH;
            }
          else
            {
              error0 = good_udp0 ? 0 : IP6_ERROR_UDP_CHECKSUM;
              error0 = (len_diff0 >= 0) ? error0 : IP6_ERROR_UDP_LENGTH;
            }

          next0 = error0 ?
            IP_VXLAN_GBP_BYPASS_NEXT_DROP :
            IP_VXLAN_GBP_BYPASS_NEXT_VXLAN_GBP;
          b0->error = error0 ? error_node->errors[error0] : 0;

          /* vxlan-gbp-input node expects current at VXLAN_GBP header */
          if (is_ip4)
            vlib_buffer_advance (b0,
                                 sizeof (ip4_header_t) +
                                 sizeof (udp_header_t));
          else
            vlib_buffer_advance (b0,
                                 sizeof (ip6_header_t) +
                                 sizeof (udp_header_t));

        exit0:
          /* Process packet 1 */
          if (proto1 != IP_PROTOCOL_UDP)
            goto exit1;         /* not UDP packet */

          if (is_ip4)
            udp1 = ip4_next_header (ip41);
          else
            udp1 = ip6_next_header (ip61);

          if (udp1->dst_port != clib_host_to_net_u16 (UDP_DST_PORT_vxlan_gbp))
            goto exit1;         /* not VXLAN_GBP packet */

          /* Validate DIP against VTEPs */
          if (is_ip4)
            {
              if (addr4.as_u32 != ip41->dst_address.as_u32)
                {
                  if (!hash_get (vxm->vtep4, ip41->dst_address.as_u32))
                    goto exit1; /* no local VTEP for VXLAN_GBP packet */
                  addr4 = ip41->dst_address;
                }
            }
          else
            {
              if (!ip6_address_is_equal (&addr6, &ip61->dst_address))
                {
                  if (!hash_get_mem (vxm->vtep6, &ip61->dst_address))
                    goto exit1; /* no local VTEP for VXLAN_GBP packet */
                  addr6 = ip61->dst_address;
                }
            }

          flags1 = b1->flags;
          good_udp1 = (flags1 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;

          /* Don't verify UDP checksum for packets with explicit zero checksum. */
          good_udp1 |= udp1->checksum == 0;

          /* Verify UDP length */
          if (is_ip4)
            ip_len1 = clib_net_to_host_u16 (ip41->length);
          else
            ip_len1 = clib_net_to_host_u16 (ip61->payload_length);
          udp_len1 = clib_net_to_host_u16 (udp1->length);
          len_diff1 = ip_len1 - udp_len1;

          /* Verify UDP checksum */
          if (PREDICT_FALSE (!good_udp1))
            {
              if ((flags1 & VNET_BUFFER_F_L4_CHECKSUM_COMPUTED) == 0)
                {
                  if (is_ip4)
                    flags1 = ip4_tcp_udp_validate_checksum (vm, b1);
                  else
                    flags1 = ip6_tcp_udp_icmp_validate_checksum (vm, b1);
                  good_udp1 =
                    (flags1 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;
                }
            }

          if (is_ip4)
            {
              error1 = good_udp1 ? 0 : IP4_ERROR_UDP_CHECKSUM;
              error1 = (len_diff1 >= 0) ? error1 : IP4_ERROR_UDP_LENGTH;
            }
          else
            {
              error1 = good_udp1 ? 0 : IP6_ERROR_UDP_CHECKSUM;
              error1 = (len_diff1 >= 0) ? error1 : IP6_ERROR_UDP_LENGTH;
            }

          next1 = error1 ?
            IP_VXLAN_GBP_BYPASS_NEXT_DROP :
            IP_VXLAN_GBP_BYPASS_NEXT_VXLAN_GBP;
          b1->error = error1 ? error_node->errors[error1] : 0;

          /* vxlan-gbp-input node expects current at VXLAN_GBP header */
          if (is_ip4)
            vlib_buffer_advance (b1,
                                 sizeof (ip4_header_t) +
                                 sizeof (udp_header_t));
          else
            vlib_buffer_advance (b1,
                                 sizeof (ip6_header_t) +
                                 sizeof (udp_header_t));

        exit1:
          vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, bi1, next0, next1);
        }

      while (n_left_from > 0 && n_left_to_next > 0)
        {
          vlib_buffer_t *b0;
          ip4_header_t *ip40;
          ip6_header_t *ip60;
          udp_header_t *udp0;
          u32 bi0, ip_len0, udp_len0, flags0, next0;
          i32 len_diff0;
          u8 error0, good_udp0, proto0;

          bi0 = to_next[0] = from[0];
          from += 1;
          to_next += 1;
          n_left_from -= 1;
          n_left_to_next -= 1;

          b0 = vlib_get_buffer (vm, bi0);
          if (is_ip4)
            ip40 = vlib_buffer_get_current (b0);
          else
            ip60 = vlib_buffer_get_current (b0);

          /* Setup packet for next IP feature */
          vnet_feature_next (&next0, b0);

          if (is_ip4)
            /* Treat IP4 frag packets as "experimental" protocol for now
               until support of IP frag reassembly is implemented */
            proto0 = ip4_is_fragment (ip40) ? 0xfe : ip40->protocol;
          else
            proto0 = ip60->protocol;

          if (proto0 != IP_PROTOCOL_UDP)
            goto exit;          /* not UDP packet */

          if (is_ip4)
            udp0 = ip4_next_header (ip40);
          else
            udp0 = ip6_next_header (ip60);

          if (udp0->dst_port != clib_host_to_net_u16 (UDP_DST_PORT_vxlan_gbp))
            goto exit;          /* not VXLAN_GBP packet */

          /* Validate DIP against VTEPs */
          if (is_ip4)
            {
              if (addr4.as_u32 != ip40->dst_address.as_u32)
                {
                  if (!hash_get (vxm->vtep4, ip40->dst_address.as_u32))
                    goto exit;  /* no local VTEP for VXLAN_GBP packet */
                  addr4 = ip40->dst_address;
                }
            }
          else
            {
              if (!ip6_address_is_equal (&addr6, &ip60->dst_address))
                {
                  if (!hash_get_mem (vxm->vtep6, &ip60->dst_address))
                    goto exit;  /* no local VTEP for VXLAN_GBP packet */
                  addr6 = ip60->dst_address;
                }
            }

          flags0 = b0->flags;
          good_udp0 = (flags0 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;

          /* Don't verify UDP checksum for packets with explicit zero checksum. */
          good_udp0 |= udp0->checksum == 0;

          /* Verify UDP length */
          if (is_ip4)
            ip_len0 = clib_net_to_host_u16 (ip40->length);
          else
            ip_len0 = clib_net_to_host_u16 (ip60->payload_length);
          udp_len0 = clib_net_to_host_u16 (udp0->length);
          len_diff0 = ip_len0 - udp_len0;

          /* Verify UDP checksum */
          if (PREDICT_FALSE (!good_udp0))
            {
              if ((flags0 & VNET_BUFFER_F_L4_CHECKSUM_COMPUTED) == 0)
                {
                  if (is_ip4)
                    flags0 = ip4_tcp_udp_validate_checksum (vm, b0);
                  else
                    flags0 = ip6_tcp_udp_icmp_validate_checksum (vm, b0);
                  good_udp0 =
                    (flags0 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;
                }
            }

          if (is_ip4)
            {
              error0 = good_udp0 ? 0 : IP4_ERROR_UDP_CHECKSUM;
              error0 = (len_diff0 >= 0) ? error0 : IP4_ERROR_UDP_LENGTH;
            }
          else
            {
              error0 = good_udp0 ? 0 : IP6_ERROR_UDP_CHECKSUM;
              error0 = (len_diff0 >= 0) ? error0 : IP6_ERROR_UDP_LENGTH;
            }

          next0 = error0 ?
            IP_VXLAN_GBP_BYPASS_NEXT_DROP :
            IP_VXLAN_GBP_BYPASS_NEXT_VXLAN_GBP;
          b0->error = error0 ? error_node->errors[error0] : 0;

          /* vxlan-gbp-input node expects current at VXLAN_GBP header */
          if (is_ip4)
            vlib_buffer_advance (b0,
                                 sizeof (ip4_header_t) +
                                 sizeof (udp_header_t));
          else
            vlib_buffer_advance (b0,
                                 sizeof (ip6_header_t) +
                                 sizeof (udp_header_t));

        exit:
          vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, next0);
        }

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

  return frame->n_vectors;
}

static uword
ip4_vxlan_gbp_bypass (vlib_main_t * vm,
                      vlib_node_runtime_t * node, vlib_frame_t * frame)
{
  return ip_vxlan_gbp_bypass_inline (vm, node, frame, /* is_ip4 */ 1);
}

VLIB_REGISTER_NODE (ip4_vxlan_gbp_bypass_node) =
{
  .function = ip4_vxlan_gbp_bypass,
  .name = "ip4-vxlan-gbp-bypass",
  .vector_size = sizeof (u32),
  .n_next_nodes = IP_VXLAN_GBP_BYPASS_N_NEXT,
  .next_nodes = {
    [IP_VXLAN_GBP_BYPASS_NEXT_DROP] = "error-drop",
    [IP_VXLAN_GBP_BYPASS_NEXT_VXLAN_GBP] = "vxlan4-gbp-input",
  },
  .format_buffer = format_ip4_header,
  .format_trace = format_ip4_forward_next_trace,
};

VLIB_NODE_FUNCTION_MULTIARCH (ip4_vxlan_gbp_bypass_node, ip4_vxlan_gbp_bypass)

/* Dummy init function to get us linked in. */
static clib_error_t *
ip4_vxlan_gbp_bypass_init (vlib_main_t * vm)
{
  return 0;
}

VLIB_INIT_FUNCTION (ip4_vxlan_gbp_bypass_init);

static uword
ip6_vxlan_gbp_bypass (vlib_main_t * vm,
                      vlib_node_runtime_t * node, vlib_frame_t * frame)
{
  return ip_vxlan_gbp_bypass_inline (vm, node, frame, /* is_ip4 */ 0);
}

VLIB_REGISTER_NODE (ip6_vxlan_gbp_bypass_node) =
{
  .function = ip6_vxlan_gbp_bypass,
  .name = "ip6-vxlan-gbp-bypass",
  .vector_size = sizeof (u32),
  .n_next_nodes = IP_VXLAN_GBP_BYPASS_N_NEXT,
  .next_nodes = {
    [IP_VXLAN_GBP_BYPASS_NEXT_DROP] = "error-drop",
    [IP_VXLAN_GBP_BYPASS_NEXT_VXLAN_GBP] = "vxlan6-gbp-input",
  },
  .format_buffer = format_ip6_header,
  .format_trace = format_ip6_forward_next_trace,
};

VLIB_NODE_FUNCTION_MULTIARCH (ip6_vxlan_gbp_bypass_node, ip6_vxlan_gbp_bypass)

/* Dummy init function to get us linked in. */
static clib_error_t *
ip6_vxlan_gbp_bypass_init (vlib_main_t * vm)
{
  return 0;
}

VLIB_INIT_FUNCTION (ip6_vxlan_gbp_bypass_init);

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */