/*
 * decap.c: vxlan gbp tunnel decap packet processing
 *
 * Copyright (c) 2018 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <vlib/vlib.h>
#include <vnet/pg/pg.h>
#include <vnet/vxlan-gbp/vxlan_gbp.h>
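/*
 * Decap path overview (summary of the code below): packets arrive from the
 * UDP input path with current_data already at the VXLAN-GBP header.  The
 * input nodes look up the tunnel from the outer addresses, the encap FIB
 * index and the VNI, require the header flag byte to be exactly (I | G),
 * copy the group-policy flags and source class (sclass) into
 * vnet_buffer2()->gbp, and then hand the inner frame to l2-input or
 * directly to ip4-input / ip6-input.
 */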
typedef struct
{
  u32 next_index;
  u32 tunnel_index;
  u32 error;
  u32 vni;
  u16 sclass;
  u8 flags;
} vxlan_gbp_rx_trace_t;

static u8 *
format_vxlan_gbp_rx_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  vxlan_gbp_rx_trace_t *t = va_arg (*args, vxlan_gbp_rx_trace_t *);

  if (t->tunnel_index == ~0)
    return format (s,
                   "VXLAN_GBP decap error - tunnel for vni %d does not exist",
                   t->vni);
  return format (s,
                 "VXLAN_GBP decap from vxlan_gbp_tunnel%d vni %d sclass %d"
                 " flags %U next %d error %d",
                 t->tunnel_index, t->vni, t->sclass,
                 format_vxlan_gbp_header_gpflags, t->flags,
                 t->next_index, t->error);
}
static u32
buf_fib_index (vlib_buffer_t * b, u32 is_ip4)
{
  u32 sw_if_index = vnet_buffer (b)->sw_if_index[VLIB_TX];
  if (sw_if_index != (u32) ~ 0)
    return sw_if_index;

  u32 *fib_index_by_sw_if_index = is_ip4 ?
    ip4_main.fib_index_by_sw_if_index : ip6_main.fib_index_by_sw_if_index;
  sw_if_index = vnet_buffer (b)->sw_if_index[VLIB_RX];

  return vec_elt (fib_index_by_sw_if_index, sw_if_index);
}
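/*
 * Tunnel lookups are keyed on a FIB index: the value already stored in
 * sw_if_index[VLIB_TX] is used when it is set, otherwise the RX interface
 * is mapped to its IPv4/IPv6 FIB index.
 */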
typedef vxlan4_gbp_tunnel_key_t last_tunnel_cache4;

always_inline vxlan_gbp_tunnel_t *
vxlan4_gbp_find_tunnel (vxlan_gbp_main_t * vxm, last_tunnel_cache4 * cache,
                        u32 fib_index, ip4_header_t * ip4_0,
                        vxlan_gbp_header_t * vxlan_gbp0)
{
  /*
   * Check unicast first since that's where most of the traffic comes from
   * Make sure VXLAN_GBP tunnel exists according to packet SIP, DIP and VNI
   */
  vxlan4_gbp_tunnel_key_t key4;
  int rv;

  key4.key[1] = ((u64) fib_index << 32) | vxlan_gbp0->vni_reserved;
  key4.key[0] = (((u64) ip4_0->dst_address.as_u32 << 32) |
                 ip4_0->src_address.as_u32);

  if (PREDICT_FALSE (key4.key[0] != cache->key[0] ||
                     key4.key[1] != cache->key[1]))
    {
      rv = clib_bihash_search_inline_16_8 (&vxm->vxlan4_gbp_tunnel_by_key,
                                           &key4);
      if (PREDICT_FALSE (rv == 0))
        {
          *cache = key4;
          return (pool_elt_at_index (vxm->tunnels, cache->value));
        }
    }
  else
    {
      return (pool_elt_at_index (vxm->tunnels, cache->value));
    }

  /* No unicast match - try multicast */
  if (PREDICT_TRUE (!ip4_address_is_multicast (&ip4_0->dst_address)))
    return (NULL);

  key4.key[0] = ip4_0->dst_address.as_u32;
  /* Make sure mcast VXLAN_GBP tunnel exists by packet DIP and VNI */
  rv = clib_bihash_search_inline_16_8 (&vxm->vxlan4_gbp_tunnel_by_key, &key4);
  if (PREDICT_FALSE (rv != 0))
    return (NULL);

  return (pool_elt_at_index (vxm->tunnels, key4.value));
}
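/*
 * IPv4 lookup notes: the 16_8 bihash key packs (dst << 32 | src) and
 * (fib_index << 32 | vni_reserved).  A one-entry cache of the last key hit
 * avoids repeated hash probes when consecutive packets belong to the same
 * tunnel.  Unicast is tried first, then a multicast entry keyed on the
 * destination group address with the same fib_index/VNI.
 */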
typedef vxlan6_gbp_tunnel_key_t last_tunnel_cache6;

always_inline vxlan_gbp_tunnel_t *
vxlan6_gbp_find_tunnel (vxlan_gbp_main_t * vxm, last_tunnel_cache6 * cache,
                        u32 fib_index, ip6_header_t * ip6_0,
                        vxlan_gbp_header_t * vxlan_gbp0)
{
  /* Make sure VXLAN_GBP tunnel exists according to packet SIP and VNI */
  vxlan6_gbp_tunnel_key_t key6 = {
    .key = {
            [0] = ip6_0->src_address.as_u64[0],
            [1] = ip6_0->src_address.as_u64[1],
            [2] = (((u64) fib_index) << 32) | vxlan_gbp0->vni_reserved,
            },
  };
  int rv;

  if (PREDICT_FALSE
      (clib_bihash_key_compare_24_8 (key6.key, cache->key) == 0))
    {
      rv = clib_bihash_search_inline_24_8 (&vxm->vxlan6_gbp_tunnel_by_key,
                                           &key6);
      if (PREDICT_FALSE (rv != 0))
        return (NULL);

      *cache = key6;
    }
  vxlan_gbp_tunnel_t *t0 = pool_elt_at_index (vxm->tunnels, cache->value);

  /* Validate VXLAN_GBP tunnel SIP against packet DIP */
  if (PREDICT_FALSE
      (!ip6_address_is_equal (&ip6_0->dst_address, &t0->src.ip6)))
    {
      /* No unicast match - try multicast */
      if (PREDICT_TRUE (!ip6_address_is_multicast (&ip6_0->dst_address)))
        return (NULL);

      /* Make sure mcast VXLAN_GBP tunnel exists by packet DIP and VNI */
      key6.key[0] = ip6_0->dst_address.as_u64[0];
      key6.key[1] = ip6_0->dst_address.as_u64[1];
      rv = clib_bihash_search_inline_24_8 (&vxm->vxlan6_gbp_tunnel_by_key,
                                           &key6);
      if (PREDICT_FALSE (rv != 0))
        return (NULL);
    }

  return (t0);
}
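/*
 * IPv6 lookup notes: the 24_8 bihash is keyed on the packet source address
 * plus (fib_index, VNI).  The destination address is then checked against
 * the tunnel's local source; if it does not match, the packet is accepted
 * only when a multicast tunnel keyed on the destination group exists.
 */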
always_inline vxlan_gbp_input_next_t
vxlan_gbp_tunnel_get_next (const vxlan_gbp_tunnel_t * t, vlib_buffer_t * b0)
{
  if (VXLAN_GBP_TUNNEL_MODE_L2 == t->mode)
    return (VXLAN_GBP_INPUT_NEXT_L2_INPUT);
  else
    {
      ethernet_header_t *e0;
      u16 type0;

      e0 = vlib_buffer_get_current (b0);
      vlib_buffer_advance (b0, sizeof (*e0));
      type0 = clib_net_to_host_u16 (e0->type);

      switch (type0)
        {
        case ETHERNET_TYPE_IP4:
          return (VXLAN_GBP_INPUT_NEXT_IP4_INPUT);
        case ETHERNET_TYPE_IP6:
          return (VXLAN_GBP_INPUT_NEXT_IP6_INPUT);
        }
    }
  return (VXLAN_GBP_INPUT_NEXT_DROP);
}
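/*
 * Next-node selection: an L2-mode tunnel always hands the inner Ethernet
 * frame to l2-input.  Otherwise the inner Ethernet header is consumed here
 * and the payload goes straight to ip4-input or ip6-input based on the
 * ethertype; anything else is dropped.
 */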
always_inline uword
vxlan_gbp_input (vlib_main_t * vm,
                 vlib_node_runtime_t * node,
                 vlib_frame_t * from_frame, u8 is_ip4)
{
  vxlan_gbp_main_t *vxm = &vxlan_gbp_main;
  vnet_main_t *vnm = vxm->vnet_main;
  vnet_interface_main_t *im = &vnm->interface_main;
  vlib_combined_counter_main_t *rx_counter =
    im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX;
  vlib_combined_counter_main_t *drop_counter =
    im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_DROP;
  last_tunnel_cache4 last4;
  last_tunnel_cache6 last6;
  u32 pkts_decapsulated = 0;
  u32 thread_index = vlib_get_thread_index ();

  if (is_ip4)
    clib_memset (&last4, 0xff, sizeof last4);
  else
    clib_memset (&last6, 0xff, sizeof last6);

  u32 next_index = node->cached_next_index;

  u32 *from = vlib_frame_vector_args (from_frame);
  u32 n_left_from = from_frame->n_vectors;
  while (n_left_from > 0)
    {
      u32 *to_next, n_left_to_next;
      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      while (n_left_from >= 4 && n_left_to_next >= 2)
        {
          /* Prefetch next iteration. */
          {
            vlib_buffer_t *p2, *p3;

            p2 = vlib_get_buffer (vm, from[2]);
            p3 = vlib_get_buffer (vm, from[3]);

            vlib_prefetch_buffer_header (p2, LOAD);
            vlib_prefetch_buffer_header (p3, LOAD);

            CLIB_PREFETCH (p2->data, 2 * CLIB_CACHE_LINE_BYTES, LOAD);
            CLIB_PREFETCH (p3->data, 2 * CLIB_CACHE_LINE_BYTES, LOAD);
          }
          u32 bi0 = to_next[0] = from[0];
          u32 bi1 = to_next[1] = from[1];
          from += 2;
          to_next += 2;
          n_left_to_next -= 2;
          n_left_from -= 2;

          vlib_buffer_t *b0, *b1;
          b0 = vlib_get_buffer (vm, bi0);
          b1 = vlib_get_buffer (vm, bi1);

          /* udp leaves current_data pointing at the vxlan_gbp header */
          void *cur0 = vlib_buffer_get_current (b0);
          void *cur1 = vlib_buffer_get_current (b1);
          vxlan_gbp_header_t *vxlan_gbp0 = cur0;
          vxlan_gbp_header_t *vxlan_gbp1 = cur1;

          ip4_header_t *ip4_0, *ip4_1;
          ip6_header_t *ip6_0, *ip6_1;
          if (is_ip4)
            {
              ip4_0 = cur0 - sizeof (udp_header_t) - sizeof (ip4_header_t);
              ip4_1 = cur1 - sizeof (udp_header_t) - sizeof (ip4_header_t);
            }
          else
            {
              ip6_0 = cur0 - sizeof (udp_header_t) - sizeof (ip6_header_t);
              ip6_1 = cur1 - sizeof (udp_header_t) - sizeof (ip6_header_t);
            }

          u32 fi0 = buf_fib_index (b0, is_ip4);
          u32 fi1 = buf_fib_index (b1, is_ip4);

          vxlan_gbp_tunnel_t *t0, *t1;
          if (is_ip4)
            {
              t0 =
                vxlan4_gbp_find_tunnel (vxm, &last4, fi0, ip4_0, vxlan_gbp0);
              t1 =
                vxlan4_gbp_find_tunnel (vxm, &last4, fi1, ip4_1, vxlan_gbp1);
            }
          else
            {
              t0 =
                vxlan6_gbp_find_tunnel (vxm, &last6, fi0, ip6_0, vxlan_gbp0);
              t1 =
                vxlan6_gbp_find_tunnel (vxm, &last6, fi1, ip6_1, vxlan_gbp1);
            }
          u32 len0 = vlib_buffer_length_in_chain (vm, b0);
          u32 len1 = vlib_buffer_length_in_chain (vm, b1);

          vxlan_gbp_input_next_t next0, next1;
          u8 error0 = 0, error1 = 0;
          u8 flags0 = vxlan_gbp_get_flags (vxlan_gbp0);
          u8 flags1 = vxlan_gbp_get_flags (vxlan_gbp1);
          /* Required to make the l2 tag push / pop code work on l2 subifs */
          vlib_buffer_advance (b0, sizeof *vxlan_gbp0);
          vlib_buffer_advance (b1, sizeof *vxlan_gbp1);

          /* Validate VXLAN_GBP tunnel encap-fib index against packet */
          if (PREDICT_FALSE
              (t0 == 0 || flags0 != (VXLAN_GBP_FLAGS_I | VXLAN_GBP_FLAGS_G)))
            {
              if (t0 != 0
                  && flags0 != (VXLAN_GBP_FLAGS_I | VXLAN_GBP_FLAGS_G))
                {
                  error0 = VXLAN_GBP_ERROR_BAD_FLAGS;
                  vlib_increment_combined_counter
                    (drop_counter, thread_index, t0->sw_if_index, 1, len0);
                  next0 = VXLAN_GBP_INPUT_NEXT_DROP;
                }
              else
                {
                  error0 = VXLAN_GBP_ERROR_NO_SUCH_TUNNEL;
                  next0 = VXLAN_GBP_INPUT_NEXT_NO_TUNNEL;
                }
              b0->error = node->errors[error0];
            }
          else
            {
              next0 = vxlan_gbp_tunnel_get_next (t0, b0);

              /* Set packet input sw_if_index to unicast VXLAN tunnel for learning */
              vnet_buffer (b0)->sw_if_index[VLIB_RX] = t0->sw_if_index;
              vlib_increment_combined_counter
                (rx_counter, thread_index, t0->sw_if_index, 1, len0);
              pkts_decapsulated++;
            }

          vnet_buffer2 (b0)->gbp.flags = vxlan_gbp_get_gpflags (vxlan_gbp0);
          vnet_buffer2 (b0)->gbp.sclass = vxlan_gbp_get_sclass (vxlan_gbp0);

          if (PREDICT_FALSE
              (t1 == 0 || flags1 != (VXLAN_GBP_FLAGS_I | VXLAN_GBP_FLAGS_G)))
            {
              if (t1 != 0
                  && flags1 != (VXLAN_GBP_FLAGS_I | VXLAN_GBP_FLAGS_G))
                {
                  error1 = VXLAN_GBP_ERROR_BAD_FLAGS;
                  vlib_increment_combined_counter
                    (drop_counter, thread_index, t1->sw_if_index, 1, len1);
                  next1 = VXLAN_GBP_INPUT_NEXT_DROP;
                }
              else
                {
                  error1 = VXLAN_GBP_ERROR_NO_SUCH_TUNNEL;
                  next1 = VXLAN_GBP_INPUT_NEXT_NO_TUNNEL;
                }
              b1->error = node->errors[error1];
            }
          else
            {
              next1 = vxlan_gbp_tunnel_get_next (t1, b1);

              /* Set packet input sw_if_index to unicast VXLAN_GBP tunnel for learning */
              vnet_buffer (b1)->sw_if_index[VLIB_RX] = t1->sw_if_index;
              pkts_decapsulated++;

              vlib_increment_combined_counter
                (rx_counter, thread_index, t1->sw_if_index, 1, len1);
            }

          vnet_buffer2 (b1)->gbp.flags = vxlan_gbp_get_gpflags (vxlan_gbp1);
          vnet_buffer2 (b1)->gbp.sclass = vxlan_gbp_get_sclass (vxlan_gbp1);

          vnet_update_l2_len (b0);
          vnet_update_l2_len (b1);
          if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
            {
              vxlan_gbp_rx_trace_t *tr =
                vlib_add_trace (vm, node, b0, sizeof (*tr));
              tr->next_index = next0;
              tr->error = error0;
              tr->tunnel_index = t0 == 0 ? ~0 : t0 - vxm->tunnels;
              tr->vni = vxlan_gbp_get_vni (vxlan_gbp0);
              tr->sclass = vxlan_gbp_get_sclass (vxlan_gbp0);
              tr->flags = vxlan_gbp_get_gpflags (vxlan_gbp0);
            }
          if (PREDICT_FALSE (b1->flags & VLIB_BUFFER_IS_TRACED))
            {
              vxlan_gbp_rx_trace_t *tr =
                vlib_add_trace (vm, node, b1, sizeof (*tr));
              tr->next_index = next1;
              tr->error = error1;
              tr->tunnel_index = t1 == 0 ? ~0 : t1 - vxm->tunnels;
              tr->vni = vxlan_gbp_get_vni (vxlan_gbp1);
              tr->sclass = vxlan_gbp_get_sclass (vxlan_gbp1);
              tr->flags = vxlan_gbp_get_gpflags (vxlan_gbp1);
            }

          vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, bi1, next0, next1);
        }
      while (n_left_from > 0 && n_left_to_next > 0)
        {
          u32 bi0 = to_next[0] = from[0];
          from += 1;
          to_next += 1;
          n_left_from -= 1;
          n_left_to_next -= 1;

          vlib_buffer_t *b0 = vlib_get_buffer (vm, bi0);

          /* udp leaves current_data pointing at the vxlan_gbp header */
          void *cur0 = vlib_buffer_get_current (b0);
          vxlan_gbp_header_t *vxlan_gbp0 = cur0;

          ip4_header_t *ip4_0;
          ip6_header_t *ip6_0;
          if (is_ip4)
            ip4_0 = cur0 - sizeof (udp_header_t) - sizeof (ip4_header_t);
          else
            ip6_0 = cur0 - sizeof (udp_header_t) - sizeof (ip6_header_t);

          u32 fi0 = buf_fib_index (b0, is_ip4);

          vxlan_gbp_tunnel_t *t0;
          if (is_ip4)
            t0 = vxlan4_gbp_find_tunnel (vxm, &last4, fi0, ip4_0, vxlan_gbp0);
          else
            t0 = vxlan6_gbp_find_tunnel (vxm, &last6, fi0, ip6_0, vxlan_gbp0);

          uword len0 = vlib_buffer_length_in_chain (vm, b0);

          vxlan_gbp_input_next_t next0;
          u8 error0 = 0;
          u8 flags0 = vxlan_gbp_get_flags (vxlan_gbp0);

          /* pop (ip, udp, vxlan_gbp) */
          vlib_buffer_advance (b0, sizeof (*vxlan_gbp0));
          /* Validate VXLAN_GBP tunnel encap-fib index against packet */
          if (PREDICT_FALSE
              (t0 == 0 || flags0 != (VXLAN_GBP_FLAGS_I | VXLAN_GBP_FLAGS_G)))
            {
              if (t0 != 0
                  && flags0 != (VXLAN_GBP_FLAGS_I | VXLAN_GBP_FLAGS_G))
                {
                  error0 = VXLAN_GBP_ERROR_BAD_FLAGS;
                  vlib_increment_combined_counter
                    (drop_counter, thread_index, t0->sw_if_index, 1, len0);
                  next0 = VXLAN_GBP_INPUT_NEXT_DROP;
                }
              else
                {
                  error0 = VXLAN_GBP_ERROR_NO_SUCH_TUNNEL;
                  next0 = VXLAN_GBP_INPUT_NEXT_NO_TUNNEL;
                }
              b0->error = node->errors[error0];
            }
          else
            {
              next0 = vxlan_gbp_tunnel_get_next (t0, b0);
              /* Set packet input sw_if_index to unicast VXLAN_GBP tunnel for learning */
              vnet_buffer (b0)->sw_if_index[VLIB_RX] = t0->sw_if_index;
              pkts_decapsulated++;

              vlib_increment_combined_counter
                (rx_counter, thread_index, t0->sw_if_index, 1, len0);
            }
          vnet_buffer2 (b0)->gbp.flags = vxlan_gbp_get_gpflags (vxlan_gbp0);
          vnet_buffer2 (b0)->gbp.sclass = vxlan_gbp_get_sclass (vxlan_gbp0);

          /* Required to make the l2 tag push / pop code work on l2 subifs */
          vnet_update_l2_len (b0);

          if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
            {
              vxlan_gbp_rx_trace_t *tr
                = vlib_add_trace (vm, node, b0, sizeof (*tr));
              tr->next_index = next0;
              tr->error = error0;
              tr->tunnel_index = t0 == 0 ? ~0 : t0 - vxm->tunnels;
              tr->vni = vxlan_gbp_get_vni (vxlan_gbp0);
              tr->sclass = vxlan_gbp_get_sclass (vxlan_gbp0);
              tr->flags = vxlan_gbp_get_gpflags (vxlan_gbp0);
            }
          vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, next0);
        }

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }
  /* Do we still need this now that tunnel tx stats is kept? */
  u32 node_idx =
    is_ip4 ? vxlan4_gbp_input_node.index : vxlan6_gbp_input_node.index;
  vlib_node_increment_counter (vm, node_idx, VXLAN_GBP_ERROR_DECAPSULATED,
                               pkts_decapsulated);

  return from_frame->n_vectors;
}
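/*
 * vxlan_gbp_input() is shared by the two node functions below; the is_ip4
 * argument is a compile-time constant at each call site, so the compiler
 * can drop the unused address-family branches in each instantiation.
 */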
VLIB_NODE_FN (vxlan4_gbp_input_node) (vlib_main_t * vm,
                                      vlib_node_runtime_t * node,
                                      vlib_frame_t * from_frame)
{
  return vxlan_gbp_input (vm, node, from_frame, /* is_ip4 */ 1);
}

VLIB_NODE_FN (vxlan6_gbp_input_node) (vlib_main_t * vm,
                                      vlib_node_runtime_t * node,
                                      vlib_frame_t * from_frame)
{
  return vxlan_gbp_input (vm, node, from_frame, /* is_ip4 */ 0);
}
static char *vxlan_gbp_error_strings[] = {
#define vxlan_gbp_error(n,s) s,
#include <vnet/vxlan-gbp/vxlan_gbp_error.def>
#undef vxlan_gbp_error
};
VLIB_REGISTER_NODE (vxlan4_gbp_input_node) =
{
  .name = "vxlan4-gbp-input",
  .vector_size = sizeof (u32),
  .n_errors = VXLAN_GBP_N_ERROR,
  .error_strings = vxlan_gbp_error_strings,
  .n_next_nodes = VXLAN_GBP_INPUT_N_NEXT,
  .format_trace = format_vxlan_gbp_rx_trace,
  .next_nodes = {
#define _(s,n) [VXLAN_GBP_INPUT_NEXT_##s] = n,
    foreach_vxlan_gbp_input_next
#undef _
  },
};
VLIB_REGISTER_NODE (vxlan6_gbp_input_node) =
{
  .name = "vxlan6-gbp-input",
  .vector_size = sizeof (u32),
  .n_errors = VXLAN_GBP_N_ERROR,
  .error_strings = vxlan_gbp_error_strings,
  .n_next_nodes = VXLAN_GBP_INPUT_N_NEXT,
  .next_nodes = {
#define _(s,n) [VXLAN_GBP_INPUT_NEXT_##s] = n,
    foreach_vxlan_gbp_input_next
#undef _
  },
  .format_trace = format_vxlan_gbp_rx_trace,
};
typedef enum
{
  IP_VXLAN_GBP_BYPASS_NEXT_DROP,
  IP_VXLAN_GBP_BYPASS_NEXT_VXLAN_GBP,
  IP_VXLAN_GBP_BYPASS_N_NEXT,
} ip_vxlan_gbp_bypass_next_t;
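/*
 * The bypass nodes below run as IP input features: for UDP packets whose
 * destination port is the VXLAN-GBP port and whose destination address is a
 * local VTEP, they verify the UDP length and checksum and then hand the
 * packet, positioned at the VXLAN-GBP header, directly to
 * vxlan4-gbp-input / vxlan6-gbp-input.  Everything else continues to the
 * next feature via vnet_feature_next().
 */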
always_inline uword
ip_vxlan_gbp_bypass_inline (vlib_main_t * vm,
                            vlib_node_runtime_t * node,
                            vlib_frame_t * frame, u32 is_ip4)
{
  vxlan_gbp_main_t *vxm = &vxlan_gbp_main;
  u32 *from, *to_next, n_left_from, n_left_to_next, next_index;
  vlib_node_runtime_t *error_node =
    vlib_node_get_runtime (vm, ip4_input_node.index);
  ip4_address_t addr4;          /* last IPv4 address matching a local VTEP address */
  ip6_address_t addr6;          /* last IPv6 address matching a local VTEP address */

  from = vlib_frame_vector_args (frame);
  n_left_from = frame->n_vectors;
  next_index = node->cached_next_index;

  if (node->flags & VLIB_NODE_FLAG_TRACE)
    ip4_forward_next_trace (vm, node, frame, VLIB_TX);

  if (is_ip4)
    addr4.data_u32 = ~0;
  else
    ip6_address_set_zero (&addr6);
  while (n_left_from > 0)
    {
      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      while (n_left_from >= 4 && n_left_to_next >= 2)
        {
          vlib_buffer_t *b0, *b1;
          ip4_header_t *ip40, *ip41;
          ip6_header_t *ip60, *ip61;
          udp_header_t *udp0, *udp1;
          u32 bi0, ip_len0, udp_len0, flags0, next0;
          u32 bi1, ip_len1, udp_len1, flags1, next1;
          i32 len_diff0, len_diff1;
          u8 error0, good_udp0, proto0;
          u8 error1, good_udp1, proto1;

          /* Prefetch next iteration. */
          {
            vlib_buffer_t *p2, *p3;

            p2 = vlib_get_buffer (vm, from[2]);
            p3 = vlib_get_buffer (vm, from[3]);

            vlib_prefetch_buffer_header (p2, LOAD);
            vlib_prefetch_buffer_header (p3, LOAD);

            CLIB_PREFETCH (p2->data, 2 * CLIB_CACHE_LINE_BYTES, LOAD);
            CLIB_PREFETCH (p3->data, 2 * CLIB_CACHE_LINE_BYTES, LOAD);
          }

          bi0 = to_next[0] = from[0];
          bi1 = to_next[1] = from[1];
          from += 2;
          n_left_from -= 2;
          to_next += 2;
          n_left_to_next -= 2;

          b0 = vlib_get_buffer (vm, bi0);
          b1 = vlib_get_buffer (vm, bi1);
          if (is_ip4)
            {
              ip40 = vlib_buffer_get_current (b0);
              ip41 = vlib_buffer_get_current (b1);
            }
          else
            {
              ip60 = vlib_buffer_get_current (b0);
              ip61 = vlib_buffer_get_current (b1);
            }

          /* Setup packet for next IP feature */
          vnet_feature_next (&next0, b0);
          vnet_feature_next (&next1, b1);

          if (is_ip4)
            {
              /* Treat IP frag packets as "experimental" protocol for now
                 until support of IP frag reassembly is implemented */
              proto0 = ip4_is_fragment (ip40) ? 0xfe : ip40->protocol;
              proto1 = ip4_is_fragment (ip41) ? 0xfe : ip41->protocol;
            }
          else
            {
              proto0 = ip60->protocol;
              proto1 = ip61->protocol;
            }
          /* Process packet 0 */
          if (proto0 != IP_PROTOCOL_UDP)
            goto exit0;         /* not UDP packet */

          if (is_ip4)
            udp0 = ip4_next_header (ip40);
          else
            udp0 = ip6_next_header (ip60);

          if (udp0->dst_port != clib_host_to_net_u16 (UDP_DST_PORT_vxlan_gbp))
            goto exit0;         /* not VXLAN_GBP packet */

          /* Validate DIP against VTEPs */
          if (is_ip4)
            {
              if (addr4.as_u32 != ip40->dst_address.as_u32)
                {
                  if (!hash_get (vxm->vtep4, ip40->dst_address.as_u32))
                    goto exit0; /* no local VTEP for VXLAN_GBP packet */
                  addr4 = ip40->dst_address;
                }
            }
          else
            {
              if (!ip6_address_is_equal (&addr6, &ip60->dst_address))
                {
                  if (!hash_get_mem (vxm->vtep6, &ip60->dst_address))
                    goto exit0; /* no local VTEP for VXLAN_GBP packet */
                  addr6 = ip60->dst_address;
                }
            }

          flags0 = b0->flags;
          good_udp0 = (flags0 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;

          /* Don't verify UDP checksum for packets with explicit zero checksum. */
          good_udp0 |= udp0->checksum == 0;

          /* Verify UDP length */
          if (is_ip4)
            ip_len0 = clib_net_to_host_u16 (ip40->length);
          else
            ip_len0 = clib_net_to_host_u16 (ip60->payload_length);
          udp_len0 = clib_net_to_host_u16 (udp0->length);
          len_diff0 = ip_len0 - udp_len0;

          /* Verify UDP checksum */
          if (PREDICT_FALSE (!good_udp0))
            {
              if ((flags0 & VNET_BUFFER_F_L4_CHECKSUM_COMPUTED) == 0)
                {
                  if (is_ip4)
                    flags0 = ip4_tcp_udp_validate_checksum (vm, b0);
                  else
                    flags0 = ip6_tcp_udp_icmp_validate_checksum (vm, b0);
                  good_udp0 =
                    (flags0 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;
                }
            }

          if (is_ip4)
            {
              error0 = good_udp0 ? 0 : IP4_ERROR_UDP_CHECKSUM;
              error0 = (len_diff0 >= 0) ? error0 : IP4_ERROR_UDP_LENGTH;
            }
          else
            {
              error0 = good_udp0 ? 0 : IP6_ERROR_UDP_CHECKSUM;
              error0 = (len_diff0 >= 0) ? error0 : IP6_ERROR_UDP_LENGTH;
            }

          next0 = error0 ?
            IP_VXLAN_GBP_BYPASS_NEXT_DROP :
            IP_VXLAN_GBP_BYPASS_NEXT_VXLAN_GBP;
          b0->error = error0 ? error_node->errors[error0] : 0;

          /* vxlan-gbp-input node expect current at VXLAN_GBP header */
          if (is_ip4)
            vlib_buffer_advance (b0,
                                 sizeof (ip4_header_t) +
                                 sizeof (udp_header_t));
          else
            vlib_buffer_advance (b0,
                                 sizeof (ip6_header_t) +
                                 sizeof (udp_header_t));

        exit0:
          /* Process packet 1 */
          if (proto1 != IP_PROTOCOL_UDP)
            goto exit1;         /* not UDP packet */

          if (is_ip4)
            udp1 = ip4_next_header (ip41);
          else
            udp1 = ip6_next_header (ip61);

          if (udp1->dst_port != clib_host_to_net_u16 (UDP_DST_PORT_vxlan_gbp))
            goto exit1;         /* not VXLAN_GBP packet */

          /* Validate DIP against VTEPs */
          if (is_ip4)
            {
              if (addr4.as_u32 != ip41->dst_address.as_u32)
                {
                  if (!hash_get (vxm->vtep4, ip41->dst_address.as_u32))
                    goto exit1; /* no local VTEP for VXLAN_GBP packet */
                  addr4 = ip41->dst_address;
                }
            }
          else
            {
              if (!ip6_address_is_equal (&addr6, &ip61->dst_address))
                {
                  if (!hash_get_mem (vxm->vtep6, &ip61->dst_address))
                    goto exit1; /* no local VTEP for VXLAN_GBP packet */
                  addr6 = ip61->dst_address;
                }
            }

          flags1 = b1->flags;
          good_udp1 = (flags1 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;

          /* Don't verify UDP checksum for packets with explicit zero checksum. */
          good_udp1 |= udp1->checksum == 0;

          /* Verify UDP length */
          if (is_ip4)
            ip_len1 = clib_net_to_host_u16 (ip41->length);
          else
            ip_len1 = clib_net_to_host_u16 (ip61->payload_length);
          udp_len1 = clib_net_to_host_u16 (udp1->length);
          len_diff1 = ip_len1 - udp_len1;

          /* Verify UDP checksum */
          if (PREDICT_FALSE (!good_udp1))
            {
              if ((flags1 & VNET_BUFFER_F_L4_CHECKSUM_COMPUTED) == 0)
                {
                  if (is_ip4)
                    flags1 = ip4_tcp_udp_validate_checksum (vm, b1);
                  else
                    flags1 = ip6_tcp_udp_icmp_validate_checksum (vm, b1);
                  good_udp1 =
                    (flags1 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;
                }
            }

          if (is_ip4)
            {
              error1 = good_udp1 ? 0 : IP4_ERROR_UDP_CHECKSUM;
              error1 = (len_diff1 >= 0) ? error1 : IP4_ERROR_UDP_LENGTH;
            }
          else
            {
              error1 = good_udp1 ? 0 : IP6_ERROR_UDP_CHECKSUM;
              error1 = (len_diff1 >= 0) ? error1 : IP6_ERROR_UDP_LENGTH;
            }

          next1 = error1 ?
            IP_VXLAN_GBP_BYPASS_NEXT_DROP :
            IP_VXLAN_GBP_BYPASS_NEXT_VXLAN_GBP;
          b1->error = error1 ? error_node->errors[error1] : 0;

          /* vxlan_gbp-input node expect current at VXLAN_GBP header */
          if (is_ip4)
            vlib_buffer_advance (b1,
                                 sizeof (ip4_header_t) +
                                 sizeof (udp_header_t));
          else
            vlib_buffer_advance (b1,
                                 sizeof (ip6_header_t) +
                                 sizeof (udp_header_t));

        exit1:
          vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, bi1, next0, next1);
        }
      while (n_left_from > 0 && n_left_to_next > 0)
        {
          vlib_buffer_t *b0;
          ip4_header_t *ip40;
          ip6_header_t *ip60;
          udp_header_t *udp0;
          u32 bi0, ip_len0, udp_len0, flags0, next0;
          i32 len_diff0;
          u8 error0, good_udp0, proto0;

          bi0 = to_next[0] = from[0];
          from += 1;
          n_left_from -= 1;
          to_next += 1;
          n_left_to_next -= 1;

          b0 = vlib_get_buffer (vm, bi0);
          if (is_ip4)
            ip40 = vlib_buffer_get_current (b0);
          else
            ip60 = vlib_buffer_get_current (b0);

          /* Setup packet for next IP feature */
          vnet_feature_next (&next0, b0);

          if (is_ip4)
            /* Treat IP4 frag packets as "experimental" protocol for now
               until support of IP frag reassembly is implemented */
            proto0 = ip4_is_fragment (ip40) ? 0xfe : ip40->protocol;
          else
            proto0 = ip60->protocol;

          if (proto0 != IP_PROTOCOL_UDP)
            goto exit;          /* not UDP packet */

          if (is_ip4)
            udp0 = ip4_next_header (ip40);
          else
            udp0 = ip6_next_header (ip60);

          if (udp0->dst_port != clib_host_to_net_u16 (UDP_DST_PORT_vxlan_gbp))
            goto exit;          /* not VXLAN_GBP packet */

          /* Validate DIP against VTEPs */
          if (is_ip4)
            {
              if (addr4.as_u32 != ip40->dst_address.as_u32)
                {
                  if (!hash_get (vxm->vtep4, ip40->dst_address.as_u32))
                    goto exit;  /* no local VTEP for VXLAN_GBP packet */
                  addr4 = ip40->dst_address;
                }
            }
          else
            {
              if (!ip6_address_is_equal (&addr6, &ip60->dst_address))
                {
                  if (!hash_get_mem (vxm->vtep6, &ip60->dst_address))
                    goto exit;  /* no local VTEP for VXLAN_GBP packet */
                  addr6 = ip60->dst_address;
                }
            }

          flags0 = b0->flags;
          good_udp0 = (flags0 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;

          /* Don't verify UDP checksum for packets with explicit zero checksum. */
          good_udp0 |= udp0->checksum == 0;

          /* Verify UDP length */
          if (is_ip4)
            ip_len0 = clib_net_to_host_u16 (ip40->length);
          else
            ip_len0 = clib_net_to_host_u16 (ip60->payload_length);
          udp_len0 = clib_net_to_host_u16 (udp0->length);
          len_diff0 = ip_len0 - udp_len0;

          /* Verify UDP checksum */
          if (PREDICT_FALSE (!good_udp0))
            {
              if ((flags0 & VNET_BUFFER_F_L4_CHECKSUM_COMPUTED) == 0)
                {
                  if (is_ip4)
                    flags0 = ip4_tcp_udp_validate_checksum (vm, b0);
                  else
                    flags0 = ip6_tcp_udp_icmp_validate_checksum (vm, b0);
                  good_udp0 =
                    (flags0 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;
                }
            }

          if (is_ip4)
            {
              error0 = good_udp0 ? 0 : IP4_ERROR_UDP_CHECKSUM;
              error0 = (len_diff0 >= 0) ? error0 : IP4_ERROR_UDP_LENGTH;
            }
          else
            {
              error0 = good_udp0 ? 0 : IP6_ERROR_UDP_CHECKSUM;
              error0 = (len_diff0 >= 0) ? error0 : IP6_ERROR_UDP_LENGTH;
            }

          next0 = error0 ?
            IP_VXLAN_GBP_BYPASS_NEXT_DROP :
            IP_VXLAN_GBP_BYPASS_NEXT_VXLAN_GBP;
          b0->error = error0 ? error_node->errors[error0] : 0;

          /* vxlan_gbp-input node expect current at VXLAN_GBP header */
          if (is_ip4)
            vlib_buffer_advance (b0,
                                 sizeof (ip4_header_t) +
                                 sizeof (udp_header_t));
          else
            vlib_buffer_advance (b0,
                                 sizeof (ip6_header_t) +
                                 sizeof (udp_header_t));

        exit:
          vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, next0);
        }

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }
  return frame->n_vectors;
}
VLIB_NODE_FN (ip4_vxlan_gbp_bypass_node) (vlib_main_t * vm,
                                          vlib_node_runtime_t * node,
                                          vlib_frame_t * frame)
{
  return ip_vxlan_gbp_bypass_inline (vm, node, frame, /* is_ip4 */ 1);
}
VLIB_REGISTER_NODE (ip4_vxlan_gbp_bypass_node) =
{
  .name = "ip4-vxlan-gbp-bypass",
  .vector_size = sizeof (u32),
  .n_next_nodes = IP_VXLAN_GBP_BYPASS_N_NEXT,
  .next_nodes = {
    [IP_VXLAN_GBP_BYPASS_NEXT_DROP] = "error-drop",
    [IP_VXLAN_GBP_BYPASS_NEXT_VXLAN_GBP] = "vxlan4-gbp-input",
  },
  .format_buffer = format_ip4_header,
  .format_trace = format_ip4_forward_next_trace,
};
#ifndef CLIB_MARCH_VARIANT
/* Dummy init function to get us linked in. */
static clib_error_t *
ip4_vxlan_gbp_bypass_init (vlib_main_t * vm)
{
  return 0;
}

VLIB_INIT_FUNCTION (ip4_vxlan_gbp_bypass_init);
#endif /* CLIB_MARCH_VARIANT */
VLIB_NODE_FN (ip6_vxlan_gbp_bypass_node) (vlib_main_t * vm,
                                          vlib_node_runtime_t * node,
                                          vlib_frame_t * frame)
{
  return ip_vxlan_gbp_bypass_inline (vm, node, frame, /* is_ip4 */ 0);
}
VLIB_REGISTER_NODE (ip6_vxlan_gbp_bypass_node) =
{
  .name = "ip6-vxlan-gbp-bypass",
  .vector_size = sizeof (u32),
  .n_next_nodes = IP_VXLAN_GBP_BYPASS_N_NEXT,
  .next_nodes = {
    [IP_VXLAN_GBP_BYPASS_NEXT_DROP] = "error-drop",
    [IP_VXLAN_GBP_BYPASS_NEXT_VXLAN_GBP] = "vxlan6-gbp-input",
  },
  .format_buffer = format_ip6_header,
  .format_trace = format_ip6_forward_next_trace,
};
#ifndef CLIB_MARCH_VARIANT
/* Dummy init function to get us linked in. */
static clib_error_t *
ip6_vxlan_gbp_bypass_init (vlib_main_t * vm)
{
  return 0;
}

VLIB_INIT_FUNCTION (ip6_vxlan_gbp_bypass_init);
#endif /* CLIB_MARCH_VARIANT */
/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */