/*
 * Copyright (c) 2016 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
15 #include <vnet/vxlan-gpe/vxlan_gpe.h>
16 #include <vnet/vxlan-gpe/vxlan_gpe_packet.h>
17 #include <vnet/ip/format.h>
18 #include <ioam/lib-vxlan-gpe/vxlan_gpe_ioam.h>
19 #include <vnet/dpo/load_balance.h>
20 #include <vnet/fib/ip4_fib.h>
21 #include <vnet/fib/fib_entry.h>
22 #include <vnet/udp/udp_local.h>
24 vxlan_gpe_ioam_main_t vxlan_gpe_ioam_main;
27 vxlan_gpe_ioam_set_rewrite (vxlan_gpe_tunnel_t * t, int has_trace_option,
28 int has_pot_option, int has_ppc_option,
31 vxlan_gpe_ioam_main_t *hm = &vxlan_gpe_ioam_main;
33 vxlan_gpe_ioam_hdr_t *vxlan_gpe_ioam_hdr;
35 u8 trace_data_size = 0;
38 if (has_trace_option == 0 && has_pot_option == 0)
41 /* Work out how much space we need */
42 size = sizeof (vxlan_gpe_ioam_hdr_t);
45 && hm->add_options[VXLAN_GPE_OPTION_TYPE_IOAM_TRACE] != 0)
47 size += sizeof (vxlan_gpe_ioam_option_t);
48 size += hm->options_size[VXLAN_GPE_OPTION_TYPE_IOAM_TRACE];
51 && hm->add_options[VXLAN_GPE_OPTION_TYPE_IOAM_PROOF_OF_TRANSIT] != 0)
53 size += sizeof (vxlan_gpe_ioam_option_t);
54 size += hm->options_size[VXLAN_GPE_OPTION_TYPE_IOAM_PROOF_OF_TRANSIT];
57 t->rewrite_size = size;
61 vxlan4_gpe_rewrite (t, size, VXLAN_GPE_PROTOCOL_IOAM,
62 hm->encap_v4_next_node);
64 (vxlan_gpe_ioam_hdr_t *) (t->rewrite +
65 sizeof (ip4_vxlan_gpe_header_t));
69 vxlan6_gpe_rewrite (t, size, VXLAN_GPE_PROTOCOL_IOAM,
70 VXLAN_GPE_ENCAP_NEXT_IP6_LOOKUP);
72 (vxlan_gpe_ioam_hdr_t *) (t->rewrite +
73 sizeof (ip6_vxlan_gpe_header_t));
77 vxlan_gpe_ioam_hdr->type = VXLAN_GPE_PROTOCOL_IOAM;
78 /* Length of the header in octets */
79 vxlan_gpe_ioam_hdr->length = size;
80 vxlan_gpe_ioam_hdr->protocol = t->protocol;
81 current = (u8 *) vxlan_gpe_ioam_hdr + sizeof (vxlan_gpe_ioam_hdr_t);
84 && hm->add_options[VXLAN_GPE_OPTION_TYPE_IOAM_TRACE] != 0)
86 if (0 != hm->add_options[VXLAN_GPE_OPTION_TYPE_IOAM_TRACE] (current,
89 current += trace_data_size;
92 && hm->add_options[VXLAN_GPE_OPTION_TYPE_IOAM_PROOF_OF_TRANSIT] != 0)
95 hm->options_size[VXLAN_GPE_OPTION_TYPE_IOAM_PROOF_OF_TRANSIT];
97 hm->add_options[VXLAN_GPE_OPTION_TYPE_IOAM_PROOF_OF_TRANSIT]
98 (current, &pot_data_size))
99 current += pot_data_size;
106 vxlan_gpe_ioam_clear_rewrite (vxlan_gpe_tunnel_t * t, int has_trace_option,
107 int has_pot_option, int has_ppc_option,
115 vxlan4_gpe_rewrite (t, 0, 0, VXLAN_GPE_ENCAP_NEXT_IP4_LOOKUP);
119 vxlan6_gpe_rewrite (t, 0, 0, VXLAN_GPE_ENCAP_NEXT_IP6_LOOKUP);
127 vxlan_gpe_ioam_clear (vxlan_gpe_tunnel_t * t,
128 int has_trace_option, int has_pot_option,
129 int has_ppc_option, u8 ipv6_set)
132 rv = vxlan_gpe_ioam_clear_rewrite (t, 0, 0, 0, 0);
140 return clib_error_return_code (0, rv, 0,
141 "vxlan_gpe_ioam_clear_rewrite returned %d",
149 vxlan_gpe_ioam_set (vxlan_gpe_tunnel_t * t,
150 int has_trace_option, int has_pot_option,
151 int has_ppc_option, u8 ipv6_set)
154 rv = vxlan_gpe_ioam_set_rewrite (t, has_trace_option,
155 has_pot_option, has_ppc_option, ipv6_set);
163 return clib_error_return_code (0, rv, 0,
164 "vxlan_gpe_ioam_set_rewrite returned %d",
171 vxlan_gpe_set_clear_output_feature_on_intf (vlib_main_t * vm,
172 u32 sw_if_index0, u8 is_add)
177 vnet_feature_enable_disable ("ip4-output", "vxlan-gpe-transit-ioam",
178 sw_if_index0, is_add,
179 0 /* void *feature_config */ ,
180 0 /* u32 n_feature_config_bytes */ );
185 vxlan_gpe_clear_output_feature_on_all_intfs (vlib_main_t * vm)
187 vnet_sw_interface_t *si = 0;
188 vnet_main_t *vnm = vnet_get_main ();
189 vnet_interface_main_t *im = &vnm->interface_main;
191 pool_foreach (si, im->sw_interfaces)
193 vxlan_gpe_set_clear_output_feature_on_intf (vm, si->sw_if_index, 0);
199 extern fib_forward_chain_type_t
200 fib_entry_get_default_chain_type (const fib_entry_t * fib_entry);
203 vxlan_gpe_enable_disable_ioam_for_dest (vlib_main_t * vm,
204 ip46_address_t dst_addr,
206 u8 is_ipv4, u8 is_add)
208 vxlan_gpe_ioam_main_t *hm = &vxlan_gpe_ioam_main;
210 u32 sw_if_index0 = ~0;
212 fib_node_index_t fei = ~0;
213 fib_entry_t *fib_entry;
215 ip_adjacency_t *adj0;
216 fib_prefix_t fib_prefix;
217 //fib_forward_chain_type_t fct;
218 load_balance_t *lb_m, *lb_b;
219 const dpo_id_t *dpo0, *dpo1;
221 //vnet_hw_interface_t *hw;
225 clib_memset (&fib_prefix, 0, sizeof (fib_prefix_t));
226 fib_prefix.fp_len = 32;
227 fib_prefix.fp_proto = FIB_PROTOCOL_IP4;
228 fib_prefix.fp_addr = dst_addr;
235 fei = fib_table_lookup (fib_index0, &fib_prefix);
236 fib_entry = fib_entry_get (fei);
238 //fct = fib_entry_get_default_chain_type (fib_entry);
240 if (!dpo_id_is_valid (&fib_entry->fe_lb /*[fct] */ ))
245 lb_m = load_balance_get (fib_entry->fe_lb /*[fct] */ .dpoi_index);
247 for (i = 0; i < lb_m->lb_n_buckets; i++)
249 dpo0 = load_balance_get_bucket_i (lb_m, i);
251 if (dpo0->dpoi_type == DPO_LOAD_BALANCE)
253 lb_b = load_balance_get (dpo0->dpoi_index);
255 for (j = 0; j < lb_b->lb_n_buckets; j++)
257 dpo1 = load_balance_get_bucket_i (lb_b, j);
259 if (dpo1->dpoi_type == DPO_ADJACENCY)
261 adj_index0 = dpo1->dpoi_index;
263 if (ADJ_INDEX_INVALID == adj_index0)
268 adj0 = adj_get (adj_index0);
269 sw_if_index0 = adj0->rewrite_header.sw_if_index;
271 if (~0 == sw_if_index0)
279 vnet_feature_enable_disable ("ip4-output",
280 "vxlan-gpe-transit-ioam",
281 sw_if_index0, is_add, 0
282 /* void *feature_config */
283 , 0 /* u32 n_feature_config_bytes */
286 vec_validate_init_empty (hm->bool_ref_by_sw_if_index,
288 hm->bool_ref_by_sw_if_index[sw_if_index0] = 1;
292 hm->bool_ref_by_sw_if_index[sw_if_index0] = ~0;
303 vxlan_gpe_ioam_dest_tunnels_t *t1;
304 fib_prefix_t key4, *key4_copy;
306 clib_memset (&key4, 0, sizeof (key4));
307 key4.fp_proto = FIB_PROTOCOL_IP4;
308 key4.fp_addr.ip4.as_u32 = fib_prefix.fp_addr.ip4.as_u32;
309 t = hash_get_mem (hm->dst_by_ip4, &key4);
316 pool_get_aligned (hm->dst_tunnels, t1, CLIB_CACHE_LINE_BYTES);
317 clib_memset (t1, 0, sizeof (*t1));
318 t1->fp_proto = FIB_PROTOCOL_IP4;
319 t1->dst_addr.ip4.as_u32 = fib_prefix.fp_addr.ip4.as_u32;
320 key4_copy = clib_mem_alloc (sizeof (*key4_copy));
321 clib_memcpy (key4_copy, &key4, sizeof (*key4_copy));
322 hash_set_mem (hm->dst_by_ip4, key4_copy, t1 - hm->dst_tunnels);
324 * Attach to the FIB entry for the VxLAN-GPE destination
325 * and become its child. The dest route will invoke a callback
326 * when the fib entry changes, it can be used to
327 * re-program the output feature on the egress interface.
330 const fib_prefix_t tun_dst_pfx = {
332 .fp_proto = FIB_PROTOCOL_IP4,
333 .fp_addr = {.ip4 = t1->dst_addr.ip4,}
336 t1->fib_entry_index =
337 fib_table_entry_special_add (outer_fib_index,
339 FIB_SOURCE_RR, FIB_ENTRY_FLAG_NONE);
341 fib_entry_child_add (t1->fib_entry_index,
342 hm->fib_entry_type, t1 - hm->dst_tunnels);
343 t1->outer_fib_index = outer_fib_index;
352 t1 = pool_elt_at_index (hm->dst_tunnels, t[0]);
353 hp = hash_get_pair (hm->dst_by_ip4, &key4);
354 key4_copy = (void *) (hp->key);
355 hash_unset_mem (hm->dst_by_ip4, &key4);
356 clib_mem_free (key4_copy);
357 pool_put (hm->dst_tunnels, t1);
369 vxlan_gpe_refresh_output_feature_on_all_dest (void)
371 vxlan_gpe_ioam_main_t *hm = &vxlan_gpe_ioam_main;
372 vxlan_gpe_ioam_dest_tunnels_t *t;
374 if (pool_elts (hm->dst_tunnels) == 0)
376 vxlan_gpe_clear_output_feature_on_all_intfs (hm->vlib_main);
377 i = vec_len (hm->bool_ref_by_sw_if_index);
378 vec_free (hm->bool_ref_by_sw_if_index);
379 vec_validate_init_empty (hm->bool_ref_by_sw_if_index, i, ~0);
380 pool_foreach (t, hm->dst_tunnels)
382 vxlan_gpe_enable_disable_ioam_for_dest
383 (hm->vlib_main, t->dst_addr, t->outer_fib_index,
384 (t->fp_proto == FIB_PROTOCOL_IP4), 1 /* is_add */ );
390 vxlan_gpe_clear_output_feature_on_select_intfs (void)
392 vxlan_gpe_ioam_main_t *hm = &vxlan_gpe_ioam_main;
393 u32 sw_if_index0 = 0;
394 for (sw_if_index0 = 0;
395 sw_if_index0 < vec_len (hm->bool_ref_by_sw_if_index); sw_if_index0++)
397 if (hm->bool_ref_by_sw_if_index[sw_if_index0] == 0xFF)
399 vxlan_gpe_set_clear_output_feature_on_intf
400 (hm->vlib_main, sw_if_index0, 0);
407 static clib_error_t *
408 vxlan_gpe_set_ioam_rewrite_command_fn (vlib_main_t *
411 * input, vlib_cli_command_t * cmd)
413 vxlan_gpe_ioam_main_t *hm = &vxlan_gpe_ioam_main;
414 ip46_address_t local, remote;
422 clib_error_t *rv = 0;
423 vxlan4_gpe_tunnel_key_t key4;
424 vxlan6_gpe_tunnel_key_t key6;
426 vxlan_gpe_main_t *gm = &vxlan_gpe_main;
427 vxlan_gpe_tunnel_t *t = 0;
428 while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
430 if (unformat (input, "local %U", unformat_ip4_address, &local.ip4))
436 if (unformat (input, "remote %U", unformat_ip4_address, &remote.ip4))
441 else if (unformat (input, "local %U", unformat_ip6_address, &local.ip6))
447 if (unformat (input, "remote %U", unformat_ip6_address, &remote.ip6))
452 else if (unformat (input, "vni %d", &vni))
454 else if (unformat (input, "disable"))
461 return clib_error_return (0, "tunnel local address not specified");
463 return clib_error_return (0, "tunnel remote address not specified");
464 if (ipv4_set && ipv6_set)
465 return clib_error_return (0, "both IPv4 and IPv6 addresses specified");
467 && memcmp (&local.ip4, &remote.ip4,
468 sizeof (local.ip4)) == 0) || (ipv6_set
473 sizeof (local.ip6)) == 0))
474 return clib_error_return (0, "src and dst addresses are identical");
476 return clib_error_return (0, "vni not specified");
479 key4.local = local.ip4.as_u32;
480 key4.remote = remote.ip4.as_u32;
481 key4.vni = clib_host_to_net_u32 (vni << 8);
482 key4.port = clib_host_to_net_u16 (UDP_DST_PORT_VXLAN_GPE);
483 p = hash_get_mem (gm->vxlan4_gpe_tunnel_by_key, &key4);
487 key6.local.as_u64[0] = local.ip6.as_u64[0];
488 key6.local.as_u64[1] = local.ip6.as_u64[1];
489 key6.remote.as_u64[0] = remote.ip6.as_u64[0];
490 key6.remote.as_u64[1] = remote.ip6.as_u64[1];
491 key6.vni = clib_host_to_net_u32 (vni << 8);
492 key6.port = clib_host_to_net_u16 (UDP_DST_PORT_VXLAN6_GPE);
493 p = hash_get_mem (gm->vxlan6_gpe_tunnel_by_key, &key6);
497 return clib_error_return (0, "VxLAN Tunnel not found");
498 t = pool_elt_at_index (gm->tunnels, p[0]);
502 vxlan_gpe_ioam_set (t, hm->has_trace_option,
503 hm->has_pot_option, hm->has_ppc_option, ipv6_set);
507 rv = vxlan_gpe_ioam_clear (t, 0, 0, 0, 0);
514 VLIB_CLI_COMMAND (vxlan_gpe_set_ioam_rewrite_cmd, static) = {
515 .path = "set vxlan-gpe-ioam",
516 .short_help = "set vxlan-gpe-ioam vxlan <src-ip> <dst_ip> <vnid> [disable]",
517 .function = vxlan_gpe_set_ioam_rewrite_command_fn,
524 vxlan_gpe_ioam_enable (int has_trace_option,
525 int has_pot_option, int has_ppc_option)
527 vxlan_gpe_ioam_main_t *hm = &vxlan_gpe_ioam_main;
528 hm->has_trace_option = has_trace_option;
529 hm->has_pot_option = has_pot_option;
530 hm->has_ppc_option = has_ppc_option;
531 if (hm->has_trace_option)
533 vxlan_gpe_trace_profile_setup ();
540 vxlan_gpe_ioam_disable (int
542 int has_pot_option, int has_ppc_option)
544 vxlan_gpe_ioam_main_t *hm = &vxlan_gpe_ioam_main;
545 hm->has_trace_option = has_trace_option;
546 hm->has_pot_option = has_pot_option;
547 hm->has_ppc_option = has_ppc_option;
548 if (!hm->has_trace_option)
550 vxlan_gpe_trace_profile_cleanup ();
557 vxlan_gpe_set_next_override (uword next)
559 vxlan_gpe_ioam_main_t *hm = &vxlan_gpe_ioam_main;
560 hm->decap_v4_next_override = next;
564 static clib_error_t *
565 vxlan_gpe_set_ioam_flags_command_fn (vlib_main_t * vm,
567 * input, vlib_cli_command_t * cmd)
569 int has_trace_option = 0;
570 int has_pot_option = 0;
571 int has_ppc_option = 0;
572 clib_error_t *rv = 0;
573 while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
575 if (unformat (input, "trace"))
576 has_trace_option = 1;
577 else if (unformat (input, "pot"))
579 else if (unformat (input, "ppc encap"))
580 has_ppc_option = PPC_ENCAP;
581 else if (unformat (input, "ppc decap"))
582 has_ppc_option = PPC_DECAP;
583 else if (unformat (input, "ppc none"))
584 has_ppc_option = PPC_NONE;
591 vxlan_gpe_ioam_enable (has_trace_option, has_pot_option, has_ppc_option);
596 VLIB_CLI_COMMAND (vxlan_gpe_set_ioam_flags_cmd, static) =
598 .path = "set vxlan-gpe-ioam rewrite",
599 .short_help = "set vxlan-gpe-ioam [trace] [pot] [ppc <encap|decap>]",
600 .function = vxlan_gpe_set_ioam_flags_command_fn,};
604 int vxlan_gpe_ioam_disable_for_dest
605 (vlib_main_t * vm, ip46_address_t dst_addr, u32 outer_fib_index,
608 vxlan_gpe_ioam_dest_tunnels_t *t;
609 vxlan_gpe_ioam_main_t *hm = &vxlan_gpe_ioam_main;
611 vxlan_gpe_enable_disable_ioam_for_dest (hm->vlib_main,
612 dst_addr, outer_fib_index, ipv4_set,
614 if (pool_elts (hm->dst_tunnels) == 0)
616 vxlan_gpe_clear_output_feature_on_select_intfs ();
620 pool_foreach (t, hm->dst_tunnels)
622 vxlan_gpe_enable_disable_ioam_for_dest
626 (t->fp_proto == FIB_PROTOCOL_IP4), 1 /* is_add */ );
628 vxlan_gpe_clear_output_feature_on_select_intfs ();
633 static clib_error_t *vxlan_gpe_set_ioam_transit_rewrite_command_fn
634 (vlib_main_t * vm, unformat_input_t * input, vlib_cli_command_t * cmd)
636 vxlan_gpe_ioam_main_t *hm = &vxlan_gpe_ioam_main;
637 ip46_address_t dst_addr;
642 clib_error_t *rv = 0;
643 u32 outer_fib_index = 0;
644 while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
646 if (unformat (input, "dst-ip %U", unformat_ip4_address, &dst_addr.ip4))
653 (input, "dst-ip %U", unformat_ip6_address, &dst_addr.ip6))
658 else if (unformat (input, "outer-fib-index %d", &outer_fib_index))
662 else if (unformat (input, "disable"))
668 if (dst_addr_set == 0)
669 return clib_error_return (0, "tunnel destination address not specified");
670 if (ipv4_set && ipv6_set)
671 return clib_error_return (0, "both IPv4 and IPv6 addresses specified");
674 vxlan_gpe_enable_disable_ioam_for_dest (hm->vlib_main,
675 dst_addr, outer_fib_index,
680 vxlan_gpe_ioam_disable_for_dest
681 (vm, dst_addr, outer_fib_index, ipv4_set);
687 VLIB_CLI_COMMAND (vxlan_gpe_set_ioam_transit_rewrite_cmd, static) = {
688 .path = "set vxlan-gpe-ioam-transit",
689 .short_help = "set vxlan-gpe-ioam-transit dst-ip <dst_ip> [outer-fib-index <outer_fib_index>] [disable]",
690 .function = vxlan_gpe_set_ioam_transit_rewrite_command_fn,
694 clib_error_t *clear_vxlan_gpe_ioam_rewrite_command_fn
695 (vlib_main_t * vm, unformat_input_t * input, vlib_cli_command_t * cmd)
697 return (vxlan_gpe_ioam_disable (0, 0, 0));
701 VLIB_CLI_COMMAND (vxlan_gpe_clear_ioam_flags_cmd, static) =
703 .path = "clear vxlan-gpe-ioam rewrite",
704 .short_help = "clear vxlan-gpe-ioam rewrite",
705 .function = clear_vxlan_gpe_ioam_rewrite_command_fn,
711 * Function definition to backwalk a FIB node
713 static fib_node_back_walk_rc_t
714 vxlan_gpe_ioam_back_walk (fib_node_t * node, fib_node_back_walk_ctx_t * ctx)
716 vxlan_gpe_refresh_output_feature_on_all_dest ();
717 return (FIB_NODE_BACK_WALK_CONTINUE);
721 * Function definition to get a FIB node from its index
724 vxlan_gpe_ioam_fib_node_get (fib_node_index_t index)
726 vxlan_gpe_ioam_main_t *hm = &vxlan_gpe_ioam_main;
731 * Function definition to inform the FIB node that its last lock has gone.
734 vxlan_gpe_ioam_last_lock_gone (fib_node_t * node)
741 * Virtual function table registered by MPLS GRE tunnels
742 * for participation in the FIB object graph.
744 const static fib_node_vft_t vxlan_gpe_ioam_vft = {
745 .fnv_get = vxlan_gpe_ioam_fib_node_get,
746 .fnv_last_lock = vxlan_gpe_ioam_last_lock_gone,
747 .fnv_back_walk = vxlan_gpe_ioam_back_walk,
751 vxlan_gpe_ioam_interface_init (void)
753 vxlan_gpe_ioam_main_t *hm = &vxlan_gpe_ioam_main;
754 hm->fib_entry_type = fib_node_register_new_type (&vxlan_gpe_ioam_vft);
/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */