/*
 * Copyright (c) 2016 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <vnet/vxlan-gpe/vxlan_gpe.h>
#include <vnet/vxlan-gpe/vxlan_gpe_packet.h>
#include <vnet/ip/format.h>
#include <ioam/lib-vxlan-gpe/vxlan_gpe_ioam.h>
#include <vnet/dpo/load_balance.h>
#include <vnet/fib/ip4_fib.h>
#include <vnet/fib/fib_entry.h>

vxlan_gpe_ioam_main_t vxlan_gpe_ioam_main;

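/*
 * Build the iOAM rewrite for a VXLAN-GPE tunnel: place a VXLAN-GPE iOAM
 * header after the outer encap and append the requested trace and/or
 * proof-of-transit options via their registered add_options callbacks.
 */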
int
vxlan_gpe_ioam_set_rewrite (vxlan_gpe_tunnel_t * t, int has_trace_option,
                            int has_pot_option, int has_ppc_option,
                            u8 ipv6_set)
{
  vxlan_gpe_ioam_main_t *hm = &vxlan_gpe_ioam_main;
  u32 size;
  vxlan_gpe_ioam_hdr_t *vxlan_gpe_ioam_hdr;
  u8 *current;
  u8 trace_data_size = 0;
  u8 pot_data_size = 0;

  if (has_trace_option == 0 && has_pot_option == 0)
    return -1;

  /* Work out how much space we need */
  size = sizeof (vxlan_gpe_ioam_hdr_t);

  if (has_trace_option
      && hm->add_options[VXLAN_GPE_OPTION_TYPE_IOAM_TRACE] != 0)
    {
      size += sizeof (vxlan_gpe_ioam_option_t);
      size += hm->options_size[VXLAN_GPE_OPTION_TYPE_IOAM_TRACE];
    }
  if (has_pot_option
      && hm->add_options[VXLAN_GPE_OPTION_TYPE_IOAM_PROOF_OF_TRANSIT] != 0)
    {
      size += sizeof (vxlan_gpe_ioam_option_t);
      size += hm->options_size[VXLAN_GPE_OPTION_TYPE_IOAM_PROOF_OF_TRANSIT];
    }

  t->rewrite_size = size;

  if (!ipv6_set)
    {
      vxlan4_gpe_rewrite (t, size, VXLAN_GPE_PROTOCOL_IOAM,
                          hm->encap_v4_next_node);
      vxlan_gpe_ioam_hdr =
        (vxlan_gpe_ioam_hdr_t *) (t->rewrite +
                                  sizeof (ip4_vxlan_gpe_header_t));
    }
  else
    {
      vxlan6_gpe_rewrite (t, size, VXLAN_GPE_PROTOCOL_IOAM,
                          VXLAN_GPE_ENCAP_NEXT_IP6_LOOKUP);
      vxlan_gpe_ioam_hdr =
        (vxlan_gpe_ioam_hdr_t *) (t->rewrite +
                                  sizeof (ip6_vxlan_gpe_header_t));
    }

  vxlan_gpe_ioam_hdr->type = VXLAN_GPE_PROTOCOL_IOAM;
  /* Length of the header in octets */
  vxlan_gpe_ioam_hdr->length = size;
  vxlan_gpe_ioam_hdr->protocol = t->protocol;
  current = (u8 *) vxlan_gpe_ioam_hdr + sizeof (vxlan_gpe_ioam_hdr_t);

  if (has_trace_option
      && hm->add_options[VXLAN_GPE_OPTION_TYPE_IOAM_TRACE] != 0)
    {
      if (0 != hm->add_options[VXLAN_GPE_OPTION_TYPE_IOAM_TRACE] (current,
                                                                  &trace_data_size))
        return -1;
      current += trace_data_size;
    }
  if (has_pot_option
      && hm->add_options[VXLAN_GPE_OPTION_TYPE_IOAM_PROOF_OF_TRANSIT] != 0)
    {
      pot_data_size =
        hm->options_size[VXLAN_GPE_OPTION_TYPE_IOAM_PROOF_OF_TRANSIT];
      if (0 ==
          hm->add_options[VXLAN_GPE_OPTION_TYPE_IOAM_PROOF_OF_TRANSIT]
          (current, &pot_data_size))
        current += pot_data_size;
    }

  return 0;
}

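/*
 * Remove the iOAM header from a tunnel by rebuilding a plain VXLAN-GPE
 * rewrite with no options.
 */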
int
vxlan_gpe_ioam_clear_rewrite (vxlan_gpe_tunnel_t * t, int has_trace_option,
                              int has_pot_option, int has_ppc_option,
                              u8 ipv6_set)
{
  t->rewrite_size = 0;

  if (!ipv6_set)
    {
      vxlan4_gpe_rewrite (t, 0, 0, VXLAN_GPE_ENCAP_NEXT_IP4_LOOKUP);
    }
  else
    {
      vxlan6_gpe_rewrite (t, 0, 0, VXLAN_GPE_ENCAP_NEXT_IP6_LOOKUP);
    }

  return 0;
}

clib_error_t *
vxlan_gpe_ioam_clear (vxlan_gpe_tunnel_t * t,
                      int has_trace_option, int has_pot_option,
                      int has_ppc_option, u8 ipv6_set)
{
  int rv;
  rv = vxlan_gpe_ioam_clear_rewrite (t, 0, 0, 0, 0);

  if (rv == 0)
    {
      return (0);
    }
  else
    {
      return clib_error_return_code (0, rv, 0,
                                     "vxlan_gpe_ioam_clear_rewrite returned %d",
                                     rv);
    }
}

clib_error_t *
vxlan_gpe_ioam_set (vxlan_gpe_tunnel_t * t,
                    int has_trace_option, int has_pot_option,
                    int has_ppc_option, u8 ipv6_set)
{
  int rv;
  rv = vxlan_gpe_ioam_set_rewrite (t, has_trace_option,
                                   has_pot_option, has_ppc_option, ipv6_set);

  if (rv == 0)
    {
      return (0);
    }
  else
    {
      return clib_error_return_code (0, rv, 0,
                                     "vxlan_gpe_ioam_set_rewrite returned %d",
                                     rv);
    }
}

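/*
 * Enable or disable the "vxlan-gpe-transit-ioam" feature on the ip4-output
 * feature arc of a single interface.
 */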
static void
vxlan_gpe_set_clear_output_feature_on_intf (vlib_main_t * vm,
                                            u32 sw_if_index0, u8 is_add)
{
  vnet_feature_enable_disable ("ip4-output", "vxlan-gpe-transit-ioam",
                               sw_if_index0, is_add,
                               0 /* void *feature_config */ ,
                               0 /* u32 n_feature_config_bytes */ );
  return;
}

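/*
 * Walk all software interfaces and remove the transit iOAM output feature
 * from each of them.
 */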
void
vxlan_gpe_clear_output_feature_on_all_intfs (vlib_main_t * vm)
{
  vnet_sw_interface_t *si = 0;
  vnet_main_t *vnm = vnet_get_main ();
  vnet_interface_main_t *im = &vnm->interface_main;

  pool_foreach (si, im->sw_interfaces, (
    {
      vxlan_gpe_set_clear_output_feature_on_intf (vm, si->sw_if_index, 0);
    }));
  return;
}

extern fib_forward_chain_type_t
fib_entry_get_default_chain_type (const fib_entry_t * fib_entry);

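/*
 * Enable or disable transit iOAM for a tunnel destination: resolve the
 * destination prefix in the FIB, walk the (possibly nested) load-balance
 * buckets to find the egress interfaces, toggle the output feature on each
 * of them, and track the destination in dst_tunnels so a FIB back-walk can
 * refresh the feature when the route changes. IPv6 destinations are not
 * handled yet.
 */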
int
vxlan_gpe_enable_disable_ioam_for_dest (vlib_main_t * vm,
                                        ip46_address_t dst_addr,
                                        u32 outer_fib_index,
                                        u8 is_ipv4, u8 is_add)
{
  vxlan_gpe_ioam_main_t *hm = &vxlan_gpe_ioam_main;
  u32 fib_index0 = 0;
  u32 sw_if_index0 = ~0;

  fib_node_index_t fei = ~0;
  fib_entry_t *fib_entry;
  u32 adj_index0;
  ip_adjacency_t *adj0;
  fib_prefix_t fib_prefix;
  //fib_forward_chain_type_t fct;
  load_balance_t *lb_m, *lb_b;
  const dpo_id_t *dpo0, *dpo1;
  u32 i, j;
  //vnet_hw_interface_t *hw;

  if (is_ipv4)
    {
      memset (&fib_prefix, 0, sizeof (fib_prefix_t));
      fib_prefix.fp_len = 32;
      fib_prefix.fp_proto = FIB_PROTOCOL_IP4;
      fib_prefix.fp_addr = dst_addr;
    }
  else
    {
      return 0;
    }

  fei = fib_table_lookup (fib_index0, &fib_prefix);
  fib_entry = fib_entry_get (fei);

  //fct = fib_entry_get_default_chain_type (fib_entry);

  if (!dpo_id_is_valid (&fib_entry->fe_lb /*[fct] */ ))
    {
      return (-1);
    }

  lb_m = load_balance_get (fib_entry->fe_lb /*[fct] */ .dpoi_index);

  for (i = 0; i < lb_m->lb_n_buckets; i++)
    {
      dpo0 = load_balance_get_bucket_i (lb_m, i);

      if (dpo0->dpoi_type == DPO_LOAD_BALANCE)
        {
          lb_b = load_balance_get (dpo0->dpoi_index);

          for (j = 0; j < lb_b->lb_n_buckets; j++)
            {
              dpo1 = load_balance_get_bucket_i (lb_b, j);

              if (dpo1->dpoi_type == DPO_ADJACENCY)
                {
                  adj_index0 = dpo1->dpoi_index;

                  if (ADJ_INDEX_INVALID == adj_index0)
                    {
                      continue;
                    }

                  adj0 = adj_get (adj_index0);
                  sw_if_index0 = adj0->rewrite_header.sw_if_index;

                  if (~0 == sw_if_index0)
                    {
                      continue;
                    }

                  if (is_add)
                    {
                      vnet_feature_enable_disable ("ip4-output",
                                                   "vxlan-gpe-transit-ioam",
                                                   sw_if_index0, is_add,
                                                   0 /* void *feature_config */ ,
                                                   0 /* u32 n_feature_config_bytes */ );

                      vec_validate_init_empty (hm->bool_ref_by_sw_if_index,
                                               sw_if_index0, ~0);
                      hm->bool_ref_by_sw_if_index[sw_if_index0] = 1;
                    }
                  else
                    {
                      hm->bool_ref_by_sw_if_index[sw_if_index0] = ~0;
                    }
                }
            }
        }
    }

  if (is_ipv4)
    {
      uword *t = NULL;
      vxlan_gpe_ioam_dest_tunnels_t *t1;
      fib_prefix_t key4, *key4_copy;
      hash_pair_t *hp;
      memset (&key4, 0, sizeof (key4));
      key4.fp_proto = FIB_PROTOCOL_IP4;
      key4.fp_addr.ip4.as_u32 = fib_prefix.fp_addr.ip4.as_u32;
      t = hash_get_mem (hm->dst_by_ip4, &key4);
      if (is_add)
        {
          if (t)
            {
              return 0;
            }
          pool_get_aligned (hm->dst_tunnels, t1, CLIB_CACHE_LINE_BYTES);
          memset (t1, 0, sizeof (*t1));
          t1->fp_proto = FIB_PROTOCOL_IP4;
          t1->dst_addr.ip4.as_u32 = fib_prefix.fp_addr.ip4.as_u32;
          key4_copy = clib_mem_alloc (sizeof (*key4_copy));
          clib_memcpy (key4_copy, &key4, sizeof (*key4_copy));
          hash_set_mem (hm->dst_by_ip4, key4_copy, t1 - hm->dst_tunnels);
          /*
           * Attach to the FIB entry for the VxLAN-GPE destination
           * and become its child. The dest route will invoke a callback
           * when the FIB entry changes; this is used to re-program the
           * output feature on the egress interface.
           */
          const fib_prefix_t tun_dst_pfx = {
            .fp_len = 32,
            .fp_proto = FIB_PROTOCOL_IP4,
            .fp_addr = {.ip4 = t1->dst_addr.ip4,}
          };

          t1->fib_entry_index =
            fib_table_entry_special_add (outer_fib_index,
                                         &tun_dst_pfx,
                                         FIB_SOURCE_RR, FIB_ENTRY_FLAG_NONE);
          t1->sibling_index =
            fib_entry_child_add (t1->fib_entry_index,
                                 hm->fib_entry_type, t1 - hm->dst_tunnels);
          t1->outer_fib_index = outer_fib_index;
        }
      else
        {
          if (!t)
            {
              return 0;
            }
          t1 = pool_elt_at_index (hm->dst_tunnels, t[0]);
          hp = hash_get_pair (hm->dst_by_ip4, &key4);
          key4_copy = (void *) (hp->key);
          hash_unset_mem (hm->dst_by_ip4, &key4);
          clib_mem_free (key4_copy);
          pool_put (hm->dst_tunnels, t1);
        }
    }
  else
    {
      /* TBD for IPv6 */
    }

  return 0;
}

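/*
 * Re-program the output feature for every tracked destination. Called from
 * the FIB back-walk so that a route change moves the feature to the new
 * egress interfaces.
 */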
void
vxlan_gpe_refresh_output_feature_on_all_dest (void)
{
  vxlan_gpe_ioam_main_t *hm = &vxlan_gpe_ioam_main;
  vxlan_gpe_ioam_dest_tunnels_t *t;
  u32 i;
  if (pool_elts (hm->dst_tunnels) == 0)
    return;
  vxlan_gpe_clear_output_feature_on_all_intfs (hm->vlib_main);
  i = vec_len (hm->bool_ref_by_sw_if_index);
  vec_free (hm->bool_ref_by_sw_if_index);
  vec_validate_init_empty (hm->bool_ref_by_sw_if_index, i, ~0);
  pool_foreach (t, hm->dst_tunnels, (
    {
      vxlan_gpe_enable_disable_ioam_for_dest (hm->vlib_main,
                                              t->dst_addr,
                                              t->outer_fib_index,
                                              (t->fp_proto == FIB_PROTOCOL_IP4),
                                              1 /* is_add */ );
    }));
  return;
}

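/*
 * Remove the output feature from interfaces whose reference entry has been
 * reset to ~0, i.e. interfaces that no longer serve any tracked destination.
 */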
void
vxlan_gpe_clear_output_feature_on_select_intfs (void)
{
  vxlan_gpe_ioam_main_t *hm = &vxlan_gpe_ioam_main;
  u32 sw_if_index0 = 0;
  for (sw_if_index0 = 0;
       sw_if_index0 < vec_len (hm->bool_ref_by_sw_if_index); sw_if_index0++)
    {
      if (hm->bool_ref_by_sw_if_index[sw_if_index0] == 0xFF)
        {
          vxlan_gpe_set_clear_output_feature_on_intf
            (hm->vlib_main, sw_if_index0, 0);
        }
    }

  return;
}

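/*
 * CLI handler for "set vxlan-gpe-ioam": look up an existing VXLAN-GPE tunnel
 * by local/remote address and VNI, then add or remove the iOAM rewrite on it.
 */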
static clib_error_t *
vxlan_gpe_set_ioam_rewrite_command_fn (vlib_main_t * vm,
                                       unformat_input_t * input,
                                       vlib_cli_command_t * cmd)
{
  vxlan_gpe_ioam_main_t *hm = &vxlan_gpe_ioam_main;
  ip46_address_t local, remote;
  u8 local_set = 0;
  u8 remote_set = 0;
  u8 ipv4_set = 0;
  u8 ipv6_set = 0;
  u32 vni;
  u8 vni_set = 0;
  u8 disable = 0;
  clib_error_t *rv = 0;
  vxlan4_gpe_tunnel_key_t key4;
  vxlan6_gpe_tunnel_key_t key6;
  uword *p;
  vxlan_gpe_main_t *gm = &vxlan_gpe_main;
  vxlan_gpe_tunnel_t *t = 0;
  while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
    {
      if (unformat (input, "local %U", unformat_ip4_address, &local.ip4))
        {
          local_set = 1;
          ipv4_set = 1;
        }
      else
        if (unformat (input, "remote %U", unformat_ip4_address, &remote.ip4))
        {
          remote_set = 1;
          ipv4_set = 1;
        }
      else if (unformat (input, "local %U", unformat_ip6_address, &local.ip6))
        {
          local_set = 1;
          ipv6_set = 1;
        }
      else
        if (unformat (input, "remote %U", unformat_ip6_address, &remote.ip6))
        {
          remote_set = 1;
          ipv6_set = 1;
        }
      else if (unformat (input, "vni %d", &vni))
        vni_set = 1;
      else if (unformat (input, "disable"))
        disable = 1;
      else
        break;
    }

  if (local_set == 0)
    return clib_error_return (0, "tunnel local address not specified");
  if (remote_set == 0)
    return clib_error_return (0, "tunnel remote address not specified");
  if (ipv4_set && ipv6_set)
    return clib_error_return (0, "both IPv4 and IPv6 addresses specified");
  if ((ipv4_set && memcmp (&local.ip4, &remote.ip4, sizeof (local.ip4)) == 0)
      || (ipv6_set
          && memcmp (&local.ip6, &remote.ip6, sizeof (local.ip6)) == 0))
    return clib_error_return (0, "src and dst addresses are identical");
  if (vni_set == 0)
    return clib_error_return (0, "vni not specified");
  if (!ipv6_set)
    {
      key4.local = local.ip4.as_u32;
      key4.remote = remote.ip4.as_u32;
      key4.vni = clib_host_to_net_u32 (vni << 8);
      key4.pad = 0;
      p = hash_get_mem (gm->vxlan4_gpe_tunnel_by_key, &key4);
    }
  else
    {
      key6.local.as_u64[0] = local.ip6.as_u64[0];
      key6.local.as_u64[1] = local.ip6.as_u64[1];
      key6.remote.as_u64[0] = remote.ip6.as_u64[0];
      key6.remote.as_u64[1] = remote.ip6.as_u64[1];
      key6.vni = clib_host_to_net_u32 (vni << 8);
      p = hash_get_mem (gm->vxlan6_gpe_tunnel_by_key, &key6);
    }

  if (!p)
    return clib_error_return (0, "VxLAN Tunnel not found");
  t = pool_elt_at_index (gm->tunnels, p[0]);
  if (!disable)
    {
      rv =
        vxlan_gpe_ioam_set (t, hm->has_trace_option,
                            hm->has_pot_option, hm->has_ppc_option, ipv6_set);
    }
  else
    {
      rv = vxlan_gpe_ioam_clear (t, 0, 0, 0, 0);
    }
  return rv;
}

/* *INDENT-OFF* */
VLIB_CLI_COMMAND (vxlan_gpe_set_ioam_rewrite_cmd, static) = {
  .path = "set vxlan-gpe-ioam",
  .short_help = "set vxlan-gpe-ioam local <local-ip> remote <remote-ip> vni <vni> [disable]",
  .function = vxlan_gpe_set_ioam_rewrite_command_fn,
};
/* *INDENT-ON* */

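/*
 * Record which iOAM options (trace, proof-of-transit, ppc) are enabled
 * globally and set up the trace profile when tracing is requested.
 */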
clib_error_t *
vxlan_gpe_ioam_enable (int has_trace_option,
                       int has_pot_option, int has_ppc_option)
{
  vxlan_gpe_ioam_main_t *hm = &vxlan_gpe_ioam_main;
  hm->has_trace_option = has_trace_option;
  hm->has_pot_option = has_pot_option;
  hm->has_ppc_option = has_ppc_option;
  if (hm->has_trace_option)
    {
      vxlan_gpe_trace_profile_setup ();
    }

  return 0;
}

clib_error_t *
vxlan_gpe_ioam_disable (int has_trace_option,
                        int has_pot_option, int has_ppc_option)
{
  vxlan_gpe_ioam_main_t *hm = &vxlan_gpe_ioam_main;
  hm->has_trace_option = has_trace_option;
  hm->has_pot_option = has_pot_option;
  hm->has_ppc_option = has_ppc_option;
  if (!hm->has_trace_option)
    {
      vxlan_gpe_trace_profile_cleanup ();
    }

  return 0;
}

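/*
 * Allow another component to override the next node taken after iOAM
 * processing on the IPv4 decap path.
 */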
void
vxlan_gpe_set_next_override (uword next)
{
  vxlan_gpe_ioam_main_t *hm = &vxlan_gpe_ioam_main;
  hm->decap_v4_next_override = next;
  return;
}

static clib_error_t *
vxlan_gpe_set_ioam_flags_command_fn (vlib_main_t * vm,
                                     unformat_input_t * input,
                                     vlib_cli_command_t * cmd)
{
  int has_trace_option = 0;
  int has_pot_option = 0;
  int has_ppc_option = 0;
  clib_error_t *rv = 0;
  while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
    {
      if (unformat (input, "trace"))
        has_trace_option = 1;
      else if (unformat (input, "pot"))
        has_pot_option = 1;
      else if (unformat (input, "ppc encap"))
        has_ppc_option = PPC_ENCAP;
      else if (unformat (input, "ppc decap"))
        has_ppc_option = PPC_DECAP;
      else if (unformat (input, "ppc none"))
        has_ppc_option = PPC_NONE;
      else
        break;
    }

  rv =
    vxlan_gpe_ioam_enable (has_trace_option, has_pot_option, has_ppc_option);
  return rv;
}

/* *INDENT-OFF* */
VLIB_CLI_COMMAND (vxlan_gpe_set_ioam_flags_cmd, static) = {
  .path = "set vxlan-gpe-ioam rewrite",
  .short_help = "set vxlan-gpe-ioam rewrite [trace] [pot] [ppc <encap|decap|none>]",
  .function = vxlan_gpe_set_ioam_flags_command_fn,
};
/* *INDENT-ON* */

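/*
 * Disable transit iOAM for one destination, then re-program the remaining
 * destinations and strip the output feature from interfaces that no longer
 * need it.
 */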
int vxlan_gpe_ioam_disable_for_dest
  (vlib_main_t * vm, ip46_address_t dst_addr, u32 outer_fib_index,
   u8 ipv4_set)
{
  vxlan_gpe_ioam_dest_tunnels_t *t;
  vxlan_gpe_ioam_main_t *hm = &vxlan_gpe_ioam_main;

  vxlan_gpe_enable_disable_ioam_for_dest (hm->vlib_main,
                                          dst_addr, outer_fib_index, ipv4_set,
                                          0);
  if (pool_elts (hm->dst_tunnels) == 0)
    {
      vxlan_gpe_clear_output_feature_on_select_intfs ();
      return 0;
    }

  pool_foreach (t, hm->dst_tunnels, (
    {
      vxlan_gpe_enable_disable_ioam_for_dest (hm->vlib_main,
                                              t->dst_addr,
                                              t->outer_fib_index,
                                              (t->fp_proto == FIB_PROTOCOL_IP4),
                                              1 /* is_add */ );
    }));
  vxlan_gpe_clear_output_feature_on_select_intfs ();
  return (0);
}

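/*
 * CLI handler for "set vxlan-gpe-ioam-transit": enable or disable the
 * transit iOAM output feature for traffic towards a tunnel destination.
 */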
static clib_error_t *vxlan_gpe_set_ioam_transit_rewrite_command_fn
  (vlib_main_t * vm, unformat_input_t * input, vlib_cli_command_t * cmd)
{
  vxlan_gpe_ioam_main_t *hm = &vxlan_gpe_ioam_main;
  ip46_address_t dst_addr;
  u8 dst_addr_set = 0;
  u8 ipv4_set = 0;
  u8 ipv6_set = 0;
  u8 disable = 0;
  clib_error_t *rv = 0;
  u32 outer_fib_index = 0;
  while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
    {
      if (unformat (input, "dst-ip %U", unformat_ip4_address, &dst_addr.ip4))
        {
          dst_addr_set = 1;
          ipv4_set = 1;
        }
      else
        if (unformat
            (input, "dst-ip %U", unformat_ip6_address, &dst_addr.ip6))
        {
          dst_addr_set = 1;
          ipv6_set = 1;
        }
      else if (unformat (input, "outer-fib-index %d", &outer_fib_index))
        {
        }
      else if (unformat (input, "disable"))
        disable = 1;
      else
        break;
    }

  if (dst_addr_set == 0)
    return clib_error_return (0, "tunnel destination address not specified");
  if (ipv4_set && ipv6_set)
    return clib_error_return (0, "both IPv4 and IPv6 addresses specified");
  if (!disable)
    {
      vxlan_gpe_enable_disable_ioam_for_dest (hm->vlib_main,
                                              dst_addr, outer_fib_index,
                                              ipv4_set, 1);
    }
  else
    {
      vxlan_gpe_ioam_disable_for_dest
        (vm, dst_addr, outer_fib_index, ipv4_set);
    }
  return rv;
}

/* *INDENT-OFF* */
VLIB_CLI_COMMAND (vxlan_gpe_set_ioam_transit_rewrite_cmd, static) = {
  .path = "set vxlan-gpe-ioam-transit",
  .short_help = "set vxlan-gpe-ioam-transit dst-ip <dst_ip> [outer-fib-index <outer_fib_index>] [disable]",
  .function = vxlan_gpe_set_ioam_transit_rewrite_command_fn,
};
/* *INDENT-ON* */

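/*
 * CLI handler for "clear vxlan-gpe-ioam rewrite": turn off all iOAM options
 * globally.
 */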
clib_error_t *clear_vxlan_gpe_ioam_rewrite_command_fn
  (vlib_main_t * vm, unformat_input_t * input, vlib_cli_command_t * cmd)
{
  return (vxlan_gpe_ioam_disable (0, 0, 0));
}

/* *INDENT-OFF* */
VLIB_CLI_COMMAND (vxlan_gpe_clear_ioam_flags_cmd, static) = {
  .path = "clear vxlan-gpe-ioam rewrite",
  .short_help = "clear vxlan-gpe-ioam rewrite",
  .function = clear_vxlan_gpe_ioam_rewrite_command_fn,
};
/* *INDENT-ON* */


/**
 * Function definition to backwalk a FIB node
 */
static fib_node_back_walk_rc_t
vxlan_gpe_ioam_back_walk (fib_node_t * node, fib_node_back_walk_ctx_t * ctx)
{
  vxlan_gpe_refresh_output_feature_on_all_dest ();
  return (FIB_NODE_BACK_WALK_CONTINUE);
}

/**
 * Function definition to get a FIB node from its index
 */
static fib_node_t *
vxlan_gpe_ioam_fib_node_get (fib_node_index_t index)
{
  vxlan_gpe_ioam_main_t *hm = &vxlan_gpe_ioam_main;
  return (&hm->node);
}

/**
 * Function definition to inform the FIB node that its last lock has gone.
 */
static void
vxlan_gpe_ioam_last_lock_gone (fib_node_t * node)
{
  ASSERT (0);
}


/*
 * Virtual function table registered by the VXLAN-GPE iOAM module
 * for participation in the FIB object graph.
 */
const static fib_node_vft_t vxlan_gpe_ioam_vft = {
  .fnv_get = vxlan_gpe_ioam_fib_node_get,
  .fnv_last_lock = vxlan_gpe_ioam_last_lock_gone,
  .fnv_back_walk = vxlan_gpe_ioam_back_walk,
};

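/*
 * Register this module as a FIB node type so that route changes for tracked
 * destinations trigger the back-walk above.
 */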
void
vxlan_gpe_ioam_interface_init (void)
{
  vxlan_gpe_ioam_main_t *hm = &vxlan_gpe_ioam_main;
  hm->fib_entry_type = fib_node_register_new_type (&vxlan_gpe_ioam_vft);
  return;
}

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */