/*
 * Copyright (c) 2016 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <vnet/vxlan-gpe/vxlan_gpe.h>
#include <vnet/vxlan-gpe/vxlan_gpe_packet.h>
#include <vnet/ip/format.h>
#include <ioam/lib-vxlan-gpe/vxlan_gpe_ioam.h>
#include <vnet/fib/ip6_fib.h>
#include <vnet/fib/ip4_fib.h>
#include <vnet/fib/fib_entry.h>

vxlan_gpe_ioam_main_t vxlan_gpe_ioam_main;

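/**
 * @brief Build the iOAM rewrite for a VXLAN-GPE tunnel.
 *
 * Sizes and populates the iOAM header directly after the IPv4 or IPv6
 * VXLAN-GPE encapsulation, then calls the registered option handlers to
 * append the requested trace and/or proof-of-transit options:
 *
 *   [ip4/ip6 + udp + vxlan-gpe][vxlan_gpe_ioam_hdr_t][trace opt][pot opt]
 *
 * Returns 0 on success, -1 if no option was requested or the trace option
 * handler fails.
 */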
int
vxlan_gpe_ioam_set_rewrite (vxlan_gpe_tunnel_t * t, int has_trace_option,
                            int has_pot_option, int has_ppc_option,
                            u8 ipv6_set)
{
  vxlan_gpe_ioam_main_t *hm = &vxlan_gpe_ioam_main;
  u32 size;
  vxlan_gpe_ioam_hdr_t *vxlan_gpe_ioam_hdr;
  u8 *current;
  u8 trace_data_size = 0;
  u8 pot_data_size = 0;

  if (has_trace_option == 0 && has_pot_option == 0)
    return -1;

  /* Work out how much space we need */
  size = sizeof (vxlan_gpe_ioam_hdr_t);

  if (has_trace_option
      && hm->add_options[VXLAN_GPE_OPTION_TYPE_IOAM_TRACE] != 0)
    {
      size += sizeof (vxlan_gpe_ioam_option_t);
      size += hm->options_size[VXLAN_GPE_OPTION_TYPE_IOAM_TRACE];
    }
  if (has_pot_option
      && hm->add_options[VXLAN_GPE_OPTION_TYPE_IOAM_PROOF_OF_TRANSIT] != 0)
    {
      size += sizeof (vxlan_gpe_ioam_option_t);
      size += hm->options_size[VXLAN_GPE_OPTION_TYPE_IOAM_PROOF_OF_TRANSIT];
    }

  t->rewrite_size = size;

  if (!ipv6_set)
    {
      vxlan4_gpe_rewrite (t, size, VXLAN_GPE_PROTOCOL_IOAM,
                          hm->encap_v4_next_node);
      vxlan_gpe_ioam_hdr =
        (vxlan_gpe_ioam_hdr_t *) (t->rewrite +
                                  sizeof (ip4_vxlan_gpe_header_t));
    }
  else
    {
      vxlan6_gpe_rewrite (t, size, VXLAN_GPE_PROTOCOL_IOAM,
                          VXLAN_GPE_ENCAP_NEXT_IP6_LOOKUP);
      vxlan_gpe_ioam_hdr =
        (vxlan_gpe_ioam_hdr_t *) (t->rewrite +
                                  sizeof (ip6_vxlan_gpe_header_t));
    }

  vxlan_gpe_ioam_hdr->type = VXLAN_GPE_PROTOCOL_IOAM;
  /* Length of the header in octets */
  vxlan_gpe_ioam_hdr->length = size;
  vxlan_gpe_ioam_hdr->protocol = t->protocol;
  current = (u8 *) vxlan_gpe_ioam_hdr + sizeof (vxlan_gpe_ioam_hdr_t);

  if (has_trace_option
      && hm->add_options[VXLAN_GPE_OPTION_TYPE_IOAM_TRACE] != 0)
    {
      if (0 != hm->add_options[VXLAN_GPE_OPTION_TYPE_IOAM_TRACE] (current,
                                                                  &trace_data_size))
        return -1;
      current += trace_data_size;
    }
  if (has_pot_option
      && hm->add_options[VXLAN_GPE_OPTION_TYPE_IOAM_PROOF_OF_TRANSIT] != 0)
    {
      pot_data_size =
        hm->options_size[VXLAN_GPE_OPTION_TYPE_IOAM_PROOF_OF_TRANSIT];
      if (0 ==
          hm->add_options[VXLAN_GPE_OPTION_TYPE_IOAM_PROOF_OF_TRANSIT]
          (current, &pot_data_size))
        current += pot_data_size;
    }

  return 0;
}

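/**
 * @brief Remove the iOAM rewrite from a VXLAN-GPE tunnel.
 *
 * Resets the tunnel rewrite to a plain VXLAN-GPE encapsulation with no
 * iOAM header or options; the option flags are accepted for symmetry but
 * are not used.
 */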
int
vxlan_gpe_ioam_clear_rewrite (vxlan_gpe_tunnel_t * t, int has_trace_option,
                              int has_pot_option, int has_ppc_option,
                              u8 ipv6_set)
{
  t->rewrite_size = 0;

  if (!ipv6_set)
    {
      vxlan4_gpe_rewrite (t, 0, 0, VXLAN_GPE_ENCAP_NEXT_IP4_LOOKUP);
    }
  else
    {
      vxlan6_gpe_rewrite (t, 0, 0, VXLAN_GPE_ENCAP_NEXT_IP6_LOOKUP);
    }

  return 0;
}

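/**
 * @brief clib_error_t wrapper around vxlan_gpe_ioam_clear_rewrite().
 */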
clib_error_t *
vxlan_gpe_ioam_clear (vxlan_gpe_tunnel_t * t,
                      int has_trace_option, int has_pot_option,
                      int has_ppc_option, u8 ipv6_set)
{
  int rv;
  rv = vxlan_gpe_ioam_clear_rewrite (t, 0, 0, 0, 0);

  if (rv == 0)
    {
      return (0);
    }
  else
    {
      return clib_error_return_code (0, rv, 0,
                                     "vxlan_gpe_ioam_clear_rewrite returned %d",
                                     rv);
    }
}

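/**
 * @brief clib_error_t wrapper around vxlan_gpe_ioam_set_rewrite().
 */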
clib_error_t *
vxlan_gpe_ioam_set (vxlan_gpe_tunnel_t * t,
                    int has_trace_option, int has_pot_option,
                    int has_ppc_option, u8 ipv6_set)
{
  int rv;
  rv = vxlan_gpe_ioam_set_rewrite (t, has_trace_option,
                                   has_pot_option, has_ppc_option, ipv6_set);

  if (rv == 0)
    {
      return (0);
    }
  else
    {
      return clib_error_return_code (0, rv, 0,
                                     "vxlan_gpe_ioam_set_rewrite returned %d",
                                     rv);
    }
}

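/**
 * @brief Enable or disable the "vxlan-gpe-transit-ioam" feature on the
 * ip4-output arc of a single interface.
 */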
static void
vxlan_gpe_set_clear_output_feature_on_intf (vlib_main_t * vm,
                                            u32 sw_if_index0, u8 is_add)
{
  vnet_feature_enable_disable ("ip4-output", "vxlan-gpe-transit-ioam",
                               sw_if_index0, is_add,
                               0 /* void *feature_config */ ,
                               0 /* u32 n_feature_config_bytes */ );
  return;
}

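/**
 * @brief Disable the transit iOAM output feature on every software interface.
 */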
void
vxlan_gpe_clear_output_feature_on_all_intfs (vlib_main_t * vm)
{
  vnet_sw_interface_t *si = 0;
  vnet_main_t *vnm = vnet_get_main ();
  vnet_interface_main_t *im = &vnm->interface_main;

  pool_foreach (si, im->sw_interfaces,
                ({
                  vxlan_gpe_set_clear_output_feature_on_intf
                    (vm, si->sw_if_index, 0);
                }));
  return;
}

extern fib_forward_chain_type_t
fib_entry_get_default_chain_type (const fib_entry_t * fib_entry);

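/**
 * @brief Enable or disable transit iOAM for a VXLAN-GPE destination.
 *
 * Looks up the /32 FIB entry for the destination, walks its load-balance
 * buckets (one nested level deep) to find the adjacencies, and enables or
 * disables the "vxlan-gpe-transit-ioam" ip4-output feature on each egress
 * interface found. On add, the destination is tracked in dst_tunnels /
 * dst_by_ip4 and registered as a child of its FIB entry so that FIB
 * back-walks can re-program the output feature. IPv6 destinations are not
 * yet supported.
 */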
int
vxlan_gpe_enable_disable_ioam_for_dest (vlib_main_t * vm,
                                        ip46_address_t dst_addr,
                                        u32 outer_fib_index,
                                        u8 is_ipv4, u8 is_add)
{
  vxlan_gpe_ioam_main_t *hm = &vxlan_gpe_ioam_main;
  u32 fib_index0 = 0;
  u32 sw_if_index0 = ~0;

  fib_node_index_t fei = ~0;
  fib_entry_t *fib_entry;
  u32 adj_index0;
  ip_adjacency_t *adj0;
  fib_prefix_t fib_prefix;
  //fib_forward_chain_type_t fct;
  load_balance_t *lb_m, *lb_b;
  const dpo_id_t *dpo0, *dpo1;
  u32 i, j;
  //vnet_hw_interface_t *hw;

  if (is_ipv4)
    {
      memset (&fib_prefix, 0, sizeof (fib_prefix_t));
      fib_prefix.fp_len = 32;
      fib_prefix.fp_proto = FIB_PROTOCOL_IP4;
      fib_prefix.fp_addr = dst_addr;
    }
  else
    {
      return 0;
    }

  fei = fib_table_lookup (fib_index0, &fib_prefix);
  fib_entry = fib_entry_get (fei);

  //fct = fib_entry_get_default_chain_type (fib_entry);

  if (!dpo_id_is_valid (&fib_entry->fe_lb /*[fct] */ ))
    {
      return (-1);
    }

  lb_m = load_balance_get (fib_entry->fe_lb /*[fct] */ .dpoi_index);

  for (i = 0; i < lb_m->lb_n_buckets; i++)
    {
      dpo0 = load_balance_get_bucket_i (lb_m, i);

      if (dpo0->dpoi_type == DPO_LOAD_BALANCE)
        {
          lb_b = load_balance_get (dpo0->dpoi_index);

          for (j = 0; j < lb_b->lb_n_buckets; j++)
            {
              dpo1 = load_balance_get_bucket_i (lb_b, j);

              if (dpo1->dpoi_type == DPO_ADJACENCY)
                {
                  adj_index0 = dpo1->dpoi_index;

                  if (ADJ_INDEX_INVALID == adj_index0)
                    {
                      continue;
                    }

                  adj0 =
                    ip_get_adjacency (&(ip4_main.lookup_main), adj_index0);
                  sw_if_index0 = adj0->rewrite_header.sw_if_index;

                  if (~0 == sw_if_index0)
                    {
                      continue;
                    }

                  if (is_add)
                    {
                      vnet_feature_enable_disable ("ip4-output",
                                                   "vxlan-gpe-transit-ioam",
                                                   sw_if_index0, is_add,
                                                   0 /* void *feature_config */ ,
                                                   0 /* u32 n_feature_config_bytes */ );

                      vec_validate_init_empty (hm->bool_ref_by_sw_if_index,
                                               sw_if_index0, ~0);
                      hm->bool_ref_by_sw_if_index[sw_if_index0] = 1;
                    }
                  else
                    {
                      hm->bool_ref_by_sw_if_index[sw_if_index0] = ~0;
                    }
                }
            }
        }
    }

  if (is_ipv4)
    {
      uword *t = NULL;
      vxlan_gpe_ioam_dest_tunnels_t *t1;
      fib_prefix_t key4, *key4_copy;
      hash_pair_t *hp;
      memset (&key4, 0, sizeof (key4));
      key4.fp_proto = FIB_PROTOCOL_IP4;
      key4.fp_addr.ip4.as_u32 = fib_prefix.fp_addr.ip4.as_u32;
      t = hash_get_mem (hm->dst_by_ip4, &key4);
      if (is_add)
        {
          if (t)
            {
              return 0;
            }
          pool_get_aligned (hm->dst_tunnels, t1, CLIB_CACHE_LINE_BYTES);
          memset (t1, 0, sizeof (*t1));
          t1->fp_proto = FIB_PROTOCOL_IP4;
          t1->dst_addr.ip4.as_u32 = fib_prefix.fp_addr.ip4.as_u32;
          key4_copy = clib_mem_alloc (sizeof (*key4_copy));
          clib_memcpy (key4_copy, &key4, sizeof (*key4_copy));
          hash_set_mem (hm->dst_by_ip4, key4_copy, t1 - hm->dst_tunnels);
          /*
           * Attach to the FIB entry for the VxLAN-GPE destination
           * and become its child. The dest route will invoke a callback
           * when the fib entry changes, which can be used to
           * re-program the output feature on the egress interface.
           */
          const fib_prefix_t tun_dst_pfx = {
            .fp_len = 32,
            .fp_proto = FIB_PROTOCOL_IP4,
            .fp_addr = {.ip4 = t1->dst_addr.ip4,}
          };

          t1->fib_entry_index =
            fib_table_entry_special_add (outer_fib_index,
                                         &tun_dst_pfx,
                                         FIB_SOURCE_RR,
                                         FIB_ENTRY_FLAG_NONE,
                                         ADJ_INDEX_INVALID);
          t1->sibling_index =
            fib_entry_child_add (t1->fib_entry_index,
                                 hm->fib_entry_type, t1 - hm->dst_tunnels);
          t1->outer_fib_index = outer_fib_index;
        }
      else
        {
          if (!t)
            {
              return 0;
            }
          t1 = pool_elt_at_index (hm->dst_tunnels, t[0]);
          hp = hash_get_pair (hm->dst_by_ip4, &key4);
          key4_copy = (void *) (hp->key);
          hash_unset_mem (hm->dst_by_ip4, &key4);
          clib_mem_free (key4_copy);
          pool_put (hm->dst_tunnels, t1);
        }
    }
  else
    {
      // TBD for IPv6
    }

  return 0;
}

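/**
 * @brief Re-program the transit iOAM output feature for all tracked
 * destinations, typically after a FIB back-walk signals a path change.
 */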
void
vxlan_gpe_refresh_output_feature_on_all_dest (void)
{
  vxlan_gpe_ioam_main_t *hm = &vxlan_gpe_ioam_main;
  vxlan_gpe_ioam_dest_tunnels_t *t;
  u32 i;

  if (pool_elts (hm->dst_tunnels) == 0)
    return;

  vxlan_gpe_clear_output_feature_on_all_intfs (hm->vlib_main);
  i = vec_len (hm->bool_ref_by_sw_if_index);
  vec_free (hm->bool_ref_by_sw_if_index);
  vec_validate_init_empty (hm->bool_ref_by_sw_if_index, i, ~0);

  pool_foreach (t, hm->dst_tunnels,
                ({
                  vxlan_gpe_enable_disable_ioam_for_dest
                    (hm->vlib_main, t->dst_addr, t->outer_fib_index,
                     (t->fp_proto == FIB_PROTOCOL_IP4), 1 /* is_add */ );
                }));
  return;
}

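/**
 * @brief Disable the transit iOAM output feature on interfaces that are no
 * longer referenced by any tracked destination (reference entry reset to ~0).
 */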
void
vxlan_gpe_clear_output_feature_on_select_intfs (void)
{
  vxlan_gpe_ioam_main_t *hm = &vxlan_gpe_ioam_main;
  u32 sw_if_index0 = 0;

  for (sw_if_index0 = 0;
       sw_if_index0 < vec_len (hm->bool_ref_by_sw_if_index); sw_if_index0++)
    {
      if (hm->bool_ref_by_sw_if_index[sw_if_index0] == 0xFF)
        {
          vxlan_gpe_set_clear_output_feature_on_intf
            (hm->vlib_main, sw_if_index0, 0);
        }
    }

  return;
}

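/**
 * @brief CLI handler for "set vxlan-gpe-ioam".
 *
 * Locates an existing VXLAN-GPE tunnel by local/remote address and VNI and
 * enables (or, with "disable", removes) the iOAM rewrite on it, e.g.
 * (illustrative addresses):
 *
 *   set vxlan-gpe-ioam local 10.1.1.1 remote 10.1.1.2 vni 13
 */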
static clib_error_t *
vxlan_gpe_set_ioam_rewrite_command_fn (vlib_main_t * vm,
                                       unformat_input_t * input,
                                       vlib_cli_command_t * cmd)
{
  vxlan_gpe_ioam_main_t *hm = &vxlan_gpe_ioam_main;
  ip46_address_t local, remote;
  u8 local_set = 0;
  u8 remote_set = 0;
  u8 ipv4_set = 0;
  u8 ipv6_set = 0;
  u32 vni;
  u8 vni_set = 0;
  u8 disable = 0;
  clib_error_t *rv = 0;
  vxlan4_gpe_tunnel_key_t key4;
  vxlan6_gpe_tunnel_key_t key6;
  uword *p;
  vxlan_gpe_main_t *gm = &vxlan_gpe_main;
  vxlan_gpe_tunnel_t *t = 0;

  while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
    {
      if (unformat (input, "local %U", unformat_ip4_address, &local.ip4))
        {
          local_set = 1;
          ipv4_set = 1;
        }
      else if (unformat (input, "remote %U", unformat_ip4_address,
                         &remote.ip4))
        {
          remote_set = 1;
          ipv4_set = 1;
        }
      else if (unformat (input, "local %U", unformat_ip6_address, &local.ip6))
        {
          local_set = 1;
          ipv6_set = 1;
        }
      else if (unformat (input, "remote %U", unformat_ip6_address,
                         &remote.ip6))
        {
          remote_set = 1;
          ipv6_set = 1;
        }
      else if (unformat (input, "vni %d", &vni))
        vni_set = 1;
      else if (unformat (input, "disable"))
        disable = 1;
      else
        break;
    }

  if (local_set == 0)
    return clib_error_return (0, "tunnel local address not specified");
  if (remote_set == 0)
    return clib_error_return (0, "tunnel remote address not specified");
  if (ipv4_set && ipv6_set)
    return clib_error_return (0, "both IPv4 and IPv6 addresses specified");
  if ((ipv4_set && memcmp (&local.ip4, &remote.ip4, sizeof (local.ip4)) == 0)
      || (ipv6_set
          && memcmp (&local.ip6, &remote.ip6, sizeof (local.ip6)) == 0))
    return clib_error_return (0, "src and dst addresses are identical");
  if (vni_set == 0)
    return clib_error_return (0, "vni not specified");

  if (!ipv6_set)
    {
      key4.local = local.ip4.as_u32;
      key4.remote = remote.ip4.as_u32;
      key4.vni = clib_host_to_net_u32 (vni << 8);
      key4.pad = 0;
      p = hash_get_mem (gm->vxlan4_gpe_tunnel_by_key, &key4);
    }
  else
    {
      key6.local.as_u64[0] = local.ip6.as_u64[0];
      key6.local.as_u64[1] = local.ip6.as_u64[1];
      key6.remote.as_u64[0] = remote.ip6.as_u64[0];
      key6.remote.as_u64[1] = remote.ip6.as_u64[1];
      key6.vni = clib_host_to_net_u32 (vni << 8);
      p = hash_get_mem (gm->vxlan6_gpe_tunnel_by_key, &key6);
    }

  if (!p)
    return clib_error_return (0, "VxLAN Tunnel not found");
  t = pool_elt_at_index (gm->tunnels, p[0]);

  if (!disable)
    {
      rv =
        vxlan_gpe_ioam_set (t, hm->has_trace_option,
                            hm->has_pot_option, hm->has_ppc_option, ipv6_set);
    }
  else
    {
      rv = vxlan_gpe_ioam_clear (t, 0, 0, 0, 0);
    }
  return rv;
}

/* *INDENT-OFF* */
VLIB_CLI_COMMAND (vxlan_gpe_set_ioam_rewrite_cmd, static) = {
  .path = "set vxlan-gpe-ioam",
  .short_help = "set vxlan-gpe-ioam local <src-ip> remote <dst-ip> vni <vni> [disable]",
  .function = vxlan_gpe_set_ioam_rewrite_command_fn,
};
/* *INDENT-ON* */

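/**
 * @brief Record the globally enabled iOAM options (trace, pot, ppc) and set
 * up the trace profile when tracing is requested.
 */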
clib_error_t *
vxlan_gpe_ioam_enable (int has_trace_option,
                       int has_pot_option, int has_ppc_option)
{
  vxlan_gpe_ioam_main_t *hm = &vxlan_gpe_ioam_main;
  hm->has_trace_option = has_trace_option;
  hm->has_pot_option = has_pot_option;
  hm->has_ppc_option = has_ppc_option;
  if (hm->has_trace_option)
    {
      vxlan_gpe_trace_profile_setup ();
    }

  return 0;
}

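/**
 * @brief Update the global iOAM option flags and clean up the trace profile
 * when tracing is no longer requested.
 */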
clib_error_t *
vxlan_gpe_ioam_disable (int has_trace_option,
                        int has_pot_option, int has_ppc_option)
{
  vxlan_gpe_ioam_main_t *hm = &vxlan_gpe_ioam_main;
  hm->has_trace_option = has_trace_option;
  hm->has_pot_option = has_pot_option;
  hm->has_ppc_option = has_ppc_option;
  if (!hm->has_trace_option)
    {
      vxlan_gpe_trace_profile_cleanup ();
    }

  return 0;
}

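/**
 * @brief Set the decap next-node override used for IPv4 VXLAN-GPE decap.
 */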
void
vxlan_gpe_set_next_override (uword next)
{
  vxlan_gpe_ioam_main_t *hm = &vxlan_gpe_ioam_main;
  hm->decap_v4_next_override = next;
  return;
}

static clib_error_t *
vxlan_gpe_set_ioam_flags_command_fn (vlib_main_t * vm,
                                     unformat_input_t * input,
                                     vlib_cli_command_t * cmd)
{
  int has_trace_option = 0;
  int has_pot_option = 0;
  int has_ppc_option = 0;
  clib_error_t *rv = 0;

  while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
    {
      if (unformat (input, "trace"))
        has_trace_option = 1;
      else if (unformat (input, "pot"))
        has_pot_option = 1;
      else if (unformat (input, "ppc encap"))
        has_ppc_option = PPC_ENCAP;
      else if (unformat (input, "ppc decap"))
        has_ppc_option = PPC_DECAP;
      else if (unformat (input, "ppc none"))
        has_ppc_option = PPC_NONE;
      else
        break;
    }

  rv =
    vxlan_gpe_ioam_enable (has_trace_option, has_pot_option, has_ppc_option);
  return rv;
}

/* *INDENT-OFF* */
VLIB_CLI_COMMAND (vxlan_gpe_set_ioam_flags_cmd, static) = {
  .path = "set vxlan-gpe-ioam rewrite",
  .short_help = "set vxlan-gpe-ioam rewrite [trace] [pot] [ppc <encap|decap|none>]",
  .function = vxlan_gpe_set_ioam_flags_command_fn,
};
/* *INDENT-ON* */

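/**
 * @brief Disable transit iOAM for one destination, re-apply it for any
 * remaining tracked destinations, then prune the output feature from
 * interfaces that are no longer referenced.
 */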
int
vxlan_gpe_ioam_disable_for_dest (vlib_main_t * vm, ip46_address_t dst_addr,
                                 u32 outer_fib_index, u8 ipv4_set)
{
  vxlan_gpe_ioam_dest_tunnels_t *t;
  vxlan_gpe_ioam_main_t *hm = &vxlan_gpe_ioam_main;

  vxlan_gpe_enable_disable_ioam_for_dest (hm->vlib_main,
                                          dst_addr, outer_fib_index, ipv4_set,
                                          0);
  if (pool_elts (hm->dst_tunnels) == 0)
    {
      vxlan_gpe_clear_output_feature_on_select_intfs ();
      return 0;
    }

  pool_foreach (t, hm->dst_tunnels,
                ({
                  vxlan_gpe_enable_disable_ioam_for_dest
                    (hm->vlib_main, t->dst_addr, t->outer_fib_index,
                     (t->fp_proto == FIB_PROTOCOL_IP4), 1 /* is_add */ );
                }));
  vxlan_gpe_clear_output_feature_on_select_intfs ();
  return (0);
}

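/**
 * @brief CLI handler for "set vxlan-gpe-ioam-transit".
 *
 * Enables (or, with "disable", removes) transit iOAM processing for traffic
 * towards a destination, e.g. (illustrative address):
 *
 *   set vxlan-gpe-ioam-transit dst-ip 10.1.1.2
 */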
static clib_error_t *
vxlan_gpe_set_ioam_transit_rewrite_command_fn (vlib_main_t * vm,
                                               unformat_input_t * input,
                                               vlib_cli_command_t * cmd)
{
  vxlan_gpe_ioam_main_t *hm = &vxlan_gpe_ioam_main;
  ip46_address_t dst_addr;
  u8 dst_addr_set = 0;
  u8 ipv4_set = 0;
  u8 ipv6_set = 0;
  u8 disable = 0;
  clib_error_t *rv = 0;
  u32 outer_fib_index = 0;

  while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
    {
      if (unformat (input, "dst-ip %U", unformat_ip4_address, &dst_addr.ip4))
        {
          dst_addr_set = 1;
          ipv4_set = 1;
        }
      else if (unformat (input, "dst-ip %U", unformat_ip6_address,
                         &dst_addr.ip6))
        {
          dst_addr_set = 1;
          ipv6_set = 1;
        }
      else if (unformat (input, "outer-fib-index %d", &outer_fib_index))
        {
        }
      else if (unformat (input, "disable"))
        disable = 1;
      else
        break;
    }

  if (dst_addr_set == 0)
    return clib_error_return (0, "tunnel destination address not specified");
  if (ipv4_set && ipv6_set)
    return clib_error_return (0, "both IPv4 and IPv6 addresses specified");

  if (!disable)
    {
      vxlan_gpe_enable_disable_ioam_for_dest (hm->vlib_main,
                                              dst_addr, outer_fib_index,
                                              ipv4_set, 1);
    }
  else
    {
      vxlan_gpe_ioam_disable_for_dest
        (vm, dst_addr, outer_fib_index, ipv4_set);
    }
  return rv;
}

/* *INDENT-OFF* */
VLIB_CLI_COMMAND (vxlan_gpe_set_ioam_transit_rewrite_cmd, static) = {
  .path = "set vxlan-gpe-ioam-transit",
  .short_help = "set vxlan-gpe-ioam-transit dst-ip <dst_ip> [outer-fib-index <outer_fib_index>] [disable]",
  .function = vxlan_gpe_set_ioam_transit_rewrite_command_fn,
};
/* *INDENT-ON* */

clib_error_t *
clear_vxlan_gpe_ioam_rewrite_command_fn (vlib_main_t * vm,
                                         unformat_input_t * input,
                                         vlib_cli_command_t * cmd)
{
  return (vxlan_gpe_ioam_disable (0, 0, 0));
}

/* *INDENT-OFF* */
VLIB_CLI_COMMAND (vxlan_gpe_clear_ioam_flags_cmd, static) = {
  .path = "clear vxlan-gpe-ioam rewrite",
  .short_help = "clear vxlan-gpe-ioam rewrite",
  .function = clear_vxlan_gpe_ioam_rewrite_command_fn,
};
/* *INDENT-ON* */

/**
 * Function definition to backwalk a FIB node
 */
static fib_node_back_walk_rc_t
vxlan_gpe_ioam_back_walk (fib_node_t * node, fib_node_back_walk_ctx_t * ctx)
{
  vxlan_gpe_refresh_output_feature_on_all_dest ();
  return (FIB_NODE_BACK_WALK_CONTINUE);
}

/**
 * Function definition to get a FIB node from its index
 */
static fib_node_t *
vxlan_gpe_ioam_fib_node_get (fib_node_index_t index)
{
  vxlan_gpe_ioam_main_t *hm = &vxlan_gpe_ioam_main;
  return (&hm->node);
}

/**
 * Function definition to inform the FIB node that its last lock has gone.
 */
static void
vxlan_gpe_ioam_last_lock_gone (fib_node_t * node)
{
  ASSERT (0);
}

/*
 * Virtual function table registered by VxLAN-GPE iOAM tunnels
 * for participation in the FIB object graph.
 */
const static fib_node_vft_t vxlan_gpe_ioam_vft = {
  .fnv_get = vxlan_gpe_ioam_fib_node_get,
  .fnv_last_lock = vxlan_gpe_ioam_last_lock_gone,
  .fnv_back_walk = vxlan_gpe_ioam_back_walk,
};

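/**
 * @brief Register the FIB node type used to receive back-walk notifications
 * for tracked VXLAN-GPE iOAM destinations.
 */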
void
vxlan_gpe_ioam_interface_init (void)
{
  vxlan_gpe_ioam_main_t *hm = &vxlan_gpe_ioam_main;
  hm->fib_entry_type = fib_node_register_new_type (&vxlan_gpe_ioam_vft);
  return;
}

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */