Remove unused, redundant and deprecated code from lookup.h
[vpp.git] / src / plugins / ioam / lib-vxlan-gpe / vxlan_gpe_ioam.c
1 /*
2  * Copyright (c) 2016 Cisco and/or its affiliates.
3  * Licensed under the Apache License, Version 2.0 (the "License");
4  * you may not use this file except in compliance with the License.
5  * You may obtain a copy of the License at:
6  *
7  *     http://www.apache.org/licenses/LICENSE-2.0
8  *
9  * Unless required by applicable law or agreed to in writing, software
10  * distributed under the License is distributed on an "AS IS" BASIS,
11  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12  * See the License for the specific language governing permissions and
13  * limitations under the License.
14  */
15 #include <vnet/vxlan-gpe/vxlan_gpe.h>
16 #include <vnet/vxlan-gpe/vxlan_gpe_packet.h>
17 #include <vnet/ip/format.h>
18 #include <ioam/lib-vxlan-gpe/vxlan_gpe_ioam.h>
19 #include <vnet/fib/ip6_fib.h>
20 #include <vnet/fib/ip4_fib.h>
21 #include <vnet/fib/fib_entry.h>
22
23 vxlan_gpe_ioam_main_t vxlan_gpe_ioam_main;
24
/**
 * Build the iOAM rewrite string for a VxLAN-GPE tunnel.
 *
 * Sizes the requested iOAM options (trace and/or proof-of-transit),
 * regenerates the tunnel's encap rewrite with the iOAM protocol, then
 * fills in the iOAM header and each option via the registered
 * add_options callbacks.
 *
 * @param t                tunnel whose rewrite is (re)built
 * @param has_trace_option non-zero to include the trace option
 * @param has_pot_option   non-zero to include the proof-of-transit option
 * @param has_ppc_option   not used by this function
 * @param ipv6_set         non-zero for an IPv6 underlay encap
 * @return 0 on success, -1 if no option was requested or trace add failed
 */
int
vxlan_gpe_ioam_set_rewrite (vxlan_gpe_tunnel_t * t, int has_trace_option,
                            int has_pot_option, int has_ppc_option,
                            u8 ipv6_set)
{
  vxlan_gpe_ioam_main_t *hm = &vxlan_gpe_ioam_main;
  u32 size;
  vxlan_gpe_ioam_hdr_t *vxlan_gpe_ioam_hdr;
  u8 *current;
  u8 trace_data_size = 0;
  u8 pot_data_size = 0;

  if (has_trace_option == 0 && has_pot_option == 0)
    return -1;

  /* Work out how much space we need */
  size = sizeof (vxlan_gpe_ioam_hdr_t);

  /* Each requested option contributes its option header plus the size
   * reported at registration time. */
  if (has_trace_option
      && hm->add_options[VXLAN_GPE_OPTION_TYPE_IOAM_TRACE] != 0)
    {
      size += sizeof (vxlan_gpe_ioam_option_t);
      size += hm->options_size[VXLAN_GPE_OPTION_TYPE_IOAM_TRACE];
    }
  if (has_pot_option
      && hm->add_options[VXLAN_GPE_OPTION_TYPE_IOAM_PROOF_OF_TRANSIT] != 0)
    {
      size += sizeof (vxlan_gpe_ioam_option_t);
      size += hm->options_size[VXLAN_GPE_OPTION_TYPE_IOAM_PROOF_OF_TRANSIT];
    }

  t->rewrite_size = size;

  if (!ipv6_set)
    {
      /* NOTE(review): the v4 path uses the configurable encap_v4_next_node
       * while the v6 path hard-codes IP6_LOOKUP — confirm this asymmetry
       * is intended. */
      vxlan4_gpe_rewrite (t, size, VXLAN_GPE_PROTOCOL_IOAM,
                          hm->encap_v4_next_node);
      /* iOAM header sits immediately after the outer IPv4+UDP+GPE headers. */
      vxlan_gpe_ioam_hdr =
        (vxlan_gpe_ioam_hdr_t *) (t->rewrite +
                                  sizeof (ip4_vxlan_gpe_header_t));
    }
  else
    {
      vxlan6_gpe_rewrite (t, size, VXLAN_GPE_PROTOCOL_IOAM,
                          VXLAN_GPE_ENCAP_NEXT_IP6_LOOKUP);
      vxlan_gpe_ioam_hdr =
        (vxlan_gpe_ioam_hdr_t *) (t->rewrite +
                                  sizeof (ip6_vxlan_gpe_header_t));
    }


  vxlan_gpe_ioam_hdr->type = VXLAN_GPE_PROTOCOL_IOAM;
  /* Length of the header in octets */
  vxlan_gpe_ioam_hdr->length = size;
  /* Preserve the tunnel's inner protocol after the iOAM headers. */
  vxlan_gpe_ioam_hdr->protocol = t->protocol;
  current = (u8 *) vxlan_gpe_ioam_hdr + sizeof (vxlan_gpe_ioam_hdr_t);

  if (has_trace_option
      && hm->add_options[VXLAN_GPE_OPTION_TYPE_IOAM_TRACE] != 0)
    {
      /* Callback writes the trace option at 'current' and reports its size;
       * non-zero return aborts the whole rewrite. */
      if (0 != hm->add_options[VXLAN_GPE_OPTION_TYPE_IOAM_TRACE] (current,
                                                                  &trace_data_size))
        return -1;
      current += trace_data_size;
    }
  if (has_pot_option
      && hm->add_options[VXLAN_GPE_OPTION_TYPE_IOAM_PROOF_OF_TRANSIT] != 0)
    {
      pot_data_size =
        hm->options_size[VXLAN_GPE_OPTION_TYPE_IOAM_PROOF_OF_TRANSIT];
      /* POT callback returns 0 on success; on failure the option is simply
       * skipped (no error propagated here). */
      if (0 ==
          hm->add_options[VXLAN_GPE_OPTION_TYPE_IOAM_PROOF_OF_TRANSIT]
          (current, &pot_data_size))
        current += pot_data_size;
    }

  return 0;
}
103
104 int
105 vxlan_gpe_ioam_clear_rewrite (vxlan_gpe_tunnel_t * t, int has_trace_option,
106                               int has_pot_option, int has_ppc_option,
107                               u8 ipv6_set)
108 {
109
110   t->rewrite_size = 0;
111
112   if (!ipv6_set)
113     {
114       vxlan4_gpe_rewrite (t, 0, 0, VXLAN_GPE_ENCAP_NEXT_IP4_LOOKUP);
115     }
116   else
117     {
118       vxlan6_gpe_rewrite (t, 0, 0, VXLAN_GPE_ENCAP_NEXT_IP6_LOOKUP);
119     }
120
121
122   return 0;
123 }
124
125 clib_error_t *
126 vxlan_gpe_ioam_clear (vxlan_gpe_tunnel_t * t,
127                       int has_trace_option, int has_pot_option,
128                       int has_ppc_option, u8 ipv6_set)
129 {
130   int rv;
131   rv = vxlan_gpe_ioam_clear_rewrite (t, 0, 0, 0, 0);
132
133   if (rv == 0)
134     {
135       return (0);
136     }
137   else
138     {
139       return clib_error_return_code (0, rv, 0,
140                                      "vxlan_gpe_ioam_clear_rewrite returned %d",
141                                      rv);
142     }
143
144 }
145
146
147 clib_error_t *
148 vxlan_gpe_ioam_set (vxlan_gpe_tunnel_t * t,
149                     int has_trace_option, int has_pot_option,
150                     int has_ppc_option, u8 ipv6_set)
151 {
152   int rv;
153   rv = vxlan_gpe_ioam_set_rewrite (t, has_trace_option,
154                                    has_pot_option, has_ppc_option, ipv6_set);
155
156   if (rv == 0)
157     {
158       return (0);
159     }
160   else
161     {
162       return clib_error_return_code (0, rv, 0,
163                                      "vxlan_gpe_ioam_set_rewrite returned %d",
164                                      rv);
165     }
166
167 }
168
169 static void
170 vxlan_gpe_set_clear_output_feature_on_intf (vlib_main_t * vm,
171                                             u32 sw_if_index0, u8 is_add)
172 {
173
174
175
176   vnet_feature_enable_disable ("ip4-output", "vxlan-gpe-transit-ioam",
177                                sw_if_index0, is_add,
178                                0 /* void *feature_config */ ,
179                                0 /* u32 n_feature_config_bytes */ );
180   return;
181 }
182
/**
 * Disable the vxlan-gpe transit iOAM output feature on every software
 * interface in the system.
 */
void
vxlan_gpe_clear_output_feature_on_all_intfs (vlib_main_t * vm)
{
  vnet_sw_interface_t *si = 0;
  vnet_main_t *vnm = vnet_get_main ();
  vnet_interface_main_t *im = &vnm->interface_main;

  /* Walk the sw-interface pool; is_add = 0 removes the feature. */
  pool_foreach (si, im->sw_interfaces, (
                                         {
                                         vxlan_gpe_set_clear_output_feature_on_intf
                                         (vm, si->sw_if_index, 0);
                                         }));
  return;
}
197
198
199 extern fib_forward_chain_type_t
200 fib_entry_get_default_chain_type (const fib_entry_t * fib_entry);
201
/**
 * Enable or disable transit iOAM processing for traffic towards a
 * VxLAN-GPE destination.
 *
 * Resolves the destination through the FIB, walks the (possibly nested)
 * load-balance buckets to find every egress adjacency, and toggles the
 * ip4-output transit feature on each egress interface. On add, the
 * destination is also recorded in dst_tunnels/dst_by_ip4 and attached as
 * a FIB child so forwarding changes trigger a refresh; on delete, that
 * state is torn down.
 *
 * Only IPv4 destinations are supported; IPv6 is a no-op (TBD).
 *
 * @return 0 on success (or unsupported/duplicate request),
 *         -1 if the FIB entry has no valid load-balance DPO
 */
int
vxlan_gpe_enable_disable_ioam_for_dest (vlib_main_t * vm,
                                        ip46_address_t dst_addr,
                                        u32 outer_fib_index,
                                        u8 is_ipv4, u8 is_add)
{
  vxlan_gpe_ioam_main_t *hm = &vxlan_gpe_ioam_main;
  /* NOTE(review): the FIB lookup below always uses table 0, not
   * outer_fib_index — confirm whether non-default outer FIBs should be
   * honoured here. */
  u32 fib_index0 = 0;
  u32 sw_if_index0 = ~0;

  fib_node_index_t fei = ~0;
  fib_entry_t *fib_entry;
  u32 adj_index0;
  ip_adjacency_t *adj0;
  fib_prefix_t fib_prefix;
  //fib_forward_chain_type_t fct;
  load_balance_t *lb_m, *lb_b;
  const dpo_id_t *dpo0, *dpo1;
  u32 i, j;
  //vnet_hw_interface_t *hw;

  if (is_ipv4)
    {
      memset (&fib_prefix, 0, sizeof (fib_prefix_t));
      fib_prefix.fp_len = 32;
      fib_prefix.fp_proto = FIB_PROTOCOL_IP4;
      fib_prefix.fp_addr = dst_addr;
    }
  else
    {
      /* IPv6 destinations not yet supported. */
      return 0;
    }

  fei = fib_table_lookup (fib_index0, &fib_prefix);
  fib_entry = fib_entry_get (fei);

  //fct = fib_entry_get_default_chain_type (fib_entry);

  if (!dpo_id_is_valid (&fib_entry->fe_lb /*[fct] */ ))
    {
      return (-1);
    }

  lb_m = load_balance_get (fib_entry->fe_lb /*[fct] */ .dpoi_index);

  /* Outer loop: buckets of the entry's load balance. A bucket may itself
   * be another load balance (e.g. ECMP), hence the nested walk. */
  for (i = 0; i < lb_m->lb_n_buckets; i++)
    {
      dpo0 = load_balance_get_bucket_i (lb_m, i);

      if (dpo0->dpoi_type == DPO_LOAD_BALANCE)
        {
          lb_b = load_balance_get (dpo0->dpoi_index);

          for (j = 0; j < lb_b->lb_n_buckets; j++)
            {
              dpo1 = load_balance_get_bucket_i (lb_b, j);

              if (dpo1->dpoi_type == DPO_ADJACENCY)
                {
                  adj_index0 = dpo1->dpoi_index;

                  if (ADJ_INDEX_INVALID == adj_index0)
                    {
                      continue;
                    }

                  adj0 = adj_get (adj_index0);
                  sw_if_index0 = adj0->rewrite_header.sw_if_index;

                  if (~0 == sw_if_index0)
                    {
                      continue;
                    }


                  if (is_add)
                    {
                      vnet_feature_enable_disable ("ip4-output",
                                                   "vxlan-gpe-transit-ioam",
                                                   sw_if_index0, is_add, 0
                                                   /* void *feature_config */
                                                   , 0  /* u32 n_feature_config_bytes */
                        );

                      /* Mark this interface as actively using the feature
                       * (vector slots default to ~0 = unused). */
                      vec_validate_init_empty (hm->bool_ref_by_sw_if_index,
                                               sw_if_index0, ~0);
                      hm->bool_ref_by_sw_if_index[sw_if_index0] = 1;
                    }
                  else
                    {
                      /* Mark for later removal by
                       * vxlan_gpe_clear_output_feature_on_select_intfs.
                       * NOTE(review): no vec_validate here — assumes the
                       * add path already sized the vector for this
                       * sw_if_index; verify delete is never called first. */
                      hm->bool_ref_by_sw_if_index[sw_if_index0] = ~0;
                    }
                }
            }
        }
    }

  /* Maintain the per-destination tracking state. */
  if (is_ipv4)
    {

      uword *t = NULL;
      vxlan_gpe_ioam_dest_tunnels_t *t1;
      fib_prefix_t key4, *key4_copy;
      hash_pair_t *hp;
      memset (&key4, 0, sizeof (key4));
      key4.fp_proto = FIB_PROTOCOL_IP4;
      key4.fp_addr.ip4.as_u32 = fib_prefix.fp_addr.ip4.as_u32;
      t = hash_get_mem (hm->dst_by_ip4, &key4);
      if (is_add)
        {
          if (t)
            {
              /* Destination already tracked — nothing more to do. */
              return 0;
            }
          pool_get_aligned (hm->dst_tunnels, t1, CLIB_CACHE_LINE_BYTES);
          memset (t1, 0, sizeof (*t1));
          t1->fp_proto = FIB_PROTOCOL_IP4;
          t1->dst_addr.ip4.as_u32 = fib_prefix.fp_addr.ip4.as_u32;
          /* The hash stores a pointer to the key, so heap-copy it; freed
           * again on the delete path below. */
          key4_copy = clib_mem_alloc (sizeof (*key4_copy));
          clib_memcpy (key4_copy, &key4, sizeof (*key4_copy));
          hash_set_mem (hm->dst_by_ip4, key4_copy, t1 - hm->dst_tunnels);
          /*
           * Attach to the FIB entry for the VxLAN-GPE destination
           * and become its child. The dest route will invoke a callback
           * when the fib entry changes, it can be used to
           * re-program the output feature on the egress interface.
           */

          const fib_prefix_t tun_dst_pfx = {
            .fp_len = 32,
            .fp_proto = FIB_PROTOCOL_IP4,
            .fp_addr = {.ip4 = t1->dst_addr.ip4,}
          };

          t1->fib_entry_index =
            fib_table_entry_special_add (outer_fib_index,
                                         &tun_dst_pfx,
                                         FIB_SOURCE_RR,
                                         FIB_ENTRY_FLAG_NONE,
                                         ADJ_INDEX_INVALID);
          t1->sibling_index =
            fib_entry_child_add (t1->fib_entry_index,
                                 hm->fib_entry_type, t1 - hm->dst_tunnels);
          t1->outer_fib_index = outer_fib_index;

        }
      else
        {
          if (!t)
            {
              /* Unknown destination — nothing to delete. */
              return 0;
            }
          t1 = pool_elt_at_index (hm->dst_tunnels, t[0]);
          /* Recover the heap-allocated key so it can be freed after the
           * hash entry is removed. */
          hp = hash_get_pair (hm->dst_by_ip4, &key4);
          key4_copy = (void *) (hp->key);
          hash_unset_mem (hm->dst_by_ip4, &key4);
          clib_mem_free (key4_copy);
          pool_put (hm->dst_tunnels, t1);
        }
    }
  else
    {
      // TBD for IPv6
    }

  return 0;
}
369
/**
 * Re-derive the set of egress interfaces needing the transit iOAM
 * feature, for every tracked destination.
 *
 * Called from the FIB back-walk when forwarding for a tracked destination
 * changes: clears the feature everywhere, resets the per-interface
 * reference vector, then re-enables per destination.
 */
void
vxlan_gpe_refresh_output_feature_on_all_dest (void)
{
  vxlan_gpe_ioam_main_t *hm = &vxlan_gpe_ioam_main;
  vxlan_gpe_ioam_dest_tunnels_t *t;
  u32 i;
  if (pool_elts (hm->dst_tunnels) == 0)
    return;
  vxlan_gpe_clear_output_feature_on_all_intfs (hm->vlib_main);
  /* Rebuild the reference vector at its previous length, all slots ~0. */
  i = vec_len (hm->bool_ref_by_sw_if_index);
  vec_free (hm->bool_ref_by_sw_if_index);
  vec_validate_init_empty (hm->bool_ref_by_sw_if_index, i, ~0);
  pool_foreach (t, hm->dst_tunnels, (
                                      {
                                      vxlan_gpe_enable_disable_ioam_for_dest
                                      (hm->vlib_main,
                                       t->dst_addr,
                                       t->outer_fib_index,
                                       (t->fp_proto == FIB_PROTOCOL_IP4), 1
                                       /* is_add */
                                      );
                                      }
                ));
  return;
}
395
396 void
397 vxlan_gpe_clear_output_feature_on_select_intfs (void)
398 {
399   vxlan_gpe_ioam_main_t *hm = &vxlan_gpe_ioam_main;
400   u32 sw_if_index0 = 0;
401   for (sw_if_index0 = 0;
402        sw_if_index0 < vec_len (hm->bool_ref_by_sw_if_index); sw_if_index0++)
403     {
404       if (hm->bool_ref_by_sw_if_index[sw_if_index0] == 0xFF)
405         {
406           vxlan_gpe_set_clear_output_feature_on_intf
407             (hm->vlib_main, sw_if_index0, 0);
408         }
409     }
410
411   return;
412 }
413
/**
 * CLI handler for "set vxlan-gpe-ioam": look up an existing VxLAN-GPE
 * tunnel by local/remote address and VNI, then attach (default) or
 * detach ("disable") the iOAM rewrite using the globally configured
 * option flags.
 */
static clib_error_t *
vxlan_gpe_set_ioam_rewrite_command_fn (vlib_main_t *
                                       vm,
                                       unformat_input_t
                                       * input, vlib_cli_command_t * cmd)
{
  vxlan_gpe_ioam_main_t *hm = &vxlan_gpe_ioam_main;
  ip46_address_t local, remote;
  u8 local_set = 0;
  u8 remote_set = 0;
  u8 ipv4_set = 0;
  u8 ipv6_set = 0;
  u32 vni;
  u8 vni_set = 0;
  u8 disable = 0;
  clib_error_t *rv = 0;
  vxlan4_gpe_tunnel_key_t key4;
  vxlan6_gpe_tunnel_key_t key6;
  uword *p;
  vxlan_gpe_main_t *gm = &vxlan_gpe_main;
  vxlan_gpe_tunnel_t *t = 0;
  /* Parse: local/remote in either address family, vni, optional disable. */
  while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
    {
      if (unformat (input, "local %U", unformat_ip4_address, &local.ip4))
        {
          local_set = 1;
          ipv4_set = 1;
        }
      else
        if (unformat (input, "remote %U", unformat_ip4_address, &remote.ip4))
        {
          remote_set = 1;
          ipv4_set = 1;
        }
      else if (unformat (input, "local %U", unformat_ip6_address, &local.ip6))
        {
          local_set = 1;
          ipv6_set = 1;
        }
      else
        if (unformat (input, "remote %U", unformat_ip6_address, &remote.ip6))
        {
          remote_set = 1;
          ipv6_set = 1;
        }
      else if (unformat (input, "vni %d", &vni))
        vni_set = 1;
      else if (unformat (input, "disable"))
        disable = 1;
      else
        break;
    }

  /* Validate that we have a complete, consistent tunnel key. */
  if (local_set == 0)
    return clib_error_return (0, "tunnel local address not specified");
  if (remote_set == 0)
    return clib_error_return (0, "tunnel remote address not specified");
  if (ipv4_set && ipv6_set)
    return clib_error_return (0, "both IPv4 and IPv6 addresses specified");
  if ((ipv4_set
       && memcmp (&local.ip4, &remote.ip4,
                  sizeof (local.ip4)) == 0) || (ipv6_set
                                                &&
                                                memcmp
                                                (&local.ip6,
                                                 &remote.ip6,
                                                 sizeof (local.ip6)) == 0))
    return clib_error_return (0, "src and dst addresses are identical");
  if (vni_set == 0)
    return clib_error_return (0, "vni not specified");
  /* Build the family-specific tunnel hash key and look the tunnel up. */
  if (!ipv6_set)
    {
      key4.local = local.ip4.as_u32;
      key4.remote = remote.ip4.as_u32;
      /* VNI occupies the upper 24 bits of the on-wire field. */
      key4.vni = clib_host_to_net_u32 (vni << 8);
      key4.pad = 0;
      p = hash_get_mem (gm->vxlan4_gpe_tunnel_by_key, &key4);
    }
  else
    {
      key6.local.as_u64[0] = local.ip6.as_u64[0];
      key6.local.as_u64[1] = local.ip6.as_u64[1];
      key6.remote.as_u64[0] = remote.ip6.as_u64[0];
      key6.remote.as_u64[1] = remote.ip6.as_u64[1];
      key6.vni = clib_host_to_net_u32 (vni << 8);
      p = hash_get_mem (gm->vxlan6_gpe_tunnel_by_key, &key6);
    }

  if (!p)
    return clib_error_return (0, "VxLAN Tunnel not found");
  t = pool_elt_at_index (gm->tunnels, p[0]);
  if (!disable)
    {
      /* Apply the globally configured option set to this tunnel. */
      rv =
        vxlan_gpe_ioam_set (t, hm->has_trace_option,
                            hm->has_pot_option, hm->has_ppc_option, ipv6_set);
    }
  else
    {
      rv = vxlan_gpe_ioam_clear (t, 0, 0, 0, 0);
    }
  return rv;
}
517
518
519 /* *INDENT-OFF* */
/* CLI registration: attach/detach iOAM rewrite on an existing tunnel. */
VLIB_CLI_COMMAND (vxlan_gpe_set_ioam_rewrite_cmd, static) = {
  .path = "set vxlan-gpe-ioam",
  .short_help = "set vxlan-gpe-ioam vxlan <src-ip> <dst_ip> <vnid> [disable]",
  .function = vxlan_gpe_set_ioam_rewrite_command_fn,
};
525 /* *INDENT-ON* */
526
527
528
529 clib_error_t *
530 vxlan_gpe_ioam_enable (int has_trace_option,
531                        int has_pot_option, int has_ppc_option)
532 {
533   vxlan_gpe_ioam_main_t *hm = &vxlan_gpe_ioam_main;
534   hm->has_trace_option = has_trace_option;
535   hm->has_pot_option = has_pot_option;
536   hm->has_ppc_option = has_ppc_option;
537   if (hm->has_trace_option)
538     {
539       vxlan_gpe_trace_profile_setup ();
540     }
541
542   return 0;
543 }
544
545 clib_error_t *
546 vxlan_gpe_ioam_disable (int
547                         has_trace_option,
548                         int has_pot_option, int has_ppc_option)
549 {
550   vxlan_gpe_ioam_main_t *hm = &vxlan_gpe_ioam_main;
551   hm->has_trace_option = has_trace_option;
552   hm->has_pot_option = has_pot_option;
553   hm->has_ppc_option = has_ppc_option;
554   if (!hm->has_trace_option)
555     {
556       vxlan_gpe_trace_profile_cleanup ();
557     }
558
559   return 0;
560 }
561
562 void
563 vxlan_gpe_set_next_override (uword next)
564 {
565   vxlan_gpe_ioam_main_t *hm = &vxlan_gpe_ioam_main;
566   hm->decap_v4_next_override = next;
567   return;
568 }
569
570 static clib_error_t *
571 vxlan_gpe_set_ioam_flags_command_fn (vlib_main_t * vm,
572                                      unformat_input_t
573                                      * input, vlib_cli_command_t * cmd)
574 {
575   int has_trace_option = 0;
576   int has_pot_option = 0;
577   int has_ppc_option = 0;
578   clib_error_t *rv = 0;
579   while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
580     {
581       if (unformat (input, "trace"))
582         has_trace_option = 1;
583       else if (unformat (input, "pot"))
584         has_pot_option = 1;
585       else if (unformat (input, "ppc encap"))
586         has_ppc_option = PPC_ENCAP;
587       else if (unformat (input, "ppc decap"))
588         has_ppc_option = PPC_DECAP;
589       else if (unformat (input, "ppc none"))
590         has_ppc_option = PPC_NONE;
591       else
592         break;
593     }
594
595
596   rv =
597     vxlan_gpe_ioam_enable (has_trace_option, has_pot_option, has_ppc_option);
598   return rv;
599 }
600
601 /* *INDENT-OFF* */
/* CLI registration: globally enable iOAM options for vxlan-gpe. */
VLIB_CLI_COMMAND (vxlan_gpe_set_ioam_flags_cmd, static) =
{
.path = "set vxlan-gpe-ioam rewrite",
.short_help = "set vxlan-gpe-ioam [trace] [pot] [ppc <encap|decap>]",
.function = vxlan_gpe_set_ioam_flags_command_fn,};
607 /* *INDENT-ON* */
608
609
/**
 * Stop transit iOAM processing for one destination.
 *
 * Removes the destination's tracking state, then — because several
 * destinations may share egress interfaces — re-adds the feature for
 * every remaining destination before finally disabling it on interfaces
 * that are no longer referenced by anyone.
 *
 * @return 0 always
 */
int vxlan_gpe_ioam_disable_for_dest
  (vlib_main_t * vm, ip46_address_t dst_addr, u32 outer_fib_index,
   u8 ipv4_set)
{
  vxlan_gpe_ioam_dest_tunnels_t *t;
  vxlan_gpe_ioam_main_t *hm = &vxlan_gpe_ioam_main;

  vxlan_gpe_enable_disable_ioam_for_dest (hm->vlib_main,
                                          dst_addr, outer_fib_index, ipv4_set,
                                          0);
  if (pool_elts (hm->dst_tunnels) == 0)
    {
      /* Last destination gone: sweep all marked interfaces. */
      vxlan_gpe_clear_output_feature_on_select_intfs ();
      return 0;
    }

  /* Re-mark interfaces still needed by the remaining destinations. */
  pool_foreach (t, hm->dst_tunnels, (
                                      {
                                      vxlan_gpe_enable_disable_ioam_for_dest
                                      (hm->vlib_main,
                                       t->dst_addr,
                                       t->outer_fib_index,
                                       (t->fp_proto ==
                                        FIB_PROTOCOL_IP4), 1 /* is_add */ );
                                      }
                ));
  /* Now only interfaces nobody re-marked get the feature removed. */
  vxlan_gpe_clear_output_feature_on_select_intfs ();
  return (0);

}
640
641 static clib_error_t *vxlan_gpe_set_ioam_transit_rewrite_command_fn
642   (vlib_main_t * vm, unformat_input_t * input, vlib_cli_command_t * cmd)
643 {
644   vxlan_gpe_ioam_main_t *hm = &vxlan_gpe_ioam_main;
645   ip46_address_t dst_addr;
646   u8 dst_addr_set = 0;
647   u8 ipv4_set = 0;
648   u8 ipv6_set = 0;
649   u8 disable = 0;
650   clib_error_t *rv = 0;
651   u32 outer_fib_index = 0;
652   while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
653     {
654       if (unformat (input, "dst-ip %U", unformat_ip4_address, &dst_addr.ip4))
655         {
656           dst_addr_set = 1;
657           ipv4_set = 1;
658         }
659       else
660         if (unformat
661             (input, "dst-ip %U", unformat_ip6_address, &dst_addr.ip6))
662         {
663           dst_addr_set = 1;
664           ipv6_set = 1;
665         }
666       else if (unformat (input, "outer-fib-index %d", &outer_fib_index))
667         {
668         }
669
670       else if (unformat (input, "disable"))
671         disable = 1;
672       else
673         break;
674     }
675
676   if (dst_addr_set == 0)
677     return clib_error_return (0, "tunnel destination address not specified");
678   if (ipv4_set && ipv6_set)
679     return clib_error_return (0, "both IPv4 and IPv6 addresses specified");
680   if (!disable)
681     {
682       vxlan_gpe_enable_disable_ioam_for_dest (hm->vlib_main,
683                                               dst_addr, outer_fib_index,
684                                               ipv4_set, 1);
685     }
686   else
687     {
688       vxlan_gpe_ioam_disable_for_dest
689         (vm, dst_addr, outer_fib_index, ipv4_set);
690     }
691   return rv;
692 }
693
694        /* *INDENT-OFF* */
/* CLI registration: enable/disable transit iOAM for a destination. */
VLIB_CLI_COMMAND (vxlan_gpe_set_ioam_transit_rewrite_cmd, static) = {
  .path = "set vxlan-gpe-ioam-transit",
  .short_help = "set vxlan-gpe-ioam-transit dst-ip <dst_ip> [outer-fib-index <outer_fib_index>] [disable]",
  .function = vxlan_gpe_set_ioam_transit_rewrite_command_fn,
};
700 /* *INDENT-ON* */
701
702 clib_error_t *clear_vxlan_gpe_ioam_rewrite_command_fn
703   (vlib_main_t * vm, unformat_input_t * input, vlib_cli_command_t * cmd)
704 {
705   return (vxlan_gpe_ioam_disable (0, 0, 0));
706 }
707
708 /* *INDENT-OFF* */
/* CLI registration: clear the global vxlan-gpe iOAM configuration. */
VLIB_CLI_COMMAND (vxlan_gpe_clear_ioam_flags_cmd, static) =
{
.path = "clear vxlan-gpe-ioam rewrite",
.short_help = "clear vxlan-gpe-ioam rewrite",
.function = clear_vxlan_gpe_ioam_rewrite_command_fn,
};
715 /* *INDENT-ON* */
716
717
718 /**
719  * Function definition to backwalk a FIB node
720  */
721 static fib_node_back_walk_rc_t
722 vxlan_gpe_ioam_back_walk (fib_node_t * node, fib_node_back_walk_ctx_t * ctx)
723 {
724   vxlan_gpe_refresh_output_feature_on_all_dest ();
725   return (FIB_NODE_BACK_WALK_CONTINUE);
726 }
727
728 /**
729  * Function definition to get a FIB node from its index
730  */
731 static fib_node_t *
732 vxlan_gpe_ioam_fib_node_get (fib_node_index_t index)
733 {
734   vxlan_gpe_ioam_main_t *hm = &vxlan_gpe_ioam_main;
735   return (&hm->node);
736 }
737
/**
 * Function definition to inform the FIB node that its last lock has gone.
 *
 * The node is embedded in the main struct and is never deleted, so
 * reaching zero locks indicates a reference-counting bug — hence the
 * hard assert.
 */
static void
vxlan_gpe_ioam_last_lock_gone (fib_node_t * node)
{
  ASSERT (0);
}
746
747
/*
 * Virtual function table registered for the vxlan-gpe iOAM FIB node type
 * for participation in the FIB object graph.
 * (Comment previously said "MPLS GRE tunnels" — a copy-paste artifact.)
 */
const static fib_node_vft_t vxlan_gpe_ioam_vft = {
  .fnv_get = vxlan_gpe_ioam_fib_node_get,
  .fnv_last_lock = vxlan_gpe_ioam_last_lock_gone,
  .fnv_back_walk = vxlan_gpe_ioam_back_walk,
};
757
758 void
759 vxlan_gpe_ioam_interface_init (void)
760 {
761   vxlan_gpe_ioam_main_t *hm = &vxlan_gpe_ioam_main;
762   hm->fib_entry_type = fib_node_register_new_type (&vxlan_gpe_ioam_vft);
763   return;
764 }
765
766 /*
767  * fd.io coding-style-patch-verification: ON
768  *
769  * Local Variables:
770  * eval: (c-set-style "gnu")
771  * End:
772  */