BFD-FIB interactions
src/vnet/fib/fib_path.c
1 /*
2  * Copyright (c) 2016 Cisco and/or its affiliates.
3  * Licensed under the Apache License, Version 2.0 (the "License");
4  * you may not use this file except in compliance with the License.
5  * You may obtain a copy of the License at:
6  *
7  *     http://www.apache.org/licenses/LICENSE-2.0
8  *
9  * Unless required by applicable law or agreed to in writing, software
10  * distributed under the License is distributed on an "AS IS" BASIS,
11  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12  * See the License for the specific language governing permissions and
13  * limitations under the License.
14  */
15
16 #include <vlib/vlib.h>
17 #include <vnet/vnet.h>
18 #include <vnet/ip/format.h>
19 #include <vnet/ip/ip.h>
20 #include <vnet/dpo/drop_dpo.h>
21 #include <vnet/dpo/receive_dpo.h>
22 #include <vnet/dpo/load_balance_map.h>
23 #include <vnet/dpo/lookup_dpo.h>
24
25 #include <vnet/adj/adj.h>
26 #include <vnet/adj/adj_mcast.h>
27
28 #include <vnet/fib/fib_path.h>
29 #include <vnet/fib/fib_node.h>
30 #include <vnet/fib/fib_table.h>
31 #include <vnet/fib/fib_entry.h>
32 #include <vnet/fib/fib_path_list.h>
33 #include <vnet/fib/fib_internal.h>
34 #include <vnet/fib/fib_urpf_list.h>
35 #include <vnet/fib/mpls_fib.h>
36
37 /**
38  * Enumeration of path types
39  */
40 typedef enum fib_path_type_t_ {
41     /**
42      * Marker. Add new types after this one.
43      */
44     FIB_PATH_TYPE_FIRST = 0,
45     /**
46      * Attached-nexthop. An interface and a nexthop are known.
47      */
48     FIB_PATH_TYPE_ATTACHED_NEXT_HOP = FIB_PATH_TYPE_FIRST,
49     /**
50      * attached. Only the interface is known.
51      */
52     FIB_PATH_TYPE_ATTACHED,
53     /**
54      * recursive. Only the next-hop is known.
55      */
56     FIB_PATH_TYPE_RECURSIVE,
57     /**
58      * special. nothing is known. so we drop.
59      */
60     FIB_PATH_TYPE_SPECIAL,
61     /**
62      * exclusive. user provided adj.
63      */
64     FIB_PATH_TYPE_EXCLUSIVE,
65     /**
66      * deag. Link to a lookup adj in the next table
67      */
68     FIB_PATH_TYPE_DEAG,
69     /**
70      * receive. it's for-us.
71      */
72     FIB_PATH_TYPE_RECEIVE,
73     /**
74      * Marker. Add new types before this one, then update it.
75      */
76     FIB_PATH_TYPE_LAST = FIB_PATH_TYPE_RECEIVE,
77 } __attribute__ ((packed)) fib_path_type_t;
78
79 /**
80  * The maximum number of path_types
81  */
82 #define FIB_PATH_TYPE_MAX (FIB_PATH_TYPE_LAST + 1)
83
84 #define FIB_PATH_TYPES {                                        \
85     [FIB_PATH_TYPE_ATTACHED_NEXT_HOP] = "attached-nexthop",     \
86     [FIB_PATH_TYPE_ATTACHED]          = "attached",             \
87     [FIB_PATH_TYPE_RECURSIVE]         = "recursive",            \
88     [FIB_PATH_TYPE_SPECIAL]           = "special",              \
89     [FIB_PATH_TYPE_EXCLUSIVE]         = "exclusive",            \
90     [FIB_PATH_TYPE_DEAG]              = "deag",                 \
91     [FIB_PATH_TYPE_RECEIVE]           = "receive",              \
92 }
93
94 #define FOR_EACH_FIB_PATH_TYPE(_item) \
95     for (_item = FIB_PATH_TYPE_FIRST; _item <= FIB_PATH_TYPE_LAST; _item++)
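
/*
 * Illustrative sketch, not part of the original file: the FIRST/LAST
 * markers, the name table and the FOR_EACH macro are typically used
 * together when walking or printing all path types, much as
 * format_fib_path() below does for the attribute names, e.g.:
 *
 *   u8 *s = NULL;
 *   fib_path_type_t pt;
 *
 *   FOR_EACH_FIB_PATH_TYPE(pt) {
 *       s = format(s, "%s ", fib_path_type_names[pt]);
 *   }
 *
 * (fib_path_type_names is the static array defined further down.)
 */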
96
97 /**
98  * Enumeration of path operational (i.e. derived) attributes
99  */
100 typedef enum fib_path_oper_attribute_t_ {
101     /**
102      * Marker. Add new types after this one.
103      */
104     FIB_PATH_OPER_ATTRIBUTE_FIRST = 0,
105     /**
106      * The path forms part of a recursive loop.
107      */
108     FIB_PATH_OPER_ATTRIBUTE_RECURSIVE_LOOP = FIB_PATH_OPER_ATTRIBUTE_FIRST,
109     /**
110      * The path is resolved
111      */
112     FIB_PATH_OPER_ATTRIBUTE_RESOLVED,
113     /**
114      * The path is attached, despite what the next-hop may say.
115      */
116     FIB_PATH_OPER_ATTRIBUTE_ATTACHED,
117     /**
118      * The path has become a permanent drop.
119      */
120     FIB_PATH_OPER_ATTRIBUTE_DROP,
121     /**
122      * Marker. Add new types before this one, then update it.
123      */
124     FIB_PATH_OPER_ATTRIBUTE_LAST = FIB_PATH_OPER_ATTRIBUTE_DROP,
125 } __attribute__ ((packed)) fib_path_oper_attribute_t;
126
127 /**
128  * The maximum number of path operational attributes
129  */
130 #define FIB_PATH_OPER_ATTRIBUTE_MAX (FIB_PATH_OPER_ATTRIBUTE_LAST + 1)
131
132 #define FIB_PATH_OPER_ATTRIBUTES {                                      \
133     [FIB_PATH_OPER_ATTRIBUTE_RECURSIVE_LOOP] = "recursive-loop",        \
134     [FIB_PATH_OPER_ATTRIBUTE_RESOLVED]       = "resolved",              \
135     [FIB_PATH_OPER_ATTRIBUTE_DROP]           = "drop",                  \
136 }
137
138 #define FOR_EACH_FIB_PATH_OPER_ATTRIBUTE(_item) \
139     for (_item = FIB_PATH_OPER_ATTRIBUTE_FIRST; \
140          _item <= FIB_PATH_OPER_ATTRIBUTE_LAST; \
141          _item++)
142
143 /**
144  * Path flags from the attributes
145  */
146 typedef enum fib_path_oper_flags_t_ {
147     FIB_PATH_OPER_FLAG_NONE = 0,
148     FIB_PATH_OPER_FLAG_RECURSIVE_LOOP = (1 << FIB_PATH_OPER_ATTRIBUTE_RECURSIVE_LOOP),
149     FIB_PATH_OPER_FLAG_DROP = (1 << FIB_PATH_OPER_ATTRIBUTE_DROP),
150     FIB_PATH_OPER_FLAG_RESOLVED = (1 << FIB_PATH_OPER_ATTRIBUTE_RESOLVED),
151     FIB_PATH_OPER_FLAG_ATTACHED = (1 << FIB_PATH_OPER_ATTRIBUTE_ATTACHED),
152 } __attribute__ ((packed)) fib_path_oper_flags_t;
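
/*
 * Illustrative sketch, not part of the original file: each operational
 * flag is simply its attribute's bit position (flag == 1 << attribute),
 * so the two forms are interchangeable when testing state, e.g.:
 *
 *   if (path->fp_oper_flags & (1 << FIB_PATH_OPER_ATTRIBUTE_RESOLVED))
 *   {
 *       ... equivalent to testing FIB_PATH_OPER_FLAG_RESOLVED ...
 *   }
 */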
153
154 /**
155  * A FIB path
156  */
157 typedef struct fib_path_t_ {
158     /**
159      * A path is a node in the FIB graph.
160      */
161     fib_node_t fp_node;
162
163     /**
164      * The index of the path-list to which this path belongs
165      */
166     u32 fp_pl_index;
167
168     /**
169      * This marks the start of the memory area used to hash
170      * the path
171      */
172     STRUCT_MARK(path_hash_start);
173
174     /**
175      * Configuration Flags
176      */
177     fib_path_cfg_flags_t fp_cfg_flags;
178
179     /**
180      * The type of the path. This is the selector for the union
181      */
182     fib_path_type_t fp_type;
183
184     /**
185      * The protocol of the next-hop, i.e. the address family of the
186      * next-hop's address. We can't derive this from the address itself
187      * since the address can be all zeros
188      */
189     fib_protocol_t fp_nh_proto;
190
191     /**
192      * UCMP [unnormalised] weight
193      */
194     u32 fp_weight;
195
196     /**
197      * per-type union of the data required to resolve the path
198      */
199     union {
200         struct {
201             /**
202              * The next-hop
203              */
204             ip46_address_t fp_nh;
205             /**
206              * The interface
207              */
208             u32 fp_interface;
209         } attached_next_hop;
210         struct {
211             /**
212              * The interface
213              */
214             u32 fp_interface;
215         } attached;
216         struct {
217             union
218             {
219                 /**
220                  * The next-hop
221                  */
222                 ip46_address_t fp_ip;
223                 /**
224                  * The local label to resolve through.
225                  */
226                 mpls_label_t fp_local_label;
227             } fp_nh;
228             /**
229              * The FIB table index in which to find the next-hop.
230      * This needs to be fixed. We should look up the adjacencies in
231              * a separate table of adjacencies, rather than from the FIB.
232              * Two reasons I can think of:
233              *   - consider:
234              *       int ip addr Gig0 10.0.0.1/24
235              *       ip route 10.0.0.2/32 via Gig1 192.168.1.2
236              *       ip route 1.1.1.1/32 via Gig0 10.0.0.2
237              *     this is perfectly valid.
238              *     Packets addressed to 10.0.0.2 should be sent via Gig1.
239      *     Packets addressed to 1.1.1.1 should be sent via Gig0.
240              *    when we perform the adj resolution from the FIB for the path
241              *    "via Gig0 10.0.0.2" the lookup will result in the route via Gig1
242              *    and so we will pick up the adj via Gig1 - which was not what the
243              *    operator wanted.
244              *  - we can only return link-type IPv4 and so not the link-type MPLS.
245              *    more on this in a later commit.
246              *
247              * The table ID should only belong to a recursive path and indicate
248              * which FIB should be used to resolve the next-hop.
249              */
250             fib_node_index_t fp_tbl_id;
251         } recursive;
252         struct {
253             /**
254      * The FIB index in which to perform the next lookup
255              */
256             fib_node_index_t fp_tbl_id;
257         } deag;
258         struct {
259         } special;
260         struct {
261             /**
262              * The user provided 'exclusive' DPO
263              */
264             dpo_id_t fp_ex_dpo;
265         } exclusive;
266         struct {
267             /**
268              * The interface on which the local address is configured
269              */
270             u32 fp_interface;
271             /**
272              * The next-hop
273              */
274             ip46_address_t fp_addr;
275         } receive;
276     };
277     STRUCT_MARK(path_hash_end);
278
279     /**
280      * Members in this last section represent information that is
281      * derived during resolution. It should not be copied to new paths
282      * nor compared.
283      */
284
285     /**
286      * Operational Flags
287      */
288     fib_path_oper_flags_t fp_oper_flags;
289
290     /**
291      * the resolving via fib. not part of the union, since it is not part
292      * of the path's hash.
293      */
294     fib_node_index_t fp_via_fib;
295
296     /**
297      * The Data-path objects through which this path resolves for IP.
298      */
299     dpo_id_t fp_dpo;
300
301     /**
302      * the index of this path in the parent's child list.
303      */
304     u32 fp_sibling;
305 } fib_path_t;
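
/*
 * Illustrative note, not part of the original file: only the members
 * between path_hash_start and path_hash_end contribute to a path's hash
 * and equality (see fib_path_hash() and fib_path_cmp_i() below). Two
 * paths that differ only in the derived members, e.g. fp_oper_flags or
 * fp_dpo, therefore hash and compare as equal. Sketch, assuming pi1 and
 * pi2 index two such paths:
 *
 *   ASSERT(fib_path_hash(pi1) == fib_path_hash(pi2));
 *   ASSERT(0 == fib_path_cmp(pi1, pi2));
 */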
306
307 /*
308  * Array of strings/names for the path types and attributes
309  */
310 static const char *fib_path_type_names[] = FIB_PATH_TYPES;
311 static const char *fib_path_oper_attribute_names[] = FIB_PATH_OPER_ATTRIBUTES;
312 static const char *fib_path_cfg_attribute_names[]  = FIB_PATH_CFG_ATTRIBUTES;
313
314 /*
315  * The memory pool from which we allocate all the paths
316  */
317 static fib_path_t *fib_path_pool;
318
319 /*
320  * Debug macro
321  */
322 #ifdef FIB_DEBUG
323 #define FIB_PATH_DBG(_p, _fmt, _args...)                        \
324 {                                                               \
325     u8 *_tmp = NULL;                                            \
326     _tmp = fib_path_format(fib_path_get_index(_p), _tmp);       \
327     clib_warning("path:[%d:%s]:" _fmt,                          \
328                  fib_path_get_index(_p), _tmp,                  \
329                  ##_args);                                      \
330     vec_free(_tmp);                                             \
331 }
332 #else
333 #define FIB_PATH_DBG(_p, _fmt, _args...)
334 #endif
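
/*
 * Illustrative usage, not part of the original file: the macro takes a
 * path, a format string and optional arguments, and compiles to nothing
 * unless FIB_DEBUG is defined, e.g.:
 *
 *   FIB_PATH_DBG(path, "resolved via adj:%d", path->fp_dpo.dpoi_index);
 */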
335
336 static fib_path_t *
337 fib_path_get (fib_node_index_t index)
338 {
339     return (pool_elt_at_index(fib_path_pool, index));
340 }
341
342 static fib_node_index_t 
343 fib_path_get_index (fib_path_t *path)
344 {
345     return (path - fib_path_pool);
346 }
347
348 static fib_node_t *
349 fib_path_get_node (fib_node_index_t index)
350 {
351     return ((fib_node_t*)fib_path_get(index));
352 }
353
354 static fib_path_t*
355 fib_path_from_fib_node (fib_node_t *node)
356 {
357 #if CLIB_DEBUG > 0
358     ASSERT(FIB_NODE_TYPE_PATH == node->fn_type);
359 #endif
360     return ((fib_path_t*)node);
361 }
362
363 u8 *
364 format_fib_path (u8 * s, va_list * args)
365 {
366     fib_path_t *path = va_arg (*args, fib_path_t *);
367     vnet_main_t * vnm = vnet_get_main();
368     fib_path_oper_attribute_t oattr;
369     fib_path_cfg_attribute_t cattr;
370
371     s = format (s, "      index:%d ", fib_path_get_index(path));
372     s = format (s, "pl-index:%d ", path->fp_pl_index);
373     s = format (s, "%U ", format_fib_protocol, path->fp_nh_proto);
374     s = format (s, "weight=%d ", path->fp_weight);
375     s = format (s, "%s: ", fib_path_type_names[path->fp_type]);
376     if (FIB_PATH_OPER_FLAG_NONE != path->fp_oper_flags) {
377         s = format(s, " oper-flags:");
378         FOR_EACH_FIB_PATH_OPER_ATTRIBUTE(oattr) {
379             if ((1<<oattr) & path->fp_oper_flags) {
380                 s = format (s, "%s,", fib_path_oper_attribute_names[oattr]);
381             }
382         }
383     }
384     if (FIB_PATH_CFG_FLAG_NONE != path->fp_cfg_flags) {
385         s = format(s, " cfg-flags:");
386         FOR_EACH_FIB_PATH_CFG_ATTRIBUTE(cattr) {
387             if ((1<<cattr) & path->fp_cfg_flags) {
388                 s = format (s, "%s,", fib_path_cfg_attribute_names[cattr]);
389             }
390         }
391     }
392     s = format(s, "\n       ");
393
394     switch (path->fp_type)
395     {
396     case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
397         s = format (s, "%U", format_ip46_address,
398                     &path->attached_next_hop.fp_nh,
399                     IP46_TYPE_ANY);
400         if (path->fp_oper_flags & FIB_PATH_OPER_FLAG_DROP)
401         {
402             s = format (s, " if_index:%d", path->attached_next_hop.fp_interface);
403         }
404         else
405         {
406             s = format (s, " %U",
407                         format_vnet_sw_interface_name,
408                         vnm,
409                         vnet_get_sw_interface(
410                             vnm,
411                             path->attached_next_hop.fp_interface));
412             if (vnet_sw_interface_is_p2p(vnet_get_main(),
413                                          path->attached_next_hop.fp_interface))
414             {
415                 s = format (s, " (p2p)");
416             }
417         }
418         if (!dpo_id_is_valid(&path->fp_dpo))
419         {
420             s = format(s, "\n          unresolved");
421         }
422         else
423         {
424             s = format(s, "\n          %U",
425                        format_dpo_id,
426                        &path->fp_dpo, 13);
427         }
428         break;
429     case FIB_PATH_TYPE_ATTACHED:
430         if (path->fp_oper_flags & FIB_PATH_OPER_FLAG_DROP)
431         {
432             s = format (s, " if_index:%d", path->attached_next_hop.fp_interface);
433         }
434         else
435         {
436             s = format (s, " %U",
437                         format_vnet_sw_interface_name,
438                         vnm,
439                         vnet_get_sw_interface(
440                             vnm,
441                             path->attached.fp_interface));
442         }
443         break;
444     case FIB_PATH_TYPE_RECURSIVE:
445         if (FIB_PROTOCOL_MPLS == path->fp_nh_proto)
446         {
447             s = format (s, "via %U",
448                         format_mpls_unicast_label,
449                         path->recursive.fp_nh.fp_local_label);
450         }
451         else
452         {
453             s = format (s, "via %U",
454                         format_ip46_address,
455                         &path->recursive.fp_nh.fp_ip,
456                         IP46_TYPE_ANY);
457         }
458         s = format (s, " in fib:%d",
459                     path->recursive.fp_tbl_id);
460         s = format (s, " via-fib:%d",
461                     path->fp_via_fib);
462         s = format (s, " via-dpo:[%U:%d]",
463                     format_dpo_type, path->fp_dpo.dpoi_type, 
464                     path->fp_dpo.dpoi_index);
465
466         break;
467     case FIB_PATH_TYPE_RECEIVE:
468     case FIB_PATH_TYPE_SPECIAL:
469     case FIB_PATH_TYPE_DEAG:
470     case FIB_PATH_TYPE_EXCLUSIVE:
471         if (dpo_id_is_valid(&path->fp_dpo))
472         {
473             s = format(s, "%U", format_dpo_id,
474                        &path->fp_dpo, 2);
475         }
476         break;
477     }
478     return (s);
479 }
480
481 u8 *
482 fib_path_format (fib_node_index_t pi, u8 *s)
483 {
484     fib_path_t *path;
485
486     path = fib_path_get(pi);
487     ASSERT(NULL != path);
488
489     return (format (s, "%U", format_fib_path, path));
490 }
491
492 u8 *
493 fib_path_adj_format (fib_node_index_t pi,
494                      u32 indent,
495                      u8 *s)
496 {
497     fib_path_t *path;
498
499     path = fib_path_get(pi);
500     ASSERT(NULL != path);
501
502     if (!dpo_id_is_valid(&path->fp_dpo))
503     {
504         s = format(s, " unresolved");
505     }
506     else
507     {
508         s = format(s, "%U", format_dpo_id,
509                    &path->fp_dpo, 2);
510     }
511
512     return (s);
513 }
514
515 /*
516  * fib_path_last_lock_gone
517  *
518  * We don't share paths, we share path lists, so the [un]lock functions
519  * are no-ops
520  */
521 static void
522 fib_path_last_lock_gone (fib_node_t *node)
523 {
524     ASSERT(0);
525 }
526
527 static adj_index_t
528 fib_path_attached_next_hop_get_adj (fib_path_t *path,
529                                     vnet_link_t link)
530 {
531     if (vnet_sw_interface_is_p2p(vnet_get_main(),
532                                  path->attached_next_hop.fp_interface))
533     {
534         /*
535          * if the interface is p2p then the adj for the specific
536          * neighbour on that link will never exist. on p2p links
537          * the subnet address (the attached route) links to the
538          * auto-adj (see below), we want that adj here too.
539          */
540         return (adj_nbr_add_or_lock(path->fp_nh_proto,
541                                     link,
542                                     &zero_addr,
543                                     path->attached_next_hop.fp_interface));
544     }
545     else
546     {
547         return (adj_nbr_add_or_lock(path->fp_nh_proto,
548                                     link,
549                                     &path->attached_next_hop.fp_nh,
550                                     path->attached_next_hop.fp_interface));
551     }
552 }
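
/*
 * Illustrative example, not part of the original file (approximate CLI,
 * hypothetical interface names): for an attached next-hop path such as
 *
 *   ip route add 1.1.1.1/32 via Gig0 10.0.0.2
 *
 * the adj returned above is keyed on the neighbour 10.0.0.2. Were Gig0 a
 * p2p link, the zero address is used instead, so the path shares the
 * interface's single auto-adj rather than waiting on a per-neighbour adj
 * that ARP/ND could never complete.
 */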
553
554 static void
555 fib_path_attached_next_hop_set (fib_path_t *path)
556 {
557     /*
558      * resolve directly via the adjacency described by the
559      * interface and next-hop
560      */
561     dpo_set(&path->fp_dpo,
562             DPO_ADJACENCY,
563             fib_proto_to_dpo(path->fp_nh_proto),
564             fib_path_attached_next_hop_get_adj(
565                  path,
566                  fib_proto_to_link(path->fp_nh_proto)));
567
568     /*
569      * become a child of the adjacency so we receive updates
570      * when its rewrite changes
571      */
572     path->fp_sibling = adj_child_add(path->fp_dpo.dpoi_index,
573                                      FIB_NODE_TYPE_PATH,
574                                      fib_path_get_index(path));
575
576     if (!vnet_sw_interface_is_admin_up(vnet_get_main(),
577                                       path->attached_next_hop.fp_interface) ||
578         !adj_is_up(path->fp_dpo.dpoi_index))
579     {
580         path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
581     }
582 }
583
584 /*
585  * create or update the path's recursive adj
586  */
587 static void
588 fib_path_recursive_adj_update (fib_path_t *path,
589                                fib_forward_chain_type_t fct,
590                                dpo_id_t *dpo)
591 {
592     dpo_id_t via_dpo = DPO_INVALID;
593
594     /*
595      * get the DPO to resolve through from the via-entry
596      */
597     fib_entry_contribute_forwarding(path->fp_via_fib,
598                                     fct,
599                                     &via_dpo);
600
601
602     /*
603      * hope for the best - clear if restrictions apply.
604      */
605     path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RESOLVED;
606
607     /*
608      * Validate any recursion constraints and over-ride the via
609      * adj if not met
610      */
611     if (path->fp_oper_flags & FIB_PATH_OPER_FLAG_RECURSIVE_LOOP)
612     {
613         path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
614         dpo_copy(&via_dpo, drop_dpo_get(fib_proto_to_dpo(path->fp_nh_proto)));
615     }
616     else if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_RESOLVE_HOST)
617     {
618         /*
619          * the via FIB must be a host route.
620          * Note the via FIB entry just added will always be a host route,
621          * since it is an RR-sourced host route. So what we need to
622          * check is whether the route has other sources. If it does, then
623          * some other source has added it as a host route. If it doesn't,
624          * then it was added only here and inherits forwarding from a cover;
625          * the cover is not a host route.
626          * The RR source is the lowest priority source, so we check if it
627          * is the best. If it is, there are no other sources.
628          */
629         if (fib_entry_get_best_source(path->fp_via_fib) >= FIB_SOURCE_RR)
630         {
631             path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
632             dpo_copy(&via_dpo, drop_dpo_get(fib_proto_to_dpo(path->fp_nh_proto)));
633
634             /*
635              * PIC edge trigger. let the load-balance maps know
636              */
637             load_balance_map_path_state_change(fib_path_get_index(path));
638         }
639     }
640     else if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_RESOLVE_ATTACHED)
641     {
642         /*
643          * RR source entries inherit the flags from the cover, so
644          * we can check the via directly
645          */
646         if (!(FIB_ENTRY_FLAG_ATTACHED & fib_entry_get_flags(path->fp_via_fib)))
647         {
648             path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
649             dpo_copy(&via_dpo, drop_dpo_get(fib_proto_to_dpo(path->fp_nh_proto)));
650
651             /*
652              * PIC edge trigger. let the load-balance maps know
653              */
654             load_balance_map_path_state_change(fib_path_get_index(path));
655         }
656     }
657     /*
658      * check for over-riding factors on the FIB entry itself
659      */
660     if (!fib_entry_is_resolved(path->fp_via_fib))
661     {
662         path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
663         dpo_copy(&via_dpo, drop_dpo_get(fib_proto_to_dpo(path->fp_nh_proto)));
664
665         /*
666          * PIC edge trigger. let the load-balance maps know
667          */
668         load_balance_map_path_state_change(fib_path_get_index(path));
669     }
670
671     /*
672      * update the path's contributed DPO
673      */
674     dpo_copy(dpo, &via_dpo);
675
676     FIB_PATH_DBG(path, "recursive update: %U",
677                  format_dpo_id,
678                  &path->fp_dpo, 2);
679
680     dpo_reset(&via_dpo);
681 }
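
/*
 * Illustrative example, not part of the original file (approximate CLI):
 * the recursion constraints checked above correspond to routes added
 * with a resolution restriction, e.g.
 *
 *   ip route add 8.8.8.8/32 via 10.1.1.1 resolve-via-host
 *
 * Such a path is only considered resolved while 10.1.1.1 is known as a
 * host route from a source other than the RR source (and, for
 * resolve-via-attached, while the via entry is attached). When the
 * constraint is not met the path contributes a drop and the load-balance
 * maps are informed (the PIC edge trigger).
 */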
682
683 /*
684  * fib_path_is_permanent_drop
685  *
686  * Return !0 if the path is configured to permanently drop,
687  * despite other attributes.
688  */
689 static int
690 fib_path_is_permanent_drop (fib_path_t *path)
691 {
692     return ((path->fp_cfg_flags & FIB_PATH_CFG_FLAG_DROP) ||
693             (path->fp_oper_flags & FIB_PATH_OPER_FLAG_DROP));
694 }
695
696 /*
697  * fib_path_unresolve
698  *
699  * Remove our dependency on the resolution target
700  */
701 static void
702 fib_path_unresolve (fib_path_t *path)
703 {
704     /*
705      * the forced drop path does not need unresolving
706      */
707     if (fib_path_is_permanent_drop(path))
708     {
709         return;
710     }
711
712     switch (path->fp_type)
713     {
714     case FIB_PATH_TYPE_RECURSIVE:
715         if (FIB_NODE_INDEX_INVALID != path->fp_via_fib)
716         {
717             fib_prefix_t pfx;
718
719             fib_entry_get_prefix(path->fp_via_fib, &pfx);
720             fib_entry_child_remove(path->fp_via_fib,
721                                    path->fp_sibling);
722             fib_table_entry_special_remove(path->recursive.fp_tbl_id,
723                                            &pfx,
724                                            FIB_SOURCE_RR);
725             path->fp_via_fib = FIB_NODE_INDEX_INVALID;
726         }
727         break;
728     case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
729     case FIB_PATH_TYPE_ATTACHED:
730         adj_child_remove(path->fp_dpo.dpoi_index,
731                          path->fp_sibling);
732         adj_unlock(path->fp_dpo.dpoi_index);
733         break;
734     case FIB_PATH_TYPE_EXCLUSIVE:
735         dpo_reset(&path->exclusive.fp_ex_dpo);
736         break;
737     case FIB_PATH_TYPE_SPECIAL:
738     case FIB_PATH_TYPE_RECEIVE:
739     case FIB_PATH_TYPE_DEAG:
740         /*
741          * these hold only the path's DPO, which is reset below.
742          */
743         break;
744     }
745
746     /*
747      * release the adj we were holding and pick up the
748      * drop just in case.
749      */
750     dpo_reset(&path->fp_dpo);
751     path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
752
753     return;
754 }
755
756 static fib_forward_chain_type_t
757 fib_path_proto_to_chain_type (fib_protocol_t proto)
758 {
759     switch (proto)
760     {
761     case FIB_PROTOCOL_IP4:
762         return (FIB_FORW_CHAIN_TYPE_UNICAST_IP4);
763     case FIB_PROTOCOL_IP6:
764         return (FIB_FORW_CHAIN_TYPE_UNICAST_IP6);
765     case FIB_PROTOCOL_MPLS:
766         return (FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS);
767     }
768     return (FIB_FORW_CHAIN_TYPE_UNICAST_IP4);
769 }
770
771 /*
772  * fib_path_back_walk_notify
773  *
774  * A back walk has reached this path.
775  */
776 static fib_node_back_walk_rc_t
777 fib_path_back_walk_notify (fib_node_t *node,
778                            fib_node_back_walk_ctx_t *ctx)
779 {
780     fib_path_t *path;
781
782     path = fib_path_from_fib_node(node);
783
784     switch (path->fp_type)
785     {
786     case FIB_PATH_TYPE_RECURSIVE:
787         if (FIB_NODE_BW_REASON_FLAG_EVALUATE & ctx->fnbw_reason)
788         {
789             /*
790              * modify the recursive adjacency to use the new forwarding
791              * of the via-fib.
792              * this update is visible to packets in flight in the DP.
793              */
794             fib_path_recursive_adj_update(
795                 path,
796                 fib_path_proto_to_chain_type(path->fp_nh_proto),
797                 &path->fp_dpo);
798         }
799         if ((FIB_NODE_BW_REASON_FLAG_ADJ_UPDATE & ctx->fnbw_reason) ||
800             (FIB_NODE_BW_REASON_FLAG_ADJ_DOWN   & ctx->fnbw_reason))
801         {
802             /*
803              * ADJ updates (complete<->incomplete) do not need to propagate to
804              * recursive entries.
805              * The only reason its needed as far back as here, is that the adj
806              * and the incomplete adj are a different DPO type, so the LBs need
807              * to re-stack.
808              * If this walk was quashed in the fib_entry, then any non-fib_path
809              * children (like tunnels that collapse out the LB when they stack)
810              * would not see the update.
811              */
812             return (FIB_NODE_BACK_WALK_CONTINUE);
813         }
814         break;
815     case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
816         /*
817          * FIXME comment
818          * ADJ_UPDATE backwalks pass silently through here and up to
819          * the path-list when the multipath adj collapse occurs.
820          * The reason we do this is that the assumption is that VPP
821          * runs in an environment where the Control-Plane is remote
822          * and hence reacts slowly to link up/down. In order to remove
823          * this down link from the ECMP set quickly, we back-walk.
824          * VPP also has dedicated CPUs, so we are not stealing resources
825          * from the CP to do so.
826          */
827         if (FIB_NODE_BW_REASON_FLAG_INTERFACE_UP & ctx->fnbw_reason)
828         {
829             if (path->fp_oper_flags & FIB_PATH_OPER_FLAG_RESOLVED)
830             {
831                 /*
832                  * already resolved. no need to walk back again
833                  */
834                 return (FIB_NODE_BACK_WALK_CONTINUE);
835             }
836             path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RESOLVED;
837         }
838         if (FIB_NODE_BW_REASON_FLAG_INTERFACE_DOWN & ctx->fnbw_reason)
839         {
840             if (!(path->fp_oper_flags & FIB_PATH_OPER_FLAG_RESOLVED))
841             {
842                 /*
843                  * already unresolved. no need to walk back again
844                  */
845                 return (FIB_NODE_BACK_WALK_CONTINUE);
846             }
847             path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
848         }
849         if (FIB_NODE_BW_REASON_FLAG_INTERFACE_DELETE & ctx->fnbw_reason)
850         {
851             /*
852              * The interface this path resolves through has been deleted.
853              * This will leave the path in a permanent drop state. The route
854              * needs to be removed and readded (and hence the path-list deleted)
855              * before it can forward again.
856              */
857             fib_path_unresolve(path);
858             path->fp_oper_flags |= FIB_PATH_OPER_FLAG_DROP;
859         }
860         if (FIB_NODE_BW_REASON_FLAG_ADJ_UPDATE & ctx->fnbw_reason)
861         {
862             /*
863              * restack the DPO to pick up the correct DPO sub-type
864              */
865             uword if_is_up;
866             adj_index_t ai;
867
868             if_is_up = vnet_sw_interface_is_admin_up(
869                            vnet_get_main(),
870                            path->attached_next_hop.fp_interface);
871
872             ai = fib_path_attached_next_hop_get_adj(
873                      path,
874                      fib_proto_to_link(path->fp_nh_proto));
875
876             path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
877             if (if_is_up && adj_is_up(ai))
878             {
879                 path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RESOLVED;
880             }
881
882             dpo_set(&path->fp_dpo, DPO_ADJACENCY,
883                     fib_proto_to_dpo(path->fp_nh_proto),
884                     ai);
885             adj_unlock(ai);
886
887             if (!if_is_up)
888             {
889                 /*
890                  * If the interface is not up there is no reason to walk
891                  * back to children. If we did, they would only evaluate
892                  * that this path is unresolved and hence it would
893                  * not contribute the adjacency - so it would be wasted
894                  * CPU time.
895                  */
896                 return (FIB_NODE_BACK_WALK_CONTINUE);
897             }
898         }
899         if (FIB_NODE_BW_REASON_FLAG_ADJ_DOWN & ctx->fnbw_reason)
900         {
901             if (!(path->fp_oper_flags & FIB_PATH_OPER_FLAG_RESOLVED))
902             {
903                 /*
904                  * already unresolved. no need to walk back again
905                  */
906                 return (FIB_NODE_BACK_WALK_CONTINUE);
907             }
908             /*
909              * the adj has gone down. the path is no longer resolved.
910              */
911             path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
912         }
913         break;
914     case FIB_PATH_TYPE_ATTACHED:
915         /*
916          * FIXME; this could schedule a lower priority walk, since attached
917          * routes are not usually in ECMP configurations so the backwalk to
918          * the FIB entry does not need to be high priority
919          */
920         if (FIB_NODE_BW_REASON_FLAG_INTERFACE_UP & ctx->fnbw_reason)
921         {
922             path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RESOLVED;
923         }
924         if (FIB_NODE_BW_REASON_FLAG_INTERFACE_DOWN & ctx->fnbw_reason)
925         {
926             path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
927         }
928         if (FIB_NODE_BW_REASON_FLAG_INTERFACE_DELETE & ctx->fnbw_reason)
929         {
930             fib_path_unresolve(path);
931             path->fp_oper_flags |= FIB_PATH_OPER_FLAG_DROP;
932         }
933         break;
934     case FIB_PATH_TYPE_DEAG:
935         /*
936          * FIXME When VRF delete is allowed this will need a poke.
937          */
938     case FIB_PATH_TYPE_SPECIAL:
939     case FIB_PATH_TYPE_RECEIVE:
940     case FIB_PATH_TYPE_EXCLUSIVE:
941         /*
942          * these path types have no parents. so to be
943          * walked from one is unexpected.
944          */
945         ASSERT(0);
946         break;
947     }
948
949     /*
950      * propagate the backwalk further to the path-list
951      */
952     fib_path_list_back_walk(path->fp_pl_index, ctx);
953
954     return (FIB_NODE_BACK_WALK_CONTINUE);
955 }
956
957 static void
958 fib_path_memory_show (void)
959 {
960     fib_show_memory_usage("Path",
961                           pool_elts(fib_path_pool),
962                           pool_len(fib_path_pool),
963                           sizeof(fib_path_t));
964 }
965
966 /*
967  * The FIB path's graph node virtual function table
968  */
969 static const fib_node_vft_t fib_path_vft = {
970     .fnv_get = fib_path_get_node,
971     .fnv_last_lock = fib_path_last_lock_gone,
972     .fnv_back_walk = fib_path_back_walk_notify,
973     .fnv_mem_show = fib_path_memory_show,
974 };
975
976 static fib_path_cfg_flags_t
977 fib_path_route_flags_to_cfg_flags (const fib_route_path_t *rpath)
978 {
979     fib_path_cfg_flags_t cfg_flags = FIB_PATH_CFG_FLAG_NONE;
980
981     if (rpath->frp_flags & FIB_ROUTE_PATH_RESOLVE_VIA_HOST)
982         cfg_flags |= FIB_PATH_CFG_FLAG_RESOLVE_HOST;
983     if (rpath->frp_flags & FIB_ROUTE_PATH_RESOLVE_VIA_ATTACHED)
984         cfg_flags |= FIB_PATH_CFG_FLAG_RESOLVE_ATTACHED;
985     if (rpath->frp_flags & FIB_ROUTE_PATH_LOCAL)
986         cfg_flags |= FIB_PATH_CFG_FLAG_LOCAL;
987     if (rpath->frp_flags & FIB_ROUTE_PATH_ATTACHED)
988         cfg_flags |= FIB_PATH_CFG_FLAG_ATTACHED;
989
990     return (cfg_flags);
991 }
992
993 /*
994  * fib_path_create
995  *
996  * Create and initialise a new path object.
997  * return the index of the path.
998  */
999 fib_node_index_t
1000 fib_path_create (fib_node_index_t pl_index,
1001                  fib_protocol_t nh_proto,
1002                  fib_path_cfg_flags_t flags,
1003                  const fib_route_path_t *rpath)
1004 {
1005     fib_path_t *path;
1006
1007     pool_get(fib_path_pool, path);
1008     memset(path, 0, sizeof(*path));
1009
1010     fib_node_init(&path->fp_node,
1011                   FIB_NODE_TYPE_PATH);
1012
1013     dpo_reset(&path->fp_dpo);
1014     path->fp_pl_index = pl_index;
1015     path->fp_nh_proto = nh_proto;
1016     path->fp_via_fib = FIB_NODE_INDEX_INVALID;
1017     path->fp_weight = rpath->frp_weight;
1018     if (0 == path->fp_weight)
1019     {
1020         /*
1021          * a weight of 0 is a meaningless value. We could either reject it, and thus force
1022          * clients to always use 1, or accept it and fix it up appropriately.
1023          */
1024         path->fp_weight = 1;
1025     }
1026     path->fp_cfg_flags = flags;
1027     path->fp_cfg_flags |= fib_path_route_flags_to_cfg_flags(rpath);
1028
1029     /*
1030      * deduce the path's type from the parameters and save what is needed.
1031      */
1032     if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_LOCAL)
1033     {
1034         path->fp_type = FIB_PATH_TYPE_RECEIVE;
1035         path->receive.fp_interface = rpath->frp_sw_if_index;
1036         path->receive.fp_addr = rpath->frp_addr;
1037     }
1038     else if (~0 != rpath->frp_sw_if_index)
1039     {
1040         if (ip46_address_is_zero(&rpath->frp_addr))
1041         {
1042             path->fp_type = FIB_PATH_TYPE_ATTACHED;
1043             path->attached.fp_interface = rpath->frp_sw_if_index;
1044         }
1045         else
1046         {
1047             path->fp_type = FIB_PATH_TYPE_ATTACHED_NEXT_HOP;
1048             path->attached_next_hop.fp_interface = rpath->frp_sw_if_index;
1049             path->attached_next_hop.fp_nh = rpath->frp_addr;
1050         }
1051     }
1052     else
1053     {
1054         if (ip46_address_is_zero(&rpath->frp_addr))
1055         {
1056             if (~0 == rpath->frp_fib_index)
1057             {
1058                 path->fp_type = FIB_PATH_TYPE_SPECIAL;
1059             }
1060             else
1061             {
1062                 path->fp_type = FIB_PATH_TYPE_DEAG;
1063                 path->deag.fp_tbl_id = rpath->frp_fib_index;
1064             }           
1065         }
1066         else
1067         {
1068             path->fp_type = FIB_PATH_TYPE_RECURSIVE;
1069             if (FIB_PROTOCOL_MPLS == path->fp_nh_proto)
1070             {
1071                 path->recursive.fp_nh.fp_local_label = rpath->frp_local_label;
1072             }
1073             else
1074             {
1075                 path->recursive.fp_nh.fp_ip = rpath->frp_addr;
1076             }
1077             path->recursive.fp_tbl_id = rpath->frp_fib_index;
1078         }
1079     }
1080
1081     FIB_PATH_DBG(path, "create");
1082
1083     return (fib_path_get_index(path));
1084 }
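
/*
 * Illustrative mapping, not part of the original file, of route-path
 * inputs to the path type deduced above (approximate CLI forms):
 *
 *   ip route add 1.1.1.1/32 via Gig0 10.0.0.2   -> attached-nexthop
 *   ip route add 2.2.2.0/24 via Gig0            -> attached
 *   ip route add 3.3.3.3/32 via 10.0.0.2        -> recursive
 *   a path with no interface and no address,
 *   but a valid FIB table index                 -> deag
 *   a path flagged 'local'                      -> receive
 */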
1085
1086 /*
1087  * fib_path_create_special
1088  *
1089  * Create and initialise a new path object.
1090  * return the index of the path.
1091  */
1092 fib_node_index_t
1093 fib_path_create_special (fib_node_index_t pl_index,
1094                          fib_protocol_t nh_proto,
1095                          fib_path_cfg_flags_t flags,
1096                          const dpo_id_t *dpo)
1097 {
1098     fib_path_t *path;
1099
1100     pool_get(fib_path_pool, path);
1101     memset(path, 0, sizeof(*path));
1102
1103     fib_node_init(&path->fp_node,
1104                   FIB_NODE_TYPE_PATH);
1105     dpo_reset(&path->fp_dpo);
1106
1107     path->fp_pl_index = pl_index;
1108     path->fp_weight = 1;
1109     path->fp_nh_proto = nh_proto;
1110     path->fp_via_fib = FIB_NODE_INDEX_INVALID;
1111     path->fp_cfg_flags = flags;
1112
1113     if (FIB_PATH_CFG_FLAG_DROP & flags)
1114     {
1115         path->fp_type = FIB_PATH_TYPE_SPECIAL;
1116     }
1117     else if (FIB_PATH_CFG_FLAG_LOCAL & flags)
1118     {
1119         path->fp_type = FIB_PATH_TYPE_RECEIVE;
1120         path->receive.fp_interface = FIB_NODE_INDEX_INVALID;
1121     }
1122     else
1123     {
1124         path->fp_type = FIB_PATH_TYPE_EXCLUSIVE;
1125         ASSERT(NULL != dpo);
1126         dpo_copy(&path->exclusive.fp_ex_dpo, dpo);
1127     }
1128
1129     return (fib_path_get_index(path));
1130 }
1131
1132 /*
1133  * fib_path_copy
1134  *
1135  * Copy a path. return index of new path.
1136  */
1137 fib_node_index_t
1138 fib_path_copy (fib_node_index_t path_index,
1139                fib_node_index_t path_list_index)
1140 {
1141     fib_path_t *path, *orig_path;
1142
1143     pool_get(fib_path_pool, path);
1144
1145     orig_path = fib_path_get(path_index);
1146     ASSERT(NULL != orig_path);
1147
1148     memcpy(path, orig_path, sizeof(*path));
1149
1150     FIB_PATH_DBG(path, "create-copy:%d", path_index);
1151
1152     /*
1153      * reset the dynamic section
1154      */
1155     fib_node_init(&path->fp_node, FIB_NODE_TYPE_PATH);
1156     path->fp_oper_flags     = FIB_PATH_OPER_FLAG_NONE;
1157     path->fp_pl_index  = path_list_index;
1158     path->fp_via_fib   = FIB_NODE_INDEX_INVALID;
1159     memset(&path->fp_dpo, 0, sizeof(path->fp_dpo));
1160     dpo_reset(&path->fp_dpo);
1161
1162     return (fib_path_get_index(path));
1163 }
1164
1165 /*
1166  * fib_path_destroy
1167  *
1168  * destroy a path that is no longer required
1169  */
1170 void
1171 fib_path_destroy (fib_node_index_t path_index)
1172 {
1173     fib_path_t *path;
1174
1175     path = fib_path_get(path_index);
1176
1177     ASSERT(NULL != path);
1178     FIB_PATH_DBG(path, "destroy");
1179
1180     fib_path_unresolve(path);
1181
1182     fib_node_deinit(&path->fp_node);
1183     pool_put(fib_path_pool, path);
1184 }
1185
1186 /*
1187  * fib_path_hash
1188  *
1189  * compute the hash of a path
1190  */
1191 uword
1192 fib_path_hash (fib_node_index_t path_index)
1193 {
1194     fib_path_t *path;
1195
1196     path = fib_path_get(path_index);
1197
1198     return (hash_memory(STRUCT_MARK_PTR(path, path_hash_start),
1199                         (STRUCT_OFFSET_OF(fib_path_t, path_hash_end) -
1200                          STRUCT_OFFSET_OF(fib_path_t, path_hash_start)),
1201                         0));
1202 }
1203
1204 /*
1205  * fib_path_cmp_i
1206  *
1207  * Compare two paths for equivalence.
1208  */
1209 static int
1210 fib_path_cmp_i (const fib_path_t *path1,
1211                 const fib_path_t *path2)
1212 {
1213     int res;
1214
1215     res = 1;
1216
1217     /*
1218      * paths of different types and protocols are not equal.
1219      * paths that differ only in weight are considered the same path.
1220      */
1221     if (path1->fp_type != path2->fp_type)
1222     {
1223         res = (path1->fp_type - path2->fp_type);
1224     }
1225     else if (path1->fp_nh_proto != path2->fp_nh_proto)
1226     {
1227         res = (path1->fp_nh_proto - path2->fp_nh_proto);
1228     }
1229     else
1230     {
1231         /*
1232          * both paths are of the same type.
1233          * consider each type and its attributes in turn.
1234          */
1235         switch (path1->fp_type)
1236         {
1237         case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
1238             res = ip46_address_cmp(&path1->attached_next_hop.fp_nh,
1239                                    &path2->attached_next_hop.fp_nh);
1240             if (0 == res) {
1241                 res = vnet_sw_interface_compare(
1242                           vnet_get_main(),
1243                           path1->attached_next_hop.fp_interface,
1244                           path2->attached_next_hop.fp_interface);
1245             }
1246             break;
1247         case FIB_PATH_TYPE_ATTACHED:
1248             res = vnet_sw_interface_compare(
1249                       vnet_get_main(),
1250                       path1->attached.fp_interface,
1251                       path2->attached.fp_interface);
1252             break;
1253         case FIB_PATH_TYPE_RECURSIVE:
1254             res = ip46_address_cmp(&path1->recursive.fp_nh.fp_ip,
1255                                    &path2->recursive.fp_nh.fp_ip);
1256  
1257             if (0 == res)
1258             {
1259                 res = (path1->recursive.fp_tbl_id - path2->recursive.fp_tbl_id);
1260             }
1261             break;
1262         case FIB_PATH_TYPE_DEAG:
1263             res = (path1->deag.fp_tbl_id - path2->deag.fp_tbl_id);
1264             break;
1265         case FIB_PATH_TYPE_SPECIAL:
1266         case FIB_PATH_TYPE_RECEIVE:
1267         case FIB_PATH_TYPE_EXCLUSIVE:
1268             res = 0;
1269             break;
1270         }
1271     }
1272     return (res);
1273 }
1274
1275 /*
1276  * fib_path_cmp_for_sort
1277  *
1278  * Compare two paths for equivalence. Used during path sorting.
1279  * As usual 0 means equal.
1280  */
1281 int
1282 fib_path_cmp_for_sort (void * v1,
1283                        void * v2)
1284 {
1285     fib_node_index_t *pi1 = v1, *pi2 = v2;
1286     fib_path_t *path1, *path2;
1287
1288     path1 = fib_path_get(*pi1);
1289     path2 = fib_path_get(*pi2);
1290
1291     return (fib_path_cmp_i(path1, path2));
1292 }
1293
1294 /*
1295  * fib_path_cmp
1296  *
1297  * Compare two paths for equivalence.
1298  */
1299 int
1300 fib_path_cmp (fib_node_index_t pi1,
1301               fib_node_index_t pi2)
1302 {
1303     fib_path_t *path1, *path2;
1304
1305     path1 = fib_path_get(pi1);
1306     path2 = fib_path_get(pi2);
1307
1308     return (fib_path_cmp_i(path1, path2));
1309 }
1310
1311 int
1312 fib_path_cmp_w_route_path (fib_node_index_t path_index,
1313                            const fib_route_path_t *rpath)
1314 {
1315     fib_path_t *path;
1316     int res;
1317
1318     path = fib_path_get(path_index);
1319
1320     res = 1;
1321
1322     if (path->fp_weight != rpath->frp_weight)
1323     {
1324         res = (path->fp_weight - rpath->frp_weight);
1325     }
1326     else
1327     {
1328         /*
1329          * both paths are of the same type.
1330          * consider each type and its attributes in turn.
1331          */
1332         switch (path->fp_type)
1333         {
1334         case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
1335             res = ip46_address_cmp(&path->attached_next_hop.fp_nh,
1336                                    &rpath->frp_addr);
1337             if (0 == res)
1338             {
1339                 res = vnet_sw_interface_compare(
1340                           vnet_get_main(),
1341                           path->attached_next_hop.fp_interface,
1342                           rpath->frp_sw_if_index);
1343             }
1344             break;
1345         case FIB_PATH_TYPE_ATTACHED:
1346             res = vnet_sw_interface_compare(
1347                       vnet_get_main(),
1348                       path->attached.fp_interface,
1349                       rpath->frp_sw_if_index);
1350             break;
1351         case FIB_PATH_TYPE_RECURSIVE:
1352             if (FIB_PROTOCOL_MPLS == path->fp_nh_proto)
1353             {
1354                 res = path->recursive.fp_nh.fp_local_label - rpath->frp_local_label;
1355             }
1356             else
1357             {
1358                 res = ip46_address_cmp(&path->recursive.fp_nh.fp_ip,
1359                                        &rpath->frp_addr);
1360             }
1361
1362             if (0 == res)
1363             {
1364                 res = (path->recursive.fp_tbl_id - rpath->frp_fib_index);
1365             }
1366             break;
1367         case FIB_PATH_TYPE_DEAG:
1368             res = (path->deag.fp_tbl_id - rpath->frp_fib_index);
1369             break;
1370         case FIB_PATH_TYPE_SPECIAL:
1371         case FIB_PATH_TYPE_RECEIVE:
1372         case FIB_PATH_TYPE_EXCLUSIVE:
1373             res = 0;
1374             break;
1375         }
1376     }
1377     return (res);
1378 }
1379
1380 /*
1381  * fib_path_recursive_loop_detect
1382  *
1383  * A forward walk of the FIB object graph to detect for a cycle/loop. This
1384  * walk is initiated when an entry is linking to a new path list or from an old.
1385  * The entry vector passed contains all the FIB entries that are children of this
1386  * path (it is all the entries encountered on the walk so far). If this vector
1387  * contains the entry this path resolves via, then a loop is about to form.
1388  * The loop must be allowed to form, since we need the dependencies in place
1389  * so that we can track when the loop breaks.
1390  * However, we MUST not produce a loop in the forwarding graph (else packets
1391  * would loop around the switch path until the loop breaks), so we mark recursive
1392  * paths as looped so that they do not contribute forwarding information.
1393  * By marking the path as looped, an entry such as:
1394  *    X/Y
1395  *     via a.a.a.a (looped)
1396  *     via b.b.b.b (not looped)
1397  * can still forward using the info provided by b.b.b.b only
1398  */
1399 int
1400 fib_path_recursive_loop_detect (fib_node_index_t path_index,
1401                                 fib_node_index_t **entry_indicies)
1402 {
1403     fib_path_t *path;
1404
1405     path = fib_path_get(path_index);
1406
1407     /*
1408      * the forced drop path is never looped, since it is never resolved.
1409      */
1410     if (fib_path_is_permanent_drop(path))
1411     {
1412         return (0);
1413     }
1414
1415     switch (path->fp_type)
1416     {
1417     case FIB_PATH_TYPE_RECURSIVE:
1418     {
1419         fib_node_index_t *entry_index, *entries;
1420         int looped = 0;
1421         entries = *entry_indicies;
1422
1423         vec_foreach(entry_index, entries) {
1424             if (*entry_index == path->fp_via_fib)
1425             {
1426                 /*
1427                  * the entry that is about to link to this path-list (or
1428                  * one of this path-list's children) is the same entry that
1429                  * this recursive path resolves through. this is a cycle.
1430                  * abort the walk.
1431                  */
1432                 looped = 1;
1433                 break;
1434             }
1435         }
1436
1437         if (looped)
1438         {
1439             FIB_PATH_DBG(path, "recursive loop formed");
1440             path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RECURSIVE_LOOP;
1441
1442             dpo_copy(&path->fp_dpo,
1443                     drop_dpo_get(fib_proto_to_dpo(path->fp_nh_proto)));
1444         }
1445         else
1446         {
1447             /*
1448              * no loop here yet. keep forward walking the graph.
1449              */     
1450             if (fib_entry_recursive_loop_detect(path->fp_via_fib, entry_indicies))
1451             {
1452                 FIB_PATH_DBG(path, "recursive loop formed");
1453                 path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RECURSIVE_LOOP;
1454             }
1455             else
1456             {
1457                 FIB_PATH_DBG(path, "recursive loop cleared");
1458                 path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RECURSIVE_LOOP;
1459             }
1460         }
1461         break;
1462     }
1463     case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
1464     case FIB_PATH_TYPE_ATTACHED:
1465     case FIB_PATH_TYPE_SPECIAL:
1466     case FIB_PATH_TYPE_DEAG:
1467     case FIB_PATH_TYPE_RECEIVE:
1468     case FIB_PATH_TYPE_EXCLUSIVE:
1469         /*
1470          * these path types cannot be part of a loop, since they are the leaves
1471          * of the graph.
1472          */
1473         break;
1474     }
1475
1476     return (fib_path_is_looped(path_index));
1477 }
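
/*
 * Illustrative example, not part of the original file, of a
 * configuration that forms the loop described above (approximate CLI):
 *
 *   ip route add 1.1.1.1/32 via 2.2.2.2
 *   ip route add 2.2.2.2/32 via 1.1.1.1
 *
 * Each recursive path resolves through the other's entry. The dependency
 * links are kept so the break can be detected, but both paths are marked
 * FIB_PATH_OPER_FLAG_RECURSIVE_LOOP and contribute a drop until the
 * configuration changes.
 */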
1478
1479 int
1480 fib_path_resolve (fib_node_index_t path_index)
1481 {
1482     fib_path_t *path;
1483
1484     path = fib_path_get(path_index);
1485
1486     /*
1487      * hope for the best.
1488      */
1489     path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RESOLVED;
1490
1491     /*
1492      * the forced drop path resolves via the drop adj
1493      */
1494     if (fib_path_is_permanent_drop(path))
1495     {
1496         dpo_copy(&path->fp_dpo,
1497                  drop_dpo_get(fib_proto_to_dpo(path->fp_nh_proto)));
1498         path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
1499         return (fib_path_is_resolved(path_index));
1500     }
1501
1502     switch (path->fp_type)
1503     {
1504     case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
1505         fib_path_attached_next_hop_set(path);
1506         break;
1507     case FIB_PATH_TYPE_ATTACHED:
1508         /*
1509          * path->attached.fp_interface
1510          */
1511         if (!vnet_sw_interface_is_admin_up(vnet_get_main(),
1512                                            path->attached.fp_interface))
1513         {
1514             path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
1515         }
1516         if (vnet_sw_interface_is_p2p(vnet_get_main(),
1517                                      path->attached.fp_interface))
1518         {
1519             /*
1520              * point-2-point interfaces do not require a glean, since
1521              * there is nothing to ARP. Install a rewrite/nbr adj instead
1522              */
1523             dpo_set(&path->fp_dpo,
1524                     DPO_ADJACENCY,
1525                     fib_proto_to_dpo(path->fp_nh_proto),
1526                     adj_nbr_add_or_lock(
1527                         path->fp_nh_proto,
1528                         fib_proto_to_link(path->fp_nh_proto),
1529                         &zero_addr,
1530                         path->attached.fp_interface));
1531         }
1532         else
1533         {
1534             dpo_set(&path->fp_dpo,
1535                     DPO_ADJACENCY_GLEAN,
1536                     fib_proto_to_dpo(path->fp_nh_proto),
1537                     adj_glean_add_or_lock(path->fp_nh_proto,
1538                                           path->attached.fp_interface,
1539                                           NULL));
1540         }
1541         /*
1542          * become a child of the adjacency so we receive updates
1543          * when the interface state changes
1544          */
1545         path->fp_sibling = adj_child_add(path->fp_dpo.dpoi_index,
1546                                          FIB_NODE_TYPE_PATH,
1547                                          fib_path_get_index(path));
1548
1549         break;
1550     case FIB_PATH_TYPE_RECURSIVE:
1551     {
1552         /*
1553          * Create a RR source entry in the table for the address
1554          * that this path recurses through.
1555          * This resolve action is recursive, hence we may create
1556          * more paths in the process. More creates may mean a realloc
1557          * of the path pool, so the path pointer is re-fetched below.
1558          */
1559         fib_node_index_t fei;
1560         fib_prefix_t pfx;
1561
1562         ASSERT(FIB_NODE_INDEX_INVALID == path->fp_via_fib);
1563
1564         if (FIB_PROTOCOL_MPLS == path->fp_nh_proto)
1565         {
1566             fib_prefix_from_mpls_label(path->recursive.fp_nh.fp_local_label, &pfx);
1567         }
1568         else
1569         {
1570             fib_prefix_from_ip46_addr(&path->recursive.fp_nh.fp_ip, &pfx);
1571         }
1572
1573         fei = fib_table_entry_special_add(path->recursive.fp_tbl_id,
1574                                           &pfx,
1575                                           FIB_SOURCE_RR,
1576                                           FIB_ENTRY_FLAG_NONE,
1577                                           ADJ_INDEX_INVALID);
1578
1579         path = fib_path_get(path_index);
1580         path->fp_via_fib = fei;
1581
1582         /*
1583          * become a dependent child of the entry so the path is 
1584          * informed when the forwarding for the entry changes.
1585          */
1586         path->fp_sibling = fib_entry_child_add(path->fp_via_fib,
1587                                                FIB_NODE_TYPE_PATH,
1588                                                fib_path_get_index(path));
1589
1590         /*
1591          * create and configure the IP DPO
1592          */
1593         fib_path_recursive_adj_update(
1594             path,
1595             fib_path_proto_to_chain_type(path->fp_nh_proto),
1596             &path->fp_dpo);
1597
1598         break;
1599     }
1600     case FIB_PATH_TYPE_SPECIAL:
1601         /*
1602          * Resolve via the drop
1603          */
1604         dpo_copy(&path->fp_dpo,
1605                  drop_dpo_get(fib_proto_to_dpo(path->fp_nh_proto)));
1606         break;
1607     case FIB_PATH_TYPE_DEAG:
1608         /*
1609          * Resolve via a lookup DPO.
1610          * FIXME. control plane should add routes with a table ID
1611          */
1612         lookup_dpo_add_or_lock_w_fib_index(path->deag.fp_tbl_id,
1613                                           fib_proto_to_dpo(path->fp_nh_proto),
1614                                           LOOKUP_INPUT_DST_ADDR,
1615                                           LOOKUP_TABLE_FROM_CONFIG,
1616                                           &path->fp_dpo);
1617         break;
1618     case FIB_PATH_TYPE_RECEIVE:
1619         /*
1620          * Resolve via a receive DPO.
1621          */
1622         receive_dpo_add_or_lock(fib_proto_to_dpo(path->fp_nh_proto),
1623                                 path->receive.fp_interface,
1624                                 &path->receive.fp_addr,
1625                                 &path->fp_dpo);
1626         break;
1627     case FIB_PATH_TYPE_EXCLUSIVE:
1628         /*
1629          * Resolve via the user provided DPO
1630          */
1631         dpo_copy(&path->fp_dpo, &path->exclusive.fp_ex_dpo);
1632         break;
1633     }
1634
1635     return (fib_path_is_resolved(path_index));
1636 }
1637
1638 u32
1639 fib_path_get_resolving_interface (fib_node_index_t path_index)
1640 {
1641     fib_path_t *path;
1642
1643     path = fib_path_get(path_index);
1644
1645     switch (path->fp_type)
1646     {
1647     case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
1648         return (path->attached_next_hop.fp_interface);
1649     case FIB_PATH_TYPE_ATTACHED:
1650         return (path->attached.fp_interface);
1651     case FIB_PATH_TYPE_RECEIVE:
1652         return (path->receive.fp_interface);
1653     case FIB_PATH_TYPE_RECURSIVE:
1654         return (fib_entry_get_resolving_interface(path->fp_via_fib));
1655     case FIB_PATH_TYPE_SPECIAL:
1656     case FIB_PATH_TYPE_DEAG:
1657     case FIB_PATH_TYPE_EXCLUSIVE:
1658         break;
1659     }
1660     return (~0);
1661 }
1662
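/**
 * @brief Return the index of the adjacency the path's DPO refers to.
 * Meaningful only for paths whose DPO is an adjacency; otherwise
 * ADJ_INDEX_INVALID is returned (and the ASSERT fires in debug images).
 */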
1663 adj_index_t
1664 fib_path_get_adj (fib_node_index_t path_index)
1665 {
1666     fib_path_t *path;
1667
1668     path = fib_path_get(path_index);
1669
1670     ASSERT(dpo_is_adj(&path->fp_dpo));
1671     if (dpo_is_adj(&path->fp_dpo))
1672     {
1673         return (path->fp_dpo.dpoi_index);
1674     }
1675     return (ADJ_INDEX_INVALID);
1676 }
1677
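/**
 * @brief Return the path's configured weight, as used when the path is
 * added to a load-balance.
 */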
1678 int
1679 fib_path_get_weight (fib_node_index_t path_index)
1680 {
1681     fib_path_t *path;
1682
1683     path = fib_path_get(path_index);
1684
1685     ASSERT(path);
1686
1687     return (path->fp_weight);
1688 }
1689
1690 /**
1691  * @brief Contribute the path's adjacency to the list passed.
1692  * By calling this function over all paths, recursively, a child
1693  * can construct its full set of forwarding adjacencies, and hence its
1694  * uRPF list.
1695  */
1696 void
1697 fib_path_contribute_urpf (fib_node_index_t path_index,
1698                           index_t urpf)
1699 {
1700     fib_path_t *path;
1701
1702     path = fib_path_get(path_index);
1703
1704     /*
1705      * resolved and unresolved paths contribute to the RPF list.
1706      */
1707     switch (path->fp_type)
1708     {
1709     case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
1710         fib_urpf_list_append(urpf, path->attached_next_hop.fp_interface);
1711         break;
1712
1713     case FIB_PATH_TYPE_ATTACHED:
1714         fib_urpf_list_append(urpf, path->attached.fp_interface);
1715         break;
1716
1717     case FIB_PATH_TYPE_RECURSIVE:
1718         if (FIB_NODE_INDEX_INVALID != path->fp_via_fib)
1719         {
1720             /*
1721              * a path may be unresolved because of constraints, or because
1722              * it has no via-entry; without a via-entry there is nothing to add.
1723              */
1724             fib_entry_contribute_urpf(path->fp_via_fib, urpf);
1725         }
1726         break;
1727
1728     case FIB_PATH_TYPE_EXCLUSIVE:
1729     case FIB_PATH_TYPE_SPECIAL:
1730         /*
1731          * these path types may link to an adj, if that's what
1732          * the client gave
1733          */
1734         if (dpo_is_adj(&path->fp_dpo))
1735         {
1736             ip_adjacency_t *adj;
1737
1738             adj = adj_get(path->fp_dpo.dpoi_index);
1739
1740             fib_urpf_list_append(urpf, adj->rewrite_header.sw_if_index);
1741         }
1742         break;
1743
1744     case FIB_PATH_TYPE_DEAG:
1745     case FIB_PATH_TYPE_RECEIVE:
1746         /*
1747          * these path types don't link to an adj
1748          */
1749         break;
1750     }
1751 }
1752
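/**
 * @brief Contribute the path's forwarding object for the requested
 * forwarding chain type. If the requested type matches the path's
 * next-hop protocol, the DPO built at resolve time is copied out;
 * otherwise a chain-type specific DPO is constructed, e.g. a link-type
 * specific adjacency, an MPLS lookup or a multicast adjacency.
 */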
1753 void
1754 fib_path_contribute_forwarding (fib_node_index_t path_index,
1755                                 fib_forward_chain_type_t fct,
1756                                 dpo_id_t *dpo)
1757 {
1758     fib_path_t *path;
1759
1760     path = fib_path_get(path_index);
1761
1762     ASSERT(path);
1763     ASSERT(FIB_FORW_CHAIN_TYPE_MPLS_EOS != fct);
1764
1765     FIB_PATH_DBG(path, "contribute");
1766
1767     /*
1768      * The DPO stored in the path was created when the path was resolved.
1769      * It thus represents the path's 'native' protocol, i.e. IP.
1770      * For all other chain types we need to construct something else.
1771      */
1772     if (fib_path_proto_to_chain_type(path->fp_nh_proto) == fct)
1773     {
1774         dpo_copy(dpo, &path->fp_dpo);
1775     }
1776     else
1777     {
1778         switch (path->fp_type)
1779         {
1780         case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
1781             switch (fct)
1782             {
1783             case FIB_FORW_CHAIN_TYPE_UNICAST_IP4:
1784             case FIB_FORW_CHAIN_TYPE_UNICAST_IP6:
1785             case FIB_FORW_CHAIN_TYPE_MPLS_EOS:
1786             case FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS:
1787             case FIB_FORW_CHAIN_TYPE_ETHERNET:
1788             case FIB_FORW_CHAIN_TYPE_NSH:
1789             {
1790                 adj_index_t ai;
1791
1792                 /*
1793                  * get an appropriate link-type adj.
1794                  */
1795                 ai = fib_path_attached_next_hop_get_adj(
1796                          path,
1797                          fib_forw_chain_type_to_link_type(fct));
1798                 dpo_set(dpo, DPO_ADJACENCY,
1799                         fib_forw_chain_type_to_dpo_proto(fct), ai);
1800                 adj_unlock(ai);
1801
1802                 break;
1803             }
1804             case FIB_FORW_CHAIN_TYPE_MCAST_IP4:
1805             case FIB_FORW_CHAIN_TYPE_MCAST_IP6:
1806                 break;
1807             }
1808             break;
1809         case FIB_PATH_TYPE_RECURSIVE:
1810             switch (fct)
1811             {
1812             case FIB_FORW_CHAIN_TYPE_MPLS_EOS:
1813             case FIB_FORW_CHAIN_TYPE_UNICAST_IP4:
1814             case FIB_FORW_CHAIN_TYPE_UNICAST_IP6:
1815             case FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS:
1816                 fib_path_recursive_adj_update(path, fct, dpo);
1817                 break;
1818             case FIB_FORW_CHAIN_TYPE_MCAST_IP4:
1819             case FIB_FORW_CHAIN_TYPE_MCAST_IP6:
1820             case FIB_FORW_CHAIN_TYPE_ETHERNET:
1821             case FIB_FORW_CHAIN_TYPE_NSH:
1822                 ASSERT(0);
1823                 break;
1824             }
1825             break;
1826         case FIB_PATH_TYPE_DEAG:
1827             switch (fct)
1828             {
1829             case FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS:
1830                 lookup_dpo_add_or_lock_w_table_id(MPLS_FIB_DEFAULT_TABLE_ID,
1831                                                   DPO_PROTO_MPLS,
1832                                                   LOOKUP_INPUT_DST_ADDR,
1833                                                   LOOKUP_TABLE_FROM_CONFIG,
1834                                                   dpo);
1835                 break;
1836             case FIB_FORW_CHAIN_TYPE_UNICAST_IP4:
1837             case FIB_FORW_CHAIN_TYPE_UNICAST_IP6:
1838             case FIB_FORW_CHAIN_TYPE_MPLS_EOS:
1839                 dpo_copy(dpo, &path->fp_dpo);
1840                 break;
1841             case FIB_FORW_CHAIN_TYPE_MCAST_IP4:
1842             case FIB_FORW_CHAIN_TYPE_MCAST_IP6:
1843             case FIB_FORW_CHAIN_TYPE_ETHERNET:
1844             case FIB_FORW_CHAIN_TYPE_NSH:
1845                 ASSERT(0);
1846                 break;
1847             }
1848             break;
1849         case FIB_PATH_TYPE_EXCLUSIVE:
1850             dpo_copy(dpo, &path->exclusive.fp_ex_dpo);
1851             break;
1852         case FIB_PATH_TYPE_ATTACHED:
1853             switch (fct)
1854             {
1855             case FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS:
1856             case FIB_FORW_CHAIN_TYPE_UNICAST_IP4:
1857             case FIB_FORW_CHAIN_TYPE_UNICAST_IP6:
1858             case FIB_FORW_CHAIN_TYPE_MPLS_EOS:
1859             case FIB_FORW_CHAIN_TYPE_ETHERNET:
1860             case FIB_FORW_CHAIN_TYPE_NSH:
1861                 break;
1862             case FIB_FORW_CHAIN_TYPE_MCAST_IP4:
1863             case FIB_FORW_CHAIN_TYPE_MCAST_IP6:
1864                 {
1865                     adj_index_t ai;
1866
1867                     /*
1868                      * Create the adj needed for sending IP multicast traffic
1869                      */
1870                     ai = adj_mcast_add_or_lock(path->fp_nh_proto,
1871                                                fib_forw_chain_type_to_link_type(fct),
1872                                                path->attached.fp_interface);
1873                     dpo_set(dpo, DPO_ADJACENCY_MCAST,
1874                             fib_forw_chain_type_to_dpo_proto(fct),
1875                             ai);
1876                     adj_unlock(ai);
1877                 }
1878                 break;
1879             }
1880             break;
1881         case FIB_PATH_TYPE_RECEIVE:
1882         case FIB_PATH_TYPE_SPECIAL:
1883             dpo_copy(dpo, &path->fp_dpo);
1884             break;
1885         }
1886     }
1887 }
1888
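/**
 * @brief If the path is resolved, append a load_balance_path_t carrying
 * its weight, index and contributed DPO to the vector passed, and return
 * the (possibly reallocated) vector. Unresolved paths contribute nothing.
 */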
1889 load_balance_path_t *
1890 fib_path_append_nh_for_multipath_hash (fib_node_index_t path_index,
1891                                        fib_forward_chain_type_t fct,
1892                                        load_balance_path_t *hash_key)
1893 {
1894     load_balance_path_t *mnh;
1895     fib_path_t *path;
1896
1897     path = fib_path_get(path_index);
1898
1899     ASSERT(path);
1900
1901     if (fib_path_is_resolved(path_index))
1902     {
1903         vec_add2(hash_key, mnh, 1);
1904
1905         mnh->path_weight = path->fp_weight;
1906         mnh->path_index = path_index;
1907         fib_path_contribute_forwarding(path_index, fct, &mnh->path_dpo);
1908     }
1909
1910     return (hash_key);
1911 }
1912
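/**
 * @brief Return non-zero if the path is of the recursive type.
 */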
1913 int
1914 fib_path_is_recursive (fib_node_index_t path_index)
1915 {
1916     fib_path_t *path;
1917
1918     path = fib_path_get(path_index);
1919
1920     return (FIB_PATH_TYPE_RECURSIVE == path->fp_type);
1921 }
1922
1923 int
1924 fib_path_is_exclusive (fib_node_index_t path_index)
1925 {
1926     fib_path_t *path;
1927
1928     path = fib_path_get(path_index);
1929
1930     return (FIB_PATH_TYPE_EXCLUSIVE == path->fp_type);
1931 }
1932
1933 int
1934 fib_path_is_deag (fib_node_index_t path_index)
1935 {
1936     fib_path_t *path;
1937
1938     path = fib_path_get(path_index);
1939
1940     return (FIB_PATH_TYPE_DEAG == path->fp_type);
1941 }
1942
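/**
 * @brief A path is resolved when it has a valid DPO, its RESOLVED
 * operational flag is set, it is not part of a recursive loop and it is
 * not a permanent drop.
 */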
1943 int
1944 fib_path_is_resolved (fib_node_index_t path_index)
1945 {
1946     fib_path_t *path;
1947
1948     path = fib_path_get(path_index);
1949
1950     return (dpo_id_is_valid(&path->fp_dpo) &&
1951             (path->fp_oper_flags & FIB_PATH_OPER_FLAG_RESOLVED) &&
1952             !fib_path_is_looped(path_index) &&
1953             !fib_path_is_permanent_drop(path));
1954 }
1955
1956 int
1957 fib_path_is_looped (fib_node_index_t path_index)
1958 {
1959     fib_path_t *path;
1960
1961     path = fib_path_get(path_index);
1962
1963     return (path->fp_oper_flags & FIB_PATH_OPER_FLAG_RECURSIVE_LOOP);
1964 }
1965
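/**
 * @brief Encode the path as a fib_route_path_encode_t appended to the
 * vector passed via the context pointer. The path-list index argument is
 * unused here. Returns 0 only when the path index does not map to a path,
 * 1 otherwise.
 */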
1966 int
1967 fib_path_encode (fib_node_index_t path_list_index,
1968                  fib_node_index_t path_index,
1969                  void *ctx)
1970 {
1971     fib_route_path_encode_t **api_rpaths = ctx;
1972     fib_route_path_encode_t *api_rpath;
1973     fib_path_t *path;
1974
1975     path = fib_path_get(path_index);
1976     if (!path)
1977       return (0);
1978     vec_add2(*api_rpaths, api_rpath, 1);
1979     api_rpath->rpath.frp_weight = path->fp_weight;
1980     api_rpath->rpath.frp_proto = path->fp_nh_proto;
1981     api_rpath->rpath.frp_sw_if_index = ~0;
1982     api_rpath->dpo = path->exclusive.fp_ex_dpo;
1983     switch (path->fp_type)
1984       {
1985       case FIB_PATH_TYPE_RECEIVE:
1986         api_rpath->rpath.frp_addr = path->receive.fp_addr;
1987         api_rpath->rpath.frp_sw_if_index = path->receive.fp_interface;
1988         break;
1989       case FIB_PATH_TYPE_ATTACHED:
1990         api_rpath->rpath.frp_sw_if_index = path->attached.fp_interface;
1991         break;
1992       case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
1993         api_rpath->rpath.frp_sw_if_index = path->attached_next_hop.fp_interface;
1994         api_rpath->rpath.frp_addr = path->attached_next_hop.fp_nh;
1995         break;
1996       case FIB_PATH_TYPE_SPECIAL:
1997         break;
1998       case FIB_PATH_TYPE_DEAG:
1999         break;
2000       case FIB_PATH_TYPE_RECURSIVE:
2001         api_rpath->rpath.frp_addr = path->recursive.fp_nh.fp_ip;
2002         break;
2003       default:
2004         break;
2005       }
2006     return (1);
2007 }
2008
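/**
 * @brief Return the FIB protocol of the path's next-hop.
 */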
2009 fib_protocol_t
2010 fib_path_get_proto (fib_node_index_t path_index)
2011 {
2012     fib_path_t *path;
2013
2014     path = fib_path_get(path_index);
2015
2016     return (path->fp_nh_proto);
2017 }
2018
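/**
 * @brief Register the path node type and its virtual function table
 * with the FIB node graph.
 */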
2019 void
2020 fib_path_module_init (void)
2021 {
2022     fib_node_register_type (FIB_NODE_TYPE_PATH, &fib_path_vft);
2023 }
2024
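/**
 * @brief CLI handler for 'show fib paths'. With no argument every path in
 * the pool is listed; with a numeric argument, e.g. 'show fib paths 12'
 * (12 being whatever path index is of interest), that one path is shown
 * in detail together with its children.
 */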
2025 static clib_error_t *
2026 show_fib_path_command (vlib_main_t * vm,
2027                         unformat_input_t * input,
2028                         vlib_cli_command_t * cmd)
2029 {
2030     fib_node_index_t pi;
2031     fib_path_t *path;
2032
2033     if (unformat (input, "%d", &pi))
2034     {
2035         /*
2036          * show one in detail
2037          */
2038         if (!pool_is_free_index(fib_path_pool, pi))
2039         {
2040             path = fib_path_get(pi);
2041             u8 *s = fib_path_format(pi, NULL);
2042             s = format(s, "children:");
2043             s = fib_node_children_format(path->fp_node.fn_children, s);
2044             vlib_cli_output (vm, "%s", s);
2045             vec_free(s);
2046         }
2047         else
2048         {
2049             vlib_cli_output (vm, "path %d invalid", pi);
2050         }
2051     }
2052     else
2053     {
2054         vlib_cli_output (vm, "FIB Paths");
2055         pool_foreach(path, fib_path_pool,
2056         ({
2057             vlib_cli_output (vm, "%U", format_fib_path, path);
2058         }));
2059     }
2060
2061     return (NULL);
2062 }
2063
2064 VLIB_CLI_COMMAND (show_fib_path, static) = {
2065   .path = "show fib paths",
2066   .function = show_fib_path_command,
2067   .short_help = "show fib paths",
2068 };