fib: make deag entries urpf exempt
[vpp.git] / src / vnet / fib / fib_path.c
1 /*
2  * Copyright (c) 2016 Cisco and/or its affiliates.
3  * Licensed under the Apache License, Version 2.0 (the "License");
4  * you may not use this file except in compliance with the License.
5  * You may obtain a copy of the License at:
6  *
7  *     http://www.apache.org/licenses/LICENSE-2.0
8  *
9  * Unless required by applicable law or agreed to in writing, software
10  * distributed under the License is distributed on an "AS IS" BASIS,
11  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12  * See the License for the specific language governing permissions and
13  * limitations under the License.
14  */
15
16 #include <vlib/vlib.h>
17 #include <vnet/vnet.h>
18 #include <vnet/ip/format.h>
19 #include <vnet/ip/ip.h>
20 #include <vnet/dpo/drop_dpo.h>
21 #include <vnet/dpo/receive_dpo.h>
22 #include <vnet/dpo/load_balance_map.h>
23 #include <vnet/dpo/lookup_dpo.h>
24 #include <vnet/dpo/interface_rx_dpo.h>
25 #include <vnet/dpo/mpls_disposition.h>
26 #include <vnet/dpo/l2_bridge_dpo.h>
27 #include <vnet/dpo/drop_dpo.h>
28
29 #include <vnet/adj/adj.h>
30 #include <vnet/adj/adj_mcast.h>
31
32 #include <vnet/fib/fib_path.h>
33 #include <vnet/fib/fib_node.h>
34 #include <vnet/fib/fib_table.h>
35 #include <vnet/fib/fib_entry.h>
36 #include <vnet/fib/fib_path_list.h>
37 #include <vnet/fib/fib_internal.h>
38 #include <vnet/fib/fib_urpf_list.h>
39 #include <vnet/fib/mpls_fib.h>
40 #include <vnet/udp/udp_encap.h>
41 #include <vnet/bier/bier_fmask.h>
42 #include <vnet/bier/bier_table.h>
43 #include <vnet/bier/bier_imp.h>
44 #include <vnet/bier/bier_disp_table.h>
45
46 /**
47  * Enumeration of path types
48  */
49 typedef enum fib_path_type_t_ {
50     /**
51      * Marker. Add new types after this one.
52      */
53     FIB_PATH_TYPE_FIRST = 0,
54     /**
55      * Attached-nexthop. An interface and a nexthop are known.
56      */
57     FIB_PATH_TYPE_ATTACHED_NEXT_HOP = FIB_PATH_TYPE_FIRST,
58     /**
59      * attached. Only the interface is known.
60      */
61     FIB_PATH_TYPE_ATTACHED,
62     /**
63      * recursive. Only the next-hop is known.
64      */
65     FIB_PATH_TYPE_RECURSIVE,
66     /**
67      * special. nothing is known. so we drop.
68      */
69     FIB_PATH_TYPE_SPECIAL,
70     /**
71      * exclusive. user provided adj.
72      */
73     FIB_PATH_TYPE_EXCLUSIVE,
74     /**
75      * deag. Link to a lookup adj in the next table
76      */
77     FIB_PATH_TYPE_DEAG,
78     /**
79      * interface receive.
80      */
81     FIB_PATH_TYPE_INTF_RX,
82     /**
83      * udp-encap. it's via a UDP encapsulation.
84      */
85     FIB_PATH_TYPE_UDP_ENCAP,
86     /**
87      * receive. it's for-us.
88      */
89     FIB_PATH_TYPE_RECEIVE,
90     /**
91      * bier-imp. it's via a BIER imposition.
92      */
93     FIB_PATH_TYPE_BIER_IMP,
94     /**
95      * bier-table. it's via a BIER ECMP-table.
96      */
97     FIB_PATH_TYPE_BIER_TABLE,
98     /**
99      * bier-fmask. it's via a BIER f-mask.
100      */
101     FIB_PATH_TYPE_BIER_FMASK,
102     /**
103      * Marker. Add new types before this one, then update it.
104      */
105     FIB_PATH_TYPE_LAST = FIB_PATH_TYPE_BIER_FMASK,
106 } __attribute__ ((packed)) fib_path_type_t;
107
108 /**
109  * The maximum number of path_types
110  */
111 #define FIB_PATH_TYPE_MAX (FIB_PATH_TYPE_LAST + 1)
112
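/*
 * Human-readable names for the path types, indexed by fib_path_type_t.
 * Used when formatting a path.
 */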
113 #define FIB_PATH_TYPES {                                        \
114     [FIB_PATH_TYPE_ATTACHED_NEXT_HOP] = "attached-nexthop",     \
115     [FIB_PATH_TYPE_ATTACHED]          = "attached",             \
116     [FIB_PATH_TYPE_RECURSIVE]         = "recursive",            \
117     [FIB_PATH_TYPE_SPECIAL]           = "special",              \
118     [FIB_PATH_TYPE_EXCLUSIVE]         = "exclusive",            \
119     [FIB_PATH_TYPE_DEAG]              = "deag",                 \
120     [FIB_PATH_TYPE_INTF_RX]           = "intf-rx",              \
121     [FIB_PATH_TYPE_UDP_ENCAP]         = "udp-encap",            \
122     [FIB_PATH_TYPE_RECEIVE]           = "receive",              \
123     [FIB_PATH_TYPE_BIER_IMP]          = "bier-imp",             \
124     [FIB_PATH_TYPE_BIER_TABLE]        = "bier-table",           \
125     [FIB_PATH_TYPE_BIER_FMASK]        = "bier-fmask",           \
126 }
127
128 #define FOR_EACH_FIB_PATH_TYPE(_item)           \
129     for (_item = FIB_PATH_TYPE_FIRST;           \
130          _item <= FIB_PATH_TYPE_LAST;           \
131          _item++)
132
133 /**
134  * Enumeration of path operational (i.e. derived) attributes
135  */
136 typedef enum fib_path_oper_attribute_t_ {
137     /**
138      * Marker. Add new types after this one.
139      */
140     FIB_PATH_OPER_ATTRIBUTE_FIRST = 0,
141     /**
142      * The path forms part of a recursive loop.
143      */
144     FIB_PATH_OPER_ATTRIBUTE_RECURSIVE_LOOP = FIB_PATH_OPER_ATTRIBUTE_FIRST,
145     /**
146      * The path is resolved
147      */
148     FIB_PATH_OPER_ATTRIBUTE_RESOLVED,
149     /**
150      * The path is attached, despite what the next-hop may say.
151      */
152     FIB_PATH_OPER_ATTRIBUTE_ATTACHED,
153     /**
154      * The path has become a permanent drop.
155      */
156     FIB_PATH_OPER_ATTRIBUTE_DROP,
157     /**
158      * Marker. Add new types before this one, then update it.
159      */
160     FIB_PATH_OPER_ATTRIBUTE_LAST = FIB_PATH_OPER_ATTRIBUTE_DROP,
161 } __attribute__ ((packed)) fib_path_oper_attribute_t;
162
163 /**
164  * The maximum number of path operational attributes
165  */
166 #define FIB_PATH_OPER_ATTRIBUTE_MAX (FIB_PATH_OPER_ATTRIBUTE_LAST + 1)
167
168 #define FIB_PATH_OPER_ATTRIBUTES {                                      \
169     [FIB_PATH_OPER_ATTRIBUTE_RECURSIVE_LOOP] = "recursive-loop",        \
170     [FIB_PATH_OPER_ATTRIBUTE_RESOLVED]       = "resolved",              \
171     [FIB_PATH_OPER_ATTRIBUTE_DROP]           = "drop",                  \
172 }
173
174 #define FOR_EACH_FIB_PATH_OPER_ATTRIBUTE(_item) \
175     for (_item = FIB_PATH_OPER_ATTRIBUTE_FIRST; \
176          _item <= FIB_PATH_OPER_ATTRIBUTE_LAST; \
177          _item++)
178
179 /**
180  * Path flags from the attributes
181  */
182 typedef enum fib_path_oper_flags_t_ {
183     FIB_PATH_OPER_FLAG_NONE = 0,
184     FIB_PATH_OPER_FLAG_RECURSIVE_LOOP = (1 << FIB_PATH_OPER_ATTRIBUTE_RECURSIVE_LOOP),
185     FIB_PATH_OPER_FLAG_DROP = (1 << FIB_PATH_OPER_ATTRIBUTE_DROP),
186     FIB_PATH_OPER_FLAG_RESOLVED = (1 << FIB_PATH_OPER_ATTRIBUTE_RESOLVED),
187     FIB_PATH_OPER_FLAG_ATTACHED = (1 << FIB_PATH_OPER_ATTRIBUTE_ATTACHED),
188 } __attribute__ ((packed)) fib_path_oper_flags_t;
189
190 /**
191  * A FIB path
192  */
193 typedef struct fib_path_t_ {
194     /**
195      * A path is a node in the FIB graph.
196      */
197     fib_node_t fp_node;
198
199     /**
200      * The index of the path-list to which this path belongs
201      */
202     u32 fp_pl_index;
203
204     /**
205      * This marks the start of the memory area used to hash
206      * the path
207      */
208     STRUCT_MARK(path_hash_start);
209
210     /**
211      * Configuration Flags
212      */
213     fib_path_cfg_flags_t fp_cfg_flags;
214
215     /**
216      * The type of the path. This is the selector for the union
217      */
218     fib_path_type_t fp_type;
219
220     /**
221      * The protocol of the next-hop, i.e. the address family of the
222      * next-hop's address. We can't derive this from the address itself
223      * since the address can be all zeros
224      */
225     dpo_proto_t fp_nh_proto;
226
227     /**
228      * UCMP [unnormalised] weight
229      */
230     u8 fp_weight;
231
232     /**
233      * A path preference. 0 is the best.
234      * Only paths of the best preference, that are 'up', are considered
235      * for forwarding.
236      */
237     u8 fp_preference;
238
239     /**
240      * per-type union of the data required to resolve the path
241      */
242     union {
243         struct {
244             /**
245              * The next-hop
246              */
247             ip46_address_t fp_nh;
248             /**
249              * The interface
250              */
251             u32 fp_interface;
252         } attached_next_hop;
253         struct {
254             /**
255              * The interface
256              */
257             u32 fp_interface;
258         } attached;
259         struct {
260             union
261             {
262                 /**
263                  * The next-hop
264                  */
265                 ip46_address_t fp_ip;
266                 struct {
267                     /**
268                      * The local label to resolve through.
269                      */
270                     mpls_label_t fp_local_label;
271                     /**
272                      * The EOS bit of the resolving label
273                      */
274                     mpls_eos_bit_t fp_eos;
275                 };
276             } fp_nh;
277             union {
278                 /**
279                  * The FIB table index in which to find the next-hop.
280                  */
281                 fib_node_index_t fp_tbl_id;
282                 /**
283                  * The BIER FIB the fmask is in
284                  */
285                 index_t fp_bier_fib;
286             };
287         } recursive;
288         struct {
289             /**
290              * BIER FMask ID
291              */
292             index_t fp_bier_fmask;
293         } bier_fmask;
294         struct {
295             /**
296              * The BIER table's ID
297              */
298             bier_table_id_t fp_bier_tbl;
299         } bier_table;
300         struct {
301             /**
302              * The BIER imposition object
303              * this is part of the path's key, since the index_t
304              * of an imposition object is the object's key.
305              */
306             index_t fp_bier_imp;
307         } bier_imp;
308         struct {
309             /**
310      * The FIB index in which to perform the next lookup
311              */
312             fib_node_index_t fp_tbl_id;
313             /**
314              * The RPF-ID to tag the packets with
315              */
316             fib_rpf_id_t fp_rpf_id;
317         } deag;
318         struct {
319         } special;
320         struct {
321             /**
322              * The user provided 'exclusive' DPO
323              */
324             dpo_id_t fp_ex_dpo;
325         } exclusive;
326         struct {
327             /**
328              * The interface on which the local address is configured
329              */
330             u32 fp_interface;
331             /**
332              * The next-hop
333              */
334             ip46_address_t fp_addr;
335         } receive;
336         struct {
337             /**
338              * The interface on which the packets will be input.
339              */
340             u32 fp_interface;
341         } intf_rx;
342         struct {
343             /**
344              * The UDP Encap object this path resolves through
345              */
346             u32 fp_udp_encap_id;
347         } udp_encap;
348     };
349     STRUCT_MARK(path_hash_end);
350
351     /**
352      * Members in this last section represent information that is
353      * derived during resolution. It should not be copied to new paths
354      * nor compared.
355      */
356
357     /**
358      * Operational Flags
359      */
360     fib_path_oper_flags_t fp_oper_flags;
361
362     union {
363         /**
364          * the resolving via fib. not part of the path's hash, since it is
365          * derived during resolution.
366          */
367         fib_node_index_t fp_via_fib;
368         /**
369          * the resolving bier-table
370          */
371         index_t fp_via_bier_tbl;
372         /**
373          * the resolving bier-fmask
374          */
375         index_t fp_via_bier_fmask;
376     };
377
378     /**
379      * The Data-path objects through which this path resolves for IP.
380      */
381     dpo_id_t fp_dpo;
382
383     /**
384      * the index of this path in the parent's child list.
385      */
386     u32 fp_sibling;
387 } fib_path_t;
388
389 /*
390  * Array of strings/names for the path types and attributes
391  */
392 static const char *fib_path_type_names[] = FIB_PATH_TYPES;
393 static const char *fib_path_oper_attribute_names[] = FIB_PATH_OPER_ATTRIBUTES;
394 static const char *fib_path_cfg_attribute_names[]  = FIB_PATH_CFG_ATTRIBUTES;
395
396 /*
397  * The memory pool from which we allocate all the paths
398  */
399 static fib_path_t *fib_path_pool;
400
401 /*
402  * Debug macro
403  */
404 #ifdef FIB_DEBUG
405 #define FIB_PATH_DBG(_p, _fmt, _args...)                        \
406 {                                                               \
407     u8 *_tmp = NULL;                                            \
408     _tmp = fib_path_format(fib_path_get_index(_p), _tmp);       \
409     clib_warning("path:[%d:%U]:" _fmt,                          \
410                  fib_path_get_index(_p), format_fib_path, fib_path_get_index(_p), 0,\
411                  ##_args);                                      \
412     vec_free(_tmp);                                             \
413 }
414 #else
415 #define FIB_PATH_DBG(_p, _fmt, _args...)
416 #endif
417
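/*
 * return the path object stored at the given index in the path pool
 */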
418 static fib_path_t *
419 fib_path_get (fib_node_index_t index)
420 {
421     return (pool_elt_at_index(fib_path_pool, index));
422 }
423
424 static fib_node_index_t 
425 fib_path_get_index (fib_path_t *path)
426 {
427     return (path - fib_path_pool);
428 }
429
430 static fib_node_t *
431 fib_path_get_node (fib_node_index_t index)
432 {
433     return ((fib_node_t*)fib_path_get(index));
434 }
435
436 static fib_path_t*
437 fib_path_from_fib_node (fib_node_t *node)
438 {
439     ASSERT(FIB_NODE_TYPE_PATH == node->fn_type);
440     return ((fib_path_t*)node);
441 }
442
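/*
 * format/print a FIB path. va_args: the path's index and the indent to apply.
 */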
443 u8 *
444 format_fib_path (u8 * s, va_list * args)
445 {
446     fib_node_index_t path_index = va_arg (*args, fib_node_index_t);
447     u32 indent = va_arg (*args, u32);
448     vnet_main_t * vnm = vnet_get_main();
449     fib_path_oper_attribute_t oattr;
450     fib_path_cfg_attribute_t cattr;
451     fib_path_t *path;
452
453     path = fib_path_get(path_index);
454
455     s = format (s, "%Upath:[%d] ", format_white_space, indent,
456                 fib_path_get_index(path));
457     s = format (s, "pl-index:%d ", path->fp_pl_index);
458     s = format (s, "%U ", format_dpo_proto, path->fp_nh_proto);
459     s = format (s, "weight=%d ", path->fp_weight);
460     s = format (s, "pref=%d ", path->fp_preference);
461     s = format (s, "%s: ", fib_path_type_names[path->fp_type]);
462     if (FIB_PATH_OPER_FLAG_NONE != path->fp_oper_flags) {
463         s = format(s, " oper-flags:");
464         FOR_EACH_FIB_PATH_OPER_ATTRIBUTE(oattr) {
465             if ((1<<oattr) & path->fp_oper_flags) {
466                 s = format (s, "%s,", fib_path_oper_attribute_names[oattr]);
467             }
468         }
469     }
470     if (FIB_PATH_CFG_FLAG_NONE != path->fp_cfg_flags) {
471         s = format(s, " cfg-flags:");
472         FOR_EACH_FIB_PATH_CFG_ATTRIBUTE(cattr) {
473             if ((1<<cattr) & path->fp_cfg_flags) {
474                 s = format (s, "%s,", fib_path_cfg_attribute_names[cattr]);
475             }
476         }
477     }
478     s = format(s, "\n%U", format_white_space, indent+2);
479
480     switch (path->fp_type)
481     {
482     case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
483         s = format (s, "%U", format_ip46_address,
484                     &path->attached_next_hop.fp_nh,
485                     IP46_TYPE_ANY);
486         if (path->fp_oper_flags & FIB_PATH_OPER_FLAG_DROP)
487         {
488             s = format (s, " if_index:%d", path->attached_next_hop.fp_interface);
489         }
490         else
491         {
492             s = format (s, " %U",
493                         format_vnet_sw_interface_name,
494                         vnm,
495                         vnet_get_sw_interface(
496                             vnm,
497                             path->attached_next_hop.fp_interface));
498             if (vnet_sw_interface_is_p2p(vnet_get_main(),
499                                          path->attached_next_hop.fp_interface))
500             {
501                 s = format (s, " (p2p)");
502             }
503         }
504         if (!dpo_id_is_valid(&path->fp_dpo))
505         {
506             s = format(s, "\n%Uunresolved", format_white_space, indent+2);
507         }
508         else
509         {
510             s = format(s, "\n%U%U",
511                        format_white_space, indent,
512                        format_dpo_id,
513                        &path->fp_dpo, 13);
514         }
515         break;
516     case FIB_PATH_TYPE_ATTACHED:
517         if (path->fp_oper_flags & FIB_PATH_OPER_FLAG_DROP)
518         {
519             s = format (s, "if_index:%d", path->attached_next_hop.fp_interface);
520         }
521         else
522         {
523             s = format (s, " %U",
524                         format_vnet_sw_interface_name,
525                         vnm,
526                         vnet_get_sw_interface(
527                             vnm,
528                             path->attached.fp_interface));
529         }
530         break;
531     case FIB_PATH_TYPE_RECURSIVE:
532         if (DPO_PROTO_MPLS == path->fp_nh_proto)
533         {
534             s = format (s, "via %U %U",
535                         format_mpls_unicast_label,
536                         path->recursive.fp_nh.fp_local_label,
537                         format_mpls_eos_bit,
538                         path->recursive.fp_nh.fp_eos);
539         }
540         else
541         {
542             s = format (s, "via %U",
543                         format_ip46_address,
544                         &path->recursive.fp_nh.fp_ip,
545                         IP46_TYPE_ANY);
546         }
547         s = format (s, " in fib:%d",
548                     path->recursive.fp_tbl_id,
549                     path->fp_via_fib); 
550         s = format (s, " via-fib:%d", path->fp_via_fib); 
551         s = format (s, " via-dpo:[%U:%d]",
552                     format_dpo_type, path->fp_dpo.dpoi_type, 
553                     path->fp_dpo.dpoi_index);
554
555         break;
556     case FIB_PATH_TYPE_UDP_ENCAP:
557         s = format (s, "UDP-encap ID:%d", path->udp_encap.fp_udp_encap_id);
558         break;
559     case FIB_PATH_TYPE_BIER_TABLE:
560         s = format (s, "via bier-table:[%U}",
561                     format_bier_table_id,
562                     &path->bier_table.fp_bier_tbl);
563         s = format (s, " via-dpo:[%U:%d]",
564                     format_dpo_type, path->fp_dpo.dpoi_type,
565                     path->fp_dpo.dpoi_index);
566         break;
567     case FIB_PATH_TYPE_BIER_FMASK:
568         s = format (s, "via-fmask:%d", path->bier_fmask.fp_bier_fmask); 
569         s = format (s, " via-dpo:[%U:%d]",
570                     format_dpo_type, path->fp_dpo.dpoi_type, 
571                     path->fp_dpo.dpoi_index);
572         break;
573     case FIB_PATH_TYPE_BIER_IMP:
574         s = format (s, "via %U", format_bier_imp,
575                     path->bier_imp.fp_bier_imp, 0, BIER_SHOW_BRIEF);
576         break;
577     case FIB_PATH_TYPE_RECEIVE:
578     case FIB_PATH_TYPE_INTF_RX:
579     case FIB_PATH_TYPE_SPECIAL:
580     case FIB_PATH_TYPE_DEAG:
581     case FIB_PATH_TYPE_EXCLUSIVE:
582         if (dpo_id_is_valid(&path->fp_dpo))
583         {
584             s = format(s, "%U", format_dpo_id,
585                        &path->fp_dpo, indent+2);
586         }
587         break;
588     }
589     return (s);
590 }
591
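/*
 * format the path with the given index, appending to the vector s
 */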
592 u8 *
593 fib_path_format (fib_node_index_t pi, u8 *s)
594 {
595     fib_path_t *path;
596
597     path = fib_path_get(pi);
598     ASSERT(NULL != path);
599
600     return (format (s, "%U", format_fib_path, path));
601 }
602
603 /*
604  * fib_path_last_lock_gone
605  *
606  * We don't share paths, we share path lists, so the [un]lock functions
607  * are no-ops
608  */
609 static void
610 fib_path_last_lock_gone (fib_node_t *node)
611 {
612     ASSERT(0);
613 }
614
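/*
 * get (add or lock) the adjacency through which an attached-nexthop path
 * resolves. On p2p interfaces the zero-address auto-adj is used, since no
 * per-neighbour adjacency exists on such links.
 */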
615 static const adj_index_t
616 fib_path_attached_next_hop_get_adj (fib_path_t *path,
617                                     vnet_link_t link)
618 {
619     if (vnet_sw_interface_is_p2p(vnet_get_main(),
620                                  path->attached_next_hop.fp_interface))
621     {
622         /*
623          * if the interface is p2p then the adj for the specific
624          * neighbour on that link will never exist. on p2p links
625          * the subnet address (the attached route) links to the
626          * auto-adj (see below), we want that adj here too.
627          */
628         return (adj_nbr_add_or_lock(dpo_proto_to_fib(path->fp_nh_proto),
629                                     link,
630                                     &zero_addr,
631                                     path->attached_next_hop.fp_interface));
632     }
633     else
634     {
635         return (adj_nbr_add_or_lock(dpo_proto_to_fib(path->fp_nh_proto),
636                                     link,
637                                     &path->attached_next_hop.fp_nh,
638                                     path->attached_next_hop.fp_interface));
639     }
640 }
641
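/*
 * resolve the path via its attached next-hop adjacency and become a child
 * of that adjacency. The path is left unresolved if either the interface
 * or the adjacency is down.
 */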
642 static void
643 fib_path_attached_next_hop_set (fib_path_t *path)
644 {
645     /*
646      * resolve directly via the adjacency described by the
647      * interface and next-hop
648      */
649     dpo_set(&path->fp_dpo,
650             DPO_ADJACENCY,
651             path->fp_nh_proto,
652             fib_path_attached_next_hop_get_adj(
653                  path,
654                  dpo_proto_to_link(path->fp_nh_proto)));
655
656     /*
657      * become a child of the adjacency so we receive updates
658      * when its rewrite changes
659      */
660     path->fp_sibling = adj_child_add(path->fp_dpo.dpoi_index,
661                                      FIB_NODE_TYPE_PATH,
662                                      fib_path_get_index(path));
663
664     if (!vnet_sw_interface_is_admin_up(vnet_get_main(),
665                                       path->attached_next_hop.fp_interface) ||
666         !adj_is_up(path->fp_dpo.dpoi_index))
667     {
668         path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
669     }
670 }
671
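/*
 * get the adjacency used by an attached path; a neighbour adj on p2p
 * links, a glean adj otherwise.
 */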
672 static const adj_index_t
673 fib_path_attached_get_adj (fib_path_t *path,
674                            vnet_link_t link)
675 {
676     if (vnet_sw_interface_is_p2p(vnet_get_main(),
677                                  path->attached.fp_interface))
678     {
679         /*
680          * point-2-point interfaces do not require a glean, since
681          * there is nothing to ARP. Install a rewrite/nbr adj instead
682          */
683         return (adj_nbr_add_or_lock(dpo_proto_to_fib(path->fp_nh_proto),
684                                     link,
685                                     &zero_addr,
686                                     path->attached.fp_interface));
687     }
688     else
689     {
690         return (adj_glean_add_or_lock(dpo_proto_to_fib(path->fp_nh_proto),
691                                       path->attached.fp_interface,
692                                       NULL));
693     }
694 }
695
696 /*
697  * create or update the path's recursive adj
698  */
699 static void
700 fib_path_recursive_adj_update (fib_path_t *path,
701                                fib_forward_chain_type_t fct,
702                                dpo_id_t *dpo)
703 {
704     dpo_id_t via_dpo = DPO_INVALID;
705
706     /*
707      * get the DPO to resolve through from the via-entry
708      */
709     fib_entry_contribute_forwarding(path->fp_via_fib,
710                                     fct,
711                                     &via_dpo);
712
713
714     /*
715      * hope for the best - clear if restrictions apply.
716      */
717     path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RESOLVED;
718
719     /*
720      * Validate any recursion constraints and over-ride the via
721      * adj if not met
722      */
723     if (path->fp_oper_flags & FIB_PATH_OPER_FLAG_RECURSIVE_LOOP)
724     {
725         path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
726         dpo_copy(&via_dpo, drop_dpo_get(path->fp_nh_proto));
727     }
728     else if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_RESOLVE_HOST)
729     {
730         /*
731          * the via FIB must be a host route.
732          * note the via FIB just added will always be a host route
733          * since it is an RR source added host route. So what we need to
734          * check is whether the route has other sources. If it does then
735          * some other source has added it as a host route. If it doesn't
736          * then it was added only here and inherits forwarding from a cover.
737          * the cover is not a host route.
738          * The RR source is the lowest priority source, so we check if it
739          * is the best. if it is there are no other sources.
740          */
741         if (fib_entry_get_best_source(path->fp_via_fib) >= FIB_SOURCE_RR)
742         {
743             path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
744             dpo_copy(&via_dpo, drop_dpo_get(path->fp_nh_proto));
745
746             /*
747              * PIC edge trigger. let the load-balance maps know
748              */
749             load_balance_map_path_state_change(fib_path_get_index(path));
750         }
751     }
752     else if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_RESOLVE_ATTACHED)
753     {
754         /*
755          * RR source entries inherit the flags from the cover, so
756          * we can check the via directly
757          */
758         if (!(FIB_ENTRY_FLAG_ATTACHED & fib_entry_get_flags(path->fp_via_fib)))
759         {
760             path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
761             dpo_copy(&via_dpo, drop_dpo_get(path->fp_nh_proto));
762
763             /*
764              * PIC edge trigger. let the load-balance maps know
765              */
766             load_balance_map_path_state_change(fib_path_get_index(path));
767         }
768     }
769     /*
770      * check for over-riding factors on the FIB entry itself
771      */
772     if (!fib_entry_is_resolved(path->fp_via_fib))
773     {
774         path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
775         dpo_copy(&via_dpo, drop_dpo_get(path->fp_nh_proto));
776
777         /*
778          * PIC edge trigger. let the load-balance maps know
779          */
780         load_balance_map_path_state_change(fib_path_get_index(path));
781     }
782
783     /*
784      * If this path is contributing a drop, then it's not resolved
785      */
786     if (dpo_is_drop(&via_dpo) || load_balance_is_drop(&via_dpo))
787     {
788         path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
789     }
790
791     /*
792      * update the path's contributed DPO
793      */
794     dpo_copy(dpo, &via_dpo);
795
796     FIB_PATH_DBG(path, "recursive update:");
797
798     dpo_reset(&via_dpo);
799 }
800
801 /*
802  * re-evaluate the forwarding state for a via-fmask path
803  */
804 static void
805 fib_path_bier_fmask_update (fib_path_t *path,
806                             dpo_id_t *dpo)
807 {
808     bier_fmask_contribute_forwarding(path->bier_fmask.fp_bier_fmask, dpo);
809
810     /*
811      * if we are stacking on the drop, then the path is not resolved
812      */
813     if (dpo_is_drop(dpo))
814     {
815         path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
816     }
817     else
818     {
819         path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RESOLVED;
820     }
821 }
822
823 /*
824  * fib_path_is_permanent_drop
825  *
826  * Return !0 if the path is configured to permanently drop,
827  * despite other attributes.
828  */
829 static int
830 fib_path_is_permanent_drop (fib_path_t *path)
831 {
832     return ((path->fp_cfg_flags & FIB_PATH_CFG_FLAG_DROP) ||
833             (path->fp_oper_flags & FIB_PATH_OPER_FLAG_DROP));
834 }
835
836 /*
837  * fib_path_unresolve
838  *
839  * Remove our dependency on the resolution target
840  */
841 static void
842 fib_path_unresolve (fib_path_t *path)
843 {
844     /*
845      * the forced drop path does not need unresolving
846      */
847     if (fib_path_is_permanent_drop(path))
848     {
849         return;
850     }
851
852     switch (path->fp_type)
853     {
854     case FIB_PATH_TYPE_RECURSIVE:
855         if (FIB_NODE_INDEX_INVALID != path->fp_via_fib)
856         {
857             fib_prefix_t pfx;
858
859             fib_entry_get_prefix(path->fp_via_fib, &pfx);
860             fib_entry_child_remove(path->fp_via_fib,
861                                    path->fp_sibling);
862             fib_table_entry_special_remove(path->recursive.fp_tbl_id,
863                                            &pfx,
864                                            FIB_SOURCE_RR);
865             path->fp_via_fib = FIB_NODE_INDEX_INVALID;
866         }
867         break;
868     case FIB_PATH_TYPE_BIER_FMASK:
869         bier_fmask_child_remove(path->fp_via_bier_fmask,
870                                 path->fp_sibling);
871         break;
872     case FIB_PATH_TYPE_BIER_IMP:
873         bier_imp_unlock(path->fp_dpo.dpoi_index);
874         break;
875     case FIB_PATH_TYPE_BIER_TABLE:
876         bier_table_ecmp_unlock(path->fp_via_bier_tbl);
877         break;
878     case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
879         adj_child_remove(path->fp_dpo.dpoi_index,
880                          path->fp_sibling);
881         adj_unlock(path->fp_dpo.dpoi_index);
882         break;
883     case FIB_PATH_TYPE_ATTACHED:
884         if (DPO_PROTO_ETHERNET != path->fp_nh_proto)
885         {
886             adj_child_remove(path->fp_dpo.dpoi_index,
887                              path->fp_sibling);
888             adj_unlock(path->fp_dpo.dpoi_index);
889         }
890         break;
891     case FIB_PATH_TYPE_UDP_ENCAP:
892         udp_encap_unlock_w_index(path->fp_dpo.dpoi_index);
893         break;
894     case FIB_PATH_TYPE_EXCLUSIVE:
895         dpo_reset(&path->exclusive.fp_ex_dpo);
896         break;
897     case FIB_PATH_TYPE_SPECIAL:
898     case FIB_PATH_TYPE_RECEIVE:
899     case FIB_PATH_TYPE_INTF_RX:
900     case FIB_PATH_TYPE_DEAG:
901         /*
902          * these hold only the path's DPO, which is reset below.
903          */
904         break;
905     }
906
907     /*
908      * release the adj we were holding and pick up the
909      * drop just in case.
910      */
911     dpo_reset(&path->fp_dpo);
912     path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
913
914     return;
915 }
916
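/*
 * determine the forwarding chain type this path contributes, based on its
 * next-hop protocol and, for MPLS recursion, the EOS bit.
 */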
917 static fib_forward_chain_type_t
918 fib_path_to_chain_type (const fib_path_t *path)
919 {
920     if (DPO_PROTO_MPLS == path->fp_nh_proto)
921     {
922         if (FIB_PATH_TYPE_RECURSIVE == path->fp_type &&
923             MPLS_EOS == path->recursive.fp_nh.fp_eos)
924         {
925             return (FIB_FORW_CHAIN_TYPE_MPLS_EOS);
926         }
927         else
928         {
929             return (FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS);
930         }
931     }
932     else
933     {
934         return (fib_forw_chain_type_from_dpo_proto(path->fp_nh_proto));
935     }
936 }
937
938 /*
939  * fib_path_back_walk_notify
940  *
941  * A back walk has reached this path.
942  */
943 static fib_node_back_walk_rc_t
944 fib_path_back_walk_notify (fib_node_t *node,
945                            fib_node_back_walk_ctx_t *ctx)
946 {
947     fib_path_t *path;
948
949     path = fib_path_from_fib_node(node);
950
951     switch (path->fp_type)
952     {
953     case FIB_PATH_TYPE_RECURSIVE:
954         if (FIB_NODE_BW_REASON_FLAG_EVALUATE & ctx->fnbw_reason)
955         {
956             /*
957              * modify the recursive adjacency to use the new forwarding
958              * of the via-fib.
959              * this update is visible to packets in flight in the DP.
960              */
961             fib_path_recursive_adj_update(
962                 path,
963                 fib_path_to_chain_type(path),
964                 &path->fp_dpo);
965         }
966         if ((FIB_NODE_BW_REASON_FLAG_ADJ_UPDATE & ctx->fnbw_reason) ||
967             (FIB_NODE_BW_REASON_FLAG_ADJ_DOWN   & ctx->fnbw_reason))
968         {
969             /*
970              * ADJ updates (complete<->incomplete) do not need to propagate to
971              * recursive entries.
972              * The only reason it's needed as far back as here is that the adj
973              * and the incomplete adj are a different DPO type, so the LBs need
974              * to re-stack.
975              * If this walk was quashed in the fib_entry, then any non-fib_path
976              * children (like tunnels that collapse out the LB when they stack)
977              * would not see the update.
978              */
979             return (FIB_NODE_BACK_WALK_CONTINUE);
980         }
981         break;
982     case FIB_PATH_TYPE_BIER_FMASK:
983         if (FIB_NODE_BW_REASON_FLAG_EVALUATE & ctx->fnbw_reason)
984         {
985             /*
986              * update to use the BIER fmask's new forwarding
987              */
988             fib_path_bier_fmask_update(path, &path->fp_dpo);
989         }
990         if ((FIB_NODE_BW_REASON_FLAG_ADJ_UPDATE & ctx->fnbw_reason) ||
991             (FIB_NODE_BW_REASON_FLAG_ADJ_DOWN   & ctx->fnbw_reason))
992         {
993             /*
994              * ADJ updates (complete<->incomplete) do not need to propagate to
995              * recursive entries.
996              * The only reason it's needed as far back as here is that the adj
997              * and the incomplete adj are a different DPO type, so the LBs need
998              * to re-stack.
999              * If this walk was quashed in the fib_entry, then any non-fib_path
1000              * children (like tunnels that collapse out the LB when they stack)
1001              * would not see the update.
1002              */
1003             return (FIB_NODE_BACK_WALK_CONTINUE);
1004         }
1005         break;
1006     case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
1007         /*
1008 FIXME comment
1009          * ADJ_UPDATE backwalks pass silently through here and up to
1010          * the path-list when the multipath adj collapse occurs.
1011          * The reason we do this is that the assumption is that VPP
1012          * runs in an environment where the Control-Plane is remote
1013          * and hence reacts slowly to link up/down. In order to remove
1014          * this down link from the ECMP set quickly, we back-walk.
1015          * VPP also has dedicated CPUs, so we are not stealing resources
1016          * from the CP to do so.
1017          */
1018         if (FIB_NODE_BW_REASON_FLAG_INTERFACE_UP & ctx->fnbw_reason)
1019         {
1020             if (path->fp_oper_flags & FIB_PATH_OPER_FLAG_RESOLVED)
1021             {
1022                 /*
1023                  * already resolved. no need to walk back again
1024                  */
1025                 return (FIB_NODE_BACK_WALK_CONTINUE);
1026             }
1027             path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RESOLVED;
1028         }
1029         if (FIB_NODE_BW_REASON_FLAG_INTERFACE_DOWN & ctx->fnbw_reason)
1030         {
1031             if (!(path->fp_oper_flags & FIB_PATH_OPER_FLAG_RESOLVED))
1032             {
1033                 /*
1034                  * already unresolved. no need to walk back again
1035                  */
1036                 return (FIB_NODE_BACK_WALK_CONTINUE);
1037             }
1038             path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
1039         }
1040         if (FIB_NODE_BW_REASON_FLAG_INTERFACE_DELETE & ctx->fnbw_reason)
1041         {
1042             /*
1043              * The interface this path resolves through has been deleted.
1044              * This will leave the path in a permanent drop state. The route
1045              * needs to be removed and readded (and hence the path-list deleted)
1046              * before it can forward again.
1047              */
1048             fib_path_unresolve(path);
1049             path->fp_oper_flags |= FIB_PATH_OPER_FLAG_DROP;
1050         }
1051         if (FIB_NODE_BW_REASON_FLAG_ADJ_UPDATE & ctx->fnbw_reason)
1052         {
1053             /*
1054              * restack the DPO to pick up the correct DPO sub-type
1055              */
1056             uword if_is_up;
1057             adj_index_t ai;
1058
1059             if_is_up = vnet_sw_interface_is_admin_up(
1060                            vnet_get_main(),
1061                            path->attached_next_hop.fp_interface);
1062
1063             ai = fib_path_attached_next_hop_get_adj(
1064                      path,
1065                      dpo_proto_to_link(path->fp_nh_proto));
1066
1067             path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
1068             if (if_is_up && adj_is_up(ai))
1069             {
1070                 path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RESOLVED;
1071             }
1072
1073             dpo_set(&path->fp_dpo, DPO_ADJACENCY, path->fp_nh_proto, ai);
1074             adj_unlock(ai);
1075
1076             if (!if_is_up)
1077             {
1078                 /*
1079                  * If the interface is not up there is no reason to walk
1080                  * back to children. if we did they would only evaluate
1081                  * that this path is unresolved and hence it would
1082                  * not contribute the adjacency - so it would be wasted
1083                  * CPU time.
1084                  */
1085                 return (FIB_NODE_BACK_WALK_CONTINUE);
1086             }
1087         }
1088         if (FIB_NODE_BW_REASON_FLAG_ADJ_DOWN & ctx->fnbw_reason)
1089         {
1090             if (!(path->fp_oper_flags & FIB_PATH_OPER_FLAG_RESOLVED))
1091             {
1092                 /*
1093                  * already unresolved. no need to walk back again
1094                  */
1095                 return (FIB_NODE_BACK_WALK_CONTINUE);
1096             }
1097             /*
1098              * the adj has gone down. the path is no longer resolved.
1099              */
1100             path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
1101         }
1102         break;
1103     case FIB_PATH_TYPE_ATTACHED:
1104         /*
1105          * FIXME; this could schedule a lower priority walk, since attached
1106          * routes are not usually in ECMP configurations so the backwalk to
1107          * the FIB entry does not need to be high priority
1108          */
1109         if (FIB_NODE_BW_REASON_FLAG_INTERFACE_UP & ctx->fnbw_reason)
1110         {
1111             path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RESOLVED;
1112         }
1113         if (FIB_NODE_BW_REASON_FLAG_INTERFACE_DOWN & ctx->fnbw_reason)
1114         {
1115             path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
1116         }
1117         if (FIB_NODE_BW_REASON_FLAG_INTERFACE_DELETE & ctx->fnbw_reason)
1118         {
1119             fib_path_unresolve(path);
1120             path->fp_oper_flags |= FIB_PATH_OPER_FLAG_DROP;
1121         }
1122         break;
1123     case FIB_PATH_TYPE_UDP_ENCAP:
1124     {
1125         dpo_id_t via_dpo = DPO_INVALID;
1126
1127         /*
1128          * hope for the best - clear if restrictions apply.
1129          */
1130         path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RESOLVED;
1131
1132         udp_encap_contribute_forwarding(path->udp_encap.fp_udp_encap_id,
1133                                         path->fp_nh_proto,
1134                                         &via_dpo);
1135         /*
1136          * If this path is contributing a drop, then it's not resolved
1137          */
1138         if (dpo_is_drop(&via_dpo) || load_balance_is_drop(&via_dpo))
1139         {
1140             path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
1141         }
1142
1143         /*
1144          * update the path's contributed DPO
1145          */
1146         dpo_copy(&path->fp_dpo, &via_dpo);
1147         dpo_reset(&via_dpo);
1148         break;
1149     }
1150     case FIB_PATH_TYPE_INTF_RX:
1151         ASSERT(0);
1152     case FIB_PATH_TYPE_DEAG:
1153         /*
1154          * FIXME When VRF delete is allowed this will need a poke.
1155          */
1156     case FIB_PATH_TYPE_SPECIAL:
1157     case FIB_PATH_TYPE_RECEIVE:
1158     case FIB_PATH_TYPE_EXCLUSIVE:
1159     case FIB_PATH_TYPE_BIER_TABLE:
1160     case FIB_PATH_TYPE_BIER_IMP:
1161         /*
1162          * these path types have no parents. so to be
1163          * walked from one is unexpected.
1164          */
1165         ASSERT(0);
1166         break;
1167     }
1168
1169     /*
1170      * propagate the backwalk further to the path-list
1171      */
1172     fib_path_list_back_walk(path->fp_pl_index, ctx);
1173
1174     return (FIB_NODE_BACK_WALK_CONTINUE);
1175 }
1176
1177 static void
1178 fib_path_memory_show (void)
1179 {
1180     fib_show_memory_usage("Path",
1181                           pool_elts(fib_path_pool),
1182                           pool_len(fib_path_pool),
1183                           sizeof(fib_path_t));
1184 }
1185
1186 /*
1187  * The FIB path's graph node virtual function table
1188  */
1189 static const fib_node_vft_t fib_path_vft = {
1190     .fnv_get = fib_path_get_node,
1191     .fnv_last_lock = fib_path_last_lock_gone,
1192     .fnv_back_walk = fib_path_back_walk_notify,
1193     .fnv_mem_show = fib_path_memory_show,
1194 };
1195
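/*
 * translate the route-path flags supplied by the client into the path's
 * internal configuration flags
 */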
1196 static fib_path_cfg_flags_t
1197 fib_path_route_flags_to_cfg_flags (const fib_route_path_t *rpath)
1198 {
1199     fib_path_cfg_flags_t cfg_flags = FIB_PATH_CFG_FLAG_NONE;
1200
1201     if (rpath->frp_flags & FIB_ROUTE_PATH_RESOLVE_VIA_HOST)
1202         cfg_flags |= FIB_PATH_CFG_FLAG_RESOLVE_HOST;
1203     if (rpath->frp_flags & FIB_ROUTE_PATH_RESOLVE_VIA_ATTACHED)
1204         cfg_flags |= FIB_PATH_CFG_FLAG_RESOLVE_ATTACHED;
1205     if (rpath->frp_flags & FIB_ROUTE_PATH_LOCAL)
1206         cfg_flags |= FIB_PATH_CFG_FLAG_LOCAL;
1207     if (rpath->frp_flags & FIB_ROUTE_PATH_ATTACHED)
1208         cfg_flags |= FIB_PATH_CFG_FLAG_ATTACHED;
1209     if (rpath->frp_flags & FIB_ROUTE_PATH_INTF_RX)
1210         cfg_flags |= FIB_PATH_CFG_FLAG_INTF_RX;
1211     if (rpath->frp_flags & FIB_ROUTE_PATH_RPF_ID)
1212         cfg_flags |= FIB_PATH_CFG_FLAG_RPF_ID;
1213     if (rpath->frp_flags & FIB_ROUTE_PATH_EXCLUSIVE)
1214         cfg_flags |= FIB_PATH_CFG_FLAG_EXCLUSIVE;
1215     if (rpath->frp_flags & FIB_ROUTE_PATH_DROP)
1216         cfg_flags |= FIB_PATH_CFG_FLAG_DROP;
1217     if (rpath->frp_flags & FIB_ROUTE_PATH_SOURCE_LOOKUP)
1218         cfg_flags |= FIB_PATH_CFG_FLAG_DEAG_SRC;
1219
1220     return (cfg_flags);
1221 }
1222
1223 /*
1224  * fib_path_create
1225  *
1226  * Create and initialise a new path object.
1227  * return the index of the path.
1228  */
1229 fib_node_index_t
1230 fib_path_create (fib_node_index_t pl_index,
1231                  const fib_route_path_t *rpath)
1232 {
1233     fib_path_t *path;
1234
1235     pool_get(fib_path_pool, path);
1236     memset(path, 0, sizeof(*path));
1237
1238     fib_node_init(&path->fp_node,
1239                   FIB_NODE_TYPE_PATH);
1240
1241     dpo_reset(&path->fp_dpo);
1242     path->fp_pl_index = pl_index;
1243     path->fp_nh_proto = rpath->frp_proto;
1244     path->fp_via_fib = FIB_NODE_INDEX_INVALID;
1245     path->fp_weight = rpath->frp_weight;
1246     if (0 == path->fp_weight)
1247     {
1248         /*
1249          * a weight of 0 is a meaningless value. We could either reject it, and thus force
1250          * clients to always use 1, or accept it and fix it up appropriately.
1251          */
1252         path->fp_weight = 1;
1253     }
1254     path->fp_preference = rpath->frp_preference;
1255     path->fp_cfg_flags = fib_path_route_flags_to_cfg_flags(rpath);
1256
1257     /*
1258      * deduce the path's type from the parameters and save what is needed.
1259      */
1260     if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_LOCAL)
1261     {
1262         path->fp_type = FIB_PATH_TYPE_RECEIVE;
1263         path->receive.fp_interface = rpath->frp_sw_if_index;
1264         path->receive.fp_addr = rpath->frp_addr;
1265     }
1266     else if (rpath->frp_flags & FIB_ROUTE_PATH_UDP_ENCAP)
1267     {
1268         path->fp_type = FIB_PATH_TYPE_UDP_ENCAP;
1269         path->udp_encap.fp_udp_encap_id = rpath->frp_udp_encap_id;
1270     }
1271     else if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_INTF_RX)
1272     {
1273         path->fp_type = FIB_PATH_TYPE_INTF_RX;
1274         path->intf_rx.fp_interface = rpath->frp_sw_if_index;
1275     }
1276     else if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_RPF_ID)
1277     {
1278         path->fp_type = FIB_PATH_TYPE_DEAG;
1279         path->deag.fp_tbl_id = rpath->frp_fib_index;
1280         path->deag.fp_rpf_id = rpath->frp_rpf_id;
1281     }
1282     else if (rpath->frp_flags & FIB_ROUTE_PATH_BIER_FMASK)
1283     {
1284         path->fp_type = FIB_PATH_TYPE_BIER_FMASK;
1285         path->bier_fmask.fp_bier_fmask = rpath->frp_bier_fmask;
1286     }
1287     else if (rpath->frp_flags & FIB_ROUTE_PATH_BIER_IMP)
1288     {
1289         path->fp_type = FIB_PATH_TYPE_BIER_IMP;
1290         path->bier_imp.fp_bier_imp = rpath->frp_bier_imp;
1291     }
1292     else if (rpath->frp_flags & FIB_ROUTE_PATH_BIER_TABLE)
1293     {
1294         path->fp_type = FIB_PATH_TYPE_BIER_TABLE;
1295         path->bier_table.fp_bier_tbl = rpath->frp_bier_tbl;
1296     }
1297     else if (rpath->frp_flags & FIB_ROUTE_PATH_DEAG)
1298     {
1299         path->fp_type = FIB_PATH_TYPE_DEAG;
1300         path->deag.fp_tbl_id = rpath->frp_fib_index;
1301     }
1302     else if (~0 != rpath->frp_sw_if_index)
1303     {
1304         if (ip46_address_is_zero(&rpath->frp_addr))
1305         {
1306             path->fp_type = FIB_PATH_TYPE_ATTACHED;
1307             path->attached.fp_interface = rpath->frp_sw_if_index;
1308         }
1309         else
1310         {
1311             path->fp_type = FIB_PATH_TYPE_ATTACHED_NEXT_HOP;
1312             path->attached_next_hop.fp_interface = rpath->frp_sw_if_index;
1313             path->attached_next_hop.fp_nh = rpath->frp_addr;
1314         }
1315     }
1316     else
1317     {
1318         if (ip46_address_is_zero(&rpath->frp_addr))
1319         {
1320             if (~0 == rpath->frp_fib_index)
1321             {
1322                 path->fp_type = FIB_PATH_TYPE_SPECIAL;
1323             }
1324             else
1325             {
1326                 path->fp_type = FIB_PATH_TYPE_DEAG;
1327                 path->deag.fp_tbl_id = rpath->frp_fib_index;
1328             }           
1329         }
1330         else
1331         {
1332             path->fp_type = FIB_PATH_TYPE_RECURSIVE;
1333             if (DPO_PROTO_MPLS == path->fp_nh_proto)
1334             {
1335                 path->recursive.fp_nh.fp_local_label = rpath->frp_local_label;
1336                 path->recursive.fp_nh.fp_eos = rpath->frp_eos;
1337             }
1338             else
1339             {
1340                 path->recursive.fp_nh.fp_ip = rpath->frp_addr;
1341             }
1342             path->recursive.fp_tbl_id = rpath->frp_fib_index;
1343         }
1344     }
1345
1346     FIB_PATH_DBG(path, "create");
1347
1348     return (fib_path_get_index(path));
1349 }
1350
1351 /*
1352  * fib_path_create_special
1353  *
1354  * Create and initialise a new path object.
1355  * return the index of the path.
1356  */
1357 fib_node_index_t
1358 fib_path_create_special (fib_node_index_t pl_index,
1359                          dpo_proto_t nh_proto,
1360                          fib_path_cfg_flags_t flags,
1361                          const dpo_id_t *dpo)
1362 {
1363     fib_path_t *path;
1364
1365     pool_get(fib_path_pool, path);
1366     memset(path, 0, sizeof(*path));
1367
1368     fib_node_init(&path->fp_node,
1369                   FIB_NODE_TYPE_PATH);
1370     dpo_reset(&path->fp_dpo);
1371
1372     path->fp_pl_index = pl_index;
1373     path->fp_weight = 1;
1374     path->fp_preference = 0;
1375     path->fp_nh_proto = nh_proto;
1376     path->fp_via_fib = FIB_NODE_INDEX_INVALID;
1377     path->fp_cfg_flags = flags;
1378
1379     if (FIB_PATH_CFG_FLAG_DROP & flags)
1380     {
1381         path->fp_type = FIB_PATH_TYPE_SPECIAL;
1382     }
1383     else if (FIB_PATH_CFG_FLAG_LOCAL & flags)
1384     {
1385         path->fp_type = FIB_PATH_TYPE_RECEIVE;
1386         path->attached.fp_interface = FIB_NODE_INDEX_INVALID;
1387     }
1388     else
1389     {
1390         path->fp_type = FIB_PATH_TYPE_EXCLUSIVE;
1391         ASSERT(NULL != dpo);
1392         dpo_copy(&path->exclusive.fp_ex_dpo, dpo);
1393     }
1394
1395     return (fib_path_get_index(path));
1396 }
1397
1398 /*
1399  * fib_path_copy
1400  *
1401  * Copy a path. return index of new path.
1402  */
1403 fib_node_index_t
1404 fib_path_copy (fib_node_index_t path_index,
1405                fib_node_index_t path_list_index)
1406 {
1407     fib_path_t *path, *orig_path;
1408
1409     pool_get(fib_path_pool, path);
1410
1411     orig_path = fib_path_get(path_index);
1412     ASSERT(NULL != orig_path);
1413
1414     memcpy(path, orig_path, sizeof(*path));
1415
1416     FIB_PATH_DBG(path, "create-copy:%d", path_index);
1417
1418     /*
1419      * reset the dynamic section
1420      */
1421     fib_node_init(&path->fp_node, FIB_NODE_TYPE_PATH);
1422     path->fp_oper_flags     = FIB_PATH_OPER_FLAG_NONE;
1423     path->fp_pl_index  = path_list_index;
1424     path->fp_via_fib   = FIB_NODE_INDEX_INVALID;
1425     memset(&path->fp_dpo, 0, sizeof(path->fp_dpo));
1426     dpo_reset(&path->fp_dpo);
1427
1428     return (fib_path_get_index(path));
1429 }
1430
1431 /*
1432  * fib_path_destroy
1433  *
1434  * destroy a path that is no longer required
1435  */
1436 void
1437 fib_path_destroy (fib_node_index_t path_index)
1438 {
1439     fib_path_t *path;
1440
1441     path = fib_path_get(path_index);
1442
1443     ASSERT(NULL != path);
1444     FIB_PATH_DBG(path, "destroy");
1445
1446     fib_path_unresolve(path);
1447
1448     fib_node_deinit(&path->fp_node);
1449     pool_put(fib_path_pool, path);
1450 }
1451
1452 /*
1453  * fib_path_hash
1454  *
1455  * compute the hash of a path over its key (hashable) region
1456  */
1457 uword
1458 fib_path_hash (fib_node_index_t path_index)
1459 {
1460     fib_path_t *path;
1461
1462     path = fib_path_get(path_index);
1463
1464     return (hash_memory(STRUCT_MARK_PTR(path, path_hash_start),
1465                         (STRUCT_OFFSET_OF(fib_path_t, path_hash_end) -
1466                          STRUCT_OFFSET_OF(fib_path_t, path_hash_start)),
1467                         0));
1468 }
1469
1470 /*
1471  * fib_path_cmp_i
1472  *
1473  * Compare two paths for equivalence.
1474  */
1475 static int
1476 fib_path_cmp_i (const fib_path_t *path1,
1477                 const fib_path_t *path2)
1478 {
1479     int res;
1480
1481     res = 1;
1482
1483     /*
1484      * paths of different types or protocols are not equal.
1485      * paths that differ only in weight and/or preference are considered equal.
1486      */
1487     if (path1->fp_type != path2->fp_type)
1488     {
1489         res = (path1->fp_type - path2->fp_type);
1490     }
1491     else if (path1->fp_nh_proto != path2->fp_nh_proto)
1492     {
1493         res = (path1->fp_nh_proto - path2->fp_nh_proto);
1494     }
1495     else
1496     {
1497         /*
1498          * both paths are of the same type.
1499          * consider each type and its attributes in turn.
1500          */
1501         switch (path1->fp_type)
1502         {
1503         case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
1504             res = ip46_address_cmp(&path1->attached_next_hop.fp_nh,
1505                                    &path2->attached_next_hop.fp_nh);
1506             if (0 == res) {
1507                 res = (path1->attached_next_hop.fp_interface -
1508                        path2->attached_next_hop.fp_interface);
1509             }
1510             break;
1511         case FIB_PATH_TYPE_ATTACHED:
1512             res = (path1->attached.fp_interface -
1513                    path2->attached.fp_interface);
1514             break;
1515         case FIB_PATH_TYPE_RECURSIVE:
1516             res = ip46_address_cmp(&path1->recursive.fp_nh,
1517                                    &path2->recursive.fp_nh);
1518  
1519             if (0 == res)
1520             {
1521                 res = (path1->recursive.fp_tbl_id - path2->recursive.fp_tbl_id);
1522             }
1523             break;
1524         case FIB_PATH_TYPE_BIER_FMASK:
1525             res = (path1->bier_fmask.fp_bier_fmask -
1526                    path2->bier_fmask.fp_bier_fmask);
1527             break;
1528         case FIB_PATH_TYPE_BIER_IMP:
1529             res = (path1->bier_imp.fp_bier_imp -
1530                    path2->bier_imp.fp_bier_imp);
1531             break;
1532         case FIB_PATH_TYPE_BIER_TABLE:
1533             res = bier_table_id_cmp(&path1->bier_table.fp_bier_tbl,
1534                                     &path2->bier_table.fp_bier_tbl);
1535             break;
1536         case FIB_PATH_TYPE_DEAG:
1537             res = (path1->deag.fp_tbl_id - path2->deag.fp_tbl_id);
1538             if (0 == res)
1539             {
1540                 res = (path1->deag.fp_rpf_id - path2->deag.fp_rpf_id);
1541             }
1542             break;
1543         case FIB_PATH_TYPE_INTF_RX:
1544             res = (path1->intf_rx.fp_interface - path2->intf_rx.fp_interface);
1545             break;
1546         case FIB_PATH_TYPE_UDP_ENCAP:
1547             res = (path1->udp_encap.fp_udp_encap_id - path2->udp_encap.fp_udp_encap_id);
1548             break;
1549         case FIB_PATH_TYPE_SPECIAL:
1550         case FIB_PATH_TYPE_RECEIVE:
1551         case FIB_PATH_TYPE_EXCLUSIVE:
1552             res = 0;
1553             break;
1554         }
1555     }
1556     return (res);
1557 }
1558
1559 /*
1560  * fib_path_cmp_for_sort
1561  *
1562  * Compare two paths for equivalence. Used during path sorting.
1563  * As usual 0 means equal.
1564  */
1565 int
1566 fib_path_cmp_for_sort (void * v1,
1567                        void * v2)
1568 {
1569     fib_node_index_t *pi1 = v1, *pi2 = v2;
1570     fib_path_t *path1, *path2;
1571
1572     path1 = fib_path_get(*pi1);
1573     path2 = fib_path_get(*pi2);
1574
1575     /*
1576      * when sorting paths we want the highest preference paths
1577      * first, so that the set of choices built is in preference order
1578      */
1579     if (path1->fp_preference != path2->fp_preference)
1580     {
1581         return (path1->fp_preference - path2->fp_preference);
1582     }
1583
1584     return (fib_path_cmp_i(path1, path2));
1585 }
1586
1587 /*
1588  * fib_path_cmp
1589  *
1590  * Compare two paths for equivalence.
1591  */
1592 int
1593 fib_path_cmp (fib_node_index_t pi1,
1594               fib_node_index_t pi2)
1595 {
1596     fib_path_t *path1, *path2;
1597
1598     path1 = fib_path_get(pi1);
1599     path2 = fib_path_get(pi2);
1600
1601     return (fib_path_cmp_i(path1, path2));
1602 }
1603
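/*
 * fib_path_cmp_w_route_path
 *
 * Compare a path with a route-path description. 0 means equivalent.
 */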
1604 int
1605 fib_path_cmp_w_route_path (fib_node_index_t path_index,
1606                            const fib_route_path_t *rpath)
1607 {
1608     fib_path_t *path;
1609     int res;
1610
1611     path = fib_path_get(path_index);
1612
1613     res = 1;
1614
1615     if (path->fp_weight != rpath->frp_weight)
1616     {
1617         res = (path->fp_weight - rpath->frp_weight);
1618     }
1619     else
1620     {
1621         /*
1622          * compare the path against the route-path description.
1623          * consider each type and its attributes in turn.
1624          */
1625         switch (path->fp_type)
1626         {
1627         case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
1628             res = ip46_address_cmp(&path->attached_next_hop.fp_nh,
1629                                    &rpath->frp_addr);
1630             if (0 == res)
1631             {
1632                 res = (path->attached_next_hop.fp_interface -
1633                        rpath->frp_sw_if_index);
1634             }
1635             break;
1636         case FIB_PATH_TYPE_ATTACHED:
1637             res = (path->attached.fp_interface - rpath->frp_sw_if_index);
1638             break;
1639         case FIB_PATH_TYPE_RECURSIVE:
1640             if (DPO_PROTO_MPLS == path->fp_nh_proto)
1641             {
1642                 res = path->recursive.fp_nh.fp_local_label - rpath->frp_local_label;
1643
1644                 if (res == 0)
1645                 {
1646                     res = path->recursive.fp_nh.fp_eos - rpath->frp_eos;
1647                 }
1648             }
1649             else
1650             {
1651                 res = ip46_address_cmp(&path->recursive.fp_nh.fp_ip,
1652                                        &rpath->frp_addr);
1653             }
1654
1655             if (0 == res)
1656             {
1657                 res = (path->recursive.fp_tbl_id - rpath->frp_fib_index);
1658             }
1659             break;
1660         case FIB_PATH_TYPE_BIER_FMASK:
1661             res = (path->bier_fmask.fp_bier_fmask - rpath->frp_bier_fmask);
1662             break;
1663         case FIB_PATH_TYPE_BIER_IMP:
1664             res = (path->bier_imp.fp_bier_imp - rpath->frp_bier_imp);
1665             break;
1666         case FIB_PATH_TYPE_BIER_TABLE:
1667             res = bier_table_id_cmp(&path->bier_table.fp_bier_tbl,
1668                                     &rpath->frp_bier_tbl);
1669             break;
1670         case FIB_PATH_TYPE_INTF_RX:
1671             res = (path->intf_rx.fp_interface - rpath->frp_sw_if_index);
1672             break;
1673         case FIB_PATH_TYPE_UDP_ENCAP:
1674             res = (path->udp_encap.fp_udp_encap_id - rpath->frp_udp_encap_id);
1675             break;
1676         case FIB_PATH_TYPE_DEAG:
1677             res = (path->deag.fp_tbl_id - rpath->frp_fib_index);
1678             if (0 == res)
1679             {
1680                 res = (path->deag.fp_rpf_id - rpath->frp_rpf_id);
1681             }
1682             break;
1683         case FIB_PATH_TYPE_SPECIAL:
1684         case FIB_PATH_TYPE_RECEIVE:
1685         case FIB_PATH_TYPE_EXCLUSIVE:
1686             res = 0;
1687             break;
1688         }
1689     }
1690     return (res);
1691 }
1692
1693 /*
1694  * fib_path_recursive_loop_detect
1695  *
1696  * A forward walk of the FIB object graph to detect a cycle/loop. This
1697  * walk is initiated when an entry links to a new path-list or unlinks from an old one.
1698  * The entry vector passed contains all the FIB entries that are children of this
1699  * path (it is all the entries encountered on the walk so far). If this vector
1700  * contains the entry this path resolves via, then a loop is about to form.
1701  * The loop must be allowed to form, since we need the dependencies in place
1702  * so that we can track when the loop breaks.
1703  * However, we MUST NOT produce a loop in the forwarding graph (else packets
1704  * would loop around the switch path until the loop breaks), so we mark recursive
1705  * paths as looped so that they do not contribute forwarding information.
1706  * By marking the path as looped, an entry such as:
1707  *    X/Y
1708  *     via a.a.a.a (looped)
1709  *     via b.b.b.b (not looped)
1710  * can still forward using the info provided by b.b.b.b only.
1711  */
1712 int
1713 fib_path_recursive_loop_detect (fib_node_index_t path_index,
1714                                 fib_node_index_t **entry_indicies)
1715 {
1716     fib_path_t *path;
1717
1718     path = fib_path_get(path_index);
1719
1720     /*
1721      * the forced drop path is never looped, since it is never resolved.
1722      */
1723     if (fib_path_is_permanent_drop(path))
1724     {
1725         return (0);
1726     }
1727
1728     switch (path->fp_type)
1729     {
1730     case FIB_PATH_TYPE_RECURSIVE:
1731     {
1732         fib_node_index_t *entry_index, *entries;
1733         int looped = 0;
1734         entries = *entry_indicies;
1735
1736         vec_foreach(entry_index, entries) {
1737             if (*entry_index == path->fp_via_fib)
1738             {
1739                 /*
1740                  * the entry that is about to link to this path-list (or
1741                  * one of this path-list's children) is the same entry that
1742                  * this recursive path resolves through. this is a cycle.
1743                  * abort the walk.
1744                  */
1745                 looped = 1;
1746                 break;
1747             }
1748         }
1749
1750         if (looped)
1751         {
1752             FIB_PATH_DBG(path, "recursive loop formed");
1753             path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RECURSIVE_LOOP;
1754
1755             dpo_copy(&path->fp_dpo, drop_dpo_get(path->fp_nh_proto));
1756         }
1757         else
1758         {
1759             /*
1760              * no loop here yet. keep forward walking the graph.
1761              */     
1762             if (fib_entry_recursive_loop_detect(path->fp_via_fib, entry_indicies))
1763             {
1764                 FIB_PATH_DBG(path, "recursive loop formed");
1765                 path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RECURSIVE_LOOP;
1766             }
1767             else
1768             {
1769                 FIB_PATH_DBG(path, "recursive loop cleared");
1770                 path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RECURSIVE_LOOP;
1771             }
1772         }
1773         break;
1774     }
1775     case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
1776     case FIB_PATH_TYPE_ATTACHED:
1777     case FIB_PATH_TYPE_SPECIAL:
1778     case FIB_PATH_TYPE_DEAG:
1779     case FIB_PATH_TYPE_RECEIVE:
1780     case FIB_PATH_TYPE_INTF_RX:
1781     case FIB_PATH_TYPE_UDP_ENCAP:
1782     case FIB_PATH_TYPE_EXCLUSIVE:
1783     case FIB_PATH_TYPE_BIER_FMASK:
1784     case FIB_PATH_TYPE_BIER_TABLE:
1785     case FIB_PATH_TYPE_BIER_IMP:
1786         /*
1787          * these path types cannot be part of a loop, since they are the leaves
1788          * of the graph.
1789          */
1790         break;
1791     }
1792
1793     return (fib_path_is_looped(path_index));
1794 }
1795
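/*
 * fib_path_resolve
 *
 * Resolve the path; construct the DPO through which it forwards based on
 * its type. Returns non-zero if the path ends up resolved.
 */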
1796 int
1797 fib_path_resolve (fib_node_index_t path_index)
1798 {
1799     fib_path_t *path;
1800
1801     path = fib_path_get(path_index);
1802
1803     /*
1804      * hope for the best.
1805      */
1806     path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RESOLVED;
1807
1808     /*
1809      * the forced drop path resolves via the drop adj
1810      */
1811     if (fib_path_is_permanent_drop(path))
1812     {
1813         dpo_copy(&path->fp_dpo, drop_dpo_get(path->fp_nh_proto));
1814         path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
1815         return (fib_path_is_resolved(path_index));
1816     }
1817
1818     switch (path->fp_type)
1819     {
1820     case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
1821         fib_path_attached_next_hop_set(path);
1822         break;
1823     case FIB_PATH_TYPE_ATTACHED:
1824         if (DPO_PROTO_ETHERNET == path->fp_nh_proto)
1825         {
1826             l2_bridge_dpo_add_or_lock(path->attached.fp_interface,
1827                                       &path->fp_dpo);
1828         }
1829         else
1830         {
1831             /*
1832              * an attached path is unresolved if the interface is admin down
1833              */
1834             if (!vnet_sw_interface_is_admin_up(vnet_get_main(),
1835                                                path->attached.fp_interface))
1836             {
1837                 path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
1838             }
1839             dpo_set(&path->fp_dpo,
1840                     DPO_ADJACENCY,
1841                     path->fp_nh_proto,
1842                     fib_path_attached_get_adj(path,
1843                                               dpo_proto_to_link(path->fp_nh_proto)));
1844
1845             /*
1846              * become a child of the adjacency so we receive updates
1847              * when the interface state changes
1848              */
1849             path->fp_sibling = adj_child_add(path->fp_dpo.dpoi_index,
1850                                              FIB_NODE_TYPE_PATH,
1851                                              fib_path_get_index(path));
1852         }
1853         break;
1854     case FIB_PATH_TYPE_RECURSIVE:
1855     {
1856         /*
1857          * Create an RR source entry in the table for the address
1858          * that this path recurses through.
1859          * This resolve action is recursive, hence we may create
1860          * more paths in the process; more creates may mean a realloc
1861          * of the path pool, so the path pointer is re-fetched afterwards.
1862          */
1863         fib_node_index_t fei;
1864         fib_prefix_t pfx;
1865
1866         ASSERT(FIB_NODE_INDEX_INVALID == path->fp_via_fib);
1867
1868         if (DPO_PROTO_MPLS == path->fp_nh_proto)
1869         {
1870             fib_prefix_from_mpls_label(path->recursive.fp_nh.fp_local_label,
1871                                        path->recursive.fp_nh.fp_eos,
1872                                        &pfx);
1873         }
1874         else
1875         {
1876             fib_prefix_from_ip46_addr(&path->recursive.fp_nh.fp_ip, &pfx);
1877         }
1878
1879         fei = fib_table_entry_special_add(path->recursive.fp_tbl_id,
1880                                           &pfx,
1881                                           FIB_SOURCE_RR,
1882                                           FIB_ENTRY_FLAG_NONE);
1883
1884         path = fib_path_get(path_index);
1885         path->fp_via_fib = fei;
1886
1887         /*
1888          * become a dependent child of the entry so the path is 
1889          * informed when the forwarding for the entry changes.
1890          */
1891         path->fp_sibling = fib_entry_child_add(path->fp_via_fib,
1892                                                FIB_NODE_TYPE_PATH,
1893                                                fib_path_get_index(path));
1894
1895         /*
1896          * create and configure the IP DPO
1897          */
1898         fib_path_recursive_adj_update(
1899             path,
1900             fib_path_to_chain_type(path),
1901             &path->fp_dpo);
1902
1903         break;
1904     }
1905     case FIB_PATH_TYPE_BIER_FMASK:
1906     {
1907         /*
1908          * become a dependent child of the entry so the path is
1909          * informed when the forwarding for the entry changes.
1910          */
1911         path->fp_sibling = bier_fmask_child_add(path->bier_fmask.fp_bier_fmask,
1912                                                 FIB_NODE_TYPE_PATH,
1913                                                 fib_path_get_index(path));
1914
1915         path->fp_via_bier_fmask = path->bier_fmask.fp_bier_fmask;
1916         fib_path_bier_fmask_update(path, &path->fp_dpo);
1917
1918         break;
1919     }
1920     case FIB_PATH_TYPE_BIER_IMP:
1921         bier_imp_lock(path->bier_imp.fp_bier_imp);
1922         bier_imp_contribute_forwarding(path->bier_imp.fp_bier_imp,
1923                                        DPO_PROTO_IP4,
1924                                        &path->fp_dpo);
1925         break;
1926     case FIB_PATH_TYPE_BIER_TABLE:
1927     {
1928         /*
1929          * Find/create the BIER table to link to
1930          */
1931         ASSERT(FIB_NODE_INDEX_INVALID == path->fp_via_bier_tbl);
1932
1933         path->fp_via_bier_tbl =
1934             bier_table_ecmp_create_and_lock(&path->bier_table.fp_bier_tbl);
1935
1936         bier_table_contribute_forwarding(path->fp_via_bier_tbl,
1937                                          &path->fp_dpo);
1938         break;
1939     }
1940     case FIB_PATH_TYPE_SPECIAL:
1941         /*
1942          * Resolve via the drop
1943          */
1944         dpo_copy(&path->fp_dpo, drop_dpo_get(path->fp_nh_proto));
1945         break;
1946     case FIB_PATH_TYPE_DEAG:
1947     {
1948         if (DPO_PROTO_BIER == path->fp_nh_proto)
1949         {
1950             bier_disp_table_contribute_forwarding(path->deag.fp_tbl_id,
1951                                                   &path->fp_dpo);
1952         }
1953         else
1954         {
1955             /*
1956              * Resolve via a lookup DPO.
1957              * FIXME. control plane should add routes with a table ID
1958              */
1959             lookup_input_t input;
1960             lookup_cast_t cast;
1961
1962             cast = (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_RPF_ID ?
1963                     LOOKUP_MULTICAST :
1964                     LOOKUP_UNICAST);
1965             input = (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_DEAG_SRC ?
1966                      LOOKUP_INPUT_SRC_ADDR :
1967                      LOOKUP_INPUT_DST_ADDR);
1968
1969             lookup_dpo_add_or_lock_w_fib_index(path->deag.fp_tbl_id,
1970                                                path->fp_nh_proto,
1971                                                cast,
1972                                                input,
1973                                                LOOKUP_TABLE_FROM_CONFIG,
1974                                                &path->fp_dpo);
1975         }
1976         break;
1977     }
1978     case FIB_PATH_TYPE_RECEIVE:
1979         /*
1980          * Resolve via a receive DPO.
1981          */
1982         receive_dpo_add_or_lock(path->fp_nh_proto,
1983                                 path->receive.fp_interface,
1984                                 &path->receive.fp_addr,
1985                                 &path->fp_dpo);
1986         break;
1987     case FIB_PATH_TYPE_UDP_ENCAP:
1988         udp_encap_lock(path->udp_encap.fp_udp_encap_id);
1989         udp_encap_contribute_forwarding(path->udp_encap.fp_udp_encap_id,
1990                                         path->fp_nh_proto,
1991                                         &path->fp_dpo);
1992         break;
1993     case FIB_PATH_TYPE_INTF_RX: {
1994         /*
1995          * Resolve via an interface receive DPO.
1996          */
1997         interface_rx_dpo_add_or_lock(path->fp_nh_proto,
1998                                      path->intf_rx.fp_interface,
1999                                      &path->fp_dpo);
2000         break;
2001     }
2002     case FIB_PATH_TYPE_EXCLUSIVE:
2003         /*
2004          * Resolve via the user provided DPO
2005          */
2006         dpo_copy(&path->fp_dpo, &path->exclusive.fp_ex_dpo);
2007         break;
2008     }
2009
2010     return (fib_path_is_resolved(path_index));
2011 }
2012
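/*
 * fib_path_get_resolving_interface
 *
 * Return the SW interface index via which the path resolves,
 * or ~0 if the path type has no resolving interface.
 */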
2013 u32
2014 fib_path_get_resolving_interface (fib_node_index_t path_index)
2015 {
2016     fib_path_t *path;
2017
2018     path = fib_path_get(path_index);
2019
2020     switch (path->fp_type)
2021     {
2022     case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
2023         return (path->attached_next_hop.fp_interface);
2024     case FIB_PATH_TYPE_ATTACHED:
2025         return (path->attached.fp_interface);
2026     case FIB_PATH_TYPE_RECEIVE:
2027         return (path->receive.fp_interface);
2028     case FIB_PATH_TYPE_RECURSIVE:
2029         if (fib_path_is_resolved(path_index))
2030         {
2031             return (fib_entry_get_resolving_interface(path->fp_via_fib));
2032         }
2033         break;
2034     case FIB_PATH_TYPE_INTF_RX:
2035     case FIB_PATH_TYPE_UDP_ENCAP:
2036     case FIB_PATH_TYPE_SPECIAL:
2037     case FIB_PATH_TYPE_DEAG:
2038     case FIB_PATH_TYPE_EXCLUSIVE:
2039     case FIB_PATH_TYPE_BIER_FMASK:
2040     case FIB_PATH_TYPE_BIER_TABLE:
2041     case FIB_PATH_TYPE_BIER_IMP:
2042         break;
2043     }
2044     return (~0);
2045 }
2046
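/*
 * fib_path_get_resolving_index
 *
 * Return the index of the object via which the path resolves (FIB entry,
 * BIER f-mask/table/imposition or UDP encap), or ~0 if not applicable.
 */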
2047 index_t
2048 fib_path_get_resolving_index (fib_node_index_t path_index)
2049 {
2050     fib_path_t *path;
2051
2052     path = fib_path_get(path_index);
2053
2054     switch (path->fp_type)
2055     {
2056     case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
2057     case FIB_PATH_TYPE_ATTACHED:
2058     case FIB_PATH_TYPE_RECEIVE:
2059     case FIB_PATH_TYPE_INTF_RX:
2060     case FIB_PATH_TYPE_SPECIAL:
2061     case FIB_PATH_TYPE_DEAG:
2062     case FIB_PATH_TYPE_EXCLUSIVE:
2063         break;
2064     case FIB_PATH_TYPE_UDP_ENCAP:
2065         return (path->udp_encap.fp_udp_encap_id);
2066     case FIB_PATH_TYPE_RECURSIVE:
2067         return (path->fp_via_fib);
2068     case FIB_PATH_TYPE_BIER_FMASK:
2069         return (path->bier_fmask.fp_bier_fmask);
2070     case FIB_PATH_TYPE_BIER_TABLE:
2071         return (path->fp_via_bier_tbl);
2072     case FIB_PATH_TYPE_BIER_IMP:
2073         return (path->bier_imp.fp_bier_imp);
2074     }
2075     return (~0);
2076 }
2077
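/*
 * fib_path_get_adj
 *
 * Return the adjacency the path is currently using, or ADJ_INDEX_INVALID
 * if the path's DPO is not an adjacency.
 */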
2078 adj_index_t
2079 fib_path_get_adj (fib_node_index_t path_index)
2080 {
2081     fib_path_t *path;
2082
2083     path = fib_path_get(path_index);
2084
2085     ASSERT(dpo_is_adj(&path->fp_dpo));
2086     if (dpo_is_adj(&path->fp_dpo))
2087     {
2088         return (path->fp_dpo.dpoi_index);
2089     }
2090     return (ADJ_INDEX_INVALID);
2091 }
2092
2093 u16
2094 fib_path_get_weight (fib_node_index_t path_index)
2095 {
2096     fib_path_t *path;
2097
2098     path = fib_path_get(path_index);
2099
2100     ASSERT(path);
2101
2102     return (path->fp_weight);
2103 }
2104
2105 u16
2106 fib_path_get_preference (fib_node_index_t path_index)
2107 {
2108     fib_path_t *path;
2109
2110     path = fib_path_get(path_index);
2111
2112     ASSERT(path);
2113
2114     return (path->fp_preference);
2115 }
2116
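/*
 * fib_path_get_rpf_id
 *
 * Return the path's configured RPF-ID, or ~0 if it does not have one.
 */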
2117 u32
2118 fib_path_get_rpf_id (fib_node_index_t path_index)
2119 {
2120     fib_path_t *path;
2121
2122     path = fib_path_get(path_index);
2123
2124     ASSERT(path);
2125
2126     if (FIB_PATH_CFG_FLAG_RPF_ID & path->fp_cfg_flags)
2127     {
2128         return (path->deag.fp_rpf_id);
2129     }
2130
2131     return (~0);
2132 }
2133
2134 /**
2135  * @brief Contribute the path's resolving interface to the uRPF list passed.
2136  * By calling this function over all paths, recursively, a child
2137  * can construct its full set of forwarding adjacencies, and hence its
2138  * uRPF list.
2139  */
2140 void
2141 fib_path_contribute_urpf (fib_node_index_t path_index,
2142                           index_t urpf)
2143 {
2144     fib_path_t *path;
2145
2146     path = fib_path_get(path_index);
2147
2148     /*
2149      * resolved and unresolved paths contribute to the RPF list.
2150      */
2151     switch (path->fp_type)
2152     {
2153     case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
2154         fib_urpf_list_append(urpf, path->attached_next_hop.fp_interface);
2155         break;
2156
2157     case FIB_PATH_TYPE_ATTACHED:
2158         fib_urpf_list_append(urpf, path->attached.fp_interface);
2159         break;
2160
2161     case FIB_PATH_TYPE_RECURSIVE:
2162         if (FIB_NODE_INDEX_INVALID != path->fp_via_fib &&
2163             !fib_path_is_looped(path_index))
2164         {
2165             /*
2166              * a path can be unresolved due to constraints, or unresolved
2167              * because it has no via entry; without a via there is nothing to add.
2168              */
2169             fib_entry_contribute_urpf(path->fp_via_fib, urpf);
2170         }
2171         break;
2172
2173     case FIB_PATH_TYPE_EXCLUSIVE:
2174     case FIB_PATH_TYPE_SPECIAL:
2175     {
2176         /*
2177          * these path types may link to an adj, if that's what
2178          * the client gave
2179          */
2180         u32 rpf_sw_if_index;
2181
2182         rpf_sw_if_index = dpo_get_urpf(&path->fp_dpo);
2183
2184         if (~0 != rpf_sw_if_index)
2185         {
2186             fib_urpf_list_append(urpf, rpf_sw_if_index);
2187         }
2188         break;
2189     }
2190     case FIB_PATH_TYPE_DEAG:
2191     case FIB_PATH_TYPE_RECEIVE:
2192     case FIB_PATH_TYPE_INTF_RX:
2193     case FIB_PATH_TYPE_UDP_ENCAP:
2194     case FIB_PATH_TYPE_BIER_FMASK:
2195     case FIB_PATH_TYPE_BIER_TABLE:
2196     case FIB_PATH_TYPE_BIER_IMP:
2197         /*
2198          * these path types don't link to an adj
2199          */
2200         break;
2201     }
2202 }
2203
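/*
 * fib_path_stack_mpls_disp
 *
 * For path types that require it (attached next-hop and deag), wrap the
 * DPO passed in an MPLS disposition DPO for the given payload protocol.
 * Other path types are left unchanged.
 */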
2204 void
2205 fib_path_stack_mpls_disp (fib_node_index_t path_index,
2206                           dpo_proto_t payload_proto,
2207                           dpo_id_t *dpo)
2208 {
2209     fib_path_t *path;
2210
2211     path = fib_path_get(path_index);
2212
2213     ASSERT(path);
2214
2215     switch (path->fp_type)
2216     {
2217     case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
2218     {
2219         dpo_id_t tmp = DPO_INVALID;
2220
2221         dpo_copy(&tmp, dpo);
2222         dpo_set(dpo,
2223                 DPO_MPLS_DISPOSITION,
2224                 payload_proto,
2225                 mpls_disp_dpo_create(payload_proto, ~0, &tmp));
2226         dpo_reset(&tmp);
2227         break;
2228     }                
2229     case FIB_PATH_TYPE_DEAG:
2230     {
2231         dpo_id_t tmp = DPO_INVALID;
2232
2233         dpo_copy(&tmp, dpo);
2234         dpo_set(dpo,
2235                 DPO_MPLS_DISPOSITION,
2236                 payload_proto,
2237                 mpls_disp_dpo_create(payload_proto,
2238                                      path->deag.fp_rpf_id,
2239                                      &tmp));
2240         dpo_reset(&tmp);
2241         break;
2242     }
2243     case FIB_PATH_TYPE_RECEIVE:
2244     case FIB_PATH_TYPE_ATTACHED:
2245     case FIB_PATH_TYPE_RECURSIVE:
2246     case FIB_PATH_TYPE_INTF_RX:
2247     case FIB_PATH_TYPE_UDP_ENCAP:
2248     case FIB_PATH_TYPE_EXCLUSIVE:
2249     case FIB_PATH_TYPE_SPECIAL:
2250     case FIB_PATH_TYPE_BIER_FMASK:
2251     case FIB_PATH_TYPE_BIER_TABLE:
2252     case FIB_PATH_TYPE_BIER_IMP:
2253         break;
2254     }
2255 }
2256
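/*
 * fib_path_contribute_forwarding
 *
 * Fill in the DPO the path contributes for the requested forwarding
 * chain type. If the chain type matches the path's native type the
 * stored DPO is copied, otherwise a suitable DPO is constructed.
 */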
2257 void
2258 fib_path_contribute_forwarding (fib_node_index_t path_index,
2259                                 fib_forward_chain_type_t fct,
2260                                 dpo_id_t *dpo)
2261 {
2262     fib_path_t *path;
2263
2264     path = fib_path_get(path_index);
2265
2266     ASSERT(path);
2267     ASSERT(FIB_FORW_CHAIN_TYPE_MPLS_EOS != fct);
2268
2269     FIB_PATH_DBG(path, "contribute");
2270
2271     /*
2272      * The DPO stored in the path was created when the path was resolved.
2273      * This then represents the path's 'native' protocol, e.g. IP.
2274      * For all other chain types we will need to go and find something else.
2275      */
2276     if (fib_path_to_chain_type(path) == fct)
2277     {
2278         dpo_copy(dpo, &path->fp_dpo);
2279     }
2280     else
2281     {
2282         switch (path->fp_type)
2283         {
2284         case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
2285             switch (fct)
2286             {
2287             case FIB_FORW_CHAIN_TYPE_UNICAST_IP4:
2288             case FIB_FORW_CHAIN_TYPE_UNICAST_IP6:
2289             case FIB_FORW_CHAIN_TYPE_MPLS_EOS:
2290             case FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS:
2291             case FIB_FORW_CHAIN_TYPE_ETHERNET:
2292             case FIB_FORW_CHAIN_TYPE_NSH:
2293             {
2294                 adj_index_t ai;
2295
2296                 /*
2297                  * get an appropriate link-type adj.
2298                  */
2299                 ai = fib_path_attached_next_hop_get_adj(
2300                          path,
2301                          fib_forw_chain_type_to_link_type(fct));
2302                 dpo_set(dpo, DPO_ADJACENCY,
2303                         fib_forw_chain_type_to_dpo_proto(fct), ai);
2304                 adj_unlock(ai);
2305
2306                 break;
2307             }
2308             case FIB_FORW_CHAIN_TYPE_MCAST_IP4:
2309             case FIB_FORW_CHAIN_TYPE_MCAST_IP6:
2310             case FIB_FORW_CHAIN_TYPE_BIER:
2311                 break;
2312             }
2313             break;
2314         case FIB_PATH_TYPE_RECURSIVE:
2315             switch (fct)
2316             {
2317             case FIB_FORW_CHAIN_TYPE_MPLS_EOS:
2318             case FIB_FORW_CHAIN_TYPE_UNICAST_IP4:
2319             case FIB_FORW_CHAIN_TYPE_UNICAST_IP6:
2320             case FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS:
2321             case FIB_FORW_CHAIN_TYPE_MCAST_IP4:
2322             case FIB_FORW_CHAIN_TYPE_MCAST_IP6:
2323             case FIB_FORW_CHAIN_TYPE_BIER:
2324                 fib_path_recursive_adj_update(path, fct, dpo);
2325                 break;
2326             case FIB_FORW_CHAIN_TYPE_ETHERNET:
2327             case FIB_FORW_CHAIN_TYPE_NSH:
2328                 ASSERT(0);
2329                 break;
2330             }
2331             break;
2332         case FIB_PATH_TYPE_BIER_TABLE:
2333             switch (fct)
2334             {
2335             case FIB_FORW_CHAIN_TYPE_BIER:
2336                 bier_table_contribute_forwarding(path->fp_via_bier_tbl, dpo);
2337                 break;
2338             case FIB_FORW_CHAIN_TYPE_MPLS_EOS:
2339             case FIB_FORW_CHAIN_TYPE_UNICAST_IP4:
2340             case FIB_FORW_CHAIN_TYPE_UNICAST_IP6:
2341             case FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS:
2342             case FIB_FORW_CHAIN_TYPE_MCAST_IP4:
2343             case FIB_FORW_CHAIN_TYPE_MCAST_IP6:
2344             case FIB_FORW_CHAIN_TYPE_ETHERNET:
2345             case FIB_FORW_CHAIN_TYPE_NSH:
2346                 ASSERT(0);
2347                 break;
2348             }
2349             break;
2350         case FIB_PATH_TYPE_BIER_FMASK:
2351             switch (fct)
2352             {
2353             case FIB_FORW_CHAIN_TYPE_BIER:
2354                 fib_path_bier_fmask_update(path, dpo);
2355                 break;
2356             case FIB_FORW_CHAIN_TYPE_MPLS_EOS:
2357             case FIB_FORW_CHAIN_TYPE_UNICAST_IP4:
2358             case FIB_FORW_CHAIN_TYPE_UNICAST_IP6:
2359             case FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS:
2360             case FIB_FORW_CHAIN_TYPE_MCAST_IP4:
2361             case FIB_FORW_CHAIN_TYPE_MCAST_IP6:
2362             case FIB_FORW_CHAIN_TYPE_ETHERNET:
2363             case FIB_FORW_CHAIN_TYPE_NSH:
2364                 ASSERT(0);
2365                 break;
2366             }
2367             break;
2368         case FIB_PATH_TYPE_BIER_IMP:
2369             bier_imp_contribute_forwarding(path->bier_imp.fp_bier_imp,
2370                                            fib_forw_chain_type_to_dpo_proto(fct),
2371                                            dpo);
2372             break;
2373         case FIB_PATH_TYPE_DEAG:
2374             switch (fct)
2375             {
2376             case FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS:
2377                 lookup_dpo_add_or_lock_w_table_id(MPLS_FIB_DEFAULT_TABLE_ID,
2378                                                   DPO_PROTO_MPLS,
2379                                                   LOOKUP_UNICAST,
2380                                                   LOOKUP_INPUT_DST_ADDR,
2381                                                   LOOKUP_TABLE_FROM_CONFIG,
2382                                                   dpo);
2383                 break;
2384             case FIB_FORW_CHAIN_TYPE_MPLS_EOS:
2385             case FIB_FORW_CHAIN_TYPE_UNICAST_IP4:
2386             case FIB_FORW_CHAIN_TYPE_UNICAST_IP6:
2387                 dpo_copy(dpo, &path->fp_dpo);
2388                 break;
2389             case FIB_FORW_CHAIN_TYPE_MCAST_IP4:
2390             case FIB_FORW_CHAIN_TYPE_MCAST_IP6:
2391             case FIB_FORW_CHAIN_TYPE_BIER:
2392                 break;
2393             case FIB_FORW_CHAIN_TYPE_ETHERNET:
2394             case FIB_FORW_CHAIN_TYPE_NSH:
2395                 ASSERT(0);
2396                 break;
2397             }
2398             break;
2399         case FIB_PATH_TYPE_EXCLUSIVE:
2400             dpo_copy(dpo, &path->exclusive.fp_ex_dpo);
2401             break;
2402         case FIB_PATH_TYPE_ATTACHED:
2403             if (DPO_PROTO_ETHERNET == path->fp_nh_proto)
2404             {
2405                 dpo_copy(dpo, &path->fp_dpo);
2406                 break;
2407             }
2408             switch (fct)
2409             {
2410             case FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS:
2411             case FIB_FORW_CHAIN_TYPE_UNICAST_IP4:
2412             case FIB_FORW_CHAIN_TYPE_UNICAST_IP6:
2413             case FIB_FORW_CHAIN_TYPE_MPLS_EOS:
2414             case FIB_FORW_CHAIN_TYPE_ETHERNET:
2415             case FIB_FORW_CHAIN_TYPE_NSH:
2416             case FIB_FORW_CHAIN_TYPE_BIER:
2417                 {
2418                     adj_index_t ai;
2419
2420                     /*
2421                      * get an appropriate link-type adj.
2422                      */
2423                     ai = fib_path_attached_get_adj(
2424                             path,
2425                             fib_forw_chain_type_to_link_type(fct));
2426                     dpo_set(dpo, DPO_ADJACENCY,
2427                             fib_forw_chain_type_to_dpo_proto(fct), ai);
2428                     adj_unlock(ai);
2429                     break;
2430                 }
2431             case FIB_FORW_CHAIN_TYPE_MCAST_IP4:
2432             case FIB_FORW_CHAIN_TYPE_MCAST_IP6:
2433                 {
2434                     adj_index_t ai;
2435
2436                     /*
2437                      * Create the adj needed for sending IP multicast traffic
2438                      */
2439                     ai = adj_mcast_add_or_lock(dpo_proto_to_fib(path->fp_nh_proto),
2440                                                fib_forw_chain_type_to_link_type(fct),
2441                                                path->attached.fp_interface);
2442                     dpo_set(dpo, DPO_ADJACENCY,
2443                             fib_forw_chain_type_to_dpo_proto(fct),
2444                             ai);
2445                     adj_unlock(ai);
2446                 }
2447                 break;
2448             }
2449             break;
2450         case FIB_PATH_TYPE_INTF_RX:
2451             /*
2452              * Create the interface receive DPO for this chain type
2453              */
2454             interface_rx_dpo_add_or_lock(fib_forw_chain_type_to_dpo_proto(fct),
2455                                          path->intf_rx.fp_interface,
2456                                          dpo);
2457             break;
2458         case FIB_PATH_TYPE_UDP_ENCAP:
2459             udp_encap_contribute_forwarding(path->udp_encap.fp_udp_encap_id,
2460                                             path->fp_nh_proto,
2461                                             dpo);
2462             break;
2463         case FIB_PATH_TYPE_RECEIVE:
2464         case FIB_PATH_TYPE_SPECIAL:
2465             dpo_copy(dpo, &path->fp_dpo);
2466             break;
2467         }
2468     }
2469 }
2470
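/*
 * fib_path_append_nh_for_multipath_hash
 *
 * If the path is resolved, append its weight, index and contributed DPO
 * to the vector of load-balance buckets passed, and return the (possibly
 * reallocated) vector.
 */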
2471 load_balance_path_t *
2472 fib_path_append_nh_for_multipath_hash (fib_node_index_t path_index,
2473                                        fib_forward_chain_type_t fct,
2474                                        load_balance_path_t *hash_key)
2475 {
2476     load_balance_path_t *mnh;
2477     fib_path_t *path;
2478
2479     path = fib_path_get(path_index);
2480
2481     ASSERT(path);
2482
2483     if (fib_path_is_resolved(path_index))
2484     {
2485         vec_add2(hash_key, mnh, 1);
2486
2487         mnh->path_weight = path->fp_weight;
2488         mnh->path_index = path_index;
2489         fib_path_contribute_forwarding(path_index, fct, &mnh->path_dpo);
2490     }
2491
2492     return (hash_key);
2493 }
2494
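/*
 * A path is recursion-constrained if it is recursive and has either the
 * resolve-via-attached or resolve-via-host flag set.
 */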
2495 int
2496 fib_path_is_recursive_constrained (fib_node_index_t path_index)
2497 {
2498     fib_path_t *path;
2499
2500     path = fib_path_get(path_index);
2501
2502     return ((FIB_PATH_TYPE_RECURSIVE == path->fp_type) &&
2503             ((path->fp_cfg_flags & FIB_PATH_CFG_FLAG_RESOLVE_ATTACHED) ||
2504              (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_RESOLVE_HOST)));
2505 }
2506
2507 int
2508 fib_path_is_exclusive (fib_node_index_t path_index)
2509 {
2510     fib_path_t *path;
2511
2512     path = fib_path_get(path_index);
2513
2514     return (FIB_PATH_TYPE_EXCLUSIVE == path->fp_type);
2515 }
2516
2517 int
2518 fib_path_is_deag (fib_node_index_t path_index)
2519 {
2520     fib_path_t *path;
2521
2522     path = fib_path_get(path_index);
2523
2524     return (FIB_PATH_TYPE_DEAG == path->fp_type);
2525 }
2526
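/*
 * A path is resolved if it has a valid DPO, its resolved operational flag
 * is set, and it is neither looped nor a permanent drop.
 */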
2527 int
2528 fib_path_is_resolved (fib_node_index_t path_index)
2529 {
2530     fib_path_t *path;
2531
2532     path = fib_path_get(path_index);
2533
2534     return (dpo_id_is_valid(&path->fp_dpo) &&
2535             (path->fp_oper_flags & FIB_PATH_OPER_FLAG_RESOLVED) &&
2536             !fib_path_is_looped(path_index) &&
2537             !fib_path_is_permanent_drop(path));
2538 }
2539
2540 int
2541 fib_path_is_looped (fib_node_index_t path_index)
2542 {
2543     fib_path_t *path;
2544
2545     path = fib_path_get(path_index);
2546
2547     return (path->fp_oper_flags & FIB_PATH_OPER_FLAG_RECURSIVE_LOOP);
2548 }
2549
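/*
 * fib_path_encode
 *
 * fib_path_list walk callback that appends a route-path description of
 * each path to the vector passed in the walk context.
 */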
2550 fib_path_list_walk_rc_t
2551 fib_path_encode (fib_node_index_t path_list_index,
2552                  fib_node_index_t path_index,
2553                  void *ctx)
2554 {
2555     fib_route_path_encode_t **api_rpaths = ctx;
2556     fib_route_path_encode_t *api_rpath;
2557     fib_path_t *path;
2558
2559     path = fib_path_get(path_index);
2560     if (!path)
2561       return (FIB_PATH_LIST_WALK_CONTINUE);
2562     vec_add2(*api_rpaths, api_rpath, 1);
2563     api_rpath->rpath.frp_weight = path->fp_weight;
2564     api_rpath->rpath.frp_preference = path->fp_preference;
2565     api_rpath->rpath.frp_proto = path->fp_nh_proto;
2566     api_rpath->rpath.frp_sw_if_index = ~0;
2567     api_rpath->dpo = path->fp_dpo;
2568
2569     switch (path->fp_type)
2570       {
2571       case FIB_PATH_TYPE_RECEIVE:
2572         api_rpath->rpath.frp_addr = path->receive.fp_addr;
2573         api_rpath->rpath.frp_sw_if_index = path->receive.fp_interface;
2574         break;
2575       case FIB_PATH_TYPE_ATTACHED:
2576         api_rpath->rpath.frp_sw_if_index = path->attached.fp_interface;
2577         break;
2578       case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
2579         api_rpath->rpath.frp_sw_if_index = path->attached_next_hop.fp_interface;
2580         api_rpath->rpath.frp_addr = path->attached_next_hop.fp_nh;
2581         break;
2582       case FIB_PATH_TYPE_BIER_FMASK:
2583         api_rpath->rpath.frp_bier_fmask = path->bier_fmask.fp_bier_fmask;
2584         break;
2585       case FIB_PATH_TYPE_SPECIAL:
2586         break;
2587       case FIB_PATH_TYPE_DEAG:
2588         api_rpath->rpath.frp_fib_index = path->deag.fp_tbl_id;
2589         break;
2590       case FIB_PATH_TYPE_RECURSIVE:
2591         api_rpath->rpath.frp_addr = path->recursive.fp_nh.fp_ip;
2592         break;
2593       default:
2594         break;
2595       }
2596
2597     return (FIB_PATH_LIST_WALK_CONTINUE);
2598 }
2599
2600 dpo_proto_t
2601 fib_path_get_proto (fib_node_index_t path_index)
2602 {
2603     fib_path_t *path;
2604
2605     path = fib_path_get(path_index);
2606
2607     return (path->fp_nh_proto);
2608 }
2609
2610 void
2611 fib_path_module_init (void)
2612 {
2613     fib_node_register_type (FIB_NODE_TYPE_PATH, &fib_path_vft);
2614 }
2615
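/*
 * show_fib_path_command
 *
 * CLI handler for "show fib paths"; with a numeric argument show that
 * path in detail, otherwise list all paths.
 */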
2616 static clib_error_t *
2617 show_fib_path_command (vlib_main_t * vm,
2618                         unformat_input_t * input,
2619                         vlib_cli_command_t * cmd)
2620 {
2621     fib_node_index_t pi;
2622     fib_path_t *path;
2623
2624     if (unformat (input, "%d", &pi))
2625     {
2626         /*
2627          * show one in detail
2628          */
2629         if (!pool_is_free_index(fib_path_pool, pi))
2630         {
2631             path = fib_path_get(pi);
2632             u8 *s = format(NULL, "%U", format_fib_path, pi, 1);
2633             s = format(s, "children:");
2634             s = fib_node_children_format(path->fp_node.fn_children, s);
2635             vlib_cli_output (vm, "%s", s);
2636             vec_free(s);
2637         }
2638         else
2639         {
2640             vlib_cli_output (vm, "path %d invalid", pi);
2641         }
2642     }
2643     else
2644     {
2645         vlib_cli_output (vm, "FIB Paths");
2646         pool_foreach_index (pi, fib_path_pool,
2647         ({
2648             vlib_cli_output (vm, "%U", format_fib_path, pi, 0);
2649         }));
2650     }
2651
2652     return (NULL);
2653 }
2654
2655 VLIB_CLI_COMMAND (show_fib_path, static) = {
2656   .path = "show fib paths",
2657   .function = show_fib_path_command,
2658   .short_help = "show fib paths",
2659 };