BIER in non-MPLS networks
[vpp.git] / src / vnet / fib / fib_path.c
1 /*
2  * Copyright (c) 2016 Cisco and/or its affiliates.
3  * Licensed under the Apache License, Version 2.0 (the "License");
4  * you may not use this file except in compliance with the License.
5  * You may obtain a copy of the License at:
6  *
7  *     http://www.apache.org/licenses/LICENSE-2.0
8  *
9  * Unless required by applicable law or agreed to in writing, software
10  * distributed under the License is distributed on an "AS IS" BASIS,
11  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12  * See the License for the specific language governing permissions and
13  * limitations under the License.
14  */
15
16 #include <vlib/vlib.h>
17 #include <vnet/vnet.h>
18 #include <vnet/ip/format.h>
19 #include <vnet/ip/ip.h>
20 #include <vnet/dpo/drop_dpo.h>
21 #include <vnet/dpo/receive_dpo.h>
22 #include <vnet/dpo/load_balance_map.h>
23 #include <vnet/dpo/lookup_dpo.h>
24 #include <vnet/dpo/interface_rx_dpo.h>
25 #include <vnet/dpo/mpls_disposition.h>
26 #include <vnet/dpo/l2_bridge_dpo.h>
27 #include <vnet/dpo/drop_dpo.h>
28
29 #include <vnet/adj/adj.h>
30 #include <vnet/adj/adj_mcast.h>
31
32 #include <vnet/fib/fib_path.h>
33 #include <vnet/fib/fib_node.h>
34 #include <vnet/fib/fib_table.h>
35 #include <vnet/fib/fib_entry.h>
36 #include <vnet/fib/fib_path_list.h>
37 #include <vnet/fib/fib_internal.h>
38 #include <vnet/fib/fib_urpf_list.h>
39 #include <vnet/fib/mpls_fib.h>
40 #include <vnet/udp/udp_encap.h>
41 #include <vnet/bier/bier_fmask.h>
42 #include <vnet/bier/bier_table.h>
43 #include <vnet/bier/bier_imp.h>
44 #include <vnet/bier/bier_disp_table.h>
45
46 /**
47  * Enumeration of path types
48  */
49 typedef enum fib_path_type_t_ {
50     /**
51      * Marker. Add new types after this one.
52      */
53     FIB_PATH_TYPE_FIRST = 0,
54     /**
55      * Attached-nexthop. An interface and a nexthop are known.
56      */
57     FIB_PATH_TYPE_ATTACHED_NEXT_HOP = FIB_PATH_TYPE_FIRST,
58     /**
59      * attached. Only the interface is known.
60      */
61     FIB_PATH_TYPE_ATTACHED,
62     /**
63      * recursive. Only the next-hop is known.
64      */
65     FIB_PATH_TYPE_RECURSIVE,
66     /**
67      * special. nothing is known. so we drop.
68      */
69     FIB_PATH_TYPE_SPECIAL,
70     /**
71      * exclusive. user provided adj.
72      */
73     FIB_PATH_TYPE_EXCLUSIVE,
74     /**
75      * deag. Link to a lookup adj in the next table
76      */
77     FIB_PATH_TYPE_DEAG,
78     /**
79      * interface receive.
80      */
81     FIB_PATH_TYPE_INTF_RX,
82     /**
83      * udp-encap. it's via a UDP encapsulation.
84      */
85     FIB_PATH_TYPE_UDP_ENCAP,
86     /**
87      * receive. it's for-us.
88      */
89     FIB_PATH_TYPE_RECEIVE,
90     /**
91      * bier-imp. it's via a BIER imposition.
92      */
93     FIB_PATH_TYPE_BIER_IMP,
94     /**
95      * bier-table. it's via a BIER ECMP-table.
96      */
97     FIB_PATH_TYPE_BIER_TABLE,
98     /**
99      * bier-fmask. it's via a BIER f-mask.
100      */
101     FIB_PATH_TYPE_BIER_FMASK,
102     /**
103      * Marker. Add new types before this one, then update it.
104      */
105     FIB_PATH_TYPE_LAST = FIB_PATH_TYPE_BIER_FMASK,
106 } __attribute__ ((packed)) fib_path_type_t;
107
108 /**
109  * The maximum number of path_types
110  */
111 #define FIB_PATH_TYPE_MAX (FIB_PATH_TYPE_LAST + 1)
112
113 #define FIB_PATH_TYPES {                                        \
114     [FIB_PATH_TYPE_ATTACHED_NEXT_HOP] = "attached-nexthop",     \
115     [FIB_PATH_TYPE_ATTACHED]          = "attached",             \
116     [FIB_PATH_TYPE_RECURSIVE]         = "recursive",            \
117     [FIB_PATH_TYPE_SPECIAL]           = "special",              \
118     [FIB_PATH_TYPE_EXCLUSIVE]         = "exclusive",            \
119     [FIB_PATH_TYPE_DEAG]              = "deag",                 \
120     [FIB_PATH_TYPE_INTF_RX]           = "intf-rx",              \
121     [FIB_PATH_TYPE_UDP_ENCAP]         = "udp-encap",            \
122     [FIB_PATH_TYPE_RECEIVE]           = "receive",              \
123     [FIB_PATH_TYPE_BIER_IMP]          = "bier-imp",             \
124     [FIB_PATH_TYPE_BIER_TABLE]        = "bier-table",           \
125     [FIB_PATH_TYPE_BIER_FMASK]        = "bier-fmask",           \
126 }
127
128 #define FOR_EACH_FIB_PATH_TYPE(_item)           \
129     for (_item = FIB_PATH_TYPE_FIRST;           \
130          _item <= FIB_PATH_TYPE_LAST;           \
131          _item++)
132
133 /**
134  * Enumeration of path operational (i.e. derived) attributes
135  */
136 typedef enum fib_path_oper_attribute_t_ {
137     /**
138      * Marker. Add new types after this one.
139      */
140     FIB_PATH_OPER_ATTRIBUTE_FIRST = 0,
141     /**
142      * The path forms part of a recursive loop.
143      */
144     FIB_PATH_OPER_ATTRIBUTE_RECURSIVE_LOOP = FIB_PATH_OPER_ATTRIBUTE_FIRST,
145     /**
146      * The path is resolved
147      */
148     FIB_PATH_OPER_ATTRIBUTE_RESOLVED,
149     /**
150      * The path is attached, despite what the next-hop may say.
151      */
152     FIB_PATH_OPER_ATTRIBUTE_ATTACHED,
153     /**
154      * The path has become a permanent drop.
155      */
156     FIB_PATH_OPER_ATTRIBUTE_DROP,
157     /**
158      * Marker. Add new types before this one, then update it.
159      */
160     FIB_PATH_OPER_ATTRIBUTE_LAST = FIB_PATH_OPER_ATTRIBUTE_DROP,
161 } __attribute__ ((packed)) fib_path_oper_attribute_t;
162
163 /**
164  * The maximum number of path operational attributes
165  */
166 #define FIB_PATH_OPER_ATTRIBUTE_MAX (FIB_PATH_OPER_ATTRIBUTE_LAST + 1)
167
168 #define FIB_PATH_OPER_ATTRIBUTES {                                      \
169     [FIB_PATH_OPER_ATTRIBUTE_RECURSIVE_LOOP] = "recursive-loop",        \
170     [FIB_PATH_OPER_ATTRIBUTE_RESOLVED]       = "resolved",              \
171     [FIB_PATH_OPER_ATTRIBUTE_DROP]           = "drop",                  \
172 }
173
174 #define FOR_EACH_FIB_PATH_OPER_ATTRIBUTE(_item) \
175     for (_item = FIB_PATH_OPER_ATTRIBUTE_FIRST; \
176          _item <= FIB_PATH_OPER_ATTRIBUTE_LAST; \
177          _item++)
178
179 /**
180  * Path flags from the attributes
181  */
182 typedef enum fib_path_oper_flags_t_ {
183     FIB_PATH_OPER_FLAG_NONE = 0,
184     FIB_PATH_OPER_FLAG_RECURSIVE_LOOP = (1 << FIB_PATH_OPER_ATTRIBUTE_RECURSIVE_LOOP),
185     FIB_PATH_OPER_FLAG_DROP = (1 << FIB_PATH_OPER_ATTRIBUTE_DROP),
186     FIB_PATH_OPER_FLAG_RESOLVED = (1 << FIB_PATH_OPER_ATTRIBUTE_RESOLVED),
187     FIB_PATH_OPER_FLAG_ATTACHED = (1 << FIB_PATH_OPER_ATTRIBUTE_ATTACHED),
188 } __attribute__ ((packed)) fib_path_oper_flags_t;
189
190 /**
191  * A FIB path
192  */
193 typedef struct fib_path_t_ {
194     /**
195      * A path is a node in the FIB graph.
196      */
197     fib_node_t fp_node;
198
199     /**
200      * The index of the path-list to which this path belongs
201      */
202     u32 fp_pl_index;
203
204     /**
205      * This marks the start of the memory area used to hash
206      * the path
207      */
208     STRUCT_MARK(path_hash_start);
209
210     /**
211      * Configuration Flags
212      */
213     fib_path_cfg_flags_t fp_cfg_flags;
214
215     /**
216      * The type of the path. This is the selector for the union
217      */
218     fib_path_type_t fp_type;
219
220     /**
221      * The protocol of the next-hop, i.e. the address family of the
222      * next-hop's address. We can't derive this from the address itself
223      * since the address can be all zeros
224      */
225     dpo_proto_t fp_nh_proto;
226
227     /**
228      * UCMP [unnormalised] weight
229      */
230     u8 fp_weight;
231
232     /**
233      * A path preference. 0 is the best.
234      * Only paths of the best preference, that are 'up', are considered
235      * for forwarding.
236      */
237     u8 fp_preference;
238
239     /**
240      * per-type union of the data required to resolve the path
241      */
242     union {
243         struct {
244             /**
245              * The next-hop
246              */
247             ip46_address_t fp_nh;
248             /**
249              * The interface
250              */
251             u32 fp_interface;
252         } attached_next_hop;
253         struct {
254             /**
255              * The interface
256              */
257             u32 fp_interface;
258         } attached;
259         struct {
260             union
261             {
262                 /**
263                  * The next-hop
264                  */
265                 ip46_address_t fp_ip;
266                 struct {
267                     /**
268                      * The local label to resolve through.
269                      */
270                     mpls_label_t fp_local_label;
271                     /**
272                      * The EOS bit of the resolving label
273                      */
274                     mpls_eos_bit_t fp_eos;
275                 };
276             } fp_nh;
277             union {
278                 /**
279                  * The FIB table index in which to find the next-hop.
280                  */
281                 fib_node_index_t fp_tbl_id;
282                 /**
283                  * The BIER FIB the fmask is in
284                  */
285                 index_t fp_bier_fib;
286             };
287         } recursive;
288         struct {
289             /**
290              * BIER FMask ID
291              */
292             index_t fp_bier_fmask;
293         } bier_fmask;
294         struct {
295             /**
296              * The BIER table's ID
297              */
298             bier_table_id_t fp_bier_tbl;
299         } bier_table;
300         struct {
301             /**
302              * The BIER imposition object
303              * this is part of the path's key, since the index_t
304              * of an imposition object is the object's key.
305              */
306             index_t fp_bier_imp;
307         } bier_imp;
308         struct {
309             /**
310      * The FIB index in which to perform the next lookup
311              */
312             fib_node_index_t fp_tbl_id;
313             /**
314              * The RPF-ID to tag the packets with
315              */
316             fib_rpf_id_t fp_rpf_id;
317         } deag;
318         struct {
319         } special;
320         struct {
321             /**
322              * The user provided 'exclusive' DPO
323              */
324             dpo_id_t fp_ex_dpo;
325         } exclusive;
326         struct {
327             /**
328              * The interface on which the local address is configured
329              */
330             u32 fp_interface;
331             /**
332              * The next-hop
333              */
334             ip46_address_t fp_addr;
335         } receive;
336         struct {
337             /**
338              * The interface on which the packets will be input.
339              */
340             u32 fp_interface;
341         } intf_rx;
342         struct {
343             /**
344              * The UDP Encap object this path resolves through
345              */
346             u32 fp_udp_encap_id;
347         } udp_encap;
348     };
349     STRUCT_MARK(path_hash_end);
350
351     /**
352      * Members in this last section represent information that is
353      * derived during resolution. It should not be copied to new paths
354      * nor compared.
355      */
356
357     /**
358      * Operational Flags
359      */
360     fib_path_oper_flags_t fp_oper_flags;
361
362     union {
363         /**
364          * the resolving via fib. derived state, so not part
365          * of the path's hash.
366          */
367         fib_node_index_t fp_via_fib;
368         /**
369          * the resolving bier-table
370          */
371         index_t fp_via_bier_tbl;
372         /**
373          * the resolving bier-fmask
374          */
375         index_t fp_via_bier_fmask;
376     };
377
378     /**
379      * The Data-path objects through which this path resolves for IP.
380      */
381     dpo_id_t fp_dpo;
382
383     /**
384      * the index of this path in the parent's child list.
385      */
386     u32 fp_sibling;
387 } fib_path_t;
388
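/*
 * Editorial example (not part of the original source): fp_preference and
 * fp_weight are consumed by the owning path-list. Given three paths with
 * (preference, weight) of (0, 1), (0, 2) and (1, 1), only the two
 * preference-0 paths are candidates for forwarding (assuming both are
 * resolved) and they share traffic in an approximate 1:2 UCMP ratio; the
 * preference-1 path is used only if every preference-0 path becomes
 * unresolved.
 */
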
389 /*
390  * Array of strings/names for the path types and attributes
391  */
392 static const char *fib_path_type_names[] = FIB_PATH_TYPES;
393 static const char *fib_path_oper_attribute_names[] = FIB_PATH_OPER_ATTRIBUTES;
394 static const char *fib_path_cfg_attribute_names[]  = FIB_PATH_CFG_ATTRIBUTES;
395
396 /*
397  * The memory pool from which we allocate all the paths
398  */
399 static fib_path_t *fib_path_pool;
400
401 /*
402  * Debug macro
403  */
404 #ifdef FIB_DEBUG
405 #define FIB_PATH_DBG(_p, _fmt, _args...)                        \
406 {                                                               \
407     u8 *_tmp = NULL;                                            \
408     _tmp = fib_path_format(fib_path_get_index(_p), _tmp);       \
409     clib_warning("path:[%d:%U]:" _fmt,                          \
410                  fib_path_get_index(_p), format_fib_path, fib_path_get_index(_p), 0,\
411                  ##_args);                                      \
412     vec_free(_tmp);                                             \
413 }
414 #else
415 #define FIB_PATH_DBG(_p, _fmt, _args...)
416 #endif
417
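/*
 * Illustrative usage of the debug macro above (editorial addition): with
 * FIB_DEBUG defined, a call such as
 *
 *     FIB_PATH_DBG(path, "resolved via adj:%d", ai);
 *
 * logs the path's index and formatted state via clib_warning; without
 * FIB_DEBUG the macro expands to nothing, so the FIB_PATH_DBG calls in
 * the functions below cost nothing in normal builds.
 */
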
418 static fib_path_t *
419 fib_path_get (fib_node_index_t index)
420 {
421     return (pool_elt_at_index(fib_path_pool, index));
422 }
423
424 static fib_node_index_t 
425 fib_path_get_index (fib_path_t *path)
426 {
427     return (path - fib_path_pool);
428 }
429
430 static fib_node_t *
431 fib_path_get_node (fib_node_index_t index)
432 {
433     return ((fib_node_t*)fib_path_get(index));
434 }
435
436 static fib_path_t*
437 fib_path_from_fib_node (fib_node_t *node)
438 {
439     ASSERT(FIB_NODE_TYPE_PATH == node->fn_type);
440     return ((fib_path_t*)node);
441 }
442
443 u8 *
444 format_fib_path (u8 * s, va_list * args)
445 {
446     fib_node_index_t path_index = va_arg (*args, fib_node_index_t);
447     u32 indent = va_arg (*args, u32);
448     vnet_main_t * vnm = vnet_get_main();
449     fib_path_oper_attribute_t oattr;
450     fib_path_cfg_attribute_t cattr;
451     fib_path_t *path;
452
453     path = fib_path_get(path_index);
454
455     s = format (s, "%Upath:[%d] ", format_white_space, indent,
456                 fib_path_get_index(path));
457     s = format (s, "pl-index:%d ", path->fp_pl_index);
458     s = format (s, "%U ", format_dpo_proto, path->fp_nh_proto);
459     s = format (s, "weight=%d ", path->fp_weight);
460     s = format (s, "pref=%d ", path->fp_preference);
461     s = format (s, "%s: ", fib_path_type_names[path->fp_type]);
462     if (FIB_PATH_OPER_FLAG_NONE != path->fp_oper_flags) {
463         s = format(s, " oper-flags:");
464         FOR_EACH_FIB_PATH_OPER_ATTRIBUTE(oattr) {
465             if ((1<<oattr) & path->fp_oper_flags) {
466                 s = format (s, "%s,", fib_path_oper_attribute_names[oattr]);
467             }
468         }
469     }
470     if (FIB_PATH_CFG_FLAG_NONE != path->fp_cfg_flags) {
471         s = format(s, " cfg-flags:");
472         FOR_EACH_FIB_PATH_CFG_ATTRIBUTE(cattr) {
473             if ((1<<cattr) & path->fp_cfg_flags) {
474                 s = format (s, "%s,", fib_path_cfg_attribute_names[cattr]);
475             }
476         }
477     }
478     s = format(s, "\n%U", format_white_space, indent+2);
479
480     switch (path->fp_type)
481     {
482     case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
483         s = format (s, "%U", format_ip46_address,
484                     &path->attached_next_hop.fp_nh,
485                     IP46_TYPE_ANY);
486         if (path->fp_oper_flags & FIB_PATH_OPER_FLAG_DROP)
487         {
488             s = format (s, " if_index:%d", path->attached_next_hop.fp_interface);
489         }
490         else
491         {
492             s = format (s, " %U",
493                         format_vnet_sw_interface_name,
494                         vnm,
495                         vnet_get_sw_interface(
496                             vnm,
497                             path->attached_next_hop.fp_interface));
498             if (vnet_sw_interface_is_p2p(vnet_get_main(),
499                                          path->attached_next_hop.fp_interface))
500             {
501                 s = format (s, " (p2p)");
502             }
503         }
504         if (!dpo_id_is_valid(&path->fp_dpo))
505         {
506             s = format(s, "\n%Uunresolved", format_white_space, indent+2);
507         }
508         else
509         {
510             s = format(s, "\n%U%U",
511                        format_white_space, indent,
512                        format_dpo_id,
513                        &path->fp_dpo, 13);
514         }
515         break;
516     case FIB_PATH_TYPE_ATTACHED:
517         if (path->fp_oper_flags & FIB_PATH_OPER_FLAG_DROP)
518         {
519             s = format (s, "if_index:%d", path->attached_next_hop.fp_interface);
520         }
521         else
522         {
523             s = format (s, " %U",
524                         format_vnet_sw_interface_name,
525                         vnm,
526                         vnet_get_sw_interface(
527                             vnm,
528                             path->attached.fp_interface));
529         }
530         break;
531     case FIB_PATH_TYPE_RECURSIVE:
532         if (DPO_PROTO_MPLS == path->fp_nh_proto)
533         {
534             s = format (s, "via %U %U",
535                         format_mpls_unicast_label,
536                         path->recursive.fp_nh.fp_local_label,
537                         format_mpls_eos_bit,
538                         path->recursive.fp_nh.fp_eos);
539         }
540         else
541         {
542             s = format (s, "via %U",
543                         format_ip46_address,
544                         &path->recursive.fp_nh.fp_ip,
545                         IP46_TYPE_ANY);
546         }
547         s = format (s, " in fib:%d",
548                     path->recursive.fp_tbl_id,
549                     path->fp_via_fib); 
550         s = format (s, " via-fib:%d", path->fp_via_fib); 
551         s = format (s, " via-dpo:[%U:%d]",
552                     format_dpo_type, path->fp_dpo.dpoi_type, 
553                     path->fp_dpo.dpoi_index);
554
555         break;
556     case FIB_PATH_TYPE_UDP_ENCAP:
557         s = format (s, "UDP-encap ID:%d", path->udp_encap.fp_udp_encap_id);
558         break;
559     case FIB_PATH_TYPE_BIER_TABLE:
560         s = format (s, "via bier-table:[%U}",
561                     format_bier_table_id,
562                     &path->bier_table.fp_bier_tbl);
563         s = format (s, " via-dpo:[%U:%d]",
564                     format_dpo_type, path->fp_dpo.dpoi_type,
565                     path->fp_dpo.dpoi_index);
566         break;
567     case FIB_PATH_TYPE_BIER_FMASK:
568         s = format (s, "via-fmask:%d", path->bier_fmask.fp_bier_fmask); 
569         s = format (s, " via-dpo:[%U:%d]",
570                     format_dpo_type, path->fp_dpo.dpoi_type, 
571                     path->fp_dpo.dpoi_index);
572         break;
573     case FIB_PATH_TYPE_BIER_IMP:
574         s = format (s, "via %U", format_bier_imp,
575                     path->bier_imp.fp_bier_imp, 0, BIER_SHOW_BRIEF);
576         break;
577     case FIB_PATH_TYPE_RECEIVE:
578     case FIB_PATH_TYPE_INTF_RX:
579     case FIB_PATH_TYPE_SPECIAL:
580     case FIB_PATH_TYPE_DEAG:
581     case FIB_PATH_TYPE_EXCLUSIVE:
582         if (dpo_id_is_valid(&path->fp_dpo))
583         {
584             s = format(s, "%U", format_dpo_id,
585                        &path->fp_dpo, indent+2);
586         }
587         break;
588     }
589     return (s);
590 }
591
592 u8 *
593 fib_path_format (fib_node_index_t pi, u8 *s)
594 {
595     fib_path_t *path;
596
597     path = fib_path_get(pi);
598     ASSERT(NULL != path);
599
600     return (format (s, "%U", format_fib_path, path));
601 }
602
603 /*
604  * fib_path_last_lock_gone
605  *
606  * We don't share paths, we share path lists, so the [un]lock functions
607  * are no-ops
608  */
609 static void
610 fib_path_last_lock_gone (fib_node_t *node)
611 {
612     ASSERT(0);
613 }
614
615 static const adj_index_t
616 fib_path_attached_next_hop_get_adj (fib_path_t *path,
617                                     vnet_link_t link)
618 {
619     if (vnet_sw_interface_is_p2p(vnet_get_main(),
620                                  path->attached_next_hop.fp_interface))
621     {
622         /*
623          * if the interface is p2p then the adj for the specific
624          * neighbour on that link will never exist. on p2p links
625          * the subnet address (the attached route) links to the
626          * auto-adj (see below), we want that adj here too.
627          */
628         return (adj_nbr_add_or_lock(dpo_proto_to_fib(path->fp_nh_proto),
629                                     link,
630                                     &zero_addr,
631                                     path->attached_next_hop.fp_interface));
632     }
633     else
634     {
635         return (adj_nbr_add_or_lock(dpo_proto_to_fib(path->fp_nh_proto),
636                                     link,
637                                     &path->attached_next_hop.fp_nh,
638                                     path->attached_next_hop.fp_interface));
639     }
640 }
641
642 static void
643 fib_path_attached_next_hop_set (fib_path_t *path)
644 {
645     /*
646      * resolve directly via the adjacency described by the
647      * interface and next-hop
648      */
649     dpo_set(&path->fp_dpo,
650             DPO_ADJACENCY,
651             path->fp_nh_proto,
652             fib_path_attached_next_hop_get_adj(
653                  path,
654                  dpo_proto_to_link(path->fp_nh_proto)));
655
656     /*
657      * become a child of the adjacency so we receive updates
658      * when its rewrite changes
659      */
660     path->fp_sibling = adj_child_add(path->fp_dpo.dpoi_index,
661                                      FIB_NODE_TYPE_PATH,
662                                      fib_path_get_index(path));
663
664     if (!vnet_sw_interface_is_admin_up(vnet_get_main(),
665                                       path->attached_next_hop.fp_interface) ||
666         !adj_is_up(path->fp_dpo.dpoi_index))
667     {
668         path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
669     }
670 }
671
672 static const adj_index_t
673 fib_path_attached_get_adj (fib_path_t *path,
674                            vnet_link_t link)
675 {
676     if (vnet_sw_interface_is_p2p(vnet_get_main(),
677                                  path->attached.fp_interface))
678     {
679         /*
680          * point-2-point interfaces do not require a glean, since
681          * there is nothing to ARP. Install a rewrite/nbr adj instead
682          */
683         return (adj_nbr_add_or_lock(dpo_proto_to_fib(path->fp_nh_proto),
684                                     link,
685                                     &zero_addr,
686                                     path->attached.fp_interface));
687     }
688     else
689     {
690         return (adj_glean_add_or_lock(dpo_proto_to_fib(path->fp_nh_proto),
691                                       path->attached.fp_interface,
692                                       NULL));
693     }
694 }
695
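/*
 * Editorial summary (not part of the original source) of the adjacency
 * choices made by the two helpers above:
 *
 *    attached-nexthop path, p2p interface  -> nbr adj keyed on the zero address
 *    attached-nexthop path, multi-access   -> nbr adj keyed on the next-hop
 *    attached path,         p2p interface  -> nbr adj keyed on the zero address
 *    attached path,         multi-access   -> glean adj (resolve peers via ARP/ND)
 */
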
696 /*
697  * create or update the path's recursive adj
698  */
699 static void
700 fib_path_recursive_adj_update (fib_path_t *path,
701                                fib_forward_chain_type_t fct,
702                                dpo_id_t *dpo)
703 {
704     dpo_id_t via_dpo = DPO_INVALID;
705
706     /*
707      * get the DPO to resolve through from the via-entry
708      */
709     fib_entry_contribute_forwarding(path->fp_via_fib,
710                                     fct,
711                                     &via_dpo);
712
713
714     /*
715      * hope for the best - clear if restrictions apply.
716      */
717     path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RESOLVED;
718
719     /*
720      * Validate any recursion constraints and over-ride the via
721      * adj if not met
722      */
723     if (path->fp_oper_flags & FIB_PATH_OPER_FLAG_RECURSIVE_LOOP)
724     {
725         path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
726         dpo_copy(&via_dpo, drop_dpo_get(path->fp_nh_proto));
727     }
728     else if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_RESOLVE_HOST)
729     {
730         /*
731          * the via FIB must be a host route.
732          * note the via FIB just added will always be a host route
733          * since it is an RR source added host route. So what we need to
734          * check is whether the route has other sources. If it does then
735          * some other source has added it as a host route. If it doesn't
736          * then it was added only here and inherits forwarding from a cover.
737          * the cover is not a host route.
738          * The RR source is the lowest priority source, so we check if it
739          * is the best. if it is there are no other sources.
740          */
741         if (fib_entry_get_best_source(path->fp_via_fib) >= FIB_SOURCE_RR)
742         {
743             path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
744             dpo_copy(&via_dpo, drop_dpo_get(path->fp_nh_proto));
745
746             /*
747              * PIC edge trigger. let the load-balance maps know
748              */
749             load_balance_map_path_state_change(fib_path_get_index(path));
750         }
751     }
752     else if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_RESOLVE_ATTACHED)
753     {
754         /*
755          * RR source entries inherit the flags from the cover, so
756          * we can check the via directly
757          */
758         if (!(FIB_ENTRY_FLAG_ATTACHED & fib_entry_get_flags(path->fp_via_fib)))
759         {
760             path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
761             dpo_copy(&via_dpo, drop_dpo_get(path->fp_nh_proto));
762
763             /*
764              * PIC edge trigger. let the load-balance maps know
765              */
766             load_balance_map_path_state_change(fib_path_get_index(path));
767         }
768     }
769     /*
770      * check for over-riding factors on the FIB entry itself
771      */
772     if (!fib_entry_is_resolved(path->fp_via_fib))
773     {
774         path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
775         dpo_copy(&via_dpo, drop_dpo_get(path->fp_nh_proto));
776
777         /*
778          * PIC edge trigger. let the load-balance maps know
779          */
780         load_balance_map_path_state_change(fib_path_get_index(path));
781     }
782
783     /*
784      * If this path is contributing a drop, then it's not resolved
785      */
786     if (dpo_is_drop(&via_dpo) || load_balance_is_drop(&via_dpo))
787     {
788         path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
789     }
790
791     /*
792      * update the path's contributed DPO
793      */
794     dpo_copy(dpo, &via_dpo);
795
796     FIB_PATH_DBG(path, "recursive update:");
797
798     dpo_reset(&via_dpo);
799 }
800
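/*
 * Worked example for the resolve-via-host check above (editorial, not
 * part of the original source): a recursive path "via 192.0.2.1" with
 * FIB_PATH_CFG_FLAG_RESOLVE_HOST (e.g. a BGP next-hop) first creates the
 * 192.0.2.1/32 entry with the RR source, which merely inherits forwarding
 * from its covering prefix. Since the RR source is then the best (indeed
 * only) source, the path is left unresolved and stacks on the drop. Once
 * an IGP or adjacency source installs a genuine host route for 192.0.2.1,
 * the resulting back-walk re-runs this function and the path resolves
 * through that host route's forwarding.
 */
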
801 /*
802  * re-evaluate the forwarding state for a via-fmask path
803  */
804 static void
805 fib_path_bier_fmask_update (fib_path_t *path,
806                             dpo_id_t *dpo)
807 {
808     bier_fmask_contribute_forwarding(path->bier_fmask.fp_bier_fmask, dpo);
809
810     /*
811      * if we are stacking on the drop, then the path is not resolved
812      */
813     if (dpo_is_drop(dpo))
814     {
815         path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
816     }
817     else
818     {
819         path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RESOLVED;
820     }
821 }
822
823 /*
824  * fib_path_is_permanent_drop
825  *
826  * Return !0 if the path is configured to permanently drop,
827  * despite other attributes.
828  */
829 static int
830 fib_path_is_permanent_drop (fib_path_t *path)
831 {
832     return ((path->fp_cfg_flags & FIB_PATH_CFG_FLAG_DROP) ||
833             (path->fp_oper_flags & FIB_PATH_OPER_FLAG_DROP));
834 }
835
836 /*
837  * fib_path_unresolve
838  *
839  * Remove our dependency on the resolution target
840  */
841 static void
842 fib_path_unresolve (fib_path_t *path)
843 {
844     /*
845      * the forced drop path does not need unresolving
846      */
847     if (fib_path_is_permanent_drop(path))
848     {
849         return;
850     }
851
852     switch (path->fp_type)
853     {
854     case FIB_PATH_TYPE_RECURSIVE:
855         if (FIB_NODE_INDEX_INVALID != path->fp_via_fib)
856         {
857             fib_prefix_t pfx;
858
859             fib_entry_get_prefix(path->fp_via_fib, &pfx);
860             fib_entry_child_remove(path->fp_via_fib,
861                                    path->fp_sibling);
862             fib_table_entry_special_remove(path->recursive.fp_tbl_id,
863                                            &pfx,
864                                            FIB_SOURCE_RR);
865             path->fp_via_fib = FIB_NODE_INDEX_INVALID;
866         }
867         break;
868     case FIB_PATH_TYPE_BIER_FMASK:
869         bier_fmask_child_remove(path->fp_via_bier_fmask,
870                                 path->fp_sibling);
871         break;
872     case FIB_PATH_TYPE_BIER_IMP:
873         bier_imp_unlock(path->fp_dpo.dpoi_index);
874         break;
875     case FIB_PATH_TYPE_BIER_TABLE:
876         bier_table_ecmp_unlock(path->fp_via_bier_tbl);
877         break;
878     case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
879         adj_child_remove(path->fp_dpo.dpoi_index,
880                          path->fp_sibling);
881         adj_unlock(path->fp_dpo.dpoi_index);
882         break;
883     case FIB_PATH_TYPE_ATTACHED:
884         if (DPO_PROTO_ETHERNET != path->fp_nh_proto)
885         {
886             adj_child_remove(path->fp_dpo.dpoi_index,
887                              path->fp_sibling);
888             adj_unlock(path->fp_dpo.dpoi_index);
889         }
890         break;
891     case FIB_PATH_TYPE_UDP_ENCAP:
892         udp_encap_unlock_w_index(path->fp_dpo.dpoi_index);
893         break;
894     case FIB_PATH_TYPE_EXCLUSIVE:
895         dpo_reset(&path->exclusive.fp_ex_dpo);
896         break;
897     case FIB_PATH_TYPE_SPECIAL:
898     case FIB_PATH_TYPE_RECEIVE:
899     case FIB_PATH_TYPE_INTF_RX:
900     case FIB_PATH_TYPE_DEAG:
901         /*
902          * these hold only the path's DPO, which is reset below.
903          */
904         break;
905     }
906
907     /*
908      * release the adj we were holding and pick up the
909      * drop just in case.
910      */
911     dpo_reset(&path->fp_dpo);
912     path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
913
914     return;
915 }
916
917 static fib_forward_chain_type_t
918 fib_path_to_chain_type (const fib_path_t *path)
919 {
920     if (DPO_PROTO_MPLS == path->fp_nh_proto)
921     {
922         if (FIB_PATH_TYPE_RECURSIVE == path->fp_type &&
923             MPLS_EOS == path->recursive.fp_nh.fp_eos)
924         {
925             return (FIB_FORW_CHAIN_TYPE_MPLS_EOS);
926         }
927         else
928         {
929             return (FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS);
930         }
931     }
932     else
933     {
934         return (fib_forw_chain_type_from_dpo_proto(path->fp_nh_proto));
935     }
936 }
937
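/*
 * Editorial example (not part of the original source): a recursive path
 * via an MPLS local-label with the EOS bit set requests the MPLS_EOS
 * chain from its via-entry, a non-EOS label requests the MPLS_NON_EOS
 * chain, and an IP next-hop path simply requests the unicast chain that
 * matches its DPO protocol.
 */
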
938 /*
939  * fib_path_back_walk_notify
940  *
941  * A back walk has reached this path.
942  */
943 static fib_node_back_walk_rc_t
944 fib_path_back_walk_notify (fib_node_t *node,
945                            fib_node_back_walk_ctx_t *ctx)
946 {
947     fib_path_t *path;
948
949     path = fib_path_from_fib_node(node);
950
951     switch (path->fp_type)
952     {
953     case FIB_PATH_TYPE_RECURSIVE:
954         if (FIB_NODE_BW_REASON_FLAG_EVALUATE & ctx->fnbw_reason)
955         {
956             /*
957              * modify the recursive adjacency to use the new forwarding
958              * of the via-fib.
959              * this update is visible to packets in flight in the DP.
960              */
961             fib_path_recursive_adj_update(
962                 path,
963                 fib_path_to_chain_type(path),
964                 &path->fp_dpo);
965         }
966         if ((FIB_NODE_BW_REASON_FLAG_ADJ_UPDATE & ctx->fnbw_reason) ||
967             (FIB_NODE_BW_REASON_FLAG_ADJ_DOWN   & ctx->fnbw_reason))
968         {
969             /*
970              * ADJ updates (complete<->incomplete) do not need to propagate to
971              * recursive entries.
972              * The only reason it's needed as far back as here is that the adj
973              * and the incomplete adj are a different DPO type, so the LBs need
974              * to re-stack.
975              * If this walk was quashed in the fib_entry, then any non-fib_path
976              * children (like tunnels that collapse out the LB when they stack)
977              * would not see the update.
978              */
979             return (FIB_NODE_BACK_WALK_CONTINUE);
980         }
981         break;
982     case FIB_PATH_TYPE_BIER_FMASK:
983         if (FIB_NODE_BW_REASON_FLAG_EVALUATE & ctx->fnbw_reason)
984         {
985             /*
986              * update to use the BIER fmask's new forwarding
987              */
988             fib_path_bier_fmask_update(path, &path->fp_dpo);
989         }
990         if ((FIB_NODE_BW_REASON_FLAG_ADJ_UPDATE & ctx->fnbw_reason) ||
991             (FIB_NODE_BW_REASON_FLAG_ADJ_DOWN   & ctx->fnbw_reason))
992         {
993             /*
994              * ADJ updates (complete<->incomplete) do not need to propagate to
995              * recursive entries.
996              * The only reason it's needed as far back as here is that the adj
997              * and the incomplete adj are a different DPO type, so the LBs need
998              * to re-stack.
999              * If this walk was quashed in the fib_entry, then any non-fib_path
1000              * children (like tunnels that collapse out the LB when they stack)
1001              * would not see the update.
1002              */
1003             return (FIB_NODE_BACK_WALK_CONTINUE);
1004         }
1005         break;
1006     case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
1007         /*
1008          * FIXME comment
1009          * ADJ_UPDATE backwalks pass silently through here and up to
1010          * the path-list when the multipath adj collapse occurs.
1011          * The reason we do this is that the assumption is that VPP
1012          * runs in an environment where the Control-Plane is remote
1013          * and hence reacts slowly to link up down. In order to remove
1014          * this down link from the ECMP set quickly, we back-walk.
1015          * VPP also has dedicated CPUs, so we are not stealing resources
1016          * from the CP to do so.
1017          */
1018         if (FIB_NODE_BW_REASON_FLAG_INTERFACE_UP & ctx->fnbw_reason)
1019         {
1020             if (path->fp_oper_flags & FIB_PATH_OPER_FLAG_RESOLVED)
1021             {
1022                 /*
1023                  * already resolved. no need to walk back again
1024                  */
1025                 return (FIB_NODE_BACK_WALK_CONTINUE);
1026             }
1027             path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RESOLVED;
1028         }
1029         if (FIB_NODE_BW_REASON_FLAG_INTERFACE_DOWN & ctx->fnbw_reason)
1030         {
1031             if (!(path->fp_oper_flags & FIB_PATH_OPER_FLAG_RESOLVED))
1032             {
1033                 /*
1034                  * already unresolved. no need to walk back again
1035                  */
1036                 return (FIB_NODE_BACK_WALK_CONTINUE);
1037             }
1038             path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
1039         }
1040         if (FIB_NODE_BW_REASON_FLAG_INTERFACE_DELETE & ctx->fnbw_reason)
1041         {
1042             /*
1043              * The interface this path resolves through has been deleted.
1044              * This will leave the path in a permanent drop state. The route
1045              * needs to be removed and readded (and hence the path-list deleted)
1046              * before it can forward again.
1047              */
1048             fib_path_unresolve(path);
1049             path->fp_oper_flags |= FIB_PATH_OPER_FLAG_DROP;
1050         }
1051         if (FIB_NODE_BW_REASON_FLAG_ADJ_UPDATE & ctx->fnbw_reason)
1052         {
1053             /*
1054              * restack the DPO to pick up the correct DPO sub-type
1055              */
1056             uword if_is_up;
1057             adj_index_t ai;
1058
1059             if_is_up = vnet_sw_interface_is_admin_up(
1060                            vnet_get_main(),
1061                            path->attached_next_hop.fp_interface);
1062
1063             ai = fib_path_attached_next_hop_get_adj(
1064                      path,
1065                      dpo_proto_to_link(path->fp_nh_proto));
1066
1067             path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
1068             if (if_is_up && adj_is_up(ai))
1069             {
1070                 path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RESOLVED;
1071             }
1072
1073             dpo_set(&path->fp_dpo, DPO_ADJACENCY, path->fp_nh_proto, ai);
1074             adj_unlock(ai);
1075
1076             if (!if_is_up)
1077             {
1078                 /*
1079                  * If the interface is not up there is no reason to walk
1080                  * back to children. if we did they would only evaluate
1081                  * that this path is unresolved and hence it would
1082                  * not contribute the adjacency - so it would be wasted
1083                  * CPU time.
1084                  */
1085                 return (FIB_NODE_BACK_WALK_CONTINUE);
1086             }
1087         }
1088         if (FIB_NODE_BW_REASON_FLAG_ADJ_DOWN & ctx->fnbw_reason)
1089         {
1090             if (!(path->fp_oper_flags & FIB_PATH_OPER_FLAG_RESOLVED))
1091             {
1092                 /*
1093                  * already unresolved. no need to walk back again
1094                  */
1095                 return (FIB_NODE_BACK_WALK_CONTINUE);
1096             }
1097             /*
1098              * the adj has gone down. the path is no longer resolved.
1099              */
1100             path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
1101         }
1102         break;
1103     case FIB_PATH_TYPE_ATTACHED:
1104         /*
1105          * FIXME; this could schedule a lower priority walk, since attached
1106          * routes are not usually in ECMP configurations so the backwalk to
1107          * the FIB entry does not need to be high priority
1108          */
1109         if (FIB_NODE_BW_REASON_FLAG_INTERFACE_UP & ctx->fnbw_reason)
1110         {
1111             path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RESOLVED;
1112         }
1113         if (FIB_NODE_BW_REASON_FLAG_INTERFACE_DOWN & ctx->fnbw_reason)
1114         {
1115             path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
1116         }
1117         if (FIB_NODE_BW_REASON_FLAG_INTERFACE_DELETE & ctx->fnbw_reason)
1118         {
1119             fib_path_unresolve(path);
1120             path->fp_oper_flags |= FIB_PATH_OPER_FLAG_DROP;
1121         }
1122         break;
1123     case FIB_PATH_TYPE_UDP_ENCAP:
1124     {
1125         dpo_id_t via_dpo = DPO_INVALID;
1126
1127         /*
1128          * hope for the best - clear if restrictions apply.
1129          */
1130         path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RESOLVED;
1131
1132         udp_encap_contribute_forwarding(path->udp_encap.fp_udp_encap_id,
1133                                         path->fp_nh_proto,
1134                                         &via_dpo);
1135         /*
1136          * If this path is contributing a drop, then it's not resolved
1137          */
1138         if (dpo_is_drop(&via_dpo) || load_balance_is_drop(&via_dpo))
1139         {
1140             path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
1141         }
1142
1143         /*
1144          * update the path's contributed DPO
1145          */
1146         dpo_copy(&path->fp_dpo, &via_dpo);
1147         dpo_reset(&via_dpo);
1148         break;
1149     }
1150     case FIB_PATH_TYPE_INTF_RX:
1151         ASSERT(0);
1152     case FIB_PATH_TYPE_DEAG:
1153         /*
1154          * FIXME When VRF delete is allowed this will need a poke.
1155          */
1156     case FIB_PATH_TYPE_SPECIAL:
1157     case FIB_PATH_TYPE_RECEIVE:
1158     case FIB_PATH_TYPE_EXCLUSIVE:
1159     case FIB_PATH_TYPE_BIER_TABLE:
1160     case FIB_PATH_TYPE_BIER_IMP:
1161         /*
1162          * these path types have no parents. so to be
1163          * walked from one is unexpected.
1164          */
1165         ASSERT(0);
1166         break;
1167     }
1168
1169     /*
1170      * propagate the backwalk further to the path-list
1171      */
1172     fib_path_list_back_walk(path->fp_pl_index, ctx);
1173
1174     return (FIB_NODE_BACK_WALK_CONTINUE);
1175 }
1176
1177 static void
1178 fib_path_memory_show (void)
1179 {
1180     fib_show_memory_usage("Path",
1181                           pool_elts(fib_path_pool),
1182                           pool_len(fib_path_pool),
1183                           sizeof(fib_path_t));
1184 }
1185
1186 /*
1187  * The FIB path's graph node virtual function table
1188  */
1189 static const fib_node_vft_t fib_path_vft = {
1190     .fnv_get = fib_path_get_node,
1191     .fnv_last_lock = fib_path_last_lock_gone,
1192     .fnv_back_walk = fib_path_back_walk_notify,
1193     .fnv_mem_show = fib_path_memory_show,
1194 };
1195
1196 static fib_path_cfg_flags_t
1197 fib_path_route_flags_to_cfg_flags (const fib_route_path_t *rpath)
1198 {
1199     fib_path_cfg_flags_t cfg_flags = FIB_PATH_CFG_FLAG_NONE;
1200
1201     if (rpath->frp_flags & FIB_ROUTE_PATH_RESOLVE_VIA_HOST)
1202         cfg_flags |= FIB_PATH_CFG_FLAG_RESOLVE_HOST;
1203     if (rpath->frp_flags & FIB_ROUTE_PATH_RESOLVE_VIA_ATTACHED)
1204         cfg_flags |= FIB_PATH_CFG_FLAG_RESOLVE_ATTACHED;
1205     if (rpath->frp_flags & FIB_ROUTE_PATH_LOCAL)
1206         cfg_flags |= FIB_PATH_CFG_FLAG_LOCAL;
1207     if (rpath->frp_flags & FIB_ROUTE_PATH_ATTACHED)
1208         cfg_flags |= FIB_PATH_CFG_FLAG_ATTACHED;
1209     if (rpath->frp_flags & FIB_ROUTE_PATH_INTF_RX)
1210         cfg_flags |= FIB_PATH_CFG_FLAG_INTF_RX;
1211     if (rpath->frp_flags & FIB_ROUTE_PATH_RPF_ID)
1212         cfg_flags |= FIB_PATH_CFG_FLAG_RPF_ID;
1213     if (rpath->frp_flags & FIB_ROUTE_PATH_EXCLUSIVE)
1214         cfg_flags |= FIB_PATH_CFG_FLAG_EXCLUSIVE;
1215     if (rpath->frp_flags & FIB_ROUTE_PATH_DROP)
1216         cfg_flags |= FIB_PATH_CFG_FLAG_DROP;
1217     if (rpath->frp_flags & FIB_ROUTE_PATH_SOURCE_LOOKUP)
1218         cfg_flags |= FIB_PATH_CFG_FLAG_DEAG_SRC;
1219
1220     return (cfg_flags);
1221 }
1222
1223 /*
1224  * fib_path_create
1225  *
1226  * Create and initialise a new path object.
1227  * return the index of the path.
1228  */
1229 fib_node_index_t
1230 fib_path_create (fib_node_index_t pl_index,
1231                  const fib_route_path_t *rpath)
1232 {
1233     fib_path_t *path;
1234
1235     pool_get(fib_path_pool, path);
1236     memset(path, 0, sizeof(*path));
1237
1238     fib_node_init(&path->fp_node,
1239                   FIB_NODE_TYPE_PATH);
1240
1241     dpo_reset(&path->fp_dpo);
1242     path->fp_pl_index = pl_index;
1243     path->fp_nh_proto = rpath->frp_proto;
1244     path->fp_via_fib = FIB_NODE_INDEX_INVALID;
1245     path->fp_weight = rpath->frp_weight;
1246     if (0 == path->fp_weight)
1247     {
1248         /*
1249          * a weight of 0 is a meaningless value. We could either reject it, and thus force
1250          * clients to always use 1, or we can accept it and fix it up appropriately.
1251          */
1252         path->fp_weight = 1;
1253     }
1254     path->fp_preference = rpath->frp_preference;
1255     path->fp_cfg_flags = fib_path_route_flags_to_cfg_flags(rpath);
1256
1257     /*
1258      * deduce the path's type from the parameters and save what is needed.
1259      */
1260     if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_LOCAL)
1261     {
1262         path->fp_type = FIB_PATH_TYPE_RECEIVE;
1263         path->receive.fp_interface = rpath->frp_sw_if_index;
1264         path->receive.fp_addr = rpath->frp_addr;
1265     }
1266     else if (rpath->frp_flags & FIB_ROUTE_PATH_UDP_ENCAP)
1267     {
1268         path->fp_type = FIB_PATH_TYPE_UDP_ENCAP;
1269         path->udp_encap.fp_udp_encap_id = rpath->frp_udp_encap_id;
1270     }
1271     else if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_INTF_RX)
1272     {
1273         path->fp_type = FIB_PATH_TYPE_INTF_RX;
1274         path->intf_rx.fp_interface = rpath->frp_sw_if_index;
1275     }
1276     else if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_RPF_ID)
1277     {
1278         path->fp_type = FIB_PATH_TYPE_DEAG;
1279         path->deag.fp_tbl_id = rpath->frp_fib_index;
1280         path->deag.fp_rpf_id = rpath->frp_rpf_id;
1281     }
1282     else if (rpath->frp_flags & FIB_ROUTE_PATH_BIER_FMASK)
1283     {
1284         path->fp_type = FIB_PATH_TYPE_BIER_FMASK;
1285         path->bier_fmask.fp_bier_fmask = rpath->frp_bier_fmask;
1286     }
1287     else if (rpath->frp_flags & FIB_ROUTE_PATH_BIER_IMP)
1288     {
1289         path->fp_type = FIB_PATH_TYPE_BIER_IMP;
1290         path->bier_imp.fp_bier_imp = rpath->frp_bier_imp;
1291     }
1292     else if (rpath->frp_flags & FIB_ROUTE_PATH_BIER_TABLE)
1293     {
1294         path->fp_type = FIB_PATH_TYPE_BIER_TABLE;
1295         path->bier_table.fp_bier_tbl = rpath->frp_bier_tbl;
1296     }
1297     else if (~0 != rpath->frp_sw_if_index)
1298     {
1299         if (ip46_address_is_zero(&rpath->frp_addr))
1300         {
1301             path->fp_type = FIB_PATH_TYPE_ATTACHED;
1302             path->attached.fp_interface = rpath->frp_sw_if_index;
1303         }
1304         else
1305         {
1306             path->fp_type = FIB_PATH_TYPE_ATTACHED_NEXT_HOP;
1307             path->attached_next_hop.fp_interface = rpath->frp_sw_if_index;
1308             path->attached_next_hop.fp_nh = rpath->frp_addr;
1309         }
1310     }
1311     else
1312     {
1313         if (ip46_address_is_zero(&rpath->frp_addr))
1314         {
1315             if (~0 == rpath->frp_fib_index)
1316             {
1317                 path->fp_type = FIB_PATH_TYPE_SPECIAL;
1318             }
1319             else
1320             {
1321                 path->fp_type = FIB_PATH_TYPE_DEAG;
1322                 path->deag.fp_tbl_id = rpath->frp_fib_index;
1323             }           
1324         }
1325         else
1326         {
1327             path->fp_type = FIB_PATH_TYPE_RECURSIVE;
1328             if (DPO_PROTO_MPLS == path->fp_nh_proto)
1329             {
1330                 path->recursive.fp_nh.fp_local_label = rpath->frp_local_label;
1331                 path->recursive.fp_nh.fp_eos = rpath->frp_eos;
1332             }
1333             else
1334             {
1335                 path->recursive.fp_nh.fp_ip = rpath->frp_addr;
1336             }
1337             path->recursive.fp_tbl_id = rpath->frp_fib_index;
1338         }
1339     }
1340
1341     FIB_PATH_DBG(path, "create");
1342
1343     return (fib_path_get_index(path));
1344 }
1345
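/*
 * Illustrative sketch (editorial, not part of the original source) of how
 * a caller might drive fib_path_create(); the addresses and indices are
 * made-up examples, the field names are those consumed above:
 *
 *     fib_route_path_t rpath = {
 *         .frp_proto = DPO_PROTO_IP4,
 *         .frp_addr = nh_addr,            // non-zero next-hop, e.g. 10.0.0.1
 *         .frp_sw_if_index = sw_if_index, // a valid interface
 *         .frp_fib_index = ~0,
 *         .frp_weight = 1,
 *         .frp_preference = 0,
 *     };
 *     fib_node_index_t pi = fib_path_create(pl_index, &rpath);
 *
 * With a non-zero frp_addr and a valid frp_sw_if_index the deduction
 * above yields FIB_PATH_TYPE_ATTACHED_NEXT_HOP; zeroing frp_addr gives
 * FIB_PATH_TYPE_ATTACHED, and an invalid (~0) frp_sw_if_index with a
 * non-zero address gives FIB_PATH_TYPE_RECURSIVE.
 */
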
1346 /*
1347  * fib_path_create_special
1348  *
1349  * Create and initialise a new path object.
1350  * return the index of the path.
1351  */
1352 fib_node_index_t
1353 fib_path_create_special (fib_node_index_t pl_index,
1354                          dpo_proto_t nh_proto,
1355                          fib_path_cfg_flags_t flags,
1356                          const dpo_id_t *dpo)
1357 {
1358     fib_path_t *path;
1359
1360     pool_get(fib_path_pool, path);
1361     memset(path, 0, sizeof(*path));
1362
1363     fib_node_init(&path->fp_node,
1364                   FIB_NODE_TYPE_PATH);
1365     dpo_reset(&path->fp_dpo);
1366
1367     path->fp_pl_index = pl_index;
1368     path->fp_weight = 1;
1369     path->fp_preference = 0;
1370     path->fp_nh_proto = nh_proto;
1371     path->fp_via_fib = FIB_NODE_INDEX_INVALID;
1372     path->fp_cfg_flags = flags;
1373
1374     if (FIB_PATH_CFG_FLAG_DROP & flags)
1375     {
1376         path->fp_type = FIB_PATH_TYPE_SPECIAL;
1377     }
1378     else if (FIB_PATH_CFG_FLAG_LOCAL & flags)
1379     {
1380         path->fp_type = FIB_PATH_TYPE_RECEIVE;
1381         path->attached.fp_interface = FIB_NODE_INDEX_INVALID;
1382     }
1383     else
1384     {
1385         path->fp_type = FIB_PATH_TYPE_EXCLUSIVE;
1386         ASSERT(NULL != dpo);
1387         dpo_copy(&path->exclusive.fp_ex_dpo, dpo);
1388     }
1389
1390     return (fib_path_get_index(path));
1391 }
1392
1393 /*
1394  * fib_path_copy
1395  *
1396  * Copy a path. return index of new path.
1397  */
1398 fib_node_index_t
1399 fib_path_copy (fib_node_index_t path_index,
1400                fib_node_index_t path_list_index)
1401 {
1402     fib_path_t *path, *orig_path;
1403
1404     pool_get(fib_path_pool, path);
1405
1406     orig_path = fib_path_get(path_index);
1407     ASSERT(NULL != orig_path);
1408
1409     memcpy(path, orig_path, sizeof(*path));
1410
1411     FIB_PATH_DBG(path, "create-copy:%d", path_index);
1412
1413     /*
1414      * reset the dynamic section
1415      */
1416     fib_node_init(&path->fp_node, FIB_NODE_TYPE_PATH);
1417     path->fp_oper_flags     = FIB_PATH_OPER_FLAG_NONE;
1418     path->fp_pl_index  = path_list_index;
1419     path->fp_via_fib   = FIB_NODE_INDEX_INVALID;
1420     memset(&path->fp_dpo, 0, sizeof(path->fp_dpo));
1421     dpo_reset(&path->fp_dpo);
1422
1423     return (fib_path_get_index(path));
1424 }
1425
1426 /*
1427  * fib_path_destroy
1428  *
1429  * destroy a path that is no longer required
1430  */
1431 void
1432 fib_path_destroy (fib_node_index_t path_index)
1433 {
1434     fib_path_t *path;
1435
1436     path = fib_path_get(path_index);
1437
1438     ASSERT(NULL != path);
1439     FIB_PATH_DBG(path, "destroy");
1440
1441     fib_path_unresolve(path);
1442
1443     fib_node_deinit(&path->fp_node);
1444     pool_put(fib_path_pool, path);
1445 }
1446
1447 /*
1448  * fib_path_hash
1449  *
1450  * compute the hash of a path's key fields
1451  */
1452 uword
1453 fib_path_hash (fib_node_index_t path_index)
1454 {
1455     fib_path_t *path;
1456
1457     path = fib_path_get(path_index);
1458
1459     return (hash_memory(STRUCT_MARK_PTR(path, path_hash_start),
1460                         (STRUCT_OFFSET_OF(fib_path_t, path_hash_end) -
1461                          STRUCT_OFFSET_OF(fib_path_t, path_hash_start)),
1462                         0));
1463 }
1464
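/*
 * Editorial note (not part of the original source): only the fields
 * between the path_hash_start and path_hash_end markers of fib_path_t
 * contribute to this hash, so paths that differ only in derived state
 * (fp_oper_flags, fp_via_fib, fp_dpo, fp_sibling) hash, and compare,
 * identically regardless of their current resolution state.
 */
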
1465 /*
1466  * fib_path_cmp_i
1467  *
1468  * Compare two paths for equivalence.
1469  */
1470 static int
1471 fib_path_cmp_i (const fib_path_t *path1,
1472                 const fib_path_t *path2)
1473 {
1474     int res;
1475
1476     res = 1;
1477
1478     /*
1479      * paths of different types and protocols are not equal.
1480      * paths that differ only in weight and/or preference are still the same path.
1481      */
1482     if (path1->fp_type != path2->fp_type)
1483     {
1484         res = (path1->fp_type - path2->fp_type);
1485     }
1486     else if (path1->fp_nh_proto != path2->fp_nh_proto)
1487     {
1488         res = (path1->fp_nh_proto - path2->fp_nh_proto);
1489     }
1490     else
1491     {
1492         /*
1493          * both paths are of the same type.
1494          * consider each type and its attributes in turn.
1495          */
1496         switch (path1->fp_type)
1497         {
1498         case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
1499             res = ip46_address_cmp(&path1->attached_next_hop.fp_nh,
1500                                    &path2->attached_next_hop.fp_nh);
1501             if (0 == res) {
1502                 res = (path1->attached_next_hop.fp_interface -
1503                        path2->attached_next_hop.fp_interface);
1504             }
1505             break;
1506         case FIB_PATH_TYPE_ATTACHED:
1507             res = (path1->attached.fp_interface -
1508                    path2->attached.fp_interface);
1509             break;
1510         case FIB_PATH_TYPE_RECURSIVE:
1511             res = ip46_address_cmp(&path1->recursive.fp_nh,
1512                                    &path2->recursive.fp_nh);
1513  
1514             if (0 == res)
1515             {
1516                 res = (path1->recursive.fp_tbl_id - path2->recursive.fp_tbl_id);
1517             }
1518             break;
1519         case FIB_PATH_TYPE_BIER_FMASK:
1520             res = (path1->bier_fmask.fp_bier_fmask -
1521                    path2->bier_fmask.fp_bier_fmask);
1522             break;
1523         case FIB_PATH_TYPE_BIER_IMP:
1524             res = (path1->bier_imp.fp_bier_imp -
1525                    path2->bier_imp.fp_bier_imp);
1526             break;
1527         case FIB_PATH_TYPE_BIER_TABLE:
1528             res = bier_table_id_cmp(&path1->bier_table.fp_bier_tbl,
1529                                     &path2->bier_table.fp_bier_tbl);
1530             break;
1531         case FIB_PATH_TYPE_DEAG:
1532             res = (path1->deag.fp_tbl_id - path2->deag.fp_tbl_id);
1533             if (0 == res)
1534             {
1535                 res = (path1->deag.fp_rpf_id - path2->deag.fp_rpf_id);
1536             }
1537             break;
1538         case FIB_PATH_TYPE_INTF_RX:
1539             res = (path1->intf_rx.fp_interface - path2->intf_rx.fp_interface);
1540             break;
1541         case FIB_PATH_TYPE_UDP_ENCAP:
1542             res = (path1->udp_encap.fp_udp_encap_id - path2->udp_encap.fp_udp_encap_id);
1543             break;
1544         case FIB_PATH_TYPE_SPECIAL:
1545         case FIB_PATH_TYPE_RECEIVE:
1546         case FIB_PATH_TYPE_EXCLUSIVE:
1547             res = 0;
1548             break;
1549         }
1550     }
1551     return (res);
1552 }
1553
1554 /*
1555  * fib_path_cmp_for_sort
1556  *
1557  * Compare two paths for equivalence. Used during path sorting.
1558  * As usual 0 means equal.
1559  */
1560 int
1561 fib_path_cmp_for_sort (void * v1,
1562                        void * v2)
1563 {
1564     fib_node_index_t *pi1 = v1, *pi2 = v2;
1565     fib_path_t *path1, *path2;
1566
1567     path1 = fib_path_get(*pi1);
1568     path2 = fib_path_get(*pi2);
1569
1570     /*
1571      * when sorting paths we want the most preferred paths
1572      * first, so that the set of choices built is in preference order
1573      */
1574     if (path1->fp_preference != path2->fp_preference)
1575     {
1576         return (path1->fp_preference - path2->fp_preference);
1577     }
1578
1579     return (fib_path_cmp_i(path1, path2));
1580 }
1581
1582 /*
1583  * fib_path_cmp
1584  *
1585  * Compare two paths for equivalence.
1586  */
1587 int
1588 fib_path_cmp (fib_node_index_t pi1,
1589               fib_node_index_t pi2)
1590 {
1591     fib_path_t *path1, *path2;
1592
1593     path1 = fib_path_get(pi1);
1594     path2 = fib_path_get(pi2);
1595
1596     return (fib_path_cmp_i(path1, path2));
1597 }
1598
1599 int
1600 fib_path_cmp_w_route_path (fib_node_index_t path_index,
1601                            const fib_route_path_t *rpath)
1602 {
1603     fib_path_t *path;
1604     int res;
1605
1606     path = fib_path_get(path_index);
1607
1608     res = 1;
1609
1610     if (path->fp_weight != rpath->frp_weight)
1611     {
1612         res = (path->fp_weight - rpath->frp_weight);
1613     }
1614     else
1615     {
1616         /*
1617          * both paths are of the same type.
1618          * consider each type and its attributes in turn.
1619          */
1620         switch (path->fp_type)
1621         {
1622         case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
1623             res = ip46_address_cmp(&path->attached_next_hop.fp_nh,
1624                                    &rpath->frp_addr);
1625             if (0 == res)
1626             {
1627                 res = (path->attached_next_hop.fp_interface -
1628                        rpath->frp_sw_if_index);
1629             }
1630             break;
1631         case FIB_PATH_TYPE_ATTACHED:
1632             res = (path->attached.fp_interface - rpath->frp_sw_if_index);
1633             break;
1634         case FIB_PATH_TYPE_RECURSIVE:
1635             if (DPO_PROTO_MPLS == path->fp_nh_proto)
1636             {
1637                 res = path->recursive.fp_nh.fp_local_label - rpath->frp_local_label;
1638
1639                 if (res == 0)
1640                 {
1641                     res = path->recursive.fp_nh.fp_eos - rpath->frp_eos;
1642                 }
1643             }
1644             else
1645             {
1646                 res = ip46_address_cmp(&path->recursive.fp_nh.fp_ip,
1647                                        &rpath->frp_addr);
1648             }
1649
1650             if (0 == res)
1651             {
1652                 res = (path->recursive.fp_tbl_id - rpath->frp_fib_index);
1653             }
1654             break;
1655         case FIB_PATH_TYPE_BIER_FMASK:
1656             res = (path->bier_fmask.fp_bier_fmask - rpath->frp_bier_fmask);
1657             break;
1658         case FIB_PATH_TYPE_BIER_IMP:
1659             res = (path->bier_imp.fp_bier_imp - rpath->frp_bier_imp);
1660             break;
1661         case FIB_PATH_TYPE_BIER_TABLE:
1662             res = bier_table_id_cmp(&path->bier_table.fp_bier_tbl,
1663                                     &rpath->frp_bier_tbl);
1664             break;
1665         case FIB_PATH_TYPE_INTF_RX:
1666             res = (path->intf_rx.fp_interface - rpath->frp_sw_if_index);
1667             break;
1668         case FIB_PATH_TYPE_UDP_ENCAP:
1669             res = (path->udp_encap.fp_udp_encap_id - rpath->frp_udp_encap_id);
1670             break;
1671         case FIB_PATH_TYPE_DEAG:
1672             res = (path->deag.fp_tbl_id - rpath->frp_fib_index);
1673             if (0 == res)
1674             {
1675                 res = (path->deag.fp_rpf_id - rpath->frp_rpf_id);
1676             }
1677             break;
1678         case FIB_PATH_TYPE_SPECIAL:
1679         case FIB_PATH_TYPE_RECEIVE:
1680         case FIB_PATH_TYPE_EXCLUSIVE:
1681             res = 0;
1682             break;
1683         }
1684     }
1685     return (res);
1686 }
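/*
 * Note: unlike fib_path_cmp() above, this compares an instantiated path
 * against the control-plane description (fib_route_path_t) it may have been
 * built from; the path-list code typically uses it to locate an existing
 * path when a route's paths are added or removed.
 */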
1687
1688 /*
1689  * fib_path_recursive_loop_detect
1690  *
1691  * A forward walk of the FIB object graph to detect a cycle/loop. This
1692  * walk is initiated when an entry links to a new path-list or unlinks from an old one.
1693  * The entry vector passed contains all the FIB entries that are children of this
1694  * path (it is all the entries encountered on the walk so far). If this vector
1695  * contains the entry this path resolves via, then a loop is about to form.
1696  * The loop must be allowed to form, since we need the dependencies in place
1697  * so that we can track when the loop breaks.
1698  * However, we MUST not produce a loop in the forwarding graph (else packets
1699  * would loop around the switch path until the loop breaks), so we mark recursive
1700  * paths as looped so that they do not contribute forwarding information.
1701  * By marking the path as looped, an entry such as:
1702  *    X/Y
1703  *     via a.a.a.a (looped)
1704  *     via b.b.b.b (not looped)
1705  * can still forward using the info provided by b.b.b.b only
1706  */
1707 int
1708 fib_path_recursive_loop_detect (fib_node_index_t path_index,
1709                                 fib_node_index_t **entry_indicies)
1710 {
1711     fib_path_t *path;
1712
1713     path = fib_path_get(path_index);
1714
1715     /*
1716      * the forced drop path is never looped, because it is never resolved.
1717      */
1718     if (fib_path_is_permanent_drop(path))
1719     {
1720         return (0);
1721     }
1722
1723     switch (path->fp_type)
1724     {
1725     case FIB_PATH_TYPE_RECURSIVE:
1726     {
1727         fib_node_index_t *entry_index, *entries;
1728         int looped = 0;
1729         entries = *entry_indicies;
1730
1731         vec_foreach(entry_index, entries) {
1732             if (*entry_index == path->fp_via_fib)
1733             {
1734                 /*
1735                  * the entry that is about to link to this path-list (or
1736                  * one of this path-list's children) is the same entry that
1737                  * this recursive path resolves through. this is a cycle.
1738                  * abort the walk.
1739                  */
1740                 looped = 1;
1741                 break;
1742             }
1743         }
1744
1745         if (looped)
1746         {
1747             FIB_PATH_DBG(path, "recursive loop formed");
1748             path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RECURSIVE_LOOP;
1749
1750             dpo_copy(&path->fp_dpo, drop_dpo_get(path->fp_nh_proto));
1751         }
1752         else
1753         {
1754             /*
1755              * no loop here yet. keep forward walking the graph.
1756              */     
1757             if (fib_entry_recursive_loop_detect(path->fp_via_fib, entry_indicies))
1758             {
1759                 FIB_PATH_DBG(path, "recursive loop formed");
1760                 path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RECURSIVE_LOOP;
1761             }
1762             else
1763             {
1764                 FIB_PATH_DBG(path, "recursive loop cleared");
1765                 path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RECURSIVE_LOOP;
1766             }
1767         }
1768         break;
1769     }
1770     case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
1771     case FIB_PATH_TYPE_ATTACHED:
1772     case FIB_PATH_TYPE_SPECIAL:
1773     case FIB_PATH_TYPE_DEAG:
1774     case FIB_PATH_TYPE_RECEIVE:
1775     case FIB_PATH_TYPE_INTF_RX:
1776     case FIB_PATH_TYPE_UDP_ENCAP:
1777     case FIB_PATH_TYPE_EXCLUSIVE:
1778     case FIB_PATH_TYPE_BIER_FMASK:
1779     case FIB_PATH_TYPE_BIER_TABLE:
1780     case FIB_PATH_TYPE_BIER_IMP:
1781         /*
1782          * these path types cannot be part of a loop, since they are the leaves
1783          * of the graph.
1784          */
1785         break;
1786     }
1787
1788     return (fib_path_is_looped(path_index));
1789 }
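/*
 * A concrete (hypothetical) example of the loop this walk catches:
 *
 *   ip route add 1.1.1.1/32 via 2.2.2.2
 *   ip route add 2.2.2.2/32 via 1.1.1.1
 *
 * the second add triggers a loop-detect walk; when the walk reaches the
 * recursive path via 2.2.2.2 the entry vector already contains 2.2.2.2/32,
 * so that path is marked looped and contributes a drop DPO until one of the
 * routes is replaced with something non-recursive.
 */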
1790
1791 int
1792 fib_path_resolve (fib_node_index_t path_index)
1793 {
1794     fib_path_t *path;
1795
1796     path = fib_path_get(path_index);
1797
1798     /*
1799      * hope for the best.
1800      */
1801     path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RESOLVED;
1802
1803     /*
1804      * the forced drop path resolves via the drop adj
1805      */
1806     if (fib_path_is_permanent_drop(path))
1807     {
1808         dpo_copy(&path->fp_dpo, drop_dpo_get(path->fp_nh_proto));
1809         path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
1810         return (fib_path_is_resolved(path_index));
1811     }
1812
1813     switch (path->fp_type)
1814     {
1815     case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
1816         fib_path_attached_next_hop_set(path);
1817         break;
1818     case FIB_PATH_TYPE_ATTACHED:
1819         if (DPO_PROTO_ETHERNET == path->fp_nh_proto)
1820         {
1821             l2_bridge_dpo_add_or_lock(path->attached.fp_interface,
1822                                       &path->fp_dpo);
1823         }
1824         else
1825         {
1826             /*
1827              * the path does not resolve if the interface is not admin up
1828              */
1829             if (!vnet_sw_interface_is_admin_up(vnet_get_main(),
1830                                                path->attached.fp_interface))
1831             {
1832                 path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
1833             }
1834             dpo_set(&path->fp_dpo,
1835                     DPO_ADJACENCY,
1836                     path->fp_nh_proto,
1837                     fib_path_attached_get_adj(path,
1838                                               dpo_proto_to_link(path->fp_nh_proto)));
1839
1840             /*
1841              * become a child of the adjacency so we receive updates
1842              * when the interface state changes
1843              */
1844             path->fp_sibling = adj_child_add(path->fp_dpo.dpoi_index,
1845                                              FIB_NODE_TYPE_PATH,
1846                                              fib_path_get_index(path));
1847         }
1848         break;
1849     case FIB_PATH_TYPE_RECURSIVE:
1850     {
1851         /*
1852          * Create a RR source entry in the table for the address
1853          * that this path recurses through.
1854          * This resolve action is recursive, hence we may create
1855          * more paths in the process; more creations may realloc the path
1856          * pool and move this path, hence it is re-fetched after the add.
1857          */
1858         fib_node_index_t fei;
1859         fib_prefix_t pfx;
1860
1861         ASSERT(FIB_NODE_INDEX_INVALID == path->fp_via_fib);
1862
1863         if (DPO_PROTO_MPLS == path->fp_nh_proto)
1864         {
1865             fib_prefix_from_mpls_label(path->recursive.fp_nh.fp_local_label,
1866                                        path->recursive.fp_nh.fp_eos,
1867                                        &pfx);
1868         }
1869         else
1870         {
1871             fib_prefix_from_ip46_addr(&path->recursive.fp_nh.fp_ip, &pfx);
1872         }
1873
1874         fei = fib_table_entry_special_add(path->recursive.fp_tbl_id,
1875                                           &pfx,
1876                                           FIB_SOURCE_RR,
1877                                           FIB_ENTRY_FLAG_NONE);
1878
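        /*
         * re-fetch the path; the special_add above may have created more
         * paths and hence reallocated the pool this path lives in.
         */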
1879         path = fib_path_get(path_index);
1880         path->fp_via_fib = fei;
1881
1882         /*
1883          * become a dependent child of the entry so the path is 
1884          * informed when the forwarding for the entry changes.
1885          */
1886         path->fp_sibling = fib_entry_child_add(path->fp_via_fib,
1887                                                FIB_NODE_TYPE_PATH,
1888                                                fib_path_get_index(path));
1889
1890         /*
1891          * create and configure the IP DPO
1892          */
1893         fib_path_recursive_adj_update(
1894             path,
1895             fib_path_to_chain_type(path),
1896             &path->fp_dpo);
1897
1898         break;
1899     }
1900     case FIB_PATH_TYPE_BIER_FMASK:
1901     {
1902         /*
1903          * become a dependent child of the entry so the path is
1904          * informed when the forwarding for the entry changes.
1905          */
1906         path->fp_sibling = bier_fmask_child_add(path->bier_fmask.fp_bier_fmask,
1907                                                 FIB_NODE_TYPE_PATH,
1908                                                 fib_path_get_index(path));
1909
1910         path->fp_via_bier_fmask = path->bier_fmask.fp_bier_fmask;
1911         fib_path_bier_fmask_update(path, &path->fp_dpo);
1912
1913         break;
1914     }
1915     case FIB_PATH_TYPE_BIER_IMP:
1916         bier_imp_lock(path->bier_imp.fp_bier_imp);
1917         bier_imp_contribute_forwarding(path->bier_imp.fp_bier_imp,
1918                                        DPO_PROTO_IP4,
1919                                        &path->fp_dpo);
1920         break;
1921     case FIB_PATH_TYPE_BIER_TABLE:
1922     {
1923         /*
1924          * Find/create the BIER table to link to
1925          */
1926         ASSERT(FIB_NODE_INDEX_INVALID == path->fp_via_bier_tbl);
1927
1928         path->fp_via_bier_tbl =
1929             bier_table_ecmp_create_and_lock(&path->bier_table.fp_bier_tbl);
1930
1931         bier_table_contribute_forwarding(path->fp_via_bier_tbl,
1932                                          &path->fp_dpo);
1933         break;
1934     }
1935     case FIB_PATH_TYPE_SPECIAL:
1936         /*
1937          * Resolve via the drop
1938          */
1939         dpo_copy(&path->fp_dpo, drop_dpo_get(path->fp_nh_proto));
1940         break;
1941     case FIB_PATH_TYPE_DEAG:
1942     {
1943         if (DPO_PROTO_BIER == path->fp_nh_proto)
1944         {
1945             bier_disp_table_contribute_forwarding(path->deag.fp_tbl_id,
1946                                                   &path->fp_dpo);
1947         }
1948         else
1949         {
1950             /*
1951              * Resolve via a lookup DPO.
1952              * FIXME. control plane should add routes with a table ID
1953              */
1954             lookup_input_t input;
1955             lookup_cast_t cast;
1956
1957             cast = (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_RPF_ID ?
1958                     LOOKUP_MULTICAST :
1959                     LOOKUP_UNICAST);
1960             input = (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_DEAG_SRC ?
1961                      LOOKUP_INPUT_SRC_ADDR :
1962                      LOOKUP_INPUT_DST_ADDR);
1963
1964             lookup_dpo_add_or_lock_w_fib_index(path->deag.fp_tbl_id,
1965                                                path->fp_nh_proto,
1966                                                cast,
1967                                                input,
1968                                                LOOKUP_TABLE_FROM_CONFIG,
1969                                                &path->fp_dpo);
1970         }
1971         break;
1972     }
1973     case FIB_PATH_TYPE_RECEIVE:
1974         /*
1975          * Resolve via a receive DPO.
1976          */
1977         receive_dpo_add_or_lock(path->fp_nh_proto,
1978                                 path->receive.fp_interface,
1979                                 &path->receive.fp_addr,
1980                                 &path->fp_dpo);
1981         break;
1982     case FIB_PATH_TYPE_UDP_ENCAP:
1983         udp_encap_lock(path->udp_encap.fp_udp_encap_id);
1984         udp_encap_contribute_forwarding(path->udp_encap.fp_udp_encap_id,
1985                                         path->fp_nh_proto,
1986                                         &path->fp_dpo);
1987         break;
1988     case FIB_PATH_TYPE_INTF_RX: {
1989         /*
1990          * Resolve via an interface receive DPO.
1991          */
1992         interface_rx_dpo_add_or_lock(path->fp_nh_proto,
1993                                      path->intf_rx.fp_interface,
1994                                      &path->fp_dpo);
1995         break;
1996     }
1997     case FIB_PATH_TYPE_EXCLUSIVE:
1998         /*
1999          * Resolve via the user provided DPO
2000          */
2001         dpo_copy(&path->fp_dpo, &path->exclusive.fp_ex_dpo);
2002         break;
2003     }
2004
2005     return (fib_path_is_resolved(path_index));
2006 }
2007
2008 u32
2009 fib_path_get_resolving_interface (fib_node_index_t path_index)
2010 {
2011     fib_path_t *path;
2012
2013     path = fib_path_get(path_index);
2014
2015     switch (path->fp_type)
2016     {
2017     case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
2018         return (path->attached_next_hop.fp_interface);
2019     case FIB_PATH_TYPE_ATTACHED:
2020         return (path->attached.fp_interface);
2021     case FIB_PATH_TYPE_RECEIVE:
2022         return (path->receive.fp_interface);
2023     case FIB_PATH_TYPE_RECURSIVE:
2024         if (fib_path_is_resolved(path_index))
2025         {
2026             return (fib_entry_get_resolving_interface(path->fp_via_fib));
2027         }
2028         break;
2029     case FIB_PATH_TYPE_INTF_RX:
2030     case FIB_PATH_TYPE_UDP_ENCAP:
2031     case FIB_PATH_TYPE_SPECIAL:
2032     case FIB_PATH_TYPE_DEAG:
2033     case FIB_PATH_TYPE_EXCLUSIVE:
2034     case FIB_PATH_TYPE_BIER_FMASK:
2035     case FIB_PATH_TYPE_BIER_TABLE:
2036     case FIB_PATH_TYPE_BIER_IMP:
2037         break;
2038     }
2039     return (~0);
2040 }
2041
2042 index_t
2043 fib_path_get_resolving_index (fib_node_index_t path_index)
2044 {
2045     fib_path_t *path;
2046
2047     path = fib_path_get(path_index);
2048
2049     switch (path->fp_type)
2050     {
2051     case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
2052     case FIB_PATH_TYPE_ATTACHED:
2053     case FIB_PATH_TYPE_RECEIVE:
2054     case FIB_PATH_TYPE_INTF_RX:
2055     case FIB_PATH_TYPE_SPECIAL:
2056     case FIB_PATH_TYPE_DEAG:
2057     case FIB_PATH_TYPE_EXCLUSIVE:
2058         break;
2059     case FIB_PATH_TYPE_UDP_ENCAP:
2060         return (path->udp_encap.fp_udp_encap_id);
2061     case FIB_PATH_TYPE_RECURSIVE:
2062         return (path->fp_via_fib);
2063     case FIB_PATH_TYPE_BIER_FMASK:
2064         return (path->bier_fmask.fp_bier_fmask);
2065     case FIB_PATH_TYPE_BIER_TABLE:
2066         return (path->fp_via_bier_tbl);
2067     case FIB_PATH_TYPE_BIER_IMP:
2068         return (path->bier_imp.fp_bier_imp);
2069     }
2070     return (~0);
2071 }
2072
2073 adj_index_t
2074 fib_path_get_adj (fib_node_index_t path_index)
2075 {
2076     fib_path_t *path;
2077
2078     path = fib_path_get(path_index);
2079
2080     ASSERT(dpo_is_adj(&path->fp_dpo));
2081     if (dpo_is_adj(&path->fp_dpo))
2082     {
2083         return (path->fp_dpo.dpoi_index);
2084     }
2085     return (ADJ_INDEX_INVALID);
2086 }
2087
2088 u16
2089 fib_path_get_weight (fib_node_index_t path_index)
2090 {
2091     fib_path_t *path;
2092
2093     path = fib_path_get(path_index);
2094
2095     ASSERT(path);
2096
2097     return (path->fp_weight);
2098 }
2099
2100 u16
2101 fib_path_get_preference (fib_node_index_t path_index)
2102 {
2103     fib_path_t *path;
2104
2105     path = fib_path_get(path_index);
2106
2107     ASSERT(path);
2108
2109     return (path->fp_preference);
2110 }
2111
2112 u32
2113 fib_path_get_rpf_id (fib_node_index_t path_index)
2114 {
2115     fib_path_t *path;
2116
2117     path = fib_path_get(path_index);
2118
2119     ASSERT(path);
2120
2121     if (FIB_PATH_CFG_FLAG_RPF_ID & path->fp_cfg_flags)
2122     {
2123         return (path->deag.fp_rpf_id);
2124     }
2125
2126     return (~0);
2127 }
2128
2129 /**
2130  * @brief Contribute the path's adjacency to the list passed.
2131  * By calling this function over all paths, recursively, a child
2132  * can construct its full set of forwarding adjacencies, and hence its
2133  * uRPF list.
2134  */
2135 void
2136 fib_path_contribute_urpf (fib_node_index_t path_index,
2137                           index_t urpf)
2138 {
2139     fib_path_t *path;
2140
2141     path = fib_path_get(path_index);
2142
2143     /*
2144      * resolved and unresolved paths contribute to the RPF list.
2145      */
2146     switch (path->fp_type)
2147     {
2148     case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
2149         fib_urpf_list_append(urpf, path->attached_next_hop.fp_interface);
2150         break;
2151
2152     case FIB_PATH_TYPE_ATTACHED:
2153         fib_urpf_list_append(urpf, path->attached.fp_interface);
2154         break;
2155
2156     case FIB_PATH_TYPE_RECURSIVE:
2157         if (FIB_NODE_INDEX_INVALID != path->fp_via_fib &&
2158             !fib_path_is_looped(path_index))
2159         {
2160             /*
2161              * a path can be unresolved due to resolution constraints, or
2162              * because it has no via-entry; without a via-entry there is nothing to add.
2163              */
2164             fib_entry_contribute_urpf(path->fp_via_fib, urpf);
2165         }
2166         break;
2167
2168     case FIB_PATH_TYPE_EXCLUSIVE:
2169     case FIB_PATH_TYPE_SPECIAL:
2170     {
2171         /*
2172          * these path types may link to an adj, if that's what
2173          * the client gave
2174          */
2175         u32 rpf_sw_if_index;
2176
2177         rpf_sw_if_index = dpo_get_urpf(&path->fp_dpo);
2178
2179         if (~0 != rpf_sw_if_index)
2180         {
2181             fib_urpf_list_append(urpf, rpf_sw_if_index);
2182         }
2183         break;
2184     }
2185     case FIB_PATH_TYPE_DEAG:
2186     case FIB_PATH_TYPE_RECEIVE:
2187     case FIB_PATH_TYPE_INTF_RX:
2188     case FIB_PATH_TYPE_UDP_ENCAP:
2189     case FIB_PATH_TYPE_BIER_FMASK:
2190     case FIB_PATH_TYPE_BIER_TABLE:
2191     case FIB_PATH_TYPE_BIER_IMP:
2192         /*
2193          * these path types don't link to an adj
2194          */
2195         break;
2196     }
2197 }
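/*
 * A rough sketch of how a child builds its uRPF list from its paths
 * (names as declared in fib_urpf_list.h and fib_path_list.h):
 *
 *   index_t ui = fib_urpf_list_alloc_and_lock();
 *   fib_path_list_contribute_urpf(path_list_index, ui);
 *   fib_urpf_list_bake(ui);
 *
 * bake() sorts and de-duplicates the interface set; the resulting list is
 * what the uRPF check consults against the packet's input sw_if_index.
 */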
2198
2199 void
2200 fib_path_stack_mpls_disp (fib_node_index_t path_index,
2201                           dpo_proto_t payload_proto,
2202                           dpo_id_t *dpo)
2203 {
2204     fib_path_t *path;
2205
2206     path = fib_path_get(path_index);
2207
2208     ASSERT(path);
2209
2210     switch (path->fp_type)
2211     {
2212     case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
2213     {
2214         dpo_id_t tmp = DPO_INVALID;
2215
2216         dpo_copy(&tmp, dpo);
2217         dpo_set(dpo,
2218                 DPO_MPLS_DISPOSITION,
2219                 payload_proto,
2220                 mpls_disp_dpo_create(payload_proto, ~0, &tmp));
2221         dpo_reset(&tmp);
2222         break;
2223     }                
2224     case FIB_PATH_TYPE_DEAG:
2225     {
2226         dpo_id_t tmp = DPO_INVALID;
2227
2228         dpo_copy(&tmp, dpo);
2229         dpo_set(dpo,
2230                 DPO_MPLS_DISPOSITION,
2231                 payload_proto,
2232                 mpls_disp_dpo_create(payload_proto,
2233                                      path->deag.fp_rpf_id,
2234                                      &tmp));
2235         dpo_reset(&tmp);
2236         break;
2237     }
2238     case FIB_PATH_TYPE_RECEIVE:
2239     case FIB_PATH_TYPE_ATTACHED:
2240     case FIB_PATH_TYPE_RECURSIVE:
2241     case FIB_PATH_TYPE_INTF_RX:
2242     case FIB_PATH_TYPE_UDP_ENCAP:
2243     case FIB_PATH_TYPE_EXCLUSIVE:
2244     case FIB_PATH_TYPE_SPECIAL:
2245     case FIB_PATH_TYPE_BIER_FMASK:
2246     case FIB_PATH_TYPE_BIER_TABLE:
2247     case FIB_PATH_TYPE_BIER_IMP:
2248         break;
2249     }
2250 }
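/*
 * The disposition DPO stacked above records the payload protocol (and, for
 * deag paths, the RPF-id) so that once the MPLS label has been disposed of,
 * the exposed payload is handed to the right protocol lookup and, for
 * multicast, the right RPF check; ~0 is used where no RPF-id applies.
 */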
2251
2252 void
2253 fib_path_contribute_forwarding (fib_node_index_t path_index,
2254                                 fib_forward_chain_type_t fct,
2255                                 dpo_id_t *dpo)
2256 {
2257     fib_path_t *path;
2258
2259     path = fib_path_get(path_index);
2260
2261     ASSERT(path);
2262     ASSERT(FIB_FORW_CHAIN_TYPE_MPLS_EOS != fct);
2263
2264     FIB_PATH_DBG(path, "contribute");
2265
2266     /*
2267      * The DPO stored in the path was created when the path was resolved.
2268      * This then represents the path's 'native' protocol, e.g. IP.
2269      * For all other chain types we will need to find something else.
2270      */
2271     if (fib_path_to_chain_type(path) == fct)
2272     {
2273         dpo_copy(dpo, &path->fp_dpo);
2274     }
2275     else
2276     {
2277         switch (path->fp_type)
2278         {
2279         case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
2280             switch (fct)
2281             {
2282             case FIB_FORW_CHAIN_TYPE_UNICAST_IP4:
2283             case FIB_FORW_CHAIN_TYPE_UNICAST_IP6:
2284             case FIB_FORW_CHAIN_TYPE_MPLS_EOS:
2285             case FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS:
2286             case FIB_FORW_CHAIN_TYPE_ETHERNET:
2287             case FIB_FORW_CHAIN_TYPE_NSH:
2288             {
2289                 adj_index_t ai;
2290
2291                 /*
2292                  * get an appropriate link-type adj.
2293                  */
2294                 ai = fib_path_attached_next_hop_get_adj(
2295                          path,
2296                          fib_forw_chain_type_to_link_type(fct));
2297                 dpo_set(dpo, DPO_ADJACENCY,
2298                         fib_forw_chain_type_to_dpo_proto(fct), ai);
2299                 adj_unlock(ai);
2300
2301                 break;
2302             }
2303             case FIB_FORW_CHAIN_TYPE_MCAST_IP4:
2304             case FIB_FORW_CHAIN_TYPE_MCAST_IP6:
2305             case FIB_FORW_CHAIN_TYPE_BIER:
2306                 break;
2307             }
2308             break;
2309         case FIB_PATH_TYPE_RECURSIVE:
2310             switch (fct)
2311             {
2312             case FIB_FORW_CHAIN_TYPE_MPLS_EOS:
2313             case FIB_FORW_CHAIN_TYPE_UNICAST_IP4:
2314             case FIB_FORW_CHAIN_TYPE_UNICAST_IP6:
2315             case FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS:
2316             case FIB_FORW_CHAIN_TYPE_MCAST_IP4:
2317             case FIB_FORW_CHAIN_TYPE_MCAST_IP6:
2318             case FIB_FORW_CHAIN_TYPE_BIER:
2319                 fib_path_recursive_adj_update(path, fct, dpo);
2320                 break;
2321             case FIB_FORW_CHAIN_TYPE_ETHERNET:
2322             case FIB_FORW_CHAIN_TYPE_NSH:
2323                 ASSERT(0);
2324                 break;
2325             }
2326             break;
2327         case FIB_PATH_TYPE_BIER_TABLE:
2328             switch (fct)
2329             {
2330             case FIB_FORW_CHAIN_TYPE_BIER:
2331                 bier_table_contribute_forwarding(path->fp_via_bier_tbl, dpo);
2332                 break;
2333             case FIB_FORW_CHAIN_TYPE_MPLS_EOS:
2334             case FIB_FORW_CHAIN_TYPE_UNICAST_IP4:
2335             case FIB_FORW_CHAIN_TYPE_UNICAST_IP6:
2336             case FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS:
2337             case FIB_FORW_CHAIN_TYPE_MCAST_IP4:
2338             case FIB_FORW_CHAIN_TYPE_MCAST_IP6:
2339             case FIB_FORW_CHAIN_TYPE_ETHERNET:
2340             case FIB_FORW_CHAIN_TYPE_NSH:
2341                 ASSERT(0);
2342                 break;
2343             }
2344             break;
2345         case FIB_PATH_TYPE_BIER_FMASK:
2346             switch (fct)
2347             {
2348             case FIB_FORW_CHAIN_TYPE_BIER:
2349                 fib_path_bier_fmask_update(path, dpo);
2350                 break;
2351             case FIB_FORW_CHAIN_TYPE_MPLS_EOS:
2352             case FIB_FORW_CHAIN_TYPE_UNICAST_IP4:
2353             case FIB_FORW_CHAIN_TYPE_UNICAST_IP6:
2354             case FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS:
2355             case FIB_FORW_CHAIN_TYPE_MCAST_IP4:
2356             case FIB_FORW_CHAIN_TYPE_MCAST_IP6:
2357             case FIB_FORW_CHAIN_TYPE_ETHERNET:
2358             case FIB_FORW_CHAIN_TYPE_NSH:
2359                 ASSERT(0);
2360                 break;
2361             }
2362             break;
2363         case FIB_PATH_TYPE_BIER_IMP:
2364             bier_imp_contribute_forwarding(path->bier_imp.fp_bier_imp,
2365                                            fib_forw_chain_type_to_dpo_proto(fct),
2366                                            dpo);
2367             break;
2368         case FIB_PATH_TYPE_DEAG:
2369             switch (fct)
2370             {
2371             case FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS:
2372                 lookup_dpo_add_or_lock_w_table_id(MPLS_FIB_DEFAULT_TABLE_ID,
2373                                                   DPO_PROTO_MPLS,
2374                                                   LOOKUP_UNICAST,
2375                                                   LOOKUP_INPUT_DST_ADDR,
2376                                                   LOOKUP_TABLE_FROM_CONFIG,
2377                                                   dpo);
2378                 break;
2379             case FIB_FORW_CHAIN_TYPE_MPLS_EOS:
2380             case FIB_FORW_CHAIN_TYPE_UNICAST_IP4:
2381             case FIB_FORW_CHAIN_TYPE_UNICAST_IP6:
2382                 dpo_copy(dpo, &path->fp_dpo);
2383                 break;
2384             case FIB_FORW_CHAIN_TYPE_MCAST_IP4:
2385             case FIB_FORW_CHAIN_TYPE_MCAST_IP6:
2386             case FIB_FORW_CHAIN_TYPE_BIER:
2387                 break;
2388             case FIB_FORW_CHAIN_TYPE_ETHERNET:
2389             case FIB_FORW_CHAIN_TYPE_NSH:
2390                 ASSERT(0);
2391                 break;
2392             }
2393             break;
2394         case FIB_PATH_TYPE_EXCLUSIVE:
2395             dpo_copy(dpo, &path->exclusive.fp_ex_dpo);
2396             break;
2397         case FIB_PATH_TYPE_ATTACHED:
2398             if (DPO_PROTO_ETHERNET == path->fp_nh_proto)
2399             {
2400                 dpo_copy(dpo, &path->fp_dpo);
2401                 break;
2402             }
2403             switch (fct)
2404             {
2405             case FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS:
2406             case FIB_FORW_CHAIN_TYPE_UNICAST_IP4:
2407             case FIB_FORW_CHAIN_TYPE_UNICAST_IP6:
2408             case FIB_FORW_CHAIN_TYPE_MPLS_EOS:
2409             case FIB_FORW_CHAIN_TYPE_ETHERNET:
2410             case FIB_FORW_CHAIN_TYPE_NSH:
2411             case FIB_FORW_CHAIN_TYPE_BIER:
2412                 {
2413                     adj_index_t ai;
2414
2415                     /*
2416                      * get an appropriate link-type adj.
2417                      */
2418                     ai = fib_path_attached_get_adj(
2419                             path,
2420                             fib_forw_chain_type_to_link_type(fct));
2421                     dpo_set(dpo, DPO_ADJACENCY,
2422                             fib_forw_chain_type_to_dpo_proto(fct), ai);
2423                     adj_unlock(ai);
2424                     break;
2425                 }
2426             case FIB_FORW_CHAIN_TYPE_MCAST_IP4:
2427             case FIB_FORW_CHAIN_TYPE_MCAST_IP6:
2428                 {
2429                     adj_index_t ai;
2430
2431                     /*
2432                      * Create the adj needed for sending IP multicast traffic
2433                      */
2434                     ai = adj_mcast_add_or_lock(dpo_proto_to_fib(path->fp_nh_proto),
2435                                                fib_forw_chain_type_to_link_type(fct),
2436                                                path->attached.fp_interface);
2437                     dpo_set(dpo, DPO_ADJACENCY,
2438                             fib_forw_chain_type_to_dpo_proto(fct),
2439                             ai);
2440                     adj_unlock(ai);
2441                 }
2442                 break;
2443             }
2444             break;
2445         case FIB_PATH_TYPE_INTF_RX:
2446             /*
2447              * Create the interface-rx DPO; packets will appear to be received on this interface
2448              */
2449             interface_rx_dpo_add_or_lock(fib_forw_chain_type_to_dpo_proto(fct),
2450                                          path->attached.fp_interface,
2451                                          dpo);
2452             break;
2453         case FIB_PATH_TYPE_UDP_ENCAP:
2454             udp_encap_contribute_forwarding(path->udp_encap.fp_udp_encap_id,
2455                                             path->fp_nh_proto,
2456                                             dpo);
2457             break;
2458         case FIB_PATH_TYPE_RECEIVE:
2459         case FIB_PATH_TYPE_SPECIAL:
2460             dpo_copy(dpo, &path->fp_dpo);
2461             break;
2462         }
2463     }
2464 }
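/*
 * For example, an attached-next-hop path asked for an MPLS chain type
 * returns an adjacency with an MPLS link type (suitable for label
 * imposition), whereas the same path asked for an IP4 unicast chain
 * returns its IPv4 adjacency; the switch above picks the flavour of DPO
 * that matches the requested chain.
 */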
2465
2466 load_balance_path_t *
2467 fib_path_append_nh_for_multipath_hash (fib_node_index_t path_index,
2468                                        fib_forward_chain_type_t fct,
2469                                        load_balance_path_t *hash_key)
2470 {
2471     load_balance_path_t *mnh;
2472     fib_path_t *path;
2473
2474     path = fib_path_get(path_index);
2475
2476     ASSERT(path);
2477
2478     if (fib_path_is_resolved(path_index))
2479     {
2480         vec_add2(hash_key, mnh, 1);
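        /*
         * vec_add2() may reallocate hash_key; the (possibly moved) vector
         * is returned to the caller below.
         */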
2481
2482         mnh->path_weight = path->fp_weight;
2483         mnh->path_index = path_index;
2484         fib_path_contribute_forwarding(path_index, fct, &mnh->path_dpo);
2485     }
2486
2487     return (hash_key);
2488 }
2489
2490 int
2491 fib_path_is_recursive_constrained (fib_node_index_t path_index)
2492 {
2493     fib_path_t *path;
2494
2495     path = fib_path_get(path_index);
2496
2497     return ((FIB_PATH_TYPE_RECURSIVE == path->fp_type) &&
2498             ((path->fp_cfg_flags & FIB_PATH_CFG_FLAG_RESOLVE_ATTACHED) ||
2499              (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_RESOLVE_HOST)));
2500 }
2501
2502 int
2503 fib_path_is_exclusive (fib_node_index_t path_index)
2504 {
2505     fib_path_t *path;
2506
2507     path = fib_path_get(path_index);
2508
2509     return (FIB_PATH_TYPE_EXCLUSIVE == path->fp_type);
2510 }
2511
2512 int
2513 fib_path_is_deag (fib_node_index_t path_index)
2514 {
2515     fib_path_t *path;
2516
2517     path = fib_path_get(path_index);
2518
2519     return (FIB_PATH_TYPE_DEAG == path->fp_type);
2520 }
2521
2522 int
2523 fib_path_is_resolved (fib_node_index_t path_index)
2524 {
2525     fib_path_t *path;
2526
2527     path = fib_path_get(path_index);
2528
2529     return (dpo_id_is_valid(&path->fp_dpo) &&
2530             (path->fp_oper_flags & FIB_PATH_OPER_FLAG_RESOLVED) &&
2531             !fib_path_is_looped(path_index) &&
2532             !fib_path_is_permanent_drop(path));
2533 }
2534
2535 int
2536 fib_path_is_looped (fib_node_index_t path_index)
2537 {
2538     fib_path_t *path;
2539
2540     path = fib_path_get(path_index);
2541
2542     return (path->fp_oper_flags & FIB_PATH_OPER_FLAG_RECURSIVE_LOOP);
2543 }
2544
2545 fib_path_list_walk_rc_t
2546 fib_path_encode (fib_node_index_t path_list_index,
2547                  fib_node_index_t path_index,
2548                  void *ctx)
2549 {
2550     fib_route_path_encode_t **api_rpaths = ctx;
2551     fib_route_path_encode_t *api_rpath;
2552     fib_path_t *path;
2553
2554     path = fib_path_get(path_index);
2555     if (!path)
2556       return (FIB_PATH_LIST_WALK_CONTINUE);
2557     vec_add2(*api_rpaths, api_rpath, 1);
2558     api_rpath->rpath.frp_weight = path->fp_weight;
2559     api_rpath->rpath.frp_preference = path->fp_preference;
2560     api_rpath->rpath.frp_proto = path->fp_nh_proto;
2561     api_rpath->rpath.frp_sw_if_index = ~0;
2562     api_rpath->dpo = path->fp_dpo;
2563
2564     switch (path->fp_type)
2565       {
2566       case FIB_PATH_TYPE_RECEIVE:
2567         api_rpath->rpath.frp_addr = path->receive.fp_addr;
2568         api_rpath->rpath.frp_sw_if_index = path->receive.fp_interface;
2569         break;
2570       case FIB_PATH_TYPE_ATTACHED:
2571         api_rpath->rpath.frp_sw_if_index = path->attached.fp_interface;
2572         break;
2573       case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
2574         api_rpath->rpath.frp_sw_if_index = path->attached_next_hop.fp_interface;
2575         api_rpath->rpath.frp_addr = path->attached_next_hop.fp_nh;
2576         break;
2577       case FIB_PATH_TYPE_BIER_FMASK:
2578         api_rpath->rpath.frp_bier_fmask = path->bier_fmask.fp_bier_fmask;
2579         break;
2580       case FIB_PATH_TYPE_SPECIAL:
2581         break;
2582       case FIB_PATH_TYPE_DEAG:
2583         api_rpath->rpath.frp_fib_index = path->deag.fp_tbl_id;
2584         break;
2585       case FIB_PATH_TYPE_RECURSIVE:
2586         api_rpath->rpath.frp_addr = path->recursive.fp_nh.fp_ip;
2587         break;
2588       default:
2589         break;
2590       }
2591
2592     return (FIB_PATH_LIST_WALK_CONTINUE);
2593 }
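/*
 * This is a fib_path_list_walk() callback; a rough sketch of how the dump
 * code drives it:
 *
 *   fib_route_path_encode_t *rpaths = NULL;
 *   fib_path_list_walk(path_list_index, fib_path_encode, &rpaths);
 *
 * each walked path appends one fib_route_path_encode_t to the vector.
 */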
2594
2595 dpo_proto_t
2596 fib_path_get_proto (fib_node_index_t path_index)
2597 {
2598     fib_path_t *path;
2599
2600     path = fib_path_get(path_index);
2601
2602     return (path->fp_nh_proto);
2603 }
2604
2605 void
2606 fib_path_module_init (void)
2607 {
2608     fib_node_register_type (FIB_NODE_TYPE_PATH, &fib_path_vft);
2609 }
2610
2611 static clib_error_t *
2612 show_fib_path_command (vlib_main_t * vm,
2613                         unformat_input_t * input,
2614                         vlib_cli_command_t * cmd)
2615 {
2616     fib_node_index_t pi;
2617     fib_path_t *path;
2618
2619     if (unformat (input, "%d", &pi))
2620     {
2621         /*
2622          * show one in detail
2623          */
2624         if (!pool_is_free_index(fib_path_pool, pi))
2625         {
2626             path = fib_path_get(pi);
2627             u8 *s = format(NULL, "%U", format_fib_path, pi, 1);
2628             s = format(s, "children:");
2629             s = fib_node_children_format(path->fp_node.fn_children, s);
2630             vlib_cli_output (vm, "%s", s);
2631             vec_free(s);
2632         }
2633         else
2634         {
2635             vlib_cli_output (vm, "path %d invalid", pi);
2636         }
2637     }
2638     else
2639     {
2640         vlib_cli_output (vm, "FIB Paths");
2641         pool_foreach(path, fib_path_pool,
2642         ({
2643             vlib_cli_output (vm, "%U", format_fib_path, path);
2644         }));
2645     }
2646
2647     return (NULL);
2648 }
2649
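/*
 * CLI: "show fib paths" lists every path in the pool; "show fib paths <index>"
 * shows one path in detail, including its child dependents.
 */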
2650 VLIB_CLI_COMMAND (show_fib_path, static) = {
2651   .path = "show fib paths",
2652   .function = show_fib_path_command,
2653   .short_help = "show fib paths",
2654 };