[vpp.git] / src / vnet / fib / fib_path.c
1 /*
2  * Copyright (c) 2016 Cisco and/or its affiliates.
3  * Licensed under the Apache License, Version 2.0 (the "License");
4  * you may not use this file except in compliance with the License.
5  * You may obtain a copy of the License at:
6  *
7  *     http://www.apache.org/licenses/LICENSE-2.0
8  *
9  * Unless required by applicable law or agreed to in writing, software
10  * distributed under the License is distributed on an "AS IS" BASIS,
11  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12  * See the License for the specific language governing permissions and
13  * limitations under the License.
14  */
15
16 #include <vlib/vlib.h>
17 #include <vnet/vnet.h>
18 #include <vnet/ip/format.h>
19 #include <vnet/ip/ip.h>
20 #include <vnet/dpo/drop_dpo.h>
21 #include <vnet/dpo/receive_dpo.h>
22 #include <vnet/dpo/load_balance_map.h>
23 #include <vnet/dpo/lookup_dpo.h>
24 #include <vnet/dpo/interface_rx_dpo.h>
25 #include <vnet/dpo/mpls_disposition.h>
26 #include <vnet/dpo/dvr_dpo.h>
27 #include <vnet/dpo/drop_dpo.h>
28
29 #include <vnet/adj/adj.h>
30 #include <vnet/adj/adj_mcast.h>
31
32 #include <vnet/fib/fib_path.h>
33 #include <vnet/fib/fib_node.h>
34 #include <vnet/fib/fib_table.h>
35 #include <vnet/fib/fib_entry.h>
36 #include <vnet/fib/fib_path_list.h>
37 #include <vnet/fib/fib_internal.h>
38 #include <vnet/fib/fib_urpf_list.h>
39 #include <vnet/fib/mpls_fib.h>
40 #include <vnet/udp/udp_encap.h>
41 #include <vnet/bier/bier_fmask.h>
42 #include <vnet/bier/bier_table.h>
43 #include <vnet/bier/bier_imp.h>
44 #include <vnet/bier/bier_disp_table.h>
45
46 /**
47  * Enumeration of path types
48  */
49 typedef enum fib_path_type_t_ {
50     /**
51      * Marker. Add new types after this one.
52      */
53     FIB_PATH_TYPE_FIRST = 0,
54     /**
55      * Attached-nexthop. An interface and a nexthop are known.
56      */
57     FIB_PATH_TYPE_ATTACHED_NEXT_HOP = FIB_PATH_TYPE_FIRST,
58     /**
59      * attached. Only the interface is known.
60      */
61     FIB_PATH_TYPE_ATTACHED,
62     /**
63      * recursive. Only the next-hop is known.
64      */
65     FIB_PATH_TYPE_RECURSIVE,
66     /**
67      * special. nothing is known. so we drop.
68      */
69     FIB_PATH_TYPE_SPECIAL,
70     /**
71      * exclusive. user provided adj.
72      */
73     FIB_PATH_TYPE_EXCLUSIVE,
74     /**
75      * deag. Link to a lookup adj in the next table
76      */
77     FIB_PATH_TYPE_DEAG,
78     /**
79      * interface receive.
80      */
81     FIB_PATH_TYPE_INTF_RX,
82     /**
83      * Path resolves via a UDP encap object.
84      */
85     FIB_PATH_TYPE_UDP_ENCAP,
86     /**
87      * receive. it's for-us.
88      */
89     FIB_PATH_TYPE_RECEIVE,
90     /**
91      * bier-imp. it's via a BIER imposition.
92      */
93     FIB_PATH_TYPE_BIER_IMP,
94     /**
95      * bier-table. it's via a BIER ECMP table.
96      */
97     FIB_PATH_TYPE_BIER_TABLE,
98     /**
99      * bier-fmask. it's via a BIER f-mask.
100      */
101     FIB_PATH_TYPE_BIER_FMASK,
102     /**
103      * via a DVR.
104      */
105     FIB_PATH_TYPE_DVR,
106     /**
107      * Marker. Add new types before this one, then update it.
108      */
109     FIB_PATH_TYPE_LAST = FIB_PATH_TYPE_DVR,
110 } __attribute__ ((packed)) fib_path_type_t;
111
112 /**
113  * The maximum number of path_types
114  */
115 #define FIB_PATH_TYPE_MAX (FIB_PATH_TYPE_LAST + 1)
116
117 #define FIB_PATH_TYPES {                                        \
118     [FIB_PATH_TYPE_ATTACHED_NEXT_HOP] = "attached-nexthop",     \
119     [FIB_PATH_TYPE_ATTACHED]          = "attached",             \
120     [FIB_PATH_TYPE_RECURSIVE]         = "recursive",            \
121     [FIB_PATH_TYPE_SPECIAL]           = "special",              \
122     [FIB_PATH_TYPE_EXCLUSIVE]         = "exclusive",            \
123     [FIB_PATH_TYPE_DEAG]              = "deag",                 \
124     [FIB_PATH_TYPE_INTF_RX]           = "intf-rx",              \
125     [FIB_PATH_TYPE_UDP_ENCAP]         = "udp-encap",            \
126     [FIB_PATH_TYPE_RECEIVE]           = "receive",              \
127     [FIB_PATH_TYPE_BIER_IMP]          = "bier-imp",             \
128     [FIB_PATH_TYPE_BIER_TABLE]        = "bier-table",           \
129     [FIB_PATH_TYPE_BIER_FMASK]        = "bier-fmask",           \
130     [FIB_PATH_TYPE_DVR]               = "dvr",                  \
131 }
132
133 #define FOR_EACH_FIB_PATH_TYPE(_item)           \
134     for (_item = FIB_PATH_TYPE_FIRST;           \
135          _item <= FIB_PATH_TYPE_LAST;           \
136          _item++)
137
138 /**
139  * Enumeration of path operational (i.e. derived) attributes
140  */
141 typedef enum fib_path_oper_attribute_t_ {
142     /**
143      * Marker. Add new types after this one.
144      */
145     FIB_PATH_OPER_ATTRIBUTE_FIRST = 0,
146     /**
147      * The path forms part of a recursive loop.
148      */
149     FIB_PATH_OPER_ATTRIBUTE_RECURSIVE_LOOP = FIB_PATH_OPER_ATTRIBUTE_FIRST,
150     /**
151      * The path is resolved
152      */
153     FIB_PATH_OPER_ATTRIBUTE_RESOLVED,
154     /**
155      * The path is attached, despite what the next-hop may say.
156      */
157     FIB_PATH_OPER_ATTRIBUTE_ATTACHED,
158     /**
159      * The path has become a permanent drop.
160      */
161     FIB_PATH_OPER_ATTRIBUTE_DROP,
162     /**
163      * Marker. Add new types before this one, then update it.
164      */
165     FIB_PATH_OPER_ATTRIBUTE_LAST = FIB_PATH_OPER_ATTRIBUTE_DROP,
166 } __attribute__ ((packed)) fib_path_oper_attribute_t;
167
168 /**
169  * The maximum number of path operational attributes
170  */
171 #define FIB_PATH_OPER_ATTRIBUTE_MAX (FIB_PATH_OPER_ATTRIBUTE_LAST + 1)
172
173 #define FIB_PATH_OPER_ATTRIBUTES {                                      \
174     [FIB_PATH_OPER_ATTRIBUTE_RECURSIVE_LOOP] = "recursive-loop",        \
175     [FIB_PATH_OPER_ATTRIBUTE_RESOLVED]       = "resolved",              \
176     [FIB_PATH_OPER_ATTRIBUTE_DROP]           = "drop",                  \
177 }
178
179 #define FOR_EACH_FIB_PATH_OPER_ATTRIBUTE(_item) \
180     for (_item = FIB_PATH_OPER_ATTRIBUTE_FIRST; \
181          _item <= FIB_PATH_OPER_ATTRIBUTE_LAST; \
182          _item++)
183
184 /**
185  * Path flags from the attributes
186  */
187 typedef enum fib_path_oper_flags_t_ {
188     FIB_PATH_OPER_FLAG_NONE = 0,
189     FIB_PATH_OPER_FLAG_RECURSIVE_LOOP = (1 << FIB_PATH_OPER_ATTRIBUTE_RECURSIVE_LOOP),
190     FIB_PATH_OPER_FLAG_DROP = (1 << FIB_PATH_OPER_ATTRIBUTE_DROP),
191     FIB_PATH_OPER_FLAG_RESOLVED = (1 << FIB_PATH_OPER_ATTRIBUTE_RESOLVED),
192     FIB_PATH_OPER_FLAG_ATTACHED = (1 << FIB_PATH_OPER_ATTRIBUTE_ATTACHED),
193 } __attribute__ ((packed)) fib_path_oper_flags_t;
194
195 /**
196  * A FIB path
197  */
198 typedef struct fib_path_t_ {
199     /**
200      * A path is a node in the FIB graph.
201      */
202     fib_node_t fp_node;
203
204     /**
205      * The index of the path-list to which this path belongs
206      */
207     u32 fp_pl_index;
208
209     /**
210      * This marks the start of the memory area used to hash
211      * the path
212      */
213     STRUCT_MARK(path_hash_start);
214
215     /**
216      * Configuration Flags
217      */
218     fib_path_cfg_flags_t fp_cfg_flags;
219
220     /**
221      * The type of the path. This is the selector for the union
222      */
223     fib_path_type_t fp_type;
224
225     /**
226      * The protocol of the next-hop, i.e. the address family of the
227      * next-hop's address. We can't derive this from the address itself
228      * since the address can be all zeros
229      */
230     dpo_proto_t fp_nh_proto;
231
232     /**
233      * UCMP [unnormalised] weight
234      */
235     u8 fp_weight;
236
237     /**
238      * A path preference. 0 is the best.
239      * Only paths of the best preference, that are 'up', are considered
240      * for forwarding.
241      */
242     u8 fp_preference;
243
244     /**
245      * per-type union of the data required to resolve the path
246      */
247     union {
248         struct {
249             /**
250              * The next-hop
251              */
252             ip46_address_t fp_nh;
253             /**
254              * The interface
255              */
256             u32 fp_interface;
257         } attached_next_hop;
258         struct {
259             /**
260              * The interface
261              */
262             u32 fp_interface;
263         } attached;
264         struct {
265             union
266             {
267                 /**
268                  * The next-hop
269                  */
270                 ip46_address_t fp_ip;
271                 struct {
272                     /**
273                      * The local label to resolve through.
274                      */
275                     mpls_label_t fp_local_label;
276                     /**
277                      * The EOS bit of the resolving label
278                      */
279                     mpls_eos_bit_t fp_eos;
280                 };
281             } fp_nh;
282             union {
283                 /**
284                  * The FIB table index in which to find the next-hop.
285                  */
286                 fib_node_index_t fp_tbl_id;
287                 /**
288                  * The BIER FIB the fmask is in
289                  */
290                 index_t fp_bier_fib;
291             };
292         } recursive;
293         struct {
294             /**
295              * BIER FMask ID
296              */
297             index_t fp_bier_fmask;
298         } bier_fmask;
299         struct {
300             /**
301              * The BIER table's ID
302              */
303             bier_table_id_t fp_bier_tbl;
304         } bier_table;
305         struct {
306             /**
307              * The BIER imposition object
308              * this is part of the path's key, since the index_t
309              * of an imposition object is the object's key.
310              */
311             index_t fp_bier_imp;
312         } bier_imp;
313         struct {
314             /**
315      * The FIB index in which to perform the next lookup
316              */
317             fib_node_index_t fp_tbl_id;
318             /**
319              * The RPF-ID to tag the packets with
320              */
321             fib_rpf_id_t fp_rpf_id;
322         } deag;
323         struct {
324         } special;
325         struct {
326             /**
327              * The user provided 'exclusive' DPO
328              */
329             dpo_id_t fp_ex_dpo;
330         } exclusive;
331         struct {
332             /**
333              * The interface on which the local address is configured
334              */
335             u32 fp_interface;
336             /**
337              * The next-hop
338              */
339             ip46_address_t fp_addr;
340         } receive;
341         struct {
342             /**
343              * The interface on which the packets will be input.
344              */
345             u32 fp_interface;
346         } intf_rx;
347         struct {
348             /**
349              * The UDP Encap object this path resolves through
350              */
351             u32 fp_udp_encap_id;
352         } udp_encap;
353         struct {
354             /**
355              * The interface
356              */
357             u32 fp_interface;
358         } dvr;
359     };
360     STRUCT_MARK(path_hash_end);
361
362     /**
363      * Members in this last section represent information that is
364      * derived during resolution. It should not be copied to new paths
365      * nor compared.
366      */
367
368     /**
369      * Operational Flags
370      */
371     fib_path_oper_flags_t fp_oper_flags;
372
373     union {
374         /**
375          * the resolving via fib. not part of the union, since it is not part
376          * of the path's hash.
377          */
378         fib_node_index_t fp_via_fib;
379         /**
380          * the resolving bier-table
381          */
382         index_t fp_via_bier_tbl;
383         /**
384          * the resolving bier-fmask
385          */
386         index_t fp_via_bier_fmask;
387     };
388
389     /**
390      * The Data-path objects through which this path resolves for IP.
391      */
392     dpo_id_t fp_dpo;
393
394     /**
395      * the index of this path in the parent's child list.
396      */
397     u32 fp_sibling;
398 } fib_path_t;
399
400 /*
401  * Array of strings/names for the path types and attributes
402  */
403 static const char *fib_path_type_names[] = FIB_PATH_TYPES;
404 static const char *fib_path_oper_attribute_names[] = FIB_PATH_OPER_ATTRIBUTES;
405 static const char *fib_path_cfg_attribute_names[]  = FIB_PATH_CFG_ATTRIBUTES;
406
407 /*
408  * The memory pool from which we allocate all the paths
409  */
410 static fib_path_t *fib_path_pool;
411
412 /**
413  * the logger
414  */
415 vlib_log_class_t fib_path_logger;
416
417 /*
418  * Debug macro
419  */
420 #define FIB_PATH_DBG(_p, _fmt, _args...)                                \
421 {                                                                       \
422     vlib_log_debug (fib_path_logger,                                    \
423                     "[%U]: " _fmt,                                      \
424                     format_fib_path, fib_path_get_index(_p), 0,         \
425                     FIB_PATH_FORMAT_FLAGS_ONE_LINE,                     \
426                     ##_args);                                           \
427 }
428
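/*
 * fib_path_get
 *
 * Return the path object stored at the given pool index.
 */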
429 static fib_path_t *
430 fib_path_get (fib_node_index_t index)
431 {
432     return (pool_elt_at_index(fib_path_pool, index));
433 }
434
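/*
 * fib_path_get_index
 *
 * Return the pool index of the given path object.
 */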
435 static fib_node_index_t 
436 fib_path_get_index (fib_path_t *path)
437 {
438     return (path - fib_path_pool);
439 }
440
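/*
 * fib_path_get_node
 *
 * Return the path at the given index as its graph-node base type
 * (used via the node virtual function table).
 */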
441 static fib_node_t *
442 fib_path_get_node (fib_node_index_t index)
443 {
444     return ((fib_node_t*)fib_path_get(index));
445 }
446
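/*
 * fib_path_from_fib_node
 *
 * Downcast a graph node to the path it represents; asserts the node
 * really is a path.
 */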
447 static fib_path_t*
448 fib_path_from_fib_node (fib_node_t *node)
449 {
450     ASSERT(FIB_NODE_TYPE_PATH == node->fn_type);
451     return ((fib_path_t*)node);
452 }
453
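/*
 * format_fib_path
 *
 * Format a path for 'show' output. The va_args are the path's index,
 * the indent and the fib_format_path_flags_t flags.
 */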
454 u8 *
455 format_fib_path (u8 * s, va_list * args)
456 {
457     fib_node_index_t path_index = va_arg (*args, fib_node_index_t);
458     u32 indent = va_arg (*args, u32);
459     fib_format_path_flags_t flags =  va_arg (*args, fib_format_path_flags_t);
460     vnet_main_t * vnm = vnet_get_main();
461     fib_path_oper_attribute_t oattr;
462     fib_path_cfg_attribute_t cattr;
463     fib_path_t *path;
464     const char *eol;
465
466     if (flags & FIB_PATH_FORMAT_FLAGS_ONE_LINE)
467     {
468         eol = "";
469     }
470     else
471     {
472         eol = "\n";
473     }
474
475     path = fib_path_get(path_index);
476
477     s = format (s, "%Upath:[%d] ", format_white_space, indent,
478                 fib_path_get_index(path));
479     s = format (s, "pl-index:%d ", path->fp_pl_index);
480     s = format (s, "%U ", format_dpo_proto, path->fp_nh_proto);
481     s = format (s, "weight=%d ", path->fp_weight);
482     s = format (s, "pref=%d ", path->fp_preference);
483     s = format (s, "%s: ", fib_path_type_names[path->fp_type]);
484     if (FIB_PATH_OPER_FLAG_NONE != path->fp_oper_flags) {
485         s = format(s, " oper-flags:");
486         FOR_EACH_FIB_PATH_OPER_ATTRIBUTE(oattr) {
487             if ((1<<oattr) & path->fp_oper_flags) {
488                 s = format (s, "%s,", fib_path_oper_attribute_names[oattr]);
489             }
490         }
491     }
492     if (FIB_PATH_CFG_FLAG_NONE != path->fp_cfg_flags) {
493         s = format(s, " cfg-flags:");
494         FOR_EACH_FIB_PATH_CFG_ATTRIBUTE(cattr) {
495             if ((1<<cattr) & path->fp_cfg_flags) {
496                 s = format (s, "%s,", fib_path_cfg_attribute_names[cattr]);
497             }
498         }
499     }
500     if (!(flags & FIB_PATH_FORMAT_FLAGS_ONE_LINE))
501         s = format(s, "\n%U", format_white_space, indent+2);
502
503     switch (path->fp_type)
504     {
505     case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
506         s = format (s, "%U", format_ip46_address,
507                     &path->attached_next_hop.fp_nh,
508                     IP46_TYPE_ANY);
509         if (path->fp_oper_flags & FIB_PATH_OPER_FLAG_DROP)
510         {
511             s = format (s, " if_index:%d", path->attached_next_hop.fp_interface);
512         }
513         else
514         {
515             s = format (s, " %U",
516                         format_vnet_sw_interface_name,
517                         vnm,
518                         vnet_get_sw_interface(
519                             vnm,
520                             path->attached_next_hop.fp_interface));
521             if (vnet_sw_interface_is_p2p(vnet_get_main(),
522                                          path->attached_next_hop.fp_interface))
523             {
524                 s = format (s, " (p2p)");
525             }
526         }
527         if (!dpo_id_is_valid(&path->fp_dpo))
528         {
529             s = format(s, "%s%Uunresolved", eol, format_white_space, indent+2);
530         }
531         else
532         {
533             s = format(s, "%s%U%U", eol,
534                        format_white_space, indent,
535                        format_dpo_id,
536                        &path->fp_dpo, 13);
537         }
538         break;
539     case FIB_PATH_TYPE_ATTACHED:
540         if (path->fp_oper_flags & FIB_PATH_OPER_FLAG_DROP)
541         {
542             s = format (s, "if_index:%d", path->attached_next_hop.fp_interface);
543         }
544         else
545         {
546             s = format (s, " %U",
547                         format_vnet_sw_interface_name,
548                         vnm,
549                         vnet_get_sw_interface(
550                             vnm,
551                             path->attached.fp_interface));
552         }
553         break;
554     case FIB_PATH_TYPE_RECURSIVE:
555         if (DPO_PROTO_MPLS == path->fp_nh_proto)
556         {
557             s = format (s, "via %U %U",
558                         format_mpls_unicast_label,
559                         path->recursive.fp_nh.fp_local_label,
560                         format_mpls_eos_bit,
561                         path->recursive.fp_nh.fp_eos);
562         }
563         else
564         {
565             s = format (s, "via %U",
566                         format_ip46_address,
567                         &path->recursive.fp_nh.fp_ip,
568                         IP46_TYPE_ANY);
569         }
570         s = format (s, " in fib:%d",
571                     path->recursive.fp_tbl_id);
573         s = format (s, " via-fib:%d", path->fp_via_fib); 
574         s = format (s, " via-dpo:[%U:%d]",
575                     format_dpo_type, path->fp_dpo.dpoi_type, 
576                     path->fp_dpo.dpoi_index);
577
578         break;
579     case FIB_PATH_TYPE_UDP_ENCAP:
580         s = format (s, "UDP-encap ID:%d", path->udp_encap.fp_udp_encap_id);
581         break;
582     case FIB_PATH_TYPE_BIER_TABLE:
583         s = format (s, "via bier-table:[%U}",
584                     format_bier_table_id,
585                     &path->bier_table.fp_bier_tbl);
586         s = format (s, " via-dpo:[%U:%d]",
587                     format_dpo_type, path->fp_dpo.dpoi_type,
588                     path->fp_dpo.dpoi_index);
589         break;
590     case FIB_PATH_TYPE_BIER_FMASK:
591         s = format (s, "via-fmask:%d", path->bier_fmask.fp_bier_fmask); 
592         s = format (s, " via-dpo:[%U:%d]",
593                     format_dpo_type, path->fp_dpo.dpoi_type, 
594                     path->fp_dpo.dpoi_index);
595         break;
596     case FIB_PATH_TYPE_BIER_IMP:
597         s = format (s, "via %U", format_bier_imp,
598                     path->bier_imp.fp_bier_imp, 0, BIER_SHOW_BRIEF);
599         break;
600     case FIB_PATH_TYPE_DVR:
601         s = format (s, " %U",
602                     format_vnet_sw_interface_name,
603                     vnm,
604                     vnet_get_sw_interface(
605                         vnm,
606                         path->dvr.fp_interface));
607         break;
608     case FIB_PATH_TYPE_RECEIVE:
609     case FIB_PATH_TYPE_INTF_RX:
610     case FIB_PATH_TYPE_SPECIAL:
611     case FIB_PATH_TYPE_DEAG:
612     case FIB_PATH_TYPE_EXCLUSIVE:
613         if (dpo_id_is_valid(&path->fp_dpo))
614         {
615             s = format(s, "%U", format_dpo_id,
616                        &path->fp_dpo, indent+2);
617         }
618         break;
619     }
620     return (s);
621 }
622
623 /*
624  * fib_path_last_lock_gone
625  *
626  * We don't share paths, we share path lists, so the [un]lock functions
627  * are no-ops
628  */
629 static void
630 fib_path_last_lock_gone (fib_node_t *node)
631 {
632     ASSERT(0);
633 }
634
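/*
 * fib_path_attached_next_hop_get_adj
 *
 * Get (and lock) the neighbour adjacency, of the given link type, through
 * which an attached-nexthop path resolves. On p2p interfaces the
 * zero-address adj is used.
 */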
635 static const adj_index_t
636 fib_path_attached_next_hop_get_adj (fib_path_t *path,
637                                     vnet_link_t link)
638 {
639     if (vnet_sw_interface_is_p2p(vnet_get_main(),
640                                  path->attached_next_hop.fp_interface))
641     {
642         /*
643          * if the interface is p2p then the adj for the specific
644          * neighbour on that link will never exist. on p2p links
645          * the subnet address (the attached route) links to the
646          * auto-adj (see below), we want that adj here too.
647          */
648         return (adj_nbr_add_or_lock(dpo_proto_to_fib(path->fp_nh_proto),
649                                     link,
650                                     &zero_addr,
651                                     path->attached_next_hop.fp_interface));
652     }
653     else
654     {
655         return (adj_nbr_add_or_lock(dpo_proto_to_fib(path->fp_nh_proto),
656                                     link,
657                                     &path->attached_next_hop.fp_nh,
658                                     path->attached_next_hop.fp_interface));
659     }
660 }
661
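/*
 * fib_path_attached_next_hop_set
 *
 * Resolve an attached-nexthop path: stack it on its adjacency, become a
 * child of that adjacency, and mark the path unresolved if either the
 * interface or the adjacency is down.
 */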
662 static void
663 fib_path_attached_next_hop_set (fib_path_t *path)
664 {
665     /*
666      * resolve directly via the adjacency described by the
667      * interface and next-hop
668      */
669     dpo_set(&path->fp_dpo,
670             DPO_ADJACENCY,
671             path->fp_nh_proto,
672             fib_path_attached_next_hop_get_adj(
673                  path,
674                  dpo_proto_to_link(path->fp_nh_proto)));
675
676     /*
677      * become a child of the adjacency so we receive updates
678      * when its rewrite changes
679      */
680     path->fp_sibling = adj_child_add(path->fp_dpo.dpoi_index,
681                                      FIB_NODE_TYPE_PATH,
682                                      fib_path_get_index(path));
683
684     if (!vnet_sw_interface_is_admin_up(vnet_get_main(),
685                                       path->attached_next_hop.fp_interface) ||
686         !adj_is_up(path->fp_dpo.dpoi_index))
687     {
688         path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
689     }
690 }
691
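/*
 * fib_path_attached_get_adj
 *
 * Get (and lock) the adjacency for an attached path; a neighbour adj on
 * p2p interfaces, a glean adj otherwise.
 */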
692 static const adj_index_t
693 fib_path_attached_get_adj (fib_path_t *path,
694                            vnet_link_t link)
695 {
696     if (vnet_sw_interface_is_p2p(vnet_get_main(),
697                                  path->attached.fp_interface))
698     {
699         /*
700          * point-2-point interfaces do not require a glean, since
701          * there is nothing to ARP. Install a rewrite/nbr adj instead
702          */
703         return (adj_nbr_add_or_lock(dpo_proto_to_fib(path->fp_nh_proto),
704                                     link,
705                                     &zero_addr,
706                                     path->attached.fp_interface));
707     }
708     else
709     {
710         return (adj_glean_add_or_lock(dpo_proto_to_fib(path->fp_nh_proto),
711                                       link,
712                                       path->attached.fp_interface,
713                                       NULL));
714     }
715 }
716
717 /*
718  * create or update the path's recursive adj
719  */
720 static void
721 fib_path_recursive_adj_update (fib_path_t *path,
722                                fib_forward_chain_type_t fct,
723                                dpo_id_t *dpo)
724 {
725     dpo_id_t via_dpo = DPO_INVALID;
726
727     /*
728      * get the DPO to resolve through from the via-entry
729      */
730     fib_entry_contribute_forwarding(path->fp_via_fib,
731                                     fct,
732                                     &via_dpo);
733
734
735     /*
736      * hope for the best - clear if restrictions apply.
737      */
738     path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RESOLVED;
739
740     /*
741      * Validate any recursion constraints and over-ride the via
742      * adj if not met
743      */
744     if (path->fp_oper_flags & FIB_PATH_OPER_FLAG_RECURSIVE_LOOP)
745     {
746         path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
747         dpo_copy(&via_dpo, drop_dpo_get(path->fp_nh_proto));
748     }
749     else if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_RESOLVE_HOST)
750     {
751         /*
752          * the via FIB must be a host route.
753          * note the via FIB just added will always be a host route
754          * since it is an RR source added host route. So what we need to
755          * check is whether the route has other sources. If it does then
756          * some other source has added it as a host route. If it doesn't
757          * then it was added only here and inherits forwarding from a cover.
758          * the cover is not a host route.
759          * The RR source is the lowest priority source, so we check if it
760          * is the best. if it is there are no other sources.
761          */
762         if (fib_entry_get_best_source(path->fp_via_fib) >= FIB_SOURCE_RR)
763         {
764             path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
765             dpo_copy(&via_dpo, drop_dpo_get(path->fp_nh_proto));
766
767             /*
768              * PIC edge trigger. let the load-balance maps know
769              */
770             load_balance_map_path_state_change(fib_path_get_index(path));
771         }
772     }
773     else if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_RESOLVE_ATTACHED)
774     {
775         /*
776          * RR source entries inherit the flags from the cover, so
777          * we can check the via directly
778          */
779         if (!(FIB_ENTRY_FLAG_ATTACHED & fib_entry_get_flags(path->fp_via_fib)))
780         {
781             path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
782             dpo_copy(&via_dpo, drop_dpo_get(path->fp_nh_proto));
783
784             /*
785              * PIC edge trigger. let the load-balance maps know
786              */
787             load_balance_map_path_state_change(fib_path_get_index(path));
788         }
789     }
790     /*
791      * check for over-riding factors on the FIB entry itself
792      */
793     if (!fib_entry_is_resolved(path->fp_via_fib))
794     {
795         path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
796         dpo_copy(&via_dpo, drop_dpo_get(path->fp_nh_proto));
797
798         /*
799          * PIC edge trigger. let the load-balance maps know
800          */
801         load_balance_map_path_state_change(fib_path_get_index(path));
802     }
803
804     /*
805      * If this path is contributing a drop, then it's not resolved
806      */
807     if (dpo_is_drop(&via_dpo) || load_balance_is_drop(&via_dpo))
808     {
809         path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
810     }
811
812     /*
813      * update the path's contributed DPO
814      */
815     dpo_copy(dpo, &via_dpo);
816
817     FIB_PATH_DBG(path, "recursive update:");
818
819     dpo_reset(&via_dpo);
820 }
821
822 /*
823  * re-evaluate the forwarding state for a via-fmask path
824  */
825 static void
826 fib_path_bier_fmask_update (fib_path_t *path,
827                             dpo_id_t *dpo)
828 {
829     bier_fmask_contribute_forwarding(path->bier_fmask.fp_bier_fmask, dpo);
830
831     /*
832      * if we are stacking on the drop, then the path is not resolved
833      */
834     if (dpo_is_drop(dpo))
835     {
836         path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
837     }
838     else
839     {
840         path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RESOLVED;
841     }
842 }
843
844 /*
845  * fib_path_is_permanent_drop
846  *
847  * Return !0 if the path is configured to permanently drop,
848  * despite other attributes.
849  */
850 static int
851 fib_path_is_permanent_drop (fib_path_t *path)
852 {
853     return ((path->fp_cfg_flags & FIB_PATH_CFG_FLAG_DROP) ||
854             (path->fp_oper_flags & FIB_PATH_OPER_FLAG_DROP));
855 }
856
857 /*
858  * fib_path_unresolve
859  *
860  * Remove our dependency on the resolution target
861  */
862 static void
863 fib_path_unresolve (fib_path_t *path)
864 {
865     /*
866      * the forced drop path does not need unresolving
867      */
868     if (fib_path_is_permanent_drop(path))
869     {
870         return;
871     }
872
873     switch (path->fp_type)
874     {
875     case FIB_PATH_TYPE_RECURSIVE:
876         if (FIB_NODE_INDEX_INVALID != path->fp_via_fib)
877         {
878             fib_entry_child_remove(path->fp_via_fib,
879                                    path->fp_sibling);
880             fib_table_entry_special_remove(path->recursive.fp_tbl_id,
881                                            fib_entry_get_prefix(path->fp_via_fib),
882                                            FIB_SOURCE_RR);
883             fib_table_unlock(path->recursive.fp_tbl_id,
884                              dpo_proto_to_fib(path->fp_nh_proto),
885                              FIB_SOURCE_RR);
886             path->fp_via_fib = FIB_NODE_INDEX_INVALID;
887         }
888         break;
889     case FIB_PATH_TYPE_BIER_FMASK:
890         bier_fmask_child_remove(path->fp_via_bier_fmask,
891                                 path->fp_sibling);
892         break;
893     case FIB_PATH_TYPE_BIER_IMP:
894         bier_imp_unlock(path->fp_dpo.dpoi_index);
895         break;
896     case FIB_PATH_TYPE_BIER_TABLE:
897         bier_table_ecmp_unlock(path->fp_via_bier_tbl);
898         break;
899     case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
900         adj_child_remove(path->fp_dpo.dpoi_index,
901                          path->fp_sibling);
902         adj_unlock(path->fp_dpo.dpoi_index);
903         break;
904     case FIB_PATH_TYPE_ATTACHED:
905         adj_child_remove(path->fp_dpo.dpoi_index,
906                          path->fp_sibling);
907         adj_unlock(path->fp_dpo.dpoi_index);
908         break;
909     case FIB_PATH_TYPE_UDP_ENCAP:
910         udp_encap_unlock(path->fp_dpo.dpoi_index);
911         break;
912     case FIB_PATH_TYPE_EXCLUSIVE:
913         dpo_reset(&path->exclusive.fp_ex_dpo);
914         break;
915     case FIB_PATH_TYPE_SPECIAL:
916     case FIB_PATH_TYPE_RECEIVE:
917     case FIB_PATH_TYPE_INTF_RX:
918     case FIB_PATH_TYPE_DEAG:
919     case FIB_PATH_TYPE_DVR:
920         /*
921          * these hold only the path's DPO, which is reset below.
922          */
923         break;
924     }
925
926     /*
927      * release the adj we were holding and pick up the
928      * drop just in case.
929      */
930     dpo_reset(&path->fp_dpo);
931     path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
932
933     return;
934 }
935
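/*
 * fib_path_to_chain_type
 *
 * Return the forwarding chain type this path contributes to, based on its
 * next-hop protocol (and, for MPLS recursive paths, the EOS bit).
 */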
936 static fib_forward_chain_type_t
937 fib_path_to_chain_type (const fib_path_t *path)
938 {
939     if (DPO_PROTO_MPLS == path->fp_nh_proto)
940     {
941         if (FIB_PATH_TYPE_RECURSIVE == path->fp_type &&
942             MPLS_EOS == path->recursive.fp_nh.fp_eos)
943         {
944             return (FIB_FORW_CHAIN_TYPE_MPLS_EOS);
945         }
946         else
947         {
948             return (FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS);
949         }
950     }
951     else
952     {
953         return (fib_forw_chain_type_from_dpo_proto(path->fp_nh_proto));
954     }
955 }
956
957 /*
958  * fib_path_back_walk_notify
959  *
960  * A back walk has reached this path.
961  */
962 static fib_node_back_walk_rc_t
963 fib_path_back_walk_notify (fib_node_t *node,
964                            fib_node_back_walk_ctx_t *ctx)
965 {
966     fib_path_t *path;
967
968     path = fib_path_from_fib_node(node);
969
970     FIB_PATH_DBG(path, "bw:%U",
971                  format_fib_node_bw_reason, ctx->fnbw_reason);
972
973     switch (path->fp_type)
974     {
975     case FIB_PATH_TYPE_RECURSIVE:
976         if (FIB_NODE_BW_REASON_FLAG_EVALUATE & ctx->fnbw_reason)
977         {
978             /*
979              * modify the recursive adjacency to use the new forwarding
980              * of the via-fib.
981              * this update is visible to packets in flight in the DP.
982              */
983             fib_path_recursive_adj_update(
984                 path,
985                 fib_path_to_chain_type(path),
986                 &path->fp_dpo);
987         }
988         if ((FIB_NODE_BW_REASON_FLAG_ADJ_UPDATE & ctx->fnbw_reason) ||
989             (FIB_NODE_BW_REASON_FLAG_ADJ_DOWN   & ctx->fnbw_reason))
990         {
991             /*
992              * ADJ updates (complete<->incomplete) do not need to propagate to
993              * recursive entries.
994              * The only reason it's needed as far back as here is that the adj
995              * and the incomplete adj are a different DPO type, so the LBs need
996              * to re-stack.
997              * If this walk was quashed in the fib_entry, then any non-fib_path
998              * children (like tunnels that collapse out the LB when they stack)
999              * would not see the update.
1000              */
1001             return (FIB_NODE_BACK_WALK_CONTINUE);
1002         }
1003         break;
1004     case FIB_PATH_TYPE_BIER_FMASK:
1005         if (FIB_NODE_BW_REASON_FLAG_EVALUATE & ctx->fnbw_reason)
1006         {
1007             /*
1008              * update to use the BIER fmask's new forwarding
1009              */
1010             fib_path_bier_fmask_update(path, &path->fp_dpo);
1011         }
1012         if ((FIB_NODE_BW_REASON_FLAG_ADJ_UPDATE & ctx->fnbw_reason) ||
1013             (FIB_NODE_BW_REASON_FLAG_ADJ_DOWN   & ctx->fnbw_reason))
1014         {
1015             /*
1016              * ADJ updates (complete<->incomplete) do not need to propagate to
1017              * recursive entries.
1018              * The only reason it's needed as far back as here is that the adj
1019              * and the incomplete adj are a different DPO type, so the LBs need
1020              * to re-stack.
1021              * If this walk was quashed in the fib_entry, then any non-fib_path
1022              * children (like tunnels that collapse out the LB when they stack)
1023              * would not see the update.
1024              */
1025             return (FIB_NODE_BACK_WALK_CONTINUE);
1026         }
1027         break;
1028     case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
1029         /*
1030 FIXME comment
1031          * An ADJ_UPDATE backwalk passes silently through here and up to
1032          * the path-list when the multipath adj collapse occurs.
1033          * The reason we do this is the assumption that VPP
1034          * runs in an environment where the Control-Plane is remote
1035          * and hence reacts slowly to link up/down. In order to remove
1036          * this down link from the ECMP set quickly, we back-walk.
1037          * VPP also has dedicated CPUs, so we are not stealing resources
1038          * from the CP to do so.
1039          */
1040         if (FIB_NODE_BW_REASON_FLAG_INTERFACE_UP & ctx->fnbw_reason)
1041         {
1042             if (path->fp_oper_flags & FIB_PATH_OPER_FLAG_RESOLVED)
1043             {
1044                 /*
1045                  * already resolved. no need to walk back again
1046                  */
1047                 return (FIB_NODE_BACK_WALK_CONTINUE);
1048             }
1049             path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RESOLVED;
1050         }
1051         if (FIB_NODE_BW_REASON_FLAG_INTERFACE_DOWN & ctx->fnbw_reason)
1052         {
1053             if (!(path->fp_oper_flags & FIB_PATH_OPER_FLAG_RESOLVED))
1054             {
1055                 /*
1056                  * already unresolved. no need to walk back again
1057                  */
1058                 return (FIB_NODE_BACK_WALK_CONTINUE);
1059             }
1060             path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
1061         }
1062         if (FIB_NODE_BW_REASON_FLAG_INTERFACE_DELETE & ctx->fnbw_reason)
1063         {
1064             /*
1065              * The interface this path resolves through has been deleted.
1066              * This will leave the path in a permanent drop state. The route
1067              * needs to be removed and readded (and hence the path-list deleted)
1068              * before it can forward again.
1069              */
1070             fib_path_unresolve(path);
1071             path->fp_oper_flags |= FIB_PATH_OPER_FLAG_DROP;
1072         }
1073         if (FIB_NODE_BW_REASON_FLAG_ADJ_UPDATE & ctx->fnbw_reason)
1074         {
1075             /*
1076              * restack the DPO to pick up the correct DPO sub-type
1077              */
1078             uword if_is_up;
1079             adj_index_t ai;
1080
1081             if_is_up = vnet_sw_interface_is_admin_up(
1082                            vnet_get_main(),
1083                            path->attached_next_hop.fp_interface);
1084
1085             ai = fib_path_attached_next_hop_get_adj(
1086                      path,
1087                      dpo_proto_to_link(path->fp_nh_proto));
1088
1089             path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
1090             if (if_is_up && adj_is_up(ai))
1091             {
1092                 path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RESOLVED;
1093             }
1094
1095             dpo_set(&path->fp_dpo, DPO_ADJACENCY, path->fp_nh_proto, ai);
1096             adj_unlock(ai);
1097
1098             if (!if_is_up)
1099             {
1100                 /*
1101                  * If the interface is not up there is no reason to walk
1102                  * back to children. if we did they would only evaluate
1103                  * that this path is unresolved and hence it would
1104                  * not contribute the adjacency - so it would be wasted
1105                  * CPU time.
1106                  */
1107                 return (FIB_NODE_BACK_WALK_CONTINUE);
1108             }
1109         }
1110         if (FIB_NODE_BW_REASON_FLAG_ADJ_DOWN & ctx->fnbw_reason)
1111         {
1112             if (!(path->fp_oper_flags & FIB_PATH_OPER_FLAG_RESOLVED))
1113             {
1114                 /*
1115                  * already unresolved. no need to walk back again
1116                  */
1117                 return (FIB_NODE_BACK_WALK_CONTINUE);
1118             }
1119             /*
1120              * the adj has gone down. the path is no longer resolved.
1121              */
1122             path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
1123         }
1124         break;
1125     case FIB_PATH_TYPE_ATTACHED:
1126     case FIB_PATH_TYPE_DVR:
1127         /*
1128          * FIXME; this could schedule a lower priority walk, since attached
1129          * routes are not usually in ECMP configurations so the backwalk to
1130          * the FIB entry does not need to be high priority
1131          */
1132         if (FIB_NODE_BW_REASON_FLAG_INTERFACE_UP & ctx->fnbw_reason)
1133         {
1134             path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RESOLVED;
1135         }
1136         if (FIB_NODE_BW_REASON_FLAG_INTERFACE_DOWN & ctx->fnbw_reason)
1137         {
1138             path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
1139         }
1140         if (FIB_NODE_BW_REASON_FLAG_INTERFACE_DELETE & ctx->fnbw_reason)
1141         {
1142             fib_path_unresolve(path);
1143             path->fp_oper_flags |= FIB_PATH_OPER_FLAG_DROP;
1144         }
1145         break;
1146     case FIB_PATH_TYPE_UDP_ENCAP:
1147     {
1148         dpo_id_t via_dpo = DPO_INVALID;
1149
1150         /*
1151          * hope for the best - clear if restrictions apply.
1152          */
1153         path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RESOLVED;
1154
1155         udp_encap_contribute_forwarding(path->udp_encap.fp_udp_encap_id,
1156                                         path->fp_nh_proto,
1157                                         &via_dpo);
1158         /*
1159          * If this path is contributing a drop, then it's not resolved
1160          */
1161         if (dpo_is_drop(&via_dpo) || load_balance_is_drop(&via_dpo))
1162         {
1163             path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
1164         }
1165
1166         /*
1167          * update the path's contributed DPO
1168          */
1169         dpo_copy(&path->fp_dpo, &via_dpo);
1170         dpo_reset(&via_dpo);
1171         break;
1172     }
1173     case FIB_PATH_TYPE_INTF_RX:
1174         ASSERT(0);
1175     case FIB_PATH_TYPE_DEAG:
1176         /*
1177          * FIXME When VRF delete is allowed this will need a poke.
1178          */
1179     case FIB_PATH_TYPE_SPECIAL:
1180     case FIB_PATH_TYPE_RECEIVE:
1181     case FIB_PATH_TYPE_EXCLUSIVE:
1182     case FIB_PATH_TYPE_BIER_TABLE:
1183     case FIB_PATH_TYPE_BIER_IMP:
1184         /*
1185          * these path types have no parents. so to be
1186          * walked from one is unexpected.
1187          */
1188         ASSERT(0);
1189         break;
1190     }
1191
1192     /*
1193      * propagate the backwalk further to the path-list
1194      */
1195     fib_path_list_back_walk(path->fp_pl_index, ctx);
1196
1197     return (FIB_NODE_BACK_WALK_CONTINUE);
1198 }
1199
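/*
 * fib_path_memory_show
 *
 * Show the memory usage of the path pool.
 */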
1200 static void
1201 fib_path_memory_show (void)
1202 {
1203     fib_show_memory_usage("Path",
1204                           pool_elts(fib_path_pool),
1205                           pool_len(fib_path_pool),
1206                           sizeof(fib_path_t));
1207 }
1208
1209 /*
1210  * The FIB path's graph node virtual function table
1211  */
1212 static const fib_node_vft_t fib_path_vft = {
1213     .fnv_get = fib_path_get_node,
1214     .fnv_last_lock = fib_path_last_lock_gone,
1215     .fnv_back_walk = fib_path_back_walk_notify,
1216     .fnv_mem_show = fib_path_memory_show,
1217 };
1218
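/*
 * fib_path_route_flags_to_cfg_flags
 *
 * Convert the client-supplied route-path flags into the path's
 * configuration flags.
 */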
1219 static fib_path_cfg_flags_t
1220 fib_path_route_flags_to_cfg_flags (const fib_route_path_t *rpath)
1221 {
1222     fib_path_cfg_flags_t cfg_flags = FIB_PATH_CFG_FLAG_NONE;
1223
1224     if (rpath->frp_flags & FIB_ROUTE_PATH_RESOLVE_VIA_HOST)
1225         cfg_flags |= FIB_PATH_CFG_FLAG_RESOLVE_HOST;
1226     if (rpath->frp_flags & FIB_ROUTE_PATH_RESOLVE_VIA_ATTACHED)
1227         cfg_flags |= FIB_PATH_CFG_FLAG_RESOLVE_ATTACHED;
1228     if (rpath->frp_flags & FIB_ROUTE_PATH_LOCAL)
1229         cfg_flags |= FIB_PATH_CFG_FLAG_LOCAL;
1230     if (rpath->frp_flags & FIB_ROUTE_PATH_ATTACHED)
1231         cfg_flags |= FIB_PATH_CFG_FLAG_ATTACHED;
1232     if (rpath->frp_flags & FIB_ROUTE_PATH_INTF_RX)
1233         cfg_flags |= FIB_PATH_CFG_FLAG_INTF_RX;
1234     if (rpath->frp_flags & FIB_ROUTE_PATH_RPF_ID)
1235         cfg_flags |= FIB_PATH_CFG_FLAG_RPF_ID;
1236     if (rpath->frp_flags & FIB_ROUTE_PATH_EXCLUSIVE)
1237         cfg_flags |= FIB_PATH_CFG_FLAG_EXCLUSIVE;
1238     if (rpath->frp_flags & FIB_ROUTE_PATH_DROP)
1239         cfg_flags |= FIB_PATH_CFG_FLAG_DROP;
1240     if (rpath->frp_flags & FIB_ROUTE_PATH_SOURCE_LOOKUP)
1241         cfg_flags |= FIB_PATH_CFG_FLAG_DEAG_SRC;
1242
1243     return (cfg_flags);
1244 }
1245
1246 /*
1247  * fib_path_create
1248  *
1249  * Create and initialise a new path object.
1250  * return the index of the path.
1251  */
1252 fib_node_index_t
1253 fib_path_create (fib_node_index_t pl_index,
1254                  const fib_route_path_t *rpath)
1255 {
1256     fib_path_t *path;
1257
1258     pool_get(fib_path_pool, path);
1259     clib_memset(path, 0, sizeof(*path));
1260
1261     fib_node_init(&path->fp_node,
1262                   FIB_NODE_TYPE_PATH);
1263
1264     dpo_reset(&path->fp_dpo);
1265     path->fp_pl_index = pl_index;
1266     path->fp_nh_proto = rpath->frp_proto;
1267     path->fp_via_fib = FIB_NODE_INDEX_INVALID;
1268     path->fp_weight = rpath->frp_weight;
1269     if (0 == path->fp_weight)
1270     {
1271         /*
1272          * a weight of 0 is a meaningless value. We could either reject it, and thus force
1273          * clients to always use 1, or we can accept it and fix it up appropriately.
1274          */
1275         path->fp_weight = 1;
1276     }
1277     path->fp_preference = rpath->frp_preference;
1278     path->fp_cfg_flags = fib_path_route_flags_to_cfg_flags(rpath);
1279
1280     /*
1281      * deduce the path's type from the parameters and save what is needed.
1282      */
1283     if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_LOCAL)
1284     {
1285         path->fp_type = FIB_PATH_TYPE_RECEIVE;
1286         path->receive.fp_interface = rpath->frp_sw_if_index;
1287         path->receive.fp_addr = rpath->frp_addr;
1288     }
1289     else if (rpath->frp_flags & FIB_ROUTE_PATH_UDP_ENCAP)
1290     {
1291         path->fp_type = FIB_PATH_TYPE_UDP_ENCAP;
1292         path->udp_encap.fp_udp_encap_id = rpath->frp_udp_encap_id;
1293     }
1294     else if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_INTF_RX)
1295     {
1296         path->fp_type = FIB_PATH_TYPE_INTF_RX;
1297         path->intf_rx.fp_interface = rpath->frp_sw_if_index;
1298     }
1299     else if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_RPF_ID)
1300     {
1301         path->fp_type = FIB_PATH_TYPE_DEAG;
1302         path->deag.fp_tbl_id = rpath->frp_fib_index;
1303         path->deag.fp_rpf_id = rpath->frp_rpf_id;
1304     }
1305     else if (rpath->frp_flags & FIB_ROUTE_PATH_BIER_FMASK)
1306     {
1307         path->fp_type = FIB_PATH_TYPE_BIER_FMASK;
1308         path->bier_fmask.fp_bier_fmask = rpath->frp_bier_fmask;
1309     }
1310     else if (rpath->frp_flags & FIB_ROUTE_PATH_BIER_IMP)
1311     {
1312         path->fp_type = FIB_PATH_TYPE_BIER_IMP;
1313         path->bier_imp.fp_bier_imp = rpath->frp_bier_imp;
1314     }
1315     else if (rpath->frp_flags & FIB_ROUTE_PATH_BIER_TABLE)
1316     {
1317         path->fp_type = FIB_PATH_TYPE_BIER_TABLE;
1318         path->bier_table.fp_bier_tbl = rpath->frp_bier_tbl;
1319     }
1320     else if (rpath->frp_flags & FIB_ROUTE_PATH_DEAG)
1321     {
1322         path->fp_type = FIB_PATH_TYPE_DEAG;
1323         path->deag.fp_tbl_id = rpath->frp_fib_index;
1324     }
1325     else if (rpath->frp_flags & FIB_ROUTE_PATH_DVR)
1326     {
1327         path->fp_type = FIB_PATH_TYPE_DVR;
1328         path->dvr.fp_interface = rpath->frp_sw_if_index;
1329     }
1330     else if (rpath->frp_flags & FIB_ROUTE_PATH_EXCLUSIVE)
1331     {
1332         path->fp_type = FIB_PATH_TYPE_EXCLUSIVE;
1333         dpo_copy(&path->exclusive.fp_ex_dpo, &rpath->dpo);
1334     }
1335     else if (~0 != rpath->frp_sw_if_index)
1336     {
1337         if (ip46_address_is_zero(&rpath->frp_addr))
1338         {
1339             path->fp_type = FIB_PATH_TYPE_ATTACHED;
1340             path->attached.fp_interface = rpath->frp_sw_if_index;
1341         }
1342         else
1343         {
1344             path->fp_type = FIB_PATH_TYPE_ATTACHED_NEXT_HOP;
1345             path->attached_next_hop.fp_interface = rpath->frp_sw_if_index;
1346             path->attached_next_hop.fp_nh = rpath->frp_addr;
1347         }
1348     }
1349     else
1350     {
1351         if (ip46_address_is_zero(&rpath->frp_addr))
1352         {
1353             if (~0 == rpath->frp_fib_index)
1354             {
1355                 path->fp_type = FIB_PATH_TYPE_SPECIAL;
1356             }
1357             else
1358             {
1359                 path->fp_type = FIB_PATH_TYPE_DEAG;
1360                 path->deag.fp_tbl_id = rpath->frp_fib_index;
1361                 path->deag.fp_rpf_id = ~0;
1362             }
1363         }
1364         else
1365         {
1366             path->fp_type = FIB_PATH_TYPE_RECURSIVE;
1367             if (DPO_PROTO_MPLS == path->fp_nh_proto)
1368             {
1369                 path->recursive.fp_nh.fp_local_label = rpath->frp_local_label;
1370                 path->recursive.fp_nh.fp_eos = rpath->frp_eos;
1371             }
1372             else
1373             {
1374                 path->recursive.fp_nh.fp_ip = rpath->frp_addr;
1375             }
1376             path->recursive.fp_tbl_id = rpath->frp_fib_index;
1377         }
1378     }
1379
1380     FIB_PATH_DBG(path, "create");
1381
1382     return (fib_path_get_index(path));
1383 }
1384
1385 /*
1386  * fib_path_create_special
1387  *
1388  * Create and initialise a new path object.
1389  * return the index of the path.
1390  */
1391 fib_node_index_t
1392 fib_path_create_special (fib_node_index_t pl_index,
1393                          dpo_proto_t nh_proto,
1394                          fib_path_cfg_flags_t flags,
1395                          const dpo_id_t *dpo)
1396 {
1397     fib_path_t *path;
1398
1399     pool_get(fib_path_pool, path);
1400     clib_memset(path, 0, sizeof(*path));
1401
1402     fib_node_init(&path->fp_node,
1403                   FIB_NODE_TYPE_PATH);
1404     dpo_reset(&path->fp_dpo);
1405
1406     path->fp_pl_index = pl_index;
1407     path->fp_weight = 1;
1408     path->fp_preference = 0;
1409     path->fp_nh_proto = nh_proto;
1410     path->fp_via_fib = FIB_NODE_INDEX_INVALID;
1411     path->fp_cfg_flags = flags;
1412
1413     if (FIB_PATH_CFG_FLAG_DROP & flags)
1414     {
1415         path->fp_type = FIB_PATH_TYPE_SPECIAL;
1416     }
1417     else if (FIB_PATH_CFG_FLAG_LOCAL & flags)
1418     {
1419         path->fp_type = FIB_PATH_TYPE_RECEIVE;
1420         path->attached.fp_interface = FIB_NODE_INDEX_INVALID;
1421     }
1422     else
1423     {
1424         path->fp_type = FIB_PATH_TYPE_EXCLUSIVE;
1425         ASSERT(NULL != dpo);
1426         dpo_copy(&path->exclusive.fp_ex_dpo, dpo);
1427     }
1428
1429     return (fib_path_get_index(path));
1430 }
1431
1432 /*
1433  * fib_path_copy
1434  *
1435  * Copy a path. return index of new path.
1436  */
1437 fib_node_index_t
1438 fib_path_copy (fib_node_index_t path_index,
1439                fib_node_index_t path_list_index)
1440 {
1441     fib_path_t *path, *orig_path;
1442
1443     pool_get(fib_path_pool, path);
1444
1445     orig_path = fib_path_get(path_index);
1446     ASSERT(NULL != orig_path);
1447
1448     memcpy(path, orig_path, sizeof(*path));
1449
1450     FIB_PATH_DBG(path, "create-copy:%d", path_index);
1451
1452     /*
1453      * reset the dynamic section
1454      */
1455     fib_node_init(&path->fp_node, FIB_NODE_TYPE_PATH);
1456     path->fp_oper_flags     = FIB_PATH_OPER_FLAG_NONE;
1457     path->fp_pl_index  = path_list_index;
1458     path->fp_via_fib   = FIB_NODE_INDEX_INVALID;
1459     clib_memset(&path->fp_dpo, 0, sizeof(path->fp_dpo));
1460     dpo_reset(&path->fp_dpo);
1461
1462     return (fib_path_get_index(path));
1463 }
1464
1465 /*
1466  * fib_path_destroy
1467  *
1468  * destroy a path that is no longer required
1469  */
1470 void
1471 fib_path_destroy (fib_node_index_t path_index)
1472 {
1473     fib_path_t *path;
1474
1475     path = fib_path_get(path_index);
1476
1477     ASSERT(NULL != path);
1478     FIB_PATH_DBG(path, "destroy");
1479
1480     fib_path_unresolve(path);
1481
1482     fib_node_deinit(&path->fp_node);
1483     pool_put(fib_path_pool, path);
1484 }
1485
1486 /*
1487  * fib_path_hash
1488  *
1489  * compute the hash of a path's key (the memory between the hash markers)
1490  */
1491 uword
1492 fib_path_hash (fib_node_index_t path_index)
1493 {
1494     fib_path_t *path;
1495
1496     path = fib_path_get(path_index);
1497
1498     return (hash_memory(STRUCT_MARK_PTR(path, path_hash_start),
1499                         (STRUCT_OFFSET_OF(fib_path_t, path_hash_end) -
1500                          STRUCT_OFFSET_OF(fib_path_t, path_hash_start)),
1501                         0));
1502 }
1503
1504 /*
1505  * fib_path_cmp_i
1506  *
1507  * Compare two paths for equivalence.
1508  */
1509 static int
1510 fib_path_cmp_i (const fib_path_t *path1,
1511                 const fib_path_t *path2)
1512 {
1513     int res;
1514
1515     res = 1;
1516
1517     /*
1518      * paths of different types and protocol are not equal.
1519      * paths that differ only in weight and/or preference are the same path.
1520      */
1521     if (path1->fp_type != path2->fp_type)
1522     {
1523         res = (path1->fp_type - path2->fp_type);
1524     }
1525     else if (path1->fp_nh_proto != path2->fp_nh_proto)
1526     {
1527         res = (path1->fp_nh_proto - path2->fp_nh_proto);
1528     }
1529     else
1530     {
1531         /*
1532          * both paths are of the same type.
1533          * consider each type and its attributes in turn.
1534          */
1535         switch (path1->fp_type)
1536         {
1537         case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
1538             res = ip46_address_cmp(&path1->attached_next_hop.fp_nh,
1539                                    &path2->attached_next_hop.fp_nh);
1540             if (0 == res) {
1541                 res = (path1->attached_next_hop.fp_interface -
1542                        path2->attached_next_hop.fp_interface);
1543             }
1544             break;
1545         case FIB_PATH_TYPE_ATTACHED:
1546             res = (path1->attached.fp_interface -
1547                    path2->attached.fp_interface);
1548             break;
1549         case FIB_PATH_TYPE_RECURSIVE:
1550             res = ip46_address_cmp(&path1->recursive.fp_nh,
1551                                    &path2->recursive.fp_nh);
1552  
1553             if (0 == res)
1554             {
1555                 res = (path1->recursive.fp_tbl_id - path2->recursive.fp_tbl_id);
1556             }
1557             break;
1558         case FIB_PATH_TYPE_BIER_FMASK:
1559             res = (path1->bier_fmask.fp_bier_fmask -
1560                    path2->bier_fmask.fp_bier_fmask);
1561             break;
1562         case FIB_PATH_TYPE_BIER_IMP:
1563             res = (path1->bier_imp.fp_bier_imp -
1564                    path2->bier_imp.fp_bier_imp);
1565             break;
1566         case FIB_PATH_TYPE_BIER_TABLE:
1567             res = bier_table_id_cmp(&path1->bier_table.fp_bier_tbl,
1568                                     &path2->bier_table.fp_bier_tbl);
1569             break;
1570         case FIB_PATH_TYPE_DEAG:
1571             res = (path1->deag.fp_tbl_id - path2->deag.fp_tbl_id);
1572             if (0 == res)
1573             {
1574                 res = (path1->deag.fp_rpf_id - path2->deag.fp_rpf_id);
1575             }
1576             break;
1577         case FIB_PATH_TYPE_INTF_RX:
1578             res = (path1->intf_rx.fp_interface - path2->intf_rx.fp_interface);
1579             break;
1580         case FIB_PATH_TYPE_UDP_ENCAP:
1581             res = (path1->udp_encap.fp_udp_encap_id - path2->udp_encap.fp_udp_encap_id);
1582             break;
1583         case FIB_PATH_TYPE_DVR:
1584             res = (path1->dvr.fp_interface - path2->dvr.fp_interface);
1585             break;
1586         case FIB_PATH_TYPE_EXCLUSIVE:
1587             res = dpo_cmp(&path1->exclusive.fp_ex_dpo, &path2->exclusive.fp_ex_dpo);
1588             break;
1589         case FIB_PATH_TYPE_SPECIAL:
1590         case FIB_PATH_TYPE_RECEIVE:
1591             res = 0;
1592             break;
1593         }
1594     }
1595     return (res);
1596 }
1597
1598 /*
1599  * fib_path_cmp_for_sort
1600  *
1601  * Compare two paths for equivalence. Used during path sorting.
1602  * As usual 0 means equal.
1603  */
1604 int
1605 fib_path_cmp_for_sort (void * v1,
1606                        void * v2)
1607 {
1608     fib_node_index_t *pi1 = v1, *pi2 = v2;
1609     fib_path_t *path1, *path2;
1610
1611     path1 = fib_path_get(*pi1);
1612     path2 = fib_path_get(*pi2);
1613
1614     /*
1615      * when sorting paths we want the highest preference paths
1616      * first, so that the set of choices built is in preference order
1617      */
1618     if (path1->fp_preference != path2->fp_preference)
1619     {
1620         return (path1->fp_preference - path2->fp_preference);
1621     }
1622
1623     return (fib_path_cmp_i(path1, path2));
1624 }
1625
1626 /*
1627  * fib_path_cmp
1628  *
1629  * Compare two paths for equivalence.
1630  */
1631 int
1632 fib_path_cmp (fib_node_index_t pi1,
1633               fib_node_index_t pi2)
1634 {
1635     fib_path_t *path1, *path2;
1636
1637     path1 = fib_path_get(pi1);
1638     path2 = fib_path_get(pi2);
1639
1640     return (fib_path_cmp_i(path1, path2));
1641 }
1642
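/*
 * fib_path_cmp_w_route_path
 *
 * Compare a path against the attributes of a route-path supplied in an
 * update. As usual 0 means they match.
 */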
1643 int
1644 fib_path_cmp_w_route_path (fib_node_index_t path_index,
1645                            const fib_route_path_t *rpath)
1646 {
1647     fib_path_t *path;
1648     int res;
1649
1650     path = fib_path_get(path_index);
1651
1652     res = 1;
1653
1654     if (path->fp_weight != rpath->frp_weight)
1655     {
1656         res = (path->fp_weight - rpath->frp_weight);
1657     }
1658     else
1659     {
1660         /*
1661          * the weights match; compare the path's type-specific
1662          * attributes against those given in the route-path.
1663          */
1664         switch (path->fp_type)
1665         {
1666         case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
1667             res = ip46_address_cmp(&path->attached_next_hop.fp_nh,
1668                                    &rpath->frp_addr);
1669             if (0 == res)
1670             {
1671                 res = (path->attached_next_hop.fp_interface -
1672                        rpath->frp_sw_if_index);
1673             }
1674             break;
1675         case FIB_PATH_TYPE_ATTACHED:
1676             res = (path->attached.fp_interface - rpath->frp_sw_if_index);
1677             break;
1678         case FIB_PATH_TYPE_RECURSIVE:
1679             if (DPO_PROTO_MPLS == path->fp_nh_proto)
1680             {
1681                 res = path->recursive.fp_nh.fp_local_label - rpath->frp_local_label;
1682
1683                 if (res == 0)
1684                 {
1685                     res = path->recursive.fp_nh.fp_eos - rpath->frp_eos;
1686                 }
1687             }
1688             else
1689             {
1690                 res = ip46_address_cmp(&path->recursive.fp_nh.fp_ip,
1691                                        &rpath->frp_addr);
1692             }
1693
1694             if (0 == res)
1695             {
1696                 res = (path->recursive.fp_tbl_id - rpath->frp_fib_index);
1697             }
1698             break;
1699         case FIB_PATH_TYPE_BIER_FMASK:
1700             res = (path->bier_fmask.fp_bier_fmask - rpath->frp_bier_fmask);
1701             break;
1702         case FIB_PATH_TYPE_BIER_IMP:
1703             res = (path->bier_imp.fp_bier_imp - rpath->frp_bier_imp);
1704             break;
1705         case FIB_PATH_TYPE_BIER_TABLE:
1706             res = bier_table_id_cmp(&path->bier_table.fp_bier_tbl,
1707                                     &rpath->frp_bier_tbl);
1708             break;
1709         case FIB_PATH_TYPE_INTF_RX:
1710             res = (path->intf_rx.fp_interface - rpath->frp_sw_if_index);
1711             break;
1712         case FIB_PATH_TYPE_UDP_ENCAP:
1713             res = (path->udp_encap.fp_udp_encap_id - rpath->frp_udp_encap_id);
1714             break;
1715         case FIB_PATH_TYPE_DEAG:
1716             res = (path->deag.fp_tbl_id - rpath->frp_fib_index);
1717             if (0 == res)
1718             {
1719                 res = (path->deag.fp_rpf_id - rpath->frp_rpf_id);
1720             }
1721             break;
1722         case FIB_PATH_TYPE_DVR:
1723             res = (path->dvr.fp_interface - rpath->frp_sw_if_index);
1724             break;
1725         case FIB_PATH_TYPE_EXCLUSIVE:
1726             res = dpo_cmp(&path->exclusive.fp_ex_dpo, &rpath->dpo);
1727             break;
1728         case FIB_PATH_TYPE_SPECIAL:
1729         case FIB_PATH_TYPE_RECEIVE:
1730             res = 0;
1731             break;
1732         }
1733     }
1734     return (res);
1735 }
1736
1737 /*
1738  * fib_path_recursive_loop_detect
1739  *
1740  * A forward walk of the FIB object graph to detect a cycle/loop. This
1741  * walk is initiated when an entry links to a new path-list or unlinks from an old one.
1742  * The entry vector passed contains all the FIB entries that are children of this
1743  * path (i.e. all the entries encountered on the walk so far). If this vector
1744  * contains the entry this path resolves via, then a loop is about to form.
1745  * The loop must be allowed to form, since we need the dependencies in place
1746  * so that we can track when the loop breaks.
1747  * However, we MUST not produce a loop in the forwarding graph (else packets
1748  * would loop around the switch path until the loop breaks), so we mark recursive
1749  * paths as looped so that they do not contribute forwarding information.
1750  * By marking the path as looped, an entry such as:
1751  *    X/Y
1752  *     via a.a.a.a (looped)
1753  *     via b.b.b.b (not looped)
1754  * can still forward using only the information provided by b.b.b.b.
1755  */
1756 int
1757 fib_path_recursive_loop_detect (fib_node_index_t path_index,
1758                                 fib_node_index_t **entry_indicies)
1759 {
1760     fib_path_t *path;
1761
1762     path = fib_path_get(path_index);
1763
1764     /*
1765      * the forced drop path is never looped, cos it is never resolved.
1766      */
1767     if (fib_path_is_permanent_drop(path))
1768     {
1769         return (0);
1770     }
1771
1772     switch (path->fp_type)
1773     {
1774     case FIB_PATH_TYPE_RECURSIVE:
1775     {
1776         fib_node_index_t *entry_index, *entries;
1777         int looped = 0;
1778         entries = *entry_indicies;
1779
1780         vec_foreach(entry_index, entries) {
1781             if (*entry_index == path->fp_via_fib)
1782             {
1783                 /*
1784                  * the entry that is about to link to this path-list (or
1785                  * one of this path-list's children) is the same entry that
1786                  * this recursive path resolves through. this is a cycle.
1787                  * abort the walk.
1788                  */
1789                 looped = 1;
1790                 break;
1791             }
1792         }
1793
1794         if (looped)
1795         {
1796             FIB_PATH_DBG(path, "recursive loop formed");
1797             path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RECURSIVE_LOOP;
1798
1799             dpo_copy(&path->fp_dpo, drop_dpo_get(path->fp_nh_proto));
1800         }
1801         else
1802         {
1803             /*
1804              * no loop here yet. keep forward walking the graph.
1805              */
1806             if (fib_entry_recursive_loop_detect(path->fp_via_fib, entry_indicies))
1807             {
1808                 FIB_PATH_DBG(path, "recursive loop formed");
1809                 path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RECURSIVE_LOOP;
1810             }
1811             else
1812             {
1813                 FIB_PATH_DBG(path, "recursive loop cleared");
1814                 path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RECURSIVE_LOOP;
1815             }
1816         }
1817         break;
1818     }
1819     case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
1820     case FIB_PATH_TYPE_ATTACHED:
1821         if (adj_recursive_loop_detect(path->fp_dpo.dpoi_index,
1822                                       entry_indicies))
1823         {
1824             FIB_PATH_DBG(path, "recursive loop formed");
1825             path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RECURSIVE_LOOP;
1826         }
1827         else
1828         {
1829             FIB_PATH_DBG(path, "recursive loop cleared");
1830             path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RECURSIVE_LOOP;
1831         }
1832         break;
1833     case FIB_PATH_TYPE_SPECIAL:
1834     case FIB_PATH_TYPE_DEAG:
1835     case FIB_PATH_TYPE_DVR:
1836     case FIB_PATH_TYPE_RECEIVE:
1837     case FIB_PATH_TYPE_INTF_RX:
1838     case FIB_PATH_TYPE_UDP_ENCAP:
1839     case FIB_PATH_TYPE_EXCLUSIVE:
1840     case FIB_PATH_TYPE_BIER_FMASK:
1841     case FIB_PATH_TYPE_BIER_TABLE:
1842     case FIB_PATH_TYPE_BIER_IMP:
1843         /*
1844          * these path types cannot be part of a loop, since they are the leaves
1845          * of the graph.
1846          */
1847         break;
1848     }
1849
1850     return (fib_path_is_looped(path_index));
1851 }
1852
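/*
 * fib_path_resolve
 *
 * Resolve the path: create or find the forwarding objects its type requires
 * (adjacency, via-entry, BIER object, lookup/receive DPO, etc.), stack the
 * path's DPO on them and update the RESOLVED operational flag accordingly.
 * Returns non-zero if the path ends up resolved.
 */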
1853 int
1854 fib_path_resolve (fib_node_index_t path_index)
1855 {
1856     fib_path_t *path;
1857
1858     path = fib_path_get(path_index);
1859
1860     /*
1861      * hope for the best.
1862      */
1863     path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RESOLVED;
1864
1865     /*
1866      * the forced drop path resolves via the drop adj
1867      */
1868     if (fib_path_is_permanent_drop(path))
1869     {
1870         dpo_copy(&path->fp_dpo, drop_dpo_get(path->fp_nh_proto));
1871         path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
1872         return (fib_path_is_resolved(path_index));
1873     }
1874
1875     switch (path->fp_type)
1876     {
1877     case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
1878         fib_path_attached_next_hop_set(path);
1879         break;
1880     case FIB_PATH_TYPE_ATTACHED:
1881         /*
1882          * path->attached.fp_interface
1883          */
1884         if (!vnet_sw_interface_is_admin_up(vnet_get_main(),
1885                                            path->attached.fp_interface))
1886         {
1887             path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
1888         }
1889         dpo_set(&path->fp_dpo,
1890                 DPO_ADJACENCY,
1891                 path->fp_nh_proto,
1892                 fib_path_attached_get_adj(path,
1893                                           dpo_proto_to_link(path->fp_nh_proto)));
1894
1895         /*
1896          * become a child of the adjacency so we receive updates
1897          * when the interface state changes
1898          */
1899         path->fp_sibling = adj_child_add(path->fp_dpo.dpoi_index,
1900                                          FIB_NODE_TYPE_PATH,
1901                                          fib_path_get_index(path));
1902         break;
1903     case FIB_PATH_TYPE_RECURSIVE:
1904     {
1905         /*
1906          * Create a RR source entry in the table for the address
1907          * that this path recurses through.
1908          * This resolve action is recursive, hence we may create
1909          * more paths in the process. more creates mean maybe realloc
1910          * of this path.
1911          */
1912         fib_node_index_t fei;
1913         fib_prefix_t pfx;
1914
1915         ASSERT(FIB_NODE_INDEX_INVALID == path->fp_via_fib);
1916
1917         if (DPO_PROTO_MPLS == path->fp_nh_proto)
1918         {
1919             fib_prefix_from_mpls_label(path->recursive.fp_nh.fp_local_label,
1920                                        path->recursive.fp_nh.fp_eos,
1921                                        &pfx);
1922         }
1923         else
1924         {
1925             fib_prefix_from_ip46_addr(&path->recursive.fp_nh.fp_ip, &pfx);
1926         }
1927
1928         fib_table_lock(path->recursive.fp_tbl_id,
1929                        dpo_proto_to_fib(path->fp_nh_proto),
1930                        FIB_SOURCE_RR);
1931         fei = fib_table_entry_special_add(path->recursive.fp_tbl_id,
1932                                           &pfx,
1933                                           FIB_SOURCE_RR,
1934                                           FIB_ENTRY_FLAG_NONE);
1935
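        /*
         * the special add above may have grown, and hence moved, the path
         * pool; re-fetch our path pointer before dereferencing it again.
         */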
1936         path = fib_path_get(path_index);
1937         path->fp_via_fib = fei;
1938
1939         /*
1940          * become a dependent child of the entry so the path is 
1941          * informed when the forwarding for the entry changes.
1942          */
1943         path->fp_sibling = fib_entry_child_add(path->fp_via_fib,
1944                                                FIB_NODE_TYPE_PATH,
1945                                                fib_path_get_index(path));
1946
1947         /*
1948          * create and configure the IP DPO
1949          */
1950         fib_path_recursive_adj_update(
1951             path,
1952             fib_path_to_chain_type(path),
1953             &path->fp_dpo);
1954
1955         break;
1956     }
1957     case FIB_PATH_TYPE_BIER_FMASK:
1958     {
1959         /*
1960          * become a dependent child of the entry so the path is
1961          * informed when the forwarding for the entry changes.
1962          */
1963         path->fp_sibling = bier_fmask_child_add(path->bier_fmask.fp_bier_fmask,
1964                                                 FIB_NODE_TYPE_PATH,
1965                                                 fib_path_get_index(path));
1966
1967         path->fp_via_bier_fmask = path->bier_fmask.fp_bier_fmask;
1968         fib_path_bier_fmask_update(path, &path->fp_dpo);
1969
1970         break;
1971     }
1972     case FIB_PATH_TYPE_BIER_IMP:
1973         bier_imp_lock(path->bier_imp.fp_bier_imp);
1974         bier_imp_contribute_forwarding(path->bier_imp.fp_bier_imp,
1975                                        DPO_PROTO_IP4,
1976                                        &path->fp_dpo);
1977         break;
1978     case FIB_PATH_TYPE_BIER_TABLE:
1979     {
1980         /*
1981          * Find/create the BIER table to link to
1982          */
1983         ASSERT(FIB_NODE_INDEX_INVALID == path->fp_via_bier_tbl);
1984
1985         path->fp_via_bier_tbl =
1986             bier_table_ecmp_create_and_lock(&path->bier_table.fp_bier_tbl);
1987
1988         bier_table_contribute_forwarding(path->fp_via_bier_tbl,
1989                                          &path->fp_dpo);
1990         break;
1991     }
1992     case FIB_PATH_TYPE_SPECIAL:
1993         /*
1994          * Resolve via the drop
1995          */
1996         dpo_copy(&path->fp_dpo, drop_dpo_get(path->fp_nh_proto));
1997         break;
1998     case FIB_PATH_TYPE_DEAG:
1999     {
2000         if (DPO_PROTO_BIER == path->fp_nh_proto)
2001         {
2002             bier_disp_table_contribute_forwarding(path->deag.fp_tbl_id,
2003                                                   &path->fp_dpo);
2004         }
2005         else
2006         {
2007             /*
2008              * Resolve via a lookup DPO.
2009              * FIXME. control plane should add routes with a table ID
2010              */
2011             lookup_input_t input;
2012             lookup_cast_t cast;
2013
2014             cast = (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_RPF_ID ?
2015                     LOOKUP_MULTICAST :
2016                     LOOKUP_UNICAST);
2017             input = (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_DEAG_SRC ?
2018                      LOOKUP_INPUT_SRC_ADDR :
2019                      LOOKUP_INPUT_DST_ADDR);
2020
2021             lookup_dpo_add_or_lock_w_fib_index(path->deag.fp_tbl_id,
2022                                                path->fp_nh_proto,
2023                                                cast,
2024                                                input,
2025                                                LOOKUP_TABLE_FROM_CONFIG,
2026                                                &path->fp_dpo);
2027         }
2028         break;
2029     }
2030     case FIB_PATH_TYPE_DVR:
2031         dvr_dpo_add_or_lock(path->attached.fp_interface,
2032                             path->fp_nh_proto,
2033                             &path->fp_dpo);
2034         break;
2035     case FIB_PATH_TYPE_RECEIVE:
2036         /*
2037          * Resolve via a receive DPO.
2038          */
2039         receive_dpo_add_or_lock(path->fp_nh_proto,
2040                                 path->receive.fp_interface,
2041                                 &path->receive.fp_addr,
2042                                 &path->fp_dpo);
2043         break;
2044     case FIB_PATH_TYPE_UDP_ENCAP:
2045         udp_encap_lock(path->udp_encap.fp_udp_encap_id);
2046         udp_encap_contribute_forwarding(path->udp_encap.fp_udp_encap_id,
2047                                         path->fp_nh_proto,
2048                                         &path->fp_dpo);
2049         break;
2050     case FIB_PATH_TYPE_INTF_RX: {
2051         /*
2052          * Resolve via an interface receive DPO.
2053          */
2054         interface_rx_dpo_add_or_lock(path->fp_nh_proto,
2055                                      path->intf_rx.fp_interface,
2056                                      &path->fp_dpo);
2057         break;
2058     }
2059     case FIB_PATH_TYPE_EXCLUSIVE:
2060         /*
2061          * Resolve via the user provided DPO
2062          */
2063         dpo_copy(&path->fp_dpo, &path->exclusive.fp_ex_dpo);
2064         break;
2065     }
2066
2067     return (fib_path_is_resolved(path_index));
2068 }
2069
2070 u32
2071 fib_path_get_resolving_interface (fib_node_index_t path_index)
2072 {
2073     fib_path_t *path;
2074
2075     path = fib_path_get(path_index);
2076
2077     switch (path->fp_type)
2078     {
2079     case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
2080         return (path->attached_next_hop.fp_interface);
2081     case FIB_PATH_TYPE_ATTACHED:
2082         return (path->attached.fp_interface);
2083     case FIB_PATH_TYPE_RECEIVE:
2084         return (path->receive.fp_interface);
2085     case FIB_PATH_TYPE_RECURSIVE:
2086         if (fib_path_is_resolved(path_index))
2087         {
2088             return (fib_entry_get_resolving_interface(path->fp_via_fib));
2089         }
2090         break;
2091     case FIB_PATH_TYPE_DVR:
2092         return (path->dvr.fp_interface);
2093     case FIB_PATH_TYPE_INTF_RX:
2094     case FIB_PATH_TYPE_UDP_ENCAP:
2095     case FIB_PATH_TYPE_SPECIAL:
2096     case FIB_PATH_TYPE_DEAG:
2097     case FIB_PATH_TYPE_EXCLUSIVE:
2098     case FIB_PATH_TYPE_BIER_FMASK:
2099     case FIB_PATH_TYPE_BIER_TABLE:
2100     case FIB_PATH_TYPE_BIER_IMP:
2101         break;
2102     }
2103     return (dpo_get_urpf(&path->fp_dpo));
2104 }
2105
2106 index_t
2107 fib_path_get_resolving_index (fib_node_index_t path_index)
2108 {
2109     fib_path_t *path;
2110
2111     path = fib_path_get(path_index);
2112
2113     switch (path->fp_type)
2114     {
2115     case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
2116     case FIB_PATH_TYPE_ATTACHED:
2117     case FIB_PATH_TYPE_RECEIVE:
2118     case FIB_PATH_TYPE_INTF_RX:
2119     case FIB_PATH_TYPE_SPECIAL:
2120     case FIB_PATH_TYPE_DEAG:
2121     case FIB_PATH_TYPE_DVR:
2122     case FIB_PATH_TYPE_EXCLUSIVE:
2123         break;
2124     case FIB_PATH_TYPE_UDP_ENCAP:
2125         return (path->udp_encap.fp_udp_encap_id);
2126     case FIB_PATH_TYPE_RECURSIVE:
2127         return (path->fp_via_fib);
2128     case FIB_PATH_TYPE_BIER_FMASK:
2129         return (path->bier_fmask.fp_bier_fmask);
2130     case FIB_PATH_TYPE_BIER_TABLE:
2131         return (path->fp_via_bier_tbl);
2132     case FIB_PATH_TYPE_BIER_IMP:
2133         return (path->bier_imp.fp_bier_imp);
2134     }
2135     return (~0);
2136 }
2137
2138 adj_index_t
2139 fib_path_get_adj (fib_node_index_t path_index)
2140 {
2141     fib_path_t *path;
2142
2143     path = fib_path_get(path_index);
2144
2145     ASSERT(dpo_is_adj(&path->fp_dpo));
2146     if (dpo_is_adj(&path->fp_dpo))
2147     {
2148         return (path->fp_dpo.dpoi_index);
2149     }
2150     return (ADJ_INDEX_INVALID);
2151 }
2152
2153 u16
2154 fib_path_get_weight (fib_node_index_t path_index)
2155 {
2156     fib_path_t *path;
2157
2158     path = fib_path_get(path_index);
2159
2160     ASSERT(path);
2161
2162     return (path->fp_weight);
2163 }
2164
2165 u16
2166 fib_path_get_preference (fib_node_index_t path_index)
2167 {
2168     fib_path_t *path;
2169
2170     path = fib_path_get(path_index);
2171
2172     ASSERT(path);
2173
2174     return (path->fp_preference);
2175 }
2176
2177 u32
2178 fib_path_get_rpf_id (fib_node_index_t path_index)
2179 {
2180     fib_path_t *path;
2181
2182     path = fib_path_get(path_index);
2183
2184     ASSERT(path);
2185
2186     if (FIB_PATH_CFG_FLAG_RPF_ID & path->fp_cfg_flags)
2187     {
2188         return (path->deag.fp_rpf_id);
2189     }
2190
2191     return (~0);
2192 }
2193
2194 /**
2195  * @brief Contribute the path's adjacency to the list passed.
2196  * By calling this function over all paths, recursively, a child
2197  * can construct its full set of forwarding adjacencies, and hence its
2198  * uRPF list.
2199  */
2200 void
2201 fib_path_contribute_urpf (fib_node_index_t path_index,
2202                           index_t urpf)
2203 {
2204     fib_path_t *path;
2205
2206     path = fib_path_get(path_index);
2207
2208     /*
2209      * resolved and unresolved paths contribute to the RPF list.
2210      */
2211     switch (path->fp_type)
2212     {
2213     case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
2214         fib_urpf_list_append(urpf, path->attached_next_hop.fp_interface);
2215         break;
2216
2217     case FIB_PATH_TYPE_ATTACHED:
2218         fib_urpf_list_append(urpf, path->attached.fp_interface);
2219         break;
2220
2221     case FIB_PATH_TYPE_RECURSIVE:
2222         if (FIB_NODE_INDEX_INVALID != path->fp_via_fib &&
2223             !fib_path_is_looped(path_index))
2224         {
2225             /*
2226              * a path may be unresolved due to resolution constraints, or
2227              * because it has no via entry; without a via there is nothing to add.
2228              */
2229             fib_entry_contribute_urpf(path->fp_via_fib, urpf);
2230         }
2231         break;
2232
2233     case FIB_PATH_TYPE_EXCLUSIVE:
2234     case FIB_PATH_TYPE_SPECIAL:
2235     {
2236         /*
2237          * these path types may link to an adj, if that's what
2238          * the client gave
2239          */
2240         u32 rpf_sw_if_index;
2241
2242         rpf_sw_if_index = dpo_get_urpf(&path->fp_dpo);
2243
2244         if (~0 != rpf_sw_if_index)
2245         {
2246             fib_urpf_list_append(urpf, rpf_sw_if_index);
2247         }
2248         break;
2249     }
2250     case FIB_PATH_TYPE_DVR:
2251         fib_urpf_list_append(urpf, path->dvr.fp_interface);
2252         break;
2253     case FIB_PATH_TYPE_DEAG:
2254     case FIB_PATH_TYPE_RECEIVE:
2255     case FIB_PATH_TYPE_INTF_RX:
2256     case FIB_PATH_TYPE_UDP_ENCAP:
2257     case FIB_PATH_TYPE_BIER_FMASK:
2258     case FIB_PATH_TYPE_BIER_TABLE:
2259     case FIB_PATH_TYPE_BIER_IMP:
2260         /*
2261          * these path types don't link to an adj
2262          */
2263         break;
2264     }
2265 }
2266
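/*
 * fib_path_stack_mpls_disp
 *
 * Stack an MPLS disposition DPO on the DPO passed, for those path types
 * at which a label can be disposed of (attached next-hop and deag);
 * all other path types leave the DPO unchanged.
 */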
2267 void
2268 fib_path_stack_mpls_disp (fib_node_index_t path_index,
2269                           dpo_proto_t payload_proto,
2270                           fib_mpls_lsp_mode_t mode,
2271                           dpo_id_t *dpo)
2272 {
2273     fib_path_t *path;
2274
2275     path = fib_path_get(path_index);
2276
2277     ASSERT(path);
2278
2279     switch (path->fp_type)
2280     {
2281     case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
2282     {
2283         dpo_id_t tmp = DPO_INVALID;
2284
2285         dpo_copy(&tmp, dpo);
2286
2287         mpls_disp_dpo_create(payload_proto, ~0, mode, &tmp, dpo);
2288         dpo_reset(&tmp);
2289         break;
2290     }                
2291     case FIB_PATH_TYPE_DEAG:
2292     {
2293         dpo_id_t tmp = DPO_INVALID;
2294
2295         dpo_copy(&tmp, dpo);
2296
2297         mpls_disp_dpo_create(payload_proto,
2298                              path->deag.fp_rpf_id,
2299                              mode, &tmp, dpo);
2300         dpo_reset(&tmp);
2301         break;
2302     }
2303     case FIB_PATH_TYPE_RECEIVE:
2304     case FIB_PATH_TYPE_ATTACHED:
2305     case FIB_PATH_TYPE_RECURSIVE:
2306     case FIB_PATH_TYPE_INTF_RX:
2307     case FIB_PATH_TYPE_UDP_ENCAP:
2308     case FIB_PATH_TYPE_EXCLUSIVE:
2309     case FIB_PATH_TYPE_SPECIAL:
2310     case FIB_PATH_TYPE_BIER_FMASK:
2311     case FIB_PATH_TYPE_BIER_TABLE:
2312     case FIB_PATH_TYPE_BIER_IMP:
2313     case FIB_PATH_TYPE_DVR:
2314         break;
2315     }
2316 }
2317
2318 void
2319 fib_path_contribute_forwarding (fib_node_index_t path_index,
2320                                 fib_forward_chain_type_t fct,
2321                                 dpo_id_t *dpo)
2322 {
2323     fib_path_t *path;
2324
2325     path = fib_path_get(path_index);
2326
2327     ASSERT(path);
2328     ASSERT(FIB_FORW_CHAIN_TYPE_MPLS_EOS != fct);
2329
2330     /*
2331      * The DPO stored in the path was created when the path was resolved.
2332      * This then represents the path's 'native' protocol (e.g. IP).
2333      * For all other chain types we need to go and find something else.
2334      */
2335     if (fib_path_to_chain_type(path) == fct)
2336     {
2337         dpo_copy(dpo, &path->fp_dpo);
2338     }
2339     else
2340     {
2341         switch (path->fp_type)
2342         {
2343         case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
2344             switch (fct)
2345             {
2346             case FIB_FORW_CHAIN_TYPE_UNICAST_IP4:
2347             case FIB_FORW_CHAIN_TYPE_UNICAST_IP6:
2348             case FIB_FORW_CHAIN_TYPE_MPLS_EOS:
2349             case FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS:
2350             case FIB_FORW_CHAIN_TYPE_ETHERNET:
2351             case FIB_FORW_CHAIN_TYPE_NSH:
2352             case FIB_FORW_CHAIN_TYPE_MCAST_IP4:
2353             case FIB_FORW_CHAIN_TYPE_MCAST_IP6:
2354             {
2355                 adj_index_t ai;
2356
2357                 /*
2358                  * get an appropriate link-type adj.
2359                  */
2360                 ai = fib_path_attached_next_hop_get_adj(
2361                          path,
2362                          fib_forw_chain_type_to_link_type(fct));
2363                 dpo_set(dpo, DPO_ADJACENCY,
2364                         fib_forw_chain_type_to_dpo_proto(fct), ai);
2365                 adj_unlock(ai);
2366
2367                 break;
2368             }
2369             case FIB_FORW_CHAIN_TYPE_BIER:
2370                 break;
2371             }
2372             break;
2373         case FIB_PATH_TYPE_RECURSIVE:
2374             switch (fct)
2375             {
2376             case FIB_FORW_CHAIN_TYPE_MPLS_EOS:
2377             case FIB_FORW_CHAIN_TYPE_UNICAST_IP4:
2378             case FIB_FORW_CHAIN_TYPE_UNICAST_IP6:
2379             case FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS:
2380             case FIB_FORW_CHAIN_TYPE_MCAST_IP4:
2381             case FIB_FORW_CHAIN_TYPE_MCAST_IP6:
2382             case FIB_FORW_CHAIN_TYPE_BIER:
2383                 fib_path_recursive_adj_update(path, fct, dpo);
2384                 break;
2385             case FIB_FORW_CHAIN_TYPE_ETHERNET:
2386             case FIB_FORW_CHAIN_TYPE_NSH:
2387                 ASSERT(0);
2388                 break;
2389             }
2390             break;
2391         case FIB_PATH_TYPE_BIER_TABLE:
2392             switch (fct)
2393             {
2394             case FIB_FORW_CHAIN_TYPE_BIER:
2395                 bier_table_contribute_forwarding(path->fp_via_bier_tbl, dpo);
2396                 break;
2397             case FIB_FORW_CHAIN_TYPE_MPLS_EOS:
2398             case FIB_FORW_CHAIN_TYPE_UNICAST_IP4:
2399             case FIB_FORW_CHAIN_TYPE_UNICAST_IP6:
2400             case FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS:
2401             case FIB_FORW_CHAIN_TYPE_MCAST_IP4:
2402             case FIB_FORW_CHAIN_TYPE_MCAST_IP6:
2403             case FIB_FORW_CHAIN_TYPE_ETHERNET:
2404             case FIB_FORW_CHAIN_TYPE_NSH:
2405                 ASSERT(0);
2406                 break;
2407             }
2408             break;
2409         case FIB_PATH_TYPE_BIER_FMASK:
2410             switch (fct)
2411             {
2412             case FIB_FORW_CHAIN_TYPE_BIER:
2413                 fib_path_bier_fmask_update(path, dpo);
2414                 break;
2415             case FIB_FORW_CHAIN_TYPE_MPLS_EOS:
2416             case FIB_FORW_CHAIN_TYPE_UNICAST_IP4:
2417             case FIB_FORW_CHAIN_TYPE_UNICAST_IP6:
2418             case FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS:
2419             case FIB_FORW_CHAIN_TYPE_MCAST_IP4:
2420             case FIB_FORW_CHAIN_TYPE_MCAST_IP6:
2421             case FIB_FORW_CHAIN_TYPE_ETHERNET:
2422             case FIB_FORW_CHAIN_TYPE_NSH:
2423                 ASSERT(0);
2424                 break;
2425             }
2426             break;
2427         case FIB_PATH_TYPE_BIER_IMP:
2428             bier_imp_contribute_forwarding(path->bier_imp.fp_bier_imp,
2429                                            fib_forw_chain_type_to_dpo_proto(fct),
2430                                            dpo);
2431             break;
2432         case FIB_PATH_TYPE_DEAG:
2433             switch (fct)
2434             {
2435             case FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS:
2436                 lookup_dpo_add_or_lock_w_table_id(MPLS_FIB_DEFAULT_TABLE_ID,
2437                                                   DPO_PROTO_MPLS,
2438                                                   LOOKUP_UNICAST,
2439                                                   LOOKUP_INPUT_DST_ADDR,
2440                                                   LOOKUP_TABLE_FROM_CONFIG,
2441                                                   dpo);
2442                 break;
2443             case FIB_FORW_CHAIN_TYPE_MPLS_EOS:
2444             case FIB_FORW_CHAIN_TYPE_UNICAST_IP4:
2445             case FIB_FORW_CHAIN_TYPE_UNICAST_IP6:
2446                 dpo_copy(dpo, &path->fp_dpo);
2447                 break;
2448             case FIB_FORW_CHAIN_TYPE_MCAST_IP4:
2449             case FIB_FORW_CHAIN_TYPE_MCAST_IP6:
2450             case FIB_FORW_CHAIN_TYPE_BIER:
2451                 break;
2452             case FIB_FORW_CHAIN_TYPE_ETHERNET:
2453             case FIB_FORW_CHAIN_TYPE_NSH:
2454                 ASSERT(0);
2455                 break;
2456             }
2457             break;
2458         case FIB_PATH_TYPE_EXCLUSIVE:
2459             dpo_copy(dpo, &path->exclusive.fp_ex_dpo);
2460             break;
2461         case FIB_PATH_TYPE_ATTACHED:
2462             switch (fct)
2463             {
2464             case FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS:
2465             case FIB_FORW_CHAIN_TYPE_UNICAST_IP4:
2466             case FIB_FORW_CHAIN_TYPE_UNICAST_IP6:
2467             case FIB_FORW_CHAIN_TYPE_MPLS_EOS:
2468             case FIB_FORW_CHAIN_TYPE_ETHERNET:
2469             case FIB_FORW_CHAIN_TYPE_NSH:
2470             case FIB_FORW_CHAIN_TYPE_BIER:
2471                 {
2472                     adj_index_t ai;
2473
2474                     /*
2475                      * get an appropriate link-type adj.
2476                      */
2477                     ai = fib_path_attached_get_adj(
2478                             path,
2479                             fib_forw_chain_type_to_link_type(fct));
2480                     dpo_set(dpo, DPO_ADJACENCY,
2481                             fib_forw_chain_type_to_dpo_proto(fct), ai);
2482                     adj_unlock(ai);
2483                     break;
2484                 }
2485             case FIB_FORW_CHAIN_TYPE_MCAST_IP4:
2486             case FIB_FORW_CHAIN_TYPE_MCAST_IP6:
2487                 {
2488                     adj_index_t ai;
2489
2490                     /*
2491                      * Create the adj needed for sending IP multicast traffic
2492                      */
2493                     ai = adj_mcast_add_or_lock(dpo_proto_to_fib(path->fp_nh_proto),
2494                                                fib_forw_chain_type_to_link_type(fct),
2495                                                path->attached.fp_interface);
2496                     dpo_set(dpo, DPO_ADJACENCY,
2497                             fib_forw_chain_type_to_dpo_proto(fct),
2498                             ai);
2499                     adj_unlock(ai);
2500                 }
2501                 break;
2502             }
2503             break;
2504         case FIB_PATH_TYPE_INTF_RX:
2505             /*
2506              * Create the interface receive (rx) DPO for this path's interface
2507              */
2508             interface_rx_dpo_add_or_lock(fib_forw_chain_type_to_dpo_proto(fct),
2509                                          path->attached.fp_interface,
2510                                          dpo);
2511             break;
2512         case FIB_PATH_TYPE_UDP_ENCAP:
2513             udp_encap_contribute_forwarding(path->udp_encap.fp_udp_encap_id,
2514                                             path->fp_nh_proto,
2515                                             dpo);
2516             break;
2517         case FIB_PATH_TYPE_RECEIVE:
2518         case FIB_PATH_TYPE_SPECIAL:
2519         case FIB_PATH_TYPE_DVR:
2520             dpo_copy(dpo, &path->fp_dpo);
2521             break;
2522         }
2523     }
2524 }
2525
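/*
 * fib_path_append_nh_for_multipath_hash
 *
 * Append this path as a choice to the load-balance path vector passed.
 * If the path is not resolved, a drop DPO is contributed in its place.
 */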
2526 load_balance_path_t *
2527 fib_path_append_nh_for_multipath_hash (fib_node_index_t path_index,
2528                                        fib_forward_chain_type_t fct,
2529                                        load_balance_path_t *hash_key)
2530 {
2531     load_balance_path_t *mnh;
2532     fib_path_t *path;
2533
2534     path = fib_path_get(path_index);
2535
2536     ASSERT(path);
2537
2538     vec_add2(hash_key, mnh, 1);
2539
2540     mnh->path_weight = path->fp_weight;
2541     mnh->path_index = path_index;
2542
2543     if (fib_path_is_resolved(path_index))
2544     {
2545         fib_path_contribute_forwarding(path_index, fct, &mnh->path_dpo);
2546     }
2547     else
2548     {
2549         dpo_copy(&mnh->path_dpo,
2550                  drop_dpo_get(fib_forw_chain_type_to_dpo_proto(fct)));
2551     }
2552     return (hash_key);
2553 }
2554
2555 int
2556 fib_path_is_recursive_constrained (fib_node_index_t path_index)
2557 {
2558     fib_path_t *path;
2559
2560     path = fib_path_get(path_index);
2561
2562     return ((FIB_PATH_TYPE_RECURSIVE == path->fp_type) &&
2563             ((path->fp_cfg_flags & FIB_PATH_CFG_FLAG_RESOLVE_ATTACHED) ||
2564              (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_RESOLVE_HOST)));
2565 }
2566
2567 int
2568 fib_path_is_exclusive (fib_node_index_t path_index)
2569 {
2570     fib_path_t *path;
2571
2572     path = fib_path_get(path_index);
2573
2574     return (FIB_PATH_TYPE_EXCLUSIVE == path->fp_type);
2575 }
2576
2577 int
2578 fib_path_is_deag (fib_node_index_t path_index)
2579 {
2580     fib_path_t *path;
2581
2582     path = fib_path_get(path_index);
2583
2584     return (FIB_PATH_TYPE_DEAG == path->fp_type);
2585 }
2586
2587 int
2588 fib_path_is_resolved (fib_node_index_t path_index)
2589 {
2590     fib_path_t *path;
2591
2592     path = fib_path_get(path_index);
2593
2594     return (dpo_id_is_valid(&path->fp_dpo) &&
2595             (path->fp_oper_flags & FIB_PATH_OPER_FLAG_RESOLVED) &&
2596             !fib_path_is_looped(path_index) &&
2597             !fib_path_is_permanent_drop(path));
2598 }
2599
2600 int
2601 fib_path_is_looped (fib_node_index_t path_index)
2602 {
2603     fib_path_t *path;
2604
2605     path = fib_path_get(path_index);
2606
2607     return (path->fp_oper_flags & FIB_PATH_OPER_FLAG_RECURSIVE_LOOP);
2608 }
2609
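/*
 * fib_path_encode
 *
 * Path-list walk callback that copies a path's attributes into the vector
 * of fib_route_path_encode_t objects passed via the walk context.
 */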
2610 fib_path_list_walk_rc_t
2611 fib_path_encode (fib_node_index_t path_list_index,
2612                  fib_node_index_t path_index,
2613                  void *ctx)
2614 {
2615     fib_route_path_encode_t **api_rpaths = ctx;
2616     fib_route_path_encode_t *api_rpath;
2617     fib_path_t *path;
2618
2619     path = fib_path_get(path_index);
2620     if (!path)
2621       return (FIB_PATH_LIST_WALK_CONTINUE);
2622     vec_add2(*api_rpaths, api_rpath, 1);
2623     api_rpath->rpath.frp_weight = path->fp_weight;
2624     api_rpath->rpath.frp_preference = path->fp_preference;
2625     api_rpath->rpath.frp_proto = path->fp_nh_proto;
2626     api_rpath->rpath.frp_sw_if_index = ~0;
2627     api_rpath->rpath.frp_fib_index = 0;
2628     api_rpath->dpo = path->fp_dpo;
2629
2630     switch (path->fp_type)
2631       {
2632       case FIB_PATH_TYPE_RECEIVE:
2633         api_rpath->rpath.frp_addr = path->receive.fp_addr;
2634         api_rpath->rpath.frp_sw_if_index = path->receive.fp_interface;
2635         break;
2636       case FIB_PATH_TYPE_ATTACHED:
2637         api_rpath->rpath.frp_sw_if_index = path->attached.fp_interface;
2638         break;
2639       case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
2640         api_rpath->rpath.frp_sw_if_index = path->attached_next_hop.fp_interface;
2641         api_rpath->rpath.frp_addr = path->attached_next_hop.fp_nh;
2642         break;
2643       case FIB_PATH_TYPE_BIER_FMASK:
2644         api_rpath->rpath.frp_bier_fmask = path->bier_fmask.fp_bier_fmask;
2645         break;
2646       case FIB_PATH_TYPE_SPECIAL:
2647         break;
2648       case FIB_PATH_TYPE_DEAG:
2649         api_rpath->rpath.frp_fib_index = path->deag.fp_tbl_id;
2650         break;
2651       case FIB_PATH_TYPE_RECURSIVE:
2652         api_rpath->rpath.frp_addr = path->recursive.fp_nh.fp_ip;
2653         api_rpath->rpath.frp_fib_index = path->recursive.fp_tbl_id;
2654         break;
2655       case FIB_PATH_TYPE_DVR:
2656           api_rpath->rpath.frp_sw_if_index = path->dvr.fp_interface;
2657           api_rpath->rpath.frp_flags |= FIB_ROUTE_PATH_DVR;
2658           break;
2659       case FIB_PATH_TYPE_UDP_ENCAP:
2660           api_rpath->rpath.frp_udp_encap_id = path->udp_encap.fp_udp_encap_id;
2661           api_rpath->rpath.frp_flags |= FIB_ROUTE_PATH_UDP_ENCAP;
2662           break;
2663       default:
2664         break;
2665       }
2666
2667     return (FIB_PATH_LIST_WALK_CONTINUE);
2668 }
2669
2670 dpo_proto_t
2671 fib_path_get_proto (fib_node_index_t path_index)
2672 {
2673     fib_path_t *path;
2674
2675     path = fib_path_get(path_index);
2676
2677     return (path->fp_nh_proto);
2678 }
2679
2680 void
2681 fib_path_module_init (void)
2682 {
2683     fib_node_register_type (FIB_NODE_TYPE_PATH, &fib_path_vft);
2684     fib_path_logger = vlib_log_register_class ("fib", "path");
2685 }
2686
2687 static clib_error_t *
2688 show_fib_path_command (vlib_main_t * vm,
2689                         unformat_input_t * input,
2690                         vlib_cli_command_t * cmd)
2691 {
2692     fib_node_index_t pi;
2693     fib_path_t *path;
2694
2695     if (unformat (input, "%d", &pi))
2696     {
2697         /*
2698          * show one in detail
2699          */
2700         if (!pool_is_free_index(fib_path_pool, pi))
2701         {
2702             path = fib_path_get(pi);
2703             u8 *s = format(NULL, "%U", format_fib_path, pi, 1,
2704                            FIB_PATH_FORMAT_FLAGS_NONE);
2705             s = format(s, "\n  children:");
2706             s = fib_node_children_format(path->fp_node.fn_children, s);
2707             vlib_cli_output (vm, "%s", s);
2708             vec_free(s);
2709         }
2710         else
2711         {
2712             vlib_cli_output (vm, "path %d invalid", pi);
2713         }
2714     }
2715     else
2716     {
2717         vlib_cli_output (vm, "FIB Paths");
2718         pool_foreach_index (pi, fib_path_pool,
2719         ({
2720             vlib_cli_output (vm, "%U", format_fib_path, pi, 0,
2721                              FIB_PATH_FORMAT_FLAGS_NONE);
2722         }));
2723     }
2724
2725     return (NULL);
2726 }
2727
2728 VLIB_CLI_COMMAND (show_fib_path, static) = {
2729   .path = "show fib paths",
2730   .function = show_fib_path_command,
2731   .short_help = "show fib paths",
2732 };