FIB: encode the label stack in the FIB path during table dump
src/vnet/fib/fib_path.c
1 /*
2  * Copyright (c) 2016 Cisco and/or its affiliates.
3  * Licensed under the Apache License, Version 2.0 (the "License");
4  * you may not use this file except in compliance with the License.
5  * You may obtain a copy of the License at:
6  *
7  *     http://www.apache.org/licenses/LICENSE-2.0
8  *
9  * Unless required by applicable law or agreed to in writing, software
10  * distributed under the License is distributed on an "AS IS" BASIS,
11  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12  * See the License for the specific language governing permissions and
13  * limitations under the License.
14  */
15
16 #include <vlib/vlib.h>
17 #include <vnet/vnet.h>
18 #include <vnet/ip/format.h>
19 #include <vnet/ip/ip.h>
20 #include <vnet/dpo/drop_dpo.h>
21 #include <vnet/dpo/receive_dpo.h>
22 #include <vnet/dpo/load_balance_map.h>
23 #include <vnet/dpo/lookup_dpo.h>
24 #include <vnet/dpo/interface_rx_dpo.h>
25 #include <vnet/dpo/mpls_disposition.h>
26 #include <vnet/dpo/dvr_dpo.h>
27 #include <vnet/dpo/drop_dpo.h>
28
29 #include <vnet/adj/adj.h>
30 #include <vnet/adj/adj_mcast.h>
31
32 #include <vnet/fib/fib_path.h>
33 #include <vnet/fib/fib_node.h>
34 #include <vnet/fib/fib_table.h>
35 #include <vnet/fib/fib_entry.h>
36 #include <vnet/fib/fib_path_list.h>
37 #include <vnet/fib/fib_internal.h>
38 #include <vnet/fib/fib_urpf_list.h>
39 #include <vnet/fib/mpls_fib.h>
40 #include <vnet/fib/fib_path_ext.h>
41 #include <vnet/udp/udp_encap.h>
42 #include <vnet/bier/bier_fmask.h>
43 #include <vnet/bier/bier_table.h>
44 #include <vnet/bier/bier_imp.h>
45 #include <vnet/bier/bier_disp_table.h>
46
47 /**
48  * Enumeration of path types
49  */
50 typedef enum fib_path_type_t_ {
51     /**
52      * Marker. Add new types after this one.
53      */
54     FIB_PATH_TYPE_FIRST = 0,
55     /**
56      * Attached-nexthop. An interface and a nexthop are known.
57      */
58     FIB_PATH_TYPE_ATTACHED_NEXT_HOP = FIB_PATH_TYPE_FIRST,
59     /**
60      * attached. Only the interface is known.
61      */
62     FIB_PATH_TYPE_ATTACHED,
63     /**
64      * recursive. Only the next-hop is known.
65      */
66     FIB_PATH_TYPE_RECURSIVE,
67     /**
68      * special. nothing is known. so we drop.
69      */
70     FIB_PATH_TYPE_SPECIAL,
71     /**
72      * exclusive. user provided adj.
73      */
74     FIB_PATH_TYPE_EXCLUSIVE,
75     /**
76      * deag. Link to a lookup adj in the next table
77      */
78     FIB_PATH_TYPE_DEAG,
79     /**
80      * interface receive.
81      */
82     FIB_PATH_TYPE_INTF_RX,
83     /**
84      * Path resolves via a UDP encap object.
85      */
86     FIB_PATH_TYPE_UDP_ENCAP,
87     /**
88      * receive. it's for-us.
89      */
90     FIB_PATH_TYPE_RECEIVE,
91     /**
92      * bier-imp. it's via a BIER imposition.
93      */
94     FIB_PATH_TYPE_BIER_IMP,
95     /**
96      * bier-table. it's via a BIER ECMP-table.
97      */
98     FIB_PATH_TYPE_BIER_TABLE,
99     /**
100      * bier-fmask. it's via a BIER f-mask.
101      */
102     FIB_PATH_TYPE_BIER_FMASK,
103     /**
104      * via a DVR.
105      */
106     FIB_PATH_TYPE_DVR,
107     /**
108      * Marker. Add new types before this one, then update it.
109      */
110     FIB_PATH_TYPE_LAST = FIB_PATH_TYPE_DVR,
111 } __attribute__ ((packed)) fib_path_type_t;
112
113 /**
114  * The maximum number of path_types
115  */
116 #define FIB_PATH_TYPE_MAX (FIB_PATH_TYPE_LAST + 1)
117
118 #define FIB_PATH_TYPES {                                        \
119     [FIB_PATH_TYPE_ATTACHED_NEXT_HOP] = "attached-nexthop",     \
120     [FIB_PATH_TYPE_ATTACHED]          = "attached",             \
121     [FIB_PATH_TYPE_RECURSIVE]         = "recursive",            \
122     [FIB_PATH_TYPE_SPECIAL]           = "special",              \
123     [FIB_PATH_TYPE_EXCLUSIVE]         = "exclusive",            \
124     [FIB_PATH_TYPE_DEAG]              = "deag",                 \
125     [FIB_PATH_TYPE_INTF_RX]           = "intf-rx",              \
126     [FIB_PATH_TYPE_UDP_ENCAP]         = "udp-encap",            \
127     [FIB_PATH_TYPE_RECEIVE]           = "receive",              \
128     [FIB_PATH_TYPE_BIER_IMP]          = "bier-imp",             \
129     [FIB_PATH_TYPE_BIER_TABLE]        = "bier-table",           \
130     [FIB_PATH_TYPE_BIER_FMASK]        = "bier-fmask",           \
131     [FIB_PATH_TYPE_DVR]               = "dvr",                  \
132 }
133
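/**
 * Walk over each path type
 */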
134 #define FOR_EACH_FIB_PATH_TYPE(_item)           \
135     for (_item = FIB_PATH_TYPE_FIRST;           \
136          _item <= FIB_PATH_TYPE_LAST;           \
137          _item++)
138
139 /**
140  * Enumeration of path operational (i.e. derived) attributes
141  */
142 typedef enum fib_path_oper_attribute_t_ {
143     /**
144      * Marker. Add new types after this one.
145      */
146     FIB_PATH_OPER_ATTRIBUTE_FIRST = 0,
147     /**
148      * The path forms part of a recursive loop.
149      */
150     FIB_PATH_OPER_ATTRIBUTE_RECURSIVE_LOOP = FIB_PATH_OPER_ATTRIBUTE_FIRST,
151     /**
152      * The path is resolved
153      */
154     FIB_PATH_OPER_ATTRIBUTE_RESOLVED,
155     /**
156      * The path is attached, despite what the next-hop may say.
157      */
158     FIB_PATH_OPER_ATTRIBUTE_ATTACHED,
159     /**
160      * The path has become a permanent drop.
161      */
162     FIB_PATH_OPER_ATTRIBUTE_DROP,
163     /**
164      * Marker. Add new types before this one, then update it.
165      */
166     FIB_PATH_OPER_ATTRIBUTE_LAST = FIB_PATH_OPER_ATTRIBUTE_DROP,
167 } __attribute__ ((packed)) fib_path_oper_attribute_t;
168
169 /**
170  * The maximum number of path operational attributes
171  */
172 #define FIB_PATH_OPER_ATTRIBUTE_MAX (FIB_PATH_OPER_ATTRIBUTE_LAST + 1)
173
174 #define FIB_PATH_OPER_ATTRIBUTES {                                      \
175     [FIB_PATH_OPER_ATTRIBUTE_RECURSIVE_LOOP] = "recursive-loop",        \
176     [FIB_PATH_OPER_ATTRIBUTE_RESOLVED]       = "resolved",              \
    [FIB_PATH_OPER_ATTRIBUTE_ATTACHED]       = "attached",              \
177     [FIB_PATH_OPER_ATTRIBUTE_DROP]           = "drop",                  \
178 }
179
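/**
 * Walk over each path operational attribute
 */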
180 #define FOR_EACH_FIB_PATH_OPER_ATTRIBUTE(_item) \
181     for (_item = FIB_PATH_OPER_ATTRIBUTE_FIRST; \
182          _item <= FIB_PATH_OPER_ATTRIBUTE_LAST; \
183          _item++)
184
185 /**
186  * Path flags from the attributes
187  */
188 typedef enum fib_path_oper_flags_t_ {
189     FIB_PATH_OPER_FLAG_NONE = 0,
190     FIB_PATH_OPER_FLAG_RECURSIVE_LOOP = (1 << FIB_PATH_OPER_ATTRIBUTE_RECURSIVE_LOOP),
191     FIB_PATH_OPER_FLAG_DROP = (1 << FIB_PATH_OPER_ATTRIBUTE_DROP),
192     FIB_PATH_OPER_FLAG_RESOLVED = (1 << FIB_PATH_OPER_ATTRIBUTE_RESOLVED),
193     FIB_PATH_OPER_FLAG_ATTACHED = (1 << FIB_PATH_OPER_ATTRIBUTE_ATTACHED),
194 } __attribute__ ((packed)) fib_path_oper_flags_t;
195
196 /**
197  * A FIB path
198  */
199 typedef struct fib_path_t_ {
200     /**
201      * A path is a node in the FIB graph.
202      */
203     fib_node_t fp_node;
204
205     /**
206      * The index of the path-list to which this path belongs
207      */
208     u32 fp_pl_index;
209
210     /**
211      * This marks the start of the memory area used to hash
212      * the path
213      */
214     STRUCT_MARK(path_hash_start);
215
216     /**
217      * Configuration Flags
218      */
219     fib_path_cfg_flags_t fp_cfg_flags;
220
221     /**
222      * The type of the path. This is the selector for the union
223      */
224     fib_path_type_t fp_type;
225
226     /**
227      * The protocol of the next-hop, i.e. the address family of the
228      * next-hop's address. We can't derive this from the address itself
229      * since the address can be all zeros
230      */
231     dpo_proto_t fp_nh_proto;
232
233     /**
234      * UCMP [unnormalised] weight
235      */
236     u8 fp_weight;
237
238     /**
239      * A path preference. 0 is the best.
240      * Only paths of the best preference, that are 'up', are considered
241      * for forwarding.
242      */
243     u8 fp_preference;
244
245     /**
246      * per-type union of the data required to resolve the path
247      */
248     union {
249         struct {
250             /**
251              * The next-hop
252              */
253             ip46_address_t fp_nh;
254             /**
255              * The interface
256              */
257             u32 fp_interface;
258         } attached_next_hop;
259         struct {
260             /**
261              * The interface
262              */
263             u32 fp_interface;
264         } attached;
265         struct {
266             union
267             {
268                 /**
269                  * The next-hop
270                  */
271                 ip46_address_t fp_ip;
272                 struct {
273                     /**
274                      * The local label to resolve through.
275                      */
276                     mpls_label_t fp_local_label;
277                     /**
278                      * The EOS bit of the resolving label
279                      */
280                     mpls_eos_bit_t fp_eos;
281                 };
282             } fp_nh;
283             union {
284                 /**
285                  * The FIB table index in which to find the next-hop.
286                  */
287                 fib_node_index_t fp_tbl_id;
288                 /**
289                  * The BIER FIB the fmask is in
290                  */
291                 index_t fp_bier_fib;
292             };
293         } recursive;
294         struct {
295             /**
296              * BIER FMask ID
297              */
298             index_t fp_bier_fmask;
299         } bier_fmask;
300         struct {
301             /**
302              * The BIER table's ID
303              */
304             bier_table_id_t fp_bier_tbl;
305         } bier_table;
306         struct {
307             /**
308              * The BIER imposition object
309              * this is part of the path's key, since the index_t
310              * of an imposition object is the object's key.
311              */
312             index_t fp_bier_imp;
313         } bier_imp;
314         struct {
315             /**
316              * The FIB index in which to perform the next lookup
317              */
318             fib_node_index_t fp_tbl_id;
319             /**
320              * The RPF-ID to tag the packets with
321              */
322             fib_rpf_id_t fp_rpf_id;
323         } deag;
324         struct {
325         } special;
326         struct {
327             /**
328              * The user provided 'exclusive' DPO
329              */
330             dpo_id_t fp_ex_dpo;
331         } exclusive;
332         struct {
333             /**
334              * The interface on which the local address is configured
335              */
336             u32 fp_interface;
337             /**
338              * The next-hop
339              */
340             ip46_address_t fp_addr;
341         } receive;
342         struct {
343             /**
344              * The interface on which the packets will be input.
345              */
346             u32 fp_interface;
347         } intf_rx;
348         struct {
349             /**
350              * The UDP Encap object this path resolves through
351              */
352             u32 fp_udp_encap_id;
353         } udp_encap;
354         struct {
355             /**
356              * The interface
357              */
358             u32 fp_interface;
359         } dvr;
360     };
361     STRUCT_MARK(path_hash_end);
362
363     /**
364      * Members in this last section represent information that is
365      * derived during resolution. It should not be copied to new paths
366      * nor compared.
367      */
368
369     /**
370      * Operational Flags
371      */
372     fib_path_oper_flags_t fp_oper_flags;
373
374     union {
375         /**
376          * the resolving via fib. not part of the union, since it is not part
377          * of the path's hash.
378          */
379         fib_node_index_t fp_via_fib;
380         /**
381          * the resolving bier-table
382          */
383         index_t fp_via_bier_tbl;
384         /**
385          * the resolving bier-fmask
386          */
387         index_t fp_via_bier_fmask;
388     };
389
390     /**
391      * The Data-path objects through which this path resolves for IP.
392      */
393     dpo_id_t fp_dpo;
394
395     /**
396      * the index of this path in the parent's child list.
397      */
398     u32 fp_sibling;
399 } fib_path_t;
400
401 /*
402  * Array of strings/names for the path types and attributes
403  */
404 static const char *fib_path_type_names[] = FIB_PATH_TYPES;
405 static const char *fib_path_oper_attribute_names[] = FIB_PATH_OPER_ATTRIBUTES;
406 static const char *fib_path_cfg_attribute_names[]  = FIB_PATH_CFG_ATTRIBUTES;
407
408 /*
409  * The memory pool from which we allocate all the paths
410  */
411 static fib_path_t *fib_path_pool;
412
413 /**
414  * the logger
415  */
416 vlib_log_class_t fib_path_logger;
417
418 /*
419  * Debug macro
420  */
421 #define FIB_PATH_DBG(_p, _fmt, _args...)                                \
422 {                                                                       \
423     vlib_log_debug (fib_path_logger,                                    \
424                     "[%U]: " _fmt,                                      \
425                     format_fib_path, fib_path_get_index(_p), 0,         \
426                     FIB_PATH_FORMAT_FLAGS_ONE_LINE,                     \
427                     ##_args);                                           \
428 }
429
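/*
 * fib_path_get
 *
 * Get a path object from its index in the path pool
 */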
430 static fib_path_t *
431 fib_path_get (fib_node_index_t index)
432 {
433     return (pool_elt_at_index(fib_path_pool, index));
434 }
435
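/*
 * fib_path_get_index
 *
 * Return the index of a path object, i.e. its offset in the path pool
 */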
436 static fib_node_index_t 
437 fib_path_get_index (fib_path_t *path)
438 {
439     return (path - fib_path_pool);
440 }
441
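/*
 * fib_path_get_node
 *
 * Return the path at the given index as its base graph-node type
 */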
442 static fib_node_t *
443 fib_path_get_node (fib_node_index_t index)
444 {
445     return ((fib_node_t*)fib_path_get(index));
446 }
447
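/*
 * fib_path_from_fib_node
 *
 * Downcast a graph node to the path that embeds it; asserts the
 * node is of type PATH
 */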
448 static fib_path_t*
449 fib_path_from_fib_node (fib_node_t *node)
450 {
451     ASSERT(FIB_NODE_TYPE_PATH == node->fn_type);
452     return ((fib_path_t*)node);
453 }
454
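/*
 * format_fib_path
 *
 * Format a path for show output. The va_args are the path's index,
 * the indent level and the fib_format_path_flags_t flags.
 * Illustrative one-line output (values are examples only):
 *   path:[2] pl-index:3 ip4 weight=1 pref=0 attached-nexthop: 10.0.0.1 <interface>
 */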
455 u8 *
456 format_fib_path (u8 * s, va_list * args)
457 {
458     fib_node_index_t path_index = va_arg (*args, fib_node_index_t);
459     u32 indent = va_arg (*args, u32);
460     fib_format_path_flags_t flags =  va_arg (*args, fib_format_path_flags_t);
461     vnet_main_t * vnm = vnet_get_main();
462     fib_path_oper_attribute_t oattr;
463     fib_path_cfg_attribute_t cattr;
464     fib_path_t *path;
465     const char *eol;
466
467     if (flags & FIB_PATH_FORMAT_FLAGS_ONE_LINE)
468     {
469         eol = "";
470     }
471     else
472     {
473         eol = "\n";
474     }
475
476     path = fib_path_get(path_index);
477
478     s = format (s, "%Upath:[%d] ", format_white_space, indent,
479                 fib_path_get_index(path));
480     s = format (s, "pl-index:%d ", path->fp_pl_index);
481     s = format (s, "%U ", format_dpo_proto, path->fp_nh_proto);
482     s = format (s, "weight=%d ", path->fp_weight);
483     s = format (s, "pref=%d ", path->fp_preference);
484     s = format (s, "%s: ", fib_path_type_names[path->fp_type]);
485     if (FIB_PATH_OPER_FLAG_NONE != path->fp_oper_flags) {
486         s = format(s, " oper-flags:");
487         FOR_EACH_FIB_PATH_OPER_ATTRIBUTE(oattr) {
488             if ((1<<oattr) & path->fp_oper_flags) {
489                 s = format (s, "%s,", fib_path_oper_attribute_names[oattr]);
490             }
491         }
492     }
493     if (FIB_PATH_CFG_FLAG_NONE != path->fp_cfg_flags) {
494         s = format(s, " cfg-flags:");
495         FOR_EACH_FIB_PATH_CFG_ATTRIBUTE(cattr) {
496             if ((1<<cattr) & path->fp_cfg_flags) {
497                 s = format (s, "%s,", fib_path_cfg_attribute_names[cattr]);
498             }
499         }
500     }
501     if (!(flags & FIB_PATH_FORMAT_FLAGS_ONE_LINE))
502         s = format(s, "\n%U", format_white_space, indent+2);
503
504     switch (path->fp_type)
505     {
506     case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
507         s = format (s, "%U", format_ip46_address,
508                     &path->attached_next_hop.fp_nh,
509                     IP46_TYPE_ANY);
510         if (path->fp_oper_flags & FIB_PATH_OPER_FLAG_DROP)
511         {
512             s = format (s, " if_index:%d", path->attached_next_hop.fp_interface);
513         }
514         else
515         {
516             s = format (s, " %U",
517                         format_vnet_sw_interface_name,
518                         vnm,
519                         vnet_get_sw_interface(
520                             vnm,
521                             path->attached_next_hop.fp_interface));
522             if (vnet_sw_interface_is_p2p(vnet_get_main(),
523                                          path->attached_next_hop.fp_interface))
524             {
525                 s = format (s, " (p2p)");
526             }
527         }
528         if (!dpo_id_is_valid(&path->fp_dpo))
529         {
530             s = format(s, "%s%Uunresolved", eol, format_white_space, indent+2);
531         }
532         else
533         {
534             s = format(s, "%s%U%U", eol,
535                        format_white_space, indent,
536                        format_dpo_id,
537                        &path->fp_dpo, 13);
538         }
539         break;
540     case FIB_PATH_TYPE_ATTACHED:
541         if (path->fp_oper_flags & FIB_PATH_OPER_FLAG_DROP)
542         {
543             s = format (s, "if_index:%d", path->attached_next_hop.fp_interface);
544         }
545         else
546         {
547             s = format (s, " %U",
548                         format_vnet_sw_interface_name,
549                         vnm,
550                         vnet_get_sw_interface(
551                             vnm,
552                             path->attached.fp_interface));
553         }
554         break;
555     case FIB_PATH_TYPE_RECURSIVE:
556         if (DPO_PROTO_MPLS == path->fp_nh_proto)
557         {
558             s = format (s, "via %U %U",
559                         format_mpls_unicast_label,
560                         path->recursive.fp_nh.fp_local_label,
561                         format_mpls_eos_bit,
562                         path->recursive.fp_nh.fp_eos);
563         }
564         else
565         {
566             s = format (s, "via %U",
567                         format_ip46_address,
568                         &path->recursive.fp_nh.fp_ip,
569                         IP46_TYPE_ANY);
570         }
571         s = format (s, " in fib:%d",
572                     path->recursive.fp_tbl_id,
573                     path->fp_via_fib); 
574         s = format (s, " via-fib:%d", path->fp_via_fib); 
575         s = format (s, " via-dpo:[%U:%d]",
576                     format_dpo_type, path->fp_dpo.dpoi_type, 
577                     path->fp_dpo.dpoi_index);
578
579         break;
580     case FIB_PATH_TYPE_UDP_ENCAP:
581         s = format (s, "UDP-encap ID:%d", path->udp_encap.fp_udp_encap_id);
582         break;
583     case FIB_PATH_TYPE_BIER_TABLE:
584         s = format (s, "via bier-table:[%U}",
585                     format_bier_table_id,
586                     &path->bier_table.fp_bier_tbl);
587         s = format (s, " via-dpo:[%U:%d]",
588                     format_dpo_type, path->fp_dpo.dpoi_type,
589                     path->fp_dpo.dpoi_index);
590         break;
591     case FIB_PATH_TYPE_BIER_FMASK:
592         s = format (s, "via-fmask:%d", path->bier_fmask.fp_bier_fmask); 
593         s = format (s, " via-dpo:[%U:%d]",
594                     format_dpo_type, path->fp_dpo.dpoi_type, 
595                     path->fp_dpo.dpoi_index);
596         break;
597     case FIB_PATH_TYPE_BIER_IMP:
598         s = format (s, "via %U", format_bier_imp,
599                     path->bier_imp.fp_bier_imp, 0, BIER_SHOW_BRIEF);
600         break;
601     case FIB_PATH_TYPE_DVR:
602         s = format (s, " %U",
603                     format_vnet_sw_interface_name,
604                     vnm,
605                     vnet_get_sw_interface(
606                         vnm,
607                         path->dvr.fp_interface));
608         break;
609     case FIB_PATH_TYPE_DEAG:
610         s = format (s, " %sfib-index:%d",
611                     (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_RPF_ID ?  "m" : ""),
612                     path->deag.fp_tbl_id);
613         break;
614     case FIB_PATH_TYPE_RECEIVE:
615     case FIB_PATH_TYPE_INTF_RX:
616     case FIB_PATH_TYPE_SPECIAL:
617     case FIB_PATH_TYPE_EXCLUSIVE:
618         if (dpo_id_is_valid(&path->fp_dpo))
619         {
620             s = format(s, "%U", format_dpo_id,
621                        &path->fp_dpo, indent+2);
622         }
623         break;
624     }
625     return (s);
626 }
627
628 /*
629  * fib_path_last_lock_gone
630  *
631  * We don't share paths, we share path lists, so the [un]lock functions
632  * are no-ops
633  */
634 static void
635 fib_path_last_lock_gone (fib_node_t *node)
636 {
637     ASSERT(0);
638 }
639
640 static adj_index_t
641 fib_path_attached_next_hop_get_adj (fib_path_t *path,
642                                     vnet_link_t link)
643 {
644     if (vnet_sw_interface_is_p2p(vnet_get_main(),
645                                  path->attached_next_hop.fp_interface))
646     {
647         /*
648          * if the interface is p2p then the adj for the specific
649          * neighbour on that link will never exist. on p2p links
650          * the subnet address (the attached route) links to the
651          * auto-adj (see below), we want that adj here too.
652          */
653         return (adj_nbr_add_or_lock(dpo_proto_to_fib(path->fp_nh_proto),
654                                     link,
655                                     &zero_addr,
656                                     path->attached_next_hop.fp_interface));
657     }
658     else
659     {
660         return (adj_nbr_add_or_lock(dpo_proto_to_fib(path->fp_nh_proto),
661                                     link,
662                                     &path->attached_next_hop.fp_nh,
663                                     path->attached_next_hop.fp_interface));
664     }
665 }
666
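/*
 * fib_path_attached_next_hop_set
 *
 * Resolve an attached-nexthop path: stack the path's DPO on the
 * neighbour adjacency for the {interface, next-hop} pair and become
 * a child of that adjacency so rewrite changes are seen
 */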
667 static void
668 fib_path_attached_next_hop_set (fib_path_t *path)
669 {
670     /*
671      * resolve directly via the adjacency described by the
672      * interface and next-hop
673      */
674     dpo_set(&path->fp_dpo,
675             DPO_ADJACENCY,
676             path->fp_nh_proto,
677             fib_path_attached_next_hop_get_adj(
678                  path,
679                  dpo_proto_to_link(path->fp_nh_proto)));
680
681     /*
682      * become a child of the adjacency so we receive updates
683      * when its rewrite changes
684      */
685     path->fp_sibling = adj_child_add(path->fp_dpo.dpoi_index,
686                                      FIB_NODE_TYPE_PATH,
687                                      fib_path_get_index(path));
688
689     if (!vnet_sw_interface_is_admin_up(vnet_get_main(),
690                                       path->attached_next_hop.fp_interface) ||
691         !adj_is_up(path->fp_dpo.dpoi_index))
692     {
693         path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
694     }
695 }
696
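/*
 * fib_path_attached_get_adj
 *
 * Return the adjacency through which an attached path resolves:
 * a neighbour adj for p2p interfaces, otherwise the interface's glean adj
 */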
697 static adj_index_t
698 fib_path_attached_get_adj (fib_path_t *path,
699                            vnet_link_t link)
700 {
701     if (vnet_sw_interface_is_p2p(vnet_get_main(),
702                                  path->attached.fp_interface))
703     {
704         /*
705          * point-2-point interfaces do not require a glean, since
706          * there is nothing to ARP. Install a rewrite/nbr adj instead
707          */
708         return (adj_nbr_add_or_lock(dpo_proto_to_fib(path->fp_nh_proto),
709                                     link,
710                                     &zero_addr,
711                                     path->attached.fp_interface));
712     }
713     else
714     {
715         return (adj_glean_add_or_lock(dpo_proto_to_fib(path->fp_nh_proto),
716                                       link,
717                                       path->attached.fp_interface,
718                                       NULL));
719     }
720 }
721
722 /*
723  * create or update the path's recursive adj
724  */
725 static void
726 fib_path_recursive_adj_update (fib_path_t *path,
727                                fib_forward_chain_type_t fct,
728                                dpo_id_t *dpo)
729 {
730     dpo_id_t via_dpo = DPO_INVALID;
731
732     /*
733      * get the DPO to resolve through from the via-entry
734      */
735     fib_entry_contribute_forwarding(path->fp_via_fib,
736                                     fct,
737                                     &via_dpo);
738
739
740     /*
741      * hope for the best - clear if restrictions apply.
742      */
743     path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RESOLVED;
744
745     /*
746      * Validate any recursion constraints and over-ride the via
747      * adj if not met
748      */
749     if (path->fp_oper_flags & FIB_PATH_OPER_FLAG_RECURSIVE_LOOP)
750     {
751         path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
752         dpo_copy(&via_dpo, drop_dpo_get(path->fp_nh_proto));
753     }
754     else if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_RESOLVE_HOST)
755     {
756         /*
757          * the via FIB entry must be a host route.
758          * note the via FIB entry just added will always be a host route
759          * since it is an RR-source-added host route. So what we need to
760          * check is whether the route has other sources. If it does then
761          * some other source has added it as a host route. If it doesn't
762          * then it was added only here and inherits forwarding from a cover;
763          * the cover is not a host route.
764          * The RR source is the lowest priority source, so we check if it
765          * is the best. If it is, there are no other sources.
766          */
767         if (fib_entry_get_best_source(path->fp_via_fib) >= FIB_SOURCE_RR)
768         {
769             path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
770             dpo_copy(&via_dpo, drop_dpo_get(path->fp_nh_proto));
771
772             /*
773              * PIC edge trigger. let the load-balance maps know
774              */
775             load_balance_map_path_state_change(fib_path_get_index(path));
776         }
777     }
778     else if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_RESOLVE_ATTACHED)
779     {
780         /*
781          * RR source entries inherit the flags from the cover, so
782          * we can check the via directly
783          */
784         if (!(FIB_ENTRY_FLAG_ATTACHED & fib_entry_get_flags(path->fp_via_fib)))
785         {
786             path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
787             dpo_copy(&via_dpo, drop_dpo_get(path->fp_nh_proto));
788
789             /*
790              * PIC edge trigger. let the load-balance maps know
791              */
792             load_balance_map_path_state_change(fib_path_get_index(path));
793         }
794     }
795     /*
796      * check for over-riding factors on the FIB entry itself
797      */
798     if (!fib_entry_is_resolved(path->fp_via_fib))
799     {
800         path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
801         dpo_copy(&via_dpo, drop_dpo_get(path->fp_nh_proto));
802
803         /*
804          * PIC edge trigger. let the load-balance maps know
805          */
806         load_balance_map_path_state_change(fib_path_get_index(path));
807     }
808
809     /*
810      * If this path is contributing a drop, then it's not resolved
811      */
812     if (dpo_is_drop(&via_dpo) || load_balance_is_drop(&via_dpo))
813     {
814         path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
815     }
816
817     /*
818      * update the path's contributed DPO
819      */
820     dpo_copy(dpo, &via_dpo);
821
822     FIB_PATH_DBG(path, "recursive update:");
823
824     dpo_reset(&via_dpo);
825 }
826
827 /*
828  * re-evaluate the forwarding state for a via-fmask path
829  */
830 static void
831 fib_path_bier_fmask_update (fib_path_t *path,
832                             dpo_id_t *dpo)
833 {
834     bier_fmask_contribute_forwarding(path->bier_fmask.fp_bier_fmask, dpo);
835
836     /*
837      * if we are stacking on the drop, then the path is not resolved
838      */
839     if (dpo_is_drop(dpo))
840     {
841         path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
842     }
843     else
844     {
845         path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RESOLVED;
846     }
847 }
848
849 /*
850  * fib_path_is_permanent_drop
851  *
852  * Return !0 if the path is configured to permanently drop,
853  * despite other attributes.
854  */
855 static int
856 fib_path_is_permanent_drop (fib_path_t *path)
857 {
858     return ((path->fp_cfg_flags & FIB_PATH_CFG_FLAG_DROP) ||
859             (path->fp_oper_flags & FIB_PATH_OPER_FLAG_DROP));
860 }
861
862 /*
863  * fib_path_unresolve
864  *
865  * Remove our dependency on the resolution target
866  */
867 static void
868 fib_path_unresolve (fib_path_t *path)
869 {
870     /*
871      * the forced drop path does not need unresolving
872      */
873     if (fib_path_is_permanent_drop(path))
874     {
875         return;
876     }
877
878     switch (path->fp_type)
879     {
880     case FIB_PATH_TYPE_RECURSIVE:
881         if (FIB_NODE_INDEX_INVALID != path->fp_via_fib)
882         {
883             fib_entry_child_remove(path->fp_via_fib,
884                                    path->fp_sibling);
885             fib_table_entry_special_remove(path->recursive.fp_tbl_id,
886                                            fib_entry_get_prefix(path->fp_via_fib),
887                                            FIB_SOURCE_RR);
888             fib_table_unlock(path->recursive.fp_tbl_id,
889                              dpo_proto_to_fib(path->fp_nh_proto),
890                              FIB_SOURCE_RR);
891             path->fp_via_fib = FIB_NODE_INDEX_INVALID;
892         }
893         break;
894     case FIB_PATH_TYPE_BIER_FMASK:
895         bier_fmask_child_remove(path->fp_via_bier_fmask,
896                                 path->fp_sibling);
897         break;
898     case FIB_PATH_TYPE_BIER_IMP:
899         bier_imp_unlock(path->fp_dpo.dpoi_index);
900         break;
901     case FIB_PATH_TYPE_BIER_TABLE:
902         bier_table_ecmp_unlock(path->fp_via_bier_tbl);
903         break;
904     case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
905         adj_child_remove(path->fp_dpo.dpoi_index,
906                          path->fp_sibling);
907         adj_unlock(path->fp_dpo.dpoi_index);
908         break;
909     case FIB_PATH_TYPE_ATTACHED:
910         adj_child_remove(path->fp_dpo.dpoi_index,
911                          path->fp_sibling);
912         adj_unlock(path->fp_dpo.dpoi_index);
913         break;
914     case FIB_PATH_TYPE_UDP_ENCAP:
915         udp_encap_unlock(path->fp_dpo.dpoi_index);
916         break;
917     case FIB_PATH_TYPE_EXCLUSIVE:
918         dpo_reset(&path->exclusive.fp_ex_dpo);
919         break;
920     case FIB_PATH_TYPE_SPECIAL:
921     case FIB_PATH_TYPE_RECEIVE:
922     case FIB_PATH_TYPE_INTF_RX:
923     case FIB_PATH_TYPE_DEAG:
924     case FIB_PATH_TYPE_DVR:
925         /*
926          * these hold only the path's DPO, which is reset below.
927          */
928         break;
929     }
930
931     /*
932      * release the adj we were holding and pick up the
933      * drop just in case.
934      */
935     dpo_reset(&path->fp_dpo);
936     path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
937
938     return;
939 }
940
941 static fib_forward_chain_type_t
942 fib_path_to_chain_type (const fib_path_t *path)
943 {
944     if (DPO_PROTO_MPLS == path->fp_nh_proto)
945     {
946         if (FIB_PATH_TYPE_RECURSIVE == path->fp_type &&
947             MPLS_EOS == path->recursive.fp_nh.fp_eos)
948         {
949             return (FIB_FORW_CHAIN_TYPE_MPLS_EOS);
950         }
951         else
952         {
953             return (FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS);
954         }
955     }
956     else
957     {
958         return (fib_forw_chain_type_from_dpo_proto(path->fp_nh_proto));
959     }
960 }
961
962 /*
963  * fib_path_back_walk_notify
964  *
965  * A back walk has reached this path.
966  */
967 static fib_node_back_walk_rc_t
968 fib_path_back_walk_notify (fib_node_t *node,
969                            fib_node_back_walk_ctx_t *ctx)
970 {
971     fib_path_t *path;
972
973     path = fib_path_from_fib_node(node);
974
975     FIB_PATH_DBG(path, "bw:%U",
976                  format_fib_node_bw_reason, ctx->fnbw_reason);
977
978     switch (path->fp_type)
979     {
980     case FIB_PATH_TYPE_RECURSIVE:
981         if (FIB_NODE_BW_REASON_FLAG_EVALUATE & ctx->fnbw_reason)
982         {
983             /*
984              * modify the recursive adjacency to use the new forwarding
985              * of the via-fib.
986              * this update is visible to packets in flight in the DP.
987              */
988             fib_path_recursive_adj_update(
989                 path,
990                 fib_path_to_chain_type(path),
991                 &path->fp_dpo);
992         }
993         if ((FIB_NODE_BW_REASON_FLAG_ADJ_UPDATE & ctx->fnbw_reason) ||
994             (FIB_NODE_BW_REASON_FLAG_ADJ_DOWN   & ctx->fnbw_reason))
995         {
996             /*
997              * ADJ updates (complete<->incomplete) do not need to propagate to
998              * recursive entries.
999              * The only reason it's needed as far back as here is that the adj
1000              * and the incomplete adj are different DPO types, so the LBs need
1001              * to re-stack.
1002              * If this walk was quashed in the fib_entry, then any non-fib_path
1003              * children (like tunnels that collapse out the LB when they stack)
1004              * would not see the update.
1005              */
1006             return (FIB_NODE_BACK_WALK_CONTINUE);
1007         }
1008         break;
1009     case FIB_PATH_TYPE_BIER_FMASK:
1010         if (FIB_NODE_BW_REASON_FLAG_EVALUATE & ctx->fnbw_reason)
1011         {
1012             /*
1013              * update to use the BIER fmask's new forwarding
1014              */
1015             fib_path_bier_fmask_update(path, &path->fp_dpo);
1016         }
1017         if ((FIB_NODE_BW_REASON_FLAG_ADJ_UPDATE & ctx->fnbw_reason) ||
1018             (FIB_NODE_BW_REASON_FLAG_ADJ_DOWN   & ctx->fnbw_reason))
1019         {
1020             /*
1021              * ADJ updates (complete<->incomplete) do not need to propagate to
1022              * recursive entries.
1023              * The only reason it's needed as far back as here is that the adj
1024              * and the incomplete adj are different DPO types, so the LBs need
1025              * to re-stack.
1026              * If this walk was quashed in the fib_entry, then any non-fib_path
1027              * children (like tunnels that collapse out the LB when they stack)
1028              * would not see the update.
1029              */
1030             return (FIB_NODE_BACK_WALK_CONTINUE);
1031         }
1032         break;
1033     case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
1034         /*
1036          * ADJ_UPDATE backwalks pass silently through here and up to
1037          * the path-list when the multipath adj collapse occurs.
1038          * The reason we do this is that the assumption is that VPP
1039          * runs in an environment where the Control-Plane is remote
1040          * and hence reacts slowly to link up/down. In order to remove
1041          * this down link from the ECMP set quickly, we back-walk.
1042          * VPP also has dedicated CPUs, so we are not stealing resources
1043          * from the CP to do so.
1044          */
1045         if (FIB_NODE_BW_REASON_FLAG_INTERFACE_UP & ctx->fnbw_reason)
1046         {
1047             if (path->fp_oper_flags & FIB_PATH_OPER_FLAG_RESOLVED)
1048             {
1049                 /*
1050                  * already resolved. no need to walk back again
1051                  */
1052                 return (FIB_NODE_BACK_WALK_CONTINUE);
1053             }
1054             path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RESOLVED;
1055         }
1056         if (FIB_NODE_BW_REASON_FLAG_INTERFACE_DOWN & ctx->fnbw_reason)
1057         {
1058             if (!(path->fp_oper_flags & FIB_PATH_OPER_FLAG_RESOLVED))
1059             {
1060                 /*
1061                  * already unresolved. no need to walk back again
1062                  */
1063                 return (FIB_NODE_BACK_WALK_CONTINUE);
1064             }
1065             path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
1066         }
1067         if (FIB_NODE_BW_REASON_FLAG_INTERFACE_DELETE & ctx->fnbw_reason)
1068         {
1069             /*
1070              * The interface this path resolves through has been deleted.
1071              * This will leave the path in a permanent drop state. The route
1072              * needs to be removed and readded (and hence the path-list deleted)
1073              * before it can forward again.
1074              */
1075             fib_path_unresolve(path);
1076             path->fp_oper_flags |= FIB_PATH_OPER_FLAG_DROP;
1077         }
1078         if (FIB_NODE_BW_REASON_FLAG_ADJ_UPDATE & ctx->fnbw_reason)
1079         {
1080             /*
1081              * restack the DPO to pick up the correct DPO sub-type
1082              */
1083             uword if_is_up;
1084             adj_index_t ai;
1085
1086             if_is_up = vnet_sw_interface_is_admin_up(
1087                            vnet_get_main(),
1088                            path->attached_next_hop.fp_interface);
1089
1090             ai = fib_path_attached_next_hop_get_adj(
1091                      path,
1092                      dpo_proto_to_link(path->fp_nh_proto));
1093
1094             path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
1095             if (if_is_up && adj_is_up(ai))
1096             {
1097                 path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RESOLVED;
1098             }
1099
1100             dpo_set(&path->fp_dpo, DPO_ADJACENCY, path->fp_nh_proto, ai);
1101             adj_unlock(ai);
1102
1103             if (!if_is_up)
1104             {
1105                 /*
1106                  * If the interface is not up there is no reason to walk
1107                  * back to children. If we did, they would only evaluate
1108                  * that this path is unresolved and hence it would
1109                  * not contribute the adjacency - so it would be wasted
1110                  * CPU time.
1111                  */
1112                 return (FIB_NODE_BACK_WALK_CONTINUE);
1113             }
1114         }
1115         if (FIB_NODE_BW_REASON_FLAG_ADJ_DOWN & ctx->fnbw_reason)
1116         {
1117             if (!(path->fp_oper_flags & FIB_PATH_OPER_FLAG_RESOLVED))
1118             {
1119                 /*
1120                  * already unresolved. no need to walk back again
1121                  */
1122                 return (FIB_NODE_BACK_WALK_CONTINUE);
1123             }
1124             /*
1125              * the adj has gone down. the path is no longer resolved.
1126              */
1127             path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
1128         }
1129         break;
1130     case FIB_PATH_TYPE_ATTACHED:
1131     case FIB_PATH_TYPE_DVR:
1132         /*
1133          * FIXME; this could schedule a lower priority walk, since attached
1134          * routes are not usually in ECMP configurations so the backwalk to
1135          * the FIB entry does not need to be high priority
1136          */
1137         if (FIB_NODE_BW_REASON_FLAG_INTERFACE_UP & ctx->fnbw_reason)
1138         {
1139             path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RESOLVED;
1140         }
1141         if (FIB_NODE_BW_REASON_FLAG_INTERFACE_DOWN & ctx->fnbw_reason)
1142         {
1143             path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
1144         }
1145         if (FIB_NODE_BW_REASON_FLAG_INTERFACE_DELETE & ctx->fnbw_reason)
1146         {
1147             fib_path_unresolve(path);
1148             path->fp_oper_flags |= FIB_PATH_OPER_FLAG_DROP;
1149         }
1150         break;
1151     case FIB_PATH_TYPE_UDP_ENCAP:
1152     {
1153         dpo_id_t via_dpo = DPO_INVALID;
1154
1155         /*
1156          * hope for the best - clear if restrictions apply.
1157          */
1158         path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RESOLVED;
1159
1160         udp_encap_contribute_forwarding(path->udp_encap.fp_udp_encap_id,
1161                                         path->fp_nh_proto,
1162                                         &via_dpo);
1163         /*
1164          * If this path is contributing a drop, then it's not resolved
1165          */
1166         if (dpo_is_drop(&via_dpo) || load_balance_is_drop(&via_dpo))
1167         {
1168             path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
1169         }
1170
1171         /*
1172          * update the path's contributed DPO
1173          */
1174         dpo_copy(&path->fp_dpo, &via_dpo);
1175         dpo_reset(&via_dpo);
1176         break;
1177     }
1178     case FIB_PATH_TYPE_INTF_RX:
1179         ASSERT(0);
1180     case FIB_PATH_TYPE_DEAG:
1181         /*
1182          * FIXME When VRF delete is allowed this will need a poke.
1183          */
1184     case FIB_PATH_TYPE_SPECIAL:
1185     case FIB_PATH_TYPE_RECEIVE:
1186     case FIB_PATH_TYPE_EXCLUSIVE:
1187     case FIB_PATH_TYPE_BIER_TABLE:
1188     case FIB_PATH_TYPE_BIER_IMP:
1189         /*
1190          * these path types have no parents. so to be
1191          * walked from one is unexpected.
1192          */
1193         ASSERT(0);
1194         break;
1195     }
1196
1197     /*
1198      * propagate the backwalk further to the path-list
1199      */
1200     fib_path_list_back_walk(path->fp_pl_index, ctx);
1201
1202     return (FIB_NODE_BACK_WALK_CONTINUE);
1203 }
1204
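/*
 * fib_path_memory_show
 *
 * Show the memory usage of the path pool
 */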
1205 static void
1206 fib_path_memory_show (void)
1207 {
1208     fib_show_memory_usage("Path",
1209                           pool_elts(fib_path_pool),
1210                           pool_len(fib_path_pool),
1211                           sizeof(fib_path_t));
1212 }
1213
1214 /*
1215  * The FIB path's graph node virtual function table
1216  */
1217 static const fib_node_vft_t fib_path_vft = {
1218     .fnv_get = fib_path_get_node,
1219     .fnv_last_lock = fib_path_last_lock_gone,
1220     .fnv_back_walk = fib_path_back_walk_notify,
1221     .fnv_mem_show = fib_path_memory_show,
1222 };
1223
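/*
 * fib_path_route_flags_to_cfg_flags
 *
 * Translate the route-path flags supplied by the client into the
 * path's configuration flags
 */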
1224 static fib_path_cfg_flags_t
1225 fib_path_route_flags_to_cfg_flags (const fib_route_path_t *rpath)
1226 {
1227     fib_path_cfg_flags_t cfg_flags = FIB_PATH_CFG_FLAG_NONE;
1228
1229     if (rpath->frp_flags & FIB_ROUTE_PATH_RESOLVE_VIA_HOST)
1230         cfg_flags |= FIB_PATH_CFG_FLAG_RESOLVE_HOST;
1231     if (rpath->frp_flags & FIB_ROUTE_PATH_RESOLVE_VIA_ATTACHED)
1232         cfg_flags |= FIB_PATH_CFG_FLAG_RESOLVE_ATTACHED;
1233     if (rpath->frp_flags & FIB_ROUTE_PATH_LOCAL)
1234         cfg_flags |= FIB_PATH_CFG_FLAG_LOCAL;
1235     if (rpath->frp_flags & FIB_ROUTE_PATH_ATTACHED)
1236         cfg_flags |= FIB_PATH_CFG_FLAG_ATTACHED;
1237     if (rpath->frp_flags & FIB_ROUTE_PATH_INTF_RX)
1238         cfg_flags |= FIB_PATH_CFG_FLAG_INTF_RX;
1239     if (rpath->frp_flags & FIB_ROUTE_PATH_RPF_ID)
1240         cfg_flags |= FIB_PATH_CFG_FLAG_RPF_ID;
1241     if (rpath->frp_flags & FIB_ROUTE_PATH_EXCLUSIVE)
1242         cfg_flags |= FIB_PATH_CFG_FLAG_EXCLUSIVE;
1243     if (rpath->frp_flags & FIB_ROUTE_PATH_DROP)
1244         cfg_flags |= FIB_PATH_CFG_FLAG_DROP;
1245     if (rpath->frp_flags & FIB_ROUTE_PATH_SOURCE_LOOKUP)
1246         cfg_flags |= FIB_PATH_CFG_FLAG_DEAG_SRC;
1247
1248     return (cfg_flags);
1249 }
1250
1251 /*
1252  * fib_path_create
1253  *
1254  * Create and initialise a new path object.
1255  * return the index of the path.
1256  */
1257 fib_node_index_t
1258 fib_path_create (fib_node_index_t pl_index,
1259                  const fib_route_path_t *rpath)
1260 {
1261     fib_path_t *path;
1262
1263     pool_get(fib_path_pool, path);
1264     clib_memset(path, 0, sizeof(*path));
1265
1266     fib_node_init(&path->fp_node,
1267                   FIB_NODE_TYPE_PATH);
1268
1269     dpo_reset(&path->fp_dpo);
1270     path->fp_pl_index = pl_index;
1271     path->fp_nh_proto = rpath->frp_proto;
1272     path->fp_via_fib = FIB_NODE_INDEX_INVALID;
1273     path->fp_weight = rpath->frp_weight;
1274     if (0 == path->fp_weight)
1275     {
1276         /*
1277          * a weight of 0 is a meaningless value. We could either reject it, and thus force
1278          * clients to always use 1, or we can accept it and fix it up appropriately.
1279          */
1280         path->fp_weight = 1;
1281     }
1282     path->fp_preference = rpath->frp_preference;
1283     path->fp_cfg_flags = fib_path_route_flags_to_cfg_flags(rpath);
1284
1285     /*
1286      * deduce the path's type from the parameters and save what is needed.
1287      */
1288     if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_LOCAL)
1289     {
1290         path->fp_type = FIB_PATH_TYPE_RECEIVE;
1291         path->receive.fp_interface = rpath->frp_sw_if_index;
1292         path->receive.fp_addr = rpath->frp_addr;
1293     }
1294     else if (rpath->frp_flags & FIB_ROUTE_PATH_UDP_ENCAP)
1295     {
1296         path->fp_type = FIB_PATH_TYPE_UDP_ENCAP;
1297         path->udp_encap.fp_udp_encap_id = rpath->frp_udp_encap_id;
1298     }
1299     else if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_INTF_RX)
1300     {
1301         path->fp_type = FIB_PATH_TYPE_INTF_RX;
1302         path->intf_rx.fp_interface = rpath->frp_sw_if_index;
1303     }
1304     else if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_RPF_ID)
1305     {
1306         path->fp_type = FIB_PATH_TYPE_DEAG;
1307         path->deag.fp_tbl_id = rpath->frp_fib_index;
1308         path->deag.fp_rpf_id = rpath->frp_rpf_id;
1309     }
1310     else if (rpath->frp_flags & FIB_ROUTE_PATH_BIER_FMASK)
1311     {
1312         path->fp_type = FIB_PATH_TYPE_BIER_FMASK;
1313         path->bier_fmask.fp_bier_fmask = rpath->frp_bier_fmask;
1314     }
1315     else if (rpath->frp_flags & FIB_ROUTE_PATH_BIER_IMP)
1316     {
1317         path->fp_type = FIB_PATH_TYPE_BIER_IMP;
1318         path->bier_imp.fp_bier_imp = rpath->frp_bier_imp;
1319     }
1320     else if (rpath->frp_flags & FIB_ROUTE_PATH_BIER_TABLE)
1321     {
1322         path->fp_type = FIB_PATH_TYPE_BIER_TABLE;
1323         path->bier_table.fp_bier_tbl = rpath->frp_bier_tbl;
1324     }
1325     else if (rpath->frp_flags & FIB_ROUTE_PATH_DEAG)
1326     {
1327         path->fp_type = FIB_PATH_TYPE_DEAG;
1328         path->deag.fp_tbl_id = rpath->frp_fib_index;
1329     }
1330     else if (rpath->frp_flags & FIB_ROUTE_PATH_DVR)
1331     {
1332         path->fp_type = FIB_PATH_TYPE_DVR;
1333         path->dvr.fp_interface = rpath->frp_sw_if_index;
1334     }
1335     else if (rpath->frp_flags & FIB_ROUTE_PATH_EXCLUSIVE)
1336     {
1337         path->fp_type = FIB_PATH_TYPE_EXCLUSIVE;
1338         dpo_copy(&path->exclusive.fp_ex_dpo, &rpath->dpo);
1339     }
1340     else if (~0 != rpath->frp_sw_if_index)
1341     {
1342         if (ip46_address_is_zero(&rpath->frp_addr))
1343         {
1344             path->fp_type = FIB_PATH_TYPE_ATTACHED;
1345             path->attached.fp_interface = rpath->frp_sw_if_index;
1346         }
1347         else
1348         {
1349             path->fp_type = FIB_PATH_TYPE_ATTACHED_NEXT_HOP;
1350             path->attached_next_hop.fp_interface = rpath->frp_sw_if_index;
1351             path->attached_next_hop.fp_nh = rpath->frp_addr;
1352         }
1353     }
1354     else
1355     {
1356         if (ip46_address_is_zero(&rpath->frp_addr))
1357         {
1358             if (~0 == rpath->frp_fib_index)
1359             {
1360                 path->fp_type = FIB_PATH_TYPE_SPECIAL;
1361             }
1362             else
1363             {
1364                 path->fp_type = FIB_PATH_TYPE_DEAG;
1365                 path->deag.fp_tbl_id = rpath->frp_fib_index;
1366                 path->deag.fp_rpf_id = ~0;
1367             }
1368         }
1369         else
1370         {
1371             path->fp_type = FIB_PATH_TYPE_RECURSIVE;
1372             if (DPO_PROTO_MPLS == path->fp_nh_proto)
1373             {
1374                 path->recursive.fp_nh.fp_local_label = rpath->frp_local_label;
1375                 path->recursive.fp_nh.fp_eos = rpath->frp_eos;
1376             }
1377             else
1378             {
1379                 path->recursive.fp_nh.fp_ip = rpath->frp_addr;
1380             }
1381             path->recursive.fp_tbl_id = rpath->frp_fib_index;
1382         }
1383     }
1384
1385     FIB_PATH_DBG(path, "create");
1386
1387     return (fib_path_get_index(path));
1388 }
1389
1390 /*
1391  * fib_path_create_special
1392  *
1393  * Create and initialise a new path object.
1394  * return the index of the path.
1395  */
1396 fib_node_index_t
1397 fib_path_create_special (fib_node_index_t pl_index,
1398                          dpo_proto_t nh_proto,
1399                          fib_path_cfg_flags_t flags,
1400                          const dpo_id_t *dpo)
1401 {
1402     fib_path_t *path;
1403
1404     pool_get(fib_path_pool, path);
1405     clib_memset(path, 0, sizeof(*path));
1406
1407     fib_node_init(&path->fp_node,
1408                   FIB_NODE_TYPE_PATH);
1409     dpo_reset(&path->fp_dpo);
1410
1411     path->fp_pl_index = pl_index;
1412     path->fp_weight = 1;
1413     path->fp_preference = 0;
1414     path->fp_nh_proto = nh_proto;
1415     path->fp_via_fib = FIB_NODE_INDEX_INVALID;
1416     path->fp_cfg_flags = flags;
1417
1418     if (FIB_PATH_CFG_FLAG_DROP & flags)
1419     {
1420         path->fp_type = FIB_PATH_TYPE_SPECIAL;
1421     }
1422     else if (FIB_PATH_CFG_FLAG_LOCAL & flags)
1423     {
1424         path->fp_type = FIB_PATH_TYPE_RECEIVE;
1425         path->receive.fp_interface = FIB_NODE_INDEX_INVALID;
1426     }
1427     else
1428     {
1429         path->fp_type = FIB_PATH_TYPE_EXCLUSIVE;
1430         ASSERT(NULL != dpo);
1431         dpo_copy(&path->exclusive.fp_ex_dpo, dpo);
1432     }
1433
1434     return (fib_path_get_index(path));
1435 }
1436
1437 /*
1438  * fib_path_copy
1439  *
1440  * Copy a path. return index of new path.
1441  */
1442 fib_node_index_t
1443 fib_path_copy (fib_node_index_t path_index,
1444                fib_node_index_t path_list_index)
1445 {
1446     fib_path_t *path, *orig_path;
1447
1448     pool_get(fib_path_pool, path);
1449
1450     orig_path = fib_path_get(path_index);
1451     ASSERT(NULL != orig_path);
1452
1453     memcpy(path, orig_path, sizeof(*path));
1454
1455     FIB_PATH_DBG(path, "create-copy:%d", path_index);
1456
1457     /*
1458      * reset the dynamic section
1459      */
1460     fib_node_init(&path->fp_node, FIB_NODE_TYPE_PATH);
1461     path->fp_oper_flags     = FIB_PATH_OPER_FLAG_NONE;
1462     path->fp_pl_index  = path_list_index;
1463     path->fp_via_fib   = FIB_NODE_INDEX_INVALID;
1464     clib_memset(&path->fp_dpo, 0, sizeof(path->fp_dpo));
1465     dpo_reset(&path->fp_dpo);
1466
1467     return (fib_path_get_index(path));
1468 }
1469
1470 /*
1471  * fib_path_destroy
1472  *
1473  * destroy a path that is no longer required
1474  */
1475 void
1476 fib_path_destroy (fib_node_index_t path_index)
1477 {
1478     fib_path_t *path;
1479
1480     path = fib_path_get(path_index);
1481
1482     ASSERT(NULL != path);
1483     FIB_PATH_DBG(path, "destroy");
1484
1485     fib_path_unresolve(path);
1486
1487     fib_node_deinit(&path->fp_node);
1488     pool_put(fib_path_pool, path);
1489 }
1490
1491 /*
1492  * fib_path_hash
1493  *
1494  * compute the hash of a path over its key (the region between the hash markers)
1495  */
1496 uword
1497 fib_path_hash (fib_node_index_t path_index)
1498 {
1499     fib_path_t *path;
1500
1501     path = fib_path_get(path_index);
1502
1503     return (hash_memory(STRUCT_MARK_PTR(path, path_hash_start),
1504                         (STRUCT_OFFSET_OF(fib_path_t, path_hash_end) -
1505                          STRUCT_OFFSET_OF(fib_path_t, path_hash_start)),
1506                         0));
1507 }
1508
1509 /*
1510  * fib_path_cmp_i
1511  *
1512  * Compare two paths for equivalence.
1513  */
1514 static int
1515 fib_path_cmp_i (const fib_path_t *path1,
1516                 const fib_path_t *path2)
1517 {
1518     int res;
1519
1520     res = 1;
1521
1522     /*
1523      * paths of different types or protocols are not equal.
1524      * paths that differ only in weight and/or preference are considered the same path.
1525      */
1526     if (path1->fp_type != path2->fp_type)
1527     {
1528         res = (path1->fp_type - path2->fp_type);
1529     }
1530     else if (path1->fp_nh_proto != path2->fp_nh_proto)
1531     {
1532         res = (path1->fp_nh_proto - path2->fp_nh_proto);
1533     }
1534     else
1535     {
1536         /*
1537          * both paths are of the same type.
1538          * consider each type and its attributes in turn.
1539          */
1540         switch (path1->fp_type)
1541         {
1542         case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
1543             res = ip46_address_cmp(&path1->attached_next_hop.fp_nh,
1544                                    &path2->attached_next_hop.fp_nh);
1545             if (0 == res) {
1546                 res = (path1->attached_next_hop.fp_interface -
1547                        path2->attached_next_hop.fp_interface);
1548             }
1549             break;
1550         case FIB_PATH_TYPE_ATTACHED:
1551             res = (path1->attached.fp_interface -
1552                    path2->attached.fp_interface);
1553             break;
1554         case FIB_PATH_TYPE_RECURSIVE:
1555             res = ip46_address_cmp(&path1->recursive.fp_nh,
1556                                    &path2->recursive.fp_nh);
1557  
1558             if (0 == res)
1559             {
1560                 res = (path1->recursive.fp_tbl_id - path2->recursive.fp_tbl_id);
1561             }
1562             break;
1563         case FIB_PATH_TYPE_BIER_FMASK:
1564             res = (path1->bier_fmask.fp_bier_fmask -
1565                    path2->bier_fmask.fp_bier_fmask);
1566             break;
1567         case FIB_PATH_TYPE_BIER_IMP:
1568             res = (path1->bier_imp.fp_bier_imp -
1569                    path2->bier_imp.fp_bier_imp);
1570             break;
1571         case FIB_PATH_TYPE_BIER_TABLE:
1572             res = bier_table_id_cmp(&path1->bier_table.fp_bier_tbl,
1573                                     &path2->bier_table.fp_bier_tbl);
1574             break;
1575         case FIB_PATH_TYPE_DEAG:
1576             res = (path1->deag.fp_tbl_id - path2->deag.fp_tbl_id);
1577             if (0 == res)
1578             {
1579                 res = (path1->deag.fp_rpf_id - path2->deag.fp_rpf_id);
1580             }
1581             break;
1582         case FIB_PATH_TYPE_INTF_RX:
1583             res = (path1->intf_rx.fp_interface - path2->intf_rx.fp_interface);
1584             break;
1585         case FIB_PATH_TYPE_UDP_ENCAP:
1586             res = (path1->udp_encap.fp_udp_encap_id - path2->udp_encap.fp_udp_encap_id);
1587             break;
1588         case FIB_PATH_TYPE_DVR:
1589             res = (path1->dvr.fp_interface - path2->dvr.fp_interface);
1590             break;
1591         case FIB_PATH_TYPE_EXCLUSIVE:
1592             res = dpo_cmp(&path1->exclusive.fp_ex_dpo, &path2->exclusive.fp_ex_dpo);
1593             break;
1594         case FIB_PATH_TYPE_SPECIAL:
1595         case FIB_PATH_TYPE_RECEIVE:
1596             res = 0;
1597             break;
1598         }
1599     }
1600     return (res);
1601 }
1602
1603 /*
1604  * fib_path_cmp_for_sort
1605  *
1606  * Compare two paths for equivalence. Used during path sorting.
1607  * As usual 0 means equal.
1608  */
1609 int
1610 fib_path_cmp_for_sort (void * v1,
1611                        void * v2)
1612 {
1613     fib_node_index_t *pi1 = v1, *pi2 = v2;
1614     fib_path_t *path1, *path2;
1615
1616     path1 = fib_path_get(*pi1);
1617     path2 = fib_path_get(*pi2);
1618
1619     /*
1620      * when sorting paths we want the highest preference paths
1621      * first, so that the choices set built is in preference order
1622      */
1623     if (path1->fp_preference != path2->fp_preference)
1624     {
1625         return (path1->fp_preference - path2->fp_preference);
1626     }
1627
1628     return (fib_path_cmp_i(path1, path2));
1629 }
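
/*
 * Illustrative sketch: how a caller owning a vector of path indices might
 * order it with the comparator above, so that the result is in preference
 * order. vec_sort_with_function() is the standard vppinfra vector sort.
 */
static void
fib_path_sort_example (fib_node_index_t *path_indices)
{
    /* sorts in place; equal-preference paths are grouped by their
     * type-specific attributes via fib_path_cmp_i() */
    vec_sort_with_function(path_indices, fib_path_cmp_for_sort);
}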
1630
1631 /*
1632  * fib_path_cmp
1633  *
1634  * Compare two paths for equivalence.
1635  */
1636 int
1637 fib_path_cmp (fib_node_index_t pi1,
1638               fib_node_index_t pi2)
1639 {
1640     fib_path_t *path1, *path2;
1641
1642     path1 = fib_path_get(pi1);
1643     path2 = fib_path_get(pi2);
1644
1645     return (fib_path_cmp_i(path1, path2));
1646 }
1647
1648 int
1649 fib_path_cmp_w_route_path (fib_node_index_t path_index,
1650                            const fib_route_path_t *rpath)
1651 {
1652     fib_path_t *path;
1653     int res;
1654
1655     path = fib_path_get(path_index);
1656
1657     res = 1;
1658
1659     if (path->fp_weight != rpath->frp_weight)
1660     {
1661         res = (path->fp_weight - rpath->frp_weight);
1662     }
1663     else
1664     {
1665         /*
1666          * the weights are equal, so compare the path's type-specific
1667          * attributes against those of the route-path.
1668          */
1669         switch (path->fp_type)
1670         {
1671         case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
1672             res = ip46_address_cmp(&path->attached_next_hop.fp_nh,
1673                                    &rpath->frp_addr);
1674             if (0 == res)
1675             {
1676                 res = (path->attached_next_hop.fp_interface -
1677                        rpath->frp_sw_if_index);
1678             }
1679             break;
1680         case FIB_PATH_TYPE_ATTACHED:
1681             res = (path->attached.fp_interface - rpath->frp_sw_if_index);
1682             break;
1683         case FIB_PATH_TYPE_RECURSIVE:
1684             if (DPO_PROTO_MPLS == path->fp_nh_proto)
1685             {
1686                 res = path->recursive.fp_nh.fp_local_label - rpath->frp_local_label;
1687
1688                 if (res == 0)
1689                 {
1690                     res = path->recursive.fp_nh.fp_eos - rpath->frp_eos;
1691                 }
1692             }
1693             else
1694             {
1695                 res = ip46_address_cmp(&path->recursive.fp_nh.fp_ip,
1696                                        &rpath->frp_addr);
1697             }
1698
1699             if (0 == res)
1700             {
1701                 res = (path->recursive.fp_tbl_id - rpath->frp_fib_index);
1702             }
1703             break;
1704         case FIB_PATH_TYPE_BIER_FMASK:
1705             res = (path->bier_fmask.fp_bier_fmask - rpath->frp_bier_fmask);
1706             break;
1707         case FIB_PATH_TYPE_BIER_IMP:
1708             res = (path->bier_imp.fp_bier_imp - rpath->frp_bier_imp);
1709             break;
1710         case FIB_PATH_TYPE_BIER_TABLE:
1711             res = bier_table_id_cmp(&path->bier_table.fp_bier_tbl,
1712                                     &rpath->frp_bier_tbl);
1713             break;
1714         case FIB_PATH_TYPE_INTF_RX:
1715             res = (path->intf_rx.fp_interface - rpath->frp_sw_if_index);
1716             break;
1717         case FIB_PATH_TYPE_UDP_ENCAP:
1718             res = (path->udp_encap.fp_udp_encap_id - rpath->frp_udp_encap_id);
1719             break;
1720         case FIB_PATH_TYPE_DEAG:
1721             res = (path->deag.fp_tbl_id - rpath->frp_fib_index);
1722             if (0 == res)
1723             {
1724                 res = (path->deag.fp_rpf_id - rpath->frp_rpf_id);
1725             }
1726             break;
1727         case FIB_PATH_TYPE_DVR:
1728             res = (path->dvr.fp_interface - rpath->frp_sw_if_index);
1729             break;
1730         case FIB_PATH_TYPE_EXCLUSIVE:
1731             res = dpo_cmp(&path->exclusive.fp_ex_dpo, &rpath->dpo);
1732             break;
1733         case FIB_PATH_TYPE_SPECIAL:
1734         case FIB_PATH_TYPE_RECEIVE:
1735             res = 0;
1736             break;
1737         }
1738     }
1739     return (res);
1740 }
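
/*
 * Illustrative sketch: locate, in a caller-owned vector of path indices,
 * the path that matches a client-supplied fib_route_path_t, the way a
 * path-list update would. A return of 0 from the comparator means a match.
 */
static fib_node_index_t
fib_path_find_example (fib_node_index_t *paths,
                       const fib_route_path_t *rpath)
{
    fib_node_index_t *path_index;

    vec_foreach (path_index, paths)
    {
        if (0 == fib_path_cmp_w_route_path(*path_index, rpath))
        {
            return (*path_index);
        }
    }
    return (FIB_NODE_INDEX_INVALID);
}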
1741
1742 /*
1743  * fib_path_recursive_loop_detect
1744  *
1745  * A forward walk of the FIB object graph to detect a cycle/loop. This
1746  * walk is initiated when an entry links to a new path-list or unlinks from an old one.
1747  * The entry vector passed contains all the FIB entries that are children of this
1748  * path (i.e. all the entries encountered on the walk so far). If this vector
1749  * contains the entry this path resolves via, then a loop is about to form.
1750  * The loop must be allowed to form, since we need the dependencies in place
1751  * so that we can track when the loop breaks.
1752  * However, we MUST not produce a loop in the forwarding graph (else packets
1753  * would loop around the switch path until the loop breaks), so we mark recursive
1754  * paths as looped so that they do not contribute forwarding information.
1755  * By marking the path as looped, an entry such as:
1756  *    X/Y
1757  *     via a.a.a.a (looped)
1758  *     via b.b.b.b (not looped)
1759  * can still forward using the info provided by b.b.b.b only
1760  */
1761 int
1762 fib_path_recursive_loop_detect (fib_node_index_t path_index,
1763                                 fib_node_index_t **entry_indicies)
1764 {
1765     fib_path_t *path;
1766
1767     path = fib_path_get(path_index);
1768
1769     /*
1770      * the forced drop path is never looped, since it is never resolved.
1771      */
1772     if (fib_path_is_permanent_drop(path))
1773     {
1774         return (0);
1775     }
1776
1777     switch (path->fp_type)
1778     {
1779     case FIB_PATH_TYPE_RECURSIVE:
1780     {
1781         fib_node_index_t *entry_index, *entries;
1782         int looped = 0;
1783         entries = *entry_indicies;
1784
1785         vec_foreach(entry_index, entries) {
1786             if (*entry_index == path->fp_via_fib)
1787             {
1788                 /*
1789                  * the entry that is about to link to this path-list (or
1790                  * one of this path-list's children) is the same entry that
1791                  * this recursive path resolves through. this is a cycle.
1792                  * abort the walk.
1793                  */
1794                 looped = 1;
1795                 break;
1796             }
1797         }
1798
1799         if (looped)
1800         {
1801             FIB_PATH_DBG(path, "recursive loop formed");
1802             path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RECURSIVE_LOOP;
1803
1804             dpo_copy(&path->fp_dpo, drop_dpo_get(path->fp_nh_proto));
1805         }
1806         else
1807         {
1808             /*
1809              * no loop here yet. keep forward walking the graph.
1810              */
1811             if (fib_entry_recursive_loop_detect(path->fp_via_fib, entry_indicies))
1812             {
1813                 FIB_PATH_DBG(path, "recursive loop formed");
1814                 path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RECURSIVE_LOOP;
1815             }
1816             else
1817             {
1818                 FIB_PATH_DBG(path, "recursive loop cleared");
1819                 path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RECURSIVE_LOOP;
1820             }
1821         }
1822         break;
1823     }
1824     case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
1825     case FIB_PATH_TYPE_ATTACHED:
1826         if (adj_recursive_loop_detect(path->fp_dpo.dpoi_index,
1827                                       entry_indicies))
1828         {
1829             FIB_PATH_DBG(path, "recursive loop formed");
1830             path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RECURSIVE_LOOP;
1831         }
1832         else
1833         {
1834             FIB_PATH_DBG(path, "recursive loop cleared");
1835             path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RECURSIVE_LOOP;
1836         }
1837         break;
1838     case FIB_PATH_TYPE_SPECIAL:
1839     case FIB_PATH_TYPE_DEAG:
1840     case FIB_PATH_TYPE_DVR:
1841     case FIB_PATH_TYPE_RECEIVE:
1842     case FIB_PATH_TYPE_INTF_RX:
1843     case FIB_PATH_TYPE_UDP_ENCAP:
1844     case FIB_PATH_TYPE_EXCLUSIVE:
1845     case FIB_PATH_TYPE_BIER_FMASK:
1846     case FIB_PATH_TYPE_BIER_TABLE:
1847     case FIB_PATH_TYPE_BIER_IMP:
1848         /*
1849          * these path types cannot be part of a loop, since they are the leaves
1850          * of the graph.
1851          */
1852         break;
1853     }
1854
1855     return (fib_path_is_looped(path_index));
1856 }
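
/*
 * Illustrative sketch: run loop detection over each path in a caller-owned
 * vector, as a path-list walk would, and report whether any path is looped.
 * Each path marks or clears its own RECURSIVE_LOOP flag as a side effect.
 */
static int
fib_path_list_loop_detect_example (fib_node_index_t *paths,
                                   fib_node_index_t **entry_indicies)
{
    fib_node_index_t *path_index;
    int is_looped = 0;

    vec_foreach (path_index, paths)
    {
        is_looped |= fib_path_recursive_loop_detect(*path_index,
                                                    entry_indicies);
    }
    return (is_looped);
}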
1857
1858 int
1859 fib_path_resolve (fib_node_index_t path_index)
1860 {
1861     fib_path_t *path;
1862
1863     path = fib_path_get(path_index);
1864
1865     /*
1866      * hope for the best.
1867      */
1868     path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RESOLVED;
1869
1870     /*
1871      * the forced drop path resolves via the drop adj
1872      */
1873     if (fib_path_is_permanent_drop(path))
1874     {
1875         dpo_copy(&path->fp_dpo, drop_dpo_get(path->fp_nh_proto));
1876         path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
1877         return (fib_path_is_resolved(path_index));
1878     }
1879
1880     switch (path->fp_type)
1881     {
1882     case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
1883         fib_path_attached_next_hop_set(path);
1884         break;
1885     case FIB_PATH_TYPE_ATTACHED:
1886         /*
1887          * path->attached.fp_interface
1888          */
1889         if (!vnet_sw_interface_is_admin_up(vnet_get_main(),
1890                                            path->attached.fp_interface))
1891         {
1892             path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
1893         }
1894         dpo_set(&path->fp_dpo,
1895                 DPO_ADJACENCY,
1896                 path->fp_nh_proto,
1897                 fib_path_attached_get_adj(path,
1898                                           dpo_proto_to_link(path->fp_nh_proto)));
1899
1900         /*
1901          * become a child of the adjacency so we receive updates
1902          * when the interface state changes
1903          */
1904         path->fp_sibling = adj_child_add(path->fp_dpo.dpoi_index,
1905                                          FIB_NODE_TYPE_PATH,
1906                                          fib_path_get_index(path));
1907         break;
1908     case FIB_PATH_TYPE_RECURSIVE:
1909     {
1910         /*
1911          * Create a RR source entry in the table for the address
1912          * that this path recurses through.
1913          * This resolve action is recursive, hence we may create
1914          * more paths in the process. more creates mean maybe realloc
1915          * of this path.
1916          */
1917         fib_node_index_t fei;
1918         fib_prefix_t pfx;
1919
1920         ASSERT(FIB_NODE_INDEX_INVALID == path->fp_via_fib);
1921
1922         if (DPO_PROTO_MPLS == path->fp_nh_proto)
1923         {
1924             fib_prefix_from_mpls_label(path->recursive.fp_nh.fp_local_label,
1925                                        path->recursive.fp_nh.fp_eos,
1926                                        &pfx);
1927         }
1928         else
1929         {
1930             fib_prefix_from_ip46_addr(&path->recursive.fp_nh.fp_ip, &pfx);
1931         }
1932
1933         fib_table_lock(path->recursive.fp_tbl_id,
1934                        dpo_proto_to_fib(path->fp_nh_proto),
1935                        FIB_SOURCE_RR);
1936         fei = fib_table_entry_special_add(path->recursive.fp_tbl_id,
1937                                           &pfx,
1938                                           FIB_SOURCE_RR,
1939                                           FIB_ENTRY_FLAG_NONE);
1940
1941         path = fib_path_get(path_index);
1942         path->fp_via_fib = fei;
1943
1944         /*
1945          * become a dependent child of the entry so the path is 
1946          * informed when the forwarding for the entry changes.
1947          */
1948         path->fp_sibling = fib_entry_child_add(path->fp_via_fib,
1949                                                FIB_NODE_TYPE_PATH,
1950                                                fib_path_get_index(path));
1951
1952         /*
1953          * create and configure the IP DPO
1954          */
1955         fib_path_recursive_adj_update(
1956             path,
1957             fib_path_to_chain_type(path),
1958             &path->fp_dpo);
1959
1960         break;
1961     }
1962     case FIB_PATH_TYPE_BIER_FMASK:
1963     {
1964         /*
1965          * become a dependent child of the entry so the path is
1966          * informed when the forwarding for the entry changes.
1967          */
1968         path->fp_sibling = bier_fmask_child_add(path->bier_fmask.fp_bier_fmask,
1969                                                 FIB_NODE_TYPE_PATH,
1970                                                 fib_path_get_index(path));
1971
1972         path->fp_via_bier_fmask = path->bier_fmask.fp_bier_fmask;
1973         fib_path_bier_fmask_update(path, &path->fp_dpo);
1974
1975         break;
1976     }
1977     case FIB_PATH_TYPE_BIER_IMP:
1978         bier_imp_lock(path->bier_imp.fp_bier_imp);
1979         bier_imp_contribute_forwarding(path->bier_imp.fp_bier_imp,
1980                                        DPO_PROTO_IP4,
1981                                        &path->fp_dpo);
1982         break;
1983     case FIB_PATH_TYPE_BIER_TABLE:
1984     {
1985         /*
1986          * Find/create the BIER table to link to
1987          */
1988         ASSERT(FIB_NODE_INDEX_INVALID == path->fp_via_bier_tbl);
1989
1990         path->fp_via_bier_tbl =
1991             bier_table_ecmp_create_and_lock(&path->bier_table.fp_bier_tbl);
1992
1993         bier_table_contribute_forwarding(path->fp_via_bier_tbl,
1994                                          &path->fp_dpo);
1995         break;
1996     }
1997     case FIB_PATH_TYPE_SPECIAL:
1998         /*
1999          * Resolve via the drop
2000          */
2001         dpo_copy(&path->fp_dpo, drop_dpo_get(path->fp_nh_proto));
2002         break;
2003     case FIB_PATH_TYPE_DEAG:
2004     {
2005         if (DPO_PROTO_BIER == path->fp_nh_proto)
2006         {
2007             bier_disp_table_contribute_forwarding(path->deag.fp_tbl_id,
2008                                                   &path->fp_dpo);
2009         }
2010         else
2011         {
2012             /*
2013              * Resolve via a lookup DPO.
2014              * FIXME. control plane should add routes with a table ID
2015              */
2016             lookup_input_t input;
2017             lookup_cast_t cast;
2018
2019             cast = (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_RPF_ID ?
2020                     LOOKUP_MULTICAST :
2021                     LOOKUP_UNICAST);
2022             input = (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_DEAG_SRC ?
2023                      LOOKUP_INPUT_SRC_ADDR :
2024                      LOOKUP_INPUT_DST_ADDR);
2025
2026             lookup_dpo_add_or_lock_w_fib_index(path->deag.fp_tbl_id,
2027                                                path->fp_nh_proto,
2028                                                cast,
2029                                                input,
2030                                                LOOKUP_TABLE_FROM_CONFIG,
2031                                                &path->fp_dpo);
2032         }
2033         break;
2034     }
2035     case FIB_PATH_TYPE_DVR:
2036         dvr_dpo_add_or_lock(path->attached.fp_interface,
2037                             path->fp_nh_proto,
2038                             &path->fp_dpo);
2039         break;
2040     case FIB_PATH_TYPE_RECEIVE:
2041         /*
2042          * Resolve via a receive DPO.
2043          */
2044         receive_dpo_add_or_lock(path->fp_nh_proto,
2045                                 path->receive.fp_interface,
2046                                 &path->receive.fp_addr,
2047                                 &path->fp_dpo);
2048         break;
2049     case FIB_PATH_TYPE_UDP_ENCAP:
2050         udp_encap_lock(path->udp_encap.fp_udp_encap_id);
2051         udp_encap_contribute_forwarding(path->udp_encap.fp_udp_encap_id,
2052                                         path->fp_nh_proto,
2053                                         &path->fp_dpo);
2054         break;
2055     case FIB_PATH_TYPE_INTF_RX: {
2056         /*
2057          * Resolve via an interface receive DPO.
2058          */
2059         interface_rx_dpo_add_or_lock(path->fp_nh_proto,
2060                                      path->intf_rx.fp_interface,
2061                                      &path->fp_dpo);
2062         break;
2063     }
2064     case FIB_PATH_TYPE_EXCLUSIVE:
2065         /*
2066          * Resolve via the user provided DPO
2067          */
2068         dpo_copy(&path->fp_dpo, &path->exclusive.fp_ex_dpo);
2069         break;
2070     }
2071
2072     return (fib_path_is_resolved(path_index));
2073 }
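
/*
 * Illustrative sketch: resolve a path and, if resolution succeeded, return
 * the interface it resolves through (~0 if it has none). The path index is
 * assumed to refer to a path already created by its owning path-list.
 */
static u32
fib_path_resolve_example (fib_node_index_t path_index)
{
    if (fib_path_resolve(path_index))
    {
        return (fib_path_get_resolving_interface(path_index));
    }
    return (~0);
}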
2074
2075 u32
2076 fib_path_get_resolving_interface (fib_node_index_t path_index)
2077 {
2078     fib_path_t *path;
2079
2080     path = fib_path_get(path_index);
2081
2082     switch (path->fp_type)
2083     {
2084     case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
2085         return (path->attached_next_hop.fp_interface);
2086     case FIB_PATH_TYPE_ATTACHED:
2087         return (path->attached.fp_interface);
2088     case FIB_PATH_TYPE_RECEIVE:
2089         return (path->receive.fp_interface);
2090     case FIB_PATH_TYPE_RECURSIVE:
2091         if (fib_path_is_resolved(path_index))
2092         {
2093             return (fib_entry_get_resolving_interface(path->fp_via_fib));
2094         }
2095         break;
2096     case FIB_PATH_TYPE_DVR:
2097         return (path->dvr.fp_interface);
2098     case FIB_PATH_TYPE_INTF_RX:
2099     case FIB_PATH_TYPE_UDP_ENCAP:
2100     case FIB_PATH_TYPE_SPECIAL:
2101     case FIB_PATH_TYPE_DEAG:
2102     case FIB_PATH_TYPE_EXCLUSIVE:
2103     case FIB_PATH_TYPE_BIER_FMASK:
2104     case FIB_PATH_TYPE_BIER_TABLE:
2105     case FIB_PATH_TYPE_BIER_IMP:
2106         break;
2107     }
2108     return (dpo_get_urpf(&path->fp_dpo));
2109 }
2110
2111 index_t
2112 fib_path_get_resolving_index (fib_node_index_t path_index)
2113 {
2114     fib_path_t *path;
2115
2116     path = fib_path_get(path_index);
2117
2118     switch (path->fp_type)
2119     {
2120     case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
2121     case FIB_PATH_TYPE_ATTACHED:
2122     case FIB_PATH_TYPE_RECEIVE:
2123     case FIB_PATH_TYPE_INTF_RX:
2124     case FIB_PATH_TYPE_SPECIAL:
2125     case FIB_PATH_TYPE_DEAG:
2126     case FIB_PATH_TYPE_DVR:
2127     case FIB_PATH_TYPE_EXCLUSIVE:
2128         break;
2129     case FIB_PATH_TYPE_UDP_ENCAP:
2130         return (path->udp_encap.fp_udp_encap_id);
2131     case FIB_PATH_TYPE_RECURSIVE:
2132         return (path->fp_via_fib);
2133     case FIB_PATH_TYPE_BIER_FMASK:
2134         return (path->bier_fmask.fp_bier_fmask);
2135     case FIB_PATH_TYPE_BIER_TABLE:
2136         return (path->fp_via_bier_tbl);
2137     case FIB_PATH_TYPE_BIER_IMP:
2138         return (path->bier_imp.fp_bier_imp);
2139     }
2140     return (~0);
2141 }
2142
2143 adj_index_t
2144 fib_path_get_adj (fib_node_index_t path_index)
2145 {
2146     fib_path_t *path;
2147
2148     path = fib_path_get(path_index);
2149
2150     ASSERT(dpo_is_adj(&path->fp_dpo));
2151     if (dpo_is_adj(&path->fp_dpo))
2152     {
2153         return (path->fp_dpo.dpoi_index);
2154     }
2155     return (ADJ_INDEX_INVALID);
2156 }
2157
2158 u16
2159 fib_path_get_weight (fib_node_index_t path_index)
2160 {
2161     fib_path_t *path;
2162
2163     path = fib_path_get(path_index);
2164
2165     ASSERT(path);
2166
2167     return (path->fp_weight);
2168 }
2169
2170 u16
2171 fib_path_get_preference (fib_node_index_t path_index)
2172 {
2173     fib_path_t *path;
2174
2175     path = fib_path_get(path_index);
2176
2177     ASSERT(path);
2178
2179     return (path->fp_preference);
2180 }
2181
2182 u32
2183 fib_path_get_rpf_id (fib_node_index_t path_index)
2184 {
2185     fib_path_t *path;
2186
2187     path = fib_path_get(path_index);
2188
2189     ASSERT(path);
2190
2191     if (FIB_PATH_CFG_FLAG_RPF_ID & path->fp_cfg_flags)
2192     {
2193         return (path->deag.fp_rpf_id);
2194     }
2195
2196     return (~0);
2197 }
2198
2199 /**
2200  * @brief Contribute the path's adjacency to the list passed.
2201  * By calling this function over all paths, recursively, a child
2202  * can construct its full set of forwarding adjacencies, and hence its
2203  * uRPF list.
2204  */
2205 void
2206 fib_path_contribute_urpf (fib_node_index_t path_index,
2207                           index_t urpf)
2208 {
2209     fib_path_t *path;
2210
2211     path = fib_path_get(path_index);
2212
2213     /*
2214      * resolved and unresolved paths contribute to the RPF list.
2215      */
2216     switch (path->fp_type)
2217     {
2218     case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
2219         fib_urpf_list_append(urpf, path->attached_next_hop.fp_interface);
2220         break;
2221
2222     case FIB_PATH_TYPE_ATTACHED:
2223         fib_urpf_list_append(urpf, path->attached.fp_interface);
2224         break;
2225
2226     case FIB_PATH_TYPE_RECURSIVE:
2227         if (FIB_NODE_INDEX_INVALID != path->fp_via_fib &&
2228             !fib_path_is_looped(path_index))
2229         {
2230             /*
2231              * there's unresolved due to constraints, and there's unresolved
2232              * due to having no via entry; without a via there is nothing to contribute.
2233              */
2234             fib_entry_contribute_urpf(path->fp_via_fib, urpf);
2235         }
2236         break;
2237
2238     case FIB_PATH_TYPE_EXCLUSIVE:
2239     case FIB_PATH_TYPE_SPECIAL:
2240     {
2241         /*
2242          * these path types may link to an adj, if that's what
2243          * the client gave
2244          */
2245         u32 rpf_sw_if_index;
2246
2247         rpf_sw_if_index = dpo_get_urpf(&path->fp_dpo);
2248
2249         if (~0 != rpf_sw_if_index)
2250         {
2251             fib_urpf_list_append(urpf, rpf_sw_if_index);
2252         }
2253         break;
2254     }
2255     case FIB_PATH_TYPE_DVR:
2256         fib_urpf_list_append(urpf, path->dvr.fp_interface);
2257         break;
2258     case FIB_PATH_TYPE_DEAG:
2259     case FIB_PATH_TYPE_RECEIVE:
2260     case FIB_PATH_TYPE_INTF_RX:
2261     case FIB_PATH_TYPE_UDP_ENCAP:
2262     case FIB_PATH_TYPE_BIER_FMASK:
2263     case FIB_PATH_TYPE_BIER_TABLE:
2264     case FIB_PATH_TYPE_BIER_IMP:
2265         /*
2266          * these path types don't link to an adj
2267          */
2268         break;
2269     }
2270 }
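
/*
 * Illustrative sketch: accumulate a uRPF list from all of an entry's paths.
 * Assumes the alloc/append/bake API from fib_urpf_list.h and a caller-owned
 * vector of path indices; the caller is responsible for unlocking the list.
 */
static index_t
fib_path_build_urpf_example (fib_node_index_t *paths)
{
    fib_node_index_t *path_index;
    index_t urpf;

    urpf = fib_urpf_list_alloc_and_lock();

    vec_foreach (path_index, paths)
    {
        fib_path_contribute_urpf(*path_index, urpf);
    }

    /* sort/de-dup the interface set before it is used for RPF checks */
    fib_urpf_list_bake(urpf);

    return (urpf);
}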
2271
2272 void
2273 fib_path_stack_mpls_disp (fib_node_index_t path_index,
2274                           dpo_proto_t payload_proto,
2275                           fib_mpls_lsp_mode_t mode,
2276                           dpo_id_t *dpo)
2277 {
2278     fib_path_t *path;
2279
2280     path = fib_path_get(path_index);
2281
2282     ASSERT(path);
2283
2284     switch (path->fp_type)
2285     {
2286     case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
2287     {
2288         dpo_id_t tmp = DPO_INVALID;
2289
2290         dpo_copy(&tmp, dpo);
2291
2292         mpls_disp_dpo_create(payload_proto, ~0, mode, &tmp, dpo);
2293         dpo_reset(&tmp);
2294         break;
2295     }
2296     case FIB_PATH_TYPE_DEAG:
2297     {
2298         dpo_id_t tmp = DPO_INVALID;
2299
2300         dpo_copy(&tmp, dpo);
2301
2302         mpls_disp_dpo_create(payload_proto,
2303                              path->deag.fp_rpf_id,
2304                              mode, &tmp, dpo);
2305         dpo_reset(&tmp);
2306         break;
2307     }
2308     case FIB_PATH_TYPE_RECEIVE:
2309     case FIB_PATH_TYPE_ATTACHED:
2310     case FIB_PATH_TYPE_RECURSIVE:
2311     case FIB_PATH_TYPE_INTF_RX:
2312     case FIB_PATH_TYPE_UDP_ENCAP:
2313     case FIB_PATH_TYPE_EXCLUSIVE:
2314     case FIB_PATH_TYPE_SPECIAL:
2315     case FIB_PATH_TYPE_BIER_FMASK:
2316     case FIB_PATH_TYPE_BIER_TABLE:
2317     case FIB_PATH_TYPE_BIER_IMP:
2318     case FIB_PATH_TYPE_DVR:
2319         break;
2320     }
2321 }
2322
2323 void
2324 fib_path_contribute_forwarding (fib_node_index_t path_index,
2325                                 fib_forward_chain_type_t fct,
2326                                 dpo_id_t *dpo)
2327 {
2328     fib_path_t *path;
2329
2330     path = fib_path_get(path_index);
2331
2332     ASSERT(path);
2333     ASSERT(FIB_FORW_CHAIN_TYPE_MPLS_EOS != fct);
2334
2335     /*
2336      * The DPO stored in the path was created when the path was resolved.
2337      * This then represents the path's 'native' protocol, e.g. IP.
2338      * For all other chain types we will need to find something else.
2339      */
2340     if (fib_path_to_chain_type(path) == fct)
2341     {
2342         dpo_copy(dpo, &path->fp_dpo);
2343     }
2344     else
2345     {
2346         switch (path->fp_type)
2347         {
2348         case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
2349             switch (fct)
2350             {
2351             case FIB_FORW_CHAIN_TYPE_UNICAST_IP4:
2352             case FIB_FORW_CHAIN_TYPE_UNICAST_IP6:
2353             case FIB_FORW_CHAIN_TYPE_MPLS_EOS:
2354             case FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS:
2355             case FIB_FORW_CHAIN_TYPE_ETHERNET:
2356             case FIB_FORW_CHAIN_TYPE_NSH:
2357             case FIB_FORW_CHAIN_TYPE_MCAST_IP4:
2358             case FIB_FORW_CHAIN_TYPE_MCAST_IP6:
2359             {
2360                 adj_index_t ai;
2361
2362                 /*
2363                  * get an appropriate link-type adj.
2364                  */
2365                 ai = fib_path_attached_next_hop_get_adj(
2366                          path,
2367                          fib_forw_chain_type_to_link_type(fct));
2368                 dpo_set(dpo, DPO_ADJACENCY,
2369                         fib_forw_chain_type_to_dpo_proto(fct), ai);
2370                 adj_unlock(ai);
2371
2372                 break;
2373             }
2374             case FIB_FORW_CHAIN_TYPE_BIER:
2375                 break;
2376             }
2377             break;
2378         case FIB_PATH_TYPE_RECURSIVE:
2379             switch (fct)
2380             {
2381             case FIB_FORW_CHAIN_TYPE_MPLS_EOS:
2382             case FIB_FORW_CHAIN_TYPE_UNICAST_IP4:
2383             case FIB_FORW_CHAIN_TYPE_UNICAST_IP6:
2384             case FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS:
2385             case FIB_FORW_CHAIN_TYPE_MCAST_IP4:
2386             case FIB_FORW_CHAIN_TYPE_MCAST_IP6:
2387             case FIB_FORW_CHAIN_TYPE_BIER:
2388                 fib_path_recursive_adj_update(path, fct, dpo);
2389                 break;
2390             case FIB_FORW_CHAIN_TYPE_ETHERNET:
2391             case FIB_FORW_CHAIN_TYPE_NSH:
2392                 ASSERT(0);
2393                 break;
2394             }
2395             break;
2396         case FIB_PATH_TYPE_BIER_TABLE:
2397             switch (fct)
2398             {
2399             case FIB_FORW_CHAIN_TYPE_BIER:
2400                 bier_table_contribute_forwarding(path->fp_via_bier_tbl, dpo);
2401                 break;
2402             case FIB_FORW_CHAIN_TYPE_MPLS_EOS:
2403             case FIB_FORW_CHAIN_TYPE_UNICAST_IP4:
2404             case FIB_FORW_CHAIN_TYPE_UNICAST_IP6:
2405             case FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS:
2406             case FIB_FORW_CHAIN_TYPE_MCAST_IP4:
2407             case FIB_FORW_CHAIN_TYPE_MCAST_IP6:
2408             case FIB_FORW_CHAIN_TYPE_ETHERNET:
2409             case FIB_FORW_CHAIN_TYPE_NSH:
2410                 ASSERT(0);
2411                 break;
2412             }
2413             break;
2414         case FIB_PATH_TYPE_BIER_FMASK:
2415             switch (fct)
2416             {
2417             case FIB_FORW_CHAIN_TYPE_BIER:
2418                 fib_path_bier_fmask_update(path, dpo);
2419                 break;
2420             case FIB_FORW_CHAIN_TYPE_MPLS_EOS:
2421             case FIB_FORW_CHAIN_TYPE_UNICAST_IP4:
2422             case FIB_FORW_CHAIN_TYPE_UNICAST_IP6:
2423             case FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS:
2424             case FIB_FORW_CHAIN_TYPE_MCAST_IP4:
2425             case FIB_FORW_CHAIN_TYPE_MCAST_IP6:
2426             case FIB_FORW_CHAIN_TYPE_ETHERNET:
2427             case FIB_FORW_CHAIN_TYPE_NSH:
2428                 ASSERT(0);
2429                 break;
2430             }
2431             break;
2432         case FIB_PATH_TYPE_BIER_IMP:
2433             bier_imp_contribute_forwarding(path->bier_imp.fp_bier_imp,
2434                                            fib_forw_chain_type_to_dpo_proto(fct),
2435                                            dpo);
2436             break;
2437         case FIB_PATH_TYPE_DEAG:
2438             switch (fct)
2439             {
2440             case FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS:
2441                 lookup_dpo_add_or_lock_w_table_id(MPLS_FIB_DEFAULT_TABLE_ID,
2442                                                   DPO_PROTO_MPLS,
2443                                                   LOOKUP_UNICAST,
2444                                                   LOOKUP_INPUT_DST_ADDR,
2445                                                   LOOKUP_TABLE_FROM_CONFIG,
2446                                                   dpo);
2447                 break;
2448             case FIB_FORW_CHAIN_TYPE_MPLS_EOS:
2449             case FIB_FORW_CHAIN_TYPE_UNICAST_IP4:
2450             case FIB_FORW_CHAIN_TYPE_UNICAST_IP6:
2451                 dpo_copy(dpo, &path->fp_dpo);
2452                 break;
2453             case FIB_FORW_CHAIN_TYPE_MCAST_IP4:
2454             case FIB_FORW_CHAIN_TYPE_MCAST_IP6:
2455             case FIB_FORW_CHAIN_TYPE_BIER:
2456                 break;
2457             case FIB_FORW_CHAIN_TYPE_ETHERNET:
2458             case FIB_FORW_CHAIN_TYPE_NSH:
2459                 ASSERT(0);
2460                 break;
2461             }
2462             break;
2463         case FIB_PATH_TYPE_EXCLUSIVE:
2464             dpo_copy(dpo, &path->exclusive.fp_ex_dpo);
2465             break;
2466         case FIB_PATH_TYPE_ATTACHED:
2467             switch (fct)
2468             {
2469             case FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS:
2470             case FIB_FORW_CHAIN_TYPE_UNICAST_IP4:
2471             case FIB_FORW_CHAIN_TYPE_UNICAST_IP6:
2472             case FIB_FORW_CHAIN_TYPE_MPLS_EOS:
2473             case FIB_FORW_CHAIN_TYPE_ETHERNET:
2474             case FIB_FORW_CHAIN_TYPE_NSH:
2475             case FIB_FORW_CHAIN_TYPE_BIER:
2476                 {
2477                     adj_index_t ai;
2478
2479                     /*
2480                      * get an appropriate link-type adj.
2481                      */
2482                     ai = fib_path_attached_get_adj(
2483                             path,
2484                             fib_forw_chain_type_to_link_type(fct));
2485                     dpo_set(dpo, DPO_ADJACENCY,
2486                             fib_forw_chain_type_to_dpo_proto(fct), ai);
2487                     adj_unlock(ai);
2488                     break;
2489                 }
2490             case FIB_FORW_CHAIN_TYPE_MCAST_IP4:
2491             case FIB_FORW_CHAIN_TYPE_MCAST_IP6:
2492                 {
2493                     adj_index_t ai;
2494
2495                     /*
2496                      * Create the adj needed for sending IP multicast traffic
2497                      */
2498                     ai = adj_mcast_add_or_lock(dpo_proto_to_fib(path->fp_nh_proto),
2499                                                fib_forw_chain_type_to_link_type(fct),
2500                                                path->attached.fp_interface);
2501                     dpo_set(dpo, DPO_ADJACENCY,
2502                             fib_forw_chain_type_to_dpo_proto(fct),
2503                             ai);
2504                     adj_unlock(ai);
2505                 }
2506                 break;
2507             }
2508             break;
2509         case FIB_PATH_TYPE_INTF_RX:
2510             /*
2511              * Create the interface receive (RX) DPO
2512              */
2513             interface_rx_dpo_add_or_lock(fib_forw_chain_type_to_dpo_proto(fct),
2514                                          path->attached.fp_interface,
2515                                          dpo);
2516             break;
2517         case FIB_PATH_TYPE_UDP_ENCAP:
2518             udp_encap_contribute_forwarding(path->udp_encap.fp_udp_encap_id,
2519                                             path->fp_nh_proto,
2520                                             dpo);
2521             break;
2522         case FIB_PATH_TYPE_RECEIVE:
2523         case FIB_PATH_TYPE_SPECIAL:
2524         case FIB_PATH_TYPE_DVR:
2525             dpo_copy(dpo, &path->fp_dpo);
2526             break;
2527         }
2528     }
2529 }
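
/*
 * Illustrative sketch: extract an IPv4 unicast forwarding object from a
 * path and release it when done. The caller owns the dpo_id_t it passes in
 * and must dpo_reset() it to drop the reference taken here.
 */
static void
fib_path_forwarding_example (fib_node_index_t path_index)
{
    dpo_id_t dpo = DPO_INVALID;

    fib_path_contribute_forwarding(path_index,
                                   FIB_FORW_CHAIN_TYPE_UNICAST_IP4,
                                   &dpo);

    /* ... stack the DPO on a parent, or install it in a load-balance ... */

    dpo_reset(&dpo);
}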
2530
2531 load_balance_path_t *
2532 fib_path_append_nh_for_multipath_hash (fib_node_index_t path_index,
2533                                        fib_forward_chain_type_t fct,
2534                                        load_balance_path_t *hash_key)
2535 {
2536     load_balance_path_t *mnh;
2537     fib_path_t *path;
2538
2539     path = fib_path_get(path_index);
2540
2541     ASSERT(path);
2542
2543     vec_add2(hash_key, mnh, 1);
2544
2545     mnh->path_weight = path->fp_weight;
2546     mnh->path_index = path_index;
2547
2548     if (fib_path_is_resolved(path_index))
2549     {
2550         fib_path_contribute_forwarding(path_index, fct, &mnh->path_dpo);
2551     }
2552     else
2553     {
2554         dpo_copy(&mnh->path_dpo,
2555                  drop_dpo_get(fib_forw_chain_type_to_dpo_proto(fct)));
2556     }
2557     return (hash_key);
2558 }
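
/*
 * Illustrative sketch: collect the load-balance path set for a group of
 * paths, as an entry would when (re)building its load-balance. The vector
 * returned is typically handed to load_balance_multipath_update() and then
 * freed by the caller with vec_free().
 */
static load_balance_path_t *
fib_path_collect_nhs_example (fib_node_index_t *paths,
                              fib_forward_chain_type_t fct)
{
    load_balance_path_t *nhs = NULL;
    fib_node_index_t *path_index;

    vec_foreach (path_index, paths)
    {
        nhs = fib_path_append_nh_for_multipath_hash(*path_index, fct, nhs);
    }
    return (nhs);
}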
2559
2560 int
2561 fib_path_is_recursive_constrained (fib_node_index_t path_index)
2562 {
2563     fib_path_t *path;
2564
2565     path = fib_path_get(path_index);
2566
2567     return ((FIB_PATH_TYPE_RECURSIVE == path->fp_type) &&
2568             ((path->fp_cfg_flags & FIB_PATH_CFG_FLAG_RESOLVE_ATTACHED) ||
2569              (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_RESOLVE_HOST)));
2570 }
2571
2572 int
2573 fib_path_is_exclusive (fib_node_index_t path_index)
2574 {
2575     fib_path_t *path;
2576
2577     path = fib_path_get(path_index);
2578
2579     return (FIB_PATH_TYPE_EXCLUSIVE == path->fp_type);
2580 }
2581
2582 int
2583 fib_path_is_deag (fib_node_index_t path_index)
2584 {
2585     fib_path_t *path;
2586
2587     path = fib_path_get(path_index);
2588
2589     return (FIB_PATH_TYPE_DEAG == path->fp_type);
2590 }
2591
2592 int
2593 fib_path_is_resolved (fib_node_index_t path_index)
2594 {
2595     fib_path_t *path;
2596
2597     path = fib_path_get(path_index);
2598
2599     return (dpo_id_is_valid(&path->fp_dpo) &&
2600             (path->fp_oper_flags & FIB_PATH_OPER_FLAG_RESOLVED) &&
2601             !fib_path_is_looped(path_index) &&
2602             !fib_path_is_permanent_drop(path));
2603 }
2604
2605 int
2606 fib_path_is_looped (fib_node_index_t path_index)
2607 {
2608     fib_path_t *path;
2609
2610     path = fib_path_get(path_index);
2611
2612     return (path->fp_oper_flags & FIB_PATH_OPER_FLAG_RECURSIVE_LOOP);
2613 }
2614
2615 fib_path_list_walk_rc_t
2616 fib_path_encode (fib_node_index_t path_list_index,
2617                  fib_node_index_t path_index,
2618                  const fib_path_ext_t *path_ext,
2619                  void *ctx)
2620 {
2621     fib_route_path_encode_t **api_rpaths = ctx;
2622     fib_route_path_encode_t *api_rpath;
2623     fib_path_t *path;
2624
2625     path = fib_path_get(path_index);
2626     if (!path)
2627       return (FIB_PATH_LIST_WALK_CONTINUE);
2628     vec_add2(*api_rpaths, api_rpath, 1);
2629     api_rpath->rpath.frp_weight = path->fp_weight;
2630     api_rpath->rpath.frp_preference = path->fp_preference;
2631     api_rpath->rpath.frp_proto = path->fp_nh_proto;
2632     api_rpath->rpath.frp_sw_if_index = ~0;
2633     api_rpath->rpath.frp_fib_index = 0;
2634     api_rpath->dpo = path->fp_dpo;
2635
2636     switch (path->fp_type)
2637     {
2638       case FIB_PATH_TYPE_RECEIVE:
2639         api_rpath->rpath.frp_addr = path->receive.fp_addr;
2640         api_rpath->rpath.frp_sw_if_index = path->receive.fp_interface;
2641         break;
2642       case FIB_PATH_TYPE_ATTACHED:
2643         api_rpath->rpath.frp_sw_if_index = path->attached.fp_interface;
2644         break;
2645       case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
2646         api_rpath->rpath.frp_sw_if_index = path->attached_next_hop.fp_interface;
2647         api_rpath->rpath.frp_addr = path->attached_next_hop.fp_nh;
2648         break;
2649       case FIB_PATH_TYPE_BIER_FMASK:
2650         api_rpath->rpath.frp_bier_fmask = path->bier_fmask.fp_bier_fmask;
2651         break;
2652       case FIB_PATH_TYPE_SPECIAL:
2653         break;
2654       case FIB_PATH_TYPE_DEAG:
2655         api_rpath->rpath.frp_fib_index = path->deag.fp_tbl_id;
2656         if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_RPF_ID)
2657         {
2658             api_rpath->rpath.frp_flags |= FIB_ROUTE_PATH_RPF_ID;
2659         }
2660         break;
2661       case FIB_PATH_TYPE_RECURSIVE:
2662         api_rpath->rpath.frp_addr = path->recursive.fp_nh.fp_ip;
2663         api_rpath->rpath.frp_fib_index = path->recursive.fp_tbl_id;
2664         break;
2665       case FIB_PATH_TYPE_DVR:
2666           api_rpath->rpath.frp_sw_if_index = path->dvr.fp_interface;
2667           api_rpath->rpath.frp_flags |= FIB_ROUTE_PATH_DVR;
2668           break;
2669       case FIB_PATH_TYPE_UDP_ENCAP:
2670           api_rpath->rpath.frp_udp_encap_id = path->udp_encap.fp_udp_encap_id;
2671           api_rpath->rpath.frp_flags |= FIB_ROUTE_PATH_UDP_ENCAP;
2672           break;
2673       case FIB_PATH_TYPE_INTF_RX:
2674           api_rpath->rpath.frp_sw_if_index = path->receive.fp_interface;
2675           api_rpath->rpath.frp_flags |= FIB_ROUTE_PATH_INTF_RX;
2676           break;
2677       default:
2678         break;
2679     }
2680
2681     if (path_ext && path_ext->fpe_type == FIB_PATH_EXT_MPLS) 
2682     {
2683         api_rpath->rpath.frp_label_stack = path_ext->fpe_path.frp_label_stack;
2684     }
2685
2686     return (FIB_PATH_LIST_WALK_CONTINUE);
2687 }
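
/*
 * Illustrative sketch: gather the encoded route-paths for a path-list
 * during a table dump, including any MPLS label stack copied from the
 * path extensions. Assumes a walk helper in the style of
 * fib_path_list_walk_w_ext() that invokes fib_path_encode per path and
 * tolerates a NULL extension list; the caller frees the returned vector.
 */
static fib_route_path_encode_t *
fib_path_encode_example (fib_node_index_t path_list_index)
{
    fib_route_path_encode_t *api_rpaths = NULL;

    fib_path_list_walk_w_ext(path_list_index,
                             NULL, /* no path extension list */
                             fib_path_encode,
                             &api_rpaths);

    return (api_rpaths);
}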
2688
2689 dpo_proto_t
2690 fib_path_get_proto (fib_node_index_t path_index)
2691 {
2692     fib_path_t *path;
2693
2694     path = fib_path_get(path_index);
2695
2696     return (path->fp_nh_proto);
2697 }
2698
2699 void
2700 fib_path_module_init (void)
2701 {
2702     fib_node_register_type (FIB_NODE_TYPE_PATH, &fib_path_vft);
2703     fib_path_logger = vlib_log_register_class ("fib", "path");
2704 }
2705
2706 static clib_error_t *
2707 show_fib_path_command (vlib_main_t * vm,
2708                         unformat_input_t * input,
2709                         vlib_cli_command_t * cmd)
2710 {
2711     fib_node_index_t pi;
2712     fib_path_t *path;
2713
2714     if (unformat (input, "%d", &pi))
2715     {
2716         /*
2717          * show one in detail
2718          */
2719         if (!pool_is_free_index(fib_path_pool, pi))
2720         {
2721             path = fib_path_get(pi);
2722             u8 *s = format(NULL, "%U", format_fib_path, pi, 1,
2723                            FIB_PATH_FORMAT_FLAGS_NONE);
2724             s = format(s, "\n  children:");
2725             s = fib_node_children_format(path->fp_node.fn_children, s);
2726             vlib_cli_output (vm, "%s", s);
2727             vec_free(s);
2728         }
2729         else
2730         {
2731             vlib_cli_output (vm, "path %d invalid", pi);
2732         }
2733     }
2734     else
2735     {
2736         vlib_cli_output (vm, "FIB Paths");
2737         pool_foreach_index (pi, fib_path_pool,
2738         ({
2739             vlib_cli_output (vm, "%U", format_fib_path, pi, 0,
2740                              FIB_PATH_FORMAT_FLAGS_NONE);
2741         }));
2742     }
2743
2744     return (NULL);
2745 }
2746
2747 VLIB_CLI_COMMAND (show_fib_path, static) = {
2748   .path = "show fib paths",
2749   .function = show_fib_path_command,
2750   .short_help = "show fib paths",
2751 };