[vpp.git] / src / vnet / fib / fib_path.c
1 /*
2  * Copyright (c) 2016 Cisco and/or its affiliates.
3  * Licensed under the Apache License, Version 2.0 (the "License");
4  * you may not use this file except in compliance with the License.
5  * You may obtain a copy of the License at:
6  *
7  *     http://www.apache.org/licenses/LICENSE-2.0
8  *
9  * Unless required by applicable law or agreed to in writing, software
10  * distributed under the License is distributed on an "AS IS" BASIS,
11  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12  * See the License for the specific language governing permissions and
13  * limitations under the License.
14  */
15
16 #include <vlib/vlib.h>
17 #include <vnet/vnet.h>
18 #include <vnet/ip/format.h>
19 #include <vnet/ip/ip.h>
20 #include <vnet/dpo/drop_dpo.h>
21 #include <vnet/dpo/receive_dpo.h>
22 #include <vnet/dpo/load_balance_map.h>
23 #include <vnet/dpo/lookup_dpo.h>
24 #include <vnet/dpo/interface_rx_dpo.h>
25 #include <vnet/dpo/mpls_disposition.h>
26 #include <vnet/dpo/l2_bridge_dpo.h>
27 #include <vnet/dpo/drop_dpo.h>
28
29 #include <vnet/adj/adj.h>
30 #include <vnet/adj/adj_mcast.h>
31
32 #include <vnet/fib/fib_path.h>
33 #include <vnet/fib/fib_node.h>
34 #include <vnet/fib/fib_table.h>
35 #include <vnet/fib/fib_entry.h>
36 #include <vnet/fib/fib_path_list.h>
37 #include <vnet/fib/fib_internal.h>
38 #include <vnet/fib/fib_urpf_list.h>
39 #include <vnet/fib/mpls_fib.h>
40 #include <vnet/udp/udp_encap.h>
41 #include <vnet/bier/bier_fmask.h>
42 #include <vnet/bier/bier_table.h>
43 #include <vnet/bier/bier_imp.h>
44
45 /**
46  * Enumeration of path types
47  */
48 typedef enum fib_path_type_t_ {
49     /**
50      * Marker. Add new types after this one.
51      */
52     FIB_PATH_TYPE_FIRST = 0,
53     /**
54      * Attached-nexthop. An interface and a nexthop are known.
55      */
56     FIB_PATH_TYPE_ATTACHED_NEXT_HOP = FIB_PATH_TYPE_FIRST,
57     /**
58      * attached. Only the interface is known.
59      */
60     FIB_PATH_TYPE_ATTACHED,
61     /**
62      * recursive. Only the next-hop is known.
63      */
64     FIB_PATH_TYPE_RECURSIVE,
65     /**
66      * special. nothing is known. so we drop.
67      */
68     FIB_PATH_TYPE_SPECIAL,
69     /**
70      * exclusive. user provided adj.
71      */
72     FIB_PATH_TYPE_EXCLUSIVE,
73     /**
74      * deag. Link to a lookup adj in the next table
75      */
76     FIB_PATH_TYPE_DEAG,
77     /**
78      * interface receive.
79      */
80     FIB_PATH_TYPE_INTF_RX,
81     /**
82      * udp-encap. the path resolves via a UDP encapsulation object.
83      */
84     FIB_PATH_TYPE_UDP_ENCAP,
85     /**
86      * receive. it's for-us.
87      */
88     FIB_PATH_TYPE_RECEIVE,
89     /**
90      * bier-imp. it's via a BIER imposition.
91      */
92     FIB_PATH_TYPE_BIER_IMP,
93     /**
94      * bier-table. it's via a BIER ECMP-table.
95      */
96     FIB_PATH_TYPE_BIER_TABLE,
97     /**
98      * bier-fmask. it's via a BIER f-mask.
99      */
100     FIB_PATH_TYPE_BIER_FMASK,
101     /**
102      * Marker. Add new types before this one, then update it.
103      */
104     FIB_PATH_TYPE_LAST = FIB_PATH_TYPE_BIER_FMASK,
105 } __attribute__ ((packed)) fib_path_type_t;
106
107 /**
108  * The maximum number of path_types
109  */
110 #define FIB_PATH_TYPE_MAX (FIB_PATH_TYPE_LAST + 1)
111
112 #define FIB_PATH_TYPES {                                        \
113     [FIB_PATH_TYPE_ATTACHED_NEXT_HOP] = "attached-nexthop",     \
114     [FIB_PATH_TYPE_ATTACHED]          = "attached",             \
115     [FIB_PATH_TYPE_RECURSIVE]         = "recursive",            \
116     [FIB_PATH_TYPE_SPECIAL]           = "special",              \
117     [FIB_PATH_TYPE_EXCLUSIVE]         = "exclusive",            \
118     [FIB_PATH_TYPE_DEAG]              = "deag",                 \
119     [FIB_PATH_TYPE_INTF_RX]           = "intf-rx",              \
120     [FIB_PATH_TYPE_UDP_ENCAP]         = "udp-encap",            \
121     [FIB_PATH_TYPE_RECEIVE]           = "receive",              \
122     [FIB_PATH_TYPE_BIER_IMP]          = "bier-imp",             \
123     [FIB_PATH_TYPE_BIER_TABLE]        = "bier-table",           \
124     [FIB_PATH_TYPE_BIER_FMASK]        = "bier-fmask",           \
125 }
126
127 #define FOR_EACH_FIB_PATH_TYPE(_item)           \
128     for (_item = FIB_PATH_TYPE_FIRST;           \
129          _item <= FIB_PATH_TYPE_LAST;           \
130          _item++)
131
132 /**
133  * Enumeration of path operational (i.e. derived) attributes
134  */
135 typedef enum fib_path_oper_attribute_t_ {
136     /**
137      * Marker. Add new types after this one.
138      */
139     FIB_PATH_OPER_ATTRIBUTE_FIRST = 0,
140     /**
141      * The path forms part of a recursive loop.
142      */
143     FIB_PATH_OPER_ATTRIBUTE_RECURSIVE_LOOP = FIB_PATH_OPER_ATTRIBUTE_FIRST,
144     /**
145      * The path is resolved
146      */
147     FIB_PATH_OPER_ATTRIBUTE_RESOLVED,
148     /**
149      * The path is attached, despite what the next-hop may say.
150      */
151     FIB_PATH_OPER_ATTRIBUTE_ATTACHED,
152     /**
153      * The path has become a permanent drop.
154      */
155     FIB_PATH_OPER_ATTRIBUTE_DROP,
156     /**
157      * Marker. Add new types before this one, then update it.
158      */
159     FIB_PATH_OPER_ATTRIBUTE_LAST = FIB_PATH_OPER_ATTRIBUTE_DROP,
160 } __attribute__ ((packed)) fib_path_oper_attribute_t;
161
162 /**
163  * The maximum number of path operational attributes
164  */
165 #define FIB_PATH_OPER_ATTRIBUTE_MAX (FIB_PATH_OPER_ATTRIBUTE_LAST + 1)
166
167 #define FIB_PATH_OPER_ATTRIBUTES {                                      \
168     [FIB_PATH_OPER_ATTRIBUTE_RECURSIVE_LOOP] = "recursive-loop",        \
169     [FIB_PATH_OPER_ATTRIBUTE_RESOLVED]       = "resolved",              \
170     [FIB_PATH_OPER_ATTRIBUTE_DROP]           = "drop",                  \
171 }
172
173 #define FOR_EACH_FIB_PATH_OPER_ATTRIBUTE(_item) \
174     for (_item = FIB_PATH_OPER_ATTRIBUTE_FIRST; \
175          _item <= FIB_PATH_OPER_ATTRIBUTE_LAST; \
176          _item++)
177
178 /**
179  * Path flags from the attributes
180  */
181 typedef enum fib_path_oper_flags_t_ {
182     FIB_PATH_OPER_FLAG_NONE = 0,
183     FIB_PATH_OPER_FLAG_RECURSIVE_LOOP = (1 << FIB_PATH_OPER_ATTRIBUTE_RECURSIVE_LOOP),
184     FIB_PATH_OPER_FLAG_DROP = (1 << FIB_PATH_OPER_ATTRIBUTE_DROP),
185     FIB_PATH_OPER_FLAG_RESOLVED = (1 << FIB_PATH_OPER_ATTRIBUTE_RESOLVED),
186     FIB_PATH_OPER_FLAG_ATTACHED = (1 << FIB_PATH_OPER_ATTRIBUTE_ATTACHED),
187 } __attribute__ ((packed)) fib_path_oper_flags_t;
188
189 /**
190  * A FIB path
191  */
192 typedef struct fib_path_t_ {
193     /**
194      * A path is a node in the FIB graph.
195      */
196     fib_node_t fp_node;
197
198     /**
199      * The index of the path-list to which this path belongs
200      */
201     u32 fp_pl_index;
202
203     /**
204      * This marks the start of the memory area used to hash
205      * the path
206      */
207     STRUCT_MARK(path_hash_start);
208
209     /**
210      * Configuration Flags
211      */
212     fib_path_cfg_flags_t fp_cfg_flags;
213
214     /**
215      * The type of the path. This is the selector for the union
216      */
217     fib_path_type_t fp_type;
218
219     /**
220      * The protocol of the next-hop, i.e. the address family of the
221      * next-hop's address. We can't derive this from the address itself
222      * since the address can be all zeros
223      */
224     dpo_proto_t fp_nh_proto;
225
226     /**
227      * UCMP [unnormalised] weight
228      */
229     u8 fp_weight;
230
231     /**
232      * A path preference. 0 is the best.
233      * Only paths of the best preference, that are 'up', are considered
234      * for forwarding.
235      */
236     u8 fp_preference;
237
238     /**
239      * per-type union of the data required to resolve the path
240      */
241     union {
242         struct {
243             /**
244              * The next-hop
245              */
246             ip46_address_t fp_nh;
247             /**
248              * The interface
249              */
250             u32 fp_interface;
251         } attached_next_hop;
252         struct {
253             /**
254              * The interface
255              */
256             u32 fp_interface;
257         } attached;
258         struct {
259             union
260             {
261                 /**
262                  * The next-hop
263                  */
264                 ip46_address_t fp_ip;
265                 struct {
266                     /**
267                      * The local label to resolve through.
268                      */
269                     mpls_label_t fp_local_label;
270                     /**
271                      * The EOS bit of the resolving label
272                      */
273                     mpls_eos_bit_t fp_eos;
274                 };
275             } fp_nh;
276             union {
277                 /**
278                  * The FIB table index in which to find the next-hop.
279                  */
280                 fib_node_index_t fp_tbl_id;
281                 /**
282                  * The BIER FIB the fmask is in
283                  */
284                 index_t fp_bier_fib;
285             };
286         } recursive;
287         struct {
288             /**
289              * The next-hop
290              */
291             ip46_address_t fp_nh;
292             /**
293              * The BIER FIB the fmask is in
294              */
295             index_t fp_bier_fib;
296         } bier_fmask;
297         struct {
298             /**
299              * The BIER table's ID
300              */
301             bier_table_id_t fp_bier_tbl;
302         } bier_table;
303         struct {
304             /**
305              * The BIER imposition object
306              * this is part of the path's key, since the index_t
307              * of an imposition object is the object's key.
308              */
309             index_t fp_bier_imp;
310         } bier_imp;
311         struct {
312             /**
313      * The FIB index in which to perform the next lookup
314              */
315             fib_node_index_t fp_tbl_id;
316             /**
317              * The RPF-ID to tag the packets with
318              */
319             fib_rpf_id_t fp_rpf_id;
320         } deag;
321         struct {
322         } special;
323         struct {
324             /**
325              * The user provided 'exclusive' DPO
326              */
327             dpo_id_t fp_ex_dpo;
328         } exclusive;
329         struct {
330             /**
331              * The interface on which the local address is configured
332              */
333             u32 fp_interface;
334             /**
335              * The next-hop
336              */
337             ip46_address_t fp_addr;
338         } receive;
339         struct {
340             /**
341              * The interface on which the packets will be input.
342              */
343             u32 fp_interface;
344         } intf_rx;
345         struct {
346             /**
347              * The UDP Encap object this path resolves through
348              */
349             u32 fp_udp_encap_id;
350         } udp_encap;
351     };
352     STRUCT_MARK(path_hash_end);
353
354     /**
355      * Members in this last section represent information that is
356      * derived during resolution. It should not be copied to new paths
357      * nor compared.
358      */
359
360     /**
361      * Operational Flags
362      */
363     fib_path_oper_flags_t fp_oper_flags;
364
365     union {
366         /**
367          * the resolving via fib. not part of the path's hash, since it
368          * is derived during resolution.
369          */
370         fib_node_index_t fp_via_fib;
371         /**
372          * the resolving bier-fmask
373          */
374         index_t fp_via_bier_fmask;
375         /**
376          * the resolving bier-table
377          */
378         index_t fp_via_bier_tbl;
379     };
380
381     /**
382      * The Data-path objects through which this path resolves for IP.
383      */
384     dpo_id_t fp_dpo;
385
386     /**
387      * the index of this path in the parent's child list.
388      */
389     u32 fp_sibling;
390 } fib_path_t;
391
392 /*
393  * Array of strings/names for the path types and attributes
394  */
395 static const char *fib_path_type_names[] = FIB_PATH_TYPES;
396 static const char *fib_path_oper_attribute_names[] = FIB_PATH_OPER_ATTRIBUTES;
397 static const char *fib_path_cfg_attribute_names[]  = FIB_PATH_CFG_ATTRIBUTES;
398
399 /*
400  * The memory pool from which we allocate all the paths
401  */
402 static fib_path_t *fib_path_pool;
403
404 /*
405  * Debug macro
406  */
407 #ifdef FIB_DEBUG
408 #define FIB_PATH_DBG(_p, _fmt, _args...)                        \
409 {                                                               \
410     u8 *_tmp = NULL;                                            \
411     _tmp = fib_path_format(fib_path_get_index(_p), _tmp);       \
412     clib_warning("path:[%d:%s]:" _fmt,                          \
413                  fib_path_get_index(_p), _tmp,                  \
414                  ##_args);                                      \
415     vec_free(_tmp);                                             \
416 }
417 #else
418 #define FIB_PATH_DBG(_p, _fmt, _args...)
419 #endif
420
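/*
 * Get a path object from its index in the path pool
 */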
421 static fib_path_t *
422 fib_path_get (fib_node_index_t index)
423 {
424     return (pool_elt_at_index(fib_path_pool, index));
425 }
426
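/*
 * Get the pool index of a path object
 */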
427 static fib_node_index_t 
428 fib_path_get_index (fib_path_t *path)
429 {
430     return (path - fib_path_pool);
431 }
432
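/*
 * Get a path as its base graph-node type; used by the FIB node VFT below
 */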
433 static fib_node_t *
434 fib_path_get_node (fib_node_index_t index)
435 {
436     return ((fib_node_t*)fib_path_get(index));
437 }
438
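/*
 * Downcast a graph node back to a path, asserting the node type in debug images
 */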
439 static fib_path_t*
440 fib_path_from_fib_node (fib_node_t *node)
441 {
442 #if CLIB_DEBUG > 0
443     ASSERT(FIB_NODE_TYPE_PATH == node->fn_type);
444 #endif
445     return ((fib_path_t*)node);
446 }
447
448 u8 *
449 format_fib_path (u8 * s, va_list * args)
450 {
451     fib_path_t *path = va_arg (*args, fib_path_t *);
452     vnet_main_t * vnm = vnet_get_main();
453     fib_path_oper_attribute_t oattr;
454     fib_path_cfg_attribute_t cattr;
455
456     s = format (s, "      index:%d ", fib_path_get_index(path));
457     s = format (s, "pl-index:%d ", path->fp_pl_index);
458     s = format (s, "%U ", format_dpo_proto, path->fp_nh_proto);
459     s = format (s, "weight=%d ", path->fp_weight);
460     s = format (s, "pref=%d ", path->fp_preference);
461     s = format (s, "%s: ", fib_path_type_names[path->fp_type]);
462     if (FIB_PATH_OPER_FLAG_NONE != path->fp_oper_flags) {
463         s = format(s, " oper-flags:");
464         FOR_EACH_FIB_PATH_OPER_ATTRIBUTE(oattr) {
465             if ((1<<oattr) & path->fp_oper_flags) {
466                 s = format (s, "%s,", fib_path_oper_attribute_names[oattr]);
467             }
468         }
469     }
470     if (FIB_PATH_CFG_FLAG_NONE != path->fp_cfg_flags) {
471         s = format(s, " cfg-flags:");
472         FOR_EACH_FIB_PATH_CFG_ATTRIBUTE(cattr) {
473             if ((1<<cattr) & path->fp_cfg_flags) {
474                 s = format (s, "%s,", fib_path_cfg_attribute_names[cattr]);
475             }
476         }
477     }
478     s = format(s, "\n       ");
479
480     switch (path->fp_type)
481     {
482     case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
483         s = format (s, "%U", format_ip46_address,
484                     &path->attached_next_hop.fp_nh,
485                     IP46_TYPE_ANY);
486         if (path->fp_oper_flags & FIB_PATH_OPER_FLAG_DROP)
487         {
488             s = format (s, " if_index:%d", path->attached_next_hop.fp_interface);
489         }
490         else
491         {
492             s = format (s, " %U",
493                         format_vnet_sw_interface_name,
494                         vnm,
495                         vnet_get_sw_interface(
496                             vnm,
497                             path->attached_next_hop.fp_interface));
498             if (vnet_sw_interface_is_p2p(vnet_get_main(),
499                                          path->attached_next_hop.fp_interface))
500             {
501                 s = format (s, " (p2p)");
502             }
503         }
504         if (!dpo_id_is_valid(&path->fp_dpo))
505         {
506             s = format(s, "\n          unresolved");
507         }
508         else
509         {
510             s = format(s, "\n          %U",
511                        format_dpo_id,
512                        &path->fp_dpo, 13);
513         }
514         break;
515     case FIB_PATH_TYPE_ATTACHED:
516         if (path->fp_oper_flags & FIB_PATH_OPER_FLAG_DROP)
517         {
518             s = format (s, " if_index:%d", path->attached_next_hop.fp_interface);
519         }
520         else
521         {
522             s = format (s, " %U",
523                         format_vnet_sw_interface_name,
524                         vnm,
525                         vnet_get_sw_interface(
526                             vnm,
527                             path->attached.fp_interface));
528         }
529         break;
530     case FIB_PATH_TYPE_RECURSIVE:
531         if (DPO_PROTO_MPLS == path->fp_nh_proto)
532         {
533             s = format (s, "via %U %U",
534                         format_mpls_unicast_label,
535                         path->recursive.fp_nh.fp_local_label,
536                         format_mpls_eos_bit,
537                         path->recursive.fp_nh.fp_eos);
538         }
539         else
540         {
541             s = format (s, "via %U",
542                         format_ip46_address,
543                         &path->recursive.fp_nh.fp_ip,
544                         IP46_TYPE_ANY);
545         }
546         s = format (s, " in fib:%d",
547                     path->recursive.fp_tbl_id,
548                     path->fp_via_fib); 
549         s = format (s, " via-fib:%d", path->fp_via_fib); 
550         s = format (s, " via-dpo:[%U:%d]",
551                     format_dpo_type, path->fp_dpo.dpoi_type, 
552                     path->fp_dpo.dpoi_index);
553
554         break;
555     case FIB_PATH_TYPE_UDP_ENCAP:
556         s = format (s, " UDP-encap ID:%d", path->udp_encap.fp_udp_encap_id);
557         break;
558     case FIB_PATH_TYPE_BIER_TABLE:
559         s = format (s, "via bier-table:[%U}",
560                     format_bier_table_id,
561                     &path->bier_table.fp_bier_tbl);
562         s = format (s, " via-dpo:[%U:%d]",
563                     format_dpo_type, path->fp_dpo.dpoi_type,
564                     path->fp_dpo.dpoi_index);
565         break;
566     case FIB_PATH_TYPE_BIER_FMASK:
567         s = format (s, "via %U",
568                     format_ip46_address,
569                     &path->bier_fmask.fp_nh,
570                     IP46_TYPE_ANY);
571         s = format (s, " in BIER-fib:%d",
572                     path->bier_fmask.fp_bier_fib,
573                     path->fp_via_fib); 
574         s = format (s, " via-fmask:%d", path->fp_via_bier_fmask); 
575         s = format (s, " via-dpo:[%U:%d]",
576                     format_dpo_type, path->fp_dpo.dpoi_type, 
577                     path->fp_dpo.dpoi_index);
578         break;
579     case FIB_PATH_TYPE_BIER_IMP:
580         s = format (s, "via %U", format_bier_imp,
581                     path->bier_imp.fp_bier_imp, 0, BIER_SHOW_BRIEF);
582         break;
583     case FIB_PATH_TYPE_RECEIVE:
584     case FIB_PATH_TYPE_INTF_RX:
585     case FIB_PATH_TYPE_SPECIAL:
586     case FIB_PATH_TYPE_DEAG:
587     case FIB_PATH_TYPE_EXCLUSIVE:
588         if (dpo_id_is_valid(&path->fp_dpo))
589         {
590             s = format(s, "%U", format_dpo_id,
591                        &path->fp_dpo, 2);
592         }
593         break;
594     }
595     return (s);
596 }
597
598 u8 *
599 fib_path_format (fib_node_index_t pi, u8 *s)
600 {
601     fib_path_t *path;
602
603     path = fib_path_get(pi);
604     ASSERT(NULL != path);
605
606     return (format (s, "%U", format_fib_path, path));
607 }
608
609 u8 *
610 fib_path_adj_format (fib_node_index_t pi,
611                      u32 indent,
612                      u8 *s)
613 {
614     fib_path_t *path;
615
616     path = fib_path_get(pi);
617     ASSERT(NULL != path);
618
619     if (!dpo_id_is_valid(&path->fp_dpo))
620     {
621         s = format(s, " unresolved");
622     }
623     else
624     {
625         s = format(s, "%U", format_dpo_id,
626                    &path->fp_dpo, 2);
627     }
628
629     return (s);
630 }
631
632 /*
633  * fib_path_last_lock_gone
634  *
635  * We don't share paths, we share path lists, so the [un]lock functions
636  * are no-ops
637  */
638 static void
639 fib_path_last_lock_gone (fib_node_t *node)
640 {
641     ASSERT(0);
642 }
643
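/*
 * Get (and lock) the adjacency through which an attached-nexthop path
 * resolves for the given link type
 */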
644 static const adj_index_t
645 fib_path_attached_next_hop_get_adj (fib_path_t *path,
646                                     vnet_link_t link)
647 {
648     if (vnet_sw_interface_is_p2p(vnet_get_main(),
649                                  path->attached_next_hop.fp_interface))
650     {
651         /*
652          * if the interface is p2p then the adj for the specific
653          * neighbour on that link will never exist. on p2p links
654          * the subnet address (the attached route) links to the
655          * auto-adj (see below), we want that adj here too.
656          */
657         return (adj_nbr_add_or_lock(dpo_proto_to_fib(path->fp_nh_proto),
658                                     link,
659                                     &zero_addr,
660                                     path->attached_next_hop.fp_interface));
661     }
662     else
663     {
664         return (adj_nbr_add_or_lock(dpo_proto_to_fib(path->fp_nh_proto),
665                                     link,
666                                     &path->attached_next_hop.fp_nh,
667                                     path->attached_next_hop.fp_interface));
668     }
669 }
670
671 static void
672 fib_path_attached_next_hop_set (fib_path_t *path)
673 {
674     /*
675      * resolve directly via the adjacency described by the
676      * interface and next-hop
677      */
678     dpo_set(&path->fp_dpo,
679             DPO_ADJACENCY,
680             path->fp_nh_proto,
681             fib_path_attached_next_hop_get_adj(
682                  path,
683                  dpo_proto_to_link(path->fp_nh_proto)));
684
685     /*
686      * become a child of the adjacency so we receive updates
687      * when its rewrite changes
688      */
689     path->fp_sibling = adj_child_add(path->fp_dpo.dpoi_index,
690                                      FIB_NODE_TYPE_PATH,
691                                      fib_path_get_index(path));
692
693     if (!vnet_sw_interface_is_admin_up(vnet_get_main(),
694                                       path->attached_next_hop.fp_interface) ||
695         !adj_is_up(path->fp_dpo.dpoi_index))
696     {
697         path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
698     }
699 }
700
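/*
 * Get (and lock) the adjacency for an 'attached' path: a neighbour adj on
 * p2p interfaces, a glean adj otherwise
 */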
701 static const adj_index_t
702 fib_path_attached_get_adj (fib_path_t *path,
703                            vnet_link_t link)
704 {
705     if (vnet_sw_interface_is_p2p(vnet_get_main(),
706                                  path->attached.fp_interface))
707     {
708         /*
709          * point-2-point interfaces do not require a glean, since
710          * there is nothing to ARP. Install a rewrite/nbr adj instead
711          */
712         return (adj_nbr_add_or_lock(dpo_proto_to_fib(path->fp_nh_proto),
713                                     link,
714                                     &zero_addr,
715                                     path->attached.fp_interface));
716     }
717     else
718     {
719         return (adj_glean_add_or_lock(dpo_proto_to_fib(path->fp_nh_proto),
720                                       path->attached.fp_interface,
721                                       NULL));
722     }
723 }
724
725 /*
726  * create or update the path's recursive adj
727  */
728 static void
729 fib_path_recursive_adj_update (fib_path_t *path,
730                                fib_forward_chain_type_t fct,
731                                dpo_id_t *dpo)
732 {
733     dpo_id_t via_dpo = DPO_INVALID;
734
735     /*
736      * get the DPO to resolve through from the via-entry
737      */
738     fib_entry_contribute_forwarding(path->fp_via_fib,
739                                     fct,
740                                     &via_dpo);
741
742
743     /*
744      * hope for the best - clear if restrictions apply.
745      */
746     path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RESOLVED;
747
748     /*
749      * Validate any recursion constraints and over-ride the via
750      * adj if not met
751      */
752     if (path->fp_oper_flags & FIB_PATH_OPER_FLAG_RECURSIVE_LOOP)
753     {
754         path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
755         dpo_copy(&via_dpo, drop_dpo_get(path->fp_nh_proto));
756     }
757     else if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_RESOLVE_HOST)
758     {
759         /*
760          * the via FIB must be a host route.
761          * note the via FIB just added will always be a host route
762          * since it is an RR source added host route. So what we need to
763          * check is whether the route has other sources. If it does then
764          * some other source has added it as a host route. If it doesn't
765          * then it was added only here and inherits forwarding from a cover.
766          * the cover is not a host route.
767          * The RR source is the lowest priority source, so we check if it
768          * is the best. if it is there are no other sources.
769          */
770         if (fib_entry_get_best_source(path->fp_via_fib) >= FIB_SOURCE_RR)
771         {
772             path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
773             dpo_copy(&via_dpo, drop_dpo_get(path->fp_nh_proto));
774
775             /*
776              * PIC edge trigger. let the load-balance maps know
777              */
778             load_balance_map_path_state_change(fib_path_get_index(path));
779         }
780     }
781     else if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_RESOLVE_ATTACHED)
782     {
783         /*
784          * RR source entries inherit the flags from the cover, so
785          * we can check the via directly
786          */
787         if (!(FIB_ENTRY_FLAG_ATTACHED & fib_entry_get_flags(path->fp_via_fib)))
788         {
789             path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
790             dpo_copy(&via_dpo, drop_dpo_get(path->fp_nh_proto));
791
792             /*
793              * PIC edge trigger. let the load-balance maps know
794              */
795             load_balance_map_path_state_change(fib_path_get_index(path));
796         }
797     }
798     /*
799      * check for over-riding factors on the FIB entry itself
800      */
801     if (!fib_entry_is_resolved(path->fp_via_fib))
802     {
803         path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
804         dpo_copy(&via_dpo, drop_dpo_get(path->fp_nh_proto));
805
806         /*
807          * PIC edge trigger. let the load-balance maps know
808          */
809         load_balance_map_path_state_change(fib_path_get_index(path));
810     }
811
812     /*
813      * If this path is contributing a drop, then it's not resolved
814      */
815     if (dpo_is_drop(&via_dpo) || load_balance_is_drop(&via_dpo))
816     {
817         path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
818     }
819
820     /*
821      * update the path's contributed DPO
822      */
823     dpo_copy(dpo, &via_dpo);
824
825     FIB_PATH_DBG(path, "recursive update:");
826
827     dpo_reset(&via_dpo);
828 }
829
830 /*
831  * re-evaluate the forwarding state for a via-fmask path
832  */
833 static void
834 fib_path_bier_fmask_update (fib_path_t *path,
835                             dpo_id_t *dpo)
836 {
837     bier_fmask_contribute_forwarding(path->fp_via_bier_fmask, dpo);
838
839     /*
840      * if we are stacking on the drop, then the path is not resolved
841      */
842     if (dpo_is_drop(dpo))
843     {
844         path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
845     }
846     else
847     {
848         path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RESOLVED;
849     }
850 }
851
852 /*
853  * fib_path_is_permanent_drop
854  *
855  * Return !0 if the path is configured to permanently drop,
856  * despite other attributes.
857  */
858 static int
859 fib_path_is_permanent_drop (fib_path_t *path)
860 {
861     return ((path->fp_cfg_flags & FIB_PATH_CFG_FLAG_DROP) ||
862             (path->fp_oper_flags & FIB_PATH_OPER_FLAG_DROP));
863 }
864
865 /*
866  * fib_path_unresolve
867  *
868  * Remove our dependency on the resolution target
869  */
870 static void
871 fib_path_unresolve (fib_path_t *path)
872 {
873     /*
874      * the forced drop path does not need unresolving
875      */
876     if (fib_path_is_permanent_drop(path))
877     {
878         return;
879     }
880
881     switch (path->fp_type)
882     {
883     case FIB_PATH_TYPE_RECURSIVE:
884         if (FIB_NODE_INDEX_INVALID != path->fp_via_fib)
885         {
886             fib_prefix_t pfx;
887
888             fib_entry_get_prefix(path->fp_via_fib, &pfx);
889             fib_entry_child_remove(path->fp_via_fib,
890                                    path->fp_sibling);
891             fib_table_entry_special_remove(path->recursive.fp_tbl_id,
892                                            &pfx,
893                                            FIB_SOURCE_RR);
894             path->fp_via_fib = FIB_NODE_INDEX_INVALID;
895         }
896         break;
897     case FIB_PATH_TYPE_BIER_FMASK:
898         if (FIB_NODE_INDEX_INVALID != path->fp_via_bier_fmask)
899         {
900             bier_fmask_child_remove(path->fp_via_bier_fmask,
901                                     path->fp_sibling);
902             path->fp_via_bier_fmask = FIB_NODE_INDEX_INVALID;
903         }
904         break;
905     case FIB_PATH_TYPE_BIER_IMP:
906         bier_imp_unlock(path->fp_dpo.dpoi_index);
907         break;
908     case FIB_PATH_TYPE_BIER_TABLE:
909         bier_table_ecmp_unlock(path->fp_via_bier_tbl);
910         break;
911     case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
912         adj_child_remove(path->fp_dpo.dpoi_index,
913                          path->fp_sibling);
914         adj_unlock(path->fp_dpo.dpoi_index);
915         break;
916     case FIB_PATH_TYPE_ATTACHED:
917         if (DPO_PROTO_ETHERNET != path->fp_nh_proto)
918         {
919             adj_child_remove(path->fp_dpo.dpoi_index,
920                              path->fp_sibling);
921             adj_unlock(path->fp_dpo.dpoi_index);
922         }
923         break;
924     case FIB_PATH_TYPE_UDP_ENCAP:
925         udp_encap_unlock_w_index(path->fp_dpo.dpoi_index);
926         break;
927     case FIB_PATH_TYPE_EXCLUSIVE:
928         dpo_reset(&path->exclusive.fp_ex_dpo);
929         break;
930     case FIB_PATH_TYPE_SPECIAL:
931     case FIB_PATH_TYPE_RECEIVE:
932     case FIB_PATH_TYPE_INTF_RX:
933     case FIB_PATH_TYPE_DEAG:
934         /*
935          * these hold only the path's DPO, which is reset below.
936          */
937         break;
938     }
939
940     /*
941      * release the adj we were holding and pick up the
942      * drop just in case.
943      */
944     dpo_reset(&path->fp_dpo);
945     path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
946
947     return;
948 }
949
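/*
 * Deduce the forwarding chain type this path contributes: EOS/non-EOS for
 * MPLS next-hops, otherwise derived from the next-hop's DPO protocol
 */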
950 static fib_forward_chain_type_t
951 fib_path_to_chain_type (const fib_path_t *path)
952 {
953     if (DPO_PROTO_MPLS == path->fp_nh_proto)
954     {
955         if (FIB_PATH_TYPE_RECURSIVE == path->fp_type &&
956             MPLS_EOS == path->recursive.fp_nh.fp_eos)
957         {
958             return (FIB_FORW_CHAIN_TYPE_MPLS_EOS);
959         }
960         else
961         {
962             return (FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS);
963         }
964     }
965     else
966     {
967         return (fib_forw_chain_type_from_dpo_proto(path->fp_nh_proto));
968     }
969 }
970
971 /*
972  * fib_path_back_walk_notify
973  *
974  * A back walk has reached this path.
975  */
976 static fib_node_back_walk_rc_t
977 fib_path_back_walk_notify (fib_node_t *node,
978                            fib_node_back_walk_ctx_t *ctx)
979 {
980     fib_path_t *path;
981
982     path = fib_path_from_fib_node(node);
983
984     switch (path->fp_type)
985     {
986     case FIB_PATH_TYPE_RECURSIVE:
987         if (FIB_NODE_BW_REASON_FLAG_EVALUATE & ctx->fnbw_reason)
988         {
989             /*
990              * modify the recursive adjacency to use the new forwarding
991              * of the via-fib.
992              * this update is visible to packets in flight in the DP.
993              */
994             fib_path_recursive_adj_update(
995                 path,
996                 fib_path_to_chain_type(path),
997                 &path->fp_dpo);
998         }
999         if ((FIB_NODE_BW_REASON_FLAG_ADJ_UPDATE & ctx->fnbw_reason) ||
1000             (FIB_NODE_BW_REASON_FLAG_ADJ_DOWN   & ctx->fnbw_reason))
1001         {
1002             /*
1003              * ADJ updates (complete<->incomplete) do not need to propagate to
1004              * recursive entries.
1005              * The only reason it's needed as far back as here is that the adj
1006              * and the incomplete adj are a different DPO type, so the LBs need
1007              * to re-stack.
1008              * If this walk was quashed in the fib_entry, then any non-fib_path
1009              * children (like tunnels that collapse out the LB when they stack)
1010              * would not see the update.
1011              */
1012             return (FIB_NODE_BACK_WALK_CONTINUE);
1013         }
1014         break;
1015     case FIB_PATH_TYPE_BIER_FMASK:
1016         if (FIB_NODE_BW_REASON_FLAG_EVALUATE & ctx->fnbw_reason)
1017         {
1018             /*
1019              * update to use the BIER fmask's new forwarding
1020              */
1021             fib_path_bier_fmask_update(path, &path->fp_dpo);
1022         }
1023         if ((FIB_NODE_BW_REASON_FLAG_ADJ_UPDATE & ctx->fnbw_reason) ||
1024             (FIB_NODE_BW_REASON_FLAG_ADJ_DOWN   & ctx->fnbw_reason))
1025         {
1026             /*
1027              * ADJ updates (complete<->incomplete) do not need to propagate to
1028              * recursive entries.
1029              * The only reason it's needed as far back as here is that the adj
1030              * and the incomplete adj are a different DPO type, so the LBs need
1031              * to re-stack.
1032              * If this walk was quashed in the fib_entry, then any non-fib_path
1033              * children (like tunnels that collapse out the LB when they stack)
1034              * would not see the update.
1035              */
1036             return (FIB_NODE_BACK_WALK_CONTINUE);
1037         }
1038         break;
1039     case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
1040         /*
1041          * FIXME comment
1042          * ADJ_UPDATE backwalks pass silently through here and up to
1043          * the path-list when the multipath adj collapse occurs.
1044          * The reason we do this is the assumption that VPP
1045          * runs in an environment where the Control-Plane is remote
1046          * and hence reacts slowly to link up/down. In order to remove
1047          * this down link from the ECMP set quickly, we back-walk.
1048          * VPP also has dedicated CPUs, so we are not stealing resources
1049          * from the CP to do so.
1050          */
1051         if (FIB_NODE_BW_REASON_FLAG_INTERFACE_UP & ctx->fnbw_reason)
1052         {
1053             if (path->fp_oper_flags & FIB_PATH_OPER_FLAG_RESOLVED)
1054             {
1055                 /*
1056                  * already resolved. no need to walk back again
1057                  */
1058                 return (FIB_NODE_BACK_WALK_CONTINUE);
1059             }
1060             path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RESOLVED;
1061         }
1062         if (FIB_NODE_BW_REASON_FLAG_INTERFACE_DOWN & ctx->fnbw_reason)
1063         {
1064             if (!(path->fp_oper_flags & FIB_PATH_OPER_FLAG_RESOLVED))
1065             {
1066                 /*
1067                  * already unresolved. no need to walk back again
1068                  */
1069                 return (FIB_NODE_BACK_WALK_CONTINUE);
1070             }
1071             path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
1072         }
1073         if (FIB_NODE_BW_REASON_FLAG_INTERFACE_DELETE & ctx->fnbw_reason)
1074         {
1075             /*
1076              * The interface this path resolves through has been deleted.
1077              * This will leave the path in a permanent drop state. The route
1078              * needs to be removed and readded (and hence the path-list deleted)
1079              * before it can forward again.
1080              */
1081             fib_path_unresolve(path);
1082             path->fp_oper_flags |= FIB_PATH_OPER_FLAG_DROP;
1083         }
1084         if (FIB_NODE_BW_REASON_FLAG_ADJ_UPDATE & ctx->fnbw_reason)
1085         {
1086             /*
1087              * restack the DPO to pick up the correct DPO sub-type
1088              */
1089             uword if_is_up;
1090             adj_index_t ai;
1091
1092             if_is_up = vnet_sw_interface_is_admin_up(
1093                            vnet_get_main(),
1094                            path->attached_next_hop.fp_interface);
1095
1096             ai = fib_path_attached_next_hop_get_adj(
1097                      path,
1098                      dpo_proto_to_link(path->fp_nh_proto));
1099
1100             path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
1101             if (if_is_up && adj_is_up(ai))
1102             {
1103                 path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RESOLVED;
1104             }
1105
1106             dpo_set(&path->fp_dpo, DPO_ADJACENCY, path->fp_nh_proto, ai);
1107             adj_unlock(ai);
1108
1109             if (!if_is_up)
1110             {
1111                 /*
1112                  * If the interface is not up there is no reason to walk
1113                  * back to children. if we did they would only evaluate
1114                  * that this path is unresolved and hence it would
1115                  * not contribute the adjacency - so it would be wasted
1116                  * CPU time.
1117                  */
1118                 return (FIB_NODE_BACK_WALK_CONTINUE);
1119             }
1120         }
1121         if (FIB_NODE_BW_REASON_FLAG_ADJ_DOWN & ctx->fnbw_reason)
1122         {
1123             if (!(path->fp_oper_flags & FIB_PATH_OPER_FLAG_RESOLVED))
1124             {
1125                 /*
1126                  * already unresolved. no need to walk back again
1127                  */
1128                 return (FIB_NODE_BACK_WALK_CONTINUE);
1129             }
1130             /*
1131              * the adj has gone down. the path is no longer resolved.
1132              */
1133             path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
1134         }
1135         break;
1136     case FIB_PATH_TYPE_ATTACHED:
1137         /*
1138          * FIXME; this could schedule a lower priority walk, since attached
1139          * routes are not usually in ECMP configurations so the backwalk to
1140          * the FIB entry does not need to be high priority
1141          */
1142         if (FIB_NODE_BW_REASON_FLAG_INTERFACE_UP & ctx->fnbw_reason)
1143         {
1144             path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RESOLVED;
1145         }
1146         if (FIB_NODE_BW_REASON_FLAG_INTERFACE_DOWN & ctx->fnbw_reason)
1147         {
1148             path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
1149         }
1150         if (FIB_NODE_BW_REASON_FLAG_INTERFACE_DELETE & ctx->fnbw_reason)
1151         {
1152             fib_path_unresolve(path);
1153             path->fp_oper_flags |= FIB_PATH_OPER_FLAG_DROP;
1154         }
1155         break;
1156     case FIB_PATH_TYPE_UDP_ENCAP:
1157     {
1158         dpo_id_t via_dpo = DPO_INVALID;
1159
1160         /*
1161          * hope for the best - clear if restrictions apply.
1162          */
1163         path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RESOLVED;
1164
1165         udp_encap_contribute_forwarding(path->udp_encap.fp_udp_encap_id,
1166                                         path->fp_nh_proto,
1167                                         &via_dpo);
1168         /*
1169          * If this path is contributing a drop, then it's not resolved
1170          */
1171         if (dpo_is_drop(&via_dpo) || load_balance_is_drop(&via_dpo))
1172         {
1173             path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
1174         }
1175
1176         /*
1177          * update the path's contributed DPO
1178          */
1179         dpo_copy(&path->fp_dpo, &via_dpo);
1180         dpo_reset(&via_dpo);
1181         break;
1182     }
1183     case FIB_PATH_TYPE_INTF_RX:
1184         ASSERT(0);
1185     case FIB_PATH_TYPE_DEAG:
1186         /*
1187          * FIXME When VRF delete is allowed this will need a poke.
1188          */
1189     case FIB_PATH_TYPE_SPECIAL:
1190     case FIB_PATH_TYPE_RECEIVE:
1191     case FIB_PATH_TYPE_EXCLUSIVE:
1192     case FIB_PATH_TYPE_BIER_TABLE:
1193     case FIB_PATH_TYPE_BIER_IMP:
1194         /*
1195          * these path types have no parents. so to be
1196          * walked from one is unexpected.
1197          */
1198         ASSERT(0);
1199         break;
1200     }
1201
1202     /*
1203      * propagate the backwalk further to the path-list
1204      */
1205     fib_path_list_back_walk(path->fp_pl_index, ctx);
1206
1207     return (FIB_NODE_BACK_WALK_CONTINUE);
1208 }
1209
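/*
 * Report the path pool's memory usage; hooked into the node VFT's mem_show below
 */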
1210 static void
1211 fib_path_memory_show (void)
1212 {
1213     fib_show_memory_usage("Path",
1214                           pool_elts(fib_path_pool),
1215                           pool_len(fib_path_pool),
1216                           sizeof(fib_path_t));
1217 }
1218
1219 /*
1220  * The FIB path's graph node virtual function table
1221  */
1222 static const fib_node_vft_t fib_path_vft = {
1223     .fnv_get = fib_path_get_node,
1224     .fnv_last_lock = fib_path_last_lock_gone,
1225     .fnv_back_walk = fib_path_back_walk_notify,
1226     .fnv_mem_show = fib_path_memory_show,
1227 };
1228
1229 static fib_path_cfg_flags_t
1230 fib_path_route_flags_to_cfg_flags (const fib_route_path_t *rpath)
1231 {
1232     fib_path_cfg_flags_t cfg_flags = FIB_PATH_CFG_FLAG_NONE;
1233
1234     if (rpath->frp_flags & FIB_ROUTE_PATH_RESOLVE_VIA_HOST)
1235         cfg_flags |= FIB_PATH_CFG_FLAG_RESOLVE_HOST;
1236     if (rpath->frp_flags & FIB_ROUTE_PATH_RESOLVE_VIA_ATTACHED)
1237         cfg_flags |= FIB_PATH_CFG_FLAG_RESOLVE_ATTACHED;
1238     if (rpath->frp_flags & FIB_ROUTE_PATH_LOCAL)
1239         cfg_flags |= FIB_PATH_CFG_FLAG_LOCAL;
1240     if (rpath->frp_flags & FIB_ROUTE_PATH_ATTACHED)
1241         cfg_flags |= FIB_PATH_CFG_FLAG_ATTACHED;
1242     if (rpath->frp_flags & FIB_ROUTE_PATH_INTF_RX)
1243         cfg_flags |= FIB_PATH_CFG_FLAG_INTF_RX;
1244     if (rpath->frp_flags & FIB_ROUTE_PATH_RPF_ID)
1245         cfg_flags |= FIB_PATH_CFG_FLAG_RPF_ID;
1246     if (rpath->frp_flags & FIB_ROUTE_PATH_EXCLUSIVE)
1247         cfg_flags |= FIB_PATH_CFG_FLAG_EXCLUSIVE;
1248     if (rpath->frp_flags & FIB_ROUTE_PATH_DROP)
1249         cfg_flags |= FIB_PATH_CFG_FLAG_DROP;
1250     if (rpath->frp_flags & FIB_ROUTE_PATH_SOURCE_LOOKUP)
1251         cfg_flags |= FIB_PATH_CFG_FLAG_DEAG_SRC;
1252
1253     return (cfg_flags);
1254 }
1255
1256 /*
1257  * fib_path_create
1258  *
1259  * Create and initialise a new path object.
1260  * return the index of the path.
1261  */
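/*
 * Illustrative sketch (an assumption for documentation, not code from this
 * file): a client typically fills in a fib_route_path_t and the path-list
 * layer passes it to fib_path_create(), e.g.
 *
 *   fib_route_path_t rpath = {
 *       .frp_proto = DPO_PROTO_IP4,
 *       .frp_addr = nh_addr,              // hypothetical next-hop address
 *       .frp_sw_if_index = sw_if_index,   // hypothetical egress interface
 *       .frp_fib_index = ~0,
 *       .frp_weight = 1,
 *   };
 *
 * With an interface and a non-zero next-hop address this is deduced below
 * to be an attached-nexthop path.
 */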
1262 fib_node_index_t
1263 fib_path_create (fib_node_index_t pl_index,
1264                  const fib_route_path_t *rpath)
1265 {
1266     fib_path_t *path;
1267
1268     pool_get(fib_path_pool, path);
1269     memset(path, 0, sizeof(*path));
1270
1271     fib_node_init(&path->fp_node,
1272                   FIB_NODE_TYPE_PATH);
1273
1274     dpo_reset(&path->fp_dpo);
1275     path->fp_pl_index = pl_index;
1276     path->fp_nh_proto = rpath->frp_proto;
1277     path->fp_via_fib = FIB_NODE_INDEX_INVALID;
1278     path->fp_weight = rpath->frp_weight;
1279     if (0 == path->fp_weight)
1280     {
1281         /*
1282          * a weight of 0 is a meaningless value. We could either reject it, and thus force
1283          * clients to always use 1, or we can accept it and fixup approrpiately.
1284          */
1285         path->fp_weight = 1;
1286     }
1287     path->fp_preference = rpath->frp_preference;
1288     path->fp_cfg_flags = fib_path_route_flags_to_cfg_flags(rpath);
1289
1290     /*
1291      * deduce the path's type from the parameters and save what is needed.
1292      */
1293     if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_LOCAL)
1294     {
1295         path->fp_type = FIB_PATH_TYPE_RECEIVE;
1296         path->receive.fp_interface = rpath->frp_sw_if_index;
1297         path->receive.fp_addr = rpath->frp_addr;
1298     }
1299     else if (rpath->frp_flags & FIB_ROUTE_PATH_UDP_ENCAP)
1300     {
1301         path->fp_type = FIB_PATH_TYPE_UDP_ENCAP;
1302         path->udp_encap.fp_udp_encap_id = rpath->frp_udp_encap_id;
1303     }
1304     else if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_INTF_RX)
1305     {
1306         path->fp_type = FIB_PATH_TYPE_INTF_RX;
1307         path->intf_rx.fp_interface = rpath->frp_sw_if_index;
1308     }
1309     else if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_RPF_ID)
1310     {
1311         path->fp_type = FIB_PATH_TYPE_DEAG;
1312         path->deag.fp_tbl_id = rpath->frp_fib_index;
1313         path->deag.fp_rpf_id = rpath->frp_rpf_id;
1314     }
1315     else if (rpath->frp_flags & FIB_ROUTE_PATH_BIER_FMASK)
1316     {
1317         path->fp_type = FIB_PATH_TYPE_BIER_FMASK;
1318         path->bier_fmask.fp_nh = rpath->frp_addr;
1319         path->bier_fmask.fp_bier_fib = rpath->frp_bier_fib_index;
1320     }
1321     else if (rpath->frp_flags & FIB_ROUTE_PATH_BIER_IMP)
1322     {
1323         path->fp_type = FIB_PATH_TYPE_BIER_IMP;
1324         path->bier_imp.fp_bier_imp = rpath->frp_bier_imp;
1325     }
1326     else if (rpath->frp_flags & FIB_ROUTE_PATH_BIER_TABLE)
1327     {
1328         path->fp_type = FIB_PATH_TYPE_BIER_TABLE;
1329         path->bier_table.fp_bier_tbl = rpath->frp_bier_tbl;
1330     }
1331     else if (~0 != rpath->frp_sw_if_index)
1332     {
1333         if (ip46_address_is_zero(&rpath->frp_addr))
1334         {
1335             path->fp_type = FIB_PATH_TYPE_ATTACHED;
1336             path->attached.fp_interface = rpath->frp_sw_if_index;
1337         }
1338         else
1339         {
1340             path->fp_type = FIB_PATH_TYPE_ATTACHED_NEXT_HOP;
1341             path->attached_next_hop.fp_interface = rpath->frp_sw_if_index;
1342             path->attached_next_hop.fp_nh = rpath->frp_addr;
1343         }
1344     }
1345     else
1346     {
1347         if (ip46_address_is_zero(&rpath->frp_addr))
1348         {
1349             if (~0 == rpath->frp_fib_index)
1350             {
1351                 path->fp_type = FIB_PATH_TYPE_SPECIAL;
1352             }
1353             else
1354             {
1355                 path->fp_type = FIB_PATH_TYPE_DEAG;
1356                 path->deag.fp_tbl_id = rpath->frp_fib_index;
1357             }           
1358         }
1359         else
1360         {
1361             path->fp_type = FIB_PATH_TYPE_RECURSIVE;
1362             if (DPO_PROTO_MPLS == path->fp_nh_proto)
1363             {
1364                 path->recursive.fp_nh.fp_local_label = rpath->frp_local_label;
1365                 path->recursive.fp_nh.fp_eos = rpath->frp_eos;
1366             }
1367             else
1368             {
1369                 path->recursive.fp_nh.fp_ip = rpath->frp_addr;
1370             }
1371             path->recursive.fp_tbl_id = rpath->frp_fib_index;
1372         }
1373     }
1374
1375     FIB_PATH_DBG(path, "create");
1376
1377     return (fib_path_get_index(path));
1378 }
1379
1380 /*
1381  * fib_path_create_special
1382  *
1383  * Create and initialise a new path object.
1384  * return the index of the path.
1385  */
1386 fib_node_index_t
1387 fib_path_create_special (fib_node_index_t pl_index,
1388                          dpo_proto_t nh_proto,
1389                          fib_path_cfg_flags_t flags,
1390                          const dpo_id_t *dpo)
1391 {
1392     fib_path_t *path;
1393
1394     pool_get(fib_path_pool, path);
1395     memset(path, 0, sizeof(*path));
1396
1397     fib_node_init(&path->fp_node,
1398                   FIB_NODE_TYPE_PATH);
1399     dpo_reset(&path->fp_dpo);
1400
1401     path->fp_pl_index = pl_index;
1402     path->fp_weight = 1;
1403     path->fp_preference = 0;
1404     path->fp_nh_proto = nh_proto;
1405     path->fp_via_fib = FIB_NODE_INDEX_INVALID;
1406     path->fp_cfg_flags = flags;
1407
1408     if (FIB_PATH_CFG_FLAG_DROP & flags)
1409     {
1410         path->fp_type = FIB_PATH_TYPE_SPECIAL;
1411     }
1412     else if (FIB_PATH_CFG_FLAG_LOCAL & flags)
1413     {
1414         path->fp_type = FIB_PATH_TYPE_RECEIVE;
1415         path->receive.fp_interface = FIB_NODE_INDEX_INVALID;
1416     }
1417     else
1418     {
1419         path->fp_type = FIB_PATH_TYPE_EXCLUSIVE;
1420         ASSERT(NULL != dpo);
1421         dpo_copy(&path->exclusive.fp_ex_dpo, dpo);
1422     }
1423
1424     return (fib_path_get_index(path));
1425 }
1426
1427 /*
1428  * fib_path_copy
1429  *
1430  * Copy a path. return index of new path.
1431  */
1432 fib_node_index_t
1433 fib_path_copy (fib_node_index_t path_index,
1434                fib_node_index_t path_list_index)
1435 {
1436     fib_path_t *path, *orig_path;
1437
1438     pool_get(fib_path_pool, path);
1439
1440     orig_path = fib_path_get(path_index);
1441     ASSERT(NULL != orig_path);
1442
1443     memcpy(path, orig_path, sizeof(*path));
1444
1445     FIB_PATH_DBG(path, "create-copy:%d", path_index);
1446
1447     /*
1448      * reset the dynamic section
1449      */
1450     fib_node_init(&path->fp_node, FIB_NODE_TYPE_PATH);
1451     path->fp_oper_flags     = FIB_PATH_OPER_FLAG_NONE;
1452     path->fp_pl_index  = path_list_index;
1453     path->fp_via_fib   = FIB_NODE_INDEX_INVALID;
1454     memset(&path->fp_dpo, 0, sizeof(path->fp_dpo));
1455     dpo_reset(&path->fp_dpo);
1456
1457     return (fib_path_get_index(path));
1458 }
1459
1460 /*
1461  * fib_path_destroy
1462  *
1463  * destroy a path that is no longer required
1464  */
1465 void
1466 fib_path_destroy (fib_node_index_t path_index)
1467 {
1468     fib_path_t *path;
1469
1470     path = fib_path_get(path_index);
1471
1472     ASSERT(NULL != path);
1473     FIB_PATH_DBG(path, "destroy");
1474
1475     fib_path_unresolve(path);
1476
1477     fib_node_deinit(&path->fp_node);
1478     pool_put(fib_path_pool, path);
1479 }
1480
1481 /*
1482  * fib_path_hash
1483  *
1484  * compute the hash of a path over its key region (between the hash markers)
1485  */
1486 uword
1487 fib_path_hash (fib_node_index_t path_index)
1488 {
1489     fib_path_t *path;
1490
1491     path = fib_path_get(path_index);
1492
1493     return (hash_memory(STRUCT_MARK_PTR(path, path_hash_start),
1494                         (STRUCT_OFFSET_OF(fib_path_t, path_hash_end) -
1495                          STRUCT_OFFSET_OF(fib_path_t, path_hash_start)),
1496                         0));
1497 }
1498
1499 /*
1500  * fib_path_cmp_i
1501  *
1502  * Compare two paths for equivalence.
1503  */
1504 static int
1505 fib_path_cmp_i (const fib_path_t *path1,
1506                 const fib_path_t *path2)
1507 {
1508     int res;
1509
1510     res = 1;
1511
1512     /*
1513      * paths of different types and protocols are not equal.
1514      * paths that differ only in weight and/or preference are the same path.
1515      */
1516     if (path1->fp_type != path2->fp_type)
1517     {
1518         res = (path1->fp_type - path2->fp_type);
1519     }
1520     else if (path1->fp_nh_proto != path2->fp_nh_proto)
1521     {
1522         res = (path1->fp_nh_proto - path2->fp_nh_proto);
1523     }
1524     else
1525     {
1526         /*
1527          * both paths are of the same type.
1528          * consider each type and its attributes in turn.
1529          */
1530         switch (path1->fp_type)
1531         {
1532         case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
1533             res = ip46_address_cmp(&path1->attached_next_hop.fp_nh,
1534                                    &path2->attached_next_hop.fp_nh);
1535             if (0 == res) {
1536                 res = (path1->attached_next_hop.fp_interface -
1537                        path2->attached_next_hop.fp_interface);
1538             }
1539             break;
1540         case FIB_PATH_TYPE_ATTACHED:
1541             res = (path1->attached.fp_interface -
1542                    path2->attached.fp_interface);
1543             break;
1544         case FIB_PATH_TYPE_RECURSIVE:
1545             res = ip46_address_cmp(&path1->recursive.fp_nh,
1546                                    &path2->recursive.fp_nh);
1547  
1548             if (0 == res)
1549             {
1550                 res = (path1->recursive.fp_tbl_id - path2->recursive.fp_tbl_id);
1551             }
1552             break;
1553         case FIB_PATH_TYPE_BIER_FMASK:
1554             res = ip46_address_cmp(&path1->bier_fmask.fp_nh,
1555                                    &path2->bier_fmask.fp_nh);
1556  
1557             if (0 == res)
1558             {
1559                 res = (path1->bier_fmask.fp_bier_fib -
1560                        path2->bier_fmask.fp_bier_fib);
1561             }
1562             break;
1563         case FIB_PATH_TYPE_BIER_IMP:
1564             res = (path1->bier_imp.fp_bier_imp -
1565                    path2->bier_imp.fp_bier_imp);
1566             break;
1567         case FIB_PATH_TYPE_BIER_TABLE:
1568             res = bier_table_id_cmp(&path1->bier_table.fp_bier_tbl,
1569                                     &path2->bier_table.fp_bier_tbl);
1570             break;
1571         case FIB_PATH_TYPE_DEAG:
1572             res = (path1->deag.fp_tbl_id - path2->deag.fp_tbl_id);
1573             if (0 == res)
1574             {
1575                 res = (path1->deag.fp_rpf_id - path2->deag.fp_rpf_id);
1576             }
1577             break;
1578         case FIB_PATH_TYPE_INTF_RX:
1579             res = (path1->intf_rx.fp_interface - path2->intf_rx.fp_interface);
1580             break;
1581         case FIB_PATH_TYPE_UDP_ENCAP:
1582             res = (path1->udp_encap.fp_udp_encap_id - path2->udp_encap.fp_udp_encap_id);
1583             break;
1584         case FIB_PATH_TYPE_SPECIAL:
1585         case FIB_PATH_TYPE_RECEIVE:
1586         case FIB_PATH_TYPE_EXCLUSIVE:
1587             res = 0;
1588             break;
1589         }
1590     }
1591     return (res);
1592 }
1593
1594 /*
1595  * fib_path_cmp_for_sort
1596  *
1597  * Compare two paths for equivalence. Used during path sorting.
1598  * As usual 0 means equal.
1599  */
1600 int
1601 fib_path_cmp_for_sort (void * v1,
1602                        void * v2)
1603 {
1604     fib_node_index_t *pi1 = v1, *pi2 = v2;
1605     fib_path_t *path1, *path2;
1606
1607     path1 = fib_path_get(*pi1);
1608     path2 = fib_path_get(*pi2);
1609
1610     /*
1611      * when sorting paths we want the highest preference paths
1612      * first, so that the set of choices built is in preference order
1613      */
1614     if (path1->fp_preference != path2->fp_preference)
1615     {
1616         return (path1->fp_preference - path2->fp_preference);
1617     }
1618
1619     return (fib_path_cmp_i(path1, path2));
1620 }
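/*
 * Illustrative usage (an assumption, not from this file): a path-list can keep
 * its vector of path indices in preference order with this comparator, e.g.
 *
 *   vec_sort_with_function(path_indices, fib_path_cmp_for_sort);
 */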
1621
1622 /*
1623  * fib_path_cmp
1624  *
1625  * Compare two paths for equivalence.
1626  */
1627 int
1628 fib_path_cmp (fib_node_index_t pi1,
1629               fib_node_index_t pi2)
1630 {
1631     fib_path_t *path1, *path2;
1632
1633     path1 = fib_path_get(pi1);
1634     path2 = fib_path_get(pi2);
1635
1636     return (fib_path_cmp_i(path1, path2));
1637 }
1638
1639 int
1640 fib_path_cmp_w_route_path (fib_node_index_t path_index,
1641                            const fib_route_path_t *rpath)
1642 {
1643     fib_path_t *path;
1644     int res;
1645
1646     path = fib_path_get(path_index);
1647
1648     res = 1;
1649
1650     if (path->fp_weight != rpath->frp_weight)
1651     {
1652         res = (path->fp_weight - rpath->frp_weight);
1653     }
1654     else
1655     {
1656         /*
1657          * both paths are of the same type.
1658          * consider each type and its attributes in turn.
1659          */
1660         switch (path->fp_type)
1661         {
1662         case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
1663             res = ip46_address_cmp(&path->attached_next_hop.fp_nh,
1664                                    &rpath->frp_addr);
1665             if (0 == res)
1666             {
1667                 res = (path->attached_next_hop.fp_interface -
1668                        rpath->frp_sw_if_index);
1669             }
1670             break;
1671         case FIB_PATH_TYPE_ATTACHED:
1672             res = (path->attached.fp_interface - rpath->frp_sw_if_index);
1673             break;
1674         case FIB_PATH_TYPE_RECURSIVE:
1675             if (DPO_PROTO_MPLS == path->fp_nh_proto)
1676             {
1677                 res = path->recursive.fp_nh.fp_local_label - rpath->frp_local_label;
1678
1679                 if (res == 0)
1680                 {
1681                     res = path->recursive.fp_nh.fp_eos - rpath->frp_eos;
1682                 }
1683             }
1684             else
1685             {
1686                 res = ip46_address_cmp(&path->recursive.fp_nh.fp_ip,
1687                                        &rpath->frp_addr);
1688             }
1689
1690             if (0 == res)
1691             {
1692                 res = (path->recursive.fp_tbl_id - rpath->frp_fib_index);
1693             }
1694             break;
1695         case FIB_PATH_TYPE_BIER_FMASK:
1696             res = ip46_address_cmp(&path->bier_fmask.fp_nh,
1697                                    &rpath->frp_addr);
1698
1699             if (0 == res)
1700             {
1701                 res = (path->bier_fmask.fp_bier_fib - rpath->frp_bier_fib_index);
1702             }
1703             break;
1704         case FIB_PATH_TYPE_BIER_IMP:
1705             res = (path->bier_imp.fp_bier_imp - rpath->frp_bier_imp);
1706             break;
1707         case FIB_PATH_TYPE_BIER_TABLE:
1708             res = bier_table_id_cmp(&path->bier_table.fp_bier_tbl,
1709                                     &rpath->frp_bier_tbl);
1710             break;
1711         case FIB_PATH_TYPE_INTF_RX:
1712             res = (path->intf_rx.fp_interface - rpath->frp_sw_if_index);
1713             break;
1714         case FIB_PATH_TYPE_UDP_ENCAP:
1715             res = (path->udp_encap.fp_udp_encap_id - rpath->frp_udp_encap_id);
1716             break;
1717         case FIB_PATH_TYPE_DEAG:
1718             res = (path->deag.fp_tbl_id - rpath->frp_fib_index);
1719             if (0 == res)
1720             {
1721                 res = (path->deag.fp_rpf_id - rpath->frp_rpf_id);
1722             }
1723             break;
1724         case FIB_PATH_TYPE_SPECIAL:
1725         case FIB_PATH_TYPE_RECEIVE:
1726         case FIB_PATH_TYPE_EXCLUSIVE:
1727             res = 0;
1728             break;
1729         }
1730     }
1731     return (res);
1732 }
1733
1734 /*
1735  * fib_path_recursive_loop_detect
1736  *
1737  * A forward walk of the FIB object graph to detect a cycle/loop. This
1738  * walk is initiated when an entry is linked to a new path-list or unlinked from an old one.
1739  * The entry vector passed contains all the FIB entries that are children of this
1740  * path (i.e. all the entries encountered on the walk so far). If this vector
1741  * contains the entry this path resolves via, then a loop is about to form.
1742  * The loop must be allowed to form, since we need the dependencies in place
1743  * so that we can track when the loop breaks.
1744  * However, we MUST NOT produce a loop in the forwarding graph (else packets
1745  * would loop around the switch path until the loop breaks), so we mark recursive
1746  * paths as looped so that they do not contribute forwarding information.
1747  * By marking the path as looped, an entry such as:
1748  *    X/Y
1749  *     via a.a.a.a (looped)
1750  *     via b.b.b.b (not looped)
1751  * can still forward using the information provided by b.b.b.b only.
1752  */
1753 int
1754 fib_path_recursive_loop_detect (fib_node_index_t path_index,
1755                                 fib_node_index_t **entry_indicies)
1756 {
1757     fib_path_t *path;
1758
1759     path = fib_path_get(path_index);
1760
1761     /*
1762      * the forced drop path is never looped, because it is never resolved.
1763      */
1764     if (fib_path_is_permanent_drop(path))
1765     {
1766         return (0);
1767     }
1768
1769     switch (path->fp_type)
1770     {
1771     case FIB_PATH_TYPE_RECURSIVE:
1772     {
1773         fib_node_index_t *entry_index, *entries;
1774         int looped = 0;
1775         entries = *entry_indicies;
1776
1777         vec_foreach(entry_index, entries) {
1778             if (*entry_index == path->fp_via_fib)
1779             {
1780                 /*
1781                  * the entry that is about to link to this path-list (or
1782                  * one of this path-list's children) is the same entry that
1783                  * this recursive path resolves through. this is a cycle.
1784                  * abort the walk.
1785                  */
1786                 looped = 1;
1787                 break;
1788             }
1789         }
1790
1791         if (looped)
1792         {
1793             FIB_PATH_DBG(path, "recursive loop formed");
1794             path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RECURSIVE_LOOP;
1795
1796             dpo_copy(&path->fp_dpo, drop_dpo_get(path->fp_nh_proto));
1797         }
1798         else
1799         {
1800             /*
1801              * no loop here yet. keep forward walking the graph.
1802              */     
1803             if (fib_entry_recursive_loop_detect(path->fp_via_fib, entry_indicies))
1804             {
1805                 FIB_PATH_DBG(path, "recursive loop formed");
1806                 path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RECURSIVE_LOOP;
1807             }
1808             else
1809             {
1810                 FIB_PATH_DBG(path, "recursive loop cleared");
1811                 path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RECURSIVE_LOOP;
1812             }
1813         }
1814         break;
1815     }
1816     case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
1817     case FIB_PATH_TYPE_ATTACHED:
1818     case FIB_PATH_TYPE_SPECIAL:
1819     case FIB_PATH_TYPE_DEAG:
1820     case FIB_PATH_TYPE_RECEIVE:
1821     case FIB_PATH_TYPE_INTF_RX:
1822     case FIB_PATH_TYPE_UDP_ENCAP:
1823     case FIB_PATH_TYPE_EXCLUSIVE:
1824     case FIB_PATH_TYPE_BIER_FMASK:
1825     case FIB_PATH_TYPE_BIER_TABLE:
1826     case FIB_PATH_TYPE_BIER_IMP:
1827         /*
1828          * these path types cannot be part of a loop, since they are the leaves
1829          * of the graph.
1830          */
1831         break;
1832     }
1833
1834     return (fib_path_is_looped(path_index));
1835 }
1836
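/*
 * fib_path_resolve
 *
 * Resolve the path so it can contribute forwarding; the DPO constructed
 * depends on the path's type. Returns whether the path is now resolved.
 */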
1837 int
1838 fib_path_resolve (fib_node_index_t path_index)
1839 {
1840     fib_path_t *path;
1841
1842     path = fib_path_get(path_index);
1843
1844     /*
1845      * hope for the best.
1846      */
1847     path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RESOLVED;
1848
1849     /*
1850      * the forced drop path resolves via the drop adj
1851      */
1852     if (fib_path_is_permanent_drop(path))
1853     {
1854         dpo_copy(&path->fp_dpo, drop_dpo_get(path->fp_nh_proto));
1855         path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
1856         return (fib_path_is_resolved(path_index));
1857     }
1858
1859     switch (path->fp_type)
1860     {
1861     case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
1862         fib_path_attached_next_hop_set(path);
1863         break;
1864     case FIB_PATH_TYPE_ATTACHED:
1865         if (DPO_PROTO_ETHERNET == path->fp_nh_proto)
1866         {
1867             l2_bridge_dpo_add_or_lock(path->attached.fp_interface,
1868                                       &path->fp_dpo);
1869         }
1870         else
1871         {
1872             /*
1873              * the path is unresolved if the attached interface is admin down
1874              */
1875             if (!vnet_sw_interface_is_admin_up(vnet_get_main(),
1876                                                path->attached.fp_interface))
1877             {
1878                 path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
1879             }
1880             dpo_set(&path->fp_dpo,
1881                     DPO_ADJACENCY,
1882                     path->fp_nh_proto,
1883                     fib_path_attached_get_adj(path,
1884                                               dpo_proto_to_link(path->fp_nh_proto)));
1885
1886             /*
1887              * become a child of the adjacency so we receive updates
1888              * when the interface state changes
1889              */
1890             path->fp_sibling = adj_child_add(path->fp_dpo.dpoi_index,
1891                                              FIB_NODE_TYPE_PATH,
1892                                              fib_path_get_index(path));
1893         }
1894         break;
1895     case FIB_PATH_TYPE_RECURSIVE:
1896     {
1897         /*
1898          * Create a RR source entry in the table for the address
1899          * that this path recurses through.
1900          * This resolve action is recursive, hence we may create
1901          * more paths in the process. More creates may mean the path
1902          * pool is reallocated, so this path is re-fetched after the add.
1903          */
1904         fib_node_index_t fei;
1905         fib_prefix_t pfx;
1906
1907         ASSERT(FIB_NODE_INDEX_INVALID == path->fp_via_fib);
1908
1909         if (DPO_PROTO_MPLS == path->fp_nh_proto)
1910         {
1911             fib_prefix_from_mpls_label(path->recursive.fp_nh.fp_local_label,
1912                                        path->recursive.fp_nh.fp_eos,
1913                                        &pfx);
1914         }
1915         else
1916         {
1917             fib_prefix_from_ip46_addr(&path->recursive.fp_nh.fp_ip, &pfx);
1918         }
1919
1920         fei = fib_table_entry_special_add(path->recursive.fp_tbl_id,
1921                                           &pfx,
1922                                           FIB_SOURCE_RR,
1923                                           FIB_ENTRY_FLAG_NONE);
1924
1925         path = fib_path_get(path_index);
1926         path->fp_via_fib = fei;
1927
1928         /*
1929          * become a dependent child of the entry so the path is 
1930          * informed when the forwarding for the entry changes.
1931          */
1932         path->fp_sibling = fib_entry_child_add(path->fp_via_fib,
1933                                                FIB_NODE_TYPE_PATH,
1934                                                fib_path_get_index(path));
1935
1936         /*
1937          * create and configure the IP DPO
1938          */
1939         fib_path_recursive_adj_update(
1940             path,
1941             fib_path_to_chain_type(path),
1942             &path->fp_dpo);
1943
1944         break;
1945     }
1946     case FIB_PATH_TYPE_BIER_FMASK:
1947     {
1948         /*
1949          * Find the BIER f-mask to link to
1950          */
1951         bier_fmask_id_t fmid = {
1952             .bfmi_nh = path->bier_fmask.fp_nh,
1953             .bfmi_hdr_type = BIER_HDR_O_MPLS,
1954         };
1955
1956         ASSERT(FIB_NODE_INDEX_INVALID == path->fp_via_bier_fmask);
1957
1958         path->fp_via_bier_fmask = bier_fmask_db_find(path->bier_fmask.fp_bier_fib,
1959                                                 &fmid);
1960
1961         /*
1962          * become a dependent child of the entry so the path is
1963          * informed when the forwarding for the entry changes.
1964          */
1965         path->fp_sibling = bier_fmask_child_add(path->fp_via_bier_fmask,
1966                                                 FIB_NODE_TYPE_PATH,
1967                                                 fib_path_get_index(path));
1968
1969         fib_path_bier_fmask_update(path, &path->fp_dpo);
1970
1971         break;
1972     }
1973     case FIB_PATH_TYPE_BIER_IMP:
1974         bier_imp_lock(path->bier_imp.fp_bier_imp);
1975         bier_imp_contribute_forwarding(path->bier_imp.fp_bier_imp,
1976                                        DPO_PROTO_IP4,
1977                                        &path->fp_dpo);
1978         break;
1979     case FIB_PATH_TYPE_BIER_TABLE:
1980     {
1981         /*
1982          * Find/create the BIER table to link to
1983          */
1984         ASSERT(FIB_NODE_INDEX_INVALID == path->fp_via_bier_tbl);
1985
1986         path->fp_via_bier_tbl =
1987             bier_table_ecmp_create_and_lock(&path->bier_table.fp_bier_tbl);
1988
1989         bier_table_contribute_forwarding(path->fp_via_bier_tbl,
1990                                          &path->fp_dpo);
1991         break;
1992     }
1993     case FIB_PATH_TYPE_SPECIAL:
1994         /*
1995          * Resolve via the drop
1996          */
1997         dpo_copy(&path->fp_dpo, drop_dpo_get(path->fp_nh_proto));
1998         break;
1999     case FIB_PATH_TYPE_DEAG:
2000     {
2001         /*
2002          * Resolve via a lookup DPO.
2003          * FIXME. control plane should add routes with a table ID
2004          */
2005         lookup_input_t input;
2006         lookup_cast_t cast;
2007
2008         cast = (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_RPF_ID ?
2009                 LOOKUP_MULTICAST :
2010                 LOOKUP_UNICAST);
2011         input = (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_DEAG_SRC ?
2012                 LOOKUP_INPUT_SRC_ADDR :
2013                 LOOKUP_INPUT_DST_ADDR);
2014
2015         lookup_dpo_add_or_lock_w_fib_index(path->deag.fp_tbl_id,
2016                                            path->fp_nh_proto,
2017                                            cast,
2018                                            input,
2019                                            LOOKUP_TABLE_FROM_CONFIG,
2020                                            &path->fp_dpo);
2021         break;
2022     }
2023     case FIB_PATH_TYPE_RECEIVE:
2024         /*
2025          * Resolve via a receive DPO.
2026          */
2027         receive_dpo_add_or_lock(path->fp_nh_proto,
2028                                 path->receive.fp_interface,
2029                                 &path->receive.fp_addr,
2030                                 &path->fp_dpo);
2031         break;
2032     case FIB_PATH_TYPE_UDP_ENCAP:
2033         udp_encap_lock(path->udp_encap.fp_udp_encap_id);
2034         udp_encap_contribute_forwarding(path->udp_encap.fp_udp_encap_id,
2035                                         path->fp_nh_proto,
2036                                         &path->fp_dpo);
2037         break;
2038     case FIB_PATH_TYPE_INTF_RX: {
2039         /*
2040          * Resolve via an interface-receive DPO.
2041          */
2042         interface_rx_dpo_add_or_lock(path->fp_nh_proto,
2043                                      path->intf_rx.fp_interface,
2044                                      &path->fp_dpo);
2045         break;
2046     }
2047     case FIB_PATH_TYPE_EXCLUSIVE:
2048         /*
2049          * Resolve via the user provided DPO
2050          */
2051         dpo_copy(&path->fp_dpo, &path->exclusive.fp_ex_dpo);
2052         break;
2053     }
2054
2055     return (fib_path_is_resolved(path_index));
2056 }
2057
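/*
 * Return the SW interface index via which the path resolves,
 * or ~0 if it does not resolve via an interface.
 */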
2058 u32
2059 fib_path_get_resolving_interface (fib_node_index_t path_index)
2060 {
2061     fib_path_t *path;
2062
2063     path = fib_path_get(path_index);
2064
2065     switch (path->fp_type)
2066     {
2067     case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
2068         return (path->attached_next_hop.fp_interface);
2069     case FIB_PATH_TYPE_ATTACHED:
2070         return (path->attached.fp_interface);
2071     case FIB_PATH_TYPE_RECEIVE:
2072         return (path->receive.fp_interface);
2073     case FIB_PATH_TYPE_RECURSIVE:
2074         if (fib_path_is_resolved(path_index))
2075         {
2076             return (fib_entry_get_resolving_interface(path->fp_via_fib));
2077         }
2078         break;
2079     case FIB_PATH_TYPE_INTF_RX:
2080     case FIB_PATH_TYPE_UDP_ENCAP:
2081     case FIB_PATH_TYPE_SPECIAL:
2082     case FIB_PATH_TYPE_DEAG:
2083     case FIB_PATH_TYPE_EXCLUSIVE:
2084     case FIB_PATH_TYPE_BIER_FMASK:
2085     case FIB_PATH_TYPE_BIER_TABLE:
2086     case FIB_PATH_TYPE_BIER_IMP:
2087         break;
2088     }
2089     return (~0);
2090 }
2091
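/*
 * Return the index of the object via which the path resolves (FIB entry,
 * BIER f-mask/table/imposition or UDP encap); ~0 for the other path types.
 */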
2092 index_t
2093 fib_path_get_resolving_index (fib_node_index_t path_index)
2094 {
2095     fib_path_t *path;
2096
2097     path = fib_path_get(path_index);
2098
2099     switch (path->fp_type)
2100     {
2101     case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
2102     case FIB_PATH_TYPE_ATTACHED:
2103     case FIB_PATH_TYPE_RECEIVE:
2104     case FIB_PATH_TYPE_INTF_RX:
2105     case FIB_PATH_TYPE_SPECIAL:
2106     case FIB_PATH_TYPE_DEAG:
2107     case FIB_PATH_TYPE_EXCLUSIVE:
2108         break;
2109     case FIB_PATH_TYPE_UDP_ENCAP:
2110         return (path->udp_encap.fp_udp_encap_id);
2111     case FIB_PATH_TYPE_RECURSIVE:
2112         return (path->fp_via_fib);
2113     case FIB_PATH_TYPE_BIER_FMASK:
2114         return (path->fp_via_bier_fmask);
2115     case FIB_PATH_TYPE_BIER_TABLE:
2116         return (path->fp_via_bier_tbl);
2117     case FIB_PATH_TYPE_BIER_IMP:
2118         return (path->bier_imp.fp_bier_imp);
2119     }
2120     return (~0);
2121 }
2122
2123 adj_index_t
2124 fib_path_get_adj (fib_node_index_t path_index)
2125 {
2126     fib_path_t *path;
2127
2128     path = fib_path_get(path_index);
2129
2130     ASSERT(dpo_is_adj(&path->fp_dpo));
2131     if (dpo_is_adj(&path->fp_dpo))
2132     {
2133         return (path->fp_dpo.dpoi_index);
2134     }
2135     return (ADJ_INDEX_INVALID);
2136 }
2137
2138 u16
2139 fib_path_get_weight (fib_node_index_t path_index)
2140 {
2141     fib_path_t *path;
2142
2143     path = fib_path_get(path_index);
2144
2145     ASSERT(path);
2146
2147     return (path->fp_weight);
2148 }
2149
2150 u16
2151 fib_path_get_preference (fib_node_index_t path_index)
2152 {
2153     fib_path_t *path;
2154
2155     path = fib_path_get(path_index);
2156
2157     ASSERT(path);
2158
2159     return (path->fp_preference);
2160 }
2161
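/*
 * Return the path's multicast RPF-ID, if one was configured, else ~0.
 */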
2162 u32
2163 fib_path_get_rpf_id (fib_node_index_t path_index)
2164 {
2165     fib_path_t *path;
2166
2167     path = fib_path_get(path_index);
2168
2169     ASSERT(path);
2170
2171     if (FIB_PATH_CFG_FLAG_RPF_ID & path->fp_cfg_flags)
2172     {
2173         return (path->deag.fp_rpf_id);
2174     }
2175
2176     return (~0);
2177 }
2178
2179 /**
2180  * @brief Contribute the path's adjacency to the list passed.
2181  * By calling this function over all paths, recursively, a child
2182  * can construct its full set of forwarding adjacencies, and hence its
2183  * uRPF list.
2184  */
2185 void
2186 fib_path_contribute_urpf (fib_node_index_t path_index,
2187                           index_t urpf)
2188 {
2189     fib_path_t *path;
2190
2191     path = fib_path_get(path_index);
2192
2193     /*
2194      * resolved and unresolved paths contribute to the RPF list.
2195      */
2196     switch (path->fp_type)
2197     {
2198     case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
2199         fib_urpf_list_append(urpf, path->attached_next_hop.fp_interface);
2200         break;
2201
2202     case FIB_PATH_TYPE_ATTACHED:
2203         fib_urpf_list_append(urpf, path->attached.fp_interface);
2204         break;
2205
2206     case FIB_PATH_TYPE_RECURSIVE:
2207         if (FIB_NODE_INDEX_INVALID != path->fp_via_fib &&
2208             !fib_path_is_looped(path_index))
2209         {
2210             /*
2211              * a path may be unresolved due to resolution constraints, or
2212              * unresolved because it has no via entry; without one it contributes nothing.
2213              */
2214             fib_entry_contribute_urpf(path->fp_via_fib, urpf);
2215         }
2216         break;
2217
2218     case FIB_PATH_TYPE_EXCLUSIVE:
2219     case FIB_PATH_TYPE_SPECIAL:
2220     {
2221         /*
2222          * these path types may link to an adj, if that's what
2223          * the client gave
2224          */
2225         u32 rpf_sw_if_index;
2226
2227         rpf_sw_if_index = dpo_get_urpf(&path->fp_dpo);
2228
2229         if (~0 != rpf_sw_if_index)
2230         {
2231             fib_urpf_list_append(urpf, rpf_sw_if_index);
2232         }
2233         break;
2234     }
2235     case FIB_PATH_TYPE_DEAG:
2236     case FIB_PATH_TYPE_RECEIVE:
2237     case FIB_PATH_TYPE_INTF_RX:
2238     case FIB_PATH_TYPE_UDP_ENCAP:
2239     case FIB_PATH_TYPE_BIER_FMASK:
2240     case FIB_PATH_TYPE_BIER_TABLE:
2241     case FIB_PATH_TYPE_BIER_IMP:
2242         /*
2243          * these path types don't link to an adj
2244          */
2245         break;
2246     }
2247 }
2248
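/*
 * For deag paths, wrap the DPO passed in an MPLS disposition DPO;
 * other path types leave the DPO unchanged.
 */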
2249 void
2250 fib_path_stack_mpls_disp (fib_node_index_t path_index,
2251                           dpo_proto_t payload_proto,
2252                           dpo_id_t *dpo)
2253 {
2254     fib_path_t *path;
2255
2256     path = fib_path_get(path_index);
2257
2258     ASSERT(path);
2259
2260     switch (path->fp_type)
2261     {
2262     case FIB_PATH_TYPE_DEAG:
2263     {
2264         dpo_id_t tmp = DPO_INVALID;
2265
2266         dpo_copy(&tmp, dpo);
2267         dpo_set(dpo,
2268                 DPO_MPLS_DISPOSITION,
2269                 payload_proto,
2270                 mpls_disp_dpo_create(payload_proto,
2271                                      path->deag.fp_rpf_id,
2272                                      &tmp));
2273         dpo_reset(&tmp);
2274         break;
2275     }
2276     case FIB_PATH_TYPE_RECEIVE:
2277     case FIB_PATH_TYPE_ATTACHED:
2278     case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
2279     case FIB_PATH_TYPE_RECURSIVE:
2280     case FIB_PATH_TYPE_INTF_RX:
2281     case FIB_PATH_TYPE_UDP_ENCAP:
2282     case FIB_PATH_TYPE_EXCLUSIVE:
2283     case FIB_PATH_TYPE_SPECIAL:
2284     case FIB_PATH_TYPE_BIER_FMASK:
2285     case FIB_PATH_TYPE_BIER_TABLE:
2286     case FIB_PATH_TYPE_BIER_IMP:
2287         break;
2288     }
2289 }
2290
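/*
 * fib_path_contribute_forwarding
 *
 * Contribute the path's forwarding object to the DPO passed,
 * for the requested forwarding chain type.
 */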
2291 void
2292 fib_path_contribute_forwarding (fib_node_index_t path_index,
2293                                 fib_forward_chain_type_t fct,
2294                                 dpo_id_t *dpo)
2295 {
2296     fib_path_t *path;
2297
2298     path = fib_path_get(path_index);
2299
2300     ASSERT(path);
2301     ASSERT(FIB_FORW_CHAIN_TYPE_MPLS_EOS != fct);
2302
2303     FIB_PATH_DBG(path, "contribute");
2304
2305     /*
2306      * The DPO stored in the path was created when the path was resolved.
2307      * This then represents the path's 'native' protocol, e.g. IP.
2308      * For all other chain types we will need to construct something else.
2309      */
2310     if (fib_path_to_chain_type(path) == fct)
2311     {
2312         dpo_copy(dpo, &path->fp_dpo);
2313     }
2314     else
2315     {
2316         switch (path->fp_type)
2317         {
2318         case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
2319             switch (fct)
2320             {
2321             case FIB_FORW_CHAIN_TYPE_UNICAST_IP4:
2322             case FIB_FORW_CHAIN_TYPE_UNICAST_IP6:
2323             case FIB_FORW_CHAIN_TYPE_MPLS_EOS:
2324             case FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS:
2325             case FIB_FORW_CHAIN_TYPE_ETHERNET:
2326             case FIB_FORW_CHAIN_TYPE_NSH:
2327             {
2328                 adj_index_t ai;
2329
2330                 /*
2331                  * get an appropriate link type adj.
2332                  */
2333                 ai = fib_path_attached_next_hop_get_adj(
2334                          path,
2335                          fib_forw_chain_type_to_link_type(fct));
2336                 dpo_set(dpo, DPO_ADJACENCY,
2337                         fib_forw_chain_type_to_dpo_proto(fct), ai);
2338                 adj_unlock(ai);
2339
2340                 break;
2341             }
2342             case FIB_FORW_CHAIN_TYPE_MCAST_IP4:
2343             case FIB_FORW_CHAIN_TYPE_MCAST_IP6:
2344             case FIB_FORW_CHAIN_TYPE_BIER:
2345                 break;
2346             }
2347             break;
2348         case FIB_PATH_TYPE_RECURSIVE:
2349             switch (fct)
2350             {
2351             case FIB_FORW_CHAIN_TYPE_MPLS_EOS:
2352             case FIB_FORW_CHAIN_TYPE_UNICAST_IP4:
2353             case FIB_FORW_CHAIN_TYPE_UNICAST_IP6:
2354             case FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS:
2355             case FIB_FORW_CHAIN_TYPE_MCAST_IP4:
2356             case FIB_FORW_CHAIN_TYPE_MCAST_IP6:
2357             case FIB_FORW_CHAIN_TYPE_BIER:
2358                 fib_path_recursive_adj_update(path, fct, dpo);
2359                 break;
2360             case FIB_FORW_CHAIN_TYPE_ETHERNET:
2361             case FIB_FORW_CHAIN_TYPE_NSH:
2362                 ASSERT(0);
2363                 break;
2364             }
2365             break;
2366         case FIB_PATH_TYPE_BIER_TABLE:
2367             switch (fct)
2368             {
2369             case FIB_FORW_CHAIN_TYPE_BIER:
2370                 bier_table_contribute_forwarding(path->fp_via_bier_tbl, dpo);
2371                 break;
2372             case FIB_FORW_CHAIN_TYPE_MPLS_EOS:
2373             case FIB_FORW_CHAIN_TYPE_UNICAST_IP4:
2374             case FIB_FORW_CHAIN_TYPE_UNICAST_IP6:
2375             case FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS:
2376             case FIB_FORW_CHAIN_TYPE_MCAST_IP4:
2377             case FIB_FORW_CHAIN_TYPE_MCAST_IP6:
2378             case FIB_FORW_CHAIN_TYPE_ETHERNET:
2379             case FIB_FORW_CHAIN_TYPE_NSH:
2380                 ASSERT(0);
2381                 break;
2382             }
2383             break;
2384         case FIB_PATH_TYPE_BIER_FMASK:
2385             switch (fct)
2386             {
2387             case FIB_FORW_CHAIN_TYPE_BIER:
2388                 fib_path_bier_fmask_update(path, dpo);
2389                 break;
2390             case FIB_FORW_CHAIN_TYPE_MPLS_EOS:
2391             case FIB_FORW_CHAIN_TYPE_UNICAST_IP4:
2392             case FIB_FORW_CHAIN_TYPE_UNICAST_IP6:
2393             case FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS:
2394             case FIB_FORW_CHAIN_TYPE_MCAST_IP4:
2395             case FIB_FORW_CHAIN_TYPE_MCAST_IP6:
2396             case FIB_FORW_CHAIN_TYPE_ETHERNET:
2397             case FIB_FORW_CHAIN_TYPE_NSH:
2398                 ASSERT(0);
2399                 break;
2400             }
2401             break;
2402         case FIB_PATH_TYPE_BIER_IMP:
2403             bier_imp_contribute_forwarding(path->bier_imp.fp_bier_imp,
2404                                            fib_forw_chain_type_to_dpo_proto(fct),
2405                                            dpo);
2406             break;
2407         case FIB_PATH_TYPE_DEAG:
2408             switch (fct)
2409             {
2410             case FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS:
2411                 lookup_dpo_add_or_lock_w_table_id(MPLS_FIB_DEFAULT_TABLE_ID,
2412                                                   DPO_PROTO_MPLS,
2413                                                   LOOKUP_UNICAST,
2414                                                   LOOKUP_INPUT_DST_ADDR,
2415                                                   LOOKUP_TABLE_FROM_CONFIG,
2416                                                   dpo);
2417                 break;
2418             case FIB_FORW_CHAIN_TYPE_MPLS_EOS:
2419             case FIB_FORW_CHAIN_TYPE_UNICAST_IP4:
2420             case FIB_FORW_CHAIN_TYPE_UNICAST_IP6:
2421                 dpo_copy(dpo, &path->fp_dpo);
2422                 break;
2423             case FIB_FORW_CHAIN_TYPE_MCAST_IP4:
2424             case FIB_FORW_CHAIN_TYPE_MCAST_IP6:
2425             case FIB_FORW_CHAIN_TYPE_BIER:
2426                 break;
2427             case FIB_FORW_CHAIN_TYPE_ETHERNET:
2428             case FIB_FORW_CHAIN_TYPE_NSH:
2429                 ASSERT(0);
2430                 break;
2431             }
2432             break;
2433         case FIB_PATH_TYPE_EXCLUSIVE:
2434             dpo_copy(dpo, &path->exclusive.fp_ex_dpo);
2435             break;
2436         case FIB_PATH_TYPE_ATTACHED:
2437             if (DPO_PROTO_ETHERNET == path->fp_nh_proto)
2438             {
2439                 dpo_copy(dpo, &path->fp_dpo);
2440                 break;
2441             }
2442             switch (fct)
2443             {
2444             case FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS:
2445             case FIB_FORW_CHAIN_TYPE_UNICAST_IP4:
2446             case FIB_FORW_CHAIN_TYPE_UNICAST_IP6:
2447             case FIB_FORW_CHAIN_TYPE_MPLS_EOS:
2448             case FIB_FORW_CHAIN_TYPE_ETHERNET:
2449             case FIB_FORW_CHAIN_TYPE_NSH:
2450             case FIB_FORW_CHAIN_TYPE_BIER:
2451                 {
2452                     adj_index_t ai;
2453
2454                     /*
2455                      * get an appropriate link type adj.
2456                      */
2457                     ai = fib_path_attached_get_adj(
2458                             path,
2459                             fib_forw_chain_type_to_link_type(fct));
2460                     dpo_set(dpo, DPO_ADJACENCY,
2461                             fib_forw_chain_type_to_dpo_proto(fct), ai);
2462                     adj_unlock(ai);
2463                     break;
2464                 }
2465             case FIB_FORW_CHAIN_TYPE_MCAST_IP4:
2466             case FIB_FORW_CHAIN_TYPE_MCAST_IP6:
2467                 {
2468                     adj_index_t ai;
2469
2470                     /*
2471                      * Create the adj needed for sending IP multicast traffic
2472                      */
2473                     ai = adj_mcast_add_or_lock(dpo_proto_to_fib(path->fp_nh_proto),
2474                                                fib_forw_chain_type_to_link_type(fct),
2475                                                path->attached.fp_interface);
2476                     dpo_set(dpo, DPO_ADJACENCY,
2477                             fib_forw_chain_type_to_dpo_proto(fct),
2478                             ai);
2479                     adj_unlock(ai);
2480                 }
2481                 break;
2482             }
2483             break;
2484         case FIB_PATH_TYPE_INTF_RX:
2485             /*
2486              * Create the interface-rx DPO on the path's RX interface
2487              */
2488             interface_rx_dpo_add_or_lock(fib_forw_chain_type_to_dpo_proto(fct),
2489                                          path->attached.fp_interface,
2490                                          dpo);
2491             break;
2492         case FIB_PATH_TYPE_UDP_ENCAP:
2493             udp_encap_contribute_forwarding(path->udp_encap.fp_udp_encap_id,
2494                                             path->fp_nh_proto,
2495                                             dpo);
2496             break;
2497         case FIB_PATH_TYPE_RECEIVE:
2498         case FIB_PATH_TYPE_SPECIAL:
2499             dpo_copy(dpo, &path->fp_dpo);
2500             break;
2501         }
2502     }
2503 }
2504
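/*
 * If the path is resolved, append its DPO and weight to the vector of
 * load-balance choices. The (possibly reallocated) vector is returned.
 */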
2505 load_balance_path_t *
2506 fib_path_append_nh_for_multipath_hash (fib_node_index_t path_index,
2507                                        fib_forward_chain_type_t fct,
2508                                        load_balance_path_t *hash_key)
2509 {
2510     load_balance_path_t *mnh;
2511     fib_path_t *path;
2512
2513     path = fib_path_get(path_index);
2514
2515     ASSERT(path);
2516
2517     if (fib_path_is_resolved(path_index))
2518     {
2519         vec_add2(hash_key, mnh, 1);
2520
2521         mnh->path_weight = path->fp_weight;
2522         mnh->path_index = path_index;
2523         fib_path_contribute_forwarding(path_index, fct, &mnh->path_dpo);
2524     }
2525
2526     return (hash_key);
2527 }
2528
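/*
 * A path is recursion-constrained if it is recursive and was configured
 * to resolve only via an attached or host prefix.
 */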
2529 int
2530 fib_path_is_recursive_constrained (fib_node_index_t path_index)
2531 {
2532     fib_path_t *path;
2533
2534     path = fib_path_get(path_index);
2535
2536     return ((FIB_PATH_TYPE_RECURSIVE == path->fp_type) &&
2537             ((path->fp_cfg_flags & FIB_PATH_CFG_FLAG_RESOLVE_ATTACHED) ||
2538              (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_RESOLVE_HOST)));
2539 }
2540
2541 int
2542 fib_path_is_exclusive (fib_node_index_t path_index)
2543 {
2544     fib_path_t *path;
2545
2546     path = fib_path_get(path_index);
2547
2548     return (FIB_PATH_TYPE_EXCLUSIVE == path->fp_type);
2549 }
2550
2551 int
2552 fib_path_is_deag (fib_node_index_t path_index)
2553 {
2554     fib_path_t *path;
2555
2556     path = fib_path_get(path_index);
2557
2558     return (FIB_PATH_TYPE_DEAG == path->fp_type);
2559 }
2560
2561 int
2562 fib_path_is_resolved (fib_node_index_t path_index)
2563 {
2564     fib_path_t *path;
2565
2566     path = fib_path_get(path_index);
2567
2568     return (dpo_id_is_valid(&path->fp_dpo) &&
2569             (path->fp_oper_flags & FIB_PATH_OPER_FLAG_RESOLVED) &&
2570             !fib_path_is_looped(path_index) &&
2571             !fib_path_is_permanent_drop(path));
2572 }
2573
2574 int
2575 fib_path_is_looped (fib_node_index_t path_index)
2576 {
2577     fib_path_t *path;
2578
2579     path = fib_path_get(path_index);
2580
2581     return (path->fp_oper_flags & FIB_PATH_OPER_FLAG_RECURSIVE_LOOP);
2582 }
2583
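/*
 * Path-list walk callback: encode each path into a fib_route_path_encode_t
 * for reporting via the API.
 */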
2584 fib_path_list_walk_rc_t
2585 fib_path_encode (fib_node_index_t path_list_index,
2586                  fib_node_index_t path_index,
2587                  void *ctx)
2588 {
2589     fib_route_path_encode_t **api_rpaths = ctx;
2590     fib_route_path_encode_t *api_rpath;
2591     fib_path_t *path;
2592
2593     path = fib_path_get(path_index);
2594     if (!path)
2595       return (FIB_PATH_LIST_WALK_CONTINUE);
2596     vec_add2(*api_rpaths, api_rpath, 1);
2597     api_rpath->rpath.frp_weight = path->fp_weight;
2598     api_rpath->rpath.frp_preference = path->fp_preference;
2599     api_rpath->rpath.frp_proto = path->fp_nh_proto;
2600     api_rpath->rpath.frp_sw_if_index = ~0;
2601     api_rpath->dpo = path->exclusive.fp_ex_dpo;
2602     switch (path->fp_type)
2603       {
2604       case FIB_PATH_TYPE_RECEIVE:
2605         api_rpath->rpath.frp_addr = path->receive.fp_addr;
2606         api_rpath->rpath.frp_sw_if_index = path->receive.fp_interface;
2607         api_rpath->dpo = path->fp_dpo;
2608         break;
2609       case FIB_PATH_TYPE_ATTACHED:
2610         api_rpath->rpath.frp_sw_if_index = path->attached.fp_interface;
2611         api_rpath->dpo = path->fp_dpo;
2612         break;
2613       case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
2614         api_rpath->rpath.frp_sw_if_index = path->attached_next_hop.fp_interface;
2615         api_rpath->rpath.frp_addr = path->attached_next_hop.fp_nh;
2616         break;
2617       case FIB_PATH_TYPE_BIER_FMASK:
2618         api_rpath->rpath.frp_fib_index = path->bier_fmask.fp_bier_fib;
2619         api_rpath->rpath.frp_addr = path->bier_fmask.fp_nh;
2620         break;
2621       case FIB_PATH_TYPE_SPECIAL:
2622         break;
2623       case FIB_PATH_TYPE_DEAG:
2624         api_rpath->rpath.frp_fib_index = path->deag.fp_tbl_id;
2625         api_rpath->dpo = path->fp_dpo;
2626         break;
2627       case FIB_PATH_TYPE_RECURSIVE:
2628         api_rpath->rpath.frp_addr = path->recursive.fp_nh.fp_ip;
2629         break;
2630       default:
2631         break;
2632       }
2633     return (FIB_PATH_LIST_WALK_CONTINUE);
2634 }
2635
2636 dpo_proto_t
2637 fib_path_get_proto (fib_node_index_t path_index)
2638 {
2639     fib_path_t *path;
2640
2641     path = fib_path_get(path_index);
2642
2643     return (path->fp_nh_proto);
2644 }
2645
2646 void
2647 fib_path_module_init (void)
2648 {
2649     fib_node_register_type (FIB_NODE_TYPE_PATH, &fib_path_vft);
2650 }
2651
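/*
 * CLI handler for "show fib paths": dump all paths, or one path and its
 * children when an index is given.
 */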
2652 static clib_error_t *
2653 show_fib_path_command (vlib_main_t * vm,
2654                         unformat_input_t * input,
2655                         vlib_cli_command_t * cmd)
2656 {
2657     fib_node_index_t pi;
2658     fib_path_t *path;
2659
2660     if (unformat (input, "%d", &pi))
2661     {
2662         /*
2663          * show one in detail
2664          */
2665         if (!pool_is_free_index(fib_path_pool, pi))
2666         {
2667             path = fib_path_get(pi);
2668             u8 *s = fib_path_format(pi, NULL);
2669             s = format(s, "children:");
2670             s = fib_node_children_format(path->fp_node.fn_children, s);
2671             vlib_cli_output (vm, "%s", s);
2672             vec_free(s);
2673         }
2674         else
2675         {
2676             vlib_cli_output (vm, "path %d invalid", pi);
2677         }
2678     }
2679     else
2680     {
2681         vlib_cli_output (vm, "FIB Paths");
2682         pool_foreach(path, fib_path_pool,
2683         ({
2684             vlib_cli_output (vm, "%U", format_fib_path, path);
2685         }));
2686     }
2687
2688     return (NULL);
2689 }
2690
2691 VLIB_CLI_COMMAND (show_fib_path, static) = {
2692   .path = "show fib paths",
2693   .function = show_fib_path_command,
2694   .short_help = "show fib paths",
2695 };