FIB: store the node type not the function pointer.
[vpp.git] / src / vnet / fib / fib_path.c
1 /*
2  * Copyright (c) 2016 Cisco and/or its affiliates.
3  * Licensed under the Apache License, Version 2.0 (the "License");
4  * you may not use this file except in compliance with the License.
5  * You may obtain a copy of the License at:
6  *
7  *     http://www.apache.org/licenses/LICENSE-2.0
8  *
9  * Unless required by applicable law or agreed to in writing, software
10  * distributed under the License is distributed on an "AS IS" BASIS,
11  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12  * See the License for the specific language governing permissions and
13  * limitations under the License.
14  */
15
16 #include <vlib/vlib.h>
17 #include <vnet/vnet.h>
18 #include <vnet/ip/format.h>
19 #include <vnet/ip/ip.h>
20 #include <vnet/dpo/drop_dpo.h>
21 #include <vnet/dpo/receive_dpo.h>
22 #include <vnet/dpo/load_balance_map.h>
23 #include <vnet/dpo/lookup_dpo.h>
24 #include <vnet/dpo/interface_rx_dpo.h>
25 #include <vnet/dpo/mpls_disposition.h>
26 #include <vnet/dpo/l2_bridge_dpo.h>
27 #include <vnet/dpo/drop_dpo.h>
28
29 #include <vnet/adj/adj.h>
30 #include <vnet/adj/adj_mcast.h>
31
32 #include <vnet/fib/fib_path.h>
33 #include <vnet/fib/fib_node.h>
34 #include <vnet/fib/fib_table.h>
35 #include <vnet/fib/fib_entry.h>
36 #include <vnet/fib/fib_path_list.h>
37 #include <vnet/fib/fib_internal.h>
38 #include <vnet/fib/fib_urpf_list.h>
39 #include <vnet/fib/mpls_fib.h>
40 #include <vnet/udp/udp_encap.h>
41 #include <vnet/bier/bier_fmask.h>
42 #include <vnet/bier/bier_table.h>
43 #include <vnet/bier/bier_imp.h>
44
45 /**
46  * Enumeration of path types
47  */
48 typedef enum fib_path_type_t_ {
49     /**
50      * Marker. Add new types after this one.
51      */
52     FIB_PATH_TYPE_FIRST = 0,
53     /**
54      * Attached-nexthop. An interface and a nexthop are known.
55      */
56     FIB_PATH_TYPE_ATTACHED_NEXT_HOP = FIB_PATH_TYPE_FIRST,
57     /**
58      * attached. Only the interface is known.
59      */
60     FIB_PATH_TYPE_ATTACHED,
61     /**
62      * recursive. Only the next-hop is known.
63      */
64     FIB_PATH_TYPE_RECURSIVE,
65     /**
66      * special. nothing is known. so we drop.
67      */
68     FIB_PATH_TYPE_SPECIAL,
69     /**
70      * exclusive. user provided adj.
71      */
72     FIB_PATH_TYPE_EXCLUSIVE,
73     /**
74      * deag. Link to a lookup adj in the next table
75      */
76     FIB_PATH_TYPE_DEAG,
77     /**
78      * interface receive.
79      */
80     FIB_PATH_TYPE_INTF_RX,
81     /**
82      * udp-encap. resolves via a UDP encap object.
83      */
84     FIB_PATH_TYPE_UDP_ENCAP,
85     /**
86      * receive. it's for-us.
87      */
88     FIB_PATH_TYPE_RECEIVE,
89     /**
90      * bier-imp. it's via a BIER imposition.
91      */
92     FIB_PATH_TYPE_BIER_IMP,
93     /**
94      * bier-table. it's via a BIER ECMP-table.
95      */
96     FIB_PATH_TYPE_BIER_TABLE,
97     /**
98      * bier-fmask. it's via a BIER f-mask.
99      */
100     FIB_PATH_TYPE_BIER_FMASK,
101     /**
102      * Marker. Add new types before this one, then update it.
103      */
104     FIB_PATH_TYPE_LAST = FIB_PATH_TYPE_BIER_FMASK,
105 } __attribute__ ((packed)) fib_path_type_t;
106
107 /**
108  * The maximum number of path_types
109  */
110 #define FIB_PATH_TYPE_MAX (FIB_PATH_TYPE_LAST + 1)
111
112 #define FIB_PATH_TYPES {                                        \
113     [FIB_PATH_TYPE_ATTACHED_NEXT_HOP] = "attached-nexthop",     \
114     [FIB_PATH_TYPE_ATTACHED]          = "attached",             \
115     [FIB_PATH_TYPE_RECURSIVE]         = "recursive",            \
116     [FIB_PATH_TYPE_SPECIAL]           = "special",              \
117     [FIB_PATH_TYPE_EXCLUSIVE]         = "exclusive",            \
118     [FIB_PATH_TYPE_DEAG]              = "deag",                 \
119     [FIB_PATH_TYPE_INTF_RX]           = "intf-rx",              \
120     [FIB_PATH_TYPE_UDP_ENCAP]         = "udp-encap",            \
121     [FIB_PATH_TYPE_RECEIVE]           = "receive",              \
122     [FIB_PATH_TYPE_BIER_IMP]          = "bier-imp",             \
123     [FIB_PATH_TYPE_BIER_TABLE]        = "bier-table",           \
124     [FIB_PATH_TYPE_BIER_FMASK]        = "bier-fmask",           \
125 }
126
127 #define FOR_EACH_FIB_PATH_TYPE(_item)           \
128     for (_item = FIB_PATH_TYPE_FIRST;           \
129          _item <= FIB_PATH_TYPE_LAST;           \
130          _item++)
131
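/*
 * Illustrative sketch, not part of the original file: the names array
 * (FIB_PATH_TYPES, instantiated below as fib_path_type_names) and the
 * iterator macro above are typically combined when formatting, e.g. to
 * append every path-type name to a vppinfra format string s:
 *
 *   fib_path_type_t pt;
 *
 *   FOR_EACH_FIB_PATH_TYPE(pt)
 *   {
 *       s = format(s, "%s ", fib_path_type_names[pt]);
 *   }
 */
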
132 /**
133  * Enumeration of path operational (i.e. derived) attributes
134  */
135 typedef enum fib_path_oper_attribute_t_ {
136     /**
137      * Marker. Add new types after this one.
138      */
139     FIB_PATH_OPER_ATTRIBUTE_FIRST = 0,
140     /**
141      * The path forms part of a recursive loop.
142      */
143     FIB_PATH_OPER_ATTRIBUTE_RECURSIVE_LOOP = FIB_PATH_OPER_ATTRIBUTE_FIRST,
144     /**
145      * The path is resolved
146      */
147     FIB_PATH_OPER_ATTRIBUTE_RESOLVED,
148     /**
149      * The path is attached, despite what the next-hop may say.
150      */
151     FIB_PATH_OPER_ATTRIBUTE_ATTACHED,
152     /**
153      * The path has become a permanent drop.
154      */
155     FIB_PATH_OPER_ATTRIBUTE_DROP,
156     /**
157      * Marker. Add new types before this one, then update it.
158      */
159     FIB_PATH_OPER_ATTRIBUTE_LAST = FIB_PATH_OPER_ATTRIBUTE_DROP,
160 } __attribute__ ((packed)) fib_path_oper_attribute_t;
161
162 /**
163  * The maximum number of path operational attributes
164  */
165 #define FIB_PATH_OPER_ATTRIBUTE_MAX (FIB_PATH_OPER_ATTRIBUTE_LAST + 1)
166
167 #define FIB_PATH_OPER_ATTRIBUTES {                                      \
168     [FIB_PATH_OPER_ATTRIBUTE_RECURSIVE_LOOP] = "recursive-loop",        \
169     [FIB_PATH_OPER_ATTRIBUTE_RESOLVED]       = "resolved",              \
170     [FIB_PATH_OPER_ATTRIBUTE_ATTACHED]       = "attached",              \
171     [FIB_PATH_OPER_ATTRIBUTE_DROP]           = "drop",                  \
171 }
172
173 #define FOR_EACH_FIB_PATH_OPER_ATTRIBUTE(_item) \
174     for (_item = FIB_PATH_OPER_ATTRIBUTE_FIRST; \
175          _item <= FIB_PATH_OPER_ATTRIBUTE_LAST; \
176          _item++)
177
178 /**
179  * Path flags from the attributes
180  */
181 typedef enum fib_path_oper_flags_t_ {
182     FIB_PATH_OPER_FLAG_NONE = 0,
183     FIB_PATH_OPER_FLAG_RECURSIVE_LOOP = (1 << FIB_PATH_OPER_ATTRIBUTE_RECURSIVE_LOOP),
184     FIB_PATH_OPER_FLAG_DROP = (1 << FIB_PATH_OPER_ATTRIBUTE_DROP),
185     FIB_PATH_OPER_FLAG_RESOLVED = (1 << FIB_PATH_OPER_ATTRIBUTE_RESOLVED),
186     FIB_PATH_OPER_FLAG_ATTACHED = (1 << FIB_PATH_OPER_ATTRIBUTE_ATTACHED),
187 } __attribute__ ((packed)) fib_path_oper_flags_t;
188
189 /**
190  * A FIB path
191  */
192 typedef struct fib_path_t_ {
193     /**
194      * A path is a node in the FIB graph.
195      */
196     fib_node_t fp_node;
197
198     /**
199      * The index of the path-list to which this path belongs
200      */
201     u32 fp_pl_index;
202
203     /**
204      * This marks the start of the memory area used to hash
205      * the path
206      */
207     STRUCT_MARK(path_hash_start);
208
209     /**
210      * Configuration Flags
211      */
212     fib_path_cfg_flags_t fp_cfg_flags;
213
214     /**
215      * The type of the path. This is the selector for the union
216      */
217     fib_path_type_t fp_type;
218
219     /**
220      * The protocol of the next-hop, i.e. the address family of the
221      * next-hop's address. We can't derive this from the address itself
222      * since the address can be all zeros
223      */
224     dpo_proto_t fp_nh_proto;
225
226     /**
227      * UCMP [unnormalised] weigth
228      */
229     u8 fp_weight;
230
231     /**
232      * A path preference. 0 is the best.
233      * Only paths of the best preference, that are 'up', are considered
234      * for forwarding.
235      */
236     u8 fp_preference;
237
238     /**
239      * per-type union of the data required to resolve the path
240      */
241     union {
242         struct {
243             /**
244              * The next-hop
245              */
246             ip46_address_t fp_nh;
247             /**
248              * The interface
249              */
250             u32 fp_interface;
251         } attached_next_hop;
252         struct {
253             /**
254              * The interface
255              */
256             u32 fp_interface;
257         } attached;
258         struct {
259             union
260             {
261                 /**
262                  * The next-hop
263                  */
264                 ip46_address_t fp_ip;
265                 struct {
266                     /**
267                      * The local label to resolve through.
268                      */
269                     mpls_label_t fp_local_label;
270                     /**
271                      * The EOS bit of the resolving label
272                      */
273                     mpls_eos_bit_t fp_eos;
274                 };
275             } fp_nh;
276             union {
277                 /**
278                  * The FIB table index in which to find the next-hop.
279                  */
280                 fib_node_index_t fp_tbl_id;
281                 /**
282                  * The BIER FIB the fmask is in
283                  */
284                 index_t fp_bier_fib;
285             };
286         } recursive;
287         struct {
288             /**
289              * The next-hop
290              */
291             ip46_address_t fp_nh;
292             /**
293              * The BIER FIB the fmask is in
294              */
295             index_t fp_bier_fib;
296         } bier_fmask;
297         struct {
298             /**
299              * The BIER table's ID
300              */
301             bier_table_id_t fp_bier_tbl;
302         } bier_table;
303         struct {
304             /**
305              * The BIER imposition object
306              * this is part of the path's key, since the index_t
307              * of an imposition object is the object's key.
308              */
309             index_t fp_bier_imp;
310         } bier_imp;
311         struct {
312             /**
313      * The FIB index in which to perform the next lookup
314              */
315             fib_node_index_t fp_tbl_id;
316             /**
317              * The RPF-ID to tag the packets with
318              */
319             fib_rpf_id_t fp_rpf_id;
320         } deag;
321         struct {
322         } special;
323         struct {
324             /**
325              * The user provided 'exclusive' DPO
326              */
327             dpo_id_t fp_ex_dpo;
328         } exclusive;
329         struct {
330             /**
331              * The interface on which the local address is configured
332              */
333             u32 fp_interface;
334             /**
335              * The next-hop
336              */
337             ip46_address_t fp_addr;
338         } receive;
339         struct {
340             /**
341              * The interface on which the packets will be input.
342              */
343             u32 fp_interface;
344         } intf_rx;
345         struct {
346             /**
347              * The UDP Encap object this path resolves through
348              */
349             u32 fp_udp_encap_id;
350         } udp_encap;
351     };
352     STRUCT_MARK(path_hash_end);
353
354     /**
355      * Members in this last section represent information that is
356      * derived during resolution. It should not be copied to new paths
357      * nor compared.
358      */
359
360     /**
361      * Operational Flags
362      */
363     fib_path_oper_flags_t fp_oper_flags;
364
365     union {
366         /**
367          * the resolving via fib. this is derived state and hence not
368          * part of the path's hash.
369          */
370         fib_node_index_t fp_via_fib;
371         /**
372          * the resolving bier-fmask
373          */
374         index_t fp_via_bier_fmask;
375         /**
376          * the resolving bier-table
377          */
378         index_t fp_via_bier_tbl;
379     };
380
381     /**
382      * The Data-path objects through which this path resolves for IP.
383      */
384     dpo_id_t fp_dpo;
385
386     /**
387      * the index of this path in the parent's child list.
388      */
389     u32 fp_sibling;
390 } fib_path_t;
391
392 /*
393  * Array of strings/names for the path types and attributes
394  */
395 static const char *fib_path_type_names[] = FIB_PATH_TYPES;
396 static const char *fib_path_oper_attribute_names[] = FIB_PATH_OPER_ATTRIBUTES;
397 static const char *fib_path_cfg_attribute_names[]  = FIB_PATH_CFG_ATTRIBUTES;
398
399 /*
400  * The memory pool from which we allocate all the paths
401  */
402 static fib_path_t *fib_path_pool;
403
404 /*
405  * Debug macro
406  */
407 #ifdef FIB_DEBUG
408 #define FIB_PATH_DBG(_p, _fmt, _args...)                        \
409 {                                                               \
410     u8 *_tmp = NULL;                                            \
411     _tmp = fib_path_format(fib_path_get_index(_p), _tmp);       \
412     clib_warning("path:[%d:%s]:" _fmt,                          \
413                  fib_path_get_index(_p), _tmp,                  \
414                  ##_args);                                      \
415     vec_free(_tmp);                                             \
416 }
417 #else
418 #define FIB_PATH_DBG(_p, _fmt, _args...)
419 #endif
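
/*
 * Illustrative usage of the debug macro above (not part of the original
 * file): it formats the path and logs via clib_warning() only when
 * FIB_DEBUG is defined, otherwise it expands to nothing:
 *
 *   FIB_PATH_DBG(path, "stacking on adj:%d", path->fp_dpo.dpoi_index);
 */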
420
421 static fib_path_t *
422 fib_path_get (fib_node_index_t index)
423 {
424     return (pool_elt_at_index(fib_path_pool, index));
425 }
426
427 static fib_node_index_t 
428 fib_path_get_index (fib_path_t *path)
429 {
430     return (path - fib_path_pool);
431 }
432
433 static fib_node_t *
434 fib_path_get_node (fib_node_index_t index)
435 {
436     return ((fib_node_t*)fib_path_get(index));
437 }
438
439 static fib_path_t*
440 fib_path_from_fib_node (fib_node_t *node)
441 {
442     ASSERT(FIB_NODE_TYPE_PATH == node->fn_type);
443     return ((fib_path_t*)node);
444 }
445
446 u8 *
447 format_fib_path (u8 * s, va_list * args)
448 {
449     fib_path_t *path = va_arg (*args, fib_path_t *);
450     vnet_main_t * vnm = vnet_get_main();
451     fib_path_oper_attribute_t oattr;
452     fib_path_cfg_attribute_t cattr;
453
454     s = format (s, "      index:%d ", fib_path_get_index(path));
455     s = format (s, "pl-index:%d ", path->fp_pl_index);
456     s = format (s, "%U ", format_dpo_proto, path->fp_nh_proto);
457     s = format (s, "weight=%d ", path->fp_weight);
458     s = format (s, "pref=%d ", path->fp_preference);
459     s = format (s, "%s: ", fib_path_type_names[path->fp_type]);
460     if (FIB_PATH_OPER_FLAG_NONE != path->fp_oper_flags) {
461         s = format(s, " oper-flags:");
462         FOR_EACH_FIB_PATH_OPER_ATTRIBUTE(oattr) {
463             if ((1<<oattr) & path->fp_oper_flags) {
464                 s = format (s, "%s,", fib_path_oper_attribute_names[oattr]);
465             }
466         }
467     }
468     if (FIB_PATH_CFG_FLAG_NONE != path->fp_cfg_flags) {
469         s = format(s, " cfg-flags:");
470         FOR_EACH_FIB_PATH_CFG_ATTRIBUTE(cattr) {
471             if ((1<<cattr) & path->fp_cfg_flags) {
472                 s = format (s, "%s,", fib_path_cfg_attribute_names[cattr]);
473             }
474         }
475     }
476     s = format(s, "\n       ");
477
478     switch (path->fp_type)
479     {
480     case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
481         s = format (s, "%U", format_ip46_address,
482                     &path->attached_next_hop.fp_nh,
483                     IP46_TYPE_ANY);
484         if (path->fp_oper_flags & FIB_PATH_OPER_FLAG_DROP)
485         {
486             s = format (s, " if_index:%d", path->attached_next_hop.fp_interface);
487         }
488         else
489         {
490             s = format (s, " %U",
491                         format_vnet_sw_interface_name,
492                         vnm,
493                         vnet_get_sw_interface(
494                             vnm,
495                             path->attached_next_hop.fp_interface));
496             if (vnet_sw_interface_is_p2p(vnet_get_main(),
497                                          path->attached_next_hop.fp_interface))
498             {
499                 s = format (s, " (p2p)");
500             }
501         }
502         if (!dpo_id_is_valid(&path->fp_dpo))
503         {
504             s = format(s, "\n          unresolved");
505         }
506         else
507         {
508             s = format(s, "\n          %U",
509                        format_dpo_id,
510                        &path->fp_dpo, 13);
511         }
512         break;
513     case FIB_PATH_TYPE_ATTACHED:
514         if (path->fp_oper_flags & FIB_PATH_OPER_FLAG_DROP)
515         {
516             s = format (s, " if_index:%d", path->attached_next_hop.fp_interface);
517         }
518         else
519         {
520             s = format (s, " %U",
521                         format_vnet_sw_interface_name,
522                         vnm,
523                         vnet_get_sw_interface(
524                             vnm,
525                             path->attached.fp_interface));
526         }
527         break;
528     case FIB_PATH_TYPE_RECURSIVE:
529         if (DPO_PROTO_MPLS == path->fp_nh_proto)
530         {
531             s = format (s, "via %U %U",
532                         format_mpls_unicast_label,
533                         path->recursive.fp_nh.fp_local_label,
534                         format_mpls_eos_bit,
535                         path->recursive.fp_nh.fp_eos);
536         }
537         else
538         {
539             s = format (s, "via %U",
540                         format_ip46_address,
541                         &path->recursive.fp_nh.fp_ip,
542                         IP46_TYPE_ANY);
543         }
544         s = format (s, " in fib:%d",
545                     path->recursive.fp_tbl_id,
546                     path->fp_via_fib); 
547         s = format (s, " via-fib:%d", path->fp_via_fib); 
548         s = format (s, " via-dpo:[%U:%d]",
549                     format_dpo_type, path->fp_dpo.dpoi_type, 
550                     path->fp_dpo.dpoi_index);
551
552         break;
553     case FIB_PATH_TYPE_UDP_ENCAP:
554         s = format (s, " UDP-encap ID:%d", path->udp_encap.fp_udp_encap_id);
555         break;
556     case FIB_PATH_TYPE_BIER_TABLE:
557         s = format (s, "via bier-table:[%U}",
558                     format_bier_table_id,
559                     &path->bier_table.fp_bier_tbl);
560         s = format (s, " via-dpo:[%U:%d]",
561                     format_dpo_type, path->fp_dpo.dpoi_type,
562                     path->fp_dpo.dpoi_index);
563         break;
564     case FIB_PATH_TYPE_BIER_FMASK:
565         s = format (s, "via %U",
566                     format_ip46_address,
567                     &path->bier_fmask.fp_nh,
568                     IP46_TYPE_ANY);
569         s = format (s, " in BIER-fib:%d",
570                     path->bier_fmask.fp_bier_fib,
571                     path->fp_via_fib); 
572         s = format (s, " via-fmask:%d", path->fp_via_bier_fmask); 
573         s = format (s, " via-dpo:[%U:%d]",
574                     format_dpo_type, path->fp_dpo.dpoi_type, 
575                     path->fp_dpo.dpoi_index);
576         break;
577     case FIB_PATH_TYPE_BIER_IMP:
578         s = format (s, "via %U", format_bier_imp,
579                     path->bier_imp.fp_bier_imp, 0, BIER_SHOW_BRIEF);
580         break;
581     case FIB_PATH_TYPE_RECEIVE:
582     case FIB_PATH_TYPE_INTF_RX:
583     case FIB_PATH_TYPE_SPECIAL:
584     case FIB_PATH_TYPE_DEAG:
585     case FIB_PATH_TYPE_EXCLUSIVE:
586         if (dpo_id_is_valid(&path->fp_dpo))
587         {
588             s = format(s, "%U", format_dpo_id,
589                        &path->fp_dpo, 2);
590         }
591         break;
592     }
593     return (s);
594 }
595
596 u8 *
597 fib_path_format (fib_node_index_t pi, u8 *s)
598 {
599     fib_path_t *path;
600
601     path = fib_path_get(pi);
602     ASSERT(NULL != path);
603
604     return (format (s, "%U", format_fib_path, path));
605 }
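
/*
 * Illustrative caller sketch (an assumption, not taken from this file):
 * the returned format vector is owned by the caller and must be freed:
 *
 *   u8 *s = NULL;
 *
 *   s = fib_path_format(path_index, s);
 *   clib_warning("%v", s);
 *   vec_free(s);
 */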
606
607 u8 *
608 fib_path_adj_format (fib_node_index_t pi,
609                      u32 indent,
610                      u8 *s)
611 {
612     fib_path_t *path;
613
614     path = fib_path_get(pi);
615     ASSERT(NULL != path);
616
617     if (!dpo_id_is_valid(&path->fp_dpo))
618     {
619         s = format(s, " unresolved");
620     }
621     else
622     {
623         s = format(s, "%U", format_dpo_id,
624                    &path->fp_dpo, 2);
625     }
626
627     return (s);
628 }
629
630 /*
631  * fib_path_last_lock_gone
632  *
633  * We don't share paths, we share path lists, so the [un]lock functions
634  * are no-ops
635  */
636 static void
637 fib_path_last_lock_gone (fib_node_t *node)
638 {
639     ASSERT(0);
640 }
641
642 static adj_index_t
643 fib_path_attached_next_hop_get_adj (fib_path_t *path,
644                                     vnet_link_t link)
645 {
646     if (vnet_sw_interface_is_p2p(vnet_get_main(),
647                                  path->attached_next_hop.fp_interface))
648     {
649         /*
650          * if the interface is p2p then the adj for the specific
651          * neighbour on that link will never exist. on p2p links
652          * the subnet address (the attached route) links to the
653          * auto-adj (see below), we want that adj here too.
654          */
655         return (adj_nbr_add_or_lock(dpo_proto_to_fib(path->fp_nh_proto),
656                                     link,
657                                     &zero_addr,
658                                     path->attached_next_hop.fp_interface));
659     }
660     else
661     {
662         return (adj_nbr_add_or_lock(dpo_proto_to_fib(path->fp_nh_proto),
663                                     link,
664                                     &path->attached_next_hop.fp_nh,
665                                     path->attached_next_hop.fp_interface));
666     }
667 }
668
669 static void
670 fib_path_attached_next_hop_set (fib_path_t *path)
671 {
672     /*
673      * resolve directly via the adjacency described by the
674      * interface and next-hop
675      */
676     dpo_set(&path->fp_dpo,
677             DPO_ADJACENCY,
678             path->fp_nh_proto,
679             fib_path_attached_next_hop_get_adj(
680                  path,
681                  dpo_proto_to_link(path->fp_nh_proto)));
682
683     /*
684      * become a child of the adjacency so we receive updates
685      * when its rewrite changes
686      */
687     path->fp_sibling = adj_child_add(path->fp_dpo.dpoi_index,
688                                      FIB_NODE_TYPE_PATH,
689                                      fib_path_get_index(path));
690
691     if (!vnet_sw_interface_is_admin_up(vnet_get_main(),
692                                       path->attached_next_hop.fp_interface) ||
693         !adj_is_up(path->fp_dpo.dpoi_index))
694     {
695         path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
696     }
697 }
698
699 static adj_index_t
700 fib_path_attached_get_adj (fib_path_t *path,
701                            vnet_link_t link)
702 {
703     if (vnet_sw_interface_is_p2p(vnet_get_main(),
704                                  path->attached.fp_interface))
705     {
706         /*
707          * point-2-point interfaces do not require a glean, since
708          * there is nothing to ARP. Install a rewrite/nbr adj instead
709          */
710         return (adj_nbr_add_or_lock(dpo_proto_to_fib(path->fp_nh_proto),
711                                     link,
712                                     &zero_addr,
713                                     path->attached.fp_interface));
714     }
715     else
716     {
717         return (adj_glean_add_or_lock(dpo_proto_to_fib(path->fp_nh_proto),
718                                       path->attached.fp_interface,
719                                       NULL));
720     }
721 }
722
723 /*
724  * create or update the path's recursive adj
725  */
726 static void
727 fib_path_recursive_adj_update (fib_path_t *path,
728                                fib_forward_chain_type_t fct,
729                                dpo_id_t *dpo)
730 {
731     dpo_id_t via_dpo = DPO_INVALID;
732
733     /*
734      * get the DPO to resolve through from the via-entry
735      */
736     fib_entry_contribute_forwarding(path->fp_via_fib,
737                                     fct,
738                                     &via_dpo);
739
740
741     /*
742      * hope for the best - clear if restrictions apply.
743      */
744     path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RESOLVED;
745
746     /*
747      * Validate any recursion constraints and over-ride the via
748      * adj if not met
749      */
750     if (path->fp_oper_flags & FIB_PATH_OPER_FLAG_RECURSIVE_LOOP)
751     {
752         path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
753         dpo_copy(&via_dpo, drop_dpo_get(path->fp_nh_proto));
754     }
755     else if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_RESOLVE_HOST)
756     {
757         /*
758          * the via FIB must be a host route.
759          * note the via FIB just added will always be a host route
760          * since it is an RR source added host route. So what we need to
761          * check is whether the route has other sources. If it does then
762          * some other source has added it as a host route. If it doesn't
763          * then it was added only here and inherits forwarding from a cover.
764          * the cover is not a host route.
765          * The RR source is the lowest priority source, so we check if it
766          * is the best. if it is there are no other sources.
767          */
768         if (fib_entry_get_best_source(path->fp_via_fib) >= FIB_SOURCE_RR)
769         {
770             path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
771             dpo_copy(&via_dpo, drop_dpo_get(path->fp_nh_proto));
772
773             /*
774              * PIC edge trigger. let the load-balance maps know
775              */
776             load_balance_map_path_state_change(fib_path_get_index(path));
777         }
778     }
779     else if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_RESOLVE_ATTACHED)
780     {
781         /*
782          * RR source entries inherit the flags from the cover, so
783          * we can check the via directly
784          */
785         if (!(FIB_ENTRY_FLAG_ATTACHED & fib_entry_get_flags(path->fp_via_fib)))
786         {
787             path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
788             dpo_copy(&via_dpo, drop_dpo_get(path->fp_nh_proto));
789
790             /*
791              * PIC edge trigger. let the load-balance maps know
792              */
793             load_balance_map_path_state_change(fib_path_get_index(path));
794         }
795     }
796     /*
797      * check for over-riding factors on the FIB entry itself
798      */
799     if (!fib_entry_is_resolved(path->fp_via_fib))
800     {
801         path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
802         dpo_copy(&via_dpo, drop_dpo_get(path->fp_nh_proto));
803
804         /*
805          * PIC edge trigger. let the load-balance maps know
806          */
807         load_balance_map_path_state_change(fib_path_get_index(path));
808     }
809
810     /*
811      * If this path is contributing a drop, then it's not resolved
812      */
813     if (dpo_is_drop(&via_dpo) || load_balance_is_drop(&via_dpo))
814     {
815         path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
816     }
817
818     /*
819      * update the path's contributed DPO
820      */
821     dpo_copy(dpo, &via_dpo);
822
823     FIB_PATH_DBG(path, "recursive update:");
824
825     dpo_reset(&via_dpo);
826 }
827
828 /*
829  * re-evaluate the forwarding state for a via-fmask path
830  */
831 static void
832 fib_path_bier_fmask_update (fib_path_t *path,
833                             dpo_id_t *dpo)
834 {
835     bier_fmask_contribute_forwarding(path->fp_via_bier_fmask, dpo);
836
837     /*
838      * if we are stacking on the drop, then the path is not resolved
839      */
840     if (dpo_is_drop(dpo))
841     {
842         path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
843     }
844     else
845     {
846         path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RESOLVED;
847     }
848 }
849
850 /*
851  * fib_path_is_permanent_drop
852  *
853  * Return !0 if the path is configured to permanently drop,
854  * despite other attributes.
855  */
856 static int
857 fib_path_is_permanent_drop (fib_path_t *path)
858 {
859     return ((path->fp_cfg_flags & FIB_PATH_CFG_FLAG_DROP) ||
860             (path->fp_oper_flags & FIB_PATH_OPER_FLAG_DROP));
861 }
862
863 /*
864  * fib_path_unresolve
865  *
866  * Remove our dependency on the resolution target
867  */
868 static void
869 fib_path_unresolve (fib_path_t *path)
870 {
871     /*
872      * the forced drop path does not need unresolving
873      */
874     if (fib_path_is_permanent_drop(path))
875     {
876         return;
877     }
878
879     switch (path->fp_type)
880     {
881     case FIB_PATH_TYPE_RECURSIVE:
882         if (FIB_NODE_INDEX_INVALID != path->fp_via_fib)
883         {
884             fib_prefix_t pfx;
885
886             fib_entry_get_prefix(path->fp_via_fib, &pfx);
887             fib_entry_child_remove(path->fp_via_fib,
888                                    path->fp_sibling);
889             fib_table_entry_special_remove(path->recursive.fp_tbl_id,
890                                            &pfx,
891                                            FIB_SOURCE_RR);
892             path->fp_via_fib = FIB_NODE_INDEX_INVALID;
893         }
894         break;
895     case FIB_PATH_TYPE_BIER_FMASK:
896         if (FIB_NODE_INDEX_INVALID != path->fp_via_bier_fmask)
897         {
898             bier_fmask_child_remove(path->fp_via_bier_fmask,
899                                     path->fp_sibling);
900             path->fp_via_bier_fmask = FIB_NODE_INDEX_INVALID;
901         }
902         break;
903     case FIB_PATH_TYPE_BIER_IMP:
904         bier_imp_unlock(path->fp_dpo.dpoi_index);
905         break;
906     case FIB_PATH_TYPE_BIER_TABLE:
907         bier_table_ecmp_unlock(path->fp_via_bier_tbl);
908         break;
909     case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
910         adj_child_remove(path->fp_dpo.dpoi_index,
911                          path->fp_sibling);
912         adj_unlock(path->fp_dpo.dpoi_index);
913         break;
914     case FIB_PATH_TYPE_ATTACHED:
915         if (DPO_PROTO_ETHERNET != path->fp_nh_proto)
916         {
917             adj_child_remove(path->fp_dpo.dpoi_index,
918                              path->fp_sibling);
919             adj_unlock(path->fp_dpo.dpoi_index);
920         }
921         break;
922     case FIB_PATH_TYPE_UDP_ENCAP:
923         udp_encap_unlock_w_index(path->fp_dpo.dpoi_index);
924         break;
925     case FIB_PATH_TYPE_EXCLUSIVE:
926         dpo_reset(&path->exclusive.fp_ex_dpo);
927         break;
928     case FIB_PATH_TYPE_SPECIAL:
929     case FIB_PATH_TYPE_RECEIVE:
930     case FIB_PATH_TYPE_INTF_RX:
931     case FIB_PATH_TYPE_DEAG:
932         /*
933          * these hold only the path's DPO, which is reset below.
934          */
935         break;
936     }
937
938     /*
939      * release the adj we were holding and pick up the
940      * drop just in case.
941      */
942     dpo_reset(&path->fp_dpo);
943     path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
944
945     return;
946 }
947
948 static fib_forward_chain_type_t
949 fib_path_to_chain_type (const fib_path_t *path)
950 {
951     if (DPO_PROTO_MPLS == path->fp_nh_proto)
952     {
953         if (FIB_PATH_TYPE_RECURSIVE == path->fp_type &&
954             MPLS_EOS == path->recursive.fp_nh.fp_eos)
955         {
956             return (FIB_FORW_CHAIN_TYPE_MPLS_EOS);
957         }
958         else
959         {
960             return (FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS);
961         }
962     }
963     else
964     {
965         return (fib_forw_chain_type_from_dpo_proto(path->fp_nh_proto));
966     }
967 }
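
/*
 * Illustrative examples of the mapping above (assumptions about values
 * defined elsewhere in fib_types.h):
 *  - a recursive path via an MPLS label with the EOS bit set yields
 *    FIB_FORW_CHAIN_TYPE_MPLS_EOS;
 *  - an IPv4 next-hop (DPO_PROTO_IP4) yields the chain type returned by
 *    fib_forw_chain_type_from_dpo_proto(DPO_PROTO_IP4), i.e. the IPv4
 *    unicast chain.
 */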
968
969 /*
970  * fib_path_back_walk_notify
971  *
972  * A back walk has reached this path.
973  */
974 static fib_node_back_walk_rc_t
975 fib_path_back_walk_notify (fib_node_t *node,
976                            fib_node_back_walk_ctx_t *ctx)
977 {
978     fib_path_t *path;
979
980     path = fib_path_from_fib_node(node);
981
982     switch (path->fp_type)
983     {
984     case FIB_PATH_TYPE_RECURSIVE:
985         if (FIB_NODE_BW_REASON_FLAG_EVALUATE & ctx->fnbw_reason)
986         {
987             /*
988              * modify the recursive adjacency to use the new forwarding
989              * of the via-fib.
990              * this update is visible to packets in flight in the DP.
991              */
992             fib_path_recursive_adj_update(
993                 path,
994                 fib_path_to_chain_type(path),
995                 &path->fp_dpo);
996         }
997         if ((FIB_NODE_BW_REASON_FLAG_ADJ_UPDATE & ctx->fnbw_reason) ||
998             (FIB_NODE_BW_REASON_FLAG_ADJ_DOWN   & ctx->fnbw_reason))
999         {
1000             /*
1001              * ADJ updates (complete<->incomplete) do not need to propagate to
1002              * recursive entries.
1003              * The only reason it's needed as far back as here is that the adj
1004              * and the incomplete adj are a different DPO type, so the LBs need
1005              * to re-stack.
1006              * If this walk was quashed in the fib_entry, then any non-fib_path
1007              * children (like tunnels that collapse out the LB when they stack)
1008              * would not see the update.
1009              */
1010             return (FIB_NODE_BACK_WALK_CONTINUE);
1011         }
1012         break;
1013     case FIB_PATH_TYPE_BIER_FMASK:
1014         if (FIB_NODE_BW_REASON_FLAG_EVALUATE & ctx->fnbw_reason)
1015         {
1016             /*
1017              * update to use the BIER fmask's new forwarding
1018              */
1019             fib_path_bier_fmask_update(path, &path->fp_dpo);
1020         }
1021         if ((FIB_NODE_BW_REASON_FLAG_ADJ_UPDATE & ctx->fnbw_reason) ||
1022             (FIB_NODE_BW_REASON_FLAG_ADJ_DOWN   & ctx->fnbw_reason))
1023         {
1024             /*
1025              * ADJ updates (complete<->incomplete) do not need to propagate to
1026              * recursive entries.
1027              * The only reason it's needed as far back as here is that the adj
1028              * and the incomplete adj are a different DPO type, so the LBs need
1029              * to re-stack.
1030              * If this walk was quashed in the fib_entry, then any non-fib_path
1031              * children (like tunnels that collapse out the LB when they stack)
1032              * would not see the update.
1033              */
1034             return (FIB_NODE_BACK_WALK_CONTINUE);
1035         }
1036         break;
1037     case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
1038         /*
1039          * ADJ_UPDATE backwalks pass silently through here and up to
1040          * the path-list when the multipath adj collapse occurs.
1041          * The reason we do this is that the assumption is that VPP
1042          * runs in an environment where the Control-Plane is remote
1043          * and hence reacts slowly to link up/down. In order to remove
1044          * this down link from the ECMP set quickly, we back-walk.
1045          * VPP also has dedicated CPUs, so we are not stealing resources
1046          * from the CP to do so.
1047          */
1049         if (FIB_NODE_BW_REASON_FLAG_INTERFACE_UP & ctx->fnbw_reason)
1050         {
1051             if (path->fp_oper_flags & FIB_PATH_OPER_FLAG_RESOLVED)
1052             {
1053                 /*
1054                  * already resolved. no need to walk back again
1055                  */
1056                 return (FIB_NODE_BACK_WALK_CONTINUE);
1057             }
1058             path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RESOLVED;
1059         }
1060         if (FIB_NODE_BW_REASON_FLAG_INTERFACE_DOWN & ctx->fnbw_reason)
1061         {
1062             if (!(path->fp_oper_flags & FIB_PATH_OPER_FLAG_RESOLVED))
1063             {
1064                 /*
1065                  * already unresolved. no need to walk back again
1066                  */
1067                 return (FIB_NODE_BACK_WALK_CONTINUE);
1068             }
1069             path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
1070         }
1071         if (FIB_NODE_BW_REASON_FLAG_INTERFACE_DELETE & ctx->fnbw_reason)
1072         {
1073             /*
1074              * The interface this path resolves through has been deleted.
1075              * This will leave the path in a permanent drop state. The route
1076              * needs to be removed and readded (and hence the path-list deleted)
1077              * before it can forward again.
1078              */
1079             fib_path_unresolve(path);
1080             path->fp_oper_flags |= FIB_PATH_OPER_FLAG_DROP;
1081         }
1082         if (FIB_NODE_BW_REASON_FLAG_ADJ_UPDATE & ctx->fnbw_reason)
1083         {
1084             /*
1085              * restack the DPO to pick up the correct DPO sub-type
1086              */
1087             uword if_is_up;
1088             adj_index_t ai;
1089
1090             if_is_up = vnet_sw_interface_is_admin_up(
1091                            vnet_get_main(),
1092                            path->attached_next_hop.fp_interface);
1093
1094             ai = fib_path_attached_next_hop_get_adj(
1095                      path,
1096                      dpo_proto_to_link(path->fp_nh_proto));
1097
1098             path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
1099             if (if_is_up && adj_is_up(ai))
1100             {
1101                 path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RESOLVED;
1102             }
1103
1104             dpo_set(&path->fp_dpo, DPO_ADJACENCY, path->fp_nh_proto, ai);
1105             adj_unlock(ai);
1106
1107             if (!if_is_up)
1108             {
1109                 /*
1110                  * If the interface is not up there is no reason to walk
1111                  * back to children. if we did they would only evaluate
1112                  * that this path is unresolved and hence it would
1113                  * not contribute the adjacency - so it would be wasted
1114                  * CPU time.
1115                  */
1116                 return (FIB_NODE_BACK_WALK_CONTINUE);
1117             }
1118         }
1119         if (FIB_NODE_BW_REASON_FLAG_ADJ_DOWN & ctx->fnbw_reason)
1120         {
1121             if (!(path->fp_oper_flags & FIB_PATH_OPER_FLAG_RESOLVED))
1122             {
1123                 /*
1124                  * already unresolved. no need to walk back again
1125                  */
1126                 return (FIB_NODE_BACK_WALK_CONTINUE);
1127             }
1128             /*
1129              * the adj has gone down. the path is no longer resolved.
1130              */
1131             path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
1132         }
1133         break;
1134     case FIB_PATH_TYPE_ATTACHED:
1135         /*
1136          * FIXME; this could schedule a lower priority walk, since attached
1137          * routes are not usually in ECMP configurations so the backwalk to
1138          * the FIB entry does not need to be high priority
1139          */
1140         if (FIB_NODE_BW_REASON_FLAG_INTERFACE_UP & ctx->fnbw_reason)
1141         {
1142             path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RESOLVED;
1143         }
1144         if (FIB_NODE_BW_REASON_FLAG_INTERFACE_DOWN & ctx->fnbw_reason)
1145         {
1146             path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
1147         }
1148         if (FIB_NODE_BW_REASON_FLAG_INTERFACE_DELETE & ctx->fnbw_reason)
1149         {
1150             fib_path_unresolve(path);
1151             path->fp_oper_flags |= FIB_PATH_OPER_FLAG_DROP;
1152         }
1153         break;
1154     case FIB_PATH_TYPE_UDP_ENCAP:
1155     {
1156         dpo_id_t via_dpo = DPO_INVALID;
1157
1158         /*
1159          * hope for the best - clear if restrictions apply.
1160          */
1161         path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RESOLVED;
1162
1163         udp_encap_contribute_forwarding(path->udp_encap.fp_udp_encap_id,
1164                                         path->fp_nh_proto,
1165                                         &via_dpo);
1166         /*
1167          * If this path is contributing a drop, then it's not resolved
1168          */
1169         if (dpo_is_drop(&via_dpo) || load_balance_is_drop(&via_dpo))
1170         {
1171             path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
1172         }
1173
1174         /*
1175          * update the path's contributed DPO
1176          */
1177         dpo_copy(&path->fp_dpo, &via_dpo);
1178         dpo_reset(&via_dpo);
1179         break;
1180     }
1181     case FIB_PATH_TYPE_INTF_RX:
1182         ASSERT(0);
1183     case FIB_PATH_TYPE_DEAG:
1184         /*
1185          * FIXME When VRF delete is allowed this will need a poke.
1186          */
1187     case FIB_PATH_TYPE_SPECIAL:
1188     case FIB_PATH_TYPE_RECEIVE:
1189     case FIB_PATH_TYPE_EXCLUSIVE:
1190     case FIB_PATH_TYPE_BIER_TABLE:
1191     case FIB_PATH_TYPE_BIER_IMP:
1192         /*
1193          * these path types have no parents. so to be
1194          * walked from one is unexpected.
1195          */
1196         ASSERT(0);
1197         break;
1198     }
1199
1200     /*
1201      * propagate the backwalk further to the path-list
1202      */
1203     fib_path_list_back_walk(path->fp_pl_index, ctx);
1204
1205     return (FIB_NODE_BACK_WALK_CONTINUE);
1206 }
1207
1208 static void
1209 fib_path_memory_show (void)
1210 {
1211     fib_show_memory_usage("Path",
1212                           pool_elts(fib_path_pool),
1213                           pool_len(fib_path_pool),
1214                           sizeof(fib_path_t));
1215 }
1216
1217 /*
1218  * The FIB path's graph node virtual function table
1219  */
1220 static const fib_node_vft_t fib_path_vft = {
1221     .fnv_get = fib_path_get_node,
1222     .fnv_last_lock = fib_path_last_lock_gone,
1223     .fnv_back_walk = fib_path_back_walk_notify,
1224     .fnv_mem_show = fib_path_memory_show,
1225 };
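
/*
 * Note (an assumption about code outside this excerpt): this vft is
 * registered with the FIB graph during module initialisation, along the
 * lines of:
 *
 *   fib_node_register_type(FIB_NODE_TYPE_PATH, &fib_path_vft);
 *
 * after which back-walks that reach a FIB_NODE_TYPE_PATH node dispatch
 * into fib_path_back_walk_notify() above.
 */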
1226
1227 static fib_path_cfg_flags_t
1228 fib_path_route_flags_to_cfg_flags (const fib_route_path_t *rpath)
1229 {
1230     fib_path_cfg_flags_t cfg_flags = FIB_PATH_CFG_FLAG_NONE;
1231
1232     if (rpath->frp_flags & FIB_ROUTE_PATH_RESOLVE_VIA_HOST)
1233         cfg_flags |= FIB_PATH_CFG_FLAG_RESOLVE_HOST;
1234     if (rpath->frp_flags & FIB_ROUTE_PATH_RESOLVE_VIA_ATTACHED)
1235         cfg_flags |= FIB_PATH_CFG_FLAG_RESOLVE_ATTACHED;
1236     if (rpath->frp_flags & FIB_ROUTE_PATH_LOCAL)
1237         cfg_flags |= FIB_PATH_CFG_FLAG_LOCAL;
1238     if (rpath->frp_flags & FIB_ROUTE_PATH_ATTACHED)
1239         cfg_flags |= FIB_PATH_CFG_FLAG_ATTACHED;
1240     if (rpath->frp_flags & FIB_ROUTE_PATH_INTF_RX)
1241         cfg_flags |= FIB_PATH_CFG_FLAG_INTF_RX;
1242     if (rpath->frp_flags & FIB_ROUTE_PATH_RPF_ID)
1243         cfg_flags |= FIB_PATH_CFG_FLAG_RPF_ID;
1244     if (rpath->frp_flags & FIB_ROUTE_PATH_EXCLUSIVE)
1245         cfg_flags |= FIB_PATH_CFG_FLAG_EXCLUSIVE;
1246     if (rpath->frp_flags & FIB_ROUTE_PATH_DROP)
1247         cfg_flags |= FIB_PATH_CFG_FLAG_DROP;
1248     if (rpath->frp_flags & FIB_ROUTE_PATH_SOURCE_LOOKUP)
1249         cfg_flags |= FIB_PATH_CFG_FLAG_DEAG_SRC;
1250
1251     return (cfg_flags);
1252 }
1253
1254 /*
1255  * fib_path_create
1256  *
1257  * Create and initialise a new path object.
1258  * return the index of the path.
1259  */
1260 fib_node_index_t
1261 fib_path_create (fib_node_index_t pl_index,
1262                  const fib_route_path_t *rpath)
1263 {
1264     fib_path_t *path;
1265
1266     pool_get(fib_path_pool, path);
1267     memset(path, 0, sizeof(*path));
1268
1269     fib_node_init(&path->fp_node,
1270                   FIB_NODE_TYPE_PATH);
1271
1272     dpo_reset(&path->fp_dpo);
1273     path->fp_pl_index = pl_index;
1274     path->fp_nh_proto = rpath->frp_proto;
1275     path->fp_via_fib = FIB_NODE_INDEX_INVALID;
1276     path->fp_weight = rpath->frp_weight;
1277     if (0 == path->fp_weight)
1278     {
1279         /*
1280          * a weight of 0 is a meaningless value. We could either reject it, and thus force
1281          * clients to always use 1, or we can accept it and fix it up appropriately.
1282          */
1283         path->fp_weight = 1;
1284     }
1285     path->fp_preference = rpath->frp_preference;
1286     path->fp_cfg_flags = fib_path_route_flags_to_cfg_flags(rpath);
1287
1288     /*
1289      * deduce the path's type from the parameters and save what is needed.
1290      */
1291     if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_LOCAL)
1292     {
1293         path->fp_type = FIB_PATH_TYPE_RECEIVE;
1294         path->receive.fp_interface = rpath->frp_sw_if_index;
1295         path->receive.fp_addr = rpath->frp_addr;
1296     }
1297     else if (rpath->frp_flags & FIB_ROUTE_PATH_UDP_ENCAP)
1298     {
1299         path->fp_type = FIB_PATH_TYPE_UDP_ENCAP;
1300         path->udp_encap.fp_udp_encap_id = rpath->frp_udp_encap_id;
1301     }
1302     else if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_INTF_RX)
1303     {
1304         path->fp_type = FIB_PATH_TYPE_INTF_RX;
1305         path->intf_rx.fp_interface = rpath->frp_sw_if_index;
1306     }
1307     else if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_RPF_ID)
1308     {
1309         path->fp_type = FIB_PATH_TYPE_DEAG;
1310         path->deag.fp_tbl_id = rpath->frp_fib_index;
1311         path->deag.fp_rpf_id = rpath->frp_rpf_id;
1312     }
1313     else if (rpath->frp_flags & FIB_ROUTE_PATH_BIER_FMASK)
1314     {
1315         path->fp_type = FIB_PATH_TYPE_BIER_FMASK;
1316         path->bier_fmask.fp_nh = rpath->frp_addr;
1317         path->bier_fmask.fp_bier_fib = rpath->frp_bier_fib_index;
1318     }
1319     else if (rpath->frp_flags & FIB_ROUTE_PATH_BIER_IMP)
1320     {
1321         path->fp_type = FIB_PATH_TYPE_BIER_IMP;
1322         path->bier_imp.fp_bier_imp = rpath->frp_bier_imp;
1323     }
1324     else if (rpath->frp_flags & FIB_ROUTE_PATH_BIER_TABLE)
1325     {
1326         path->fp_type = FIB_PATH_TYPE_BIER_TABLE;
1327         path->bier_table.fp_bier_tbl = rpath->frp_bier_tbl;
1328     }
1329     else if (~0 != rpath->frp_sw_if_index)
1330     {
1331         if (ip46_address_is_zero(&rpath->frp_addr))
1332         {
1333             path->fp_type = FIB_PATH_TYPE_ATTACHED;
1334             path->attached.fp_interface = rpath->frp_sw_if_index;
1335         }
1336         else
1337         {
1338             path->fp_type = FIB_PATH_TYPE_ATTACHED_NEXT_HOP;
1339             path->attached_next_hop.fp_interface = rpath->frp_sw_if_index;
1340             path->attached_next_hop.fp_nh = rpath->frp_addr;
1341         }
1342     }
1343     else
1344     {
1345         if (ip46_address_is_zero(&rpath->frp_addr))
1346         {
1347             if (~0 == rpath->frp_fib_index)
1348             {
1349                 path->fp_type = FIB_PATH_TYPE_SPECIAL;
1350             }
1351             else
1352             {
1353                 path->fp_type = FIB_PATH_TYPE_DEAG;
1354                 path->deag.fp_tbl_id = rpath->frp_fib_index;
1355             }           
1356         }
1357         else
1358         {
1359             path->fp_type = FIB_PATH_TYPE_RECURSIVE;
1360             if (DPO_PROTO_MPLS == path->fp_nh_proto)
1361             {
1362                 path->recursive.fp_nh.fp_local_label = rpath->frp_local_label;
1363                 path->recursive.fp_nh.fp_eos = rpath->frp_eos;
1364             }
1365             else
1366             {
1367                 path->recursive.fp_nh.fp_ip = rpath->frp_addr;
1368             }
1369             path->recursive.fp_tbl_id = rpath->frp_fib_index;
1370         }
1371     }
1372
1373     FIB_PATH_DBG(path, "create");
1374
1375     return (fib_path_get_index(path));
1376 }
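
/*
 * Illustrative sketch (hypothetical values, not taken from this file):
 * a route-path that names both an interface and a non-zero next-hop
 * address becomes an attached-nexthop path:
 *
 *   fib_route_path_t rpath = {
 *       .frp_proto = DPO_PROTO_IP4,
 *       .frp_addr = nh_addr,              // non-zero => attached-nexthop
 *       .frp_sw_if_index = sw_if_index,   // ~0 would mean recursive/deag
 *       .frp_fib_index = ~0,
 *       .frp_weight = 1,
 *       .frp_preference = 0,
 *   };
 *   fib_node_index_t pi = fib_path_create(pl_index, &rpath);
 */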
1377
1378 /*
1379  * fib_path_create_special
1380  *
1381  * Create and initialise a new path object.
1382  * return the index of the path.
1383  */
1384 fib_node_index_t
1385 fib_path_create_special (fib_node_index_t pl_index,
1386                          dpo_proto_t nh_proto,
1387                          fib_path_cfg_flags_t flags,
1388                          const dpo_id_t *dpo)
1389 {
1390     fib_path_t *path;
1391
1392     pool_get(fib_path_pool, path);
1393     memset(path, 0, sizeof(*path));
1394
1395     fib_node_init(&path->fp_node,
1396                   FIB_NODE_TYPE_PATH);
1397     dpo_reset(&path->fp_dpo);
1398
1399     path->fp_pl_index = pl_index;
1400     path->fp_weight = 1;
1401     path->fp_preference = 0;
1402     path->fp_nh_proto = nh_proto;
1403     path->fp_via_fib = FIB_NODE_INDEX_INVALID;
1404     path->fp_cfg_flags = flags;
1405
1406     if (FIB_PATH_CFG_FLAG_DROP & flags)
1407     {
1408         path->fp_type = FIB_PATH_TYPE_SPECIAL;
1409     }
1410     else if (FIB_PATH_CFG_FLAG_LOCAL & flags)
1411     {
1412         path->fp_type = FIB_PATH_TYPE_RECEIVE;
1413         path->receive.fp_interface = FIB_NODE_INDEX_INVALID;
1414     }
1415     else
1416     {
1417         path->fp_type = FIB_PATH_TYPE_EXCLUSIVE;
1418         ASSERT(NULL != dpo);
1419         dpo_copy(&path->exclusive.fp_ex_dpo, dpo);
1420     }
1421
1422     return (fib_path_get_index(path));
1423 }
1424
1425 /*
1426  * fib_path_copy
1427  *
1428  * Copy a path. return index of new path.
1429  */
1430 fib_node_index_t
1431 fib_path_copy (fib_node_index_t path_index,
1432                fib_node_index_t path_list_index)
1433 {
1434     fib_path_t *path, *orig_path;
1435
1436     pool_get(fib_path_pool, path);
1437
1438     orig_path = fib_path_get(path_index);
1439     ASSERT(NULL != orig_path);
1440
1441     memcpy(path, orig_path, sizeof(*path));
1442
1443     FIB_PATH_DBG(path, "create-copy:%d", path_index);
1444
1445     /*
1446      * reset the dynamic section
1447      */
1448     fib_node_init(&path->fp_node, FIB_NODE_TYPE_PATH);
1449     path->fp_oper_flags     = FIB_PATH_OPER_FLAG_NONE;
1450     path->fp_pl_index  = path_list_index;
1451     path->fp_via_fib   = FIB_NODE_INDEX_INVALID;
1452     memset(&path->fp_dpo, 0, sizeof(path->fp_dpo));
1453     dpo_reset(&path->fp_dpo);
1454
1455     return (fib_path_get_index(path));
1456 }
1457
1458 /*
1459  * fib_path_destroy
1460  *
1461  * destroy a path that is no longer required
1462  */
1463 void
1464 fib_path_destroy (fib_node_index_t path_index)
1465 {
1466     fib_path_t *path;
1467
1468     path = fib_path_get(path_index);
1469
1470     ASSERT(NULL != path);
1471     FIB_PATH_DBG(path, "destroy");
1472
1473     fib_path_unresolve(path);
1474
1475     fib_node_deinit(&path->fp_node);
1476     pool_put(fib_path_pool, path);
1477 }
1478
1479 /*
1480  * fib_path_hash
1481  *
1482  * compute the hash of a path's key (the region between the hash markers)
1483  */
1484 uword
1485 fib_path_hash (fib_node_index_t path_index)
1486 {
1487     fib_path_t *path;
1488
1489     path = fib_path_get(path_index);
1490
1491     return (hash_memory(STRUCT_MARK_PTR(path, path_hash_start),
1492                         (STRUCT_OFFSET_OF(fib_path_t, path_hash_end) -
1493                          STRUCT_OFFSET_OF(fib_path_t, path_hash_start)),
1494                         0));
1495 }
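
/*
 * Illustrative property of the hash above (a sketch, not an assertion from
 * this file): only the bytes between the two STRUCT_MARKs are hashed, so
 * derived state (fp_oper_flags, fp_dpo, fp_sibling) and the owning
 * path-list index do not affect the value. A copy made for another
 * path-list therefore hashes identically:
 *
 *   fib_node_index_t pi2 = fib_path_copy(pi1, other_pl_index);
 *
 *   ASSERT(fib_path_hash(pi1) == fib_path_hash(pi2));
 */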
1496
1497 /*
1498  * fib_path_cmp_i
1499  *
1500  * Compare two paths for equivalence.
1501  */
1502 static int
1503 fib_path_cmp_i (const fib_path_t *path1,
1504                 const fib_path_t *path2)
1505 {
1506     int res;
1507
1508     res = 1;
1509
1510     /*
1511      * paths of different types and protocols are not equal.
1512      * paths that differ only in weight and/or preference are the same path.
1513      */
1514     if (path1->fp_type != path2->fp_type)
1515     {
1516         res = (path1->fp_type - path2->fp_type);
1517     }
1518     else if (path1->fp_nh_proto != path2->fp_nh_proto)
1519     {
1520         res = (path1->fp_nh_proto - path2->fp_nh_proto);
1521     }
1522     else
1523     {
1524         /*
1525          * both paths are of the same type.
1526          * consider each type and its attributes in turn.
1527          */
1528         switch (path1->fp_type)
1529         {
1530         case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
1531             res = ip46_address_cmp(&path1->attached_next_hop.fp_nh,
1532                                    &path2->attached_next_hop.fp_nh);
1533             if (0 == res) {
1534                 res = (path1->attached_next_hop.fp_interface -
1535                        path2->attached_next_hop.fp_interface);
1536             }
1537             break;
1538         case FIB_PATH_TYPE_ATTACHED:
1539             res = (path1->attached.fp_interface -
1540                    path2->attached.fp_interface);
1541             break;
1542         case FIB_PATH_TYPE_RECURSIVE:
1543             res = ip46_address_cmp(&path1->recursive.fp_nh,
1544                                    &path2->recursive.fp_nh);
1545  
1546             if (0 == res)
1547             {
1548                 res = (path1->recursive.fp_tbl_id - path2->recursive.fp_tbl_id);
1549             }
1550             break;
1551         case FIB_PATH_TYPE_BIER_FMASK:
1552             res = ip46_address_cmp(&path1->bier_fmask.fp_nh,
1553                                    &path2->bier_fmask.fp_nh);
1554  
1555             if (0 == res)
1556             {
1557                 res = (path1->bier_fmask.fp_bier_fib -
1558                        path2->bier_fmask.fp_bier_fib);
1559             }
1560             break;
1561         case FIB_PATH_TYPE_BIER_IMP:
1562             res = (path1->bier_imp.fp_bier_imp -
1563                    path2->bier_imp.fp_bier_imp);
1564             break;
1565         case FIB_PATH_TYPE_BIER_TABLE:
1566             res = bier_table_id_cmp(&path1->bier_table.fp_bier_tbl,
1567                                     &path2->bier_table.fp_bier_tbl);
1568             break;
1569         case FIB_PATH_TYPE_DEAG:
1570             res = (path1->deag.fp_tbl_id - path2->deag.fp_tbl_id);
1571             if (0 == res)
1572             {
1573                 res = (path1->deag.fp_rpf_id - path2->deag.fp_rpf_id);
1574             }
1575             break;
1576         case FIB_PATH_TYPE_INTF_RX:
1577             res = (path1->intf_rx.fp_interface - path2->intf_rx.fp_interface);
1578             break;
1579         case FIB_PATH_TYPE_UDP_ENCAP:
1580             res = (path1->udp_encap.fp_udp_encap_id - path2->udp_encap.fp_udp_encap_id);
1581             break;
1582         case FIB_PATH_TYPE_SPECIAL:
1583         case FIB_PATH_TYPE_RECEIVE:
1584         case FIB_PATH_TYPE_EXCLUSIVE:
1585             res = 0;
1586             break;
1587         }
1588     }
1589     return (res);
1590 }
1591
1592 /*
1593  * fib_path_cmp_for_sort
1594  *
1595  * Compare two paths for equivalence. Used during path sorting.
1596  * As usual 0 means equal.
1597  */
1598 int
1599 fib_path_cmp_for_sort (void * v1,
1600                        void * v2)
1601 {
1602     fib_node_index_t *pi1 = v1, *pi2 = v2;
1603     fib_path_t *path1, *path2;
1604
1605     path1 = fib_path_get(*pi1);
1606     path2 = fib_path_get(*pi2);
1607
1608     /*
1609      * when sorting paths we want the highest preference paths
1610      * first, so that the set of choices built is in preference order
1611      */
1612     if (path1->fp_preference != path2->fp_preference)
1613     {
1614         return (path1->fp_preference - path2->fp_preference);
1615     }
1616
1617     return (fib_path_cmp_i(path1, path2));
1618 }
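
/*
 * Illustrative caller sketch (an assumption about how path-lists use this):
 * the comparator matches the signature expected by vppinfra's
 * vec_sort_with_function(), so a vector of path indices can be ordered by
 * preference and then by path content:
 *
 *   vec_sort_with_function(path_indices, fib_path_cmp_for_sort);
 */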
1619
1620 /*
1621  * fib_path_cmp
1622  *
1623  * Compare two paths for equivalence.
1624  */
1625 int
1626 fib_path_cmp (fib_node_index_t pi1,
1627               fib_node_index_t pi2)
1628 {
1629     fib_path_t *path1, *path2;
1630
1631     path1 = fib_path_get(pi1);
1632     path2 = fib_path_get(pi2);
1633
1634     return (fib_path_cmp_i(path1, path2));
1635 }
1636
1637 int
1638 fib_path_cmp_w_route_path (fib_node_index_t path_index,
1639                            const fib_route_path_t *rpath)
1640 {
1641     fib_path_t *path;
1642     int res;
1643
1644     path = fib_path_get(path_index);
1645
1646     res = 1;
1647
1648     if (path->fp_weight != rpath->frp_weight)
1649     {
1650         res = (path->fp_weight - rpath->frp_weight);
1651     }
1652     else
1653     {
1654         /*
1655          * both paths are of the same type.
1656          * consider each type and its attributes in turn.
1657          */
1658         switch (path->fp_type)
1659         {
1660         case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
1661             res = ip46_address_cmp(&path->attached_next_hop.fp_nh,
1662                                    &rpath->frp_addr);
1663             if (0 == res)
1664             {
1665                 res = (path->attached_next_hop.fp_interface -
1666                        rpath->frp_sw_if_index);
1667             }
1668             break;
1669         case FIB_PATH_TYPE_ATTACHED:
1670             res = (path->attached.fp_interface - rpath->frp_sw_if_index);
1671             break;
1672         case FIB_PATH_TYPE_RECURSIVE:
1673             if (DPO_PROTO_MPLS == path->fp_nh_proto)
1674             {
1675                 res = path->recursive.fp_nh.fp_local_label - rpath->frp_local_label;
1676
1677                 if (res == 0)
1678                 {
1679                     res = path->recursive.fp_nh.fp_eos - rpath->frp_eos;
1680                 }
1681             }
1682             else
1683             {
1684                 res = ip46_address_cmp(&path->recursive.fp_nh.fp_ip,
1685                                        &rpath->frp_addr);
1686             }
1687
1688             if (0 == res)
1689             {
1690                 res = (path->recursive.fp_tbl_id - rpath->frp_fib_index);
1691             }
1692             break;
1693         case FIB_PATH_TYPE_BIER_FMASK:
1694             res = ip46_address_cmp(&path->bier_fmask.fp_nh,
1695                                    &rpath->frp_addr);
1696
1697             if (0 == res)
1698             {
1699                 res = (path->bier_fmask.fp_bier_fib - rpath->frp_bier_fib_index);
1700             }
1701             break;
1702         case FIB_PATH_TYPE_BIER_IMP:
1703             res = (path->bier_imp.fp_bier_imp - rpath->frp_bier_imp);
1704             break;
1705         case FIB_PATH_TYPE_BIER_TABLE:
1706             res = bier_table_id_cmp(&path->bier_table.fp_bier_tbl,
1707                                     &rpath->frp_bier_tbl);
1708             break;
1709         case FIB_PATH_TYPE_INTF_RX:
1710             res = (path->intf_rx.fp_interface - rpath->frp_sw_if_index);
1711             break;
1712         case FIB_PATH_TYPE_UDP_ENCAP:
1713             res = (path->udp_encap.fp_udp_encap_id - rpath->frp_udp_encap_id);
1714             break;
1715         case FIB_PATH_TYPE_DEAG:
1716             res = (path->deag.fp_tbl_id - rpath->frp_fib_index);
1717             if (0 == res)
1718             {
1719                 res = (path->deag.fp_rpf_id - rpath->frp_rpf_id);
1720             }
1721             break;
1722         case FIB_PATH_TYPE_SPECIAL:
1723         case FIB_PATH_TYPE_RECEIVE:
1724         case FIB_PATH_TYPE_EXCLUSIVE:
1725             res = 0;
1726             break;
1727         }
1728     }
1729     return (res);
1730 }
1731
1732 /*
1733  * fib_path_recursive_loop_detect
1734  *
1735  * A forward walk of the FIB object graph to detect a cycle/loop. This
1736  * walk is initiated when an entry links to a new path-list or unlinks from an old one.
1737  * The entry vector passed contains all the FIB entries that are children of this
1738  * path (i.e. all the entries encountered on the walk so far). If this vector
1739  * contains the entry this path resolves via, then a loop is about to form.
1740  * The loop must be allowed to form, since we need the dependencies in place
1741  * so that we can track when the loop breaks.
1742  * However, we MUST not produce a loop in the forwarding graph (else packets
1743  * would loop around the switch path until the loop breaks), so we mark recursive
1744  * paths as looped so that they do not contribute forwarding information.
1745  * By marking the path as looped, an entry such as:
1746  *    X/Y
1747  *     via a.a.a.a (looped)
1748  *     via b.b.b.b (not looped)
1749  * can still forward using the info provided by b.b.b.b only
1750  */
1751 int
1752 fib_path_recursive_loop_detect (fib_node_index_t path_index,
1753                                 fib_node_index_t **entry_indicies)
1754 {
1755     fib_path_t *path;
1756
1757     path = fib_path_get(path_index);
1758
1759     /*
1760      * the forced drop path is never looped, since it is never resolved.
1761      */
1762     if (fib_path_is_permanent_drop(path))
1763     {
1764         return (0);
1765     }
1766
1767     switch (path->fp_type)
1768     {
1769     case FIB_PATH_TYPE_RECURSIVE:
1770     {
1771         fib_node_index_t *entry_index, *entries;
1772         int looped = 0;
1773         entries = *entry_indicies;
1774
1775         vec_foreach(entry_index, entries) {
1776             if (*entry_index == path->fp_via_fib)
1777             {
1778                 /*
1779                  * the entry that is about to link to this path-list (or
1780                  * one of this path-list's children) is the same entry that
1781                  * this recursive path resolves through. this is a cycle.
1782                  * abort the walk.
1783                  */
1784                 looped = 1;
1785                 break;
1786             }
1787         }
1788
1789         if (looped)
1790         {
1791             FIB_PATH_DBG(path, "recursive loop formed");
1792             path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RECURSIVE_LOOP;
1793
1794             dpo_copy(&path->fp_dpo, drop_dpo_get(path->fp_nh_proto));
1795         }
1796         else
1797         {
1798             /*
1799              * no loop here yet. keep forward walking the graph.
1800              */     
1801             if (fib_entry_recursive_loop_detect(path->fp_via_fib, entry_indicies))
1802             {
1803                 FIB_PATH_DBG(path, "recursive loop formed");
1804                 path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RECURSIVE_LOOP;
1805             }
1806             else
1807             {
1808                 FIB_PATH_DBG(path, "recursive loop cleared");
1809                 path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RECURSIVE_LOOP;
1810             }
1811         }
1812         break;
1813     }
1814     case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
1815     case FIB_PATH_TYPE_ATTACHED:
1816     case FIB_PATH_TYPE_SPECIAL:
1817     case FIB_PATH_TYPE_DEAG:
1818     case FIB_PATH_TYPE_RECEIVE:
1819     case FIB_PATH_TYPE_INTF_RX:
1820     case FIB_PATH_TYPE_UDP_ENCAP:
1821     case FIB_PATH_TYPE_EXCLUSIVE:
1822     case FIB_PATH_TYPE_BIER_FMASK:
1823     case FIB_PATH_TYPE_BIER_TABLE:
1824     case FIB_PATH_TYPE_BIER_IMP:
1825         /*
1826          * these path types cannot be part of a loop, since they are the leaves
1827          * of the graph.
1828          */
1829         break;
1830     }
1831
1832     return (fib_path_is_looped(path_index));
1833 }
1834
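/*
 * fib_path_resolve
 *
 * Resolve the path: construct and cache the DPO through which this path
 * forwards (an adjacency, a recursive via-entry, a BIER object, a lookup,
 * a receive, etc. depending on the path type) and link to the parent
 * object so the path is updated when that parent changes.
 * Returns non-zero if the path ends up resolved.
 */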
1835 int
1836 fib_path_resolve (fib_node_index_t path_index)
1837 {
1838     fib_path_t *path;
1839
1840     path = fib_path_get(path_index);
1841
1842     /*
1843      * hope for the best.
1844      */
1845     path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RESOLVED;
1846
1847     /*
1848      * the forced drop path resolves via the drop adj
1849      */
1850     if (fib_path_is_permanent_drop(path))
1851     {
1852         dpo_copy(&path->fp_dpo, drop_dpo_get(path->fp_nh_proto));
1853         path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
1854         return (fib_path_is_resolved(path_index));
1855     }
1856
1857     switch (path->fp_type)
1858     {
1859     case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
1860         fib_path_attached_next_hop_set(path);
1861         break;
1862     case FIB_PATH_TYPE_ATTACHED:
1863         if (DPO_PROTO_ETHERNET == path->fp_nh_proto)
1864         {
1865             l2_bridge_dpo_add_or_lock(path->attached.fp_interface,
1866                                       &path->fp_dpo);
1867         }
1868         else
1869         {
1870             /*
1871              * the path is unresolved if the attached interface is admin down
1872              */
1873             if (!vnet_sw_interface_is_admin_up(vnet_get_main(),
1874                                                path->attached.fp_interface))
1875             {
1876                 path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
1877             }
1878             dpo_set(&path->fp_dpo,
1879                     DPO_ADJACENCY,
1880                     path->fp_nh_proto,
1881                     fib_path_attached_get_adj(path,
1882                                               dpo_proto_to_link(path->fp_nh_proto)));
1883
1884             /*
1885              * become a child of the adjacency so we receive updates
1886              * when the interface state changes
1887              */
1888             path->fp_sibling = adj_child_add(path->fp_dpo.dpoi_index,
1889                                              FIB_NODE_TYPE_PATH,
1890                                              fib_path_get_index(path));
1891         }
1892         break;
1893     case FIB_PATH_TYPE_RECURSIVE:
1894     {
1895         /*
1896          * Create a RR source entry in the table for the address
1897          * that this path recurses through.
1898          * This resolve action is recursive, hence we may create
1899          * more paths in the process; more creates may mean a pool realloc,
1900          * so this path must be re-fetched once the entry has been added.
1901          */
1902         fib_node_index_t fei;
1903         fib_prefix_t pfx;
1904
1905         ASSERT(FIB_NODE_INDEX_INVALID == path->fp_via_fib);
1906
1907         if (DPO_PROTO_MPLS == path->fp_nh_proto)
1908         {
1909             fib_prefix_from_mpls_label(path->recursive.fp_nh.fp_local_label,
1910                                        path->recursive.fp_nh.fp_eos,
1911                                        &pfx);
1912         }
1913         else
1914         {
1915             fib_prefix_from_ip46_addr(&path->recursive.fp_nh.fp_ip, &pfx);
1916         }
1917
1918         fei = fib_table_entry_special_add(path->recursive.fp_tbl_id,
1919                                           &pfx,
1920                                           FIB_SOURCE_RR,
1921                                           FIB_ENTRY_FLAG_NONE);
1922
1923         path = fib_path_get(path_index);
1924         path->fp_via_fib = fei;
1925
1926         /*
1927          * become a dependent child of the entry so the path is 
1928          * informed when the forwarding for the entry changes.
1929          */
1930         path->fp_sibling = fib_entry_child_add(path->fp_via_fib,
1931                                                FIB_NODE_TYPE_PATH,
1932                                                fib_path_get_index(path));
1933
1934         /*
1935          * create and configure the IP DPO
1936          */
1937         fib_path_recursive_adj_update(
1938             path,
1939             fib_path_to_chain_type(path),
1940             &path->fp_dpo);
1941
1942         break;
1943     }
1944     case FIB_PATH_TYPE_BIER_FMASK:
1945     {
1946         /*
1947          * Find the BIER f-mask to link to
1948          */
1949         bier_fmask_id_t fmid = {
1950             .bfmi_nh = path->bier_fmask.fp_nh,
1951             .bfmi_hdr_type = BIER_HDR_O_MPLS,
1952         };
1953
1954         ASSERT(FIB_NODE_INDEX_INVALID == path->fp_via_bier_fmask);
1955
1956         path->fp_via_bier_fmask = bier_fmask_db_find(path->bier_fmask.fp_bier_fib,
1957                                                 &fmid);
1958
1959         /*
1960          * become a dependent child of the entry so the path is
1961          * informed when the forwarding for the entry changes.
1962          */
1963         path->fp_sibling = bier_fmask_child_add(path->fp_via_bier_fmask,
1964                                                 FIB_NODE_TYPE_PATH,
1965                                                 fib_path_get_index(path));
1966
1967         fib_path_bier_fmask_update(path, &path->fp_dpo);
1968
1969         break;
1970     }
1971     case FIB_PATH_TYPE_BIER_IMP:
1972         bier_imp_lock(path->bier_imp.fp_bier_imp);
1973         bier_imp_contribute_forwarding(path->bier_imp.fp_bier_imp,
1974                                        DPO_PROTO_IP4,
1975                                        &path->fp_dpo);
1976         break;
1977     case FIB_PATH_TYPE_BIER_TABLE:
1978     {
1979         /*
1980          * Find/create the BIER table to link to
1981          */
1982         ASSERT(FIB_NODE_INDEX_INVALID == path->fp_via_bier_tbl);
1983
1984         path->fp_via_bier_tbl =
1985             bier_table_ecmp_create_and_lock(&path->bier_table.fp_bier_tbl);
1986
1987         bier_table_contribute_forwarding(path->fp_via_bier_tbl,
1988                                          &path->fp_dpo);
1989         break;
1990     }
1991     case FIB_PATH_TYPE_SPECIAL:
1992         /*
1993          * Resolve via the drop
1994          */
1995         dpo_copy(&path->fp_dpo, drop_dpo_get(path->fp_nh_proto));
1996         break;
1997     case FIB_PATH_TYPE_DEAG:
1998     {
1999         /*
2000          * Resolve via a lookup DPO.
2001          * FIXME. control plane should add routes with a table ID
2002          */
2003         lookup_input_t input;
2004         lookup_cast_t cast;
2005
2006         cast = (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_RPF_ID ?
2007                 LOOKUP_MULTICAST :
2008                 LOOKUP_UNICAST);
2009         input = (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_DEAG_SRC ?
2010                 LOOKUP_INPUT_SRC_ADDR :
2011                 LOOKUP_INPUT_DST_ADDR);
2012
2013         lookup_dpo_add_or_lock_w_fib_index(path->deag.fp_tbl_id,
2014                                            path->fp_nh_proto,
2015                                            cast,
2016                                            input,
2017                                            LOOKUP_TABLE_FROM_CONFIG,
2018                                            &path->fp_dpo);
2019         break;
2020     }
2021     case FIB_PATH_TYPE_RECEIVE:
2022         /*
2023          * Resolve via a receive DPO.
2024          */
2025         receive_dpo_add_or_lock(path->fp_nh_proto,
2026                                 path->receive.fp_interface,
2027                                 &path->receive.fp_addr,
2028                                 &path->fp_dpo);
2029         break;
2030     case FIB_PATH_TYPE_UDP_ENCAP:
2031         udp_encap_lock(path->udp_encap.fp_udp_encap_id);
2032         udp_encap_contribute_forwarding(path->udp_encap.fp_udp_encap_id,
2033                                         path->fp_nh_proto,
2034                                         &path->fp_dpo);
2035         break;
2036     case FIB_PATH_TYPE_INTF_RX: {
2037         /*
2038          * Resolve via an interface-receive DPO.
2039          */
2040         interface_rx_dpo_add_or_lock(path->fp_nh_proto,
2041                                      path->intf_rx.fp_interface,
2042                                      &path->fp_dpo);
2043         break;
2044     }
2045     case FIB_PATH_TYPE_EXCLUSIVE:
2046         /*
2047          * Resolve via the user provided DPO
2048          */
2049         dpo_copy(&path->fp_dpo, &path->exclusive.fp_ex_dpo);
2050         break;
2051     }
2052
2053     return (fib_path_is_resolved(path_index));
2054 }
2055
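/*
 * fib_path_get_resolving_interface
 *
 * Return the SW interface index through which the path resolves,
 * or ~0 if the path type has no associated interface.
 */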
2056 u32
2057 fib_path_get_resolving_interface (fib_node_index_t path_index)
2058 {
2059     fib_path_t *path;
2060
2061     path = fib_path_get(path_index);
2062
2063     switch (path->fp_type)
2064     {
2065     case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
2066         return (path->attached_next_hop.fp_interface);
2067     case FIB_PATH_TYPE_ATTACHED:
2068         return (path->attached.fp_interface);
2069     case FIB_PATH_TYPE_RECEIVE:
2070         return (path->receive.fp_interface);
2071     case FIB_PATH_TYPE_RECURSIVE:
2072         if (fib_path_is_resolved(path_index))
2073         {
2074             return (fib_entry_get_resolving_interface(path->fp_via_fib));
2075         }
2076         break;
2077     case FIB_PATH_TYPE_INTF_RX:
2078     case FIB_PATH_TYPE_UDP_ENCAP:
2079     case FIB_PATH_TYPE_SPECIAL:
2080     case FIB_PATH_TYPE_DEAG:
2081     case FIB_PATH_TYPE_EXCLUSIVE:
2082     case FIB_PATH_TYPE_BIER_FMASK:
2083     case FIB_PATH_TYPE_BIER_TABLE:
2084     case FIB_PATH_TYPE_BIER_IMP:
2085         break;
2086     }
2087     return (~0);
2088 }
2089
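/*
 * fib_path_get_resolving_index
 *
 * Return the index of the object the path resolves via (the via FIB entry,
 * BIER f-mask/table/imposition or UDP encap), or ~0 for path types that
 * resolve via none of these.
 */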
2090 index_t
2091 fib_path_get_resolving_index (fib_node_index_t path_index)
2092 {
2093     fib_path_t *path;
2094
2095     path = fib_path_get(path_index);
2096
2097     switch (path->fp_type)
2098     {
2099     case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
2100     case FIB_PATH_TYPE_ATTACHED:
2101     case FIB_PATH_TYPE_RECEIVE:
2102     case FIB_PATH_TYPE_INTF_RX:
2103     case FIB_PATH_TYPE_SPECIAL:
2104     case FIB_PATH_TYPE_DEAG:
2105     case FIB_PATH_TYPE_EXCLUSIVE:
2106         break;
2107     case FIB_PATH_TYPE_UDP_ENCAP:
2108         return (path->udp_encap.fp_udp_encap_id);
2109     case FIB_PATH_TYPE_RECURSIVE:
2110         return (path->fp_via_fib);
2111     case FIB_PATH_TYPE_BIER_FMASK:
2112         return (path->fp_via_bier_fmask);
2113     case FIB_PATH_TYPE_BIER_TABLE:
2114         return (path->fp_via_bier_tbl);
2115     case FIB_PATH_TYPE_BIER_IMP:
2116         return (path->bier_imp.fp_bier_imp);
2117     }
2118     return (~0);
2119 }
2120
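/*
 * fib_path_get_adj
 *
 * Return the adjacency the path currently uses. This is valid only when the
 * path's DPO is an adjacency; otherwise ADJ_INDEX_INVALID is returned.
 */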
2121 adj_index_t
2122 fib_path_get_adj (fib_node_index_t path_index)
2123 {
2124     fib_path_t *path;
2125
2126     path = fib_path_get(path_index);
2127
2128     ASSERT(dpo_is_adj(&path->fp_dpo));
2129     if (dpo_is_adj(&path->fp_dpo))
2130     {
2131         return (path->fp_dpo.dpoi_index);
2132     }
2133     return (ADJ_INDEX_INVALID);
2134 }
2135
2136 u16
2137 fib_path_get_weight (fib_node_index_t path_index)
2138 {
2139     fib_path_t *path;
2140
2141     path = fib_path_get(path_index);
2142
2143     ASSERT(path);
2144
2145     return (path->fp_weight);
2146 }
2147
2148 u16
2149 fib_path_get_preference (fib_node_index_t path_index)
2150 {
2151     fib_path_t *path;
2152
2153     path = fib_path_get(path_index);
2154
2155     ASSERT(path);
2156
2157     return (path->fp_preference);
2158 }
2159
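/*
 * fib_path_get_rpf_id
 *
 * Return the RPF-ID configured on a deag path (i.e. one created with the
 * RPF-ID flag), or ~0 if the path has none.
 */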
2160 u32
2161 fib_path_get_rpf_id (fib_node_index_t path_index)
2162 {
2163     fib_path_t *path;
2164
2165     path = fib_path_get(path_index);
2166
2167     ASSERT(path);
2168
2169     if (FIB_PATH_CFG_FLAG_RPF_ID & path->fp_cfg_flags)
2170     {
2171         return (path->deag.fp_rpf_id);
2172     }
2173
2174     return (~0);
2175 }
2176
2177 /**
2178  * @brief Contribute the path's adjacency to the list passed.
2179  * By calling this function over all paths, recursively, a child
2180  * can construct its full set of forwarding adjacencies, and hence its
2181  * uRPF list.
2182  */
2183 void
2184 fib_path_contribute_urpf (fib_node_index_t path_index,
2185                           index_t urpf)
2186 {
2187     fib_path_t *path;
2188
2189     path = fib_path_get(path_index);
2190
2191     /*
2192      * resolved and unresolved paths contribute to the RPF list.
2193      */
2194     switch (path->fp_type)
2195     {
2196     case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
2197         fib_urpf_list_append(urpf, path->attached_next_hop.fp_interface);
2198         break;
2199
2200     case FIB_PATH_TYPE_ATTACHED:
2201         fib_urpf_list_append(urpf, path->attached.fp_interface);
2202         break;
2203
2204     case FIB_PATH_TYPE_RECURSIVE:
2205         if (FIB_NODE_INDEX_INVALID != path->fp_via_fib &&
2206             !fib_path_is_looped(path_index))
2207         {
2208             /*
2209              * a path may be unresolved due to resolution constraints, or
2210              * because it has no via-entry; without one there is nothing to add.
2211              */
2212             fib_entry_contribute_urpf(path->fp_via_fib, urpf);
2213         }
2214         break;
2215
2216     case FIB_PATH_TYPE_EXCLUSIVE:
2217     case FIB_PATH_TYPE_SPECIAL:
2218     {
2219         /*
2220          * these path types may link to an adj, if that's what
2221          * the client gave
2222          */
2223         u32 rpf_sw_if_index;
2224
2225         rpf_sw_if_index = dpo_get_urpf(&path->fp_dpo);
2226
2227         if (~0 != rpf_sw_if_index)
2228         {
2229             fib_urpf_list_append(urpf, rpf_sw_if_index);
2230         }
2231         break;
2232     }
2233     case FIB_PATH_TYPE_DEAG:
2234     case FIB_PATH_TYPE_RECEIVE:
2235     case FIB_PATH_TYPE_INTF_RX:
2236     case FIB_PATH_TYPE_UDP_ENCAP:
2237     case FIB_PATH_TYPE_BIER_FMASK:
2238     case FIB_PATH_TYPE_BIER_TABLE:
2239     case FIB_PATH_TYPE_BIER_IMP:
2240         /*
2241          * these path types don't link to an adj
2242          */
2243         break;
2244     }
2245 }
2246
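/*
 * fib_path_stack_mpls_disp
 *
 * For attached-next-hop and deag paths, wrap the DPO passed in an MPLS
 * disposition DPO, so the label disposition is performed before the wrapped
 * DPO is invoked. Other path types are left unchanged.
 */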
2247 void
2248 fib_path_stack_mpls_disp (fib_node_index_t path_index,
2249                           dpo_proto_t payload_proto,
2250                           dpo_id_t *dpo)
2251 {
2252     fib_path_t *path;
2253
2254     path = fib_path_get(path_index);
2255
2256     ASSERT(path);
2257
2258     switch (path->fp_type)
2259     {
2260     case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
2261     {
2262         dpo_id_t tmp = DPO_INVALID;
2263
2264         dpo_copy(&tmp, dpo);
2265         dpo_set(dpo,
2266                 DPO_MPLS_DISPOSITION,
2267                 payload_proto,
2268                 mpls_disp_dpo_create(payload_proto, ~0, &tmp));
2269         dpo_reset(&tmp);
2270         break;
2271     }                
2272     case FIB_PATH_TYPE_DEAG:
2273     {
2274         dpo_id_t tmp = DPO_INVALID;
2275
2276         dpo_copy(&tmp, dpo);
2277         dpo_set(dpo,
2278                 DPO_MPLS_DISPOSITION,
2279                 payload_proto,
2280                 mpls_disp_dpo_create(payload_proto,
2281                                      path->deag.fp_rpf_id,
2282                                      &tmp));
2283         dpo_reset(&tmp);
2284         break;
2285     }
2286     case FIB_PATH_TYPE_RECEIVE:
2287     case FIB_PATH_TYPE_ATTACHED:
2288     case FIB_PATH_TYPE_RECURSIVE:
2289     case FIB_PATH_TYPE_INTF_RX:
2290     case FIB_PATH_TYPE_UDP_ENCAP:
2291     case FIB_PATH_TYPE_EXCLUSIVE:
2292     case FIB_PATH_TYPE_SPECIAL:
2293     case FIB_PATH_TYPE_BIER_FMASK:
2294     case FIB_PATH_TYPE_BIER_TABLE:
2295     case FIB_PATH_TYPE_BIER_IMP:
2296         break;
2297     }
2298 }
2299
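/*
 * fib_path_contribute_forwarding
 *
 * Fill in the DPO that this path contributes for the requested forwarding
 * chain type. If the requested chain type matches the path's native type
 * the cached DPO is copied, otherwise a chain-type specific object is
 * constructed. A minimal caller sketch (illustrative only; the variable
 * names are not from this file):
 *
 *     dpo_id_t via_dpo = DPO_INVALID;
 *
 *     fib_path_contribute_forwarding(path_index,
 *                                    FIB_FORW_CHAIN_TYPE_UNICAST_IP4,
 *                                    &via_dpo);
 *     ... use via_dpo, e.g. as a load-balance bucket ...
 *     dpo_reset(&via_dpo);
 */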
2300 void
2301 fib_path_contribute_forwarding (fib_node_index_t path_index,
2302                                 fib_forward_chain_type_t fct,
2303                                 dpo_id_t *dpo)
2304 {
2305     fib_path_t *path;
2306
2307     path = fib_path_get(path_index);
2308
2309     ASSERT(path);
2310     ASSERT(FIB_FORW_CHAIN_TYPE_MPLS_EOS != fct);
2311
2312     FIB_PATH_DBG(path, "contribute");
2313
2314     /*
2315      * The DPO stored in the path was created when the path was resolved.
2316      * This represents the path's 'native' protocol, i.e. IP.
2317      * For all other chain types we need to construct something else.
2318      */
2319     if (fib_path_to_chain_type(path) == fct)
2320     {
2321         dpo_copy(dpo, &path->fp_dpo);
2322     }
2323     else
2324     {
2325         switch (path->fp_type)
2326         {
2327         case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
2328             switch (fct)
2329             {
2330             case FIB_FORW_CHAIN_TYPE_UNICAST_IP4:
2331             case FIB_FORW_CHAIN_TYPE_UNICAST_IP6:
2332             case FIB_FORW_CHAIN_TYPE_MPLS_EOS:
2333             case FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS:
2334             case FIB_FORW_CHAIN_TYPE_ETHERNET:
2335             case FIB_FORW_CHAIN_TYPE_NSH:
2336             {
2337                 adj_index_t ai;
2338
2339                 /*
2340                  * get an appropriate link-type adj.
2341                  */
2342                 ai = fib_path_attached_next_hop_get_adj(
2343                          path,
2344                          fib_forw_chain_type_to_link_type(fct));
2345                 dpo_set(dpo, DPO_ADJACENCY,
2346                         fib_forw_chain_type_to_dpo_proto(fct), ai);
2347                 adj_unlock(ai);
2348
2349                 break;
2350             }
2351             case FIB_FORW_CHAIN_TYPE_MCAST_IP4:
2352             case FIB_FORW_CHAIN_TYPE_MCAST_IP6:
2353             case FIB_FORW_CHAIN_TYPE_BIER:
2354                 break;
2355             }
2356             break;
2357         case FIB_PATH_TYPE_RECURSIVE:
2358             switch (fct)
2359             {
2360             case FIB_FORW_CHAIN_TYPE_MPLS_EOS:
2361             case FIB_FORW_CHAIN_TYPE_UNICAST_IP4:
2362             case FIB_FORW_CHAIN_TYPE_UNICAST_IP6:
2363             case FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS:
2364             case FIB_FORW_CHAIN_TYPE_MCAST_IP4:
2365             case FIB_FORW_CHAIN_TYPE_MCAST_IP6:
2366             case FIB_FORW_CHAIN_TYPE_BIER:
2367                 fib_path_recursive_adj_update(path, fct, dpo);
2368                 break;
2369             case FIB_FORW_CHAIN_TYPE_ETHERNET:
2370             case FIB_FORW_CHAIN_TYPE_NSH:
2371                 ASSERT(0);
2372                 break;
2373             }
2374             break;
2375         case FIB_PATH_TYPE_BIER_TABLE:
2376             switch (fct)
2377             {
2378             case FIB_FORW_CHAIN_TYPE_BIER:
2379                 bier_table_contribute_forwarding(path->fp_via_bier_tbl, dpo);
2380                 break;
2381             case FIB_FORW_CHAIN_TYPE_MPLS_EOS:
2382             case FIB_FORW_CHAIN_TYPE_UNICAST_IP4:
2383             case FIB_FORW_CHAIN_TYPE_UNICAST_IP6:
2384             case FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS:
2385             case FIB_FORW_CHAIN_TYPE_MCAST_IP4:
2386             case FIB_FORW_CHAIN_TYPE_MCAST_IP6:
2387             case FIB_FORW_CHAIN_TYPE_ETHERNET:
2388             case FIB_FORW_CHAIN_TYPE_NSH:
2389                 ASSERT(0);
2390                 break;
2391             }
2392             break;
2393         case FIB_PATH_TYPE_BIER_FMASK:
2394             switch (fct)
2395             {
2396             case FIB_FORW_CHAIN_TYPE_BIER:
2397                 fib_path_bier_fmask_update(path, dpo);
2398                 break;
2399             case FIB_FORW_CHAIN_TYPE_MPLS_EOS:
2400             case FIB_FORW_CHAIN_TYPE_UNICAST_IP4:
2401             case FIB_FORW_CHAIN_TYPE_UNICAST_IP6:
2402             case FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS:
2403             case FIB_FORW_CHAIN_TYPE_MCAST_IP4:
2404             case FIB_FORW_CHAIN_TYPE_MCAST_IP6:
2405             case FIB_FORW_CHAIN_TYPE_ETHERNET:
2406             case FIB_FORW_CHAIN_TYPE_NSH:
2407                 ASSERT(0);
2408                 break;
2409             }
2410             break;
2411         case FIB_PATH_TYPE_BIER_IMP:
2412             bier_imp_contribute_forwarding(path->bier_imp.fp_bier_imp,
2413                                            fib_forw_chain_type_to_dpo_proto(fct),
2414                                            dpo);
2415             break;
2416         case FIB_PATH_TYPE_DEAG:
2417             switch (fct)
2418             {
2419             case FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS:
2420                 lookup_dpo_add_or_lock_w_table_id(MPLS_FIB_DEFAULT_TABLE_ID,
2421                                                   DPO_PROTO_MPLS,
2422                                                   LOOKUP_UNICAST,
2423                                                   LOOKUP_INPUT_DST_ADDR,
2424                                                   LOOKUP_TABLE_FROM_CONFIG,
2425                                                   dpo);
2426                 break;
2427             case FIB_FORW_CHAIN_TYPE_MPLS_EOS:
2428             case FIB_FORW_CHAIN_TYPE_UNICAST_IP4:
2429             case FIB_FORW_CHAIN_TYPE_UNICAST_IP6:
2430                 dpo_copy(dpo, &path->fp_dpo);
2431                 break;
2432             case FIB_FORW_CHAIN_TYPE_MCAST_IP4:
2433             case FIB_FORW_CHAIN_TYPE_MCAST_IP6:
2434             case FIB_FORW_CHAIN_TYPE_BIER:
2435                 break;
2436             case FIB_FORW_CHAIN_TYPE_ETHERNET:
2437             case FIB_FORW_CHAIN_TYPE_NSH:
2438                 ASSERT(0);
2439                 break;
2440             }
2441             break;
2442         case FIB_PATH_TYPE_EXCLUSIVE:
2443             dpo_copy(dpo, &path->exclusive.fp_ex_dpo);
2444             break;
2445         case FIB_PATH_TYPE_ATTACHED:
2446             if (DPO_PROTO_ETHERNET == path->fp_nh_proto)
2447             {
2448                 dpo_copy(dpo, &path->fp_dpo);
2449                 break;
2450             }
2451             switch (fct)
2452             {
2453             case FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS:
2454             case FIB_FORW_CHAIN_TYPE_UNICAST_IP4:
2455             case FIB_FORW_CHAIN_TYPE_UNICAST_IP6:
2456             case FIB_FORW_CHAIN_TYPE_MPLS_EOS:
2457             case FIB_FORW_CHAIN_TYPE_ETHERNET:
2458             case FIB_FORW_CHAIN_TYPE_NSH:
2459             case FIB_FORW_CHAIN_TYPE_BIER:
2460                 {
2461                     adj_index_t ai;
2462
2463                     /*
2464                      * get an appropriate link-type adj.
2465                      */
2466                     ai = fib_path_attached_get_adj(
2467                             path,
2468                             fib_forw_chain_type_to_link_type(fct));
2469                     dpo_set(dpo, DPO_ADJACENCY,
2470                             fib_forw_chain_type_to_dpo_proto(fct), ai);
2471                     adj_unlock(ai);
2472                     break;
2473                 }
2474             case FIB_FORW_CHAIN_TYPE_MCAST_IP4:
2475             case FIB_FORW_CHAIN_TYPE_MCAST_IP6:
2476                 {
2477                     adj_index_t ai;
2478
2479                     /*
2480                      * Create the adj needed for sending IP multicast traffic
2481                      */
2482                     ai = adj_mcast_add_or_lock(dpo_proto_to_fib(path->fp_nh_proto),
2483                                                fib_forw_chain_type_to_link_type(fct),
2484                                                path->attached.fp_interface);
2485                     dpo_set(dpo, DPO_ADJACENCY,
2486                             fib_forw_chain_type_to_dpo_proto(fct),
2487                             ai);
2488                     adj_unlock(ai);
2489                 }
2490                 break;
2491             }
2492             break;
2493         case FIB_PATH_TYPE_INTF_RX:
2494             /*
2495              * Create the interface-receive DPO
2496              */
2497             interface_rx_dpo_add_or_lock(fib_forw_chain_type_to_dpo_proto(fct),
2498                                          path->attached.fp_interface,
2499                                          dpo);
2500             break;
2501         case FIB_PATH_TYPE_UDP_ENCAP:
2502             udp_encap_contribute_forwarding(path->udp_encap.fp_udp_encap_id,
2503                                             path->fp_nh_proto,
2504                                             dpo);
2505             break;
2506         case FIB_PATH_TYPE_RECEIVE:
2507         case FIB_PATH_TYPE_SPECIAL:
2508             dpo_copy(dpo, &path->fp_dpo);
2509             break;
2510         }
2511     }
2512 }
2513
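/*
 * fib_path_append_nh_for_multipath_hash
 *
 * If the path is resolved, append its contribution (weight, path index and
 * the DPO for the requested chain type) to the vector of load-balance paths
 * and return the (possibly reallocated) vector.
 */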
2514 load_balance_path_t *
2515 fib_path_append_nh_for_multipath_hash (fib_node_index_t path_index,
2516                                        fib_forward_chain_type_t fct,
2517                                        load_balance_path_t *hash_key)
2518 {
2519     load_balance_path_t *mnh;
2520     fib_path_t *path;
2521
2522     path = fib_path_get(path_index);
2523
2524     ASSERT(path);
2525
2526     if (fib_path_is_resolved(path_index))
2527     {
2528         vec_add2(hash_key, mnh, 1);
2529
2530         mnh->path_weight = path->fp_weight;
2531         mnh->path_index = path_index;
2532         fib_path_contribute_forwarding(path_index, fct, &mnh->path_dpo);
2533     }
2534
2535     return (hash_key);
2536 }
2537
2538 int
2539 fib_path_is_recursive_constrained (fib_node_index_t path_index)
2540 {
2541     fib_path_t *path;
2542
2543     path = fib_path_get(path_index);
2544
2545     return ((FIB_PATH_TYPE_RECURSIVE == path->fp_type) &&
2546             ((path->fp_cfg_flags & FIB_PATH_CFG_FLAG_RESOLVE_ATTACHED) ||
2547              (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_RESOLVE_HOST)));
2548 }
2549
2550 int
2551 fib_path_is_exclusive (fib_node_index_t path_index)
2552 {
2553     fib_path_t *path;
2554
2555     path = fib_path_get(path_index);
2556
2557     return (FIB_PATH_TYPE_EXCLUSIVE == path->fp_type);
2558 }
2559
2560 int
2561 fib_path_is_deag (fib_node_index_t path_index)
2562 {
2563     fib_path_t *path;
2564
2565     path = fib_path_get(path_index);
2566
2567     return (FIB_PATH_TYPE_DEAG == path->fp_type);
2568 }
2569
2570 int
2571 fib_path_is_resolved (fib_node_index_t path_index)
2572 {
2573     fib_path_t *path;
2574
2575     path = fib_path_get(path_index);
2576
2577     return (dpo_id_is_valid(&path->fp_dpo) &&
2578             (path->fp_oper_flags & FIB_PATH_OPER_FLAG_RESOLVED) &&
2579             !fib_path_is_looped(path_index) &&
2580             !fib_path_is_permanent_drop(path));
2581 }
2582
2583 int
2584 fib_path_is_looped (fib_node_index_t path_index)
2585 {
2586     fib_path_t *path;
2587
2588     path = fib_path_get(path_index);
2589
2590     return (path->fp_oper_flags & FIB_PATH_OPER_FLAG_RECURSIVE_LOOP);
2591 }
2592
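/*
 * fib_path_encode
 *
 * Path-list walk callback that appends a route-path encoding of the path
 * to the vector passed in the context; typically invoked via
 * fib_path_list_walk() when dumping routes to the API.
 */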
2593 fib_path_list_walk_rc_t
2594 fib_path_encode (fib_node_index_t path_list_index,
2595                  fib_node_index_t path_index,
2596                  void *ctx)
2597 {
2598     fib_route_path_encode_t **api_rpaths = ctx;
2599     fib_route_path_encode_t *api_rpath;
2600     fib_path_t *path;
2601
2602     path = fib_path_get(path_index);
2603     if (!path)
2604       return (FIB_PATH_LIST_WALK_CONTINUE);
2605     vec_add2(*api_rpaths, api_rpath, 1);
2606     api_rpath->rpath.frp_weight = path->fp_weight;
2607     api_rpath->rpath.frp_preference = path->fp_preference;
2608     api_rpath->rpath.frp_proto = path->fp_nh_proto;
2609     api_rpath->rpath.frp_sw_if_index = ~0;
2610     api_rpath->dpo = path->fp_dpo;
2611
2612     switch (path->fp_type)
2613       {
2614       case FIB_PATH_TYPE_RECEIVE:
2615         api_rpath->rpath.frp_addr = path->receive.fp_addr;
2616         api_rpath->rpath.frp_sw_if_index = path->receive.fp_interface;
2617         break;
2618       case FIB_PATH_TYPE_ATTACHED:
2619         api_rpath->rpath.frp_sw_if_index = path->attached.fp_interface;
2620         break;
2621       case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
2622         api_rpath->rpath.frp_sw_if_index = path->attached_next_hop.fp_interface;
2623         api_rpath->rpath.frp_addr = path->attached_next_hop.fp_nh;
2624         break;
2625       case FIB_PATH_TYPE_BIER_FMASK:
2626         api_rpath->rpath.frp_fib_index = path->bier_fmask.fp_bier_fib;
2627         api_rpath->rpath.frp_addr = path->bier_fmask.fp_nh;
2628         break;
2629       case FIB_PATH_TYPE_SPECIAL:
2630         break;
2631       case FIB_PATH_TYPE_DEAG:
2632         api_rpath->rpath.frp_fib_index = path->deag.fp_tbl_id;
2633         break;
2634       case FIB_PATH_TYPE_RECURSIVE:
2635         api_rpath->rpath.frp_addr = path->recursive.fp_nh.fp_ip;
2636         break;
2637       default:
2638         break;
2639       }
2640
2641     return (FIB_PATH_LIST_WALK_CONTINUE);
2642 }
2643
2644 dpo_proto_t
2645 fib_path_get_proto (fib_node_index_t path_index)
2646 {
2647     fib_path_t *path;
2648
2649     path = fib_path_get(path_index);
2650
2651     return (path->fp_nh_proto);
2652 }
2653
2654 void
2655 fib_path_module_init (void)
2656 {
2657     fib_node_register_type (FIB_NODE_TYPE_PATH, &fib_path_vft);
2658 }
2659
2660 static clib_error_t *
2661 show_fib_path_command (vlib_main_t * vm,
2662                         unformat_input_t * input,
2663                         vlib_cli_command_t * cmd)
2664 {
2665     fib_node_index_t pi;
2666     fib_path_t *path;
2667
2668     if (unformat (input, "%d", &pi))
2669     {
2670         /*
2671          * show one in detail
2672          */
2673         if (!pool_is_free_index(fib_path_pool, pi))
2674         {
2675             path = fib_path_get(pi);
2676             u8 *s = fib_path_format(pi, NULL);
2677             s = format(s, "children:");
2678             s = fib_node_children_format(path->fp_node.fn_children, s);
2679             vlib_cli_output (vm, "%s", s);
2680             vec_free(s);
2681         }
2682         else
2683         {
2684             vlib_cli_output (vm, "path %d invalid", pi);
2685         }
2686     }
2687     else
2688     {
2689         vlib_cli_output (vm, "FIB Paths");
2690         pool_foreach(path, fib_path_pool,
2691         ({
2692             vlib_cli_output (vm, "%U", format_fib_path, path);
2693         }));
2694     }
2695
2696     return (NULL);
2697 }
2698
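/*
 * Example CLI usage (illustrative):
 *     vpp# show fib paths
 *     vpp# show fib paths 12
 * The second form shows one path, and its children, in detail.
 */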
2699 VLIB_CLI_COMMAND (show_fib_path, static) = {
2700   .path = "show fib paths",
2701   .function = show_fib_path_command,
2702   .short_help = "show fib paths [<index>]",
2703 };