fib: P2P interfaces do not need specific multicast adjacencies
[vpp.git] / src / vnet / fib / fib_path.c
1 /*
2  * Copyright (c) 2016 Cisco and/or its affiliates.
3  * Licensed under the Apache License, Version 2.0 (the "License");
4  * you may not use this file except in compliance with the License.
5  * You may obtain a copy of the License at:
6  *
7  *     http://www.apache.org/licenses/LICENSE-2.0
8  *
9  * Unless required by applicable law or agreed to in writing, software
10  * distributed under the License is distributed on an "AS IS" BASIS,
11  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12  * See the License for the specific language governing permissions and
13  * limitations under the License.
14  */
15
16 #include <vlib/vlib.h>
17 #include <vnet/vnet.h>
18 #include <vnet/ip/format.h>
19 #include <vnet/ip/ip.h>
20 #include <vnet/dpo/drop_dpo.h>
21 #include <vnet/dpo/receive_dpo.h>
22 #include <vnet/dpo/load_balance_map.h>
23 #include <vnet/dpo/lookup_dpo.h>
24 #include <vnet/dpo/interface_rx_dpo.h>
25 #include <vnet/dpo/mpls_disposition.h>
26 #include <vnet/dpo/dvr_dpo.h>
27 #include <vnet/dpo/ip_null_dpo.h>
28 #include <vnet/dpo/classify_dpo.h>
29 #include <vnet/dpo/pw_cw.h>
30
31 #include <vnet/adj/adj.h>
32 #include <vnet/adj/adj_mcast.h>
33
34 #include <vnet/fib/fib_path.h>
35 #include <vnet/fib/fib_node.h>
36 #include <vnet/fib/fib_table.h>
37 #include <vnet/fib/fib_entry.h>
38 #include <vnet/fib/fib_path_list.h>
39 #include <vnet/fib/fib_internal.h>
40 #include <vnet/fib/fib_urpf_list.h>
41 #include <vnet/fib/mpls_fib.h>
42 #include <vnet/fib/fib_path_ext.h>
43 #include <vnet/udp/udp_encap.h>
44 #include <vnet/bier/bier_fmask.h>
45 #include <vnet/bier/bier_table.h>
46 #include <vnet/bier/bier_imp.h>
47 #include <vnet/bier/bier_disp_table.h>
48
49 /**
50  * Enumeration of path types
51  */
52 typedef enum fib_path_type_t_ {
53     /**
54      * Marker. Add new types after this one.
55      */
56     FIB_PATH_TYPE_FIRST = 0,
57     /**
58      * Attached-nexthop. An interface and a nexthop are known.
59      */
60     FIB_PATH_TYPE_ATTACHED_NEXT_HOP = FIB_PATH_TYPE_FIRST,
61     /**
62      * attached. Only the interface is known.
63      */
64     FIB_PATH_TYPE_ATTACHED,
65     /**
66      * recursive. Only the next-hop is known.
67      */
68     FIB_PATH_TYPE_RECURSIVE,
69     /**
70      * special. nothing is known. so we drop.
71      */
72     FIB_PATH_TYPE_SPECIAL,
73     /**
74      * exclusive. user provided adj.
75      */
76     FIB_PATH_TYPE_EXCLUSIVE,
77     /**
78      * deag. Link to a lookup adj in the next table
79      */
80     FIB_PATH_TYPE_DEAG,
81     /**
82      * interface receive.
83      */
84     FIB_PATH_TYPE_INTF_RX,
85     /**
86      * Path resolves via a UDP encap object.
87      */
88     FIB_PATH_TYPE_UDP_ENCAP,
89     /**
90      * receive. it's for-us.
91      */
92     FIB_PATH_TYPE_RECEIVE,
93     /**
94      * bier-imp. it's via a BIER imposition.
95      */
96     FIB_PATH_TYPE_BIER_IMP,
97     /**
98      * bier-fmask. it's via a BIER ECMP-table.
99      */
100     FIB_PATH_TYPE_BIER_TABLE,
101     /**
102      * bier-fmask. it's via a BIER f-mask.
103      */
104     FIB_PATH_TYPE_BIER_FMASK,
105     /**
106      * via a DVR.
107      */
108     FIB_PATH_TYPE_DVR,
109     /**
110      * Marker. Add new types before this one, then update it.
111      */
112     FIB_PATH_TYPE_LAST = FIB_PATH_TYPE_DVR,
113 } __attribute__ ((packed)) fib_path_type_t;
114
115 /**
116  * The maximum number of path_types
117  */
118 #define FIB_PATH_TYPE_MAX (FIB_PATH_TYPE_LAST + 1)
119
120 #define FIB_PATH_TYPES {                                        \
121     [FIB_PATH_TYPE_ATTACHED_NEXT_HOP] = "attached-nexthop",     \
122     [FIB_PATH_TYPE_ATTACHED]          = "attached",             \
123     [FIB_PATH_TYPE_RECURSIVE]         = "recursive",            \
124     [FIB_PATH_TYPE_SPECIAL]           = "special",              \
125     [FIB_PATH_TYPE_EXCLUSIVE]         = "exclusive",            \
126     [FIB_PATH_TYPE_DEAG]              = "deag",                 \
127     [FIB_PATH_TYPE_INTF_RX]           = "intf-rx",              \
128     [FIB_PATH_TYPE_UDP_ENCAP]         = "udp-encap",            \
129     [FIB_PATH_TYPE_RECEIVE]           = "receive",              \
130     [FIB_PATH_TYPE_BIER_IMP]          = "bier-imp",             \
131     [FIB_PATH_TYPE_BIER_TABLE]        = "bier-table",           \
132     [FIB_PATH_TYPE_BIER_FMASK]        = "bier-fmask",           \
133     [FIB_PATH_TYPE_DVR]               = "dvr",                  \
134 }
135
136 #define FOR_EACH_FIB_PATH_TYPE(_item)           \
137     for (_item = FIB_PATH_TYPE_FIRST;           \
138          _item <= FIB_PATH_TYPE_LAST;           \
139          _item++)
140
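/*
 * Editorial sketch (not part of the original source): the FIB_PATH_TYPES
 * names table and the FOR_EACH_FIB_PATH_TYPE iterator are designed to be
 * used together, e.g. in a show/debug routine. The locals below are
 * hypothetical:
 *
 *     static const char *names[] = FIB_PATH_TYPES;
 *     fib_path_type_t pt;
 *
 *     FOR_EACH_FIB_PATH_TYPE(pt) {
 *         fformat (stdout, "%d: %s\n", pt, names[pt]);
 *     }
 */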
141 /**
142  * Enumeration of path operational (i.e. derived) attributes
143  */
144 typedef enum fib_path_oper_attribute_t_ {
145     /**
146      * Marker. Add new types after this one.
147      */
148     FIB_PATH_OPER_ATTRIBUTE_FIRST = 0,
149     /**
150      * The path forms part of a recursive loop.
151      */
152     FIB_PATH_OPER_ATTRIBUTE_RECURSIVE_LOOP = FIB_PATH_OPER_ATTRIBUTE_FIRST,
153     /**
154      * The path is resolved
155      */
156     FIB_PATH_OPER_ATTRIBUTE_RESOLVED,
157     /**
158      * The path is attached, despite what the next-hop may say.
159      */
160     FIB_PATH_OPER_ATTRIBUTE_ATTACHED,
161     /**
162      * The path has become a permanent drop.
163      */
164     FIB_PATH_OPER_ATTRIBUTE_DROP,
165     /**
166      * Marker. Add new types before this one, then update it.
167      */
168     FIB_PATH_OPER_ATTRIBUTE_LAST = FIB_PATH_OPER_ATTRIBUTE_DROP,
169 } __attribute__ ((packed)) fib_path_oper_attribute_t;
170
171 /**
172  * The maximum number of path operational attributes
173  */
174 #define FIB_PATH_OPER_ATTRIBUTE_MAX (FIB_PATH_OPER_ATTRIBUTE_LAST + 1)
175
176 #define FIB_PATH_OPER_ATTRIBUTES {                                      \
177     [FIB_PATH_OPER_ATTRIBUTE_RECURSIVE_LOOP] = "recursive-loop",        \
178     [FIB_PATH_OPER_ATTRIBUTE_RESOLVED]       = "resolved",              \
179     [FIB_PATH_OPER_ATTRIBUTE_DROP]           = "drop",                  \
180 }
181
182 #define FOR_EACH_FIB_PATH_OPER_ATTRIBUTE(_item) \
183     for (_item = FIB_PATH_OPER_ATTRIBUTE_FIRST; \
184          _item <= FIB_PATH_OPER_ATTRIBUTE_LAST; \
185          _item++)
186
187 /**
188  * Path flags from the attributes
189  */
190 typedef enum fib_path_oper_flags_t_ {
191     FIB_PATH_OPER_FLAG_NONE = 0,
192     FIB_PATH_OPER_FLAG_RECURSIVE_LOOP = (1 << FIB_PATH_OPER_ATTRIBUTE_RECURSIVE_LOOP),
193     FIB_PATH_OPER_FLAG_DROP = (1 << FIB_PATH_OPER_ATTRIBUTE_DROP),
194     FIB_PATH_OPER_FLAG_RESOLVED = (1 << FIB_PATH_OPER_ATTRIBUTE_RESOLVED),
195     FIB_PATH_OPER_FLAG_ATTACHED = (1 << FIB_PATH_OPER_ATTRIBUTE_ATTACHED),
196 } __attribute__ ((packed)) fib_path_oper_flags_t;
197
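/*
 * Editorial note (not part of the original source): the flag values follow
 * directly from the attribute ordering above:
 *
 *     FIB_PATH_OPER_FLAG_RECURSIVE_LOOP = (1 << 0) = 0x1
 *     FIB_PATH_OPER_FLAG_RESOLVED       = (1 << 1) = 0x2
 *     FIB_PATH_OPER_FLAG_ATTACHED       = (1 << 2) = 0x4
 *     FIB_PATH_OPER_FLAG_DROP           = (1 << 3) = 0x8
 *
 * so, for example, a path that is both resolved and attached carries
 * fp_oper_flags of 0x6.
 */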
198 /**
199  * A FIB path
200  */
201 typedef struct fib_path_t_ {
202     /**
203      * A path is a node in the FIB graph.
204      */
205     fib_node_t fp_node;
206
207     /**
208      * The index of the path-list to which this path belongs
209      */
210     u32 fp_pl_index;
211
212     /**
213      * This marks the start of the memory area used to hash
214      * the path
215      */
216     STRUCT_MARK(path_hash_start);
217
218     /**
219      * Configuration Flags
220      */
221     fib_path_cfg_flags_t fp_cfg_flags;
222
223     /**
224      * The type of the path. This is the selector for the union
225      */
226     fib_path_type_t fp_type;
227
228     /**
229      * The protocol of the next-hop, i.e. the address family of the
230      * next-hop's address. We can't derive this from the address itself
231      * since the address can be all zeros
232      */
233     dpo_proto_t fp_nh_proto;
234
235     /**
236      * UCMP [unnormalised] weight
237      */
238     u8 fp_weight;
239
240     /**
241      * A path preference. 0 is the best.
242      * Only paths of the best preference, that are 'up', are considered
243      * for forwarding.
244      */
245     u8 fp_preference;
246
247     /**
248      * per-type union of the data required to resolve the path
249      */
250     union {
251         struct {
252             /**
253              * The next-hop
254              */
255             ip46_address_t fp_nh;
256             /**
257              * The interface
258              */
259             u32 fp_interface;
260         } attached_next_hop;
261         struct {
262             /**
263              * The interface
264              */
265             u32 fp_interface;
266         } attached;
267         struct {
268             union
269             {
270                 /**
271                  * The next-hop
272                  */
273                 ip46_address_t fp_ip;
274                 struct {
275                     /**
276                      * The local label to resolve through.
277                      */
278                     mpls_label_t fp_local_label;
279                     /**
280                      * The EOS bit of the resolving label
281                      */
282                     mpls_eos_bit_t fp_eos;
283                 };
284             } fp_nh;
285             union {
286                 /**
287                  * The FIB table index in which to find the next-hop.
288                  */
289                 fib_node_index_t fp_tbl_id;
290                 /**
291                  * The BIER FIB the fmask is in
292                  */
293                 index_t fp_bier_fib;
294             };
295         } recursive;
296         struct {
297             /**
298              * BIER FMask ID
299              */
300             index_t fp_bier_fmask;
301         } bier_fmask;
302         struct {
303             /**
304              * The BIER table's ID
305              */
306             bier_table_id_t fp_bier_tbl;
307         } bier_table;
308         struct {
309             /**
310              * The BIER imposition object
311              * this is part of the path's key, since the index_t
312              * of an imposition object is the object's key.
313              */
314             index_t fp_bier_imp;
315         } bier_imp;
316         struct {
317             /**
318      * The FIB index in which to perform the next lookup
319              */
320             fib_node_index_t fp_tbl_id;
321             /**
322              * The RPF-ID to tag the packets with
323              */
324             fib_rpf_id_t fp_rpf_id;
325         } deag;
326         struct {
327         } special;
328         struct {
329             /**
330              * The user provided 'exclusive' DPO
331              */
332             dpo_id_t fp_ex_dpo;
333         } exclusive;
334         struct {
335             /**
336              * The interface on which the local address is configured
337              */
338             u32 fp_interface;
339             /**
340              * The next-hop
341              */
342             ip46_address_t fp_addr;
343         } receive;
344         struct {
345             /**
346              * The interface on which the packets will be input.
347              */
348             u32 fp_interface;
349         } intf_rx;
350         struct {
351             /**
352              * The UDP Encap object this path resolves through
353              */
354             u32 fp_udp_encap_id;
355         } udp_encap;
356         struct {
357             /**
358              * The classify table this path resolves through
359              */
360             u32 fp_classify_table_id;
361         } classify;
362         struct {
363             /**
364              * The interface
365              */
366             u32 fp_interface;
367         } dvr;
368     };
369     STRUCT_MARK(path_hash_end);
370
371     /**
372      * Members in this last section represent information that is
373      * derived during resolution. It should not be copied to new paths
374      * nor compared.
375      */
376
377     /**
378      * Operational Flags
379      */
380     fib_path_oper_flags_t fp_oper_flags;
381
382     union {
383         /**
384          * the resolving via fib. this is derived state and so not part
385          * of the path's hash.
386          */
387         fib_node_index_t fp_via_fib;
388         /**
389          * the resolving bier-table
390          */
391         index_t fp_via_bier_tbl;
392         /**
393          * the resolving bier-fmask
394          */
395         index_t fp_via_bier_fmask;
396     };
397
398     /**
399      * The Data-path objects through which this path resolves for IP.
400      */
401     dpo_id_t fp_dpo;
402
403     /**
404      * the index of this path in the parent's child list.
405      */
406     u32 fp_sibling;
407 } fib_path_t;
408
409 /*
410  * Array of strings/names for the path types and attributes
411  */
412 static const char *fib_path_type_names[] = FIB_PATH_TYPES;
413 static const char *fib_path_oper_attribute_names[] = FIB_PATH_OPER_ATTRIBUTES;
414 static const char *fib_path_cfg_attribute_names[]  = FIB_PATH_CFG_ATTRIBUTES;
415
416 /*
417  * The memory pool from which we allocate all the paths
418  */
419 static fib_path_t *fib_path_pool;
420
421 /**
422  * the logger
423  */
424 vlib_log_class_t fib_path_logger;
425
426 /*
427  * Debug macro
428  */
429 #define FIB_PATH_DBG(_p, _fmt, _args...)                                \
430 {                                                                       \
431     vlib_log_debug (fib_path_logger,                                    \
432                     "[%U]: " _fmt,                                      \
433                     format_fib_path, fib_path_get_index(_p), 0,         \
434                     FIB_PATH_FORMAT_FLAGS_ONE_LINE,                     \
435                     ##_args);                                           \
436 }
437
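/*
 * Editorial sketch (not part of the original source): typical use of the
 * debug macro. The message is prefixed with the one-line format of the
 * path; the argument values here are hypothetical:
 *
 *     FIB_PATH_DBG(path, "stacking on adj:%d", path->fp_dpo.dpoi_index);
 */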
438 static fib_path_t *
439 fib_path_get (fib_node_index_t index)
440 {
441     return (pool_elt_at_index(fib_path_pool, index));
442 }
443
444 static fib_node_index_t 
445 fib_path_get_index (fib_path_t *path)
446 {
447     return (path - fib_path_pool);
448 }
449
450 static fib_node_t *
451 fib_path_get_node (fib_node_index_t index)
452 {
453     return ((fib_node_t*)fib_path_get(index));
454 }
455
456 static fib_path_t*
457 fib_path_from_fib_node (fib_node_t *node)
458 {
459     ASSERT(FIB_NODE_TYPE_PATH == node->fn_type);
460     return ((fib_path_t*)node);
461 }
462
463 u8 *
464 format_fib_path (u8 * s, va_list * args)
465 {
466     fib_node_index_t path_index = va_arg (*args, fib_node_index_t);
467     u32 indent = va_arg (*args, u32);
468     fib_format_path_flags_t flags =  va_arg (*args, fib_format_path_flags_t);
469     vnet_main_t * vnm = vnet_get_main();
470     fib_path_oper_attribute_t oattr;
471     fib_path_cfg_attribute_t cattr;
472     fib_path_t *path;
473     const char *eol;
474
475     if (flags & FIB_PATH_FORMAT_FLAGS_ONE_LINE)
476     {
477         eol = "";
478     }
479     else
480     {
481         eol = "\n";
482     }
483
484     path = fib_path_get(path_index);
485
486     s = format (s, "%Upath:[%d] ", format_white_space, indent,
487                 fib_path_get_index(path));
488     s = format (s, "pl-index:%d ", path->fp_pl_index);
489     s = format (s, "%U ", format_dpo_proto, path->fp_nh_proto);
490     s = format (s, "weight=%d ", path->fp_weight);
491     s = format (s, "pref=%d ", path->fp_preference);
492     s = format (s, "%s: ", fib_path_type_names[path->fp_type]);
493     if (FIB_PATH_OPER_FLAG_NONE != path->fp_oper_flags) {
494         s = format(s, " oper-flags:");
495         FOR_EACH_FIB_PATH_OPER_ATTRIBUTE(oattr) {
496             if ((1<<oattr) & path->fp_oper_flags) {
497                 s = format (s, "%s,", fib_path_oper_attribute_names[oattr]);
498             }
499         }
500     }
501     if (FIB_PATH_CFG_FLAG_NONE != path->fp_cfg_flags) {
502         s = format(s, " cfg-flags:");
503         FOR_EACH_FIB_PATH_CFG_ATTRIBUTE(cattr) {
504             if ((1<<cattr) & path->fp_cfg_flags) {
505                 s = format (s, "%s,", fib_path_cfg_attribute_names[cattr]);
506             }
507         }
508     }
509     if (!(flags & FIB_PATH_FORMAT_FLAGS_ONE_LINE))
510         s = format(s, "\n%U", format_white_space, indent+2);
511
512     switch (path->fp_type)
513     {
514     case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
515         s = format (s, "%U", format_ip46_address,
516                     &path->attached_next_hop.fp_nh,
517                     IP46_TYPE_ANY);
518         if (path->fp_oper_flags & FIB_PATH_OPER_FLAG_DROP)
519         {
520             s = format (s, " if_index:%d", path->attached_next_hop.fp_interface);
521         }
522         else
523         {
524             s = format (s, " %U",
525                         format_vnet_sw_interface_name,
526                         vnm,
527                         vnet_get_sw_interface(
528                             vnm,
529                             path->attached_next_hop.fp_interface));
530             if (vnet_sw_interface_is_p2p(vnet_get_main(),
531                                          path->attached_next_hop.fp_interface))
532             {
533                 s = format (s, " (p2p)");
534             }
535         }
536         if (!dpo_id_is_valid(&path->fp_dpo))
537         {
538             s = format(s, "%s%Uunresolved", eol, format_white_space, indent+2);
539         }
540         else
541         {
542             s = format(s, "%s%U%U", eol,
543                        format_white_space, indent,
544                        format_dpo_id,
545                        &path->fp_dpo, 13);
546         }
547         break;
548     case FIB_PATH_TYPE_ATTACHED:
549         if (path->fp_oper_flags & FIB_PATH_OPER_FLAG_DROP)
550         {
551             s = format (s, "if_index:%d", path->attached_next_hop.fp_interface);
552         }
553         else
554         {
555             s = format (s, " %U",
556                         format_vnet_sw_interface_name,
557                         vnm,
558                         vnet_get_sw_interface(
559                             vnm,
560                             path->attached.fp_interface));
561         }
562         break;
563     case FIB_PATH_TYPE_RECURSIVE:
564         if (DPO_PROTO_MPLS == path->fp_nh_proto)
565         {
566             s = format (s, "via %U %U",
567                         format_mpls_unicast_label,
568                         path->recursive.fp_nh.fp_local_label,
569                         format_mpls_eos_bit,
570                         path->recursive.fp_nh.fp_eos);
571         }
572         else
573         {
574             s = format (s, "via %U",
575                         format_ip46_address,
576                         &path->recursive.fp_nh.fp_ip,
577                         IP46_TYPE_ANY);
578         }
579         s = format (s, " in fib:%d",
580                     path->recursive.fp_tbl_id,
581                     path->fp_via_fib); 
582         s = format (s, " via-fib:%d", path->fp_via_fib); 
583         s = format (s, " via-dpo:[%U:%d]",
584                     format_dpo_type, path->fp_dpo.dpoi_type, 
585                     path->fp_dpo.dpoi_index);
586
587         break;
588     case FIB_PATH_TYPE_UDP_ENCAP:
589         s = format (s, "UDP-encap ID:%d", path->udp_encap.fp_udp_encap_id);
590         break;
591     case FIB_PATH_TYPE_BIER_TABLE:
592         s = format (s, "via bier-table:[%U}",
593                     format_bier_table_id,
594                     &path->bier_table.fp_bier_tbl);
595         s = format (s, " via-dpo:[%U:%d]",
596                     format_dpo_type, path->fp_dpo.dpoi_type,
597                     path->fp_dpo.dpoi_index);
598         break;
599     case FIB_PATH_TYPE_BIER_FMASK:
600         s = format (s, "via-fmask:%d", path->bier_fmask.fp_bier_fmask); 
601         s = format (s, " via-dpo:[%U:%d]",
602                     format_dpo_type, path->fp_dpo.dpoi_type, 
603                     path->fp_dpo.dpoi_index);
604         break;
605     case FIB_PATH_TYPE_BIER_IMP:
606         s = format (s, "via %U", format_bier_imp,
607                     path->bier_imp.fp_bier_imp, 0, BIER_SHOW_BRIEF);
608         break;
609     case FIB_PATH_TYPE_DVR:
610         s = format (s, " %U",
611                     format_vnet_sw_interface_name,
612                     vnm,
613                     vnet_get_sw_interface(
614                         vnm,
615                         path->dvr.fp_interface));
616         break;
617     case FIB_PATH_TYPE_DEAG:
618         s = format (s, " %sfib-index:%d",
619                     (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_RPF_ID ?  "m" : ""),
620                     path->deag.fp_tbl_id);
621         break;
622     case FIB_PATH_TYPE_RECEIVE:
623     case FIB_PATH_TYPE_INTF_RX:
624     case FIB_PATH_TYPE_SPECIAL:
625     case FIB_PATH_TYPE_EXCLUSIVE:
626         if (dpo_id_is_valid(&path->fp_dpo))
627         {
628             s = format(s, "%U", format_dpo_id,
629                        &path->fp_dpo, indent+2);
630         }
631         break;
632     }
633     return (s);
634 }
635
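/*
 * Editorial sketch (not part of the original source): format_fib_path is a
 * standard format_function_t, invoked via "%U" with the path index, an
 * indent and the format flags, e.g.
 *
 *     s = format (s, "%U", format_fib_path, path_index, 2,
 *                 FIB_PATH_FORMAT_FLAGS_ONE_LINE);
 */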
636 /*
637  * fib_path_last_lock_gone
638  *
639  * We don't share paths, we share path lists, so the [un]lock functions
640  * are no-ops
641  */
642 static void
643 fib_path_last_lock_gone (fib_node_t *node)
644 {
645     ASSERT(0);
646 }
647
648 static const adj_index_t
649 fib_path_attached_next_hop_get_adj (fib_path_t *path,
650                                     vnet_link_t link)
651 {
652     if (vnet_sw_interface_is_p2p(vnet_get_main(),
653                                  path->attached_next_hop.fp_interface))
654     {
655         /*
656          * if the interface is p2p then the adj for the specific
657          * neighbour on that link will never exist. on p2p links
658          * the subnet address (the attached route) links to the
659          * auto-adj (see below), we want that adj here too.
660          */
661         return (adj_nbr_add_or_lock(dpo_proto_to_fib(path->fp_nh_proto),
662                                     link,
663                                     &zero_addr,
664                                     path->attached_next_hop.fp_interface));
665     }
666     else
667     {
668         return (adj_nbr_add_or_lock(dpo_proto_to_fib(path->fp_nh_proto),
669                                     link,
670                                     &path->attached_next_hop.fp_nh,
671                                     path->attached_next_hop.fp_interface));
672     }
673 }
674
675 static void
676 fib_path_attached_next_hop_set (fib_path_t *path)
677 {
678     /*
679      * resolve directly via the adjacency described by the
680      * interface and next-hop
681      */
682     dpo_set(&path->fp_dpo,
683             DPO_ADJACENCY,
684             path->fp_nh_proto,
685             fib_path_attached_next_hop_get_adj(
686                  path,
687                  dpo_proto_to_link(path->fp_nh_proto)));
688
689     /*
690      * become a child of the adjacency so we receive updates
691      * when its rewrite changes
692      */
693     path->fp_sibling = adj_child_add(path->fp_dpo.dpoi_index,
694                                      FIB_NODE_TYPE_PATH,
695                                      fib_path_get_index(path));
696
697     if (!vnet_sw_interface_is_up(vnet_get_main(),
698                                  path->attached_next_hop.fp_interface) ||
699         !adj_is_up(path->fp_dpo.dpoi_index))
700     {
701         path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
702     }
703 }
704
705 static const adj_index_t
706 fib_path_attached_get_adj (fib_path_t *path,
707                            vnet_link_t link)
708 {
709     if (vnet_sw_interface_is_p2p(vnet_get_main(),
710                                  path->attached.fp_interface))
711     {
712         /*
713          * point-2-point interfaces do not require a glean, since
714          * there is nothing to ARP. Install a rewrite/nbr adj instead
715          */
716         return (adj_nbr_add_or_lock(dpo_proto_to_fib(path->fp_nh_proto),
717                                     link,
718                                     &zero_addr,
719                                     path->attached.fp_interface));
720     }
721     else
722     {
723         return (adj_glean_add_or_lock(dpo_proto_to_fib(path->fp_nh_proto),
724                                       link,
725                                       path->attached.fp_interface,
726                                       NULL));
727     }
728 }
729
730 /*
731  * create or update the path's recursive adj
732  */
733 static void
734 fib_path_recursive_adj_update (fib_path_t *path,
735                                fib_forward_chain_type_t fct,
736                                dpo_id_t *dpo)
737 {
738     dpo_id_t via_dpo = DPO_INVALID;
739
740     /*
741      * get the DPO to resolve through from the via-entry
742      */
743     fib_entry_contribute_forwarding(path->fp_via_fib,
744                                     fct,
745                                     &via_dpo);
746
747
748     /*
749      * hope for the best - clear if restrictions apply.
750      */
751     path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RESOLVED;
752
753     /*
754      * Validate any recursion constraints and over-ride the via
755      * adj if not met
756      */
757     if (path->fp_oper_flags & FIB_PATH_OPER_FLAG_RECURSIVE_LOOP)
758     {
759         path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
760         dpo_copy(&via_dpo, drop_dpo_get(path->fp_nh_proto));
761     }
762     else if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_RESOLVE_HOST)
763     {
764         /*
765          * the via FIB must be a host route.
766          * note the via FIB just added will always be a host route
767          * since it is an RR source added host route. So what we need to
768          * check is whether the route has other sources. If it does then
769          * some other source has added it as a host route. If it doesn't
770          * then it was added only here and inherits forwarding from a cover.
771          * the cover is not a host route.
772          * The RR source is the lowest priority source, so we check if it
773          * is the best. if it is there are no other sources.
774          */
775         if (fib_entry_get_best_source(path->fp_via_fib) >= FIB_SOURCE_RR)
776         {
777             path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
778             dpo_copy(&via_dpo, drop_dpo_get(path->fp_nh_proto));
779
780             /*
781              * PIC edge trigger. let the load-balance maps know
782              */
783             load_balance_map_path_state_change(fib_path_get_index(path));
784         }
785     }
786     else if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_RESOLVE_ATTACHED)
787     {
788         /*
789          * RR source entries inherit the flags from the cover, so
790          * we can check the via directly
791          */
792         if (!(FIB_ENTRY_FLAG_ATTACHED & fib_entry_get_flags(path->fp_via_fib)))
793         {
794             path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
795             dpo_copy(&via_dpo, drop_dpo_get(path->fp_nh_proto));
796
797             /*
798              * PIC edge trigger. let the load-balance maps know
799              */
800             load_balance_map_path_state_change(fib_path_get_index(path));
801         }
802     }
803     /*
804      * check for over-riding factors on the FIB entry itself
805      */
806     if (!fib_entry_is_resolved(path->fp_via_fib))
807     {
808         path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
809         dpo_copy(&via_dpo, drop_dpo_get(path->fp_nh_proto));
810
811         /*
812          * PIC edge trigger. let the load-balance maps know
813          */
814         load_balance_map_path_state_change(fib_path_get_index(path));
815     }
816
817     /*
818      * If this path is contributing a drop, then it's not resolved
819      */
820     if (dpo_is_drop(&via_dpo) || load_balance_is_drop(&via_dpo))
821     {
822         path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
823     }
824
825     /*
826      * update the path's contributed DPO
827      */
828     dpo_copy(dpo, &via_dpo);
829
830     FIB_PATH_DBG(path, "recursive update:");
831
832     dpo_reset(&via_dpo);
833 }
834
835 /*
836  * re-evaluate the forwarding state for a via-fmask path
837  */
838 static void
839 fib_path_bier_fmask_update (fib_path_t *path,
840                             dpo_id_t *dpo)
841 {
842     bier_fmask_contribute_forwarding(path->bier_fmask.fp_bier_fmask, dpo);
843
844     /*
845      * if we are stacking on the drop, then the path is not resolved
846      */
847     if (dpo_is_drop(dpo))
848     {
849         path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
850     }
851     else
852     {
853         path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RESOLVED;
854     }
855 }
856
857 /*
858  * fib_path_is_permanent_drop
859  *
860  * Return !0 if the path is configured to permanently drop,
861  * despite other attributes.
862  */
863 static int
864 fib_path_is_permanent_drop (fib_path_t *path)
865 {
866     return ((path->fp_cfg_flags & FIB_PATH_CFG_FLAG_DROP) ||
867             (path->fp_oper_flags & FIB_PATH_OPER_FLAG_DROP));
868 }
869
870 /*
871  * fib_path_unresolve
872  *
873  * Remove our dependency on the resolution target
874  */
875 static void
876 fib_path_unresolve (fib_path_t *path)
877 {
878     /*
879      * the forced drop path does not need unresolving
880      */
881     if (fib_path_is_permanent_drop(path))
882     {
883         return;
884     }
885
886     switch (path->fp_type)
887     {
888     case FIB_PATH_TYPE_RECURSIVE:
889         if (FIB_NODE_INDEX_INVALID != path->fp_via_fib)
890         {
891             fib_entry_child_remove(path->fp_via_fib,
892                                    path->fp_sibling);
893             fib_table_entry_special_remove(path->recursive.fp_tbl_id,
894                                            fib_entry_get_prefix(path->fp_via_fib),
895                                            FIB_SOURCE_RR);
896             fib_table_unlock(path->recursive.fp_tbl_id,
897                              dpo_proto_to_fib(path->fp_nh_proto),
898                              FIB_SOURCE_RR);
899             path->fp_via_fib = FIB_NODE_INDEX_INVALID;
900         }
901         break;
902     case FIB_PATH_TYPE_BIER_FMASK:
903         bier_fmask_child_remove(path->fp_via_bier_fmask,
904                                 path->fp_sibling);
905         break;
906     case FIB_PATH_TYPE_BIER_IMP:
907         bier_imp_unlock(path->fp_dpo.dpoi_index);
908         break;
909     case FIB_PATH_TYPE_BIER_TABLE:
910         bier_table_ecmp_unlock(path->fp_via_bier_tbl);
911         break;
912     case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
913         adj_child_remove(path->fp_dpo.dpoi_index,
914                          path->fp_sibling);
915         adj_unlock(path->fp_dpo.dpoi_index);
916         break;
917     case FIB_PATH_TYPE_ATTACHED:
918         adj_child_remove(path->fp_dpo.dpoi_index,
919                          path->fp_sibling);
920         adj_unlock(path->fp_dpo.dpoi_index);
921         break;
922     case FIB_PATH_TYPE_UDP_ENCAP:
923         udp_encap_unlock(path->fp_dpo.dpoi_index);
924         break;
925     case FIB_PATH_TYPE_EXCLUSIVE:
926         dpo_reset(&path->exclusive.fp_ex_dpo);
927         break;
928     case FIB_PATH_TYPE_SPECIAL:
929     case FIB_PATH_TYPE_RECEIVE:
930     case FIB_PATH_TYPE_INTF_RX:
931     case FIB_PATH_TYPE_DEAG:
932     case FIB_PATH_TYPE_DVR:
933         /*
934          * these hold only the path's DPO, which is reset below.
935          */
936         break;
937     }
938
939     /*
940      * release the adj we were holding and pick up the
941      * drop just in case.
942      */
943     dpo_reset(&path->fp_dpo);
944     path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
945
946     return;
947 }
948
949 static fib_forward_chain_type_t
950 fib_path_to_chain_type (const fib_path_t *path)
951 {
952     if (DPO_PROTO_MPLS == path->fp_nh_proto)
953     {
954         if (FIB_PATH_TYPE_RECURSIVE == path->fp_type &&
955             MPLS_EOS == path->recursive.fp_nh.fp_eos)
956         {
957             return (FIB_FORW_CHAIN_TYPE_MPLS_EOS);
958         }
959         else
960         {
961             return (FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS);
962         }
963     }
964     else
965     {
966         return (fib_forw_chain_type_from_dpo_proto(path->fp_nh_proto));
967     }
968 }
969
970 /*
971  * fib_path_back_walk_notify
972  *
973  * A back walk has reached this path.
974  */
975 static fib_node_back_walk_rc_t
976 fib_path_back_walk_notify (fib_node_t *node,
977                            fib_node_back_walk_ctx_t *ctx)
978 {
979     fib_path_t *path;
980
981     path = fib_path_from_fib_node(node);
982
983     FIB_PATH_DBG(path, "bw:%U",
984                  format_fib_node_bw_reason, ctx->fnbw_reason);
985
986     switch (path->fp_type)
987     {
988     case FIB_PATH_TYPE_RECURSIVE:
989         if (FIB_NODE_BW_REASON_FLAG_EVALUATE & ctx->fnbw_reason)
990         {
991             /*
992              * modify the recursive adjacency to use the new forwarding
993              * of the via-fib.
994              * this update is visible to packets in flight in the DP.
995              */
996             fib_path_recursive_adj_update(
997                 path,
998                 fib_path_to_chain_type(path),
999                 &path->fp_dpo);
1000         }
1001         if ((FIB_NODE_BW_REASON_FLAG_ADJ_UPDATE & ctx->fnbw_reason) ||
1002             (FIB_NODE_BW_REASON_FLAG_ADJ_DOWN   & ctx->fnbw_reason))
1003         {
1004             /*
1005              * ADJ updates (complete<->incomplete) do not need to propagate to
1006              * recursive entries.
1007              * The only reason it's needed as far back as here is that the adj
1008              * and the incomplete adj are a different DPO type, so the LBs need
1009              * to re-stack.
1010              * If this walk was quashed in the fib_entry, then any non-fib_path
1011              * children (like tunnels that collapse out the LB when they stack)
1012              * would not see the update.
1013              */
1014             return (FIB_NODE_BACK_WALK_CONTINUE);
1015         }
1016         break;
1017     case FIB_PATH_TYPE_BIER_FMASK:
1018         if (FIB_NODE_BW_REASON_FLAG_EVALUATE & ctx->fnbw_reason)
1019         {
1020             /*
1021              * update to use the BIER fmask's new forwarding
1022              */
1023             fib_path_bier_fmask_update(path, &path->fp_dpo);
1024         }
1025         if ((FIB_NODE_BW_REASON_FLAG_ADJ_UPDATE & ctx->fnbw_reason) ||
1026             (FIB_NODE_BW_REASON_FLAG_ADJ_DOWN   & ctx->fnbw_reason))
1027         {
1028             /*
1029              * ADJ updates (complete<->incomplete) do not need to propagate to
1030              * recursive entries.
1031              * The only reason it's needed as far back as here is that the adj
1032              * and the incomplete adj are a different DPO type, so the LBs need
1033              * to re-stack.
1034              * If this walk was quashed in the fib_entry, then any non-fib_path
1035              * children (like tunnels that collapse out the LB when they stack)
1036              * would not see the update.
1037              */
1038             return (FIB_NODE_BACK_WALK_CONTINUE);
1039         }
1040         break;
1041     case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
1042         /*
1043 FIXME comment
1044          * ADJ_UPDATE backwalks pass silently through here and up to
1045          * the path-list when the multipath adj collapse occurs.
1046          * The reason we do this is that the assumption is that VPP
1047          * runs in an environment where the Control-Plane is remote
1048          * and hence reacts slowly to link up/down. In order to remove
1049          * this down link from the ECMP set quickly, we back-walk.
1050          * VPP also has dedicated CPUs, so we are not stealing resources
1051          * from the CP to do so.
1052          */
1053         if (FIB_NODE_BW_REASON_FLAG_INTERFACE_UP & ctx->fnbw_reason)
1054         {
1055             if (path->fp_oper_flags & FIB_PATH_OPER_FLAG_RESOLVED)
1056             {
1057                 /*
1058                  * already resolved. no need to walk back again
1059                  */
1060                 return (FIB_NODE_BACK_WALK_CONTINUE);
1061             }
1062             path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RESOLVED;
1063         }
1064         if (FIB_NODE_BW_REASON_FLAG_INTERFACE_DOWN & ctx->fnbw_reason)
1065         {
1066             if (!(path->fp_oper_flags & FIB_PATH_OPER_FLAG_RESOLVED))
1067             {
1068                 /*
1069                  * already unresolved. no need to walk back again
1070                  */
1071                 return (FIB_NODE_BACK_WALK_CONTINUE);
1072             }
1073             path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
1074         }
1075         if (FIB_NODE_BW_REASON_FLAG_INTERFACE_DELETE & ctx->fnbw_reason)
1076         {
1077             /*
1078              * The interface this path resolves through has been deleted.
1079              * This will leave the path in a permanent drop state. The route
1080              * needs to be removed and readded (and hence the path-list deleted)
1081              * before it can forward again.
1082              */
1083             fib_path_unresolve(path);
1084             path->fp_oper_flags |= FIB_PATH_OPER_FLAG_DROP;
1085         }
1086         if (FIB_NODE_BW_REASON_FLAG_ADJ_UPDATE & ctx->fnbw_reason)
1087         {
1088             /*
1089              * restack the DPO to pick up the correct DPO sub-type
1090              */
1091             uword if_is_up;
1092             adj_index_t ai;
1093
1094             if_is_up = vnet_sw_interface_is_up(
1095                            vnet_get_main(),
1096                            path->attached_next_hop.fp_interface);
1097
1098             ai = fib_path_attached_next_hop_get_adj(
1099                      path,
1100                      dpo_proto_to_link(path->fp_nh_proto));
1101
1102             path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
1103             if (if_is_up && adj_is_up(ai))
1104             {
1105                 path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RESOLVED;
1106             }
1107
1108             dpo_set(&path->fp_dpo, DPO_ADJACENCY, path->fp_nh_proto, ai);
1109             adj_unlock(ai);
1110
1111             if (!if_is_up)
1112             {
1113                 /*
1114                  * If the interface is not up there is no reason to walk
1115                  * back to children. if we did they would only evaluate
1116                  * that this path is unresolved and hence it would
1117                  * not contribute the adjacency - so it would be wasted
1118                  * CPU time.
1119                  */
1120                 return (FIB_NODE_BACK_WALK_CONTINUE);
1121             }
1122         }
1123         if (FIB_NODE_BW_REASON_FLAG_ADJ_DOWN & ctx->fnbw_reason)
1124         {
1125             if (!(path->fp_oper_flags & FIB_PATH_OPER_FLAG_RESOLVED))
1126             {
1127                 /*
1128                  * already unresolved. no need to walk back again
1129                  */
1130                 return (FIB_NODE_BACK_WALK_CONTINUE);
1131             }
1132             /*
1133              * the adj has gone down. the path is no longer resolved.
1134              */
1135             path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
1136         }
1137         break;
1138     case FIB_PATH_TYPE_ATTACHED:
1139     case FIB_PATH_TYPE_DVR:
1140         /*
1141          * FIXME; this could schedule a lower priority walk, since attached
1142          * routes are not usually in ECMP configurations so the backwalk to
1143          * the FIB entry does not need to be high priority
1144          */
1145         if (FIB_NODE_BW_REASON_FLAG_INTERFACE_UP & ctx->fnbw_reason)
1146         {
1147             path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RESOLVED;
1148         }
1149         if (FIB_NODE_BW_REASON_FLAG_INTERFACE_DOWN & ctx->fnbw_reason)
1150         {
1151             path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
1152         }
1153         if (FIB_NODE_BW_REASON_FLAG_INTERFACE_DELETE & ctx->fnbw_reason)
1154         {
1155             fib_path_unresolve(path);
1156             path->fp_oper_flags |= FIB_PATH_OPER_FLAG_DROP;
1157         }
1158         break;
1159     case FIB_PATH_TYPE_UDP_ENCAP:
1160     {
1161         dpo_id_t via_dpo = DPO_INVALID;
1162
1163         /*
1164          * hope for the best - clear if restrictions apply.
1165          */
1166         path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RESOLVED;
1167
1168         udp_encap_contribute_forwarding(path->udp_encap.fp_udp_encap_id,
1169                                         path->fp_nh_proto,
1170                                         &via_dpo);
1171         /*
1172          * If this path is contributing a drop, then it's not resolved
1173          */
1174         if (dpo_is_drop(&via_dpo) || load_balance_is_drop(&via_dpo))
1175         {
1176             path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
1177         }
1178
1179         /*
1180          * update the path's contributed DPO
1181          */
1182         dpo_copy(&path->fp_dpo, &via_dpo);
1183         dpo_reset(&via_dpo);
1184         break;
1185     }
1186     case FIB_PATH_TYPE_INTF_RX:
1187         ASSERT(0);
1188     case FIB_PATH_TYPE_DEAG:
1189         /*
1190          * FIXME When VRF delete is allowed this will need a poke.
1191          */
1192     case FIB_PATH_TYPE_SPECIAL:
1193     case FIB_PATH_TYPE_RECEIVE:
1194     case FIB_PATH_TYPE_EXCLUSIVE:
1195     case FIB_PATH_TYPE_BIER_TABLE:
1196     case FIB_PATH_TYPE_BIER_IMP:
1197         /*
1198          * these path types have no parents. so to be
1199          * walked from one is unexpected.
1200          */
1201         ASSERT(0);
1202         break;
1203     }
1204
1205     /*
1206      * propagate the backwalk further to the path-list
1207      */
1208     fib_path_list_back_walk(path->fp_pl_index, ctx);
1209
1210     return (FIB_NODE_BACK_WALK_CONTINUE);
1211 }
1212
1213 static void
1214 fib_path_memory_show (void)
1215 {
1216     fib_show_memory_usage("Path",
1217                           pool_elts(fib_path_pool),
1218                           pool_len(fib_path_pool),
1219                           sizeof(fib_path_t));
1220 }
1221
1222 /*
1223  * The FIB path's graph node virtual function table
1224  */
1225 static const fib_node_vft_t fib_path_vft = {
1226     .fnv_get = fib_path_get_node,
1227     .fnv_last_lock = fib_path_last_lock_gone,
1228     .fnv_back_walk = fib_path_back_walk_notify,
1229     .fnv_mem_show = fib_path_memory_show,
1230 };
1231
1232 static fib_path_cfg_flags_t
1233 fib_path_route_flags_to_cfg_flags (const fib_route_path_t *rpath)
1234 {
1235     fib_path_cfg_flags_t cfg_flags = FIB_PATH_CFG_FLAG_NONE;
1236
1237     if (rpath->frp_flags & FIB_ROUTE_PATH_POP_PW_CW)
1238         cfg_flags |= FIB_PATH_CFG_FLAG_POP_PW_CW;
1239     if (rpath->frp_flags & FIB_ROUTE_PATH_RESOLVE_VIA_HOST)
1240         cfg_flags |= FIB_PATH_CFG_FLAG_RESOLVE_HOST;
1241     if (rpath->frp_flags & FIB_ROUTE_PATH_RESOLVE_VIA_ATTACHED)
1242         cfg_flags |= FIB_PATH_CFG_FLAG_RESOLVE_ATTACHED;
1243     if (rpath->frp_flags & FIB_ROUTE_PATH_LOCAL)
1244         cfg_flags |= FIB_PATH_CFG_FLAG_LOCAL;
1245     if (rpath->frp_flags & FIB_ROUTE_PATH_ATTACHED)
1246         cfg_flags |= FIB_PATH_CFG_FLAG_ATTACHED;
1247     if (rpath->frp_flags & FIB_ROUTE_PATH_INTF_RX)
1248         cfg_flags |= FIB_PATH_CFG_FLAG_INTF_RX;
1249     if (rpath->frp_flags & FIB_ROUTE_PATH_RPF_ID)
1250         cfg_flags |= FIB_PATH_CFG_FLAG_RPF_ID;
1251     if (rpath->frp_flags & FIB_ROUTE_PATH_EXCLUSIVE)
1252         cfg_flags |= FIB_PATH_CFG_FLAG_EXCLUSIVE;
1253     if (rpath->frp_flags & FIB_ROUTE_PATH_DROP)
1254         cfg_flags |= FIB_PATH_CFG_FLAG_DROP;
1255     if (rpath->frp_flags & FIB_ROUTE_PATH_SOURCE_LOOKUP)
1256         cfg_flags |= FIB_PATH_CFG_FLAG_DEAG_SRC;
1257     if (rpath->frp_flags & FIB_ROUTE_PATH_ICMP_UNREACH)
1258         cfg_flags |= FIB_PATH_CFG_FLAG_ICMP_UNREACH;
1259     if (rpath->frp_flags & FIB_ROUTE_PATH_ICMP_PROHIBIT)
1260         cfg_flags |= FIB_PATH_CFG_FLAG_ICMP_PROHIBIT;
1261
1262     return (cfg_flags);
1263 }
1264
1265 /*
1266  * fib_path_create
1267  *
1268  * Create and initialise a new path object.
1269  * return the index of the path.
1270  */
1271 fib_node_index_t
1272 fib_path_create (fib_node_index_t pl_index,
1273                  const fib_route_path_t *rpath)
1274 {
1275     fib_path_t *path;
1276
1277     pool_get(fib_path_pool, path);
1278     clib_memset(path, 0, sizeof(*path));
1279
1280     fib_node_init(&path->fp_node,
1281                   FIB_NODE_TYPE_PATH);
1282
1283     dpo_reset(&path->fp_dpo);
1284     path->fp_pl_index = pl_index;
1285     path->fp_nh_proto = rpath->frp_proto;
1286     path->fp_via_fib = FIB_NODE_INDEX_INVALID;
1287     path->fp_weight = rpath->frp_weight;
1288     if (0 == path->fp_weight)
1289     {
1290         /*
1291          * a weight of 0 is a meaningless value. We could either reject it, and thus force
1292          * clients to always use 1, or accept it and fix it up appropriately.
1293          */
1294         path->fp_weight = 1;
1295     }
1296     path->fp_preference = rpath->frp_preference;
1297     path->fp_cfg_flags = fib_path_route_flags_to_cfg_flags(rpath);
1298
1299     /*
1300      * deduce the path's type from the parameters and save what is needed.
1301      */
1302     if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_LOCAL)
1303     {
1304         path->fp_type = FIB_PATH_TYPE_RECEIVE;
1305         path->receive.fp_interface = rpath->frp_sw_if_index;
1306         path->receive.fp_addr = rpath->frp_addr;
1307     }
1308     else if (rpath->frp_flags & FIB_ROUTE_PATH_UDP_ENCAP)
1309     {
1310         path->fp_type = FIB_PATH_TYPE_UDP_ENCAP;
1311         path->udp_encap.fp_udp_encap_id = rpath->frp_udp_encap_id;
1312     }
1313     else if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_INTF_RX)
1314     {
1315         path->fp_type = FIB_PATH_TYPE_INTF_RX;
1316         path->intf_rx.fp_interface = rpath->frp_sw_if_index;
1317     }
1318     else if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_RPF_ID)
1319     {
1320         path->fp_type = FIB_PATH_TYPE_DEAG;
1321         path->deag.fp_tbl_id = rpath->frp_fib_index;
1322         path->deag.fp_rpf_id = rpath->frp_rpf_id;
1323     }
1324     else if (rpath->frp_flags & FIB_ROUTE_PATH_BIER_FMASK)
1325     {
1326         path->fp_type = FIB_PATH_TYPE_BIER_FMASK;
1327         path->bier_fmask.fp_bier_fmask = rpath->frp_bier_fmask;
1328     }
1329     else if (rpath->frp_flags & FIB_ROUTE_PATH_BIER_IMP)
1330     {
1331         path->fp_type = FIB_PATH_TYPE_BIER_IMP;
1332         path->bier_imp.fp_bier_imp = rpath->frp_bier_imp;
1333     }
1334     else if (rpath->frp_flags & FIB_ROUTE_PATH_BIER_TABLE)
1335     {
1336         path->fp_type = FIB_PATH_TYPE_BIER_TABLE;
1337         path->bier_table.fp_bier_tbl = rpath->frp_bier_tbl;
1338     }
1339     else if (rpath->frp_flags & FIB_ROUTE_PATH_DEAG)
1340     {
1341         path->fp_type = FIB_PATH_TYPE_DEAG;
1342         path->deag.fp_tbl_id = rpath->frp_fib_index;
1343     }
1344     else if (rpath->frp_flags & FIB_ROUTE_PATH_DVR)
1345     {
1346         path->fp_type = FIB_PATH_TYPE_DVR;
1347         path->dvr.fp_interface = rpath->frp_sw_if_index;
1348     }
1349     else if (rpath->frp_flags & FIB_ROUTE_PATH_EXCLUSIVE)
1350     {
1351         path->fp_type = FIB_PATH_TYPE_EXCLUSIVE;
1352         dpo_copy(&path->exclusive.fp_ex_dpo, &rpath->dpo);
1353     }
1354     else if ((path->fp_cfg_flags & FIB_PATH_CFG_FLAG_ICMP_PROHIBIT) ||
1355         (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_ICMP_UNREACH))
1356     {
1357         path->fp_type = FIB_PATH_TYPE_SPECIAL;
1358     }
1359     else if ((path->fp_cfg_flags & FIB_PATH_CFG_FLAG_CLASSIFY))
1360     {
1361         path->fp_type = FIB_PATH_TYPE_SPECIAL;
1362         path->classify.fp_classify_table_id = rpath->frp_classify_table_id;
1363     }
1364     else if (~0 != rpath->frp_sw_if_index)
1365     {
1366         if (ip46_address_is_zero(&rpath->frp_addr))
1367         {
1368             path->fp_type = FIB_PATH_TYPE_ATTACHED;
1369             path->attached.fp_interface = rpath->frp_sw_if_index;
1370         }
1371         else
1372         {
1373             path->fp_type = FIB_PATH_TYPE_ATTACHED_NEXT_HOP;
1374             path->attached_next_hop.fp_interface = rpath->frp_sw_if_index;
1375             path->attached_next_hop.fp_nh = rpath->frp_addr;
1376         }
1377     }
1378     else
1379     {
1380         if (ip46_address_is_zero(&rpath->frp_addr))
1381         {
1382             if (~0 == rpath->frp_fib_index)
1383             {
1384                 path->fp_type = FIB_PATH_TYPE_SPECIAL;
1385             }
1386             else
1387             {
1388                 path->fp_type = FIB_PATH_TYPE_DEAG;
1389                 path->deag.fp_tbl_id = rpath->frp_fib_index;
1390                 path->deag.fp_rpf_id = ~0;
1391             }
1392         }
1393         else
1394         {
1395             path->fp_type = FIB_PATH_TYPE_RECURSIVE;
1396             if (DPO_PROTO_MPLS == path->fp_nh_proto)
1397             {
1398                 path->recursive.fp_nh.fp_local_label = rpath->frp_local_label;
1399                 path->recursive.fp_nh.fp_eos = rpath->frp_eos;
1400             }
1401             else
1402             {
1403                 path->recursive.fp_nh.fp_ip = rpath->frp_addr;
1404             }
1405             path->recursive.fp_tbl_id = rpath->frp_fib_index;
1406         }
1407     }
1408
1409     FIB_PATH_DBG(path, "create");
1410
1411     return (fib_path_get_index(path));
1412 }
1413
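/*
 * Editorial sketch (not part of the original source): a path-list creating
 * an attached-nexthop path might pass a route-path like the one below; the
 * field values are hypothetical.
 *
 *     fib_route_path_t rpath = {
 *         .frp_proto = DPO_PROTO_IP4,
 *         .frp_addr = {
 *             .ip4.as_u32 = clib_host_to_net_u32 (0x0a000001),
 *         },
 *         .frp_sw_if_index = 1,
 *         .frp_fib_index = ~0,
 *         .frp_weight = 1,
 *     };
 *     fib_node_index_t pi = fib_path_create (pl_index, &rpath);
 */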
1414 /*
1415  * fib_path_create_special
1416  *
1417  * Create and initialise a new path object.
1418  * return the index of the path.
1419  */
1420 fib_node_index_t
1421 fib_path_create_special (fib_node_index_t pl_index,
1422                          dpo_proto_t nh_proto,
1423                          fib_path_cfg_flags_t flags,
1424                          const dpo_id_t *dpo)
1425 {
1426     fib_path_t *path;
1427
1428     pool_get(fib_path_pool, path);
1429     clib_memset(path, 0, sizeof(*path));
1430
1431     fib_node_init(&path->fp_node,
1432                   FIB_NODE_TYPE_PATH);
1433     dpo_reset(&path->fp_dpo);
1434
1435     path->fp_pl_index = pl_index;
1436     path->fp_weight = 1;
1437     path->fp_preference = 0;
1438     path->fp_nh_proto = nh_proto;
1439     path->fp_via_fib = FIB_NODE_INDEX_INVALID;
1440     path->fp_cfg_flags = flags;
1441
1442     if (FIB_PATH_CFG_FLAG_DROP & flags)
1443     {
1444         path->fp_type = FIB_PATH_TYPE_SPECIAL;
1445     }
1446     else if (FIB_PATH_CFG_FLAG_LOCAL & flags)
1447     {
1448         path->fp_type = FIB_PATH_TYPE_RECEIVE;
1449         path->attached.fp_interface = FIB_NODE_INDEX_INVALID;
1450     }
1451     else
1452     {
1453         path->fp_type = FIB_PATH_TYPE_EXCLUSIVE;
1454         ASSERT(NULL != dpo);
1455         dpo_copy(&path->exclusive.fp_ex_dpo, dpo);
1456     }
1457
1458     return (fib_path_get_index(path));
1459 }
1460
1461 /*
1462  * fib_path_copy
1463  *
1464  * Copy a path. return index of new path.
1465  */
1466 fib_node_index_t
1467 fib_path_copy (fib_node_index_t path_index,
1468                fib_node_index_t path_list_index)
1469 {
1470     fib_path_t *path, *orig_path;
1471
1472     pool_get(fib_path_pool, path);
1473
1474     orig_path = fib_path_get(path_index);
1475     ASSERT(NULL != orig_path);
1476
1477     memcpy(path, orig_path, sizeof(*path));
1478
1479     FIB_PATH_DBG(path, "create-copy:%d", path_index);
1480
1481     /*
1482      * reset the dynamic section
1483      */
1484     fib_node_init(&path->fp_node, FIB_NODE_TYPE_PATH);
1485     path->fp_oper_flags     = FIB_PATH_OPER_FLAG_NONE;
1486     path->fp_pl_index  = path_list_index;
1487     path->fp_via_fib   = FIB_NODE_INDEX_INVALID;
1488     clib_memset(&path->fp_dpo, 0, sizeof(path->fp_dpo));
1489     dpo_reset(&path->fp_dpo);
1490
1491     return (fib_path_get_index(path));
1492 }
1493
1494 /*
1495  * fib_path_destroy
1496  *
1497  * destroy a path that is no longer required
1498  */
1499 void
1500 fib_path_destroy (fib_node_index_t path_index)
1501 {
1502     fib_path_t *path;
1503
1504     path = fib_path_get(path_index);
1505
1506     ASSERT(NULL != path);
1507     FIB_PATH_DBG(path, "destroy");
1508
1509     fib_path_unresolve(path);
1510
1511     fib_node_deinit(&path->fp_node);
1512     pool_put(fib_path_pool, path);
1513 }
1514
1515 /*
1516  * fib_path_hash
1517  *
1518  * compute a hash over the path's key (the region between the hash markers)
1519  */
1520 uword
1521 fib_path_hash (fib_node_index_t path_index)
1522 {
1523     fib_path_t *path;
1524
1525     path = fib_path_get(path_index);
1526
1527     return (hash_memory(STRUCT_MARK_PTR(path, path_hash_start),
1528                         (STRUCT_OFFSET_OF(fib_path_t, path_hash_end) -
1529                          STRUCT_OFFSET_OF(fib_path_t, path_hash_start)),
1530                         0));
1531 }
1532
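/*
 * Editorial note (not part of the original source): the hash covers only
 * the bytes between the path_hash_start and path_hash_end markers, i.e. the
 * path's key. Members after the end marker (operational flags, fp_dpo,
 * fp_sibling, etc.) are derived state, so two paths that differ only in
 * resolution state hash to the same value.
 */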
1533 /*
1534  * fib_path_cmp_i
1535  *
1536  * Compare two paths for equivalence.
1537  */
1538 static int
1539 fib_path_cmp_i (const fib_path_t *path1,
1540                 const fib_path_t *path2)
1541 {
1542     int res;
1543
1544     res = 1;
1545
1546     /*
1547      * paths of different types and protocol are not equal.
1548      * paths that differ only in weight and/or preference are considered the same path.
1549      */
1550     if (path1->fp_type != path2->fp_type)
1551     {
1552         res = (path1->fp_type - path2->fp_type);
1553     }
1554     else if (path1->fp_nh_proto != path2->fp_nh_proto)
1555     {
1556         res = (path1->fp_nh_proto - path2->fp_nh_proto);
1557     }
1558     else
1559     {
1560         /*
1561          * both paths are of the same type.
1562          * consider each type and its attributes in turn.
1563          */
1564         switch (path1->fp_type)
1565         {
1566         case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
1567             res = ip46_address_cmp(&path1->attached_next_hop.fp_nh,
1568                                    &path2->attached_next_hop.fp_nh);
1569             if (0 == res) {
1570                 res = (path1->attached_next_hop.fp_interface -
1571                        path2->attached_next_hop.fp_interface);
1572             }
1573             break;
1574         case FIB_PATH_TYPE_ATTACHED:
1575             res = (path1->attached.fp_interface -
1576                    path2->attached.fp_interface);
1577             break;
1578         case FIB_PATH_TYPE_RECURSIVE:
1579             res = ip46_address_cmp(&path1->recursive.fp_nh,
1580                                    &path2->recursive.fp_nh);
1581  
1582             if (0 == res)
1583             {
1584                 res = (path1->recursive.fp_tbl_id - path2->recursive.fp_tbl_id);
1585             }
1586             break;
1587         case FIB_PATH_TYPE_BIER_FMASK:
1588             res = (path1->bier_fmask.fp_bier_fmask -
1589                    path2->bier_fmask.fp_bier_fmask);
1590             break;
1591         case FIB_PATH_TYPE_BIER_IMP:
1592             res = (path1->bier_imp.fp_bier_imp -
1593                    path2->bier_imp.fp_bier_imp);
1594             break;
1595         case FIB_PATH_TYPE_BIER_TABLE:
1596             res = bier_table_id_cmp(&path1->bier_table.fp_bier_tbl,
1597                                     &path2->bier_table.fp_bier_tbl);
1598             break;
1599         case FIB_PATH_TYPE_DEAG:
1600             res = (path1->deag.fp_tbl_id - path2->deag.fp_tbl_id);
1601             if (0 == res)
1602             {
1603                 res = (path1->deag.fp_rpf_id - path2->deag.fp_rpf_id);
1604             }
1605             break;
1606         case FIB_PATH_TYPE_INTF_RX:
1607             res = (path1->intf_rx.fp_interface - path2->intf_rx.fp_interface);
1608             break;
1609         case FIB_PATH_TYPE_UDP_ENCAP:
1610             res = (path1->udp_encap.fp_udp_encap_id - path2->udp_encap.fp_udp_encap_id);
1611             break;
1612         case FIB_PATH_TYPE_DVR:
1613             res = (path1->dvr.fp_interface - path2->dvr.fp_interface);
1614             break;
1615         case FIB_PATH_TYPE_EXCLUSIVE:
1616             res = dpo_cmp(&path1->exclusive.fp_ex_dpo, &path2->exclusive.fp_ex_dpo);
1617             break;
1618         case FIB_PATH_TYPE_SPECIAL:
1619         case FIB_PATH_TYPE_RECEIVE:
1620             res = 0;
1621             break;
1622         }
1623     }
1624     return (res);
1625 }
1626
1627 /*
1628  * fib_path_cmp_for_sort
1629  *
1630  * Compare two paths for equivalence. Used during path sorting.
1631  * As usual 0 means equal.
1632  */
1633 int
1634 fib_path_cmp_for_sort (void * v1,
1635                        void * v2)
1636 {
1637     fib_node_index_t *pi1 = v1, *pi2 = v2;
1638     fib_path_t *path1, *path2;
1639
1640     path1 = fib_path_get(*pi1);
1641     path2 = fib_path_get(*pi2);
1642
1643     /*
1644      * when sorting paths we want the highest preference paths
1645      * first, so that the set of choices built is in preference order
1646      */
1647     if (path1->fp_preference != path2->fp_preference)
1648     {
1649         return (path1->fp_preference - path2->fp_preference);
1650     }
1651
1652     return (fib_path_cmp_i(path1, path2));
1653 }
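/*
 * Illustrative usage only (an assumption about the caller, not part of this
 * file's contract): a path-list could order its vector of path indices with
 * this comparator, e.g.
 *
 *     vec_sort_with_function(path_indices, fib_path_cmp_for_sort);
 *
 * after which the paths with the numerically lowest fp_preference (i.e. the
 * most preferred) come first, and equal-preference paths are grouped by the
 * fib_path_cmp_i() ordering.
 */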
1654
1655 /*
1656  * fib_path_cmp
1657  *
1658  * Compare two paths for equivalence.
1659  */
1660 int
1661 fib_path_cmp (fib_node_index_t pi1,
1662               fib_node_index_t pi2)
1663 {
1664     fib_path_t *path1, *path2;
1665
1666     path1 = fib_path_get(pi1);
1667     path2 = fib_path_get(pi2);
1668
1669     return (fib_path_cmp_i(path1, path2));
1670 }
1671
1672 int
1673 fib_path_cmp_w_route_path (fib_node_index_t path_index,
1674                            const fib_route_path_t *rpath)
1675 {
1676     fib_path_t *path;
1677     int res;
1678
1679     path = fib_path_get(path_index);
1680
1681     res = 1;
1682
1683     if (path->fp_weight != rpath->frp_weight)
1684     {
1685         res = (path->fp_weight - rpath->frp_weight);
1686     }
1687     else
1688     {
1689         /*
1690          * both paths are of the same type.
1691          * consider each type and its attributes in turn.
1692          */
1693         switch (path->fp_type)
1694         {
1695         case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
1696             res = ip46_address_cmp(&path->attached_next_hop.fp_nh,
1697                                    &rpath->frp_addr);
1698             if (0 == res)
1699             {
1700                 res = (path->attached_next_hop.fp_interface -
1701                        rpath->frp_sw_if_index);
1702             }
1703             break;
1704         case FIB_PATH_TYPE_ATTACHED:
1705             res = (path->attached.fp_interface - rpath->frp_sw_if_index);
1706             break;
1707         case FIB_PATH_TYPE_RECURSIVE:
1708             if (DPO_PROTO_MPLS == path->fp_nh_proto)
1709             {
1710                 res = path->recursive.fp_nh.fp_local_label - rpath->frp_local_label;
1711
1712                 if (res == 0)
1713                 {
1714                     res = path->recursive.fp_nh.fp_eos - rpath->frp_eos;
1715                 }
1716             }
1717             else
1718             {
1719                 res = ip46_address_cmp(&path->recursive.fp_nh.fp_ip,
1720                                        &rpath->frp_addr);
1721             }
1722
1723             if (0 == res)
1724             {
1725                 res = (path->recursive.fp_tbl_id - rpath->frp_fib_index);
1726             }
1727             break;
1728         case FIB_PATH_TYPE_BIER_FMASK:
1729             res = (path->bier_fmask.fp_bier_fmask - rpath->frp_bier_fmask);
1730             break;
1731         case FIB_PATH_TYPE_BIER_IMP:
1732             res = (path->bier_imp.fp_bier_imp - rpath->frp_bier_imp);
1733             break;
1734         case FIB_PATH_TYPE_BIER_TABLE:
1735             res = bier_table_id_cmp(&path->bier_table.fp_bier_tbl,
1736                                     &rpath->frp_bier_tbl);
1737             break;
1738         case FIB_PATH_TYPE_INTF_RX:
1739             res = (path->intf_rx.fp_interface - rpath->frp_sw_if_index);
1740             break;
1741         case FIB_PATH_TYPE_UDP_ENCAP:
1742             res = (path->udp_encap.fp_udp_encap_id - rpath->frp_udp_encap_id);
1743             break;
1744         case FIB_PATH_TYPE_DEAG:
1745             res = (path->deag.fp_tbl_id - rpath->frp_fib_index);
1746             if (0 == res)
1747             {
1748                 res = (path->deag.fp_rpf_id - rpath->frp_rpf_id);
1749             }
1750             break;
1751         case FIB_PATH_TYPE_DVR:
1752             res = (path->dvr.fp_interface - rpath->frp_sw_if_index);
1753             break;
1754         case FIB_PATH_TYPE_EXCLUSIVE:
1755             res = dpo_cmp(&path->exclusive.fp_ex_dpo, &rpath->dpo);
1756             break;
1757         case FIB_PATH_TYPE_RECEIVE:
1758             if (rpath->frp_flags & FIB_ROUTE_PATH_LOCAL)
1759             {
1760                 res = 0;
1761             }
1762             else
1763             {
1764                 res = 1;
1765             }
1766             break;
1767         case FIB_PATH_TYPE_SPECIAL:
1768             res = 0;
1769             break;
1770         }
1771     }
1772     return (res);
1773 }
1774
1775 /*
1776  * fib_path_recursive_loop_detect
1777  *
1778  * A forward walk of the FIB object graph to detect for a cycle/loop. This
1779  * walk is initiated when an entry is linking to a new path list or from an old.
1780  * The entry vector passed contains all the FIB entries that are children of this
1781  * path (i.e. all the entries encountered on the walk so far). If this vector
1782  * contains the entry this path resolves via, then a loop is about to form.
1783  * The loop must be allowed to form, since we need the dependencies in place
1784  * so that we can track when the loop breaks.
1785  * However, we MUST not produce a loop in the forwarding graph (else packets
1786  * would loop around the switch path until the loop breaks), so we mark recursive
1787  * paths as looped so that they do not contribute forwarding information.
1788  * By marking the path as looped, an entry such as:
1789  *    X/Y
1790  *     via a.a.a.a (looped)
1791  *     via b.b.b.b (not looped)
1792  * can still forward using the info provided by b.b.b.b only
1793  */
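/*
 * A minimal sketch of such a loop (addresses purely illustrative):
 *
 *     1.1.1.0/24  via 10.0.0.1   (recursive)
 *     10.0.0.1/32 via 1.1.1.1    (recursive; 1.1.1.1 is covered by 1.1.1.0/24)
 *
 * resolving 1.1.1.0/24 walks to 10.0.0.1/32, whose path resolves back through
 * 1.1.1.0/24. That entry is already in the entry vector, so the path is marked
 * looped and contributes a drop until the cycle is broken.
 */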
1794 int
1795 fib_path_recursive_loop_detect (fib_node_index_t path_index,
1796                                 fib_node_index_t **entry_indicies)
1797 {
1798     fib_path_t *path;
1799
1800     path = fib_path_get(path_index);
1801
1802     /*
1803      * the forced drop path is never looped, because it is never resolved.
1804      */
1805     if (fib_path_is_permanent_drop(path))
1806     {
1807         return (0);
1808     }
1809
1810     switch (path->fp_type)
1811     {
1812     case FIB_PATH_TYPE_RECURSIVE:
1813     {
1814         fib_node_index_t *entry_index, *entries;
1815         int looped = 0;
1816         entries = *entry_indicies;
1817
1818         vec_foreach(entry_index, entries) {
1819             if (*entry_index == path->fp_via_fib)
1820             {
1821                 /*
1822                  * the entry that is about to link to this path-list (or
1823                  * one of this path-list's children) is the same entry that
1824                  * this recursive path resolves through. this is a cycle.
1825                  * abort the walk.
1826                  */
1827                 looped = 1;
1828                 break;
1829             }
1830         }
1831
1832         if (looped)
1833         {
1834             FIB_PATH_DBG(path, "recursive loop formed");
1835             path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RECURSIVE_LOOP;
1836
1837             dpo_copy(&path->fp_dpo, drop_dpo_get(path->fp_nh_proto));
1838         }
1839         else
1840         {
1841             /*
1842              * no loop here yet. keep forward walking the graph.
1843              */
1844             if (fib_entry_recursive_loop_detect(path->fp_via_fib, entry_indicies))
1845             {
1846                 FIB_PATH_DBG(path, "recursive loop formed");
1847                 path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RECURSIVE_LOOP;
1848             }
1849             else
1850             {
1851                 FIB_PATH_DBG(path, "recursive loop cleared");
1852                 path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RECURSIVE_LOOP;
1853             }
1854         }
1855         break;
1856     }
1857     case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
1858     case FIB_PATH_TYPE_ATTACHED:
1859         if (adj_recursive_loop_detect(path->fp_dpo.dpoi_index,
1860                                       entry_indicies))
1861         {
1862             FIB_PATH_DBG(path, "recursive loop formed");
1863             path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RECURSIVE_LOOP;
1864         }
1865         else
1866         {
1867             FIB_PATH_DBG(path, "recursive loop cleared");
1868             path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RECURSIVE_LOOP;
1869         }
1870         break;
1871     case FIB_PATH_TYPE_SPECIAL:
1872     case FIB_PATH_TYPE_DEAG:
1873     case FIB_PATH_TYPE_DVR:
1874     case FIB_PATH_TYPE_RECEIVE:
1875     case FIB_PATH_TYPE_INTF_RX:
1876     case FIB_PATH_TYPE_UDP_ENCAP:
1877     case FIB_PATH_TYPE_EXCLUSIVE:
1878     case FIB_PATH_TYPE_BIER_FMASK:
1879     case FIB_PATH_TYPE_BIER_TABLE:
1880     case FIB_PATH_TYPE_BIER_IMP:
1881         /*
1882          * these path types cannot be part of a loop, since they are the leaves
1883          * of the graph.
1884          */
1885         break;
1886     }
1887
1888     return (fib_path_is_looped(path_index));
1889 }
1890
1891 int
1892 fib_path_resolve (fib_node_index_t path_index)
1893 {
1894     fib_path_t *path;
1895
1896     path = fib_path_get(path_index);
1897
1898     /*
1899      * hope for the best.
1900      */
1901     path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RESOLVED;
1902
1903     /*
1904      * the forced drop path resolves via the drop adj
1905      */
1906     if (fib_path_is_permanent_drop(path))
1907     {
1908         dpo_copy(&path->fp_dpo, drop_dpo_get(path->fp_nh_proto));
1909         path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
1910         return (fib_path_is_resolved(path_index));
1911     }
1912
1913     switch (path->fp_type)
1914     {
1915     case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
1916         fib_path_attached_next_hop_set(path);
1917         break;
1918     case FIB_PATH_TYPE_ATTACHED:
1919     {
1920         dpo_id_t tmp = DPO_INVALID;
1921
1922         /*
1923          * an attached path is only resolved when path->attached.fp_interface is up
1924          */
1925         if (!vnet_sw_interface_is_up(vnet_get_main(),
1926                                      path->attached.fp_interface))
1927         {
1928             path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
1929         }
1930         dpo_set(&tmp,
1931                 DPO_ADJACENCY,
1932                 path->fp_nh_proto,
1933                 fib_path_attached_get_adj(path,
1934                                           dpo_proto_to_link(path->fp_nh_proto)));
1935
1936         /*
1937          * re-fetch after possible mem realloc
1938          */
1939         path = fib_path_get(path_index);
1940         dpo_copy(&path->fp_dpo, &tmp);
1941
1942         /*
1943          * become a child of the adjacency so we receive updates
1944          * when the interface state changes
1945          */
1946         path->fp_sibling = adj_child_add(path->fp_dpo.dpoi_index,
1947                                          FIB_NODE_TYPE_PATH,
1948                                          fib_path_get_index(path));
1949         dpo_reset(&tmp);
1950         break;
1951     }
1952     case FIB_PATH_TYPE_RECURSIVE:
1953     {
1954         /*
1955          * Create a RR source entry in the table for the address
1956          * that this path recurses through.
1957          * This resolve action is recursive, hence we may create
1958          * more paths in the process. Creating more paths may realloc
1959          * the path pool and hence move this path.
1960          */
1961         fib_node_index_t fei;
1962         fib_prefix_t pfx;
1963
1964         ASSERT(FIB_NODE_INDEX_INVALID == path->fp_via_fib);
1965
1966         if (DPO_PROTO_MPLS == path->fp_nh_proto)
1967         {
1968             fib_prefix_from_mpls_label(path->recursive.fp_nh.fp_local_label,
1969                                        path->recursive.fp_nh.fp_eos,
1970                                        &pfx);
1971         }
1972         else
1973         {
1974             fib_prefix_from_ip46_addr(&path->recursive.fp_nh.fp_ip, &pfx);
1975         }
1976
1977         fib_table_lock(path->recursive.fp_tbl_id,
1978                        dpo_proto_to_fib(path->fp_nh_proto),
1979                        FIB_SOURCE_RR);
1980         fei = fib_table_entry_special_add(path->recursive.fp_tbl_id,
1981                                           &pfx,
1982                                           FIB_SOURCE_RR,
1983                                           FIB_ENTRY_FLAG_NONE);
1984
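        /*
         * re-fetch the path object; the recursive add above may have
         * reallocated the path pool and moved it.
         */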
1985         path = fib_path_get(path_index);
1986         path->fp_via_fib = fei;
1987
1988         /*
1989          * become a dependent child of the entry so the path is 
1990          * informed when the forwarding for the entry changes.
1991          */
1992         path->fp_sibling = fib_entry_child_add(path->fp_via_fib,
1993                                                FIB_NODE_TYPE_PATH,
1994                                                fib_path_get_index(path));
1995
1996         /*
1997          * create and configure the IP DPO
1998          */
1999         fib_path_recursive_adj_update(
2000             path,
2001             fib_path_to_chain_type(path),
2002             &path->fp_dpo);
2003
2004         break;
2005     }
2006     case FIB_PATH_TYPE_BIER_FMASK:
2007     {
2008         /*
2009          * become a dependent child of the entry so the path is
2010          * informed when the forwarding for the entry changes.
2011          */
2012         path->fp_sibling = bier_fmask_child_add(path->bier_fmask.fp_bier_fmask,
2013                                                 FIB_NODE_TYPE_PATH,
2014                                                 fib_path_get_index(path));
2015
2016         path->fp_via_bier_fmask = path->bier_fmask.fp_bier_fmask;
2017         fib_path_bier_fmask_update(path, &path->fp_dpo);
2018
2019         break;
2020     }
2021     case FIB_PATH_TYPE_BIER_IMP:
2022         bier_imp_lock(path->bier_imp.fp_bier_imp);
2023         bier_imp_contribute_forwarding(path->bier_imp.fp_bier_imp,
2024                                        DPO_PROTO_IP4,
2025                                        &path->fp_dpo);
2026         break;
2027     case FIB_PATH_TYPE_BIER_TABLE:
2028     {
2029         /*
2030          * Find/create the BIER table to link to
2031          */
2032         ASSERT(FIB_NODE_INDEX_INVALID == path->fp_via_bier_tbl);
2033
2034         path->fp_via_bier_tbl =
2035             bier_table_ecmp_create_and_lock(&path->bier_table.fp_bier_tbl);
2036
2037         bier_table_contribute_forwarding(path->fp_via_bier_tbl,
2038                                          &path->fp_dpo);
2039         break;
2040     }
2041     case FIB_PATH_TYPE_SPECIAL:
2042         if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_ICMP_PROHIBIT)
2043         {
2044             ip_null_dpo_add_and_lock (path->fp_nh_proto,
2045                                       IP_NULL_ACTION_SEND_ICMP_PROHIBIT,
2046                                       &path->fp_dpo);
2047         }
2048         else if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_ICMP_UNREACH)
2049         {
2050             ip_null_dpo_add_and_lock (path->fp_nh_proto,
2051                                       IP_NULL_ACTION_SEND_ICMP_UNREACH,
2052                                       &path->fp_dpo);
2053         }
2054         else if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_CLASSIFY)
2055         {
2056             dpo_set (&path->fp_dpo, DPO_CLASSIFY,
2057                      path->fp_nh_proto,
2058                      classify_dpo_create (path->fp_nh_proto,
2059                                           path->classify.fp_classify_table_id));
2060         }
2061         else
2062         {
2063             /*
2064              * Resolve via the drop
2065              */
2066             dpo_copy(&path->fp_dpo, drop_dpo_get(path->fp_nh_proto));
2067         }
2068         break;
2069     case FIB_PATH_TYPE_DEAG:
2070     {
2071         if (DPO_PROTO_BIER == path->fp_nh_proto)
2072         {
2073             bier_disp_table_contribute_forwarding(path->deag.fp_tbl_id,
2074                                                   &path->fp_dpo);
2075         }
2076         else
2077         {
2078             /*
2079              * Resolve via a lookup DPO.
2080              * FIXME. control plane should add routes with a table ID
2081              */
2082             lookup_input_t input;
2083             lookup_cast_t cast;
2084
2085             cast = (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_RPF_ID ?
2086                     LOOKUP_MULTICAST :
2087                     LOOKUP_UNICAST);
2088             input = (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_DEAG_SRC ?
2089                      LOOKUP_INPUT_SRC_ADDR :
2090                      LOOKUP_INPUT_DST_ADDR);
2091
2092             lookup_dpo_add_or_lock_w_fib_index(path->deag.fp_tbl_id,
2093                                                path->fp_nh_proto,
2094                                                cast,
2095                                                input,
2096                                                LOOKUP_TABLE_FROM_CONFIG,
2097                                                &path->fp_dpo);
2098         }
2099         break;
2100     }
2101     case FIB_PATH_TYPE_DVR:
2102         dvr_dpo_add_or_lock(path->attached.fp_interface,
2103                             path->fp_nh_proto,
2104                             &path->fp_dpo);
2105         break;
2106     case FIB_PATH_TYPE_RECEIVE:
2107         /*
2108          * Resolve via a receive DPO.
2109          */
2110         receive_dpo_add_or_lock(path->fp_nh_proto,
2111                                 path->receive.fp_interface,
2112                                 &path->receive.fp_addr,
2113                                 &path->fp_dpo);
2114         break;
2115     case FIB_PATH_TYPE_UDP_ENCAP:
2116         udp_encap_lock(path->udp_encap.fp_udp_encap_id);
2117         udp_encap_contribute_forwarding(path->udp_encap.fp_udp_encap_id,
2118                                         path->fp_nh_proto,
2119                                         &path->fp_dpo);
2120         break;
2121     case FIB_PATH_TYPE_INTF_RX: {
2122         /*
2123          * Resolve via a receive DPO.
2124          */
2125         interface_rx_dpo_add_or_lock(path->fp_nh_proto,
2126                                      path->intf_rx.fp_interface,
2127                                      &path->fp_dpo);
2128         break;
2129     }
2130     case FIB_PATH_TYPE_EXCLUSIVE:
2131         /*
2132          * Resolve via the user provided DPO
2133          */
2134         dpo_copy(&path->fp_dpo, &path->exclusive.fp_ex_dpo);
2135         break;
2136     }
2137
2138     return (fib_path_is_resolved(path_index));
2139 }
2140
2141 u32
2142 fib_path_get_resolving_interface (fib_node_index_t path_index)
2143 {
2144     fib_path_t *path;
2145
2146     path = fib_path_get(path_index);
2147
2148     switch (path->fp_type)
2149     {
2150     case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
2151         return (path->attached_next_hop.fp_interface);
2152     case FIB_PATH_TYPE_ATTACHED:
2153         return (path->attached.fp_interface);
2154     case FIB_PATH_TYPE_RECEIVE:
2155         return (path->receive.fp_interface);
2156     case FIB_PATH_TYPE_RECURSIVE:
2157         if (fib_path_is_resolved(path_index))
2158         {
2159             return (fib_entry_get_resolving_interface(path->fp_via_fib));
2160         }
2161         break;
2162     case FIB_PATH_TYPE_DVR:
2163         return (path->dvr.fp_interface);
2164     case FIB_PATH_TYPE_INTF_RX:
2165     case FIB_PATH_TYPE_UDP_ENCAP:
2166     case FIB_PATH_TYPE_SPECIAL:
2167     case FIB_PATH_TYPE_DEAG:
2168     case FIB_PATH_TYPE_EXCLUSIVE:
2169     case FIB_PATH_TYPE_BIER_FMASK:
2170     case FIB_PATH_TYPE_BIER_TABLE:
2171     case FIB_PATH_TYPE_BIER_IMP:
2172         break;
2173     }
2174     return (dpo_get_urpf(&path->fp_dpo));
2175 }
2176
2177 index_t
2178 fib_path_get_resolving_index (fib_node_index_t path_index)
2179 {
2180     fib_path_t *path;
2181
2182     path = fib_path_get(path_index);
2183
2184     switch (path->fp_type)
2185     {
2186     case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
2187     case FIB_PATH_TYPE_ATTACHED:
2188     case FIB_PATH_TYPE_RECEIVE:
2189     case FIB_PATH_TYPE_INTF_RX:
2190     case FIB_PATH_TYPE_SPECIAL:
2191     case FIB_PATH_TYPE_DEAG:
2192     case FIB_PATH_TYPE_DVR:
2193     case FIB_PATH_TYPE_EXCLUSIVE:
2194         break;
2195     case FIB_PATH_TYPE_UDP_ENCAP:
2196         return (path->udp_encap.fp_udp_encap_id);
2197     case FIB_PATH_TYPE_RECURSIVE:
2198         return (path->fp_via_fib);
2199     case FIB_PATH_TYPE_BIER_FMASK:
2200         return (path->bier_fmask.fp_bier_fmask);
2201     case FIB_PATH_TYPE_BIER_TABLE:
2202         return (path->fp_via_bier_tbl);
2203     case FIB_PATH_TYPE_BIER_IMP:
2204         return (path->bier_imp.fp_bier_imp);
2205     }
2206     return (~0);
2207 }
2208
2209 adj_index_t
2210 fib_path_get_adj (fib_node_index_t path_index)
2211 {
2212     fib_path_t *path;
2213
2214     path = fib_path_get(path_index);
2215
2216     ASSERT(dpo_is_adj(&path->fp_dpo));
2217     if (dpo_is_adj(&path->fp_dpo))
2218     {
2219         return (path->fp_dpo.dpoi_index);
2220     }
2221     return (ADJ_INDEX_INVALID);
2222 }
2223
2224 u16
2225 fib_path_get_weight (fib_node_index_t path_index)
2226 {
2227     fib_path_t *path;
2228
2229     path = fib_path_get(path_index);
2230
2231     ASSERT(path);
2232
2233     return (path->fp_weight);
2234 }
2235
2236 u16
2237 fib_path_get_preference (fib_node_index_t path_index)
2238 {
2239     fib_path_t *path;
2240
2241     path = fib_path_get(path_index);
2242
2243     ASSERT(path);
2244
2245     return (path->fp_preference);
2246 }
2247
2248 u32
2249 fib_path_get_rpf_id (fib_node_index_t path_index)
2250 {
2251     fib_path_t *path;
2252
2253     path = fib_path_get(path_index);
2254
2255     ASSERT(path);
2256
2257     if (FIB_PATH_CFG_FLAG_RPF_ID & path->fp_cfg_flags)
2258     {
2259         return (path->deag.fp_rpf_id);
2260     }
2261
2262     return (~0);
2263 }
2264
2265 /**
2266  * @brief Contribute the path's adjacency to the list passed.
2267  * By calling this function over all paths, recursively, a child
2268  * can construct its full set of forwarding adjacencies, and hence its
2269  * uRPF list.
2270  */
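/*
 * For example (interface indices purely illustrative), an entry with two
 * attached-next-hop paths:
 *
 *     via 10.0.0.1, sw_if_index 1
 *     via 10.0.1.1, sw_if_index 2
 *
 * appends both sw_if_index 1 and 2 to the uRPF list, so either interface is
 * an acceptable source interface for traffic from the covered prefix.
 */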
2271 void
2272 fib_path_contribute_urpf (fib_node_index_t path_index,
2273                           index_t urpf)
2274 {
2275     fib_path_t *path;
2276
2277     path = fib_path_get(path_index);
2278
2279     /*
2280      * resolved and unresolved paths contribute to the RPF list.
2281      */
2282     switch (path->fp_type)
2283     {
2284     case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
2285         fib_urpf_list_append(urpf, path->attached_next_hop.fp_interface);
2286         break;
2287
2288     case FIB_PATH_TYPE_ATTACHED:
2289         fib_urpf_list_append(urpf, path->attached.fp_interface);
2290         break;
2291
2292     case FIB_PATH_TYPE_RECURSIVE:
2293         if (FIB_NODE_INDEX_INVALID != path->fp_via_fib &&
2294             !fib_path_is_looped(path_index))
2295         {
2296             /*
2297              * a path can be unresolved due to resolution constraints, or
2298              * because it has no via-entry; without a via-entry there is nothing to contribute.
2299              */
2300             fib_entry_contribute_urpf(path->fp_via_fib, urpf);
2301         }
2302         break;
2303
2304     case FIB_PATH_TYPE_EXCLUSIVE:
2305     case FIB_PATH_TYPE_SPECIAL:
2306     {
2307         /*
2308          * these path types may link to an adj, if that's what
2309          * the client provided
2310          */
2311         u32 rpf_sw_if_index;
2312
2313         rpf_sw_if_index = dpo_get_urpf(&path->fp_dpo);
2314
2315         if (~0 != rpf_sw_if_index)
2316         {
2317             fib_urpf_list_append(urpf, rpf_sw_if_index);
2318         }
2319         break;
2320     }
2321     case FIB_PATH_TYPE_DVR:
2322         fib_urpf_list_append(urpf, path->dvr.fp_interface);
2323         break;
2324     case FIB_PATH_TYPE_DEAG:
2325     case FIB_PATH_TYPE_RECEIVE:
2326     case FIB_PATH_TYPE_INTF_RX:
2327     case FIB_PATH_TYPE_UDP_ENCAP:
2328     case FIB_PATH_TYPE_BIER_FMASK:
2329     case FIB_PATH_TYPE_BIER_TABLE:
2330     case FIB_PATH_TYPE_BIER_IMP:
2331         /*
2332          * these path types don't link to an adj
2333          */
2334         break;
2335     }
2336 }
2337
2338 void
2339 fib_path_stack_mpls_disp (fib_node_index_t path_index,
2340                           dpo_proto_t payload_proto,
2341                           fib_mpls_lsp_mode_t mode,
2342                           dpo_id_t *dpo)
2343 {
2344     fib_path_t *path;
2345
2346     path = fib_path_get(path_index);
2347
2348     ASSERT(path);
2349
2350     switch (path->fp_type)
2351     {
2352     case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
2353     {
2354         dpo_id_t tmp = DPO_INVALID;
2355
2356         dpo_copy(&tmp, dpo);
2357
2358         mpls_disp_dpo_create(payload_proto, ~0, mode, &tmp, dpo);
2359         dpo_reset(&tmp);
2360         break;
2361     }
2362     case FIB_PATH_TYPE_DEAG:
2363     {
2364         dpo_id_t tmp = DPO_INVALID;
2365
2366         dpo_copy(&tmp, dpo);
2367
2368         mpls_disp_dpo_create(payload_proto,
2369                              path->deag.fp_rpf_id,
2370                              mode, &tmp, dpo);
2371         dpo_reset(&tmp);
2372         break;
2373     }
2374     case FIB_PATH_TYPE_RECEIVE:
2375     case FIB_PATH_TYPE_ATTACHED:
2376     case FIB_PATH_TYPE_RECURSIVE:
2377     case FIB_PATH_TYPE_INTF_RX:
2378     case FIB_PATH_TYPE_UDP_ENCAP:
2379     case FIB_PATH_TYPE_EXCLUSIVE:
2380     case FIB_PATH_TYPE_SPECIAL:
2381     case FIB_PATH_TYPE_BIER_FMASK:
2382     case FIB_PATH_TYPE_BIER_TABLE:
2383     case FIB_PATH_TYPE_BIER_IMP:
2384     case FIB_PATH_TYPE_DVR:
2385         break;
2386     }
2387
2388     if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_POP_PW_CW)
2389     {
2390         dpo_id_t tmp = DPO_INVALID;
2391
2392         dpo_copy(&tmp, dpo);
2393
2394         pw_cw_dpo_create(&tmp, dpo);
2395         dpo_reset(&tmp);
2396     }
2397 }
2398
2399 void
2400 fib_path_contribute_forwarding (fib_node_index_t path_index,
2401                                 fib_forward_chain_type_t fct,
2402                                 dpo_id_t *dpo)
2403 {
2404     fib_path_t *path;
2405
2406     path = fib_path_get(path_index);
2407
2408     ASSERT(path);
2409     ASSERT(FIB_FORW_CHAIN_TYPE_MPLS_EOS != fct);
2410
2411     /*
2412      * The DPO stored in the path was created when the path was resolved.
2413      * This represents the path's 'native' protocol, e.g. IP.
2414      * For all other chain types we need to go and find something else.
2415      */
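    /*
     * e.g. (illustrative) an IPv4 attached-next-hop path stores an IPv4
     * unicast adjacency in fp_dpo; a request for the MPLS_NON_EOS chain
     * type below must instead fetch an adjacency of MPLS link type.
     */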
2416     if (fib_path_to_chain_type(path) == fct)
2417     {
2418         dpo_copy(dpo, &path->fp_dpo);
2419     }
2420     else
2421     {
2422         switch (path->fp_type)
2423         {
2424         case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
2425             switch (fct)
2426             {
2427             case FIB_FORW_CHAIN_TYPE_UNICAST_IP4:
2428             case FIB_FORW_CHAIN_TYPE_UNICAST_IP6:
2429             case FIB_FORW_CHAIN_TYPE_MPLS_EOS:
2430             case FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS:
2431             case FIB_FORW_CHAIN_TYPE_ETHERNET:
2432             case FIB_FORW_CHAIN_TYPE_NSH:
2433             case FIB_FORW_CHAIN_TYPE_MCAST_IP4:
2434             case FIB_FORW_CHAIN_TYPE_MCAST_IP6:
2435             {
2436                 adj_index_t ai;
2437
2438                 /*
2439                  * get an appropriate link-type adj.
2440                  */
2441                 ai = fib_path_attached_next_hop_get_adj(
2442                          path,
2443                          fib_forw_chain_type_to_link_type(fct));
2444                 dpo_set(dpo, DPO_ADJACENCY,
2445                         fib_forw_chain_type_to_dpo_proto(fct), ai);
2446                 adj_unlock(ai);
2447
2448                 break;
2449             }
2450             case FIB_FORW_CHAIN_TYPE_BIER:
2451                 break;
2452             }
2453             break;
2454         case FIB_PATH_TYPE_RECURSIVE:
2455             switch (fct)
2456             {
2457             case FIB_FORW_CHAIN_TYPE_MPLS_EOS:
2458             case FIB_FORW_CHAIN_TYPE_UNICAST_IP4:
2459             case FIB_FORW_CHAIN_TYPE_UNICAST_IP6:
2460             case FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS:
2461             case FIB_FORW_CHAIN_TYPE_MCAST_IP4:
2462             case FIB_FORW_CHAIN_TYPE_MCAST_IP6:
2463             case FIB_FORW_CHAIN_TYPE_BIER:
2464                 fib_path_recursive_adj_update(path, fct, dpo);
2465                 break;
2466             case FIB_FORW_CHAIN_TYPE_ETHERNET:
2467             case FIB_FORW_CHAIN_TYPE_NSH:
2468                 ASSERT(0);
2469                 break;
2470             }
2471             break;
2472         case FIB_PATH_TYPE_BIER_TABLE:
2473             switch (fct)
2474             {
2475             case FIB_FORW_CHAIN_TYPE_BIER:
2476                 bier_table_contribute_forwarding(path->fp_via_bier_tbl, dpo);
2477                 break;
2478             case FIB_FORW_CHAIN_TYPE_MPLS_EOS:
2479             case FIB_FORW_CHAIN_TYPE_UNICAST_IP4:
2480             case FIB_FORW_CHAIN_TYPE_UNICAST_IP6:
2481             case FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS:
2482             case FIB_FORW_CHAIN_TYPE_MCAST_IP4:
2483             case FIB_FORW_CHAIN_TYPE_MCAST_IP6:
2484             case FIB_FORW_CHAIN_TYPE_ETHERNET:
2485             case FIB_FORW_CHAIN_TYPE_NSH:
2486                 ASSERT(0);
2487                 break;
2488             }
2489             break;
2490         case FIB_PATH_TYPE_BIER_FMASK:
2491             switch (fct)
2492             {
2493             case FIB_FORW_CHAIN_TYPE_BIER:
2494                 fib_path_bier_fmask_update(path, dpo);
2495                 break;
2496             case FIB_FORW_CHAIN_TYPE_MPLS_EOS:
2497             case FIB_FORW_CHAIN_TYPE_UNICAST_IP4:
2498             case FIB_FORW_CHAIN_TYPE_UNICAST_IP6:
2499             case FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS:
2500             case FIB_FORW_CHAIN_TYPE_MCAST_IP4:
2501             case FIB_FORW_CHAIN_TYPE_MCAST_IP6:
2502             case FIB_FORW_CHAIN_TYPE_ETHERNET:
2503             case FIB_FORW_CHAIN_TYPE_NSH:
2504                 ASSERT(0);
2505                 break;
2506             }
2507             break;
2508         case FIB_PATH_TYPE_BIER_IMP:
2509             bier_imp_contribute_forwarding(path->bier_imp.fp_bier_imp,
2510                                            fib_forw_chain_type_to_dpo_proto(fct),
2511                                            dpo);
2512             break;
2513         case FIB_PATH_TYPE_DEAG:
2514             switch (fct)
2515             {
2516             case FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS:
2517                 lookup_dpo_add_or_lock_w_table_id(MPLS_FIB_DEFAULT_TABLE_ID,
2518                                                   DPO_PROTO_MPLS,
2519                                                   LOOKUP_UNICAST,
2520                                                   LOOKUP_INPUT_DST_ADDR,
2521                                                   LOOKUP_TABLE_FROM_CONFIG,
2522                                                   dpo);
2523                 break;
2524             case FIB_FORW_CHAIN_TYPE_MPLS_EOS:
2525             case FIB_FORW_CHAIN_TYPE_UNICAST_IP4:
2526             case FIB_FORW_CHAIN_TYPE_UNICAST_IP6:
2527             case FIB_FORW_CHAIN_TYPE_MCAST_IP4:
2528             case FIB_FORW_CHAIN_TYPE_MCAST_IP6:
2529                 dpo_copy(dpo, &path->fp_dpo);
2530                 break;
2531             case FIB_FORW_CHAIN_TYPE_BIER:
2532                 break;
2533             case FIB_FORW_CHAIN_TYPE_ETHERNET:
2534             case FIB_FORW_CHAIN_TYPE_NSH:
2535                 ASSERT(0);
2536                 break;
2537             }
2538             break;
2539         case FIB_PATH_TYPE_EXCLUSIVE:
2540             dpo_copy(dpo, &path->exclusive.fp_ex_dpo);
2541             break;
2542         case FIB_PATH_TYPE_ATTACHED:
2543             switch (fct)
2544             {
2545             case FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS:
2546             case FIB_FORW_CHAIN_TYPE_UNICAST_IP4:
2547             case FIB_FORW_CHAIN_TYPE_UNICAST_IP6:
2548             case FIB_FORW_CHAIN_TYPE_MPLS_EOS:
2549             case FIB_FORW_CHAIN_TYPE_ETHERNET:
2550             case FIB_FORW_CHAIN_TYPE_NSH:
2551             case FIB_FORW_CHAIN_TYPE_BIER:
2552                 {
2553                     adj_index_t ai;
2554
2555                     /*
2556                      * get an appropriate link-type adj.
2557                      */
2558                     ai = fib_path_attached_get_adj(
2559                             path,
2560                             fib_forw_chain_type_to_link_type(fct));
2561                     dpo_set(dpo, DPO_ADJACENCY,
2562                             fib_forw_chain_type_to_dpo_proto(fct), ai);
2563                     adj_unlock(ai);
2564                     break;
2565                 }
2566             case FIB_FORW_CHAIN_TYPE_MCAST_IP4:
2567             case FIB_FORW_CHAIN_TYPE_MCAST_IP6:
2568                 {
2569                     adj_index_t ai;
2570
2571                     /*
2572                      * Create the adj needed for sending IP multicast traffic
2573                      */
2574                     if (vnet_sw_interface_is_p2p(vnet_get_main(),
2575                                                  path->attached.fp_interface))
2576                     {
2577                         /*
2578                          * point-2-point interfaces do not need a specific multicast adj,
2579                          * since all traffic goes to the one peer. Install a rewrite/nbr adj instead
2580                          */
2581                         ai = adj_nbr_add_or_lock(dpo_proto_to_fib(path->fp_nh_proto),
2582                                                  fib_forw_chain_type_to_link_type(fct),
2583                                                  &zero_addr,
2584                                                  path->attached.fp_interface);
2585                     }
2586                     else
2587                     {
2588                         ai = adj_mcast_add_or_lock(dpo_proto_to_fib(path->fp_nh_proto),
2589                                                    fib_forw_chain_type_to_link_type(fct),
2590                                                    path->attached.fp_interface);
2591                     }
2592                     dpo_set(dpo, DPO_ADJACENCY,
2593                             fib_forw_chain_type_to_dpo_proto(fct),
2594                             ai);
2595                     adj_unlock(ai);
2596                 }
2597                 break;
2598             }
2599             break;
2600         case FIB_PATH_TYPE_INTF_RX:
2601             /*
2602              * Resolve via an interface-rx DPO on the path's interface
2603              */
2604             interface_rx_dpo_add_or_lock(fib_forw_chain_type_to_dpo_proto(fct),
2605                                          path->attached.fp_interface,
2606                                          dpo);
2607             break;
2608         case FIB_PATH_TYPE_UDP_ENCAP:
2609             udp_encap_contribute_forwarding(path->udp_encap.fp_udp_encap_id,
2610                                             path->fp_nh_proto,
2611                                             dpo);
2612             break;
2613         case FIB_PATH_TYPE_RECEIVE:
2614         case FIB_PATH_TYPE_SPECIAL:
2615         case FIB_PATH_TYPE_DVR:
2616             dpo_copy(dpo, &path->fp_dpo);
2617             break;
2618         }
2619     }
2620 }
2621
2622 load_balance_path_t *
2623 fib_path_append_nh_for_multipath_hash (fib_node_index_t path_index,
2624                                        fib_forward_chain_type_t fct,
2625                                        load_balance_path_t *hash_key)
2626 {
2627     load_balance_path_t *mnh;
2628     fib_path_t *path;
2629
2630     path = fib_path_get(path_index);
2631
2632     ASSERT(path);
2633
2634     vec_add2(hash_key, mnh, 1);
2635
2636     mnh->path_weight = path->fp_weight;
2637     mnh->path_index = path_index;
2638
2639     if (fib_path_is_resolved(path_index))
2640     {
2641         fib_path_contribute_forwarding(path_index, fct, &mnh->path_dpo);
2642     }
2643     else
2644     {
2645         dpo_copy(&mnh->path_dpo,
2646                  drop_dpo_get(fib_forw_chain_type_to_dpo_proto(fct)));
2647     }
2648     return (hash_key);
2649 }
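/*
 * Sketch of assumed caller usage (illustrative, not prescriptive): accumulate
 * one load_balance_path_t per path of a path-list, then hand the vector to
 * the load-balance object, e.g.
 *
 *     hash_key = fib_path_append_nh_for_multipath_hash(pi, fct, hash_key);
 *     ...
 *     load_balance_multipath_update(dpo, hash_key, LOAD_BALANCE_FLAG_NONE);
 */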
2650
2651 int
2652 fib_path_is_recursive_constrained (fib_node_index_t path_index)
2653 {
2654     fib_path_t *path;
2655
2656     path = fib_path_get(path_index);
2657
2658     return ((FIB_PATH_TYPE_RECURSIVE == path->fp_type) &&
2659             ((path->fp_cfg_flags & FIB_PATH_CFG_FLAG_RESOLVE_ATTACHED) ||
2660              (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_RESOLVE_HOST)));
2661 }
2662
2663 int
2664 fib_path_is_exclusive (fib_node_index_t path_index)
2665 {
2666     fib_path_t *path;
2667
2668     path = fib_path_get(path_index);
2669
2670     return (FIB_PATH_TYPE_EXCLUSIVE == path->fp_type);
2671 }
2672
2673 int
2674 fib_path_is_deag (fib_node_index_t path_index)
2675 {
2676     fib_path_t *path;
2677
2678     path = fib_path_get(path_index);
2679
2680     return (FIB_PATH_TYPE_DEAG == path->fp_type);
2681 }
2682
2683 int
2684 fib_path_is_resolved (fib_node_index_t path_index)
2685 {
2686     fib_path_t *path;
2687
2688     path = fib_path_get(path_index);
2689
2690     return (dpo_id_is_valid(&path->fp_dpo) &&
2691             (path->fp_oper_flags & FIB_PATH_OPER_FLAG_RESOLVED) &&
2692             !fib_path_is_looped(path_index) &&
2693             !fib_path_is_permanent_drop(path));
2694 }
2695
2696 int
2697 fib_path_is_looped (fib_node_index_t path_index)
2698 {
2699     fib_path_t *path;
2700
2701     path = fib_path_get(path_index);
2702
2703     return (path->fp_oper_flags & FIB_PATH_OPER_FLAG_RECURSIVE_LOOP);
2704 }
2705
2706 fib_path_list_walk_rc_t
2707 fib_path_encode (fib_node_index_t path_list_index,
2708                  fib_node_index_t path_index,
2709                  const fib_path_ext_t *path_ext,
2710                  void *args)
2711 {
2712     fib_path_encode_ctx_t *ctx = args;
2713     fib_route_path_t *rpath;
2714     fib_path_t *path;
2715
2716     path = fib_path_get(path_index);
2717     if (!path)
2718       return (FIB_PATH_LIST_WALK_CONTINUE);
2719
2720     vec_add2(ctx->rpaths, rpath, 1);
2721     rpath->frp_weight = path->fp_weight;
2722     rpath->frp_preference = path->fp_preference;
2723     rpath->frp_proto = path->fp_nh_proto;
2724     rpath->frp_sw_if_index = ~0;
2725     rpath->frp_fib_index = 0;
2726
2727     switch (path->fp_type)
2728     {
2729       case FIB_PATH_TYPE_RECEIVE:
2730         rpath->frp_addr = path->receive.fp_addr;
2731         rpath->frp_sw_if_index = path->receive.fp_interface;
2732         rpath->frp_flags |= FIB_ROUTE_PATH_LOCAL;
2733         break;
2734       case FIB_PATH_TYPE_ATTACHED:
2735         rpath->frp_sw_if_index = path->attached.fp_interface;
2736         break;
2737       case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
2738         rpath->frp_sw_if_index = path->attached_next_hop.fp_interface;
2739         rpath->frp_addr = path->attached_next_hop.fp_nh;
2740         break;
2741       case FIB_PATH_TYPE_BIER_FMASK:
2742         rpath->frp_bier_fmask = path->bier_fmask.fp_bier_fmask;
2743         break;
2744       case FIB_PATH_TYPE_SPECIAL:
2745         break;
2746       case FIB_PATH_TYPE_DEAG:
2747         rpath->frp_fib_index = path->deag.fp_tbl_id;
2748         if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_RPF_ID)
2749         {
2750             rpath->frp_flags |= FIB_ROUTE_PATH_RPF_ID;
2751         }
2752         break;
2753       case FIB_PATH_TYPE_RECURSIVE:
2754         rpath->frp_addr = path->recursive.fp_nh.fp_ip;
2755         rpath->frp_fib_index = path->recursive.fp_tbl_id;
2756         break;
2757       case FIB_PATH_TYPE_DVR:
2758           rpath->frp_sw_if_index = path->dvr.fp_interface;
2759           rpath->frp_flags |= FIB_ROUTE_PATH_DVR;
2760           break;
2761       case FIB_PATH_TYPE_UDP_ENCAP:
2762           rpath->frp_udp_encap_id = path->udp_encap.fp_udp_encap_id;
2763           rpath->frp_flags |= FIB_ROUTE_PATH_UDP_ENCAP;
2764           break;
2765       case FIB_PATH_TYPE_INTF_RX:
2766           rpath->frp_sw_if_index = path->receive.fp_interface;
2767           rpath->frp_flags |= FIB_ROUTE_PATH_INTF_RX;
2768           break;
2769       case FIB_PATH_TYPE_EXCLUSIVE:
2770         rpath->frp_flags |= FIB_ROUTE_PATH_EXCLUSIVE;
2771       default:
2772         break;
2773     }
2774
2775     if (path_ext && path_ext->fpe_type == FIB_PATH_EXT_MPLS) 
2776     {
2777         rpath->frp_label_stack = path_ext->fpe_path.frp_label_stack;
2778     }
2779
2780     if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_DROP)
2781         rpath->frp_flags |= FIB_ROUTE_PATH_DROP;
2782     if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_ICMP_UNREACH)
2783         rpath->frp_flags |= FIB_ROUTE_PATH_ICMP_UNREACH;
2784     if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_ICMP_PROHIBIT)
2785         rpath->frp_flags |= FIB_ROUTE_PATH_ICMP_PROHIBIT;
2786
2787     return (FIB_PATH_LIST_WALK_CONTINUE);
2788 }
2789
2790 dpo_proto_t
2791 fib_path_get_proto (fib_node_index_t path_index)
2792 {
2793     fib_path_t *path;
2794
2795     path = fib_path_get(path_index);
2796
2797     return (path->fp_nh_proto);
2798 }
2799
2800 void
2801 fib_path_module_init (void)
2802 {
2803     fib_node_register_type (FIB_NODE_TYPE_PATH, &fib_path_vft);
2804     fib_path_logger = vlib_log_register_class ("fib", "path");
2805 }
2806
2807 static clib_error_t *
2808 show_fib_path_command (vlib_main_t * vm,
2809                         unformat_input_t * input,
2810                         vlib_cli_command_t * cmd)
2811 {
2812     fib_node_index_t pi;
2813     fib_path_t *path;
2814
2815     if (unformat (input, "%d", &pi))
2816     {
2817         /*
2818          * show one in detail
2819          */
2820         if (!pool_is_free_index(fib_path_pool, pi))
2821         {
2822             path = fib_path_get(pi);
2823             u8 *s = format(NULL, "%U", format_fib_path, pi, 1,
2824                            FIB_PATH_FORMAT_FLAGS_NONE);
2825             s = format(s, "\n  children:");
2826             s = fib_node_children_format(path->fp_node.fn_children, s);
2827             vlib_cli_output (vm, "%s", s);
2828             vec_free(s);
2829         }
2830         else
2831         {
2832             vlib_cli_output (vm, "path %d invalid", pi);
2833         }
2834     }
2835     else
2836     {
2837         vlib_cli_output (vm, "FIB Paths");
2838         pool_foreach_index (pi, fib_path_pool,
2839         ({
2840             vlib_cli_output (vm, "%U", format_fib_path, pi, 0,
2841                              FIB_PATH_FORMAT_FLAGS_NONE);
2842         }));
2843     }
2844
2845     return (NULL);
2846 }
2847
2848 VLIB_CLI_COMMAND (show_fib_path, static) = {
2849   .path = "show fib paths",
2850   .function = show_fib_path_command,
2851   .short_help = "show fib paths",
2852 };
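/*
 * Example CLI usage (output omitted):
 *
 *     vpp# show fib paths
 *     vpp# show fib paths 12
 *
 * the first form lists every path in the pool in one-line format; the second
 * shows the given path index in detail, including its child nodes.
 */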