vpp.git: src/vnet/fib/fib_path.c (commit: "fib: fib api updates")
1 /*
2  * Copyright (c) 2016 Cisco and/or its affiliates.
3  * Licensed under the Apache License, Version 2.0 (the "License");
4  * you may not use this file except in compliance with the License.
5  * You may obtain a copy of the License at:
6  *
7  *     http://www.apache.org/licenses/LICENSE-2.0
8  *
9  * Unless required by applicable law or agreed to in writing, software
10  * distributed under the License is distributed on an "AS IS" BASIS,
11  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12  * See the License for the specific language governing permissions and
13  * limitations under the License.
14  */
15
16 #include <vlib/vlib.h>
17 #include <vnet/vnet.h>
18 #include <vnet/ip/format.h>
19 #include <vnet/ip/ip.h>
20 #include <vnet/dpo/drop_dpo.h>
21 #include <vnet/dpo/receive_dpo.h>
22 #include <vnet/dpo/load_balance_map.h>
23 #include <vnet/dpo/lookup_dpo.h>
24 #include <vnet/dpo/interface_rx_dpo.h>
25 #include <vnet/dpo/mpls_disposition.h>
26 #include <vnet/dpo/dvr_dpo.h>
27 #include <vnet/dpo/ip_null_dpo.h>
28 #include <vnet/dpo/classify_dpo.h>
29
30 #include <vnet/adj/adj.h>
31 #include <vnet/adj/adj_mcast.h>
32
33 #include <vnet/fib/fib_path.h>
34 #include <vnet/fib/fib_node.h>
35 #include <vnet/fib/fib_table.h>
36 #include <vnet/fib/fib_entry.h>
37 #include <vnet/fib/fib_path_list.h>
38 #include <vnet/fib/fib_internal.h>
39 #include <vnet/fib/fib_urpf_list.h>
40 #include <vnet/fib/mpls_fib.h>
41 #include <vnet/fib/fib_path_ext.h>
42 #include <vnet/udp/udp_encap.h>
43 #include <vnet/bier/bier_fmask.h>
44 #include <vnet/bier/bier_table.h>
45 #include <vnet/bier/bier_imp.h>
46 #include <vnet/bier/bier_disp_table.h>
47
48 /**
49  * Enumeration of path types
50  */
51 typedef enum fib_path_type_t_ {
52     /**
53      * Marker. Add new types after this one.
54      */
55     FIB_PATH_TYPE_FIRST = 0,
56     /**
57      * Attached-nexthop. An interface and a nexthop are known.
58      */
59     FIB_PATH_TYPE_ATTACHED_NEXT_HOP = FIB_PATH_TYPE_FIRST,
60     /**
61      * attached. Only the interface is known.
62      */
63     FIB_PATH_TYPE_ATTACHED,
64     /**
65      * recursive. Only the next-hop is known.
66      */
67     FIB_PATH_TYPE_RECURSIVE,
68     /**
69      * special. nothing is known. so we drop.
70      */
71     FIB_PATH_TYPE_SPECIAL,
72     /**
73      * exclusive. user provided adj.
74      */
75     FIB_PATH_TYPE_EXCLUSIVE,
76     /**
77      * deag. Link to a lookup adj in the next table
78      */
79     FIB_PATH_TYPE_DEAG,
80     /**
81      * interface receive.
82      */
83     FIB_PATH_TYPE_INTF_RX,
84     /**
85      * Path resolves via a UDP encap object.
86      */
87     FIB_PATH_TYPE_UDP_ENCAP,
88     /**
89      * receive. it's for-us.
90      */
91     FIB_PATH_TYPE_RECEIVE,
92     /**
93      * bier-imp. it's via a BIER imposition.
94      */
95     FIB_PATH_TYPE_BIER_IMP,
96     /**
97      * bier-table. it's via a BIER ECMP-table.
98      */
99     FIB_PATH_TYPE_BIER_TABLE,
100     /**
101      * bier-fmask. it's via a BIER f-mask.
102      */
103     FIB_PATH_TYPE_BIER_FMASK,
104     /**
105      * via a DVR.
106      */
107     FIB_PATH_TYPE_DVR,
108     /**
109      * Marker. Add new types before this one, then update it.
110      */
111     FIB_PATH_TYPE_LAST = FIB_PATH_TYPE_DVR,
112 } __attribute__ ((packed)) fib_path_type_t;
113
114 /**
115  * The maximum number of path_types
116  */
117 #define FIB_PATH_TYPE_MAX (FIB_PATH_TYPE_LAST + 1)
118
119 #define FIB_PATH_TYPES {                                        \
120     [FIB_PATH_TYPE_ATTACHED_NEXT_HOP] = "attached-nexthop",     \
121     [FIB_PATH_TYPE_ATTACHED]          = "attached",             \
122     [FIB_PATH_TYPE_RECURSIVE]         = "recursive",            \
123     [FIB_PATH_TYPE_SPECIAL]           = "special",              \
124     [FIB_PATH_TYPE_EXCLUSIVE]         = "exclusive",            \
125     [FIB_PATH_TYPE_DEAG]              = "deag",                 \
126     [FIB_PATH_TYPE_INTF_RX]           = "intf-rx",              \
127     [FIB_PATH_TYPE_UDP_ENCAP]         = "udp-encap",            \
128     [FIB_PATH_TYPE_RECEIVE]           = "receive",              \
129     [FIB_PATH_TYPE_BIER_IMP]          = "bier-imp",             \
130     [FIB_PATH_TYPE_BIER_TABLE]        = "bier-table",           \
131     [FIB_PATH_TYPE_BIER_FMASK]        = "bier-fmask",           \
132     [FIB_PATH_TYPE_DVR]               = "dvr",                  \
133 }
134
135 #define FOR_EACH_FIB_PATH_TYPE(_item)           \
136     for (_item = FIB_PATH_TYPE_FIRST;           \
137          _item <= FIB_PATH_TYPE_LAST;           \
138          _item++)
139
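/*
 * Usage sketch (added for illustration, not from the original file): the
 * string table and iterator above are typically combined to walk and name
 * every path type, e.g. when formatting:
 *
 *   fib_path_type_t pt;
 *   FOR_EACH_FIB_PATH_TYPE(pt) {
 *       s = format(s, "%s ", fib_path_type_names[pt]);
 *   }
 */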
140 /**
141  * Enumeration of path operational (i.e. derived) attributes
142  */
143 typedef enum fib_path_oper_attribute_t_ {
144     /**
145      * Marker. Add new types after this one.
146      */
147     FIB_PATH_OPER_ATTRIBUTE_FIRST = 0,
148     /**
149      * The path forms part of a recursive loop.
150      */
151     FIB_PATH_OPER_ATTRIBUTE_RECURSIVE_LOOP = FIB_PATH_OPER_ATTRIBUTE_FIRST,
152     /**
153      * The path is resolved
154      */
155     FIB_PATH_OPER_ATTRIBUTE_RESOLVED,
156     /**
157      * The path is attached, despite what the next-hop may say.
158      */
159     FIB_PATH_OPER_ATTRIBUTE_ATTACHED,
160     /**
161      * The path has become a permanent drop.
162      */
163     FIB_PATH_OPER_ATTRIBUTE_DROP,
164     /**
165      * Marker. Add new types before this one, then update it.
166      */
167     FIB_PATH_OPER_ATTRIBUTE_LAST = FIB_PATH_OPER_ATTRIBUTE_DROP,
168 } __attribute__ ((packed)) fib_path_oper_attribute_t;
169
170 /**
171  * The maximum number of path operational attributes
172  */
173 #define FIB_PATH_OPER_ATTRIBUTE_MAX (FIB_PATH_OPER_ATTRIBUTE_LAST + 1)
174
175 #define FIB_PATH_OPER_ATTRIBUTES {                                      \
176     [FIB_PATH_OPER_ATTRIBUTE_RECURSIVE_LOOP] = "recursive-loop",        \
177     [FIB_PATH_OPER_ATTRIBUTE_RESOLVED]       = "resolved",              \
    [FIB_PATH_OPER_ATTRIBUTE_ATTACHED]       = "attached",               \
178     [FIB_PATH_OPER_ATTRIBUTE_DROP]           = "drop",                  \
179 }
180
181 #define FOR_EACH_FIB_PATH_OPER_ATTRIBUTE(_item) \
182     for (_item = FIB_PATH_OPER_ATTRIBUTE_FIRST; \
183          _item <= FIB_PATH_OPER_ATTRIBUTE_LAST; \
184          _item++)
185
186 /**
187  * Path flags from the attributes
188  */
189 typedef enum fib_path_oper_flags_t_ {
190     FIB_PATH_OPER_FLAG_NONE = 0,
191     FIB_PATH_OPER_FLAG_RECURSIVE_LOOP = (1 << FIB_PATH_OPER_ATTRIBUTE_RECURSIVE_LOOP),
192     FIB_PATH_OPER_FLAG_DROP = (1 << FIB_PATH_OPER_ATTRIBUTE_DROP),
193     FIB_PATH_OPER_FLAG_RESOLVED = (1 << FIB_PATH_OPER_ATTRIBUTE_RESOLVED),
194     FIB_PATH_OPER_FLAG_ATTACHED = (1 << FIB_PATH_OPER_ATTRIBUTE_ATTACHED),
195 } __attribute__ ((packed)) fib_path_oper_flags_t;
196
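/*
 * Note (added): each operational flag is simply (1 << attribute), so path
 * state is manipulated with plain bit operations, e.g. (a sketch):
 *
 *   path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RESOLVED;   // mark resolved
 *   path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;  // mark unresolved
 */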
197 /**
198  * A FIB path
199  */
200 typedef struct fib_path_t_ {
201     /**
202      * A path is a node in the FIB graph.
203      */
204     fib_node_t fp_node;
205
206     /**
207      * The index of the path-list to which this path belongs
208      */
209     u32 fp_pl_index;
210
211     /**
212      * This marks the start of the memory area used to hash
213      * the path
214      */
215     STRUCT_MARK(path_hash_start);
216
217     /**
218      * Configuration Flags
219      */
220     fib_path_cfg_flags_t fp_cfg_flags;
221
222     /**
223      * The type of the path. This is the selector for the union
224      */
225     fib_path_type_t fp_type;
226
227     /**
228      * The protocol of the next-hop, i.e. the address family of the
229      * next-hop's address. We can't derive this from the address itself
230      * since the address can be all zeros
231      */
232     dpo_proto_t fp_nh_proto;
233
234     /**
235      * UCMP [unnormalised] weight
236      */
237     u8 fp_weight;
238
239     /**
240      * A path preference. 0 is the best.
241      * Only paths of the best preference, that are 'up', are considered
242      * for forwarding.
243      */
244     u8 fp_preference;
245
246     /**
247      * per-type union of the data required to resolve the path
248      */
249     union {
250         struct {
251             /**
252              * The next-hop
253              */
254             ip46_address_t fp_nh;
255             /**
256              * The interface
257              */
258             u32 fp_interface;
259         } attached_next_hop;
260         struct {
261             /**
262              * The interface
263              */
264             u32 fp_interface;
265         } attached;
266         struct {
267             union
268             {
269                 /**
270                  * The next-hop
271                  */
272                 ip46_address_t fp_ip;
273                 struct {
274                     /**
275                      * The local label to resolve through.
276                      */
277                     mpls_label_t fp_local_label;
278                     /**
279                      * The EOS bit of the resolving label
280                      */
281                     mpls_eos_bit_t fp_eos;
282                 };
283             } fp_nh;
284             union {
285                 /**
286                  * The FIB table index in which to find the next-hop.
287                  */
288                 fib_node_index_t fp_tbl_id;
289                 /**
290                  * The BIER FIB the fmask is in
291                  */
292                 index_t fp_bier_fib;
293             };
294         } recursive;
295         struct {
296             /**
297              * BIER FMask ID
298              */
299             index_t fp_bier_fmask;
300         } bier_fmask;
301         struct {
302             /**
303              * The BIER table's ID
304              */
305             bier_table_id_t fp_bier_tbl;
306         } bier_table;
307         struct {
308             /**
309              * The BIER imposition object
310              * this is part of the path's key, since the index_t
311              * of an imposition object is the object's key.
312              */
313             index_t fp_bier_imp;
314         } bier_imp;
315         struct {
316             /**
317              * The FIB index in which to perform the next lookup
318              */
319             fib_node_index_t fp_tbl_id;
320             /**
321              * The RPF-ID to tag the packets with
322              */
323             fib_rpf_id_t fp_rpf_id;
324         } deag;
325         struct {
326         } special;
327         struct {
328             /**
329              * The user provided 'exclusive' DPO
330              */
331             dpo_id_t fp_ex_dpo;
332         } exclusive;
333         struct {
334             /**
335              * The interface on which the local address is configured
336              */
337             u32 fp_interface;
338             /**
339              * The next-hop
340              */
341             ip46_address_t fp_addr;
342         } receive;
343         struct {
344             /**
345              * The interface on which the packets will be input.
346              */
347             u32 fp_interface;
348         } intf_rx;
349         struct {
350             /**
351              * The UDP Encap object this path resolves through
352              */
353             u32 fp_udp_encap_id;
354         } udp_encap;
355         struct {
356             /**
357              * The classify table this path resolves through
358              */
359             u32 fp_classify_table_id;
360         } classify;
361         struct {
362             /**
363              * The interface
364              */
365             u32 fp_interface;
366         } dvr;
367     };
368     STRUCT_MARK(path_hash_end);
369
370     /**
371      * Members in this last section represent information that is
372      * derived during resolution. It should not be copied to new paths
373      * nor compared.
374      */
375
376     /**
377      * Operational Flags
378      */
379     fib_path_oper_flags_t fp_oper_flags;
380
381     union {
382         /**
383          * the resolving via fib. not part of the key union above, since it
384          * is not part of the path's hash.
385          */
386         fib_node_index_t fp_via_fib;
387         /**
388          * the resolving bier-table
389          */
390         index_t fp_via_bier_tbl;
391         /**
392          * the resolving bier-fmask
393          */
394         index_t fp_via_bier_fmask;
395     };
396
397     /**
398      * The Data-path objects through which this path resolves for IP.
399      */
400     dpo_id_t fp_dpo;
401
402     /**
403      * the index of this path in the parent's child list.
404      */
405     u32 fp_sibling;
406 } fib_path_t;
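/*
 * Note (added): the members between STRUCT_MARK(path_hash_start) and
 * STRUCT_MARK(path_hash_end) form the path's key; fib_path_hash() below
 * hashes exactly that region and fib_path_cmp_i() compares it member by
 * member. Everything after path_hash_end is derived state and is reset,
 * not copied, by fib_path_copy().
 */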
407
408 /*
409  * Array of strings/names for the path types and attributes
410  */
411 static const char *fib_path_type_names[] = FIB_PATH_TYPES;
412 static const char *fib_path_oper_attribute_names[] = FIB_PATH_OPER_ATTRIBUTES;
413 static const char *fib_path_cfg_attribute_names[]  = FIB_PATH_CFG_ATTRIBUTES;
414
415 /*
416  * The memory pool from which we allocate all the paths
417  */
418 static fib_path_t *fib_path_pool;
419
420 /**
421  * the logger
422  */
423 vlib_log_class_t fib_path_logger;
424
425 /*
426  * Debug macro
427  */
428 #define FIB_PATH_DBG(_p, _fmt, _args...)                                \
429 {                                                                       \
430     vlib_log_debug (fib_path_logger,                                    \
431                     "[%U]: " _fmt,                                      \
432                     format_fib_path, fib_path_get_index(_p), 0,         \
433                     FIB_PATH_FORMAT_FLAGS_ONE_LINE,                     \
434                     ##_args);                                           \
435 }
436
437 static fib_path_t *
438 fib_path_get (fib_node_index_t index)
439 {
440     return (pool_elt_at_index(fib_path_pool, index));
441 }
442
443 static fib_node_index_t 
444 fib_path_get_index (fib_path_t *path)
445 {
446     return (path - fib_path_pool);
447 }
448
449 static fib_node_t *
450 fib_path_get_node (fib_node_index_t index)
451 {
452     return ((fib_node_t*)fib_path_get(index));
453 }
454
455 static fib_path_t*
456 fib_path_from_fib_node (fib_node_t *node)
457 {
458     ASSERT(FIB_NODE_TYPE_PATH == node->fn_type);
459     return ((fib_path_t*)node);
460 }
461
462 u8 *
463 format_fib_path (u8 * s, va_list * args)
464 {
465     fib_node_index_t path_index = va_arg (*args, fib_node_index_t);
466     u32 indent = va_arg (*args, u32);
467     fib_format_path_flags_t flags =  va_arg (*args, fib_format_path_flags_t);
468     vnet_main_t * vnm = vnet_get_main();
469     fib_path_oper_attribute_t oattr;
470     fib_path_cfg_attribute_t cattr;
471     fib_path_t *path;
472     const char *eol;
473
474     if (flags & FIB_PATH_FORMAT_FLAGS_ONE_LINE)
475     {
476         eol = "";
477     }
478     else
479     {
480         eol = "\n";
481     }
482
483     path = fib_path_get(path_index);
484
485     s = format (s, "%Upath:[%d] ", format_white_space, indent,
486                 fib_path_get_index(path));
487     s = format (s, "pl-index:%d ", path->fp_pl_index);
488     s = format (s, "%U ", format_dpo_proto, path->fp_nh_proto);
489     s = format (s, "weight=%d ", path->fp_weight);
490     s = format (s, "pref=%d ", path->fp_preference);
491     s = format (s, "%s: ", fib_path_type_names[path->fp_type]);
492     if (FIB_PATH_OPER_FLAG_NONE != path->fp_oper_flags) {
493         s = format(s, " oper-flags:");
494         FOR_EACH_FIB_PATH_OPER_ATTRIBUTE(oattr) {
495             if ((1<<oattr) & path->fp_oper_flags) {
496                 s = format (s, "%s,", fib_path_oper_attribute_names[oattr]);
497             }
498         }
499     }
500     if (FIB_PATH_CFG_FLAG_NONE != path->fp_cfg_flags) {
501         s = format(s, " cfg-flags:");
502         FOR_EACH_FIB_PATH_CFG_ATTRIBUTE(cattr) {
503             if ((1<<cattr) & path->fp_cfg_flags) {
504                 s = format (s, "%s,", fib_path_cfg_attribute_names[cattr]);
505             }
506         }
507     }
508     if (!(flags & FIB_PATH_FORMAT_FLAGS_ONE_LINE))
509         s = format(s, "\n%U", format_white_space, indent+2);
510
511     switch (path->fp_type)
512     {
513     case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
514         s = format (s, "%U", format_ip46_address,
515                     &path->attached_next_hop.fp_nh,
516                     IP46_TYPE_ANY);
517         if (path->fp_oper_flags & FIB_PATH_OPER_FLAG_DROP)
518         {
519             s = format (s, " if_index:%d", path->attached_next_hop.fp_interface);
520         }
521         else
522         {
523             s = format (s, " %U",
524                         format_vnet_sw_interface_name,
525                         vnm,
526                         vnet_get_sw_interface(
527                             vnm,
528                             path->attached_next_hop.fp_interface));
529             if (vnet_sw_interface_is_p2p(vnet_get_main(),
530                                          path->attached_next_hop.fp_interface))
531             {
532                 s = format (s, " (p2p)");
533             }
534         }
535         if (!dpo_id_is_valid(&path->fp_dpo))
536         {
537             s = format(s, "%s%Uunresolved", eol, format_white_space, indent+2);
538         }
539         else
540         {
541             s = format(s, "%s%U%U", eol,
542                        format_white_space, indent,
543                        format_dpo_id,
544                        &path->fp_dpo, 13);
545         }
546         break;
547     case FIB_PATH_TYPE_ATTACHED:
548         if (path->fp_oper_flags & FIB_PATH_OPER_FLAG_DROP)
549         {
550             s = format (s, "if_index:%d", path->attached.fp_interface);
551         }
552         else
553         {
554             s = format (s, " %U",
555                         format_vnet_sw_interface_name,
556                         vnm,
557                         vnet_get_sw_interface(
558                             vnm,
559                             path->attached.fp_interface));
560         }
561         break;
562     case FIB_PATH_TYPE_RECURSIVE:
563         if (DPO_PROTO_MPLS == path->fp_nh_proto)
564         {
565             s = format (s, "via %U %U",
566                         format_mpls_unicast_label,
567                         path->recursive.fp_nh.fp_local_label,
568                         format_mpls_eos_bit,
569                         path->recursive.fp_nh.fp_eos);
570         }
571         else
572         {
573             s = format (s, "via %U",
574                         format_ip46_address,
575                         &path->recursive.fp_nh.fp_ip,
576                         IP46_TYPE_ANY);
577         }
578         s = format (s, " in fib:%d", path->recursive.fp_tbl_id);
581         s = format (s, " via-fib:%d", path->fp_via_fib);
582         s = format (s, " via-dpo:[%U:%d]",
583                     format_dpo_type, path->fp_dpo.dpoi_type,
584                     path->fp_dpo.dpoi_index);
585
586         break;
587     case FIB_PATH_TYPE_UDP_ENCAP:
588         s = format (s, "UDP-encap ID:%d", path->udp_encap.fp_udp_encap_id);
589         break;
590     case FIB_PATH_TYPE_BIER_TABLE:
591         s = format (s, "via bier-table:[%U]",
592                     format_bier_table_id,
593                     &path->bier_table.fp_bier_tbl);
594         s = format (s, " via-dpo:[%U:%d]",
595                     format_dpo_type, path->fp_dpo.dpoi_type,
596                     path->fp_dpo.dpoi_index);
597         break;
598     case FIB_PATH_TYPE_BIER_FMASK:
599         s = format (s, "via-fmask:%d", path->bier_fmask.fp_bier_fmask); 
600         s = format (s, " via-dpo:[%U:%d]",
601                     format_dpo_type, path->fp_dpo.dpoi_type, 
602                     path->fp_dpo.dpoi_index);
603         break;
604     case FIB_PATH_TYPE_BIER_IMP:
605         s = format (s, "via %U", format_bier_imp,
606                     path->bier_imp.fp_bier_imp, 0, BIER_SHOW_BRIEF);
607         break;
608     case FIB_PATH_TYPE_DVR:
609         s = format (s, " %U",
610                     format_vnet_sw_interface_name,
611                     vnm,
612                     vnet_get_sw_interface(
613                         vnm,
614                         path->dvr.fp_interface));
615         break;
616     case FIB_PATH_TYPE_DEAG:
617         s = format (s, " %sfib-index:%d",
618                     (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_RPF_ID ?  "m" : ""),
619                     path->deag.fp_tbl_id);
620         break;
621     case FIB_PATH_TYPE_RECEIVE:
622     case FIB_PATH_TYPE_INTF_RX:
623     case FIB_PATH_TYPE_SPECIAL:
624     case FIB_PATH_TYPE_EXCLUSIVE:
625         if (dpo_id_is_valid(&path->fp_dpo))
626         {
627             s = format(s, "%U", format_dpo_id,
628                        &path->fp_dpo, indent+2);
629         }
630         break;
631     }
632     return (s);
633 }
634
635 /*
636  * fib_path_last_lock_gone
637  *
638  * We don't share paths, we share path lists, so the [un]lock functions
639  * are no-ops
640  */
641 static void
642 fib_path_last_lock_gone (fib_node_t *node)
643 {
644     ASSERT(0);
645 }
646
647 static const adj_index_t
648 fib_path_attached_next_hop_get_adj (fib_path_t *path,
649                                     vnet_link_t link)
650 {
651     if (vnet_sw_interface_is_p2p(vnet_get_main(),
652                                  path->attached_next_hop.fp_interface))
653     {
654         /*
655          * if the interface is p2p then the adj for the specific
656          * neighbour on that link will never exist. on p2p links
657          * the subnet address (the attached route) links to the
658          * auto-adj (see below), we want that adj here too.
659          */
660         return (adj_nbr_add_or_lock(dpo_proto_to_fib(path->fp_nh_proto),
661                                     link,
662                                     &zero_addr,
663                                     path->attached_next_hop.fp_interface));
664     }
665     else
666     {
667         return (adj_nbr_add_or_lock(dpo_proto_to_fib(path->fp_nh_proto),
668                                     link,
669                                     &path->attached_next_hop.fp_nh,
670                                     path->attached_next_hop.fp_interface));
671     }
672 }
673
674 static void
675 fib_path_attached_next_hop_set (fib_path_t *path)
676 {
677     /*
678      * resolve directly via the adjacency described by the
679      * interface and next-hop
680      */
681     dpo_set(&path->fp_dpo,
682             DPO_ADJACENCY,
683             path->fp_nh_proto,
684             fib_path_attached_next_hop_get_adj(
685                  path,
686                  dpo_proto_to_link(path->fp_nh_proto)));
687
688     /*
689      * become a child of the adjacency so we receive updates
690      * when its rewrite changes
691      */
692     path->fp_sibling = adj_child_add(path->fp_dpo.dpoi_index,
693                                      FIB_NODE_TYPE_PATH,
694                                      fib_path_get_index(path));
695
696     if (!vnet_sw_interface_is_up(vnet_get_main(),
697                                  path->attached_next_hop.fp_interface) ||
698         !adj_is_up(path->fp_dpo.dpoi_index))
699     {
700         path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
701     }
702 }
703
704 static const adj_index_t
705 fib_path_attached_get_adj (fib_path_t *path,
706                            vnet_link_t link)
707 {
708     if (vnet_sw_interface_is_p2p(vnet_get_main(),
709                                  path->attached.fp_interface))
710     {
711         /*
712          * point-2-point interfaces do not require a glean, since
713          * there is nothing to ARP. Install a rewrite/nbr adj instead
714          */
715         return (adj_nbr_add_or_lock(dpo_proto_to_fib(path->fp_nh_proto),
716                                     link,
717                                     &zero_addr,
718                                     path->attached.fp_interface));
719     }
720     else
721     {
722         return (adj_glean_add_or_lock(dpo_proto_to_fib(path->fp_nh_proto),
723                                       link,
724                                       path->attached.fp_interface,
725                                       NULL));
726     }
727 }
728
729 /*
730  * create or update the path's recursive adj
731  */
732 static void
733 fib_path_recursive_adj_update (fib_path_t *path,
734                                fib_forward_chain_type_t fct,
735                                dpo_id_t *dpo)
736 {
737     dpo_id_t via_dpo = DPO_INVALID;
738
739     /*
740      * get the DPO to resolve through from the via-entry
741      */
742     fib_entry_contribute_forwarding(path->fp_via_fib,
743                                     fct,
744                                     &via_dpo);
745
746
747     /*
748      * hope for the best - clear if restrictions apply.
749      */
750     path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RESOLVED;
751
752     /*
753      * Validate any recursion constraints and over-ride the via
754      * adj if not met
755      */
756     if (path->fp_oper_flags & FIB_PATH_OPER_FLAG_RECURSIVE_LOOP)
757     {
758         path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
759         dpo_copy(&via_dpo, drop_dpo_get(path->fp_nh_proto));
760     }
761     else if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_RESOLVE_HOST)
762     {
763         /*
764          * the via FIB must be a host route.
765          * note the via FIB just added will always be a host route
766          * since it is an RR source added host route. So what we need to
767          * check is whether the route has other sources. If it does then
768          * some other source has added it as a host route. If it doesn't
769          * then it was added only here and inherits forwarding from a cover.
770          * the cover is not a host route.
771          * The RR source is the lowest priority source, so we check if it
772          * is the best. if it is there are no other sources.
773          */
774         if (fib_entry_get_best_source(path->fp_via_fib) >= FIB_SOURCE_RR)
775         {
776             path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
777             dpo_copy(&via_dpo, drop_dpo_get(path->fp_nh_proto));
778
779             /*
780              * PIC edge trigger. let the load-balance maps know
781              */
782             load_balance_map_path_state_change(fib_path_get_index(path));
783         }
784     }
785     else if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_RESOLVE_ATTACHED)
786     {
787         /*
788          * RR source entries inherit the flags from the cover, so
789          * we can check the via directly
790          */
791         if (!(FIB_ENTRY_FLAG_ATTACHED & fib_entry_get_flags(path->fp_via_fib)))
792         {
793             path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
794             dpo_copy(&via_dpo, drop_dpo_get(path->fp_nh_proto));
795
796             /*
797              * PIC edge trigger. let the load-balance maps know
798              */
799             load_balance_map_path_state_change(fib_path_get_index(path));
800         }
801     }
802     /*
803      * check for over-riding factors on the FIB entry itself
804      */
805     if (!fib_entry_is_resolved(path->fp_via_fib))
806     {
807         path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
808         dpo_copy(&via_dpo, drop_dpo_get(path->fp_nh_proto));
809
810         /*
811          * PIC edge trigger. let the load-balance maps know
812          */
813         load_balance_map_path_state_change(fib_path_get_index(path));
814     }
815
816     /*
817      * If this path is contributing a drop, then it's not resolved
818      */
819     if (dpo_is_drop(&via_dpo) || load_balance_is_drop(&via_dpo))
820     {
821         path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
822     }
823
824     /*
825      * update the path's contributed DPO
826      */
827     dpo_copy(dpo, &via_dpo);
828
829     FIB_PATH_DBG(path, "recursive update:");
830
831     dpo_reset(&via_dpo);
832 }
833
834 /*
835  * re-evaluate the forwarding state for a via-fmask path
836  */
837 static void
838 fib_path_bier_fmask_update (fib_path_t *path,
839                             dpo_id_t *dpo)
840 {
841     bier_fmask_contribute_forwarding(path->bier_fmask.fp_bier_fmask, dpo);
842
843     /*
844      * if we are stacking on the drop, then the path is not resolved
845      */
846     if (dpo_is_drop(dpo))
847     {
848         path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
849     }
850     else
851     {
852         path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RESOLVED;
853     }
854 }
855
856 /*
857  * fib_path_is_permanent_drop
858  *
859  * Return !0 if the path is configured to permanently drop,
860  * despite other attributes.
861  */
862 static int
863 fib_path_is_permanent_drop (fib_path_t *path)
864 {
865     return ((path->fp_cfg_flags & FIB_PATH_CFG_FLAG_DROP) ||
866             (path->fp_oper_flags & FIB_PATH_OPER_FLAG_DROP));
867 }
868
869 /*
870  * fib_path_unresolve
871  *
872  * Remove our dependency on the resolution target
873  */
874 static void
875 fib_path_unresolve (fib_path_t *path)
876 {
877     /*
878      * the forced drop path does not need unresolving
879      */
880     if (fib_path_is_permanent_drop(path))
881     {
882         return;
883     }
884
885     switch (path->fp_type)
886     {
887     case FIB_PATH_TYPE_RECURSIVE:
888         if (FIB_NODE_INDEX_INVALID != path->fp_via_fib)
889         {
890             fib_entry_child_remove(path->fp_via_fib,
891                                    path->fp_sibling);
892             fib_table_entry_special_remove(path->recursive.fp_tbl_id,
893                                            fib_entry_get_prefix(path->fp_via_fib),
894                                            FIB_SOURCE_RR);
895             fib_table_unlock(path->recursive.fp_tbl_id,
896                              dpo_proto_to_fib(path->fp_nh_proto),
897                              FIB_SOURCE_RR);
898             path->fp_via_fib = FIB_NODE_INDEX_INVALID;
899         }
900         break;
901     case FIB_PATH_TYPE_BIER_FMASK:
902         bier_fmask_child_remove(path->fp_via_bier_fmask,
903                                 path->fp_sibling);
904         break;
905     case FIB_PATH_TYPE_BIER_IMP:
906         bier_imp_unlock(path->fp_dpo.dpoi_index);
907         break;
908     case FIB_PATH_TYPE_BIER_TABLE:
909         bier_table_ecmp_unlock(path->fp_via_bier_tbl);
910         break;
911     case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
912         adj_child_remove(path->fp_dpo.dpoi_index,
913                          path->fp_sibling);
914         adj_unlock(path->fp_dpo.dpoi_index);
915         break;
916     case FIB_PATH_TYPE_ATTACHED:
917         adj_child_remove(path->fp_dpo.dpoi_index,
918                          path->fp_sibling);
919         adj_unlock(path->fp_dpo.dpoi_index);
920         break;
921     case FIB_PATH_TYPE_UDP_ENCAP:
922         udp_encap_unlock(path->fp_dpo.dpoi_index);
923         break;
924     case FIB_PATH_TYPE_EXCLUSIVE:
925         dpo_reset(&path->exclusive.fp_ex_dpo);
926         break;
927     case FIB_PATH_TYPE_SPECIAL:
928     case FIB_PATH_TYPE_RECEIVE:
929     case FIB_PATH_TYPE_INTF_RX:
930     case FIB_PATH_TYPE_DEAG:
931     case FIB_PATH_TYPE_DVR:
932         /*
933          * these hold only the path's DPO, which is reset below.
934          */
935         break;
936     }
937
938     /*
939      * release the adj we were holding and pick up the
940      * drop just in case.
941      */
942     dpo_reset(&path->fp_dpo);
943     path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
944
945     return;
946 }
947
948 static fib_forward_chain_type_t
949 fib_path_to_chain_type (const fib_path_t *path)
950 {
951     if (DPO_PROTO_MPLS == path->fp_nh_proto)
952     {
953         if (FIB_PATH_TYPE_RECURSIVE == path->fp_type &&
954             MPLS_EOS == path->recursive.fp_nh.fp_eos)
955         {
956             return (FIB_FORW_CHAIN_TYPE_MPLS_EOS);
957         }
958         else
959         {
960             return (FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS);
961         }
962     }
963     else
964     {
965         return (fib_forw_chain_type_from_dpo_proto(path->fp_nh_proto));
966     }
967 }
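/*
 * For example (assumed mapping via the DPO-proto helper): an IPv4 path
 * yields FIB_FORW_CHAIN_TYPE_UNICAST_IP4, while an MPLS recursive path via
 * an end-of-stack label yields FIB_FORW_CHAIN_TYPE_MPLS_EOS.
 */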
968
969 /*
970  * fib_path_back_walk_notify
971  *
972  * A back walk has reached this path.
973  */
974 static fib_node_back_walk_rc_t
975 fib_path_back_walk_notify (fib_node_t *node,
976                            fib_node_back_walk_ctx_t *ctx)
977 {
978     fib_path_t *path;
979
980     path = fib_path_from_fib_node(node);
981
982     FIB_PATH_DBG(path, "bw:%U",
983                  format_fib_node_bw_reason, ctx->fnbw_reason);
984
985     switch (path->fp_type)
986     {
987     case FIB_PATH_TYPE_RECURSIVE:
988         if (FIB_NODE_BW_REASON_FLAG_EVALUATE & ctx->fnbw_reason)
989         {
990             /*
991              * modify the recursive adjacency to use the new forwarding
992              * of the via-fib.
993              * this update is visible to packets in flight in the DP.
994              */
995             fib_path_recursive_adj_update(
996                 path,
997                 fib_path_to_chain_type(path),
998                 &path->fp_dpo);
999         }
1000         if ((FIB_NODE_BW_REASON_FLAG_ADJ_UPDATE & ctx->fnbw_reason) ||
1001             (FIB_NODE_BW_REASON_FLAG_ADJ_DOWN   & ctx->fnbw_reason))
1002         {
1003             /*
1004              * ADJ updates (complete<->incomplete) do not need to propagate to
1005              * recursive entries.
1006              * The only reason it's needed as far back as here is that the adj
1007              * and the incomplete adj are a different DPO type, so the LBs need
1008              * to re-stack.
1009              * If this walk was quashed in the fib_entry, then any non-fib_path
1010              * children (like tunnels that collapse out the LB when they stack)
1011              * would not see the update.
1012              */
1013             return (FIB_NODE_BACK_WALK_CONTINUE);
1014         }
1015         break;
1016     case FIB_PATH_TYPE_BIER_FMASK:
1017         if (FIB_NODE_BW_REASON_FLAG_EVALUATE & ctx->fnbw_reason)
1018         {
1019             /*
1020              * update to use the BIER fmask's new forwarding
1021              */
1022             fib_path_bier_fmask_update(path, &path->fp_dpo);
1023         }
1024         if ((FIB_NODE_BW_REASON_FLAG_ADJ_UPDATE & ctx->fnbw_reason) ||
1025             (FIB_NODE_BW_REASON_FLAG_ADJ_DOWN   & ctx->fnbw_reason))
1026         {
1027             /*
1028              * ADJ updates (complete<->incomplete) do not need to propagate to
1029              * recursive entries.
1030              * The only reason it's needed as far back as here is that the adj
1031              * and the incomplete adj are a different DPO type, so the LBs need
1032              * to re-stack.
1033              * If this walk was quashed in the fib_entry, then any non-fib_path
1034              * children (like tunnels that collapse out the LB when they stack)
1035              * would not see the update.
1036              */
1037             return (FIB_NODE_BACK_WALK_CONTINUE);
1038         }
1039         break;
1040     case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
1041         /*
1042          * FIXME comment
1043          * ADJ_UPDATE backwalks pass silently through here and up to
1044          * the path-list when the multipath adj collapse occurs.
1045          * The reason we do this is that the assumption is that VPP
1046          * runs in an environment where the Control-Plane is remote
1047          * and hence reacts slowly to link up/down. In order to remove
1048          * this down link from the ECMP set quickly, we back-walk.
1049          * VPP also has dedicated CPUs, so we are not stealing resources
1050          * from the CP to do so.
1051          */
1052         if (FIB_NODE_BW_REASON_FLAG_INTERFACE_UP & ctx->fnbw_reason)
1053         {
1054             if (path->fp_oper_flags & FIB_PATH_OPER_FLAG_RESOLVED)
1055             {
1056                 /*
1057                  * already resolved. no need to walk back again
1058                  */
1059                 return (FIB_NODE_BACK_WALK_CONTINUE);
1060             }
1061             path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RESOLVED;
1062         }
1063         if (FIB_NODE_BW_REASON_FLAG_INTERFACE_DOWN & ctx->fnbw_reason)
1064         {
1065             if (!(path->fp_oper_flags & FIB_PATH_OPER_FLAG_RESOLVED))
1066             {
1067                 /*
1068                  * already unresolved. no need to walk back again
1069                  */
1070                 return (FIB_NODE_BACK_WALK_CONTINUE);
1071             }
1072             path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
1073         }
1074         if (FIB_NODE_BW_REASON_FLAG_INTERFACE_DELETE & ctx->fnbw_reason)
1075         {
1076             /*
1077              * The interface this path resolves through has been deleted.
1078              * This will leave the path in a permanent drop state. The route
1079              * needs to be removed and readded (and hence the path-list deleted)
1080              * before it can forward again.
1081              */
1082             fib_path_unresolve(path);
1083             path->fp_oper_flags |= FIB_PATH_OPER_FLAG_DROP;
1084         }
1085         if (FIB_NODE_BW_REASON_FLAG_ADJ_UPDATE & ctx->fnbw_reason)
1086         {
1087             /*
1088              * restack the DPO to pick up the correct DPO sub-type
1089              */
1090             uword if_is_up;
1091             adj_index_t ai;
1092
1093             if_is_up = vnet_sw_interface_is_up(
1094                            vnet_get_main(),
1095                            path->attached_next_hop.fp_interface);
1096
1097             ai = fib_path_attached_next_hop_get_adj(
1098                      path,
1099                      dpo_proto_to_link(path->fp_nh_proto));
1100
1101             path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
1102             if (if_is_up && adj_is_up(ai))
1103             {
1104                 path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RESOLVED;
1105             }
1106
1107             dpo_set(&path->fp_dpo, DPO_ADJACENCY, path->fp_nh_proto, ai);
1108             adj_unlock(ai);
1109
1110             if (!if_is_up)
1111             {
1112                 /*
1113                  * If the interface is not up there is no reason to walk
1114                  * back to children. if we did they would only evaluate
1115                  * that this path is unresolved and hence it would
1116                  * not contribute the adjacency - so it would be wasted
1117                  * CPU time.
1118                  */
1119                 return (FIB_NODE_BACK_WALK_CONTINUE);
1120             }
1121         }
1122         if (FIB_NODE_BW_REASON_FLAG_ADJ_DOWN & ctx->fnbw_reason)
1123         {
1124             if (!(path->fp_oper_flags & FIB_PATH_OPER_FLAG_RESOLVED))
1125             {
1126                 /*
1127                  * already unresolved. no need to walk back again
1128                  */
1129                 return (FIB_NODE_BACK_WALK_CONTINUE);
1130             }
1131             /*
1132              * the adj has gone down. the path is no longer resolved.
1133              */
1134             path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
1135         }
1136         break;
1137     case FIB_PATH_TYPE_ATTACHED:
1138     case FIB_PATH_TYPE_DVR:
1139         /*
1140          * FIXME; this could schedule a lower priority walk, since attached
1141          * routes are not usually in ECMP configurations so the backwalk to
1142          * the FIB entry does not need to be high priority
1143          */
1144         if (FIB_NODE_BW_REASON_FLAG_INTERFACE_UP & ctx->fnbw_reason)
1145         {
1146             path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RESOLVED;
1147         }
1148         if (FIB_NODE_BW_REASON_FLAG_INTERFACE_DOWN & ctx->fnbw_reason)
1149         {
1150             path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
1151         }
1152         if (FIB_NODE_BW_REASON_FLAG_INTERFACE_DELETE & ctx->fnbw_reason)
1153         {
1154             fib_path_unresolve(path);
1155             path->fp_oper_flags |= FIB_PATH_OPER_FLAG_DROP;
1156         }
1157         break;
1158     case FIB_PATH_TYPE_UDP_ENCAP:
1159     {
1160         dpo_id_t via_dpo = DPO_INVALID;
1161
1162         /*
1163          * hope for the best - clear if restrictions apply.
1164          */
1165         path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RESOLVED;
1166
1167         udp_encap_contribute_forwarding(path->udp_encap.fp_udp_encap_id,
1168                                         path->fp_nh_proto,
1169                                         &via_dpo);
1170         /*
1171          * If this path is contributing a drop, then it's not resolved
1172          */
1173         if (dpo_is_drop(&via_dpo) || load_balance_is_drop(&via_dpo))
1174         {
1175             path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
1176         }
1177
1178         /*
1179          * update the path's contributed DPO
1180          */
1181         dpo_copy(&path->fp_dpo, &via_dpo);
1182         dpo_reset(&via_dpo);
1183         break;
1184     }
1185     case FIB_PATH_TYPE_INTF_RX:
1186         ASSERT(0);
1187     case FIB_PATH_TYPE_DEAG:
1188         /*
1189          * FIXME When VRF delete is allowed this will need a poke.
1190          */
1191     case FIB_PATH_TYPE_SPECIAL:
1192     case FIB_PATH_TYPE_RECEIVE:
1193     case FIB_PATH_TYPE_EXCLUSIVE:
1194     case FIB_PATH_TYPE_BIER_TABLE:
1195     case FIB_PATH_TYPE_BIER_IMP:
1196         /*
1197          * these path types have no parents. so to be
1198          * walked from one is unexpected.
1199          */
1200         ASSERT(0);
1201         break;
1202     }
1203
1204     /*
1205      * propagate the backwalk further to the path-list
1206      */
1207     fib_path_list_back_walk(path->fp_pl_index, ctx);
1208
1209     return (FIB_NODE_BACK_WALK_CONTINUE);
1210 }
1211
1212 static void
1213 fib_path_memory_show (void)
1214 {
1215     fib_show_memory_usage("Path",
1216                           pool_elts(fib_path_pool),
1217                           pool_len(fib_path_pool),
1218                           sizeof(fib_path_t));
1219 }
1220
1221 /*
1222  * The FIB path's graph node virtual function table
1223  */
1224 static const fib_node_vft_t fib_path_vft = {
1225     .fnv_get = fib_path_get_node,
1226     .fnv_last_lock = fib_path_last_lock_gone,
1227     .fnv_back_walk = fib_path_back_walk_notify,
1228     .fnv_mem_show = fib_path_memory_show,
1229 };
1230
1231 static fib_path_cfg_flags_t
1232 fib_path_route_flags_to_cfg_flags (const fib_route_path_t *rpath)
1233 {
1234     fib_path_cfg_flags_t cfg_flags = FIB_PATH_CFG_FLAG_NONE;
1235
1236     if (rpath->frp_flags & FIB_ROUTE_PATH_RESOLVE_VIA_HOST)
1237         cfg_flags |= FIB_PATH_CFG_FLAG_RESOLVE_HOST;
1238     if (rpath->frp_flags & FIB_ROUTE_PATH_RESOLVE_VIA_ATTACHED)
1239         cfg_flags |= FIB_PATH_CFG_FLAG_RESOLVE_ATTACHED;
1240     if (rpath->frp_flags & FIB_ROUTE_PATH_LOCAL)
1241         cfg_flags |= FIB_PATH_CFG_FLAG_LOCAL;
1242     if (rpath->frp_flags & FIB_ROUTE_PATH_ATTACHED)
1243         cfg_flags |= FIB_PATH_CFG_FLAG_ATTACHED;
1244     if (rpath->frp_flags & FIB_ROUTE_PATH_INTF_RX)
1245         cfg_flags |= FIB_PATH_CFG_FLAG_INTF_RX;
1246     if (rpath->frp_flags & FIB_ROUTE_PATH_RPF_ID)
1247         cfg_flags |= FIB_PATH_CFG_FLAG_RPF_ID;
1248     if (rpath->frp_flags & FIB_ROUTE_PATH_EXCLUSIVE)
1249         cfg_flags |= FIB_PATH_CFG_FLAG_EXCLUSIVE;
1250     if (rpath->frp_flags & FIB_ROUTE_PATH_DROP)
1251         cfg_flags |= FIB_PATH_CFG_FLAG_DROP;
1252     if (rpath->frp_flags & FIB_ROUTE_PATH_SOURCE_LOOKUP)
1253         cfg_flags |= FIB_PATH_CFG_FLAG_DEAG_SRC;
1254     if (rpath->frp_flags & FIB_ROUTE_PATH_ICMP_UNREACH)
1255         cfg_flags |= FIB_PATH_CFG_FLAG_ICMP_UNREACH;
1256     if (rpath->frp_flags & FIB_ROUTE_PATH_ICMP_PROHIBIT)
1257         cfg_flags |= FIB_PATH_CFG_FLAG_ICMP_PROHIBIT;
1258
1259     return (cfg_flags);
1260 }
1261
1262 /*
1263  * fib_path_create
1264  *
1265  * Create and initialise a new path object.
1266  * return the index of the path.
1267  */
1268 fib_node_index_t
1269 fib_path_create (fib_node_index_t pl_index,
1270                  const fib_route_path_t *rpath)
1271 {
1272     fib_path_t *path;
1273
1274     pool_get(fib_path_pool, path);
1275     clib_memset(path, 0, sizeof(*path));
1276
1277     fib_node_init(&path->fp_node,
1278                   FIB_NODE_TYPE_PATH);
1279
1280     dpo_reset(&path->fp_dpo);
1281     path->fp_pl_index = pl_index;
1282     path->fp_nh_proto = rpath->frp_proto;
1283     path->fp_via_fib = FIB_NODE_INDEX_INVALID;
1284     path->fp_weight = rpath->frp_weight;
1285     if (0 == path->fp_weight)
1286     {
1287         /*
1288          * a weight of 0 is a meaningless value. We could either reject it, and thus force
1289          * clients to always use 1, or we can accept it and fix it up appropriately.
1290          */
1291         path->fp_weight = 1;
1292     }
1293     path->fp_preference = rpath->frp_preference;
1294     path->fp_cfg_flags = fib_path_route_flags_to_cfg_flags(rpath);
1295
1296     /*
1297      * deduce the path's type from the parameters and save what is needed.
1298      */
1299     if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_LOCAL)
1300     {
1301         path->fp_type = FIB_PATH_TYPE_RECEIVE;
1302         path->receive.fp_interface = rpath->frp_sw_if_index;
1303         path->receive.fp_addr = rpath->frp_addr;
1304     }
1305     else if (rpath->frp_flags & FIB_ROUTE_PATH_UDP_ENCAP)
1306     {
1307         path->fp_type = FIB_PATH_TYPE_UDP_ENCAP;
1308         path->udp_encap.fp_udp_encap_id = rpath->frp_udp_encap_id;
1309     }
1310     else if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_INTF_RX)
1311     {
1312         path->fp_type = FIB_PATH_TYPE_INTF_RX;
1313         path->intf_rx.fp_interface = rpath->frp_sw_if_index;
1314     }
1315     else if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_RPF_ID)
1316     {
1317         path->fp_type = FIB_PATH_TYPE_DEAG;
1318         path->deag.fp_tbl_id = rpath->frp_fib_index;
1319         path->deag.fp_rpf_id = rpath->frp_rpf_id;
1320     }
1321     else if (rpath->frp_flags & FIB_ROUTE_PATH_BIER_FMASK)
1322     {
1323         path->fp_type = FIB_PATH_TYPE_BIER_FMASK;
1324         path->bier_fmask.fp_bier_fmask = rpath->frp_bier_fmask;
1325     }
1326     else if (rpath->frp_flags & FIB_ROUTE_PATH_BIER_IMP)
1327     {
1328         path->fp_type = FIB_PATH_TYPE_BIER_IMP;
1329         path->bier_imp.fp_bier_imp = rpath->frp_bier_imp;
1330     }
1331     else if (rpath->frp_flags & FIB_ROUTE_PATH_BIER_TABLE)
1332     {
1333         path->fp_type = FIB_PATH_TYPE_BIER_TABLE;
1334         path->bier_table.fp_bier_tbl = rpath->frp_bier_tbl;
1335     }
1336     else if (rpath->frp_flags & FIB_ROUTE_PATH_DEAG)
1337     {
1338         path->fp_type = FIB_PATH_TYPE_DEAG;
1339         path->deag.fp_tbl_id = rpath->frp_fib_index;
1340     }
1341     else if (rpath->frp_flags & FIB_ROUTE_PATH_DVR)
1342     {
1343         path->fp_type = FIB_PATH_TYPE_DVR;
1344         path->dvr.fp_interface = rpath->frp_sw_if_index;
1345     }
1346     else if (rpath->frp_flags & FIB_ROUTE_PATH_EXCLUSIVE)
1347     {
1348         path->fp_type = FIB_PATH_TYPE_EXCLUSIVE;
1349         dpo_copy(&path->exclusive.fp_ex_dpo, &rpath->dpo);
1350     }
1351     else if ((path->fp_cfg_flags & FIB_PATH_CFG_FLAG_ICMP_PROHIBIT) ||
1352         (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_ICMP_UNREACH))
1353     {
1354         path->fp_type = FIB_PATH_TYPE_SPECIAL;
1355     }
1356     else if ((path->fp_cfg_flags & FIB_PATH_CFG_FLAG_CLASSIFY))
1357     {
1358         path->fp_type = FIB_PATH_TYPE_SPECIAL;
1359         path->classify.fp_classify_table_id = rpath->frp_classify_table_id;
1360     }
1361     else if (~0 != rpath->frp_sw_if_index)
1362     {
1363         if (ip46_address_is_zero(&rpath->frp_addr))
1364         {
1365             path->fp_type = FIB_PATH_TYPE_ATTACHED;
1366             path->attached.fp_interface = rpath->frp_sw_if_index;
1367         }
1368         else
1369         {
1370             path->fp_type = FIB_PATH_TYPE_ATTACHED_NEXT_HOP;
1371             path->attached_next_hop.fp_interface = rpath->frp_sw_if_index;
1372             path->attached_next_hop.fp_nh = rpath->frp_addr;
1373         }
1374     }
1375     else
1376     {
1377         if (ip46_address_is_zero(&rpath->frp_addr))
1378         {
1379             if (~0 == rpath->frp_fib_index)
1380             {
1381                 path->fp_type = FIB_PATH_TYPE_SPECIAL;
1382             }
1383             else
1384             {
1385                 path->fp_type = FIB_PATH_TYPE_DEAG;
1386                 path->deag.fp_tbl_id = rpath->frp_fib_index;
1387                 path->deag.fp_rpf_id = ~0;
1388             }
1389         }
1390         else
1391         {
1392             path->fp_type = FIB_PATH_TYPE_RECURSIVE;
1393             if (DPO_PROTO_MPLS == path->fp_nh_proto)
1394             {
1395                 path->recursive.fp_nh.fp_local_label = rpath->frp_local_label;
1396                 path->recursive.fp_nh.fp_eos = rpath->frp_eos;
1397             }
1398             else
1399             {
1400                 path->recursive.fp_nh.fp_ip = rpath->frp_addr;
1401             }
1402             path->recursive.fp_tbl_id = rpath->frp_fib_index;
1403         }
1404     }
1405
1406     FIB_PATH_DBG(path, "create");
1407
1408     return (fib_path_get_index(path));
1409 }
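/*
 * Example (a hypothetical sketch, not from the original sources): a caller,
 * such as a path-list, might describe an attached-nexthop path like this;
 * with a non-zero address and a valid sw_if_index the logic above selects
 * FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
 *
 *   fib_route_path_t rpath = {
 *       .frp_proto = DPO_PROTO_IP4,
 *       .frp_addr = nh_addr,               // hypothetical next-hop address
 *       .frp_sw_if_index = sw_if_index,    // hypothetical interface index
 *       .frp_fib_index = ~0,
 *       .frp_weight = 1,
 *       .frp_preference = 0,
 *       .frp_flags = FIB_ROUTE_PATH_FLAG_NONE,
 *   };
 *   fib_node_index_t pi = fib_path_create(pl_index, &rpath);
 */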
1410
1411 /*
1412  * fib_path_create_special
1413  *
1414  * Create and initialise a new path object.
1415  * return the index of the path.
1416  */
1417 fib_node_index_t
1418 fib_path_create_special (fib_node_index_t pl_index,
1419                          dpo_proto_t nh_proto,
1420                          fib_path_cfg_flags_t flags,
1421                          const dpo_id_t *dpo)
1422 {
1423     fib_path_t *path;
1424
1425     pool_get(fib_path_pool, path);
1426     clib_memset(path, 0, sizeof(*path));
1427
1428     fib_node_init(&path->fp_node,
1429                   FIB_NODE_TYPE_PATH);
1430     dpo_reset(&path->fp_dpo);
1431
1432     path->fp_pl_index = pl_index;
1433     path->fp_weight = 1;
1434     path->fp_preference = 0;
1435     path->fp_nh_proto = nh_proto;
1436     path->fp_via_fib = FIB_NODE_INDEX_INVALID;
1437     path->fp_cfg_flags = flags;
1438
1439     if (FIB_PATH_CFG_FLAG_DROP & flags)
1440     {
1441         path->fp_type = FIB_PATH_TYPE_SPECIAL;
1442     }
1443     else if (FIB_PATH_CFG_FLAG_LOCAL & flags)
1444     {
1445         path->fp_type = FIB_PATH_TYPE_RECEIVE;
1446         path->receive.fp_interface = FIB_NODE_INDEX_INVALID;
1447     }
1448     else
1449     {
1450         path->fp_type = FIB_PATH_TYPE_EXCLUSIVE;
1451         ASSERT(NULL != dpo);
1452         dpo_copy(&path->exclusive.fp_ex_dpo, dpo);
1453     }
1454
1455     return (fib_path_get_index(path));
1456 }
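/*
 * Example (sketch): a special drop path, with no DPO supplied; the DROP
 * cfg flag selects FIB_PATH_TYPE_SPECIAL and the drop DPO is presumably
 * contributed later at resolve time:
 *
 *   pi = fib_path_create_special(pl_index, DPO_PROTO_IP4,
 *                                FIB_PATH_CFG_FLAG_DROP, NULL);
 */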
1457
1458 /*
1459  * fib_path_copy
1460  *
1461  * Copy a path. return index of new path.
1462  */
1463 fib_node_index_t
1464 fib_path_copy (fib_node_index_t path_index,
1465                fib_node_index_t path_list_index)
1466 {
1467     fib_path_t *path, *orig_path;
1468
1469     pool_get(fib_path_pool, path);
1470
1471     orig_path = fib_path_get(path_index);
1472     ASSERT(NULL != orig_path);
1473
1474     memcpy(path, orig_path, sizeof(*path));
1475
1476     FIB_PATH_DBG(path, "create-copy:%d", path_index);
1477
1478     /*
1479      * reset the dynamic section
1480      */
1481     fib_node_init(&path->fp_node, FIB_NODE_TYPE_PATH);
1482     path->fp_oper_flags     = FIB_PATH_OPER_FLAG_NONE;
1483     path->fp_pl_index  = path_list_index;
1484     path->fp_via_fib   = FIB_NODE_INDEX_INVALID;
1485     clib_memset(&path->fp_dpo, 0, sizeof(path->fp_dpo));
1486     dpo_reset(&path->fp_dpo);
1487
1488     return (fib_path_get_index(path));
1489 }
1490
1491 /*
1492  * fib_path_destroy
1493  *
1494  * destroy a path that is no longer required
1495  */
1496 void
1497 fib_path_destroy (fib_node_index_t path_index)
1498 {
1499     fib_path_t *path;
1500
1501     path = fib_path_get(path_index);
1502
1503     ASSERT(NULL != path);
1504     FIB_PATH_DBG(path, "destroy");
1505
1506     fib_path_unresolve(path);
1507
1508     fib_node_deinit(&path->fp_node);
1509     pool_put(fib_path_pool, path);
1510 }
1511
1512 /*
1513  * fib_path_hash
1514  *
1515  * compute a hash over the path's key (the region between the hash markers)
1516  */
1517 uword
1518 fib_path_hash (fib_node_index_t path_index)
1519 {
1520     fib_path_t *path;
1521
1522     path = fib_path_get(path_index);
1523
1524     return (hash_memory(STRUCT_MARK_PTR(path, path_hash_start),
1525                         (STRUCT_OFFSET_OF(fib_path_t, path_hash_end) -
1526                          STRUCT_OFFSET_OF(fib_path_t, path_hash_start)),
1527                         0));
1528 }
1529
1530 /*
1531  * fib_path_cmp_i
1532  *
1533  * Compare two paths for equivalence.
1534  */
1535 static int
1536 fib_path_cmp_i (const fib_path_t *path1,
1537                 const fib_path_t *path2)
1538 {
1539     int res;
1540
1541     res = 1;
1542
1543     /*
1544      * paths of different types and protocol are not equal.
1545      * paths that differ only in weight and/or preference are the same path.
1546      */
1547     if (path1->fp_type != path2->fp_type)
1548     {
1549         res = (path1->fp_type - path2->fp_type);
1550     }
1551     else if (path1->fp_nh_proto != path2->fp_nh_proto)
1552     {
1553         res = (path1->fp_nh_proto - path2->fp_nh_proto);
1554     }
1555     else
1556     {
1557         /*
1558          * both paths are of the same type.
1559          * consider each type and its attributes in turn.
1560          */
1561         switch (path1->fp_type)
1562         {
1563         case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
1564             res = ip46_address_cmp(&path1->attached_next_hop.fp_nh,
1565                                    &path2->attached_next_hop.fp_nh);
1566             if (0 == res) {
1567                 res = (path1->attached_next_hop.fp_interface -
1568                        path2->attached_next_hop.fp_interface);
1569             }
1570             break;
1571         case FIB_PATH_TYPE_ATTACHED:
1572             res = (path1->attached.fp_interface -
1573                    path2->attached.fp_interface);
1574             break;
1575         case FIB_PATH_TYPE_RECURSIVE:
1576             res = ip46_address_cmp(&path1->recursive.fp_nh,
1577                                    &path2->recursive.fp_nh);
1578  
1579             if (0 == res)
1580             {
1581                 res = (path1->recursive.fp_tbl_id - path2->recursive.fp_tbl_id);
1582             }
1583             break;
1584         case FIB_PATH_TYPE_BIER_FMASK:
1585             res = (path1->bier_fmask.fp_bier_fmask -
1586                    path2->bier_fmask.fp_bier_fmask);
1587             break;
1588         case FIB_PATH_TYPE_BIER_IMP:
1589             res = (path1->bier_imp.fp_bier_imp -
1590                    path2->bier_imp.fp_bier_imp);
1591             break;
1592         case FIB_PATH_TYPE_BIER_TABLE:
1593             res = bier_table_id_cmp(&path1->bier_table.fp_bier_tbl,
1594                                     &path2->bier_table.fp_bier_tbl);
1595             break;
1596         case FIB_PATH_TYPE_DEAG:
1597             res = (path1->deag.fp_tbl_id - path2->deag.fp_tbl_id);
1598             if (0 == res)
1599             {
1600                 res = (path1->deag.fp_rpf_id - path2->deag.fp_rpf_id);
1601             }
1602             break;
1603         case FIB_PATH_TYPE_INTF_RX:
1604             res = (path1->intf_rx.fp_interface - path2->intf_rx.fp_interface);
1605             break;
1606         case FIB_PATH_TYPE_UDP_ENCAP:
1607             res = (path1->udp_encap.fp_udp_encap_id - path2->udp_encap.fp_udp_encap_id);
1608             break;
1609         case FIB_PATH_TYPE_DVR:
1610             res = (path1->dvr.fp_interface - path2->dvr.fp_interface);
1611             break;
1612         case FIB_PATH_TYPE_EXCLUSIVE:
1613             res = dpo_cmp(&path1->exclusive.fp_ex_dpo, &path2->exclusive.fp_ex_dpo);
1614             break;
1615         case FIB_PATH_TYPE_SPECIAL:
1616         case FIB_PATH_TYPE_RECEIVE:
1617             res = 0;
1618             break;
1619         }
1620     }
1621     return (res);
1622 }
1623
1624 /*
1625  * fib_path_cmp_for_sort
1626  *
1627  * Compare two paths for equivalence. Used during path sorting.
1628  * As usual 0 means equal.
1629  */
1630 int
1631 fib_path_cmp_for_sort (void * v1,
1632                        void * v2)
1633 {
1634     fib_node_index_t *pi1 = v1, *pi2 = v2;
1635     fib_path_t *path1, *path2;
1636
1637     path1 = fib_path_get(*pi1);
1638     path2 = fib_path_get(*pi2);
1639
1640     /*
1641      * when sorting paths we want the highest preference paths
1642      * first, so that the set of choices built is in preference order
1643      */
1644     if (path1->fp_preference != path2->fp_preference)
1645     {
1646         return (path1->fp_preference - path2->fp_preference);
1647     }
1648
1649     return (fib_path_cmp_i(path1, path2));
1650 }
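
/*
 * Illustrative sketch only: a caller owning a vector of path indices (the
 * 'paths' vector below is hypothetical) can order them with the comparator
 * above using the vppinfra sort helper:
 *
 *    fib_node_index_t *paths = ...;
 *    vec_sort_with_function(paths, fib_path_cmp_for_sort);
 */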
1651
1652 /*
1653  * fib_path_cmp
1654  *
1655  * Compare two paths for equivalence.
1656  */
1657 int
1658 fib_path_cmp (fib_node_index_t pi1,
1659               fib_node_index_t pi2)
1660 {
1661     fib_path_t *path1, *path2;
1662
1663     path1 = fib_path_get(pi1);
1664     path2 = fib_path_get(pi2);
1665
1666     return (fib_path_cmp_i(path1, path2));
1667 }
1668
1669 int
1670 fib_path_cmp_w_route_path (fib_node_index_t path_index,
1671                            const fib_route_path_t *rpath)
1672 {
1673     fib_path_t *path;
1674     int res;
1675
1676     path = fib_path_get(path_index);
1677
1678     res = 1;
1679
1680     if (path->fp_weight != rpath->frp_weight)
1681     {
1682         res = (path->fp_weight - rpath->frp_weight);
1683     }
1684     else
1685     {
1686         /*
1687          * compare the path against the route-path's attributes,
1688          * considering each path type in turn.
1689          */
1690         switch (path->fp_type)
1691         {
1692         case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
1693             res = ip46_address_cmp(&path->attached_next_hop.fp_nh,
1694                                    &rpath->frp_addr);
1695             if (0 == res)
1696             {
1697                 res = (path->attached_next_hop.fp_interface -
1698                        rpath->frp_sw_if_index);
1699             }
1700             break;
1701         case FIB_PATH_TYPE_ATTACHED:
1702             res = (path->attached.fp_interface - rpath->frp_sw_if_index);
1703             break;
1704         case FIB_PATH_TYPE_RECURSIVE:
1705             if (DPO_PROTO_MPLS == path->fp_nh_proto)
1706             {
1707                 res = path->recursive.fp_nh.fp_local_label - rpath->frp_local_label;
1708
1709                 if (res == 0)
1710                 {
1711                     res = path->recursive.fp_nh.fp_eos - rpath->frp_eos;
1712                 }
1713             }
1714             else
1715             {
1716                 res = ip46_address_cmp(&path->recursive.fp_nh.fp_ip,
1717                                        &rpath->frp_addr);
1718             }
1719
1720             if (0 == res)
1721             {
1722                 res = (path->recursive.fp_tbl_id - rpath->frp_fib_index);
1723             }
1724             break;
1725         case FIB_PATH_TYPE_BIER_FMASK:
1726             res = (path->bier_fmask.fp_bier_fmask - rpath->frp_bier_fmask);
1727             break;
1728         case FIB_PATH_TYPE_BIER_IMP:
1729             res = (path->bier_imp.fp_bier_imp - rpath->frp_bier_imp);
1730             break;
1731         case FIB_PATH_TYPE_BIER_TABLE:
1732             res = bier_table_id_cmp(&path->bier_table.fp_bier_tbl,
1733                                     &rpath->frp_bier_tbl);
1734             break;
1735         case FIB_PATH_TYPE_INTF_RX:
1736             res = (path->intf_rx.fp_interface - rpath->frp_sw_if_index);
1737             break;
1738         case FIB_PATH_TYPE_UDP_ENCAP:
1739             res = (path->udp_encap.fp_udp_encap_id - rpath->frp_udp_encap_id);
1740             break;
1741         case FIB_PATH_TYPE_DEAG:
1742             res = (path->deag.fp_tbl_id - rpath->frp_fib_index);
1743             if (0 == res)
1744             {
1745                 res = (path->deag.fp_rpf_id - rpath->frp_rpf_id);
1746             }
1747             break;
1748         case FIB_PATH_TYPE_DVR:
1749             res = (path->dvr.fp_interface - rpath->frp_sw_if_index);
1750             break;
1751         case FIB_PATH_TYPE_EXCLUSIVE:
1752             res = dpo_cmp(&path->exclusive.fp_ex_dpo, &rpath->dpo);
1753             break;
1754         case FIB_PATH_TYPE_RECEIVE:
1755             if (rpath->frp_flags & FIB_ROUTE_PATH_LOCAL)
1756             {
1757                 res = 0;
1758             }
1759             else
1760             {
1761                 res = 1;
1762             }
1763             break;
1764         case FIB_PATH_TYPE_SPECIAL:
1765             res = 0;
1766             break;
1767         }
1768     }
1769     return (res);
1770 }
1771
1772 /*
1773  * fib_path_recursive_loop_detect
1774  *
1775  * A forward walk of the FIB object graph to detect for a cycle/loop. This
1776  * walk is initiated when an entry is linking to a new path list or from an old.
1777  * The entry vector passed contains all the FIB entries that are children of this
1778  * path (it is all the entries encountered on the walk so far). If this vector
1779  * contains the entry this path resolves via, then a loop is about to form.
1780  * The loop must be allowed to form, since we need the dependencies in place
1781  * so that we can track when the loop breaks.
1782  * However, we MUST not produce a loop in the forwarding graph (else packets
1783  * would loop around the switch path until the loop breaks), so we mark recursive
1784  * paths as looped so that they do not contribute forwarding information.
1785  * By marking the path as looped, an entry such as:
1786  *    X/Y
1787  *     via a.a.a.a (looped)
1788  *     via b.b.b.b (not looped)
1789  * can still forward using the info provided by b.b.b.b only
1790  */
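
/*
 * A worked example, illustrative only: given two mutually recursive routes
 *
 *    1.1.1.1/32 via 2.2.2.2
 *    2.2.2.2/32 via 1.1.1.1
 *
 * the walk triggered when the second route links to its path-list reaches the
 * entry for 1.1.1.1/32, whose recursive path resolves via 2.2.2.2/32 - the
 * entry that started the walk. That path is marked looped and contributes a
 * drop until one of the routes changes and the cycle is broken.
 */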
1791 int
1792 fib_path_recursive_loop_detect (fib_node_index_t path_index,
1793                                 fib_node_index_t **entry_indicies)
1794 {
1795     fib_path_t *path;
1796
1797     path = fib_path_get(path_index);
1798
1799     /*
1800      * the forced drop path is never looped, since it is never resolved.
1801      */
1802     if (fib_path_is_permanent_drop(path))
1803     {
1804         return (0);
1805     }
1806
1807     switch (path->fp_type)
1808     {
1809     case FIB_PATH_TYPE_RECURSIVE:
1810     {
1811         fib_node_index_t *entry_index, *entries;
1812         int looped = 0;
1813         entries = *entry_indicies;
1814
1815         vec_foreach(entry_index, entries) {
1816             if (*entry_index == path->fp_via_fib)
1817             {
1818                 /*
1819                  * the entry that is about to link to this path-list (or
1820                  * one of this path-list's children) is the same entry that
1821                  * this recursive path resolves through. this is a cycle.
1822                  * abort the walk.
1823                  */
1824                 looped = 1;
1825                 break;
1826             }
1827         }
1828
1829         if (looped)
1830         {
1831             FIB_PATH_DBG(path, "recursive loop formed");
1832             path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RECURSIVE_LOOP;
1833
1834             dpo_copy(&path->fp_dpo, drop_dpo_get(path->fp_nh_proto));
1835         }
1836         else
1837         {
1838             /*
1839              * no loop here yet. keep forward walking the graph.
1840              */
1841             if (fib_entry_recursive_loop_detect(path->fp_via_fib, entry_indicies))
1842             {
1843                 FIB_PATH_DBG(path, "recursive loop formed");
1844                 path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RECURSIVE_LOOP;
1845             }
1846             else
1847             {
1848                 FIB_PATH_DBG(path, "recursive loop cleared");
1849                 path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RECURSIVE_LOOP;
1850             }
1851         }
1852         break;
1853     }
1854     case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
1855     case FIB_PATH_TYPE_ATTACHED:
1856         if (adj_recursive_loop_detect(path->fp_dpo.dpoi_index,
1857                                       entry_indicies))
1858         {
1859             FIB_PATH_DBG(path, "recursive loop formed");
1860             path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RECURSIVE_LOOP;
1861         }
1862         else
1863         {
1864             FIB_PATH_DBG(path, "recursive loop cleared");
1865             path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RECURSIVE_LOOP;
1866         }
1867         break;
1868     case FIB_PATH_TYPE_SPECIAL:
1869     case FIB_PATH_TYPE_DEAG:
1870     case FIB_PATH_TYPE_DVR:
1871     case FIB_PATH_TYPE_RECEIVE:
1872     case FIB_PATH_TYPE_INTF_RX:
1873     case FIB_PATH_TYPE_UDP_ENCAP:
1874     case FIB_PATH_TYPE_EXCLUSIVE:
1875     case FIB_PATH_TYPE_BIER_FMASK:
1876     case FIB_PATH_TYPE_BIER_TABLE:
1877     case FIB_PATH_TYPE_BIER_IMP:
1878         /*
1879          * these path types cannot be part of a loop, since they are the leaves
1880          * of the graph.
1881          */
1882         break;
1883     }
1884
1885     return (fib_path_is_looped(path_index));
1886 }
1887
1888 int
1889 fib_path_resolve (fib_node_index_t path_index)
1890 {
1891     fib_path_t *path;
1892
1893     path = fib_path_get(path_index);
1894
1895     /*
1896      * hope for the best.
1897      */
1898     path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RESOLVED;
1899
1900     /*
1901      * the forced drop path resolves via the drop adj
1902      */
1903     if (fib_path_is_permanent_drop(path))
1904     {
1905         dpo_copy(&path->fp_dpo, drop_dpo_get(path->fp_nh_proto));
1906         path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
1907         return (fib_path_is_resolved(path_index));
1908     }
1909
1910     switch (path->fp_type)
1911     {
1912     case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
1913         fib_path_attached_next_hop_set(path);
1914         break;
1915     case FIB_PATH_TYPE_ATTACHED:
1916     {
1917         dpo_id_t tmp = DPO_INVALID;
1918
1919         /*
1920          * path->attached.fp_interface
1921          */
1922         if (!vnet_sw_interface_is_up(vnet_get_main(),
1923                                      path->attached.fp_interface))
1924         {
1925             path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
1926         }
1927         dpo_set(&tmp,
1928                 DPO_ADJACENCY,
1929                 path->fp_nh_proto,
1930                 fib_path_attached_get_adj(path,
1931                                           dpo_proto_to_link(path->fp_nh_proto)));
1932
1933         /*
1934          * re-fetch after possible mem realloc
1935          */
1936         path = fib_path_get(path_index);
1937         dpo_copy(&path->fp_dpo, &tmp);
1938
1939         /*
1940          * become a child of the adjacency so we receive updates
1941          * when the interface state changes
1942          */
1943         path->fp_sibling = adj_child_add(path->fp_dpo.dpoi_index,
1944                                          FIB_NODE_TYPE_PATH,
1945                                          fib_path_get_index(path));
1946         dpo_reset(&tmp);
1947         break;
1948     }
1949     case FIB_PATH_TYPE_RECURSIVE:
1950     {
1951         /*
1952          * Create a RR source entry in the table for the address
1953          * that this path recurses through.
1954          * This resolve action is recursive, hence we may create
1955          * more paths in the process. More creates may trigger a pool
1956          * realloc, and hence a move of this path.
1957          */
1958         fib_node_index_t fei;
1959         fib_prefix_t pfx;
1960
1961         ASSERT(FIB_NODE_INDEX_INVALID == path->fp_via_fib);
1962
1963         if (DPO_PROTO_MPLS == path->fp_nh_proto)
1964         {
1965             fib_prefix_from_mpls_label(path->recursive.fp_nh.fp_local_label,
1966                                        path->recursive.fp_nh.fp_eos,
1967                                        &pfx);
1968         }
1969         else
1970         {
1971             fib_prefix_from_ip46_addr(&path->recursive.fp_nh.fp_ip, &pfx);
1972         }
1973
1974         fib_table_lock(path->recursive.fp_tbl_id,
1975                        dpo_proto_to_fib(path->fp_nh_proto),
1976                        FIB_SOURCE_RR);
1977         fei = fib_table_entry_special_add(path->recursive.fp_tbl_id,
1978                                           &pfx,
1979                                           FIB_SOURCE_RR,
1980                                           FIB_ENTRY_FLAG_NONE);
1981
1982         path = fib_path_get(path_index);
1983         path->fp_via_fib = fei;
1984
1985         /*
1986          * become a dependent child of the entry so the path is 
1987          * informed when the forwarding for the entry changes.
1988          */
1989         path->fp_sibling = fib_entry_child_add(path->fp_via_fib,
1990                                                FIB_NODE_TYPE_PATH,
1991                                                fib_path_get_index(path));
1992
1993         /*
1994          * create and configure the IP DPO
1995          */
1996         fib_path_recursive_adj_update(
1997             path,
1998             fib_path_to_chain_type(path),
1999             &path->fp_dpo);
2000
2001         break;
2002     }
2003     case FIB_PATH_TYPE_BIER_FMASK:
2004     {
2005         /*
2006          * become a dependent child of the entry so the path is
2007          * informed when the forwarding for the entry changes.
2008          */
2009         path->fp_sibling = bier_fmask_child_add(path->bier_fmask.fp_bier_fmask,
2010                                                 FIB_NODE_TYPE_PATH,
2011                                                 fib_path_get_index(path));
2012
2013         path->fp_via_bier_fmask = path->bier_fmask.fp_bier_fmask;
2014         fib_path_bier_fmask_update(path, &path->fp_dpo);
2015
2016         break;
2017     }
2018     case FIB_PATH_TYPE_BIER_IMP:
2019         bier_imp_lock(path->bier_imp.fp_bier_imp);
2020         bier_imp_contribute_forwarding(path->bier_imp.fp_bier_imp,
2021                                        DPO_PROTO_IP4,
2022                                        &path->fp_dpo);
2023         break;
2024     case FIB_PATH_TYPE_BIER_TABLE:
2025     {
2026         /*
2027          * Find/create the BIER table to link to
2028          */
2029         ASSERT(FIB_NODE_INDEX_INVALID == path->fp_via_bier_tbl);
2030
2031         path->fp_via_bier_tbl =
2032             bier_table_ecmp_create_and_lock(&path->bier_table.fp_bier_tbl);
2033
2034         bier_table_contribute_forwarding(path->fp_via_bier_tbl,
2035                                          &path->fp_dpo);
2036         break;
2037     }
2038     case FIB_PATH_TYPE_SPECIAL:
2039         if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_ICMP_PROHIBIT)
2040         {
2041             ip_null_dpo_add_and_lock (path->fp_nh_proto,
2042                                       IP_NULL_ACTION_SEND_ICMP_PROHIBIT,
2043                                       &path->fp_dpo);
2044         }
2045         else if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_ICMP_UNREACH)
2046         {
2047             ip_null_dpo_add_and_lock (path->fp_nh_proto,
2048                                       IP_NULL_ACTION_SEND_ICMP_UNREACH,
2049                                       &path->fp_dpo);
2050         }
2051         else if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_CLASSIFY)
2052         {
2053             dpo_set (&path->fp_dpo, DPO_CLASSIFY,
2054                      path->fp_nh_proto,
2055                      classify_dpo_create (path->fp_nh_proto,
2056                                           path->classify.fp_classify_table_id));
2057         }
2058         else
2059         {
2060             /*
2061              * Resolve via the drop
2062              */
2063             dpo_copy(&path->fp_dpo, drop_dpo_get(path->fp_nh_proto));
2064         }
2065         break;
2066     case FIB_PATH_TYPE_DEAG:
2067     {
2068         if (DPO_PROTO_BIER == path->fp_nh_proto)
2069         {
2070             bier_disp_table_contribute_forwarding(path->deag.fp_tbl_id,
2071                                                   &path->fp_dpo);
2072         }
2073         else
2074         {
2075             /*
2076              * Resolve via a lookup DPO.
2077              * FIXME. control plane should add routes with a table ID
2078              */
2079             lookup_input_t input;
2080             lookup_cast_t cast;
2081
2082             cast = (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_RPF_ID ?
2083                     LOOKUP_MULTICAST :
2084                     LOOKUP_UNICAST);
2085             input = (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_DEAG_SRC ?
2086                      LOOKUP_INPUT_SRC_ADDR :
2087                      LOOKUP_INPUT_DST_ADDR);
2088
2089             lookup_dpo_add_or_lock_w_fib_index(path->deag.fp_tbl_id,
2090                                                path->fp_nh_proto,
2091                                                cast,
2092                                                input,
2093                                                LOOKUP_TABLE_FROM_CONFIG,
2094                                                &path->fp_dpo);
2095         }
2096         break;
2097     }
2098     case FIB_PATH_TYPE_DVR:
2099         dvr_dpo_add_or_lock(path->attached.fp_interface,
2100                             path->fp_nh_proto,
2101                             &path->fp_dpo);
2102         break;
2103     case FIB_PATH_TYPE_RECEIVE:
2104         /*
2105          * Resolve via a receive DPO.
2106          */
2107         receive_dpo_add_or_lock(path->fp_nh_proto,
2108                                 path->receive.fp_interface,
2109                                 &path->receive.fp_addr,
2110                                 &path->fp_dpo);
2111         break;
2112     case FIB_PATH_TYPE_UDP_ENCAP:
2113         udp_encap_lock(path->udp_encap.fp_udp_encap_id);
2114         udp_encap_contribute_forwarding(path->udp_encap.fp_udp_encap_id,
2115                                         path->fp_nh_proto,
2116                                         &path->fp_dpo);
2117         break;
2118     case FIB_PATH_TYPE_INTF_RX: {
2119         /*
2120          * Resolve via an interface-rx DPO.
2121          */
2122         interface_rx_dpo_add_or_lock(path->fp_nh_proto,
2123                                      path->intf_rx.fp_interface,
2124                                      &path->fp_dpo);
2125         break;
2126     }
2127     case FIB_PATH_TYPE_EXCLUSIVE:
2128         /*
2129          * Resolve via the user provided DPO
2130          */
2131         dpo_copy(&path->fp_dpo, &path->exclusive.fp_ex_dpo);
2132         break;
2133     }
2134
2135     return (fib_path_is_resolved(path_index));
2136 }
2137
2138 u32
2139 fib_path_get_resolving_interface (fib_node_index_t path_index)
2140 {
2141     fib_path_t *path;
2142
2143     path = fib_path_get(path_index);
2144
2145     switch (path->fp_type)
2146     {
2147     case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
2148         return (path->attached_next_hop.fp_interface);
2149     case FIB_PATH_TYPE_ATTACHED:
2150         return (path->attached.fp_interface);
2151     case FIB_PATH_TYPE_RECEIVE:
2152         return (path->receive.fp_interface);
2153     case FIB_PATH_TYPE_RECURSIVE:
2154         if (fib_path_is_resolved(path_index))
2155         {
2156             return (fib_entry_get_resolving_interface(path->fp_via_fib));
2157         }
2158         break;
2159     case FIB_PATH_TYPE_DVR:
2160         return (path->dvr.fp_interface);
2161     case FIB_PATH_TYPE_INTF_RX:
2162     case FIB_PATH_TYPE_UDP_ENCAP:
2163     case FIB_PATH_TYPE_SPECIAL:
2164     case FIB_PATH_TYPE_DEAG:
2165     case FIB_PATH_TYPE_EXCLUSIVE:
2166     case FIB_PATH_TYPE_BIER_FMASK:
2167     case FIB_PATH_TYPE_BIER_TABLE:
2168     case FIB_PATH_TYPE_BIER_IMP:
2169         break;
2170     }
2171     return (dpo_get_urpf(&path->fp_dpo));
2172 }
2173
2174 index_t
2175 fib_path_get_resolving_index (fib_node_index_t path_index)
2176 {
2177     fib_path_t *path;
2178
2179     path = fib_path_get(path_index);
2180
2181     switch (path->fp_type)
2182     {
2183     case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
2184     case FIB_PATH_TYPE_ATTACHED:
2185     case FIB_PATH_TYPE_RECEIVE:
2186     case FIB_PATH_TYPE_INTF_RX:
2187     case FIB_PATH_TYPE_SPECIAL:
2188     case FIB_PATH_TYPE_DEAG:
2189     case FIB_PATH_TYPE_DVR:
2190     case FIB_PATH_TYPE_EXCLUSIVE:
2191         break;
2192     case FIB_PATH_TYPE_UDP_ENCAP:
2193         return (path->udp_encap.fp_udp_encap_id);
2194     case FIB_PATH_TYPE_RECURSIVE:
2195         return (path->fp_via_fib);
2196     case FIB_PATH_TYPE_BIER_FMASK:
2197         return (path->bier_fmask.fp_bier_fmask);
2198     case FIB_PATH_TYPE_BIER_TABLE:
2199         return (path->fp_via_bier_tbl);
2200     case FIB_PATH_TYPE_BIER_IMP:
2201         return (path->bier_imp.fp_bier_imp);
2202     }
2203     return (~0);
2204 }
2205
2206 adj_index_t
2207 fib_path_get_adj (fib_node_index_t path_index)
2208 {
2209     fib_path_t *path;
2210
2211     path = fib_path_get(path_index);
2212
2213     ASSERT(dpo_is_adj(&path->fp_dpo));
2214     if (dpo_is_adj(&path->fp_dpo))
2215     {
2216         return (path->fp_dpo.dpoi_index);
2217     }
2218     return (ADJ_INDEX_INVALID);
2219 }
2220
2221 u16
2222 fib_path_get_weight (fib_node_index_t path_index)
2223 {
2224     fib_path_t *path;
2225
2226     path = fib_path_get(path_index);
2227
2228     ASSERT(path);
2229
2230     return (path->fp_weight);
2231 }
2232
2233 u16
2234 fib_path_get_preference (fib_node_index_t path_index)
2235 {
2236     fib_path_t *path;
2237
2238     path = fib_path_get(path_index);
2239
2240     ASSERT(path);
2241
2242     return (path->fp_preference);
2243 }
2244
2245 u32
2246 fib_path_get_rpf_id (fib_node_index_t path_index)
2247 {
2248     fib_path_t *path;
2249
2250     path = fib_path_get(path_index);
2251
2252     ASSERT(path);
2253
2254     if (FIB_PATH_CFG_FLAG_RPF_ID & path->fp_cfg_flags)
2255     {
2256         return (path->deag.fp_rpf_id);
2257     }
2258
2259     return (~0);
2260 }
2261
2262 /**
2263  * @brief Contribute the path's adjacency to the list passed.
2264  * By calling this function over all paths, recursively, a child
2265  * can construct its full set of forwarding adjacencies, and hence its
2266  * uRPF list.
2267  */
2268 void
2269 fib_path_contribute_urpf (fib_node_index_t path_index,
2270                           index_t urpf)
2271 {
2272     fib_path_t *path;
2273
2274     path = fib_path_get(path_index);
2275
2276     /*
2277      * resolved and unresolved paths contribute to the RPF list.
2278      */
2279     switch (path->fp_type)
2280     {
2281     case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
2282         fib_urpf_list_append(urpf, path->attached_next_hop.fp_interface);
2283         break;
2284
2285     case FIB_PATH_TYPE_ATTACHED:
2286         fib_urpf_list_append(urpf, path->attached.fp_interface);
2287         break;
2288
2289     case FIB_PATH_TYPE_RECURSIVE:
2290         if (FIB_NODE_INDEX_INVALID != path->fp_via_fib &&
2291             !fib_path_is_looped(path_index))
2292         {
2293             /*
2294              * a path can be unresolved because of resolution constraints, or
2295              * because it has no via-entry; without one there is nothing to add.
2296              */
2297             fib_entry_contribute_urpf(path->fp_via_fib, urpf);
2298         }
2299         break;
2300
2301     case FIB_PATH_TYPE_EXCLUSIVE:
2302     case FIB_PATH_TYPE_SPECIAL:
2303     {
2304         /*
2305          * these path types may link to an adj, if that's what
2306          * the client gave
2307          */
2308         u32 rpf_sw_if_index;
2309
2310         rpf_sw_if_index = dpo_get_urpf(&path->fp_dpo);
2311
2312         if (~0 != rpf_sw_if_index)
2313         {
2314             fib_urpf_list_append(urpf, rpf_sw_if_index);
2315         }
2316         break;
2317     }
2318     case FIB_PATH_TYPE_DVR:
2319         fib_urpf_list_append(urpf, path->dvr.fp_interface);
2320         break;
2321     case FIB_PATH_TYPE_DEAG:
2322     case FIB_PATH_TYPE_RECEIVE:
2323     case FIB_PATH_TYPE_INTF_RX:
2324     case FIB_PATH_TYPE_UDP_ENCAP:
2325     case FIB_PATH_TYPE_BIER_FMASK:
2326     case FIB_PATH_TYPE_BIER_TABLE:
2327     case FIB_PATH_TYPE_BIER_IMP:
2328         /*
2329          * these path types don't link to an adj
2330          */
2331         break;
2332     }
2333 }
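
/*
 * Illustrative sketch, assuming the caller holds a vector of path indices
 * ('path_indices' is hypothetical) and uses the uRPF list allocator from
 * fib_urpf_list.h:
 *
 *    index_t urpf = fib_urpf_list_alloc_and_lock();
 *    fib_node_index_t *pi;
 *
 *    vec_foreach (pi, path_indices)
 *    {
 *        fib_path_contribute_urpf(*pi, urpf);
 *    }
 */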
2334
2335 void
2336 fib_path_stack_mpls_disp (fib_node_index_t path_index,
2337                           dpo_proto_t payload_proto,
2338                           fib_mpls_lsp_mode_t mode,
2339                           dpo_id_t *dpo)
2340 {
2341     fib_path_t *path;
2342
2343     path = fib_path_get(path_index);
2344
2345     ASSERT(path);
2346
2347     switch (path->fp_type)
2348     {
2349     case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
2350     {
2351         dpo_id_t tmp = DPO_INVALID;
2352
2353         dpo_copy(&tmp, dpo);
2354
2355         mpls_disp_dpo_create(payload_proto, ~0, mode, &tmp, dpo);
2356         dpo_reset(&tmp);
2357         break;
2358     }                
2359     case FIB_PATH_TYPE_DEAG:
2360     {
2361         dpo_id_t tmp = DPO_INVALID;
2362
2363         dpo_copy(&tmp, dpo);
2364
2365         mpls_disp_dpo_create(payload_proto,
2366                              path->deag.fp_rpf_id,
2367                              mode, &tmp, dpo);
2368         dpo_reset(&tmp);
2369         break;
2370     }
2371     case FIB_PATH_TYPE_RECEIVE:
2372     case FIB_PATH_TYPE_ATTACHED:
2373     case FIB_PATH_TYPE_RECURSIVE:
2374     case FIB_PATH_TYPE_INTF_RX:
2375     case FIB_PATH_TYPE_UDP_ENCAP:
2376     case FIB_PATH_TYPE_EXCLUSIVE:
2377     case FIB_PATH_TYPE_SPECIAL:
2378     case FIB_PATH_TYPE_BIER_FMASK:
2379     case FIB_PATH_TYPE_BIER_TABLE:
2380     case FIB_PATH_TYPE_BIER_IMP:
2381     case FIB_PATH_TYPE_DVR:
2382         break;
2383     }
2384 }
2385
2386 void
2387 fib_path_contribute_forwarding (fib_node_index_t path_index,
2388                                 fib_forward_chain_type_t fct,
2389                                 dpo_id_t *dpo)
2390 {
2391     fib_path_t *path;
2392
2393     path = fib_path_get(path_index);
2394
2395     ASSERT(path);
2396     ASSERT(FIB_FORW_CHAIN_TYPE_MPLS_EOS != fct);
2397
2398     /*
2399      * The DPO stored in the path was created when the path was resolved.
2400      * This then represents the path's 'native' protocol, i.e. IP.
2401      * For all other chain types we need to go and find something else.
2402      */
2403     if (fib_path_to_chain_type(path) == fct)
2404     {
2405         dpo_copy(dpo, &path->fp_dpo);
2406     }
2407     else
2408     {
2409         switch (path->fp_type)
2410         {
2411         case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
2412             switch (fct)
2413             {
2414             case FIB_FORW_CHAIN_TYPE_UNICAST_IP4:
2415             case FIB_FORW_CHAIN_TYPE_UNICAST_IP6:
2416             case FIB_FORW_CHAIN_TYPE_MPLS_EOS:
2417             case FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS:
2418             case FIB_FORW_CHAIN_TYPE_ETHERNET:
2419             case FIB_FORW_CHAIN_TYPE_NSH:
2420             case FIB_FORW_CHAIN_TYPE_MCAST_IP4:
2421             case FIB_FORW_CHAIN_TYPE_MCAST_IP6:
2422             {
2423                 adj_index_t ai;
2424
2425                 /*
2426                  * get an appropriate link-type adj.
2427                  */
2428                 ai = fib_path_attached_next_hop_get_adj(
2429                          path,
2430                          fib_forw_chain_type_to_link_type(fct));
2431                 dpo_set(dpo, DPO_ADJACENCY,
2432                         fib_forw_chain_type_to_dpo_proto(fct), ai);
2433                 adj_unlock(ai);
2434
2435                 break;
2436             }
2437             case FIB_FORW_CHAIN_TYPE_BIER:
2438                 break;
2439             }
2440             break;
2441         case FIB_PATH_TYPE_RECURSIVE:
2442             switch (fct)
2443             {
2444             case FIB_FORW_CHAIN_TYPE_MPLS_EOS:
2445             case FIB_FORW_CHAIN_TYPE_UNICAST_IP4:
2446             case FIB_FORW_CHAIN_TYPE_UNICAST_IP6:
2447             case FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS:
2448             case FIB_FORW_CHAIN_TYPE_MCAST_IP4:
2449             case FIB_FORW_CHAIN_TYPE_MCAST_IP6:
2450             case FIB_FORW_CHAIN_TYPE_BIER:
2451                 fib_path_recursive_adj_update(path, fct, dpo);
2452                 break;
2453             case FIB_FORW_CHAIN_TYPE_ETHERNET:
2454             case FIB_FORW_CHAIN_TYPE_NSH:
2455                 ASSERT(0);
2456                 break;
2457             }
2458             break;
2459         case FIB_PATH_TYPE_BIER_TABLE:
2460             switch (fct)
2461             {
2462             case FIB_FORW_CHAIN_TYPE_BIER:
2463                 bier_table_contribute_forwarding(path->fp_via_bier_tbl, dpo);
2464                 break;
2465             case FIB_FORW_CHAIN_TYPE_MPLS_EOS:
2466             case FIB_FORW_CHAIN_TYPE_UNICAST_IP4:
2467             case FIB_FORW_CHAIN_TYPE_UNICAST_IP6:
2468             case FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS:
2469             case FIB_FORW_CHAIN_TYPE_MCAST_IP4:
2470             case FIB_FORW_CHAIN_TYPE_MCAST_IP6:
2471             case FIB_FORW_CHAIN_TYPE_ETHERNET:
2472             case FIB_FORW_CHAIN_TYPE_NSH:
2473                 ASSERT(0);
2474                 break;
2475             }
2476             break;
2477         case FIB_PATH_TYPE_BIER_FMASK:
2478             switch (fct)
2479             {
2480             case FIB_FORW_CHAIN_TYPE_BIER:
2481                 fib_path_bier_fmask_update(path, dpo);
2482                 break;
2483             case FIB_FORW_CHAIN_TYPE_MPLS_EOS:
2484             case FIB_FORW_CHAIN_TYPE_UNICAST_IP4:
2485             case FIB_FORW_CHAIN_TYPE_UNICAST_IP6:
2486             case FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS:
2487             case FIB_FORW_CHAIN_TYPE_MCAST_IP4:
2488             case FIB_FORW_CHAIN_TYPE_MCAST_IP6:
2489             case FIB_FORW_CHAIN_TYPE_ETHERNET:
2490             case FIB_FORW_CHAIN_TYPE_NSH:
2491                 ASSERT(0);
2492                 break;
2493             }
2494             break;
2495         case FIB_PATH_TYPE_BIER_IMP:
2496             bier_imp_contribute_forwarding(path->bier_imp.fp_bier_imp,
2497                                            fib_forw_chain_type_to_dpo_proto(fct),
2498                                            dpo);
2499             break;
2500         case FIB_PATH_TYPE_DEAG:
2501             switch (fct)
2502             {
2503             case FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS:
2504                 lookup_dpo_add_or_lock_w_table_id(MPLS_FIB_DEFAULT_TABLE_ID,
2505                                                   DPO_PROTO_MPLS,
2506                                                   LOOKUP_UNICAST,
2507                                                   LOOKUP_INPUT_DST_ADDR,
2508                                                   LOOKUP_TABLE_FROM_CONFIG,
2509                                                   dpo);
2510                 break;
2511             case FIB_FORW_CHAIN_TYPE_MPLS_EOS:
2512             case FIB_FORW_CHAIN_TYPE_UNICAST_IP4:
2513             case FIB_FORW_CHAIN_TYPE_UNICAST_IP6:
2514             case FIB_FORW_CHAIN_TYPE_MCAST_IP4:
2515             case FIB_FORW_CHAIN_TYPE_MCAST_IP6:
2516                 dpo_copy(dpo, &path->fp_dpo);
2517                 break;
2518             case FIB_FORW_CHAIN_TYPE_BIER:
2519                 break;
2520             case FIB_FORW_CHAIN_TYPE_ETHERNET:
2521             case FIB_FORW_CHAIN_TYPE_NSH:
2522                 ASSERT(0);
2523                 break;
2524             }
2525             break;
2526         case FIB_PATH_TYPE_EXCLUSIVE:
2527             dpo_copy(dpo, &path->exclusive.fp_ex_dpo);
2528             break;
2529         case FIB_PATH_TYPE_ATTACHED:
2530             switch (fct)
2531             {
2532             case FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS:
2533             case FIB_FORW_CHAIN_TYPE_UNICAST_IP4:
2534             case FIB_FORW_CHAIN_TYPE_UNICAST_IP6:
2535             case FIB_FORW_CHAIN_TYPE_MPLS_EOS:
2536             case FIB_FORW_CHAIN_TYPE_ETHERNET:
2537             case FIB_FORW_CHAIN_TYPE_NSH:
2538             case FIB_FORW_CHAIN_TYPE_BIER:
2539                 {
2540                     adj_index_t ai;
2541
2542                     /*
2543                      * get an appropriate link-type adj.
2544                      */
2545                     ai = fib_path_attached_get_adj(
2546                             path,
2547                             fib_forw_chain_type_to_link_type(fct));
2548                     dpo_set(dpo, DPO_ADJACENCY,
2549                             fib_forw_chain_type_to_dpo_proto(fct), ai);
2550                     adj_unlock(ai);
2551                     break;
2552                 }
2553             case FIB_FORW_CHAIN_TYPE_MCAST_IP4:
2554             case FIB_FORW_CHAIN_TYPE_MCAST_IP6:
2555                 {
2556                     adj_index_t ai;
2557
2558                     /*
2559                      * Create the adj needed for sending IP multicast traffic
2560                      */
2561                     ai = adj_mcast_add_or_lock(dpo_proto_to_fib(path->fp_nh_proto),
2562                                                fib_forw_chain_type_to_link_type(fct),
2563                                                path->attached.fp_interface);
2564                     dpo_set(dpo, DPO_ADJACENCY,
2565                             fib_forw_chain_type_to_dpo_proto(fct),
2566                             ai);
2567                     adj_unlock(ai);
2568                 }
2569                 break;
2570             }
2571             break;
2572         case FIB_PATH_TYPE_INTF_RX:
2573             /*
2574              * Create the interface-rx DPO to inject packets into the interface's RX path
2575              */
2576             interface_rx_dpo_add_or_lock(fib_forw_chain_type_to_dpo_proto(fct),
2577                                          path->attached.fp_interface,
2578                                          dpo);
2579             break;
2580         case FIB_PATH_TYPE_UDP_ENCAP:
2581             udp_encap_contribute_forwarding(path->udp_encap.fp_udp_encap_id,
2582                                             path->fp_nh_proto,
2583                                             dpo);
2584             break;
2585         case FIB_PATH_TYPE_RECEIVE:
2586         case FIB_PATH_TYPE_SPECIAL:
2587         case FIB_PATH_TYPE_DVR:
2588             dpo_copy(dpo, &path->fp_dpo);
2589             break;
2590         }
2591     }
2592 }
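
/*
 * Illustrative sketch: a child that needs, say, the MPLS non-EOS chain from a
 * path (rather than the path's 'native' DPO) asks for it explicitly:
 *
 *    dpo_id_t via_dpo = DPO_INVALID;
 *
 *    fib_path_contribute_forwarding(path_index,
 *                                   FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS,
 *                                   &via_dpo);
 *    ...
 *    dpo_reset(&via_dpo);
 */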
2593
2594 load_balance_path_t *
2595 fib_path_append_nh_for_multipath_hash (fib_node_index_t path_index,
2596                                        fib_forward_chain_type_t fct,
2597                                        load_balance_path_t *hash_key)
2598 {
2599     load_balance_path_t *mnh;
2600     fib_path_t *path;
2601
2602     path = fib_path_get(path_index);
2603
2604     ASSERT(path);
2605
2606     vec_add2(hash_key, mnh, 1);
2607
2608     mnh->path_weight = path->fp_weight;
2609     mnh->path_index = path_index;
2610
2611     if (fib_path_is_resolved(path_index))
2612     {
2613         fib_path_contribute_forwarding(path_index, fct, &mnh->path_dpo);
2614     }
2615     else
2616     {
2617         dpo_copy(&mnh->path_dpo,
2618                  drop_dpo_get(fib_forw_chain_type_to_dpo_proto(fct)));
2619     }
2620     return (hash_key);
2621 }
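
/*
 * Illustrative sketch of collecting the per-path choices when filling a
 * load-balance ('paths' is a hypothetical vector of path indices, 'fct' the
 * chain type being built):
 *
 *    load_balance_path_t *nhs = NULL;
 *    fib_node_index_t *path_index;
 *
 *    vec_foreach (path_index, paths)
 *    {
 *        nhs = fib_path_append_nh_for_multipath_hash(*path_index, fct, nhs);
 *    }
 */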
2622
2623 int
2624 fib_path_is_recursive_constrained (fib_node_index_t path_index)
2625 {
2626     fib_path_t *path;
2627
2628     path = fib_path_get(path_index);
2629
2630     return ((FIB_PATH_TYPE_RECURSIVE == path->fp_type) &&
2631             ((path->fp_cfg_flags & FIB_PATH_CFG_FLAG_RESOLVE_ATTACHED) ||
2632              (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_RESOLVE_HOST)));
2633 }
2634
2635 int
2636 fib_path_is_exclusive (fib_node_index_t path_index)
2637 {
2638     fib_path_t *path;
2639
2640     path = fib_path_get(path_index);
2641
2642     return (FIB_PATH_TYPE_EXCLUSIVE == path->fp_type);
2643 }
2644
2645 int
2646 fib_path_is_deag (fib_node_index_t path_index)
2647 {
2648     fib_path_t *path;
2649
2650     path = fib_path_get(path_index);
2651
2652     return (FIB_PATH_TYPE_DEAG == path->fp_type);
2653 }
2654
2655 int
2656 fib_path_is_resolved (fib_node_index_t path_index)
2657 {
2658     fib_path_t *path;
2659
2660     path = fib_path_get(path_index);
2661
2662     return (dpo_id_is_valid(&path->fp_dpo) &&
2663             (path->fp_oper_flags & FIB_PATH_OPER_FLAG_RESOLVED) &&
2664             !fib_path_is_looped(path_index) &&
2665             !fib_path_is_permanent_drop(path));
2666 }
2667
2668 int
2669 fib_path_is_looped (fib_node_index_t path_index)
2670 {
2671     fib_path_t *path;
2672
2673     path = fib_path_get(path_index);
2674
2675     return (path->fp_oper_flags & FIB_PATH_OPER_FLAG_RECURSIVE_LOOP);
2676 }
2677
2678 fib_path_list_walk_rc_t
2679 fib_path_encode (fib_node_index_t path_list_index,
2680                  fib_node_index_t path_index,
2681                  const fib_path_ext_t *path_ext,
2682                  void *args)
2683 {
2684     fib_path_encode_ctx_t *ctx = args;
2685     fib_route_path_t *rpath;
2686     fib_path_t *path;
2687
2688     path = fib_path_get(path_index);
2689     if (!path)
2690       return (FIB_PATH_LIST_WALK_CONTINUE);
2691
2692     vec_add2(ctx->rpaths, rpath, 1);
2693     rpath->frp_weight = path->fp_weight;
2694     rpath->frp_preference = path->fp_preference;
2695     rpath->frp_proto = path->fp_nh_proto;
2696     rpath->frp_sw_if_index = ~0;
2697     rpath->frp_fib_index = 0;
2698
2699     switch (path->fp_type)
2700     {
2701       case FIB_PATH_TYPE_RECEIVE:
2702         rpath->frp_addr = path->receive.fp_addr;
2703         rpath->frp_sw_if_index = path->receive.fp_interface;
2704         rpath->frp_flags |= FIB_ROUTE_PATH_LOCAL;
2705         break;
2706       case FIB_PATH_TYPE_ATTACHED:
2707         rpath->frp_sw_if_index = path->attached.fp_interface;
2708         break;
2709       case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
2710         rpath->frp_sw_if_index = path->attached_next_hop.fp_interface;
2711         rpath->frp_addr = path->attached_next_hop.fp_nh;
2712         break;
2713       case FIB_PATH_TYPE_BIER_FMASK:
2714         rpath->frp_bier_fmask = path->bier_fmask.fp_bier_fmask;
2715         break;
2716       case FIB_PATH_TYPE_SPECIAL:
2717         break;
2718       case FIB_PATH_TYPE_DEAG:
2719         rpath->frp_fib_index = path->deag.fp_tbl_id;
2720         if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_RPF_ID)
2721         {
2722             rpath->frp_flags |= FIB_ROUTE_PATH_RPF_ID;
2723         }
2724         break;
2725       case FIB_PATH_TYPE_RECURSIVE:
2726         rpath->frp_addr = path->recursive.fp_nh.fp_ip;
2727         rpath->frp_fib_index = path->recursive.fp_tbl_id;
2728         break;
2729       case FIB_PATH_TYPE_DVR:
2730           rpath->frp_sw_if_index = path->dvr.fp_interface;
2731           rpath->frp_flags |= FIB_ROUTE_PATH_DVR;
2732           break;
2733       case FIB_PATH_TYPE_UDP_ENCAP:
2734           rpath->frp_udp_encap_id = path->udp_encap.fp_udp_encap_id;
2735           rpath->frp_flags |= FIB_ROUTE_PATH_UDP_ENCAP;
2736           break;
2737       case FIB_PATH_TYPE_INTF_RX:
2738           rpath->frp_sw_if_index = path->receive.fp_interface;
2739           rpath->frp_flags |= FIB_ROUTE_PATH_INTF_RX;
2740           break;
2741       case FIB_PATH_TYPE_EXCLUSIVE:
2742         rpath->frp_flags |= FIB_ROUTE_PATH_EXCLUSIVE;
2743       default:
2744         break;
2745     }
2746
2747     if (path_ext && path_ext->fpe_type == FIB_PATH_EXT_MPLS) 
2748     {
2749         rpath->frp_label_stack = path_ext->fpe_path.frp_label_stack;
2750     }
2751
2752     if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_DROP)
2753         rpath->frp_flags |= FIB_ROUTE_PATH_DROP;
2754     if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_ICMP_UNREACH)
2755         rpath->frp_flags |= FIB_ROUTE_PATH_ICMP_UNREACH;
2756     if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_ICMP_PROHIBIT)
2757         rpath->frp_flags |= FIB_ROUTE_PATH_ICMP_PROHIBIT;
2758
2759     return (FIB_PATH_LIST_WALK_CONTINUE);
2760 }
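
/*
 * Illustrative sketch, assuming fib_path_list_walk_w_ext() is the walker used
 * to drive the encode of a path-list into API-friendly route-paths:
 *
 *    fib_path_encode_ctx_t ctx = {
 *        .rpaths = NULL,
 *    };
 *
 *    fib_path_list_walk_w_ext(path_list_index, NULL, fib_path_encode, &ctx);
 *    ... hand ctx.rpaths (a vector of fib_route_path_t) to the API ...
 *    vec_free(ctx.rpaths);
 */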
2761
2762 dpo_proto_t
2763 fib_path_get_proto (fib_node_index_t path_index)
2764 {
2765     fib_path_t *path;
2766
2767     path = fib_path_get(path_index);
2768
2769     return (path->fp_nh_proto);
2770 }
2771
2772 void
2773 fib_path_module_init (void)
2774 {
2775     fib_node_register_type (FIB_NODE_TYPE_PATH, &fib_path_vft);
2776     fib_path_logger = vlib_log_register_class ("fib", "path");
2777 }
2778
2779 static clib_error_t *
2780 show_fib_path_command (vlib_main_t * vm,
2781                         unformat_input_t * input,
2782                         vlib_cli_command_t * cmd)
2783 {
2784     fib_node_index_t pi;
2785     fib_path_t *path;
2786
2787     if (unformat (input, "%d", &pi))
2788     {
2789         /*
2790          * show one in detail
2791          */
2792         if (!pool_is_free_index(fib_path_pool, pi))
2793         {
2794             path = fib_path_get(pi);
2795             u8 *s = format(NULL, "%U", format_fib_path, pi, 1,
2796                            FIB_PATH_FORMAT_FLAGS_NONE);
2797             s = format(s, "\n  children:");
2798             s = fib_node_children_format(path->fp_node.fn_children, s);
2799             vlib_cli_output (vm, "%s", s);
2800             vec_free(s);
2801         }
2802         else
2803         {
2804             vlib_cli_output (vm, "path %d invalid", pi);
2805         }
2806     }
2807     else
2808     {
2809         vlib_cli_output (vm, "FIB Paths");
2810         pool_foreach_index (pi, fib_path_pool,
2811         ({
2812             vlib_cli_output (vm, "%U", format_fib_path, pi, 0,
2813                              FIB_PATH_FORMAT_FLAGS_NONE);
2814         }));
2815     }
2816
2817     return (NULL);
2818 }
2819
2820 VLIB_CLI_COMMAND (show_fib_path, static) = {
2821   .path = "show fib paths",
2822   .function = show_fib_path_command,
2823   .short_help = "show fib paths",
2824 };
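
/*
 * Example invocations of the CLI registered above (illustrative):
 *
 *    vpp# show fib paths        - brief listing of all paths
 *    vpp# show fib paths 12     - detail, including children, for path 12
 */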