vpp.git: src/vnet/fib/fib_path.c @ 67a4bc1d4de118115877da32139edf8883b0a0ed
1 /*
2  * Copyright (c) 2016 Cisco and/or its affiliates.
3  * Licensed under the Apache License, Version 2.0 (the "License");
4  * you may not use this file except in compliance with the License.
5  * You may obtain a copy of the License at:
6  *
7  *     http://www.apache.org/licenses/LICENSE-2.0
8  *
9  * Unless required by applicable law or agreed to in writing, software
10  * distributed under the License is distributed on an "AS IS" BASIS,
11  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12  * See the License for the specific language governing permissions and
13  * limitations under the License.
14  */
15
16 #include <vlib/vlib.h>
17 #include <vnet/vnet.h>
18 #include <vnet/ip/format.h>
19 #include <vnet/ip/ip.h>
20 #include <vnet/dpo/drop_dpo.h>
21 #include <vnet/dpo/receive_dpo.h>
22 #include <vnet/dpo/load_balance_map.h>
23 #include <vnet/dpo/lookup_dpo.h>
24 #include <vnet/dpo/interface_rx_dpo.h>
25 #include <vnet/dpo/mpls_disposition.h>
26 #include <vnet/dpo/dvr_dpo.h>
27 #include <vnet/dpo/drop_dpo.h>
28
29 #include <vnet/adj/adj.h>
30 #include <vnet/adj/adj_mcast.h>
31
32 #include <vnet/fib/fib_path.h>
33 #include <vnet/fib/fib_node.h>
34 #include <vnet/fib/fib_table.h>
35 #include <vnet/fib/fib_entry.h>
36 #include <vnet/fib/fib_path_list.h>
37 #include <vnet/fib/fib_internal.h>
38 #include <vnet/fib/fib_urpf_list.h>
39 #include <vnet/fib/mpls_fib.h>
40 #include <vnet/fib/fib_path_ext.h>
41 #include <vnet/udp/udp_encap.h>
42 #include <vnet/bier/bier_fmask.h>
43 #include <vnet/bier/bier_table.h>
44 #include <vnet/bier/bier_imp.h>
45 #include <vnet/bier/bier_disp_table.h>
46
47 /**
48  * Enumeration of path types
49  */
50 typedef enum fib_path_type_t_ {
51     /**
52      * Marker. Add new types after this one.
53      */
54     FIB_PATH_TYPE_FIRST = 0,
55     /**
56      * Attached-nexthop. An interface and a nexthop are known.
57      */
58     FIB_PATH_TYPE_ATTACHED_NEXT_HOP = FIB_PATH_TYPE_FIRST,
59     /**
60      * attached. Only the interface is known.
61      */
62     FIB_PATH_TYPE_ATTACHED,
63     /**
64      * recursive. Only the next-hop is known.
65      */
66     FIB_PATH_TYPE_RECURSIVE,
67     /**
68      * special. nothing is known. so we drop.
69      */
70     FIB_PATH_TYPE_SPECIAL,
71     /**
72      * exclusive. user provided adj.
73      */
74     FIB_PATH_TYPE_EXCLUSIVE,
75     /**
76      * deag. Link to a lookup adj in the next table
77      */
78     FIB_PATH_TYPE_DEAG,
79     /**
80      * interface receive.
81      */
82     FIB_PATH_TYPE_INTF_RX,
83     /**
84      * Path resolves via a UDP encap object.
85      */
86     FIB_PATH_TYPE_UDP_ENCAP,
87     /**
88      * receive. it's for-us.
89      */
90     FIB_PATH_TYPE_RECEIVE,
91     /**
92      * bier-imp. it's via a BIER imposition.
93      */
94     FIB_PATH_TYPE_BIER_IMP,
95     /**
96      * bier-table. it's via a BIER ECMP-table.
97      */
98     FIB_PATH_TYPE_BIER_TABLE,
99     /**
100      * bier-fmask. it's via a BIER f-mask.
101      */
102     FIB_PATH_TYPE_BIER_FMASK,
103     /**
104      * via a DVR.
105      */
106     FIB_PATH_TYPE_DVR,
107     /**
108      * Marker. Add new types before this one, then update it.
109      */
110     FIB_PATH_TYPE_LAST = FIB_PATH_TYPE_DVR,
111 } __attribute__ ((packed)) fib_path_type_t;
112
113 /**
114  * The maximum number of path_types
115  */
116 #define FIB_PATH_TYPE_MAX (FIB_PATH_TYPE_LAST + 1)
117
118 #define FIB_PATH_TYPES {                                        \
119     [FIB_PATH_TYPE_ATTACHED_NEXT_HOP] = "attached-nexthop",     \
120     [FIB_PATH_TYPE_ATTACHED]          = "attached",             \
121     [FIB_PATH_TYPE_RECURSIVE]         = "recursive",            \
122     [FIB_PATH_TYPE_SPECIAL]           = "special",              \
123     [FIB_PATH_TYPE_EXCLUSIVE]         = "exclusive",            \
124     [FIB_PATH_TYPE_DEAG]              = "deag",                 \
125     [FIB_PATH_TYPE_INTF_RX]           = "intf-rx",              \
126     [FIB_PATH_TYPE_UDP_ENCAP]         = "udp-encap",            \
127     [FIB_PATH_TYPE_RECEIVE]           = "receive",              \
128     [FIB_PATH_TYPE_BIER_IMP]          = "bier-imp",             \
129     [FIB_PATH_TYPE_BIER_TABLE]        = "bier-table",           \
130     [FIB_PATH_TYPE_BIER_FMASK]        = "bier-fmask",           \
131     [FIB_PATH_TYPE_DVR]               = "dvr",                  \
132 }
133
134 #define FOR_EACH_FIB_PATH_TYPE(_item)           \
135     for (_item = FIB_PATH_TYPE_FIRST;           \
136          _item <= FIB_PATH_TYPE_LAST;           \
137          _item++)
138
139 /**
140  * Enumeration of path operational (i.e. derived) attributes
141  */
142 typedef enum fib_path_oper_attribute_t_ {
143     /**
144      * Marker. Add new types after this one.
145      */
146     FIB_PATH_OPER_ATTRIBUTE_FIRST = 0,
147     /**
148      * The path forms part of a recursive loop.
149      */
150     FIB_PATH_OPER_ATTRIBUTE_RECURSIVE_LOOP = FIB_PATH_OPER_ATTRIBUTE_FIRST,
151     /**
152      * The path is resolved
153      */
154     FIB_PATH_OPER_ATTRIBUTE_RESOLVED,
155     /**
156      * The path is attached, despite what the next-hop may say.
157      */
158     FIB_PATH_OPER_ATTRIBUTE_ATTACHED,
159     /**
160      * The path has become a permanent drop.
161      */
162     FIB_PATH_OPER_ATTRIBUTE_DROP,
163     /**
164      * Marker. Add new types before this one, then update it.
165      */
166     FIB_PATH_OPER_ATTRIBUTE_LAST = FIB_PATH_OPER_ATTRIBUTE_DROP,
167 } __attribute__ ((packed)) fib_path_oper_attribute_t;
168
169 /**
170  * The maximum number of path operational attributes
171  */
172 #define FIB_PATH_OPER_ATTRIBUTE_MAX (FIB_PATH_OPER_ATTRIBUTE_LAST + 1)
173
174 #define FIB_PATH_OPER_ATTRIBUTES {                                      \
175     [FIB_PATH_OPER_ATTRIBUTE_RECURSIVE_LOOP] = "recursive-loop",        \
176     [FIB_PATH_OPER_ATTRIBUTE_RESOLVED]       = "resolved",              \
    [FIB_PATH_OPER_ATTRIBUTE_ATTACHED]       = "attached",              \
177     [FIB_PATH_OPER_ATTRIBUTE_DROP]           = "drop",                  \
178 }
179
180 #define FOR_EACH_FIB_PATH_OPER_ATTRIBUTE(_item) \
181     for (_item = FIB_PATH_OPER_ATTRIBUTE_FIRST; \
182          _item <= FIB_PATH_OPER_ATTRIBUTE_LAST; \
183          _item++)
184
185 /**
186  * Path flags from the attributes
187  */
188 typedef enum fib_path_oper_flags_t_ {
189     FIB_PATH_OPER_FLAG_NONE = 0,
190     FIB_PATH_OPER_FLAG_RECURSIVE_LOOP = (1 << FIB_PATH_OPER_ATTRIBUTE_RECURSIVE_LOOP),
191     FIB_PATH_OPER_FLAG_DROP = (1 << FIB_PATH_OPER_ATTRIBUTE_DROP),
192     FIB_PATH_OPER_FLAG_RESOLVED = (1 << FIB_PATH_OPER_ATTRIBUTE_RESOLVED),
193     FIB_PATH_OPER_FLAG_ATTACHED = (1 << FIB_PATH_OPER_ATTRIBUTE_ATTACHED),
194 } __attribute__ ((packed)) fib_path_oper_flags_t;
195
196 /**
197  * A FIB path
198  */
199 typedef struct fib_path_t_ {
200     /**
201      * A path is a node in the FIB graph.
202      */
203     fib_node_t fp_node;
204
205     /**
206      * The index of the path-list to which this path belongs
207      */
208     u32 fp_pl_index;
209
210     /**
211      * This marks the start of the memory area used to hash
212      * the path
213      */
214     STRUCT_MARK(path_hash_start);
215
216     /**
217      * Configuration Flags
218      */
219     fib_path_cfg_flags_t fp_cfg_flags;
220
221     /**
222      * The type of the path. This is the selector for the union
223      */
224     fib_path_type_t fp_type;
225
226     /**
227      * The protocol of the next-hop, i.e. the address family of the
228      * next-hop's address. We can't derive this from the address itself
229      * since the address can be all zeros
230      */
231     dpo_proto_t fp_nh_proto;
232
233     /**
234      * UCMP [unnormalised] weight
235      */
236     u8 fp_weight;
237
238     /**
239      * A path preference. 0 is the best.
240      * Only paths of the best preference, that are 'up', are considered
241      * for forwarding.
242      */
243     u8 fp_preference;
244
245     /**
246      * per-type union of the data required to resolve the path
247      */
248     union {
249         struct {
250             /**
251              * The next-hop
252              */
253             ip46_address_t fp_nh;
254             /**
255              * The interface
256              */
257             u32 fp_interface;
258         } attached_next_hop;
259         struct {
260             /**
261              * The interface
262              */
263             u32 fp_interface;
264         } attached;
265         struct {
266             union
267             {
268                 /**
269                  * The next-hop
270                  */
271                 ip46_address_t fp_ip;
272                 struct {
273                     /**
274                      * The local label to resolve through.
275                      */
276                     mpls_label_t fp_local_label;
277                     /**
278                      * The EOS bit of the resolving label
279                      */
280                     mpls_eos_bit_t fp_eos;
281                 };
282             } fp_nh;
283             union {
284                 /**
285                  * The FIB table index in which to find the next-hop.
286                  */
287                 fib_node_index_t fp_tbl_id;
288                 /**
289                  * The BIER FIB the fmask is in
290                  */
291                 index_t fp_bier_fib;
292             };
293         } recursive;
294         struct {
295             /**
296              * BIER FMask ID
297              */
298             index_t fp_bier_fmask;
299         } bier_fmask;
300         struct {
301             /**
302              * The BIER table's ID
303              */
304             bier_table_id_t fp_bier_tbl;
305         } bier_table;
306         struct {
307             /**
308              * The BIER imposition object
309              * this is part of the path's key, since the index_t
310              * of an imposition object is the object's key.
311              */
312             index_t fp_bier_imp;
313         } bier_imp;
314         struct {
315             /**
316              * The FIB index in which to perform the next lookup
317              */
318             fib_node_index_t fp_tbl_id;
319             /**
320              * The RPF-ID to tag the packets with
321              */
322             fib_rpf_id_t fp_rpf_id;
323         } deag;
324         struct {
325         } special;
326         struct {
327             /**
328              * The user provided 'exclusive' DPO
329              */
330             dpo_id_t fp_ex_dpo;
331         } exclusive;
332         struct {
333             /**
334              * The interface on which the local address is configured
335              */
336             u32 fp_interface;
337             /**
338              * The next-hop
339              */
340             ip46_address_t fp_addr;
341         } receive;
342         struct {
343             /**
344              * The interface on which the packets will be input.
345              */
346             u32 fp_interface;
347         } intf_rx;
348         struct {
349             /**
350              * The UDP Encap object this path resolves through
351              */
352             u32 fp_udp_encap_id;
353         } udp_encap;
354         struct {
355             /**
356              * The interface
357              */
358             u32 fp_interface;
359         } dvr;
360     };
361     STRUCT_MARK(path_hash_end);
362
363     /**
364      * Members in this last section represent information that is
365      * derived during resolution. It should not be copied to new paths
366      * nor compared.
367      */
368
369     /**
370      * Operational Flags
371      */
372     fib_path_oper_flags_t fp_oper_flags;
373
374     union {
375         /**
376          * the resolving via fib. not part of the union, since it is not part
377          * of the path's hash.
378          */
379         fib_node_index_t fp_via_fib;
380         /**
381          * the resolving bier-table
382          */
383         index_t fp_via_bier_tbl;
384         /**
385          * the resolving bier-fmask
386          */
387         index_t fp_via_bier_fmask;
388     };
389
390     /**
391      * The Data-path objects through which this path resolves for IP.
392      */
393     dpo_id_t fp_dpo;
394
395     /**
396      * the index of this path in the parent's child list.
397      */
398     u32 fp_sibling;
399 } fib_path_t;
400
401 /*
402  * Array of strings/names for the path types and attributes
403  */
404 static const char *fib_path_type_names[] = FIB_PATH_TYPES;
405 static const char *fib_path_oper_attribute_names[] = FIB_PATH_OPER_ATTRIBUTES;
406 static const char *fib_path_cfg_attribute_names[]  = FIB_PATH_CFG_ATTRIBUTES;
407
408 /*
409  * The memory pool from which we allocate all the paths
410  */
411 static fib_path_t *fib_path_pool;
412
413 /**
414  * the logger
415  */
416 vlib_log_class_t fib_path_logger;
417
418 /*
419  * Debug macro
420  */
421 #define FIB_PATH_DBG(_p, _fmt, _args...)                                \
422 {                                                                       \
423     vlib_log_debug (fib_path_logger,                                    \
424                     "[%U]: " _fmt,                                      \
425                     format_fib_path, fib_path_get_index(_p), 0,         \
426                     FIB_PATH_FORMAT_FLAGS_ONE_LINE,                     \
427                     ##_args);                                           \
428 }
429
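/*
 * fib_path_get
 *
 * Pool accessor; return the path object stored at the given index.
 */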
430 static fib_path_t *
431 fib_path_get (fib_node_index_t index)
432 {
433     return (pool_elt_at_index(fib_path_pool, index));
434 }
435
436 static fib_node_index_t 
437 fib_path_get_index (fib_path_t *path)
438 {
439     return (path - fib_path_pool);
440 }
441
442 static fib_node_t *
443 fib_path_get_node (fib_node_index_t index)
444 {
445     return ((fib_node_t*)fib_path_get(index));
446 }
447
448 static fib_path_t*
449 fib_path_from_fib_node (fib_node_t *node)
450 {
451     ASSERT(FIB_NODE_TYPE_PATH == node->fn_type);
452     return ((fib_path_t*)node);
453 }
454
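/*
 * format_fib_path
 *
 * Format a path for show output. The va_args are the path's index
 * (fib_node_index_t), the indent (u32) and the format flags
 * (fib_format_path_flags_t).
 */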
455 u8 *
456 format_fib_path (u8 * s, va_list * args)
457 {
458     fib_node_index_t path_index = va_arg (*args, fib_node_index_t);
459     u32 indent = va_arg (*args, u32);
460     fib_format_path_flags_t flags =  va_arg (*args, fib_format_path_flags_t);
461     vnet_main_t * vnm = vnet_get_main();
462     fib_path_oper_attribute_t oattr;
463     fib_path_cfg_attribute_t cattr;
464     fib_path_t *path;
465     const char *eol;
466
467     if (flags & FIB_PATH_FORMAT_FLAGS_ONE_LINE)
468     {
469         eol = "";
470     }
471     else
472     {
473         eol = "\n";
474     }
475
476     path = fib_path_get(path_index);
477
478     s = format (s, "%Upath:[%d] ", format_white_space, indent,
479                 fib_path_get_index(path));
480     s = format (s, "pl-index:%d ", path->fp_pl_index);
481     s = format (s, "%U ", format_dpo_proto, path->fp_nh_proto);
482     s = format (s, "weight=%d ", path->fp_weight);
483     s = format (s, "pref=%d ", path->fp_preference);
484     s = format (s, "%s: ", fib_path_type_names[path->fp_type]);
485     if (FIB_PATH_OPER_FLAG_NONE != path->fp_oper_flags) {
486         s = format(s, " oper-flags:");
487         FOR_EACH_FIB_PATH_OPER_ATTRIBUTE(oattr) {
488             if ((1<<oattr) & path->fp_oper_flags) {
489                 s = format (s, "%s,", fib_path_oper_attribute_names[oattr]);
490             }
491         }
492     }
493     if (FIB_PATH_CFG_FLAG_NONE != path->fp_cfg_flags) {
494         s = format(s, " cfg-flags:");
495         FOR_EACH_FIB_PATH_CFG_ATTRIBUTE(cattr) {
496             if ((1<<cattr) & path->fp_cfg_flags) {
497                 s = format (s, "%s,", fib_path_cfg_attribute_names[cattr]);
498             }
499         }
500     }
501     if (!(flags & FIB_PATH_FORMAT_FLAGS_ONE_LINE))
502         s = format(s, "\n%U", format_white_space, indent+2);
503
504     switch (path->fp_type)
505     {
506     case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
507         s = format (s, "%U", format_ip46_address,
508                     &path->attached_next_hop.fp_nh,
509                     IP46_TYPE_ANY);
510         if (path->fp_oper_flags & FIB_PATH_OPER_FLAG_DROP)
511         {
512             s = format (s, " if_index:%d", path->attached_next_hop.fp_interface);
513         }
514         else
515         {
516             s = format (s, " %U",
517                         format_vnet_sw_interface_name,
518                         vnm,
519                         vnet_get_sw_interface(
520                             vnm,
521                             path->attached_next_hop.fp_interface));
522             if (vnet_sw_interface_is_p2p(vnet_get_main(),
523                                          path->attached_next_hop.fp_interface))
524             {
525                 s = format (s, " (p2p)");
526             }
527         }
528         if (!dpo_id_is_valid(&path->fp_dpo))
529         {
530             s = format(s, "%s%Uunresolved", eol, format_white_space, indent+2);
531         }
532         else
533         {
534             s = format(s, "%s%U%U", eol,
535                        format_white_space, indent,
536                        format_dpo_id,
537                        &path->fp_dpo, 13);
538         }
539         break;
540     case FIB_PATH_TYPE_ATTACHED:
541         if (path->fp_oper_flags & FIB_PATH_OPER_FLAG_DROP)
542         {
543             s = format (s, "if_index:%d", path->attached.fp_interface);
544         }
545         else
546         {
547             s = format (s, " %U",
548                         format_vnet_sw_interface_name,
549                         vnm,
550                         vnet_get_sw_interface(
551                             vnm,
552                             path->attached.fp_interface));
553         }
554         break;
555     case FIB_PATH_TYPE_RECURSIVE:
556         if (DPO_PROTO_MPLS == path->fp_nh_proto)
557         {
558             s = format (s, "via %U %U",
559                         format_mpls_unicast_label,
560                         path->recursive.fp_nh.fp_local_label,
561                         format_mpls_eos_bit,
562                         path->recursive.fp_nh.fp_eos);
563         }
564         else
565         {
566             s = format (s, "via %U",
567                         format_ip46_address,
568                         &path->recursive.fp_nh.fp_ip,
569                         IP46_TYPE_ANY);
570         }
571         s = format (s, " in fib:%d",
572                     path->recursive.fp_tbl_id);
574         s = format (s, " via-fib:%d", path->fp_via_fib);
575         s = format (s, " via-dpo:[%U:%d]",
576                     format_dpo_type, path->fp_dpo.dpoi_type, 
577                     path->fp_dpo.dpoi_index);
578
579         break;
580     case FIB_PATH_TYPE_UDP_ENCAP:
581         s = format (s, "UDP-encap ID:%d", path->udp_encap.fp_udp_encap_id);
582         break;
583     case FIB_PATH_TYPE_BIER_TABLE:
584         s = format (s, "via bier-table:[%U}",
585                     format_bier_table_id,
586                     &path->bier_table.fp_bier_tbl);
587         s = format (s, " via-dpo:[%U:%d]",
588                     format_dpo_type, path->fp_dpo.dpoi_type,
589                     path->fp_dpo.dpoi_index);
590         break;
591     case FIB_PATH_TYPE_BIER_FMASK:
592         s = format (s, "via-fmask:%d", path->bier_fmask.fp_bier_fmask); 
593         s = format (s, " via-dpo:[%U:%d]",
594                     format_dpo_type, path->fp_dpo.dpoi_type, 
595                     path->fp_dpo.dpoi_index);
596         break;
597     case FIB_PATH_TYPE_BIER_IMP:
598         s = format (s, "via %U", format_bier_imp,
599                     path->bier_imp.fp_bier_imp, 0, BIER_SHOW_BRIEF);
600         break;
601     case FIB_PATH_TYPE_DVR:
602         s = format (s, " %U",
603                     format_vnet_sw_interface_name,
604                     vnm,
605                     vnet_get_sw_interface(
606                         vnm,
607                         path->dvr.fp_interface));
608         break;
609     case FIB_PATH_TYPE_DEAG:
610         s = format (s, " %sfib-index:%d",
611                     (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_RPF_ID ?  "m" : ""),
612                     path->deag.fp_tbl_id);
613         break;
614     case FIB_PATH_TYPE_RECEIVE:
615     case FIB_PATH_TYPE_INTF_RX:
616     case FIB_PATH_TYPE_SPECIAL:
617     case FIB_PATH_TYPE_EXCLUSIVE:
618         if (dpo_id_is_valid(&path->fp_dpo))
619         {
620             s = format(s, "%U", format_dpo_id,
621                        &path->fp_dpo, indent+2);
622         }
623         break;
624     }
625     return (s);
626 }
627
628 /*
629  * fib_path_last_lock_gone
630  *
631  * We don't share paths, we share path lists, so the [un]lock functions
632  * are no-ops
633  */
634 static void
635 fib_path_last_lock_gone (fib_node_t *node)
636 {
637     ASSERT(0);
638 }
639
640 static adj_index_t
641 fib_path_attached_next_hop_get_adj (fib_path_t *path,
642                                     vnet_link_t link)
643 {
644     if (vnet_sw_interface_is_p2p(vnet_get_main(),
645                                  path->attached_next_hop.fp_interface))
646     {
647         /*
648          * if the interface is p2p then the adj for the specific
649          * neighbour on that link will never exist. on p2p links
650          * the subnet address (the attached route) links to the
651          * auto-adj (see below), we want that adj here too.
652          */
653         return (adj_nbr_add_or_lock(dpo_proto_to_fib(path->fp_nh_proto),
654                                     link,
655                                     &zero_addr,
656                                     path->attached_next_hop.fp_interface));
657     }
658     else
659     {
660         return (adj_nbr_add_or_lock(dpo_proto_to_fib(path->fp_nh_proto),
661                                     link,
662                                     &path->attached_next_hop.fp_nh,
663                                     path->attached_next_hop.fp_interface));
664     }
665 }
666
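/*
 * fib_path_attached_next_hop_set
 *
 * Stack the path on the adjacency for its interface/next-hop pair and
 * become a child of that adjacency so the path hears about rewrite
 * changes. The path is left unresolved if the interface or adj is down.
 */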
667 static void
668 fib_path_attached_next_hop_set (fib_path_t *path)
669 {
670     /*
671      * resolve directly via the adjacency described by the
672      * interface and next-hop
673      */
674     dpo_set(&path->fp_dpo,
675             DPO_ADJACENCY,
676             path->fp_nh_proto,
677             fib_path_attached_next_hop_get_adj(
678                  path,
679                  dpo_proto_to_link(path->fp_nh_proto)));
680
681     /*
682      * become a child of the adjacency so we receive updates
683      * when its rewrite changes
684      */
685     path->fp_sibling = adj_child_add(path->fp_dpo.dpoi_index,
686                                      FIB_NODE_TYPE_PATH,
687                                      fib_path_get_index(path));
688
689     if (!vnet_sw_interface_is_up(vnet_get_main(),
690                                  path->attached_next_hop.fp_interface) ||
691         !adj_is_up(path->fp_dpo.dpoi_index))
692     {
693         path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
694     }
695 }
696
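/*
 * fib_path_attached_get_adj
 *
 * Return the adjacency an attached path resolves through: a neighbour
 * adj (to the zero address) on p2p links, a glean adj otherwise.
 */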
697 static adj_index_t
698 fib_path_attached_get_adj (fib_path_t *path,
699                            vnet_link_t link)
700 {
701     if (vnet_sw_interface_is_p2p(vnet_get_main(),
702                                  path->attached.fp_interface))
703     {
704         /*
705          * point-2-point interfaces do not require a glean, since
706          * there is nothing to ARP. Install a rewrite/nbr adj instead
707          */
708         return (adj_nbr_add_or_lock(dpo_proto_to_fib(path->fp_nh_proto),
709                                     link,
710                                     &zero_addr,
711                                     path->attached.fp_interface));
712     }
713     else
714     {
715         return (adj_glean_add_or_lock(dpo_proto_to_fib(path->fp_nh_proto),
716                                       link,
717                                       path->attached.fp_interface,
718                                       NULL));
719     }
720 }
721
722 /*
723  * create or update the path's recursive adj
724  */
725 static void
726 fib_path_recursive_adj_update (fib_path_t *path,
727                                fib_forward_chain_type_t fct,
728                                dpo_id_t *dpo)
729 {
730     dpo_id_t via_dpo = DPO_INVALID;
731
732     /*
733      * get the DPO to resolve through from the via-entry
734      */
735     fib_entry_contribute_forwarding(path->fp_via_fib,
736                                     fct,
737                                     &via_dpo);
738
739
740     /*
741      * hope for the best - clear if restrictions apply.
742      */
743     path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RESOLVED;
744
745     /*
746      * Validate any recursion constraints and over-ride the via
747      * adj if not met
748      */
749     if (path->fp_oper_flags & FIB_PATH_OPER_FLAG_RECURSIVE_LOOP)
750     {
751         path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
752         dpo_copy(&via_dpo, drop_dpo_get(path->fp_nh_proto));
753     }
754     else if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_RESOLVE_HOST)
755     {
756         /*
757          * the via FIB must be a host route.
758          * note the via FIB just added will always be a host route
759          * since it is an RR-sourced host route. So what we need to
760          * check is whether the route has other sources. If it does then
761          * some other source has added it as a host route. If it doesn't
762          * then it was added only here and inherits forwarding from a cover.
763          * the cover is not a host route.
764          * The RR source is the lowest priority source, so we check if it
765          * is the best. If it is, there are no other sources.
766          */
767         if (fib_entry_get_best_source(path->fp_via_fib) >= FIB_SOURCE_RR)
768         {
769             path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
770             dpo_copy(&via_dpo, drop_dpo_get(path->fp_nh_proto));
771
772             /*
773              * PIC edge trigger. let the load-balance maps know
774              */
775             load_balance_map_path_state_change(fib_path_get_index(path));
776         }
777     }
778     else if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_RESOLVE_ATTACHED)
779     {
780         /*
781          * RR source entries inherit the flags from the cover, so
782          * we can check the via directly
783          */
784         if (!(FIB_ENTRY_FLAG_ATTACHED & fib_entry_get_flags(path->fp_via_fib)))
785         {
786             path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
787             dpo_copy(&via_dpo, drop_dpo_get(path->fp_nh_proto));
788
789             /*
790              * PIC edge trigger. let the load-balance maps know
791              */
792             load_balance_map_path_state_change(fib_path_get_index(path));
793         }
794     }
795     /*
796      * check for over-riding factors on the FIB entry itself
797      */
798     if (!fib_entry_is_resolved(path->fp_via_fib))
799     {
800         path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
801         dpo_copy(&via_dpo, drop_dpo_get(path->fp_nh_proto));
802
803         /*
804          * PIC edge trigger. let the load-balance maps know
805          */
806         load_balance_map_path_state_change(fib_path_get_index(path));
807     }
808
809     /*
810      * If this path is contributing a drop, then it's not resolved
811      */
812     if (dpo_is_drop(&via_dpo) || load_balance_is_drop(&via_dpo))
813     {
814         path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
815     }
816
817     /*
818      * update the path's contributed DPO
819      */
820     dpo_copy(dpo, &via_dpo);
821
822     FIB_PATH_DBG(path, "recursive update:");
823
824     dpo_reset(&via_dpo);
825 }
826
827 /*
828  * re-evaluate the forwarding state for a via-fmask path
829  */
830 static void
831 fib_path_bier_fmask_update (fib_path_t *path,
832                             dpo_id_t *dpo)
833 {
834     bier_fmask_contribute_forwarding(path->bier_fmask.fp_bier_fmask, dpo);
835
836     /*
837      * if we are stacking on the drop, then the path is not resolved
838      */
839     if (dpo_is_drop(dpo))
840     {
841         path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
842     }
843     else
844     {
845         path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RESOLVED;
846     }
847 }
848
849 /*
850  * fib_path_is_permanent_drop
851  *
852  * Return !0 if the path is configured to permanently drop,
853  * despite other attributes.
854  */
855 static int
856 fib_path_is_permanent_drop (fib_path_t *path)
857 {
858     return ((path->fp_cfg_flags & FIB_PATH_CFG_FLAG_DROP) ||
859             (path->fp_oper_flags & FIB_PATH_OPER_FLAG_DROP));
860 }
861
862 /*
863  * fib_path_unresolve
864  *
865  * Remove our dependency on the resolution target
866  */
867 static void
868 fib_path_unresolve (fib_path_t *path)
869 {
870     /*
871      * the forced drop path does not need unresolving
872      */
873     if (fib_path_is_permanent_drop(path))
874     {
875         return;
876     }
877
878     switch (path->fp_type)
879     {
880     case FIB_PATH_TYPE_RECURSIVE:
881         if (FIB_NODE_INDEX_INVALID != path->fp_via_fib)
882         {
883             fib_entry_child_remove(path->fp_via_fib,
884                                    path->fp_sibling);
885             fib_table_entry_special_remove(path->recursive.fp_tbl_id,
886                                            fib_entry_get_prefix(path->fp_via_fib),
887                                            FIB_SOURCE_RR);
888             fib_table_unlock(path->recursive.fp_tbl_id,
889                              dpo_proto_to_fib(path->fp_nh_proto),
890                              FIB_SOURCE_RR);
891             path->fp_via_fib = FIB_NODE_INDEX_INVALID;
892         }
893         break;
894     case FIB_PATH_TYPE_BIER_FMASK:
895         bier_fmask_child_remove(path->fp_via_bier_fmask,
896                                 path->fp_sibling);
897         break;
898     case FIB_PATH_TYPE_BIER_IMP:
899         bier_imp_unlock(path->fp_dpo.dpoi_index);
900         break;
901     case FIB_PATH_TYPE_BIER_TABLE:
902         bier_table_ecmp_unlock(path->fp_via_bier_tbl);
903         break;
904     case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
905         adj_child_remove(path->fp_dpo.dpoi_index,
906                          path->fp_sibling);
907         adj_unlock(path->fp_dpo.dpoi_index);
908         break;
909     case FIB_PATH_TYPE_ATTACHED:
910         adj_child_remove(path->fp_dpo.dpoi_index,
911                          path->fp_sibling);
912         adj_unlock(path->fp_dpo.dpoi_index);
913         break;
914     case FIB_PATH_TYPE_UDP_ENCAP:
915         udp_encap_unlock(path->fp_dpo.dpoi_index);
916         break;
917     case FIB_PATH_TYPE_EXCLUSIVE:
918         dpo_reset(&path->exclusive.fp_ex_dpo);
919         break;
920     case FIB_PATH_TYPE_SPECIAL:
921     case FIB_PATH_TYPE_RECEIVE:
922     case FIB_PATH_TYPE_INTF_RX:
923     case FIB_PATH_TYPE_DEAG:
924     case FIB_PATH_TYPE_DVR:
925         /*
926          * these hold only the path's DPO, which is reset below.
927          */
928         break;
929     }
930
931     /*
932      * release the adj we were holding and pick up the
933      * drop just in case.
934      */
935     dpo_reset(&path->fp_dpo);
936     path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
937
938     return;
939 }
940
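/*
 * fib_path_to_chain_type
 *
 * Return the forwarding chain type this path contributes to. For MPLS
 * paths this depends on the EOS bit of a recursive via-label; otherwise
 * it is derived from the next-hop protocol.
 */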
941 static fib_forward_chain_type_t
942 fib_path_to_chain_type (const fib_path_t *path)
943 {
944     if (DPO_PROTO_MPLS == path->fp_nh_proto)
945     {
946         if (FIB_PATH_TYPE_RECURSIVE == path->fp_type &&
947             MPLS_EOS == path->recursive.fp_nh.fp_eos)
948         {
949             return (FIB_FORW_CHAIN_TYPE_MPLS_EOS);
950         }
951         else
952         {
953             return (FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS);
954         }
955     }
956     else
957     {
958         return (fib_forw_chain_type_from_dpo_proto(path->fp_nh_proto));
959     }
960 }
961
962 /*
963  * fib_path_back_walk_notify
964  *
965  * A back walk has reached this path.
966  */
967 static fib_node_back_walk_rc_t
968 fib_path_back_walk_notify (fib_node_t *node,
969                            fib_node_back_walk_ctx_t *ctx)
970 {
971     fib_path_t *path;
972
973     path = fib_path_from_fib_node(node);
974
975     FIB_PATH_DBG(path, "bw:%U",
976                  format_fib_node_bw_reason, ctx->fnbw_reason);
977
978     switch (path->fp_type)
979     {
980     case FIB_PATH_TYPE_RECURSIVE:
981         if (FIB_NODE_BW_REASON_FLAG_EVALUATE & ctx->fnbw_reason)
982         {
983             /*
984              * modify the recursive adjacency to use the new forwarding
985              * of the via-fib.
986              * this update is visible to packets in flight in the DP.
987              */
988             fib_path_recursive_adj_update(
989                 path,
990                 fib_path_to_chain_type(path),
991                 &path->fp_dpo);
992         }
993         if ((FIB_NODE_BW_REASON_FLAG_ADJ_UPDATE & ctx->fnbw_reason) ||
994             (FIB_NODE_BW_REASON_FLAG_ADJ_DOWN   & ctx->fnbw_reason))
995         {
996             /*
997              * ADJ updates (complete<->incomplete) do not need to propagate to
998              * recursive entries.
999              * The only reason its needed as far back as here, is that the adj
1000              * and the incomplete adj are a different DPO type, so the LBs need
1001              * to re-stack.
1002              * If this walk was quashed in the fib_entry, then any non-fib_path
1003              * children (like tunnels that collapse out the LB when they stack)
1004              * would not see the update.
1005              */
1006             return (FIB_NODE_BACK_WALK_CONTINUE);
1007         }
1008         break;
1009     case FIB_PATH_TYPE_BIER_FMASK:
1010         if (FIB_NODE_BW_REASON_FLAG_EVALUATE & ctx->fnbw_reason)
1011         {
1012             /*
1013              * update to use the BIER fmask's new forwarding
1014              */
1015             fib_path_bier_fmask_update(path, &path->fp_dpo);
1016         }
1017         if ((FIB_NODE_BW_REASON_FLAG_ADJ_UPDATE & ctx->fnbw_reason) ||
1018             (FIB_NODE_BW_REASON_FLAG_ADJ_DOWN   & ctx->fnbw_reason))
1019         {
1020             /*
1021              * ADJ updates (complete<->incomplete) do not need to propagate to
1022              * recursive entries.
1023              * The only reason its needed as far back as here, is that the adj
1024              * and the incomplete adj are a different DPO type, so the LBs need
1025              * to re-stack.
1026              * If this walk was quashed in the fib_entry, then any non-fib_path
1027              * children (like tunnels that collapse out the LB when they stack)
1028              * would not see the update.
1029              */
1030             return (FIB_NODE_BACK_WALK_CONTINUE);
1031         }
1032         break;
1033     case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
1034         /*
1036          * ADJ_UPDATE backwalks pass silently through here and up to
1037          * the path-list when the multipath adj collapse occurs.
1038          * The reason we do this is that the assumption is that VPP
1039          * runs in an environment where the Control-Plane is remote
1040          * and hence reacts slowly to link up/down. In order to remove
1041          * this down link from the ECMP set quickly, we back-walk.
1042          * VPP also has dedicated CPUs, so we are not stealing resources
1043          * from the CP to do so.
1044          */
1045         if (FIB_NODE_BW_REASON_FLAG_INTERFACE_UP & ctx->fnbw_reason)
1046         {
1047             if (path->fp_oper_flags & FIB_PATH_OPER_FLAG_RESOLVED)
1048             {
1049                 /*
1050                  * already resolved. no need to walk back again
1051                  */
1052                 return (FIB_NODE_BACK_WALK_CONTINUE);
1053             }
1054             path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RESOLVED;
1055         }
1056         if (FIB_NODE_BW_REASON_FLAG_INTERFACE_DOWN & ctx->fnbw_reason)
1057         {
1058             if (!(path->fp_oper_flags & FIB_PATH_OPER_FLAG_RESOLVED))
1059             {
1060                 /*
1061                  * already unresolved. no need to walk back again
1062                  */
1063                 return (FIB_NODE_BACK_WALK_CONTINUE);
1064             }
1065             path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
1066         }
1067         if (FIB_NODE_BW_REASON_FLAG_INTERFACE_DELETE & ctx->fnbw_reason)
1068         {
1069             /*
1070              * The interface this path resolves through has been deleted.
1071              * This will leave the path in a permanent drop state. The route
1072              * needs to be removed and readded (and hence the path-list deleted)
1073              * before it can forward again.
1074              */
1075             fib_path_unresolve(path);
1076             path->fp_oper_flags |= FIB_PATH_OPER_FLAG_DROP;
1077         }
1078         if (FIB_NODE_BW_REASON_FLAG_ADJ_UPDATE & ctx->fnbw_reason)
1079         {
1080             /*
1081              * restack the DPO to pick up the correct DPO sub-type
1082              */
1083             uword if_is_up;
1084             adj_index_t ai;
1085
1086             if_is_up = vnet_sw_interface_is_up(
1087                            vnet_get_main(),
1088                            path->attached_next_hop.fp_interface);
1089
1090             ai = fib_path_attached_next_hop_get_adj(
1091                      path,
1092                      dpo_proto_to_link(path->fp_nh_proto));
1093
1094             path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
1095             if (if_is_up && adj_is_up(ai))
1096             {
1097                 path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RESOLVED;
1098             }
1099
1100             dpo_set(&path->fp_dpo, DPO_ADJACENCY, path->fp_nh_proto, ai);
1101             adj_unlock(ai);
1102
1103             if (!if_is_up)
1104             {
1105                 /*
1106                  * If the interface is not up there is no reason to walk
1107                  * back to children. if we did they would only evaluate
1108                  * that this path is unresolved and hence it would
1109                  * not contribute the adjacency - so it would be wasted
1110                  * CPU time.
1111                  */
1112                 return (FIB_NODE_BACK_WALK_CONTINUE);
1113             }
1114         }
1115         if (FIB_NODE_BW_REASON_FLAG_ADJ_DOWN & ctx->fnbw_reason)
1116         {
1117             if (!(path->fp_oper_flags & FIB_PATH_OPER_FLAG_RESOLVED))
1118             {
1119                 /*
1120                  * already unresolved. no need to walk back again
1121                  */
1122                 return (FIB_NODE_BACK_WALK_CONTINUE);
1123             }
1124             /*
1125              * the adj has gone down. the path is no longer resolved.
1126              */
1127             path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
1128         }
1129         break;
1130     case FIB_PATH_TYPE_ATTACHED:
1131     case FIB_PATH_TYPE_DVR:
1132         /*
1133          * FIXME; this could schedule a lower priority walk, since attached
1134          * routes are not usually in ECMP configurations so the backwalk to
1135          * the FIB entry does not need to be high priority
1136          */
1137         if (FIB_NODE_BW_REASON_FLAG_INTERFACE_UP & ctx->fnbw_reason)
1138         {
1139             path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RESOLVED;
1140         }
1141         if (FIB_NODE_BW_REASON_FLAG_INTERFACE_DOWN & ctx->fnbw_reason)
1142         {
1143             path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
1144         }
1145         if (FIB_NODE_BW_REASON_FLAG_INTERFACE_DELETE & ctx->fnbw_reason)
1146         {
1147             fib_path_unresolve(path);
1148             path->fp_oper_flags |= FIB_PATH_OPER_FLAG_DROP;
1149         }
1150         break;
1151     case FIB_PATH_TYPE_UDP_ENCAP:
1152     {
1153         dpo_id_t via_dpo = DPO_INVALID;
1154
1155         /*
1156          * hope for the best - clear if restrictions apply.
1157          */
1158         path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RESOLVED;
1159
1160         udp_encap_contribute_forwarding(path->udp_encap.fp_udp_encap_id,
1161                                         path->fp_nh_proto,
1162                                         &via_dpo);
1163         /*
1164          * If this path is contributing a drop, then it's not resolved
1165          */
1166         if (dpo_is_drop(&via_dpo) || load_balance_is_drop(&via_dpo))
1167         {
1168             path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
1169         }
1170
1171         /*
1172          * update the path's contributed DPO
1173          */
1174         dpo_copy(&path->fp_dpo, &via_dpo);
1175         dpo_reset(&via_dpo);
1176         break;
1177     }
1178     case FIB_PATH_TYPE_INTF_RX:
1179         ASSERT(0);
1180     case FIB_PATH_TYPE_DEAG:
1181         /*
1182          * FIXME When VRF delete is allowed this will need a poke.
1183          */
1184     case FIB_PATH_TYPE_SPECIAL:
1185     case FIB_PATH_TYPE_RECEIVE:
1186     case FIB_PATH_TYPE_EXCLUSIVE:
1187     case FIB_PATH_TYPE_BIER_TABLE:
1188     case FIB_PATH_TYPE_BIER_IMP:
1189         /*
1190          * these path types have no parents. so to be
1191          * walked from one is unexpected.
1192          */
1193         ASSERT(0);
1194         break;
1195     }
1196
1197     /*
1198      * propagate the backwalk further to the path-list
1199      */
1200     fib_path_list_back_walk(path->fp_pl_index, ctx);
1201
1202     return (FIB_NODE_BACK_WALK_CONTINUE);
1203 }
1204
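/*
 * fib_path_memory_show
 *
 * Report the memory usage of the path pool; invoked via the node
 * virtual function table's fnv_mem_show hook below.
 */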
1205 static void
1206 fib_path_memory_show (void)
1207 {
1208     fib_show_memory_usage("Path",
1209                           pool_elts(fib_path_pool),
1210                           pool_len(fib_path_pool),
1211                           sizeof(fib_path_t));
1212 }
1213
1214 /*
1215  * The FIB path's graph node virtual function table
1216  */
1217 static const fib_node_vft_t fib_path_vft = {
1218     .fnv_get = fib_path_get_node,
1219     .fnv_last_lock = fib_path_last_lock_gone,
1220     .fnv_back_walk = fib_path_back_walk_notify,
1221     .fnv_mem_show = fib_path_memory_show,
1222 };
1223
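/*
 * fib_path_route_flags_to_cfg_flags
 *
 * Map the route-path flags supplied by the client into the path's
 * configuration flags.
 */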
1224 static fib_path_cfg_flags_t
1225 fib_path_route_flags_to_cfg_flags (const fib_route_path_t *rpath)
1226 {
1227     fib_path_cfg_flags_t cfg_flags = FIB_PATH_CFG_FLAG_NONE;
1228
1229     if (rpath->frp_flags & FIB_ROUTE_PATH_RESOLVE_VIA_HOST)
1230         cfg_flags |= FIB_PATH_CFG_FLAG_RESOLVE_HOST;
1231     if (rpath->frp_flags & FIB_ROUTE_PATH_RESOLVE_VIA_ATTACHED)
1232         cfg_flags |= FIB_PATH_CFG_FLAG_RESOLVE_ATTACHED;
1233     if (rpath->frp_flags & FIB_ROUTE_PATH_LOCAL)
1234         cfg_flags |= FIB_PATH_CFG_FLAG_LOCAL;
1235     if (rpath->frp_flags & FIB_ROUTE_PATH_ATTACHED)
1236         cfg_flags |= FIB_PATH_CFG_FLAG_ATTACHED;
1237     if (rpath->frp_flags & FIB_ROUTE_PATH_INTF_RX)
1238         cfg_flags |= FIB_PATH_CFG_FLAG_INTF_RX;
1239     if (rpath->frp_flags & FIB_ROUTE_PATH_RPF_ID)
1240         cfg_flags |= FIB_PATH_CFG_FLAG_RPF_ID;
1241     if (rpath->frp_flags & FIB_ROUTE_PATH_EXCLUSIVE)
1242         cfg_flags |= FIB_PATH_CFG_FLAG_EXCLUSIVE;
1243     if (rpath->frp_flags & FIB_ROUTE_PATH_DROP)
1244         cfg_flags |= FIB_PATH_CFG_FLAG_DROP;
1245     if (rpath->frp_flags & FIB_ROUTE_PATH_SOURCE_LOOKUP)
1246         cfg_flags |= FIB_PATH_CFG_FLAG_DEAG_SRC;
1247
1248     return (cfg_flags);
1249 }
1250
1251 /*
1252  * fib_path_create
1253  *
1254  * Create and initialise a new path object.
1255  * return the index of the path.
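 *
 * A minimal usage sketch (illustrative only; names like pl_index,
 * sw_if_index and nh_addr are placeholders, and real callers,
 * typically the path-list code, fill in more of fib_route_path_t):
 *
 *   fib_route_path_t rpath = {
 *       .frp_proto = DPO_PROTO_IP4,
 *       .frp_addr = nh_addr,
 *       .frp_sw_if_index = sw_if_index,
 *       .frp_weight = 1,
 *   };
 *   fib_node_index_t path_index = fib_path_create (pl_index, &rpath);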
1256  */
1257 fib_node_index_t
1258 fib_path_create (fib_node_index_t pl_index,
1259                  const fib_route_path_t *rpath)
1260 {
1261     fib_path_t *path;
1262
1263     pool_get(fib_path_pool, path);
1264     clib_memset(path, 0, sizeof(*path));
1265
1266     fib_node_init(&path->fp_node,
1267                   FIB_NODE_TYPE_PATH);
1268
1269     dpo_reset(&path->fp_dpo);
1270     path->fp_pl_index = pl_index;
1271     path->fp_nh_proto = rpath->frp_proto;
1272     path->fp_via_fib = FIB_NODE_INDEX_INVALID;
1273     path->fp_weight = rpath->frp_weight;
1274     if (0 == path->fp_weight)
1275     {
1276         /*
1277          * a weight of 0 is a meaningless value. We could either reject it, and thus force
1278          * clients to always use 1, or we can accept it and fix it up appropriately.
1279          */
1280         path->fp_weight = 1;
1281     }
1282     path->fp_preference = rpath->frp_preference;
1283     path->fp_cfg_flags = fib_path_route_flags_to_cfg_flags(rpath);
1284
1285     /*
1286      * deduce the path's type from the parameters and save what is needed.
1287      */
1288     if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_LOCAL)
1289     {
1290         path->fp_type = FIB_PATH_TYPE_RECEIVE;
1291         path->receive.fp_interface = rpath->frp_sw_if_index;
1292         path->receive.fp_addr = rpath->frp_addr;
1293     }
1294     else if (rpath->frp_flags & FIB_ROUTE_PATH_UDP_ENCAP)
1295     {
1296         path->fp_type = FIB_PATH_TYPE_UDP_ENCAP;
1297         path->udp_encap.fp_udp_encap_id = rpath->frp_udp_encap_id;
1298     }
1299     else if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_INTF_RX)
1300     {
1301         path->fp_type = FIB_PATH_TYPE_INTF_RX;
1302         path->intf_rx.fp_interface = rpath->frp_sw_if_index;
1303     }
1304     else if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_RPF_ID)
1305     {
1306         path->fp_type = FIB_PATH_TYPE_DEAG;
1307         path->deag.fp_tbl_id = rpath->frp_fib_index;
1308         path->deag.fp_rpf_id = rpath->frp_rpf_id;
1309     }
1310     else if (rpath->frp_flags & FIB_ROUTE_PATH_BIER_FMASK)
1311     {
1312         path->fp_type = FIB_PATH_TYPE_BIER_FMASK;
1313         path->bier_fmask.fp_bier_fmask = rpath->frp_bier_fmask;
1314     }
1315     else if (rpath->frp_flags & FIB_ROUTE_PATH_BIER_IMP)
1316     {
1317         path->fp_type = FIB_PATH_TYPE_BIER_IMP;
1318         path->bier_imp.fp_bier_imp = rpath->frp_bier_imp;
1319     }
1320     else if (rpath->frp_flags & FIB_ROUTE_PATH_BIER_TABLE)
1321     {
1322         path->fp_type = FIB_PATH_TYPE_BIER_TABLE;
1323         path->bier_table.fp_bier_tbl = rpath->frp_bier_tbl;
1324     }
1325     else if (rpath->frp_flags & FIB_ROUTE_PATH_DEAG)
1326     {
1327         path->fp_type = FIB_PATH_TYPE_DEAG;
1328         path->deag.fp_tbl_id = rpath->frp_fib_index;
1329     }
1330     else if (rpath->frp_flags & FIB_ROUTE_PATH_DVR)
1331     {
1332         path->fp_type = FIB_PATH_TYPE_DVR;
1333         path->dvr.fp_interface = rpath->frp_sw_if_index;
1334     }
1335     else if (rpath->frp_flags & FIB_ROUTE_PATH_EXCLUSIVE)
1336     {
1337         path->fp_type = FIB_PATH_TYPE_EXCLUSIVE;
1338         dpo_copy(&path->exclusive.fp_ex_dpo, &rpath->dpo);
1339     }
1340     else if (~0 != rpath->frp_sw_if_index)
1341     {
1342         if (ip46_address_is_zero(&rpath->frp_addr))
1343         {
1344             path->fp_type = FIB_PATH_TYPE_ATTACHED;
1345             path->attached.fp_interface = rpath->frp_sw_if_index;
1346         }
1347         else
1348         {
1349             path->fp_type = FIB_PATH_TYPE_ATTACHED_NEXT_HOP;
1350             path->attached_next_hop.fp_interface = rpath->frp_sw_if_index;
1351             path->attached_next_hop.fp_nh = rpath->frp_addr;
1352         }
1353     }
1354     else
1355     {
1356         if (ip46_address_is_zero(&rpath->frp_addr))
1357         {
1358             if (~0 == rpath->frp_fib_index)
1359             {
1360                 path->fp_type = FIB_PATH_TYPE_SPECIAL;
1361             }
1362             else
1363             {
1364                 path->fp_type = FIB_PATH_TYPE_DEAG;
1365                 path->deag.fp_tbl_id = rpath->frp_fib_index;
1366                 path->deag.fp_rpf_id = ~0;
1367             }
1368         }
1369         else
1370         {
1371             path->fp_type = FIB_PATH_TYPE_RECURSIVE;
1372             if (DPO_PROTO_MPLS == path->fp_nh_proto)
1373             {
1374                 path->recursive.fp_nh.fp_local_label = rpath->frp_local_label;
1375                 path->recursive.fp_nh.fp_eos = rpath->frp_eos;
1376             }
1377             else
1378             {
1379                 path->recursive.fp_nh.fp_ip = rpath->frp_addr;
1380             }
1381             path->recursive.fp_tbl_id = rpath->frp_fib_index;
1382         }
1383     }
1384
1385     FIB_PATH_DBG(path, "create");
1386
1387     return (fib_path_get_index(path));
1388 }
1389
1390 /*
1391  * fib_path_create_special
1392  *
1393  * Create and initialise a new path object.
1394  * return the index of the path.
1395  */
1396 fib_node_index_t
1397 fib_path_create_special (fib_node_index_t pl_index,
1398                          dpo_proto_t nh_proto,
1399                          fib_path_cfg_flags_t flags,
1400                          const dpo_id_t *dpo)
1401 {
1402     fib_path_t *path;
1403
1404     pool_get(fib_path_pool, path);
1405     clib_memset(path, 0, sizeof(*path));
1406
1407     fib_node_init(&path->fp_node,
1408                   FIB_NODE_TYPE_PATH);
1409     dpo_reset(&path->fp_dpo);
1410
1411     path->fp_pl_index = pl_index;
1412     path->fp_weight = 1;
1413     path->fp_preference = 0;
1414     path->fp_nh_proto = nh_proto;
1415     path->fp_via_fib = FIB_NODE_INDEX_INVALID;
1416     path->fp_cfg_flags = flags;
1417
1418     if (FIB_PATH_CFG_FLAG_DROP & flags)
1419     {
1420         path->fp_type = FIB_PATH_TYPE_SPECIAL;
1421     }
1422     else if (FIB_PATH_CFG_FLAG_LOCAL & flags)
1423     {
1424         path->fp_type = FIB_PATH_TYPE_RECEIVE;
1425         path->attached.fp_interface = FIB_NODE_INDEX_INVALID;
1426     }
1427     else
1428     {
1429         path->fp_type = FIB_PATH_TYPE_EXCLUSIVE;
1430         ASSERT(NULL != dpo);
1431         dpo_copy(&path->exclusive.fp_ex_dpo, dpo);
1432     }
1433
1434     return (fib_path_get_index(path));
1435 }
1436
1437 /*
1438  * fib_path_copy
1439  *
1440  * Copy a path. return index of new path.
1441  */
1442 fib_node_index_t
1443 fib_path_copy (fib_node_index_t path_index,
1444                fib_node_index_t path_list_index)
1445 {
1446     fib_path_t *path, *orig_path;
1447
1448     pool_get(fib_path_pool, path);
1449
1450     orig_path = fib_path_get(path_index);
1451     ASSERT(NULL != orig_path);
1452
1453     memcpy(path, orig_path, sizeof(*path));
1454
1455     FIB_PATH_DBG(path, "create-copy:%d", path_index);
1456
1457     /*
1458      * reset the dynamic section
1459      */
1460     fib_node_init(&path->fp_node, FIB_NODE_TYPE_PATH);
1461     path->fp_oper_flags     = FIB_PATH_OPER_FLAG_NONE;
1462     path->fp_pl_index  = path_list_index;
1463     path->fp_via_fib   = FIB_NODE_INDEX_INVALID;
1464     clib_memset(&path->fp_dpo, 0, sizeof(path->fp_dpo));
1465     dpo_reset(&path->fp_dpo);
1466
1467     return (fib_path_get_index(path));
1468 }
1469
1470 /*
1471  * fib_path_destroy
1472  *
1473  * destroy a path that is no longer required
1474  */
1475 void
1476 fib_path_destroy (fib_node_index_t path_index)
1477 {
1478     fib_path_t *path;
1479
1480     path = fib_path_get(path_index);
1481
1482     ASSERT(NULL != path);
1483     FIB_PATH_DBG(path, "destroy");
1484
1485     fib_path_unresolve(path);
1486
1487     fib_node_deinit(&path->fp_node);
1488     pool_put(fib_path_pool, path);
1489 }
1490
1491 /*
1492  * fib_path_hash
1493  *
1494  * hash a path on its key: the region between the hash start/end markers
1495  */
1496 uword
1497 fib_path_hash (fib_node_index_t path_index)
1498 {
1499     fib_path_t *path;
1500
1501     path = fib_path_get(path_index);
1502
1503     return (hash_memory(STRUCT_MARK_PTR(path, path_hash_start),
1504                         (STRUCT_OFFSET_OF(fib_path_t, path_hash_end) -
1505                          STRUCT_OFFSET_OF(fib_path_t, path_hash_start)),
1506                         0));
1507 }
1508
1509 /*
1510  * fib_path_cmp_i
1511  *
1512  * Compare two paths for equivalence.
1513  */
1514 static int
1515 fib_path_cmp_i (const fib_path_t *path1,
1516                 const fib_path_t *path2)
1517 {
1518     int res;
1519
1520     res = 1;
1521
1522     /*
1523      * paths of different types and protocol are not equal.
1524      * different weights and/or preference only are the same path.
1525      */
1526     if (path1->fp_type != path2->fp_type)
1527     {
1528         res = (path1->fp_type - path2->fp_type);
1529     }
1530     else if (path1->fp_nh_proto != path2->fp_nh_proto)
1531     {
1532         res = (path1->fp_nh_proto - path2->fp_nh_proto);
1533     }
1534     else
1535     {
1536         /*
1537          * both paths are of the same type.
1538          * consider each type and its attributes in turn.
1539          */
1540         switch (path1->fp_type)
1541         {
1542         case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
1543             res = ip46_address_cmp(&path1->attached_next_hop.fp_nh,
1544                                    &path2->attached_next_hop.fp_nh);
1545             if (0 == res) {
1546                 res = (path1->attached_next_hop.fp_interface -
1547                        path2->attached_next_hop.fp_interface);
1548             }
1549             break;
1550         case FIB_PATH_TYPE_ATTACHED:
1551             res = (path1->attached.fp_interface -
1552                    path2->attached.fp_interface);
1553             break;
1554         case FIB_PATH_TYPE_RECURSIVE:
1555             res = ip46_address_cmp(&path1->recursive.fp_nh,
1556                                    &path2->recursive.fp_nh);
1557  
1558             if (0 == res)
1559             {
1560                 res = (path1->recursive.fp_tbl_id - path2->recursive.fp_tbl_id);
1561             }
1562             break;
1563         case FIB_PATH_TYPE_BIER_FMASK:
1564             res = (path1->bier_fmask.fp_bier_fmask -
1565                    path2->bier_fmask.fp_bier_fmask);
1566             break;
1567         case FIB_PATH_TYPE_BIER_IMP:
1568             res = (path1->bier_imp.fp_bier_imp -
1569                    path2->bier_imp.fp_bier_imp);
1570             break;
1571         case FIB_PATH_TYPE_BIER_TABLE:
1572             res = bier_table_id_cmp(&path1->bier_table.fp_bier_tbl,
1573                                     &path2->bier_table.fp_bier_tbl);
1574             break;
1575         case FIB_PATH_TYPE_DEAG:
1576             res = (path1->deag.fp_tbl_id - path2->deag.fp_tbl_id);
1577             if (0 == res)
1578             {
1579                 res = (path1->deag.fp_rpf_id - path2->deag.fp_rpf_id);
1580             }
1581             break;
1582         case FIB_PATH_TYPE_INTF_RX:
1583             res = (path1->intf_rx.fp_interface - path2->intf_rx.fp_interface);
1584             break;
1585         case FIB_PATH_TYPE_UDP_ENCAP:
1586             res = (path1->udp_encap.fp_udp_encap_id - path2->udp_encap.fp_udp_encap_id);
1587             break;
1588         case FIB_PATH_TYPE_DVR:
1589             res = (path1->dvr.fp_interface - path2->dvr.fp_interface);
1590             break;
1591         case FIB_PATH_TYPE_EXCLUSIVE:
1592             res = dpo_cmp(&path1->exclusive.fp_ex_dpo, &path2->exclusive.fp_ex_dpo);
1593             break;
1594         case FIB_PATH_TYPE_SPECIAL:
1595         case FIB_PATH_TYPE_RECEIVE:
1596             res = 0;
1597             break;
1598         }
1599     }
1600     return (res);
1601 }
1602
1603 /*
1604  * fib_path_cmp_for_sort
1605  *
1606  * Compare two paths for equivalence. Used during path sorting.
1607  * As usual 0 means equal.
1608  */
1609 int
1610 fib_path_cmp_for_sort (void * v1,
1611                        void * v2)
1612 {
1613     fib_node_index_t *pi1 = v1, *pi2 = v2;
1614     fib_path_t *path1, *path2;
1615
1616     path1 = fib_path_get(*pi1);
1617     path2 = fib_path_get(*pi2);
1618
1619     /*
1620      * when sorting paths we want the highest preference paths
1621      * first, so that the set of choices built is in preference order
1622      */
1623     if (path1->fp_preference != path2->fp_preference)
1624     {
1625         return (path1->fp_preference - path2->fp_preference);
1626     }
1627
1628     return (fib_path_cmp_i(path1, path2));
1629 }
1630
1631 /*
1632  * fib_path_cmp
1633  *
1634  * Compare two paths for equivalence.
1635  */
1636 int
1637 fib_path_cmp (fib_node_index_t pi1,
1638               fib_node_index_t pi2)
1639 {
1640     fib_path_t *path1, *path2;
1641
1642     path1 = fib_path_get(pi1);
1643     path2 = fib_path_get(pi2);
1644
1645     return (fib_path_cmp_i(path1, path2));
1646 }
1647
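/*
 * fib_path_cmp_w_route_path
 *
 * Compare a path against a control-plane supplied route-path description.
 * As usual 0 means equal.
 */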
1648 int
1649 fib_path_cmp_w_route_path (fib_node_index_t path_index,
1650                            const fib_route_path_t *rpath)
1651 {
1652     fib_path_t *path;
1653     int res;
1654
1655     path = fib_path_get(path_index);
1656
1657     res = 1;
1658
1659     if (path->fp_weight != rpath->frp_weight)
1660     {
1661         res = (path->fp_weight - rpath->frp_weight);
1662     }
1663     else
1664     {
1665         /*
1666          * the weights are the same, so compare the path's
1667          * type-specific attributes against the route-path's.
1668          */
1669         switch (path->fp_type)
1670         {
1671         case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
1672             res = ip46_address_cmp(&path->attached_next_hop.fp_nh,
1673                                    &rpath->frp_addr);
1674             if (0 == res)
1675             {
1676                 res = (path->attached_next_hop.fp_interface -
1677                        rpath->frp_sw_if_index);
1678             }
1679             break;
1680         case FIB_PATH_TYPE_ATTACHED:
1681             res = (path->attached.fp_interface - rpath->frp_sw_if_index);
1682             break;
1683         case FIB_PATH_TYPE_RECURSIVE:
1684             if (DPO_PROTO_MPLS == path->fp_nh_proto)
1685             {
1686                 res = path->recursive.fp_nh.fp_local_label - rpath->frp_local_label;
1687
1688                 if (res == 0)
1689                 {
1690                     res = path->recursive.fp_nh.fp_eos - rpath->frp_eos;
1691                 }
1692             }
1693             else
1694             {
1695                 res = ip46_address_cmp(&path->recursive.fp_nh.fp_ip,
1696                                        &rpath->frp_addr);
1697             }
1698
1699             if (0 == res)
1700             {
1701                 res = (path->recursive.fp_tbl_id - rpath->frp_fib_index);
1702             }
1703             break;
1704         case FIB_PATH_TYPE_BIER_FMASK:
1705             res = (path->bier_fmask.fp_bier_fmask - rpath->frp_bier_fmask);
1706             break;
1707         case FIB_PATH_TYPE_BIER_IMP:
1708             res = (path->bier_imp.fp_bier_imp - rpath->frp_bier_imp);
1709             break;
1710         case FIB_PATH_TYPE_BIER_TABLE:
1711             res = bier_table_id_cmp(&path->bier_table.fp_bier_tbl,
1712                                     &rpath->frp_bier_tbl);
1713             break;
1714         case FIB_PATH_TYPE_INTF_RX:
1715             res = (path->intf_rx.fp_interface - rpath->frp_sw_if_index);
1716             break;
1717         case FIB_PATH_TYPE_UDP_ENCAP:
1718             res = (path->udp_encap.fp_udp_encap_id - rpath->frp_udp_encap_id);
1719             break;
1720         case FIB_PATH_TYPE_DEAG:
1721             res = (path->deag.fp_tbl_id - rpath->frp_fib_index);
1722             if (0 == res)
1723             {
1724                 res = (path->deag.fp_rpf_id - rpath->frp_rpf_id);
1725             }
1726             break;
1727         case FIB_PATH_TYPE_DVR:
1728             res = (path->dvr.fp_interface - rpath->frp_sw_if_index);
1729             break;
1730         case FIB_PATH_TYPE_EXCLUSIVE:
1731             res = dpo_cmp(&path->exclusive.fp_ex_dpo, &rpath->dpo);
1732             break;
1733         case FIB_PATH_TYPE_SPECIAL:
1734         case FIB_PATH_TYPE_RECEIVE:
1735             res = 0;
1736             break;
1737         }
1738     }
1739     return (res);
1740 }
1741
1742 /*
1743  * fib_path_recursive_loop_detect
1744  *
1745  * A forward walk of the FIB object graph to detect a cycle/loop. This
1746  * walk is initiated when an entry links to a new path-list or unlinks from an old one.
1747  * The entry vector passed contains all the FIB entries that are children of this
1748  * path (i.e. all the entries encountered on the walk so far). If this vector
1749  * contains the entry this path resolves via, then a loop is about to form.
1750  * The loop must be allowed to form, since we need the dependencies in place
1751  * so that we can track when the loop breaks.
1752  * However, we MUST not produce a loop in the forwarding graph (else packets
1753  * would loop around the switch path until the loop breaks), so we mark recursive
1754  * paths as looped so that they do not contribute forwarding information.
1755  * By marking the path as looped, an entry such as:
1756  *    X/Y
1757  *     via a.a.a.a (looped)
1758  *     via b.b.b.b (not looped)
1759  * can still forward using the info provided by b.b.b.b only
1760  */
1761 int
1762 fib_path_recursive_loop_detect (fib_node_index_t path_index,
1763                                 fib_node_index_t **entry_indicies)
1764 {
1765     fib_path_t *path;
1766
1767     path = fib_path_get(path_index);
1768
1769     /*
1770      * the forced drop path is never looped, cos it is never resolved.
1771      */
1772     if (fib_path_is_permanent_drop(path))
1773     {
1774         return (0);
1775     }
1776
1777     switch (path->fp_type)
1778     {
1779     case FIB_PATH_TYPE_RECURSIVE:
1780     {
1781         fib_node_index_t *entry_index, *entries;
1782         int looped = 0;
1783         entries = *entry_indicies;
1784
1785         vec_foreach(entry_index, entries) {
1786             if (*entry_index == path->fp_via_fib)
1787             {
1788                 /*
1789                  * the entry that is about to link to this path-list (or
1790                  * one of this path-list's children) is the same entry that
1791                  * this recursive path resolves through. this is a cycle.
1792                  * abort the walk.
1793                  */
1794                 looped = 1;
1795                 break;
1796             }
1797         }
1798
1799         if (looped)
1800         {
1801             FIB_PATH_DBG(path, "recursive loop formed");
1802             path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RECURSIVE_LOOP;
1803
1804             dpo_copy(&path->fp_dpo, drop_dpo_get(path->fp_nh_proto));
1805         }
1806         else
1807         {
1808             /*
1809              * no loop here yet. keep forward walking the graph.
1810              */
1811             if (fib_entry_recursive_loop_detect(path->fp_via_fib, entry_indicies))
1812             {
1813                 FIB_PATH_DBG(path, "recursive loop formed");
1814                 path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RECURSIVE_LOOP;
1815             }
1816             else
1817             {
1818                 FIB_PATH_DBG(path, "recursive loop cleared");
1819                 path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RECURSIVE_LOOP;
1820             }
1821         }
1822         break;
1823     }
1824     case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
1825     case FIB_PATH_TYPE_ATTACHED:
1826         if (adj_recursive_loop_detect(path->fp_dpo.dpoi_index,
1827                                       entry_indicies))
1828         {
1829             FIB_PATH_DBG(path, "recursive loop formed");
1830             path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RECURSIVE_LOOP;
1831         }
1832         else
1833         {
1834             FIB_PATH_DBG(path, "recursive loop cleared");
1835             path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RECURSIVE_LOOP;
1836         }
1837         break;
1838     case FIB_PATH_TYPE_SPECIAL:
1839     case FIB_PATH_TYPE_DEAG:
1840     case FIB_PATH_TYPE_DVR:
1841     case FIB_PATH_TYPE_RECEIVE:
1842     case FIB_PATH_TYPE_INTF_RX:
1843     case FIB_PATH_TYPE_UDP_ENCAP:
1844     case FIB_PATH_TYPE_EXCLUSIVE:
1845     case FIB_PATH_TYPE_BIER_FMASK:
1846     case FIB_PATH_TYPE_BIER_TABLE:
1847     case FIB_PATH_TYPE_BIER_IMP:
1848         /*
1849          * these path types cannot be part of a loop, since they are the leaves
1850          * of the graph.
1851          */
1852         break;
1853     }
1854
1855     return (fib_path_is_looped(path_index));
1856 }
1857
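/*
 * fib_path_resolve
 *
 * Resolve the path; construct the DPO it will use to forward and, where
 * needed, become a child of the object (adjacency, FIB entry, BIER object)
 * it resolves through so that updates are received.
 * Returns non-zero if the path is resolved.
 */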
1858 int
1859 fib_path_resolve (fib_node_index_t path_index)
1860 {
1861     fib_path_t *path;
1862
1863     path = fib_path_get(path_index);
1864
1865     /*
1866      * hope for the best.
1867      */
1868     path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RESOLVED;
1869
1870     /*
1871      * the forced drop path resolves via the drop adj
1872      */
1873     if (fib_path_is_permanent_drop(path))
1874     {
1875         dpo_copy(&path->fp_dpo, drop_dpo_get(path->fp_nh_proto));
1876         path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
1877         return (fib_path_is_resolved(path_index));
1878     }
1879
1880     switch (path->fp_type)
1881     {
1882     case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
1883         fib_path_attached_next_hop_set(path);
1884         break;
1885     case FIB_PATH_TYPE_ATTACHED:
1886     {
1887         dpo_id_t tmp = DPO_INVALID;
1888
1889         /*
1890          * an attached path is not resolved if path->attached.fp_interface is down
1891          */
1892         if (!vnet_sw_interface_is_up(vnet_get_main(),
1893                                      path->attached.fp_interface))
1894         {
1895             path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
1896         }
1897         dpo_set(&tmp,
1898                 DPO_ADJACENCY,
1899                 path->fp_nh_proto,
1900                 fib_path_attached_get_adj(path,
1901                                           dpo_proto_to_link(path->fp_nh_proto)));
1902
1903         /*
1904          * re-fetch after possible mem realloc
1905          */
1906         path = fib_path_get(path_index);
1907         dpo_copy(&path->fp_dpo, &tmp);
1908
1909         /*
1910          * become a child of the adjacency so we receive updates
1911          * when the interface state changes
1912          */
1913         path->fp_sibling = adj_child_add(path->fp_dpo.dpoi_index,
1914                                          FIB_NODE_TYPE_PATH,
1915                                          fib_path_get_index(path));
1916         dpo_reset(&tmp);
1917         break;
1918     }
1919     case FIB_PATH_TYPE_RECURSIVE:
1920     {
1921         /*
1922          * Create a RR source entry in the table for the address
1923          * that this path recurses through.
1924          * This resolve action is recursive, hence we may create
1925          * more paths in the process; those creates may realloc the
1926          * path pool and hence invalidate this path pointer.
1927          */
1928         fib_node_index_t fei;
1929         fib_prefix_t pfx;
1930
1931         ASSERT(FIB_NODE_INDEX_INVALID == path->fp_via_fib);
1932
1933         if (DPO_PROTO_MPLS == path->fp_nh_proto)
1934         {
1935             fib_prefix_from_mpls_label(path->recursive.fp_nh.fp_local_label,
1936                                        path->recursive.fp_nh.fp_eos,
1937                                        &pfx);
1938         }
1939         else
1940         {
1941             fib_prefix_from_ip46_addr(&path->recursive.fp_nh.fp_ip, &pfx);
1942         }
1943
1944         fib_table_lock(path->recursive.fp_tbl_id,
1945                        dpo_proto_to_fib(path->fp_nh_proto),
1946                        FIB_SOURCE_RR);
1947         fei = fib_table_entry_special_add(path->recursive.fp_tbl_id,
1948                                           &pfx,
1949                                           FIB_SOURCE_RR,
1950                                           FIB_ENTRY_FLAG_NONE);
1951
1952         path = fib_path_get(path_index);
1953         path->fp_via_fib = fei;
1954
1955         /*
1956          * become a dependent child of the entry so the path is 
1957          * informed when the forwarding for the entry changes.
1958          */
1959         path->fp_sibling = fib_entry_child_add(path->fp_via_fib,
1960                                                FIB_NODE_TYPE_PATH,
1961                                                fib_path_get_index(path));
1962
1963         /*
1964          * create and configure the IP DPO
1965          */
1966         fib_path_recursive_adj_update(
1967             path,
1968             fib_path_to_chain_type(path),
1969             &path->fp_dpo);
1970
1971         break;
1972     }
1973     case FIB_PATH_TYPE_BIER_FMASK:
1974     {
1975         /*
1976          * become a dependent child of the entry so the path is
1977          * informed when the forwarding for the entry changes.
1978          */
1979         path->fp_sibling = bier_fmask_child_add(path->bier_fmask.fp_bier_fmask,
1980                                                 FIB_NODE_TYPE_PATH,
1981                                                 fib_path_get_index(path));
1982
1983         path->fp_via_bier_fmask = path->bier_fmask.fp_bier_fmask;
1984         fib_path_bier_fmask_update(path, &path->fp_dpo);
1985
1986         break;
1987     }
1988     case FIB_PATH_TYPE_BIER_IMP:
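        /*
         * Lock the BIER imposition object and use the forwarding
         * it contributes.
         */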
1989         bier_imp_lock(path->bier_imp.fp_bier_imp);
1990         bier_imp_contribute_forwarding(path->bier_imp.fp_bier_imp,
1991                                        DPO_PROTO_IP4,
1992                                        &path->fp_dpo);
1993         break;
1994     case FIB_PATH_TYPE_BIER_TABLE:
1995     {
1996         /*
1997          * Find/create the BIER table to link to
1998          */
1999         ASSERT(FIB_NODE_INDEX_INVALID == path->fp_via_bier_tbl);
2000
2001         path->fp_via_bier_tbl =
2002             bier_table_ecmp_create_and_lock(&path->bier_table.fp_bier_tbl);
2003
2004         bier_table_contribute_forwarding(path->fp_via_bier_tbl,
2005                                          &path->fp_dpo);
2006         break;
2007     }
2008     case FIB_PATH_TYPE_SPECIAL:
2009         /*
2010          * Resolve via the drop
2011          */
2012         dpo_copy(&path->fp_dpo, drop_dpo_get(path->fp_nh_proto));
2013         break;
2014     case FIB_PATH_TYPE_DEAG:
2015     {
2016         if (DPO_PROTO_BIER == path->fp_nh_proto)
2017         {
2018             bier_disp_table_contribute_forwarding(path->deag.fp_tbl_id,
2019                                                   &path->fp_dpo);
2020         }
2021         else
2022         {
2023             /*
2024              * Resolve via a lookup DPO.
2025              * FIXME. control plane should add routes with a table ID
2026              */
2027             lookup_input_t input;
2028             lookup_cast_t cast;
2029
2030             cast = (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_RPF_ID ?
2031                     LOOKUP_MULTICAST :
2032                     LOOKUP_UNICAST);
2033             input = (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_DEAG_SRC ?
2034                      LOOKUP_INPUT_SRC_ADDR :
2035                      LOOKUP_INPUT_DST_ADDR);
2036
2037             lookup_dpo_add_or_lock_w_fib_index(path->deag.fp_tbl_id,
2038                                                path->fp_nh_proto,
2039                                                cast,
2040                                                input,
2041                                                LOOKUP_TABLE_FROM_CONFIG,
2042                                                &path->fp_dpo);
2043         }
2044         break;
2045     }
2046     case FIB_PATH_TYPE_DVR:
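        /*
         * Resolve via a DVR DPO on the interface.
         */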
2047         dvr_dpo_add_or_lock(path->attached.fp_interface,
2048                             path->fp_nh_proto,
2049                             &path->fp_dpo);
2050         break;
2051     case FIB_PATH_TYPE_RECEIVE:
2052         /*
2053          * Resolve via a receive DPO.
2054          */
2055         receive_dpo_add_or_lock(path->fp_nh_proto,
2056                                 path->receive.fp_interface,
2057                                 &path->receive.fp_addr,
2058                                 &path->fp_dpo);
2059         break;
2060     case FIB_PATH_TYPE_UDP_ENCAP:
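        /*
         * Lock the UDP encap object and use the forwarding it contributes.
         */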
2061         udp_encap_lock(path->udp_encap.fp_udp_encap_id);
2062         udp_encap_contribute_forwarding(path->udp_encap.fp_udp_encap_id,
2063                                         path->fp_nh_proto,
2064                                         &path->fp_dpo);
2065         break;
2066     case FIB_PATH_TYPE_INTF_RX: {
2067         /*
2068          * Resolve via an interface receive DPO.
2069          */
2070         interface_rx_dpo_add_or_lock(path->fp_nh_proto,
2071                                      path->intf_rx.fp_interface,
2072                                      &path->fp_dpo);
2073         break;
2074     }
2075     case FIB_PATH_TYPE_EXCLUSIVE:
2076         /*
2077          * Resolve via the user provided DPO
2078          */
2079         dpo_copy(&path->fp_dpo, &path->exclusive.fp_ex_dpo);
2080         break;
2081     }
2082
2083     return (fib_path_is_resolved(path_index));
2084 }
2085
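/*
 * fib_path_get_resolving_interface
 *
 * Return the SW interface through which the path resolves, or, for path
 * types with no explicit interface, the uRPF interface of the path's DPO.
 */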
2086 u32
2087 fib_path_get_resolving_interface (fib_node_index_t path_index)
2088 {
2089     fib_path_t *path;
2090
2091     path = fib_path_get(path_index);
2092
2093     switch (path->fp_type)
2094     {
2095     case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
2096         return (path->attached_next_hop.fp_interface);
2097     case FIB_PATH_TYPE_ATTACHED:
2098         return (path->attached.fp_interface);
2099     case FIB_PATH_TYPE_RECEIVE:
2100         return (path->receive.fp_interface);
2101     case FIB_PATH_TYPE_RECURSIVE:
2102         if (fib_path_is_resolved(path_index))
2103         {
2104             return (fib_entry_get_resolving_interface(path->fp_via_fib));
2105         }
2106         break;
2107     case FIB_PATH_TYPE_DVR:
2108         return (path->dvr.fp_interface);
2109     case FIB_PATH_TYPE_INTF_RX:
2110     case FIB_PATH_TYPE_UDP_ENCAP:
2111     case FIB_PATH_TYPE_SPECIAL:
2112     case FIB_PATH_TYPE_DEAG:
2113     case FIB_PATH_TYPE_EXCLUSIVE:
2114     case FIB_PATH_TYPE_BIER_FMASK:
2115     case FIB_PATH_TYPE_BIER_TABLE:
2116     case FIB_PATH_TYPE_BIER_IMP:
2117         break;
2118     }
2119     return (dpo_get_urpf(&path->fp_dpo));
2120 }
2121
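/*
 * fib_path_get_resolving_index
 *
 * Return the index of the object the path resolves via (FIB entry, BIER
 * fmask/table/imposition or UDP encap), or ~0 for types with no such object.
 */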
2122 index_t
2123 fib_path_get_resolving_index (fib_node_index_t path_index)
2124 {
2125     fib_path_t *path;
2126
2127     path = fib_path_get(path_index);
2128
2129     switch (path->fp_type)
2130     {
2131     case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
2132     case FIB_PATH_TYPE_ATTACHED:
2133     case FIB_PATH_TYPE_RECEIVE:
2134     case FIB_PATH_TYPE_INTF_RX:
2135     case FIB_PATH_TYPE_SPECIAL:
2136     case FIB_PATH_TYPE_DEAG:
2137     case FIB_PATH_TYPE_DVR:
2138     case FIB_PATH_TYPE_EXCLUSIVE:
2139         break;
2140     case FIB_PATH_TYPE_UDP_ENCAP:
2141         return (path->udp_encap.fp_udp_encap_id);
2142     case FIB_PATH_TYPE_RECURSIVE:
2143         return (path->fp_via_fib);
2144     case FIB_PATH_TYPE_BIER_FMASK:
2145         return (path->bier_fmask.fp_bier_fmask);
2146     case FIB_PATH_TYPE_BIER_TABLE:
2147         return (path->fp_via_bier_tbl);
2148     case FIB_PATH_TYPE_BIER_IMP:
2149         return (path->bier_imp.fp_bier_imp);
2150     }
2151     return (~0);
2152 }
2153
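/*
 * fib_path_get_adj
 *
 * Return the index of the adjacency the path uses, or ADJ_INDEX_INVALID
 * if the path's DPO is not an adjacency.
 */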
2154 adj_index_t
2155 fib_path_get_adj (fib_node_index_t path_index)
2156 {
2157     fib_path_t *path;
2158
2159     path = fib_path_get(path_index);
2160
2161     ASSERT(dpo_is_adj(&path->fp_dpo));
2162     if (dpo_is_adj(&path->fp_dpo))
2163     {
2164         return (path->fp_dpo.dpoi_index);
2165     }
2166     return (ADJ_INDEX_INVALID);
2167 }
2168
2169 u16
2170 fib_path_get_weight (fib_node_index_t path_index)
2171 {
2172     fib_path_t *path;
2173
2174     path = fib_path_get(path_index);
2175
2176     ASSERT(path);
2177
2178     return (path->fp_weight);
2179 }
2180
2181 u16
2182 fib_path_get_preference (fib_node_index_t path_index)
2183 {
2184     fib_path_t *path;
2185
2186     path = fib_path_get(path_index);
2187
2188     ASSERT(path);
2189
2190     return (path->fp_preference);
2191 }
2192
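/*
 * fib_path_get_rpf_id
 *
 * Return the RPF-ID configured on the path, or ~0 if none is set.
 */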
2193 u32
2194 fib_path_get_rpf_id (fib_node_index_t path_index)
2195 {
2196     fib_path_t *path;
2197
2198     path = fib_path_get(path_index);
2199
2200     ASSERT(path);
2201
2202     if (FIB_PATH_CFG_FLAG_RPF_ID & path->fp_cfg_flags)
2203     {
2204         return (path->deag.fp_rpf_id);
2205     }
2206
2207     return (~0);
2208 }
2209
2210 /**
2211  * @brief Contribute the path's adjacency to the list passed.
2212  * By calling this function over all paths, recursively, a child
2213  * can construct its full set of forwarding adjacencies, and hence its
2214  * uRPF list.
2215  */
2216 void
2217 fib_path_contribute_urpf (fib_node_index_t path_index,
2218                           index_t urpf)
2219 {
2220     fib_path_t *path;
2221
2222     path = fib_path_get(path_index);
2223
2224     /*
2225      * resolved and unresolved paths contribute to the RPF list.
2226      */
2227     switch (path->fp_type)
2228     {
2229     case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
2230         fib_urpf_list_append(urpf, path->attached_next_hop.fp_interface);
2231         break;
2232
2233     case FIB_PATH_TYPE_ATTACHED:
2234         fib_urpf_list_append(urpf, path->attached.fp_interface);
2235         break;
2236
2237     case FIB_PATH_TYPE_RECURSIVE:
2238         if (FIB_NODE_INDEX_INVALID != path->fp_via_fib &&
2239             !fib_path_is_looped(path_index))
2240         {
2241             /*
2242              * there's unresolved due to constraints, and there's unresolved
2243              * due to ain't got no via. can't do nowt w'out via.
2244              */
2245             fib_entry_contribute_urpf(path->fp_via_fib, urpf);
2246         }
2247         break;
2248
2249     case FIB_PATH_TYPE_EXCLUSIVE:
2250     case FIB_PATH_TYPE_SPECIAL:
2251     {
2252         /*
2253          * these path types may link to an adj, if that's what
2254          * the client provided
2255          */
2256         u32 rpf_sw_if_index;
2257
2258         rpf_sw_if_index = dpo_get_urpf(&path->fp_dpo);
2259
2260         if (~0 != rpf_sw_if_index)
2261         {
2262             fib_urpf_list_append(urpf, rpf_sw_if_index);
2263         }
2264         break;
2265     }
2266     case FIB_PATH_TYPE_DVR:
2267         fib_urpf_list_append(urpf, path->dvr.fp_interface);
2268         break;
2269     case FIB_PATH_TYPE_DEAG:
2270     case FIB_PATH_TYPE_RECEIVE:
2271     case FIB_PATH_TYPE_INTF_RX:
2272     case FIB_PATH_TYPE_UDP_ENCAP:
2273     case FIB_PATH_TYPE_BIER_FMASK:
2274     case FIB_PATH_TYPE_BIER_TABLE:
2275     case FIB_PATH_TYPE_BIER_IMP:
2276         /*
2277          * these path types don't link to an adj
2278          */
2279         break;
2280     }
2281 }
2282
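/*
 * fib_path_stack_mpls_disp
 *
 * Stack an MPLS disposition DPO on the DPO passed, for the path types that
 * terminate an LSP (attached next-hop and deag); other types are unchanged.
 */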
2283 void
2284 fib_path_stack_mpls_disp (fib_node_index_t path_index,
2285                           dpo_proto_t payload_proto,
2286                           fib_mpls_lsp_mode_t mode,
2287                           dpo_id_t *dpo)
2288 {
2289     fib_path_t *path;
2290
2291     path = fib_path_get(path_index);
2292
2293     ASSERT(path);
2294
2295     switch (path->fp_type)
2296     {
2297     case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
2298     {
2299         dpo_id_t tmp = DPO_INVALID;
2300
2301         dpo_copy(&tmp, dpo);
2302
2303         mpls_disp_dpo_create(payload_proto, ~0, mode, &tmp, dpo);
2304         dpo_reset(&tmp);
2305         break;
2306     }
2307     case FIB_PATH_TYPE_DEAG:
2308     {
2309         dpo_id_t tmp = DPO_INVALID;
2310
2311         dpo_copy(&tmp, dpo);
2312
2313         mpls_disp_dpo_create(payload_proto,
2314                              path->deag.fp_rpf_id,
2315                              mode, &tmp, dpo);
2316         dpo_reset(&tmp);
2317         break;
2318     }
2319     case FIB_PATH_TYPE_RECEIVE:
2320     case FIB_PATH_TYPE_ATTACHED:
2321     case FIB_PATH_TYPE_RECURSIVE:
2322     case FIB_PATH_TYPE_INTF_RX:
2323     case FIB_PATH_TYPE_UDP_ENCAP:
2324     case FIB_PATH_TYPE_EXCLUSIVE:
2325     case FIB_PATH_TYPE_SPECIAL:
2326     case FIB_PATH_TYPE_BIER_FMASK:
2327     case FIB_PATH_TYPE_BIER_TABLE:
2328     case FIB_PATH_TYPE_BIER_IMP:
2329     case FIB_PATH_TYPE_DVR:
2330         break;
2331     }
2332 }
2333
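/*
 * fib_path_contribute_forwarding
 *
 * Fill in the DPO to use when forwarding via this path for the requested
 * forwarding chain type. If the request matches the path's native chain
 * type the stored DPO is copied, otherwise a chain-type specific DPO is
 * constructed (e.g. a link-type specific adjacency).
 *
 * Illustrative caller sketch (variable names are hypothetical):
 *
 *   dpo_id_t via_dpo = DPO_INVALID;
 *   fib_path_contribute_forwarding(path_index,
 *                                  FIB_FORW_CHAIN_TYPE_UNICAST_IP4,
 *                                  &via_dpo);
 *   ... use via_dpo, then dpo_reset(&via_dpo) when finished ...
 */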
2334 void
2335 fib_path_contribute_forwarding (fib_node_index_t path_index,
2336                                 fib_forward_chain_type_t fct,
2337                                 dpo_id_t *dpo)
2338 {
2339     fib_path_t *path;
2340
2341     path = fib_path_get(path_index);
2342
2343     ASSERT(path);
2344     ASSERT(FIB_FORW_CHAIN_TYPE_MPLS_EOS != fct);
2345
2346     /*
2347      * The DPO stored in the path was created when the path was resolved.
2348      * This then represents the path's 'native' protocol (e.g. IP).
2349      * For all other chain types we will need to construct something else.
2350      */
2351     if (fib_path_to_chain_type(path) == fct)
2352     {
2353         dpo_copy(dpo, &path->fp_dpo);
2354     }
2355     else
2356     {
2357         switch (path->fp_type)
2358         {
2359         case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
2360             switch (fct)
2361             {
2362             case FIB_FORW_CHAIN_TYPE_UNICAST_IP4:
2363             case FIB_FORW_CHAIN_TYPE_UNICAST_IP6:
2364             case FIB_FORW_CHAIN_TYPE_MPLS_EOS:
2365             case FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS:
2366             case FIB_FORW_CHAIN_TYPE_ETHERNET:
2367             case FIB_FORW_CHAIN_TYPE_NSH:
2368             case FIB_FORW_CHAIN_TYPE_MCAST_IP4:
2369             case FIB_FORW_CHAIN_TYPE_MCAST_IP6:
2370             {
2371                 adj_index_t ai;
2372
2373                 /*
2374                  * get an appropriate link-type adj.
2375                  */
2376                 ai = fib_path_attached_next_hop_get_adj(
2377                          path,
2378                          fib_forw_chain_type_to_link_type(fct));
2379                 dpo_set(dpo, DPO_ADJACENCY,
2380                         fib_forw_chain_type_to_dpo_proto(fct), ai);
2381                 adj_unlock(ai);
2382
2383                 break;
2384             }
2385             case FIB_FORW_CHAIN_TYPE_BIER:
2386                 break;
2387             }
2388             break;
2389         case FIB_PATH_TYPE_RECURSIVE:
2390             switch (fct)
2391             {
2392             case FIB_FORW_CHAIN_TYPE_MPLS_EOS:
2393             case FIB_FORW_CHAIN_TYPE_UNICAST_IP4:
2394             case FIB_FORW_CHAIN_TYPE_UNICAST_IP6:
2395             case FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS:
2396             case FIB_FORW_CHAIN_TYPE_MCAST_IP4:
2397             case FIB_FORW_CHAIN_TYPE_MCAST_IP6:
2398             case FIB_FORW_CHAIN_TYPE_BIER:
2399                 fib_path_recursive_adj_update(path, fct, dpo);
2400                 break;
2401             case FIB_FORW_CHAIN_TYPE_ETHERNET:
2402             case FIB_FORW_CHAIN_TYPE_NSH:
2403                 ASSERT(0);
2404                 break;
2405             }
2406             break;
2407         case FIB_PATH_TYPE_BIER_TABLE:
2408             switch (fct)
2409             {
2410             case FIB_FORW_CHAIN_TYPE_BIER:
2411                 bier_table_contribute_forwarding(path->fp_via_bier_tbl, dpo);
2412                 break;
2413             case FIB_FORW_CHAIN_TYPE_MPLS_EOS:
2414             case FIB_FORW_CHAIN_TYPE_UNICAST_IP4:
2415             case FIB_FORW_CHAIN_TYPE_UNICAST_IP6:
2416             case FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS:
2417             case FIB_FORW_CHAIN_TYPE_MCAST_IP4:
2418             case FIB_FORW_CHAIN_TYPE_MCAST_IP6:
2419             case FIB_FORW_CHAIN_TYPE_ETHERNET:
2420             case FIB_FORW_CHAIN_TYPE_NSH:
2421                 ASSERT(0);
2422                 break;
2423             }
2424             break;
2425         case FIB_PATH_TYPE_BIER_FMASK:
2426             switch (fct)
2427             {
2428             case FIB_FORW_CHAIN_TYPE_BIER:
2429                 fib_path_bier_fmask_update(path, dpo);
2430                 break;
2431             case FIB_FORW_CHAIN_TYPE_MPLS_EOS:
2432             case FIB_FORW_CHAIN_TYPE_UNICAST_IP4:
2433             case FIB_FORW_CHAIN_TYPE_UNICAST_IP6:
2434             case FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS:
2435             case FIB_FORW_CHAIN_TYPE_MCAST_IP4:
2436             case FIB_FORW_CHAIN_TYPE_MCAST_IP6:
2437             case FIB_FORW_CHAIN_TYPE_ETHERNET:
2438             case FIB_FORW_CHAIN_TYPE_NSH:
2439                 ASSERT(0);
2440                 break;
2441             }
2442             break;
2443         case FIB_PATH_TYPE_BIER_IMP:
2444             bier_imp_contribute_forwarding(path->bier_imp.fp_bier_imp,
2445                                            fib_forw_chain_type_to_dpo_proto(fct),
2446                                            dpo);
2447             break;
2448         case FIB_PATH_TYPE_DEAG:
2449             switch (fct)
2450             {
2451             case FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS:
2452                 lookup_dpo_add_or_lock_w_table_id(MPLS_FIB_DEFAULT_TABLE_ID,
2453                                                   DPO_PROTO_MPLS,
2454                                                   LOOKUP_UNICAST,
2455                                                   LOOKUP_INPUT_DST_ADDR,
2456                                                   LOOKUP_TABLE_FROM_CONFIG,
2457                                                   dpo);
2458                 break;
2459             case FIB_FORW_CHAIN_TYPE_MPLS_EOS:
2460             case FIB_FORW_CHAIN_TYPE_UNICAST_IP4:
2461             case FIB_FORW_CHAIN_TYPE_UNICAST_IP6:
2462                 dpo_copy(dpo, &path->fp_dpo);
2463                 break;
2464             case FIB_FORW_CHAIN_TYPE_MCAST_IP4:
2465             case FIB_FORW_CHAIN_TYPE_MCAST_IP6:
2466             case FIB_FORW_CHAIN_TYPE_BIER:
2467                 break;
2468             case FIB_FORW_CHAIN_TYPE_ETHERNET:
2469             case FIB_FORW_CHAIN_TYPE_NSH:
2470                 ASSERT(0);
2471                 break;
2472             }
2473             break;
2474         case FIB_PATH_TYPE_EXCLUSIVE:
2475             dpo_copy(dpo, &path->exclusive.fp_ex_dpo);
2476             break;
2477         case FIB_PATH_TYPE_ATTACHED:
2478             switch (fct)
2479             {
2480             case FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS:
2481             case FIB_FORW_CHAIN_TYPE_UNICAST_IP4:
2482             case FIB_FORW_CHAIN_TYPE_UNICAST_IP6:
2483             case FIB_FORW_CHAIN_TYPE_MPLS_EOS:
2484             case FIB_FORW_CHAIN_TYPE_ETHERNET:
2485             case FIB_FORW_CHAIN_TYPE_NSH:
2486             case FIB_FORW_CHAIN_TYPE_BIER:
2487                 {
2488                     adj_index_t ai;
2489
2490                     /*
2491                      * get an appropriate link-type adj.
2492                      */
2493                     ai = fib_path_attached_get_adj(
2494                             path,
2495                             fib_forw_chain_type_to_link_type(fct));
2496                     dpo_set(dpo, DPO_ADJACENCY,
2497                             fib_forw_chain_type_to_dpo_proto(fct), ai);
2498                     adj_unlock(ai);
2499                     break;
2500                 }
2501             case FIB_FORW_CHAIN_TYPE_MCAST_IP4:
2502             case FIB_FORW_CHAIN_TYPE_MCAST_IP6:
2503                 {
2504                     adj_index_t ai;
2505
2506                     /*
2507                      * Create the adj needed for sending IP multicast traffic
2508                      */
2509                     ai = adj_mcast_add_or_lock(dpo_proto_to_fib(path->fp_nh_proto),
2510                                                fib_forw_chain_type_to_link_type(fct),
2511                                                path->attached.fp_interface);
2512                     dpo_set(dpo, DPO_ADJACENCY,
2513                             fib_forw_chain_type_to_dpo_proto(fct),
2514                             ai);
2515                     adj_unlock(ai);
2516                 }
2517                 break;
2518             }
2519             break;
2520         case FIB_PATH_TYPE_INTF_RX:
2521             /*
2522              * Create the interface receive DPO for this chain type
2523              */
2524             interface_rx_dpo_add_or_lock(fib_forw_chain_type_to_dpo_proto(fct),
2525                                          path->attached.fp_interface,
2526                                          dpo);
2527             break;
2528         case FIB_PATH_TYPE_UDP_ENCAP:
2529             udp_encap_contribute_forwarding(path->udp_encap.fp_udp_encap_id,
2530                                             path->fp_nh_proto,
2531                                             dpo);
2532             break;
2533         case FIB_PATH_TYPE_RECEIVE:
2534         case FIB_PATH_TYPE_SPECIAL:
2535         case FIB_PATH_TYPE_DVR:
2536             dpo_copy(dpo, &path->fp_dpo);
2537             break;
2538         }
2539     }
2540 }
2541
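/*
 * fib_path_append_nh_for_multipath_hash
 *
 * Append this path's contribution (weight, index and DPO) to the vector of
 * buckets used to construct a load-balance. Unresolved paths contribute a
 * drop DPO.
 */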
2542 load_balance_path_t *
2543 fib_path_append_nh_for_multipath_hash (fib_node_index_t path_index,
2544                                        fib_forward_chain_type_t fct,
2545                                        load_balance_path_t *hash_key)
2546 {
2547     load_balance_path_t *mnh;
2548     fib_path_t *path;
2549
2550     path = fib_path_get(path_index);
2551
2552     ASSERT(path);
2553
2554     vec_add2(hash_key, mnh, 1);
2555
2556     mnh->path_weight = path->fp_weight;
2557     mnh->path_index = path_index;
2558
2559     if (fib_path_is_resolved(path_index))
2560     {
2561         fib_path_contribute_forwarding(path_index, fct, &mnh->path_dpo);
2562     }
2563     else
2564     {
2565         dpo_copy(&mnh->path_dpo,
2566                  drop_dpo_get(fib_forw_chain_type_to_dpo_proto(fct)));
2567     }
2568     return (hash_key);
2569 }
2570
2571 int
2572 fib_path_is_recursive_constrained (fib_node_index_t path_index)
2573 {
2574     fib_path_t *path;
2575
2576     path = fib_path_get(path_index);
2577
2578     return ((FIB_PATH_TYPE_RECURSIVE == path->fp_type) &&
2579             ((path->fp_cfg_flags & FIB_PATH_CFG_FLAG_RESOLVE_ATTACHED) ||
2580              (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_RESOLVE_HOST)));
2581 }
2582
2583 int
2584 fib_path_is_exclusive (fib_node_index_t path_index)
2585 {
2586     fib_path_t *path;
2587
2588     path = fib_path_get(path_index);
2589
2590     return (FIB_PATH_TYPE_EXCLUSIVE == path->fp_type);
2591 }
2592
2593 int
2594 fib_path_is_deag (fib_node_index_t path_index)
2595 {
2596     fib_path_t *path;
2597
2598     path = fib_path_get(path_index);
2599
2600     return (FIB_PATH_TYPE_DEAG == path->fp_type);
2601 }
2602
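/*
 * fib_path_is_resolved
 *
 * A path is resolved when it has a valid DPO, is flagged resolved, is not
 * part of a recursive loop and is not a permanent drop.
 */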
2603 int
2604 fib_path_is_resolved (fib_node_index_t path_index)
2605 {
2606     fib_path_t *path;
2607
2608     path = fib_path_get(path_index);
2609
2610     return (dpo_id_is_valid(&path->fp_dpo) &&
2611             (path->fp_oper_flags & FIB_PATH_OPER_FLAG_RESOLVED) &&
2612             !fib_path_is_looped(path_index) &&
2613             !fib_path_is_permanent_drop(path));
2614 }
2615
2616 int
2617 fib_path_is_looped (fib_node_index_t path_index)
2618 {
2619     fib_path_t *path;
2620
2621     path = fib_path_get(path_index);
2622
2623     return (path->fp_oper_flags & FIB_PATH_OPER_FLAG_RECURSIVE_LOOP);
2624 }
2625
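/*
 * fib_path_encode
 *
 * fib_path_list walk callback that encodes the path into a
 * fib_route_path_encode_t for consumption by the API/CLI.
 */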
2626 fib_path_list_walk_rc_t
2627 fib_path_encode (fib_node_index_t path_list_index,
2628                  fib_node_index_t path_index,
2629                  const fib_path_ext_t *path_ext,
2630                  void *ctx)
2631 {
2632     fib_route_path_encode_t **api_rpaths = ctx;
2633     fib_route_path_encode_t *api_rpath;
2634     fib_path_t *path;
2635
2636     path = fib_path_get(path_index);
2637     if (!path)
2638       return (FIB_PATH_LIST_WALK_CONTINUE);
2639     vec_add2(*api_rpaths, api_rpath, 1);
2640     api_rpath->rpath.frp_weight = path->fp_weight;
2641     api_rpath->rpath.frp_preference = path->fp_preference;
2642     api_rpath->rpath.frp_proto = path->fp_nh_proto;
2643     api_rpath->rpath.frp_sw_if_index = ~0;
2644     api_rpath->rpath.frp_fib_index = 0;
2645     api_rpath->dpo = path->fp_dpo;
2646
2647     switch (path->fp_type)
2648     {
2649       case FIB_PATH_TYPE_RECEIVE:
2650         api_rpath->rpath.frp_addr = path->receive.fp_addr;
2651         api_rpath->rpath.frp_sw_if_index = path->receive.fp_interface;
2652         break;
2653       case FIB_PATH_TYPE_ATTACHED:
2654         api_rpath->rpath.frp_sw_if_index = path->attached.fp_interface;
2655         break;
2656       case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
2657         api_rpath->rpath.frp_sw_if_index = path->attached_next_hop.fp_interface;
2658         api_rpath->rpath.frp_addr = path->attached_next_hop.fp_nh;
2659         break;
2660       case FIB_PATH_TYPE_BIER_FMASK:
2661         api_rpath->rpath.frp_bier_fmask = path->bier_fmask.fp_bier_fmask;
2662         break;
2663       case FIB_PATH_TYPE_SPECIAL:
2664         break;
2665       case FIB_PATH_TYPE_DEAG:
2666         api_rpath->rpath.frp_fib_index = path->deag.fp_tbl_id;
2667         if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_RPF_ID)
2668         {
2669             api_rpath->rpath.frp_flags |= FIB_ROUTE_PATH_RPF_ID;
2670         }
2671         break;
2672       case FIB_PATH_TYPE_RECURSIVE:
2673         api_rpath->rpath.frp_addr = path->recursive.fp_nh.fp_ip;
2674         api_rpath->rpath.frp_fib_index = path->recursive.fp_tbl_id;
2675         break;
2676       case FIB_PATH_TYPE_DVR:
2677           api_rpath->rpath.frp_sw_if_index = path->dvr.fp_interface;
2678           api_rpath->rpath.frp_flags |= FIB_ROUTE_PATH_DVR;
2679           break;
2680       case FIB_PATH_TYPE_UDP_ENCAP:
2681           api_rpath->rpath.frp_udp_encap_id = path->udp_encap.fp_udp_encap_id;
2682           api_rpath->rpath.frp_flags |= FIB_ROUTE_PATH_UDP_ENCAP;
2683           break;
2684       case FIB_PATH_TYPE_INTF_RX:
2685           api_rpath->rpath.frp_sw_if_index = path->receive.fp_interface;
2686           api_rpath->rpath.frp_flags |= FIB_ROUTE_PATH_INTF_RX;
2687           break;
2688       default:
2689         break;
2690     }
2691
2692     if (path_ext && path_ext->fpe_type == FIB_PATH_EXT_MPLS) 
2693     {
2694         api_rpath->rpath.frp_label_stack = path_ext->fpe_path.frp_label_stack;
2695     }
2696
2697     return (FIB_PATH_LIST_WALK_CONTINUE);
2698 }
2699
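/*
 * fib_path_get_proto
 *
 * Return the DPO protocol of the path's next-hop.
 */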
2700 dpo_proto_t
2701 fib_path_get_proto (fib_node_index_t path_index)
2702 {
2703     fib_path_t *path;
2704
2705     path = fib_path_get(path_index);
2706
2707     return (path->fp_nh_proto);
2708 }
2709
2710 void
2711 fib_path_module_init (void)
2712 {
2713     fib_node_register_type (FIB_NODE_TYPE_PATH, &fib_path_vft);
2714     fib_path_logger = vlib_log_register_class ("fib", "path");
2715 }
2716
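/*
 * CLI handler for 'show fib paths'. With no argument all paths are listed;
 * with a numeric path index one path is shown in detail, e.g. (index 12 is
 * hypothetical):
 *   vpp# show fib paths
 *   vpp# show fib paths 12
 */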
2717 static clib_error_t *
2718 show_fib_path_command (vlib_main_t * vm,
2719                         unformat_input_t * input,
2720                         vlib_cli_command_t * cmd)
2721 {
2722     fib_node_index_t pi;
2723     fib_path_t *path;
2724
2725     if (unformat (input, "%d", &pi))
2726     {
2727         /*
2728          * show one in detail
2729          */
2730         if (!pool_is_free_index(fib_path_pool, pi))
2731         {
2732             path = fib_path_get(pi);
2733             u8 *s = format(NULL, "%U", format_fib_path, pi, 1,
2734                            FIB_PATH_FORMAT_FLAGS_NONE);
2735             s = format(s, "\n  children:");
2736             s = fib_node_children_format(path->fp_node.fn_children, s);
2737             vlib_cli_output (vm, "%s", s);
2738             vec_free(s);
2739         }
2740         else
2741         {
2742             vlib_cli_output (vm, "path %d invalid", pi);
2743         }
2744     }
2745     else
2746     {
2747         vlib_cli_output (vm, "FIB Paths");
2748         pool_foreach_index (pi, fib_path_pool,
2749         ({
2750             vlib_cli_output (vm, "%U", format_fib_path, pi, 0,
2751                              FIB_PATH_FORMAT_FLAGS_NONE);
2752         }));
2753     }
2754
2755     return (NULL);
2756 }
2757
2758 VLIB_CLI_COMMAND (show_fib_path, static) = {
2759   .path = "show fib paths",
2760   .function = show_fib_path_command,
2761   .short_help = "show fib paths",
2762 };