c11 safe string handling support
[vpp.git] / src / vnet / fib / fib_path.c
1 /*
2  * Copyright (c) 2016 Cisco and/or its affiliates.
3  * Licensed under the Apache License, Version 2.0 (the "License");
4  * you may not use this file except in compliance with the License.
5  * You may obtain a copy of the License at:
6  *
7  *     http://www.apache.org/licenses/LICENSE-2.0
8  *
9  * Unless required by applicable law or agreed to in writing, software
10  * distributed under the License is distributed on an "AS IS" BASIS,
11  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12  * See the License for the specific language governing permissions and
13  * limitations under the License.
14  */
15
16 #include <vlib/vlib.h>
17 #include <vnet/vnet.h>
18 #include <vnet/ip/format.h>
19 #include <vnet/ip/ip.h>
20 #include <vnet/dpo/drop_dpo.h>
21 #include <vnet/dpo/receive_dpo.h>
22 #include <vnet/dpo/load_balance_map.h>
23 #include <vnet/dpo/lookup_dpo.h>
24 #include <vnet/dpo/interface_rx_dpo.h>
25 #include <vnet/dpo/mpls_disposition.h>
26 #include <vnet/dpo/dvr_dpo.h>
27 #include <vnet/dpo/drop_dpo.h>
28
29 #include <vnet/adj/adj.h>
30 #include <vnet/adj/adj_mcast.h>
31
32 #include <vnet/fib/fib_path.h>
33 #include <vnet/fib/fib_node.h>
34 #include <vnet/fib/fib_table.h>
35 #include <vnet/fib/fib_entry.h>
36 #include <vnet/fib/fib_path_list.h>
37 #include <vnet/fib/fib_internal.h>
38 #include <vnet/fib/fib_urpf_list.h>
39 #include <vnet/fib/mpls_fib.h>
40 #include <vnet/udp/udp_encap.h>
41 #include <vnet/bier/bier_fmask.h>
42 #include <vnet/bier/bier_table.h>
43 #include <vnet/bier/bier_imp.h>
44 #include <vnet/bier/bier_disp_table.h>
45
46 /**
47  * Enurmeration of path types
48  */
49 typedef enum fib_path_type_t_ {
50     /**
51      * Marker. Add new types after this one.
52      */
53     FIB_PATH_TYPE_FIRST = 0,
54     /**
55      * Attached-nexthop. An interface and a nexthop are known.
56      */
57     FIB_PATH_TYPE_ATTACHED_NEXT_HOP = FIB_PATH_TYPE_FIRST,
58     /**
59      * attached. Only the interface is known.
60      */
61     FIB_PATH_TYPE_ATTACHED,
62     /**
63      * recursive. Only the next-hop is known.
64      */
65     FIB_PATH_TYPE_RECURSIVE,
66     /**
67      * special. nothing is known. so we drop.
68      */
69     FIB_PATH_TYPE_SPECIAL,
70     /**
71      * exclusive. user provided adj.
72      */
73     FIB_PATH_TYPE_EXCLUSIVE,
74     /**
75      * deag. Link to a lookup adj in the next table
76      */
77     FIB_PATH_TYPE_DEAG,
78     /**
79      * interface receive.
80      */
81     FIB_PATH_TYPE_INTF_RX,
82     /**
83      * Path resolves via a UDP encap object.
84      */
85     FIB_PATH_TYPE_UDP_ENCAP,
86     /**
87      * receive. it's for-us.
88      */
89     FIB_PATH_TYPE_RECEIVE,
90     /**
91      * bier-imp. it's via a BIER imposition.
92      */
93     FIB_PATH_TYPE_BIER_IMP,
94     /**
95      * bier-fmask. it's via a BIER ECMP-table.
96      */
97     FIB_PATH_TYPE_BIER_TABLE,
98     /**
99      * bier-fmask. it's via a BIER f-mask.
100      */
101     FIB_PATH_TYPE_BIER_FMASK,
102     /**
103      * via a DVR.
104      */
105     FIB_PATH_TYPE_DVR,
106     /**
107      * Marker. Add new types before this one, then update it.
108      */
109     FIB_PATH_TYPE_LAST = FIB_PATH_TYPE_BIER_FMASK,
110 } __attribute__ ((packed)) fib_path_type_t;
111
112 /**
113  * The maximum number of path_types
114  */
115 #define FIB_PATH_TYPE_MAX (FIB_PATH_TYPE_LAST + 1)
116
117 #define FIB_PATH_TYPES {                                        \
118     [FIB_PATH_TYPE_ATTACHED_NEXT_HOP] = "attached-nexthop",     \
119     [FIB_PATH_TYPE_ATTACHED]          = "attached",             \
120     [FIB_PATH_TYPE_RECURSIVE]         = "recursive",            \
121     [FIB_PATH_TYPE_SPECIAL]           = "special",              \
122     [FIB_PATH_TYPE_EXCLUSIVE]         = "exclusive",            \
123     [FIB_PATH_TYPE_DEAG]              = "deag",                 \
124     [FIB_PATH_TYPE_INTF_RX]           = "intf-rx",              \
125     [FIB_PATH_TYPE_UDP_ENCAP]         = "udp-encap",            \
126     [FIB_PATH_TYPE_RECEIVE]           = "receive",              \
127     [FIB_PATH_TYPE_BIER_IMP]          = "bier-imp",             \
128     [FIB_PATH_TYPE_BIER_TABLE]        = "bier-table",           \
129     [FIB_PATH_TYPE_BIER_FMASK]        = "bier-fmask",           \
130     [FIB_PATH_TYPE_DVR]               = "dvr",                  \
131 }
132
133 #define FOR_EACH_FIB_PATH_TYPE(_item)           \
134     for (_item = FIB_PATH_TYPE_FIRST;           \
135          _item <= FIB_PATH_TYPE_LAST;           \
136          _item++)
137
138 /**
139  * Enurmeration of path operational (i.e. derived) attributes
140  */
141 typedef enum fib_path_oper_attribute_t_ {
142     /**
143      * Marker. Add new types after this one.
144      */
145     FIB_PATH_OPER_ATTRIBUTE_FIRST = 0,
146     /**
147      * The path forms part of a recursive loop.
148      */
149     FIB_PATH_OPER_ATTRIBUTE_RECURSIVE_LOOP = FIB_PATH_OPER_ATTRIBUTE_FIRST,
150     /**
151      * The path is resolved
152      */
153     FIB_PATH_OPER_ATTRIBUTE_RESOLVED,
154     /**
155      * The path is attached, despite what the next-hop may say.
156      */
157     FIB_PATH_OPER_ATTRIBUTE_ATTACHED,
158     /**
159      * The path has become a permanent drop.
160      */
161     FIB_PATH_OPER_ATTRIBUTE_DROP,
162     /**
163      * Marker. Add new types before this one, then update it.
164      */
165     FIB_PATH_OPER_ATTRIBUTE_LAST = FIB_PATH_OPER_ATTRIBUTE_DROP,
166 } __attribute__ ((packed)) fib_path_oper_attribute_t;
167
168 /**
169  * The maximum number of path operational attributes
170  */
171 #define FIB_PATH_OPER_ATTRIBUTE_MAX (FIB_PATH_OPER_ATTRIBUTE_LAST + 1)
172
173 #define FIB_PATH_OPER_ATTRIBUTES {                                      \
174     [FIB_PATH_OPER_ATTRIBUTE_RECURSIVE_LOOP] = "recursive-loop",        \
175     [FIB_PATH_OPER_ATTRIBUTE_RESOLVED]       = "resolved",              \
176     [FIB_PATH_OPER_ATTRIBUTE_DROP]           = "drop",                  \
177 }
178
179 #define FOR_EACH_FIB_PATH_OPER_ATTRIBUTE(_item) \
180     for (_item = FIB_PATH_OPER_ATTRIBUTE_FIRST; \
181          _item <= FIB_PATH_OPER_ATTRIBUTE_LAST; \
182          _item++)
183
184 /**
185  * Path flags from the attributes
186  */
187 typedef enum fib_path_oper_flags_t_ {
188     FIB_PATH_OPER_FLAG_NONE = 0,
189     FIB_PATH_OPER_FLAG_RECURSIVE_LOOP = (1 << FIB_PATH_OPER_ATTRIBUTE_RECURSIVE_LOOP),
190     FIB_PATH_OPER_FLAG_DROP = (1 << FIB_PATH_OPER_ATTRIBUTE_DROP),
191     FIB_PATH_OPER_FLAG_RESOLVED = (1 << FIB_PATH_OPER_ATTRIBUTE_RESOLVED),
192     FIB_PATH_OPER_FLAG_ATTACHED = (1 << FIB_PATH_OPER_ATTRIBUTE_ATTACHED),
193 } __attribute__ ((packed)) fib_path_oper_flags_t;
194
195 /**
196  * A FIB path
197  */
198 typedef struct fib_path_t_ {
199     /**
200      * A path is a node in the FIB graph.
201      */
202     fib_node_t fp_node;
203
204     /**
205      * The index of the path-list to which this path belongs
206      */
207     u32 fp_pl_index;
208
209     /**
210      * This marks the start of the memory area used to hash
211      * the path
212      */
213     STRUCT_MARK(path_hash_start);
214
215     /**
216      * Configuration Flags
217      */
218     fib_path_cfg_flags_t fp_cfg_flags;
219
220     /**
221      * The type of the path. This is the selector for the union
222      */
223     fib_path_type_t fp_type;
224
225     /**
226      * The protocol of the next-hop, i.e. the address family of the
227      * next-hop's address. We can't derive this from the address itself
228      * since the address can be all zeros
229      */
230     dpo_proto_t fp_nh_proto;
231
232     /**
233      * UCMP [unnormalised] weigth
234      */
235     u8 fp_weight;
236
237     /**
238      * A path preference. 0 is the best.
239      * Only paths of the best preference, that are 'up', are considered
240      * for forwarding.
241      */
242     u8 fp_preference;
243
244     /**
245      * per-type union of the data required to resolve the path
246      */
247     union {
248         struct {
249             /**
250              * The next-hop
251              */
252             ip46_address_t fp_nh;
253             /**
254              * The interface
255              */
256             u32 fp_interface;
257         } attached_next_hop;
258         struct {
259             /**
260              * The interface
261              */
262             u32 fp_interface;
263         } attached;
264         struct {
265             union
266             {
267                 /**
268                  * The next-hop
269                  */
270                 ip46_address_t fp_ip;
271                 struct {
272                     /**
273                      * The local label to resolve through.
274                      */
275                     mpls_label_t fp_local_label;
276                     /**
277                      * The EOS bit of the resolving label
278                      */
279                     mpls_eos_bit_t fp_eos;
280                 };
281             } fp_nh;
282             union {
283                 /**
284                  * The FIB table index in which to find the next-hop.
285                  */
286                 fib_node_index_t fp_tbl_id;
287                 /**
288                  * The BIER FIB the fmask is in
289                  */
290                 index_t fp_bier_fib;
291             };
292         } recursive;
293         struct {
294             /**
295              * BIER FMask ID
296              */
297             index_t fp_bier_fmask;
298         } bier_fmask;
299         struct {
300             /**
301              * The BIER table's ID
302              */
303             bier_table_id_t fp_bier_tbl;
304         } bier_table;
305         struct {
306             /**
307              * The BIER imposition object
308              * this is part of the path's key, since the index_t
309              * of an imposition object is the object's key.
310              */
311             index_t fp_bier_imp;
312         } bier_imp;
313         struct {
314             /**
315              * The FIB index in which to perfom the next lookup
316              */
317             fib_node_index_t fp_tbl_id;
318             /**
319              * The RPF-ID to tag the packets with
320              */
321             fib_rpf_id_t fp_rpf_id;
322         } deag;
323         struct {
324         } special;
325         struct {
326             /**
327              * The user provided 'exclusive' DPO
328              */
329             dpo_id_t fp_ex_dpo;
330         } exclusive;
331         struct {
332             /**
333              * The interface on which the local address is configured
334              */
335             u32 fp_interface;
336             /**
337              * The next-hop
338              */
339             ip46_address_t fp_addr;
340         } receive;
341         struct {
342             /**
343              * The interface on which the packets will be input.
344              */
345             u32 fp_interface;
346         } intf_rx;
347         struct {
348             /**
349              * The UDP Encap object this path resolves through
350              */
351             u32 fp_udp_encap_id;
352         } udp_encap;
353         struct {
354             /**
355              * The interface
356              */
357             u32 fp_interface;
358         } dvr;
359     };
360     STRUCT_MARK(path_hash_end);
361
362     /**
363      * Memebers in this last section represent information that is
364      * dervied during resolution. It should not be copied to new paths
365      * nor compared.
366      */
367
368     /**
369      * Operational Flags
370      */
371     fib_path_oper_flags_t fp_oper_flags;
372
373     union {
374         /**
375          * the resolving via fib. not part of the union, since it it not part
376          * of the path's hash.
377          */
378         fib_node_index_t fp_via_fib;
379         /**
380          * the resolving bier-table
381          */
382         index_t fp_via_bier_tbl;
383         /**
384          * the resolving bier-fmask
385          */
386         index_t fp_via_bier_fmask;
387     };
388
389     /**
390      * The Data-path objects through which this path resolves for IP.
391      */
392     dpo_id_t fp_dpo;
393
394     /**
395      * the index of this path in the parent's child list.
396      */
397     u32 fp_sibling;
398 } fib_path_t;
399
400 /*
401  * Array of strings/names for the path types and attributes
402  */
403 static const char *fib_path_type_names[] = FIB_PATH_TYPES;
404 static const char *fib_path_oper_attribute_names[] = FIB_PATH_OPER_ATTRIBUTES;
405 static const char *fib_path_cfg_attribute_names[]  = FIB_PATH_CFG_ATTRIBUTES;
406
407 /*
408  * The memory pool from which we allocate all the paths
409  */
410 static fib_path_t *fib_path_pool;
411
412 /**
413  * the logger
414  */
415 vlib_log_class_t fib_path_logger;
416
417 /*
418  * Debug macro
419  */
420 #define FIB_PATH_DBG(_p, _fmt, _args...)                                \
421 {                                                                       \
422     vlib_log_debug (fib_path_logger,                                    \
423                     "[%U]: " _fmt,                                      \
424                     format_fib_path, fib_path_get_index(_p), 0,         \
425                     FIB_PATH_FORMAT_FLAGS_ONE_LINE,                     \
426                     ##_args);                                           \
427 }
428
429 static fib_path_t *
430 fib_path_get (fib_node_index_t index)
431 {
432     return (pool_elt_at_index(fib_path_pool, index));
433 }
434
435 static fib_node_index_t 
436 fib_path_get_index (fib_path_t *path)
437 {
438     return (path - fib_path_pool);
439 }
440
441 static fib_node_t *
442 fib_path_get_node (fib_node_index_t index)
443 {
444     return ((fib_node_t*)fib_path_get(index));
445 }
446
447 static fib_path_t*
448 fib_path_from_fib_node (fib_node_t *node)
449 {
450     ASSERT(FIB_NODE_TYPE_PATH == node->fn_type);
451     return ((fib_path_t*)node);
452 }
453
454 u8 *
455 format_fib_path (u8 * s, va_list * args)
456 {
457     fib_node_index_t path_index = va_arg (*args, fib_node_index_t);
458     u32 indent = va_arg (*args, u32);
459     fib_format_path_flags_t flags =  va_arg (*args, fib_format_path_flags_t);
460     vnet_main_t * vnm = vnet_get_main();
461     fib_path_oper_attribute_t oattr;
462     fib_path_cfg_attribute_t cattr;
463     fib_path_t *path;
464     const char *eol;
465
466     if (flags & FIB_PATH_FORMAT_FLAGS_ONE_LINE)
467     {
468         eol = "";
469     }
470     else
471     {
472         eol = "\n";
473     }
474
475     path = fib_path_get(path_index);
476
477     s = format (s, "%Upath:[%d] ", format_white_space, indent,
478                 fib_path_get_index(path));
479     s = format (s, "pl-index:%d ", path->fp_pl_index);
480     s = format (s, "%U ", format_dpo_proto, path->fp_nh_proto);
481     s = format (s, "weight=%d ", path->fp_weight);
482     s = format (s, "pref=%d ", path->fp_preference);
483     s = format (s, "%s: ", fib_path_type_names[path->fp_type]);
484     if (FIB_PATH_OPER_FLAG_NONE != path->fp_oper_flags) {
485         s = format(s, " oper-flags:");
486         FOR_EACH_FIB_PATH_OPER_ATTRIBUTE(oattr) {
487             if ((1<<oattr) & path->fp_oper_flags) {
488                 s = format (s, "%s,", fib_path_oper_attribute_names[oattr]);
489             }
490         }
491     }
492     if (FIB_PATH_CFG_FLAG_NONE != path->fp_cfg_flags) {
493         s = format(s, " cfg-flags:");
494         FOR_EACH_FIB_PATH_CFG_ATTRIBUTE(cattr) {
495             if ((1<<cattr) & path->fp_cfg_flags) {
496                 s = format (s, "%s,", fib_path_cfg_attribute_names[cattr]);
497             }
498         }
499     }
500     if (!(flags & FIB_PATH_FORMAT_FLAGS_ONE_LINE))
501         s = format(s, "\n%U", format_white_space, indent+2);
502
503     switch (path->fp_type)
504     {
505     case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
506         s = format (s, "%U", format_ip46_address,
507                     &path->attached_next_hop.fp_nh,
508                     IP46_TYPE_ANY);
509         if (path->fp_oper_flags & FIB_PATH_OPER_FLAG_DROP)
510         {
511             s = format (s, " if_index:%d", path->attached_next_hop.fp_interface);
512         }
513         else
514         {
515             s = format (s, " %U",
516                         format_vnet_sw_interface_name,
517                         vnm,
518                         vnet_get_sw_interface(
519                             vnm,
520                             path->attached_next_hop.fp_interface));
521             if (vnet_sw_interface_is_p2p(vnet_get_main(),
522                                          path->attached_next_hop.fp_interface))
523             {
524                 s = format (s, " (p2p)");
525             }
526         }
527         if (!dpo_id_is_valid(&path->fp_dpo))
528         {
529             s = format(s, "%s%Uunresolved", eol, format_white_space, indent+2);
530         }
531         else
532         {
533             s = format(s, "%s%U%U", eol,
534                        format_white_space, indent,
535                        format_dpo_id,
536                        &path->fp_dpo, 13);
537         }
538         break;
539     case FIB_PATH_TYPE_ATTACHED:
540         if (path->fp_oper_flags & FIB_PATH_OPER_FLAG_DROP)
541         {
542             s = format (s, "if_index:%d", path->attached_next_hop.fp_interface);
543         }
544         else
545         {
546             s = format (s, " %U",
547                         format_vnet_sw_interface_name,
548                         vnm,
549                         vnet_get_sw_interface(
550                             vnm,
551                             path->attached.fp_interface));
552         }
553         break;
554     case FIB_PATH_TYPE_RECURSIVE:
555         if (DPO_PROTO_MPLS == path->fp_nh_proto)
556         {
557             s = format (s, "via %U %U",
558                         format_mpls_unicast_label,
559                         path->recursive.fp_nh.fp_local_label,
560                         format_mpls_eos_bit,
561                         path->recursive.fp_nh.fp_eos);
562         }
563         else
564         {
565             s = format (s, "via %U",
566                         format_ip46_address,
567                         &path->recursive.fp_nh.fp_ip,
568                         IP46_TYPE_ANY);
569         }
570         s = format (s, " in fib:%d",
571                     path->recursive.fp_tbl_id,
572                     path->fp_via_fib); 
573         s = format (s, " via-fib:%d", path->fp_via_fib); 
574         s = format (s, " via-dpo:[%U:%d]",
575                     format_dpo_type, path->fp_dpo.dpoi_type, 
576                     path->fp_dpo.dpoi_index);
577
578         break;
579     case FIB_PATH_TYPE_UDP_ENCAP:
580         s = format (s, "UDP-encap ID:%d", path->udp_encap.fp_udp_encap_id);
581         break;
582     case FIB_PATH_TYPE_BIER_TABLE:
583         s = format (s, "via bier-table:[%U}",
584                     format_bier_table_id,
585                     &path->bier_table.fp_bier_tbl);
586         s = format (s, " via-dpo:[%U:%d]",
587                     format_dpo_type, path->fp_dpo.dpoi_type,
588                     path->fp_dpo.dpoi_index);
589         break;
590     case FIB_PATH_TYPE_BIER_FMASK:
591         s = format (s, "via-fmask:%d", path->bier_fmask.fp_bier_fmask); 
592         s = format (s, " via-dpo:[%U:%d]",
593                     format_dpo_type, path->fp_dpo.dpoi_type, 
594                     path->fp_dpo.dpoi_index);
595         break;
596     case FIB_PATH_TYPE_BIER_IMP:
597         s = format (s, "via %U", format_bier_imp,
598                     path->bier_imp.fp_bier_imp, 0, BIER_SHOW_BRIEF);
599         break;
600     case FIB_PATH_TYPE_DVR:
601         s = format (s, " %U",
602                     format_vnet_sw_interface_name,
603                     vnm,
604                     vnet_get_sw_interface(
605                         vnm,
606                         path->dvr.fp_interface));
607         break;
608     case FIB_PATH_TYPE_RECEIVE:
609     case FIB_PATH_TYPE_INTF_RX:
610     case FIB_PATH_TYPE_SPECIAL:
611     case FIB_PATH_TYPE_DEAG:
612     case FIB_PATH_TYPE_EXCLUSIVE:
613         if (dpo_id_is_valid(&path->fp_dpo))
614         {
615             s = format(s, "%U", format_dpo_id,
616                        &path->fp_dpo, indent+2);
617         }
618         break;
619     }
620     return (s);
621 }
622
623 /*
624  * fib_path_last_lock_gone
625  *
626  * We don't share paths, we share path lists, so the [un]lock functions
627  * are no-ops
628  */
629 static void
630 fib_path_last_lock_gone (fib_node_t *node)
631 {
632     ASSERT(0);
633 }
634
635 static const adj_index_t
636 fib_path_attached_next_hop_get_adj (fib_path_t *path,
637                                     vnet_link_t link)
638 {
639     if (vnet_sw_interface_is_p2p(vnet_get_main(),
640                                  path->attached_next_hop.fp_interface))
641     {
642         /*
643          * if the interface is p2p then the adj for the specific
644          * neighbour on that link will never exist. on p2p links
645          * the subnet address (the attached route) links to the
646          * auto-adj (see below), we want that adj here too.
647          */
648         return (adj_nbr_add_or_lock(dpo_proto_to_fib(path->fp_nh_proto),
649                                     link,
650                                     &zero_addr,
651                                     path->attached_next_hop.fp_interface));
652     }
653     else
654     {
655         return (adj_nbr_add_or_lock(dpo_proto_to_fib(path->fp_nh_proto),
656                                     link,
657                                     &path->attached_next_hop.fp_nh,
658                                     path->attached_next_hop.fp_interface));
659     }
660 }
661
662 static void
663 fib_path_attached_next_hop_set (fib_path_t *path)
664 {
665     /*
666      * resolve directly via the adjacnecy discribed by the
667      * interface and next-hop
668      */
669     dpo_set(&path->fp_dpo,
670             DPO_ADJACENCY,
671             path->fp_nh_proto,
672             fib_path_attached_next_hop_get_adj(
673                  path,
674                  dpo_proto_to_link(path->fp_nh_proto)));
675
676     /*
677      * become a child of the adjacency so we receive updates
678      * when its rewrite changes
679      */
680     path->fp_sibling = adj_child_add(path->fp_dpo.dpoi_index,
681                                      FIB_NODE_TYPE_PATH,
682                                      fib_path_get_index(path));
683
684     if (!vnet_sw_interface_is_admin_up(vnet_get_main(),
685                                       path->attached_next_hop.fp_interface) ||
686         !adj_is_up(path->fp_dpo.dpoi_index))
687     {
688         path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
689     }
690 }
691
692 static const adj_index_t
693 fib_path_attached_get_adj (fib_path_t *path,
694                            vnet_link_t link)
695 {
696     if (vnet_sw_interface_is_p2p(vnet_get_main(),
697                                  path->attached.fp_interface))
698     {
699         /*
700          * point-2-point interfaces do not require a glean, since
701          * there is nothing to ARP. Install a rewrite/nbr adj instead
702          */
703         return (adj_nbr_add_or_lock(dpo_proto_to_fib(path->fp_nh_proto),
704                                     link,
705                                     &zero_addr,
706                                     path->attached.fp_interface));
707     }
708     else
709     {
710         return (adj_glean_add_or_lock(dpo_proto_to_fib(path->fp_nh_proto),
711                                       link,
712                                       path->attached.fp_interface,
713                                       NULL));
714     }
715 }
716
717 /*
718  * create of update the paths recursive adj
719  */
720 static void
721 fib_path_recursive_adj_update (fib_path_t *path,
722                                fib_forward_chain_type_t fct,
723                                dpo_id_t *dpo)
724 {
725     dpo_id_t via_dpo = DPO_INVALID;
726
727     /*
728      * get the DPO to resolve through from the via-entry
729      */
730     fib_entry_contribute_forwarding(path->fp_via_fib,
731                                     fct,
732                                     &via_dpo);
733
734
735     /*
736      * hope for the best - clear if restrictions apply.
737      */
738     path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RESOLVED;
739
740     /*
741      * Validate any recursion constraints and over-ride the via
742      * adj if not met
743      */
744     if (path->fp_oper_flags & FIB_PATH_OPER_FLAG_RECURSIVE_LOOP)
745     {
746         path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
747         dpo_copy(&via_dpo, drop_dpo_get(path->fp_nh_proto));
748     }
749     else if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_RESOLVE_HOST)
750     {
751         /*
752          * the via FIB must be a host route.
753          * note the via FIB just added will always be a host route
754          * since it is an RR source added host route. So what we need to
755          * check is whether the route has other sources. If it does then
756          * some other source has added it as a host route. If it doesn't
757          * then it was added only here and inherits forwarding from a cover.
758          * the cover is not a host route.
759          * The RR source is the lowest priority source, so we check if it
760          * is the best. if it is there are no other sources.
761          */
762         if (fib_entry_get_best_source(path->fp_via_fib) >= FIB_SOURCE_RR)
763         {
764             path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
765             dpo_copy(&via_dpo, drop_dpo_get(path->fp_nh_proto));
766
767             /*
768              * PIC edge trigger. let the load-balance maps know
769              */
770             load_balance_map_path_state_change(fib_path_get_index(path));
771         }
772     }
773     else if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_RESOLVE_ATTACHED)
774     {
775         /*
776          * RR source entries inherit the flags from the cover, so
777          * we can check the via directly
778          */
779         if (!(FIB_ENTRY_FLAG_ATTACHED & fib_entry_get_flags(path->fp_via_fib)))
780         {
781             path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
782             dpo_copy(&via_dpo, drop_dpo_get(path->fp_nh_proto));
783
784             /*
785              * PIC edge trigger. let the load-balance maps know
786              */
787             load_balance_map_path_state_change(fib_path_get_index(path));
788         }
789     }
790     /*
791      * check for over-riding factors on the FIB entry itself
792      */
793     if (!fib_entry_is_resolved(path->fp_via_fib))
794     {
795         path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
796         dpo_copy(&via_dpo, drop_dpo_get(path->fp_nh_proto));
797
798         /*
799          * PIC edge trigger. let the load-balance maps know
800          */
801         load_balance_map_path_state_change(fib_path_get_index(path));
802     }
803
804     /*
805      * If this path is contributing a drop, then it's not resolved
806      */
807     if (dpo_is_drop(&via_dpo) || load_balance_is_drop(&via_dpo))
808     {
809         path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
810     }
811
812     /*
813      * update the path's contributed DPO
814      */
815     dpo_copy(dpo, &via_dpo);
816
817     FIB_PATH_DBG(path, "recursive update:");
818
819     dpo_reset(&via_dpo);
820 }
821
822 /*
823  * re-evaulate the forwarding state for a via fmask path
824  */
825 static void
826 fib_path_bier_fmask_update (fib_path_t *path,
827                             dpo_id_t *dpo)
828 {
829     bier_fmask_contribute_forwarding(path->bier_fmask.fp_bier_fmask, dpo);
830
831     /*
832      * if we are stakcing on the drop, then the path is not resolved
833      */
834     if (dpo_is_drop(dpo))
835     {
836         path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
837     }
838     else
839     {
840         path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RESOLVED;
841     }
842 }
843
844 /*
845  * fib_path_is_permanent_drop
846  *
847  * Return !0 if the path is configured to permanently drop,
848  * despite other attributes.
849  */
850 static int
851 fib_path_is_permanent_drop (fib_path_t *path)
852 {
853     return ((path->fp_cfg_flags & FIB_PATH_CFG_FLAG_DROP) ||
854             (path->fp_oper_flags & FIB_PATH_OPER_FLAG_DROP));
855 }
856
857 /*
858  * fib_path_unresolve
859  *
860  * Remove our dependency on the resolution target
861  */
862 static void
863 fib_path_unresolve (fib_path_t *path)
864 {
865     /*
866      * the forced drop path does not need unresolving
867      */
868     if (fib_path_is_permanent_drop(path))
869     {
870         return;
871     }
872
873     switch (path->fp_type)
874     {
875     case FIB_PATH_TYPE_RECURSIVE:
876         if (FIB_NODE_INDEX_INVALID != path->fp_via_fib)
877         {
878             fib_entry_child_remove(path->fp_via_fib,
879                                    path->fp_sibling);
880             fib_table_entry_special_remove(path->recursive.fp_tbl_id,
881                                            fib_entry_get_prefix(path->fp_via_fib),
882                                            FIB_SOURCE_RR);
883             fib_table_unlock(path->recursive.fp_tbl_id,
884                              dpo_proto_to_fib(path->fp_nh_proto),
885                              FIB_SOURCE_RR);
886             path->fp_via_fib = FIB_NODE_INDEX_INVALID;
887         }
888         break;
889     case FIB_PATH_TYPE_BIER_FMASK:
890         bier_fmask_child_remove(path->fp_via_bier_fmask,
891                                 path->fp_sibling);
892         break;
893     case FIB_PATH_TYPE_BIER_IMP:
894         bier_imp_unlock(path->fp_dpo.dpoi_index);
895         break;
896     case FIB_PATH_TYPE_BIER_TABLE:
897         bier_table_ecmp_unlock(path->fp_via_bier_tbl);
898         break;
899     case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
900         adj_child_remove(path->fp_dpo.dpoi_index,
901                          path->fp_sibling);
902         adj_unlock(path->fp_dpo.dpoi_index);
903         break;
904     case FIB_PATH_TYPE_ATTACHED:
905         adj_child_remove(path->fp_dpo.dpoi_index,
906                          path->fp_sibling);
907         adj_unlock(path->fp_dpo.dpoi_index);
908         break;
909     case FIB_PATH_TYPE_UDP_ENCAP:
910         udp_encap_unlock(path->fp_dpo.dpoi_index);
911         break;
912     case FIB_PATH_TYPE_EXCLUSIVE:
913         dpo_reset(&path->exclusive.fp_ex_dpo);
914         break;
915     case FIB_PATH_TYPE_SPECIAL:
916     case FIB_PATH_TYPE_RECEIVE:
917     case FIB_PATH_TYPE_INTF_RX:
918     case FIB_PATH_TYPE_DEAG:
919     case FIB_PATH_TYPE_DVR:
920         /*
921          * these hold only the path's DPO, which is reset below.
922          */
923         break;
924     }
925
926     /*
927      * release the adj we were holding and pick up the
928      * drop just in case.
929      */
930     dpo_reset(&path->fp_dpo);
931     path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
932
933     return;
934 }
935
936 static fib_forward_chain_type_t
937 fib_path_to_chain_type (const fib_path_t *path)
938 {
939     if (DPO_PROTO_MPLS == path->fp_nh_proto)
940     {
941         if (FIB_PATH_TYPE_RECURSIVE == path->fp_type &&
942             MPLS_EOS == path->recursive.fp_nh.fp_eos)
943         {
944             return (FIB_FORW_CHAIN_TYPE_MPLS_EOS);
945         }
946         else
947         {
948             return (FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS);
949         }
950     }
951     else
952     {
953         return (fib_forw_chain_type_from_dpo_proto(path->fp_nh_proto));
954     }
955 }
956
957 /*
958  * fib_path_back_walk_notify
959  *
960  * A back walk has reach this path.
961  */
962 static fib_node_back_walk_rc_t
963 fib_path_back_walk_notify (fib_node_t *node,
964                            fib_node_back_walk_ctx_t *ctx)
965 {
966     fib_path_t *path;
967
968     path = fib_path_from_fib_node(node);
969
970     FIB_PATH_DBG(path, "bw:%U",
971                  format_fib_node_bw_reason, ctx->fnbw_reason);
972
973     switch (path->fp_type)
974     {
975     case FIB_PATH_TYPE_RECURSIVE:
976         if (FIB_NODE_BW_REASON_FLAG_EVALUATE & ctx->fnbw_reason)
977         {
978             /*
979              * modify the recursive adjacency to use the new forwarding
980              * of the via-fib.
981              * this update is visible to packets in flight in the DP.
982              */
983             fib_path_recursive_adj_update(
984                 path,
985                 fib_path_to_chain_type(path),
986                 &path->fp_dpo);
987         }
988         if ((FIB_NODE_BW_REASON_FLAG_ADJ_UPDATE & ctx->fnbw_reason) ||
989             (FIB_NODE_BW_REASON_FLAG_ADJ_DOWN   & ctx->fnbw_reason))
990         {
991             /*
992              * ADJ updates (complete<->incomplete) do not need to propagate to
993              * recursive entries.
994              * The only reason its needed as far back as here, is that the adj
995              * and the incomplete adj are a different DPO type, so the LBs need
996              * to re-stack.
997              * If this walk was quashed in the fib_entry, then any non-fib_path
998              * children (like tunnels that collapse out the LB when they stack)
999              * would not see the update.
1000              */
1001             return (FIB_NODE_BACK_WALK_CONTINUE);
1002         }
1003         break;
1004     case FIB_PATH_TYPE_BIER_FMASK:
1005         if (FIB_NODE_BW_REASON_FLAG_EVALUATE & ctx->fnbw_reason)
1006         {
1007             /*
1008              * update to use the BIER fmask's new forwading
1009              */
1010             fib_path_bier_fmask_update(path, &path->fp_dpo);
1011         }
1012         if ((FIB_NODE_BW_REASON_FLAG_ADJ_UPDATE & ctx->fnbw_reason) ||
1013             (FIB_NODE_BW_REASON_FLAG_ADJ_DOWN   & ctx->fnbw_reason))
1014         {
1015             /*
1016              * ADJ updates (complete<->incomplete) do not need to propagate to
1017              * recursive entries.
1018              * The only reason its needed as far back as here, is that the adj
1019              * and the incomplete adj are a different DPO type, so the LBs need
1020              * to re-stack.
1021              * If this walk was quashed in the fib_entry, then any non-fib_path
1022              * children (like tunnels that collapse out the LB when they stack)
1023              * would not see the update.
1024              */
1025             return (FIB_NODE_BACK_WALK_CONTINUE);
1026         }
1027         break;
1028     case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
1029         /*
1030 FIXME comment
1031          * ADJ_UPDATE backwalk pass silently through here and up to
1032          * the path-list when the multipath adj collapse occurs.
1033          * The reason we do this is that the assumtption is that VPP
1034          * runs in an environment where the Control-Plane is remote
1035          * and hence reacts slowly to link up down. In order to remove
1036          * this down link from the ECMP set quickly, we back-walk.
1037          * VPP also has dedicated CPUs, so we are not stealing resources
1038          * from the CP to do so.
1039          */
1040         if (FIB_NODE_BW_REASON_FLAG_INTERFACE_UP & ctx->fnbw_reason)
1041         {
1042             if (path->fp_oper_flags & FIB_PATH_OPER_FLAG_RESOLVED)
1043             {
1044                 /*
1045                  * alreday resolved. no need to walk back again
1046                  */
1047                 return (FIB_NODE_BACK_WALK_CONTINUE);
1048             }
1049             path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RESOLVED;
1050         }
1051         if (FIB_NODE_BW_REASON_FLAG_INTERFACE_DOWN & ctx->fnbw_reason)
1052         {
1053             if (!(path->fp_oper_flags & FIB_PATH_OPER_FLAG_RESOLVED))
1054             {
1055                 /*
1056                  * alreday unresolved. no need to walk back again
1057                  */
1058                 return (FIB_NODE_BACK_WALK_CONTINUE);
1059             }
1060             path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
1061         }
1062         if (FIB_NODE_BW_REASON_FLAG_INTERFACE_DELETE & ctx->fnbw_reason)
1063         {
1064             /*
1065              * The interface this path resolves through has been deleted.
1066              * This will leave the path in a permanent drop state. The route
1067              * needs to be removed and readded (and hence the path-list deleted)
1068              * before it can forward again.
1069              */
1070             fib_path_unresolve(path);
1071             path->fp_oper_flags |= FIB_PATH_OPER_FLAG_DROP;
1072         }
1073         if (FIB_NODE_BW_REASON_FLAG_ADJ_UPDATE & ctx->fnbw_reason)
1074         {
1075             /*
1076              * restack the DPO to pick up the correct DPO sub-type
1077              */
1078             uword if_is_up;
1079             adj_index_t ai;
1080
1081             if_is_up = vnet_sw_interface_is_admin_up(
1082                            vnet_get_main(),
1083                            path->attached_next_hop.fp_interface);
1084
1085             ai = fib_path_attached_next_hop_get_adj(
1086                      path,
1087                      dpo_proto_to_link(path->fp_nh_proto));
1088
1089             path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
1090             if (if_is_up && adj_is_up(ai))
1091             {
1092                 path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RESOLVED;
1093             }
1094
1095             dpo_set(&path->fp_dpo, DPO_ADJACENCY, path->fp_nh_proto, ai);
1096             adj_unlock(ai);
1097
1098             if (!if_is_up)
1099             {
1100                 /*
1101                  * If the interface is not up there is no reason to walk
1102                  * back to children. if we did they would only evalute
1103                  * that this path is unresolved and hence it would
1104                  * not contribute the adjacency - so it would be wasted
1105                  * CPU time.
1106                  */
1107                 return (FIB_NODE_BACK_WALK_CONTINUE);
1108             }
1109         }
1110         if (FIB_NODE_BW_REASON_FLAG_ADJ_DOWN & ctx->fnbw_reason)
1111         {
1112             if (!(path->fp_oper_flags & FIB_PATH_OPER_FLAG_RESOLVED))
1113             {
1114                 /*
1115                  * alreday unresolved. no need to walk back again
1116                  */
1117                 return (FIB_NODE_BACK_WALK_CONTINUE);
1118             }
1119             /*
1120              * the adj has gone down. the path is no longer resolved.
1121              */
1122             path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
1123         }
1124         break;
1125     case FIB_PATH_TYPE_ATTACHED:
1126     case FIB_PATH_TYPE_DVR:
1127         /*
1128          * FIXME; this could schedule a lower priority walk, since attached
1129          * routes are not usually in ECMP configurations so the backwalk to
1130          * the FIB entry does not need to be high priority
1131          */
1132         if (FIB_NODE_BW_REASON_FLAG_INTERFACE_UP & ctx->fnbw_reason)
1133         {
1134             path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RESOLVED;
1135         }
1136         if (FIB_NODE_BW_REASON_FLAG_INTERFACE_DOWN & ctx->fnbw_reason)
1137         {
1138             path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
1139         }
1140         if (FIB_NODE_BW_REASON_FLAG_INTERFACE_DELETE & ctx->fnbw_reason)
1141         {
1142             fib_path_unresolve(path);
1143             path->fp_oper_flags |= FIB_PATH_OPER_FLAG_DROP;
1144         }
1145         break;
1146     case FIB_PATH_TYPE_UDP_ENCAP:
1147     {
1148         dpo_id_t via_dpo = DPO_INVALID;
1149
1150         /*
1151          * hope for the best - clear if restrictions apply.
1152          */
1153         path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RESOLVED;
1154
1155         udp_encap_contribute_forwarding(path->udp_encap.fp_udp_encap_id,
1156                                         path->fp_nh_proto,
1157                                         &via_dpo);
1158         /*
1159          * If this path is contributing a drop, then it's not resolved
1160          */
1161         if (dpo_is_drop(&via_dpo) || load_balance_is_drop(&via_dpo))
1162         {
1163             path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
1164         }
1165
1166         /*
1167          * update the path's contributed DPO
1168          */
1169         dpo_copy(&path->fp_dpo, &via_dpo);
1170         dpo_reset(&via_dpo);
1171         break;
1172     }
1173     case FIB_PATH_TYPE_INTF_RX:
1174         ASSERT(0);
1175     case FIB_PATH_TYPE_DEAG:
1176         /*
1177          * FIXME When VRF delete is allowed this will need a poke.
1178          */
1179     case FIB_PATH_TYPE_SPECIAL:
1180     case FIB_PATH_TYPE_RECEIVE:
1181     case FIB_PATH_TYPE_EXCLUSIVE:
1182     case FIB_PATH_TYPE_BIER_TABLE:
1183     case FIB_PATH_TYPE_BIER_IMP:
1184         /*
1185          * these path types have no parents. so to be
1186          * walked from one is unexpected.
1187          */
1188         ASSERT(0);
1189         break;
1190     }
1191
1192     /*
1193      * propagate the backwalk further to the path-list
1194      */
1195     fib_path_list_back_walk(path->fp_pl_index, ctx);
1196
1197     return (FIB_NODE_BACK_WALK_CONTINUE);
1198 }
1199
1200 static void
1201 fib_path_memory_show (void)
1202 {
1203     fib_show_memory_usage("Path",
1204                           pool_elts(fib_path_pool),
1205                           pool_len(fib_path_pool),
1206                           sizeof(fib_path_t));
1207 }
1208
1209 /*
1210  * The FIB path's graph node virtual function table
1211  */
1212 static const fib_node_vft_t fib_path_vft = {
1213     .fnv_get = fib_path_get_node,
1214     .fnv_last_lock = fib_path_last_lock_gone,
1215     .fnv_back_walk = fib_path_back_walk_notify,
1216     .fnv_mem_show = fib_path_memory_show,
1217 };
1218
1219 static fib_path_cfg_flags_t
1220 fib_path_route_flags_to_cfg_flags (const fib_route_path_t *rpath)
1221 {
1222     fib_path_cfg_flags_t cfg_flags = FIB_PATH_CFG_FLAG_NONE;
1223
1224     if (rpath->frp_flags & FIB_ROUTE_PATH_RESOLVE_VIA_HOST)
1225         cfg_flags |= FIB_PATH_CFG_FLAG_RESOLVE_HOST;
1226     if (rpath->frp_flags & FIB_ROUTE_PATH_RESOLVE_VIA_ATTACHED)
1227         cfg_flags |= FIB_PATH_CFG_FLAG_RESOLVE_ATTACHED;
1228     if (rpath->frp_flags & FIB_ROUTE_PATH_LOCAL)
1229         cfg_flags |= FIB_PATH_CFG_FLAG_LOCAL;
1230     if (rpath->frp_flags & FIB_ROUTE_PATH_ATTACHED)
1231         cfg_flags |= FIB_PATH_CFG_FLAG_ATTACHED;
1232     if (rpath->frp_flags & FIB_ROUTE_PATH_INTF_RX)
1233         cfg_flags |= FIB_PATH_CFG_FLAG_INTF_RX;
1234     if (rpath->frp_flags & FIB_ROUTE_PATH_RPF_ID)
1235         cfg_flags |= FIB_PATH_CFG_FLAG_RPF_ID;
1236     if (rpath->frp_flags & FIB_ROUTE_PATH_EXCLUSIVE)
1237         cfg_flags |= FIB_PATH_CFG_FLAG_EXCLUSIVE;
1238     if (rpath->frp_flags & FIB_ROUTE_PATH_DROP)
1239         cfg_flags |= FIB_PATH_CFG_FLAG_DROP;
1240     if (rpath->frp_flags & FIB_ROUTE_PATH_SOURCE_LOOKUP)
1241         cfg_flags |= FIB_PATH_CFG_FLAG_DEAG_SRC;
1242
1243     return (cfg_flags);
1244 }
1245
1246 /*
1247  * fib_path_create
1248  *
1249  * Create and initialise a new path object.
1250  * return the index of the path.
1251  */
1252 fib_node_index_t
1253 fib_path_create (fib_node_index_t pl_index,
1254                  const fib_route_path_t *rpath)
1255 {
1256     fib_path_t *path;
1257
1258     pool_get(fib_path_pool, path);
1259     clib_memset(path, 0, sizeof(*path));
1260
1261     fib_node_init(&path->fp_node,
1262                   FIB_NODE_TYPE_PATH);
1263
1264     dpo_reset(&path->fp_dpo);
1265     path->fp_pl_index = pl_index;
1266     path->fp_nh_proto = rpath->frp_proto;
1267     path->fp_via_fib = FIB_NODE_INDEX_INVALID;
1268     path->fp_weight = rpath->frp_weight;
1269     if (0 == path->fp_weight)
1270     {
1271         /*
1272          * a weight of 0 is a meaningless value. We could either reject it, and thus force
1273          * clients to always use 1, or we can accept it and fixup approrpiately.
1274          */
1275         path->fp_weight = 1;
1276     }
1277     path->fp_preference = rpath->frp_preference;
1278     path->fp_cfg_flags = fib_path_route_flags_to_cfg_flags(rpath);
1279
1280     /*
1281      * deduce the path's tpye from the parementers and save what is needed.
1282      */
1283     if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_LOCAL)
1284     {
1285         path->fp_type = FIB_PATH_TYPE_RECEIVE;
1286         path->receive.fp_interface = rpath->frp_sw_if_index;
1287         path->receive.fp_addr = rpath->frp_addr;
1288     }
1289     else if (rpath->frp_flags & FIB_ROUTE_PATH_UDP_ENCAP)
1290     {
1291         path->fp_type = FIB_PATH_TYPE_UDP_ENCAP;
1292         path->udp_encap.fp_udp_encap_id = rpath->frp_udp_encap_id;
1293     }
1294     else if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_INTF_RX)
1295     {
1296         path->fp_type = FIB_PATH_TYPE_INTF_RX;
1297         path->intf_rx.fp_interface = rpath->frp_sw_if_index;
1298     }
1299     else if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_RPF_ID)
1300     {
1301         path->fp_type = FIB_PATH_TYPE_DEAG;
1302         path->deag.fp_tbl_id = rpath->frp_fib_index;
1303         path->deag.fp_rpf_id = rpath->frp_rpf_id;
1304     }
1305     else if (rpath->frp_flags & FIB_ROUTE_PATH_BIER_FMASK)
1306     {
1307         path->fp_type = FIB_PATH_TYPE_BIER_FMASK;
1308         path->bier_fmask.fp_bier_fmask = rpath->frp_bier_fmask;
1309     }
1310     else if (rpath->frp_flags & FIB_ROUTE_PATH_BIER_IMP)
1311     {
1312         path->fp_type = FIB_PATH_TYPE_BIER_IMP;
1313         path->bier_imp.fp_bier_imp = rpath->frp_bier_imp;
1314     }
1315     else if (rpath->frp_flags & FIB_ROUTE_PATH_BIER_TABLE)
1316     {
1317         path->fp_type = FIB_PATH_TYPE_BIER_TABLE;
1318         path->bier_table.fp_bier_tbl = rpath->frp_bier_tbl;
1319     }
1320     else if (rpath->frp_flags & FIB_ROUTE_PATH_DEAG)
1321     {
1322         path->fp_type = FIB_PATH_TYPE_DEAG;
1323         path->deag.fp_tbl_id = rpath->frp_fib_index;
1324     }
1325     else if (rpath->frp_flags & FIB_ROUTE_PATH_DVR)
1326     {
1327         path->fp_type = FIB_PATH_TYPE_DVR;
1328         path->dvr.fp_interface = rpath->frp_sw_if_index;
1329     }
1330     else if (rpath->frp_flags & FIB_ROUTE_PATH_EXCLUSIVE)
1331     {
1332         path->fp_type = FIB_PATH_TYPE_EXCLUSIVE;
1333         dpo_copy(&path->exclusive.fp_ex_dpo, &rpath->dpo);
1334     }
1335     else if (~0 != rpath->frp_sw_if_index)
1336     {
1337         if (ip46_address_is_zero(&rpath->frp_addr))
1338         {
1339             path->fp_type = FIB_PATH_TYPE_ATTACHED;
1340             path->attached.fp_interface = rpath->frp_sw_if_index;
1341         }
1342         else
1343         {
1344             path->fp_type = FIB_PATH_TYPE_ATTACHED_NEXT_HOP;
1345             path->attached_next_hop.fp_interface = rpath->frp_sw_if_index;
1346             path->attached_next_hop.fp_nh = rpath->frp_addr;
1347         }
1348     }
1349     else
1350     {
1351         if (ip46_address_is_zero(&rpath->frp_addr))
1352         {
1353             if (~0 == rpath->frp_fib_index)
1354             {
1355                 path->fp_type = FIB_PATH_TYPE_SPECIAL;
1356             }
1357             else
1358             {
1359                 path->fp_type = FIB_PATH_TYPE_DEAG;
1360                 path->deag.fp_tbl_id = rpath->frp_fib_index;
1361                 path->deag.fp_rpf_id = ~0;
1362             }
1363         }
1364         else
1365         {
1366             path->fp_type = FIB_PATH_TYPE_RECURSIVE;
1367             if (DPO_PROTO_MPLS == path->fp_nh_proto)
1368             {
1369                 path->recursive.fp_nh.fp_local_label = rpath->frp_local_label;
1370                 path->recursive.fp_nh.fp_eos = rpath->frp_eos;
1371             }
1372             else
1373             {
1374                 path->recursive.fp_nh.fp_ip = rpath->frp_addr;
1375             }
1376             path->recursive.fp_tbl_id = rpath->frp_fib_index;
1377         }
1378     }
1379
1380     FIB_PATH_DBG(path, "create");
1381
1382     return (fib_path_get_index(path));
1383 }
1384
1385 /*
1386  * fib_path_create_special
1387  *
1388  * Create and initialise a new path object.
1389  * return the index of the path.
1390  */
1391 fib_node_index_t
1392 fib_path_create_special (fib_node_index_t pl_index,
1393                          dpo_proto_t nh_proto,
1394                          fib_path_cfg_flags_t flags,
1395                          const dpo_id_t *dpo)
1396 {
1397     fib_path_t *path;
1398
1399     pool_get(fib_path_pool, path);
1400     clib_memset(path, 0, sizeof(*path));
1401
1402     fib_node_init(&path->fp_node,
1403                   FIB_NODE_TYPE_PATH);
1404     dpo_reset(&path->fp_dpo);
1405
1406     path->fp_pl_index = pl_index;
1407     path->fp_weight = 1;
1408     path->fp_preference = 0;
1409     path->fp_nh_proto = nh_proto;
1410     path->fp_via_fib = FIB_NODE_INDEX_INVALID;
1411     path->fp_cfg_flags = flags;
1412
1413     if (FIB_PATH_CFG_FLAG_DROP & flags)
1414     {
1415         path->fp_type = FIB_PATH_TYPE_SPECIAL;
1416     }
1417     else if (FIB_PATH_CFG_FLAG_LOCAL & flags)
1418     {
1419         path->fp_type = FIB_PATH_TYPE_RECEIVE;
1420         path->attached.fp_interface = FIB_NODE_INDEX_INVALID;
1421     }
1422     else
1423     {
1424         path->fp_type = FIB_PATH_TYPE_EXCLUSIVE;
1425         ASSERT(NULL != dpo);
1426         dpo_copy(&path->exclusive.fp_ex_dpo, dpo);
1427     }
1428
1429     return (fib_path_get_index(path));
1430 }
1431
1432 /*
1433  * fib_path_copy
1434  *
1435  * Copy a path. return index of new path.
1436  */
1437 fib_node_index_t
1438 fib_path_copy (fib_node_index_t path_index,
1439                fib_node_index_t path_list_index)
1440 {
1441     fib_path_t *path, *orig_path;
1442
1443     pool_get(fib_path_pool, path);
1444
1445     orig_path = fib_path_get(path_index);
1446     ASSERT(NULL != orig_path);
1447
1448     memcpy(path, orig_path, sizeof(*path));
1449
1450     FIB_PATH_DBG(path, "create-copy:%d", path_index);
1451
1452     /*
1453      * reset the dynamic section
1454      */
1455     fib_node_init(&path->fp_node, FIB_NODE_TYPE_PATH);
1456     path->fp_oper_flags     = FIB_PATH_OPER_FLAG_NONE;
1457     path->fp_pl_index  = path_list_index;
1458     path->fp_via_fib   = FIB_NODE_INDEX_INVALID;
1459     clib_memset(&path->fp_dpo, 0, sizeof(path->fp_dpo));
1460     dpo_reset(&path->fp_dpo);
1461
1462     return (fib_path_get_index(path));
1463 }
1464
1465 /*
1466  * fib_path_destroy
1467  *
1468  * destroy a path that is no longer required
1469  */
1470 void
1471 fib_path_destroy (fib_node_index_t path_index)
1472 {
1473     fib_path_t *path;
1474
1475     path = fib_path_get(path_index);
1476
1477     ASSERT(NULL != path);
1478     FIB_PATH_DBG(path, "destroy");
1479
1480     fib_path_unresolve(path);
1481
1482     fib_node_deinit(&path->fp_node);
1483     pool_put(fib_path_pool, path);
1484 }
1485
1486 /*
1487  * fib_path_destroy
1488  *
1489  * destroy a path that is no longer required
1490  */
1491 uword
1492 fib_path_hash (fib_node_index_t path_index)
1493 {
1494     fib_path_t *path;
1495
1496     path = fib_path_get(path_index);
1497
1498     return (hash_memory(STRUCT_MARK_PTR(path, path_hash_start),
1499                         (STRUCT_OFFSET_OF(fib_path_t, path_hash_end) -
1500                          STRUCT_OFFSET_OF(fib_path_t, path_hash_start)),
1501                         0));
1502 }
1503
1504 /*
1505  * fib_path_cmp_i
1506  *
1507  * Compare two paths for equivalence.
1508  */
1509 static int
1510 fib_path_cmp_i (const fib_path_t *path1,
1511                 const fib_path_t *path2)
1512 {
1513     int res;
1514
1515     res = 1;
1516
1517     /*
1518      * paths of different types and protocol are not equal.
1519      * different weights and/or preference only are the same path.
1520      */
1521     if (path1->fp_type != path2->fp_type)
1522     {
1523         res = (path1->fp_type - path2->fp_type);
1524     }
1525     else if (path1->fp_nh_proto != path2->fp_nh_proto)
1526     {
1527         res = (path1->fp_nh_proto - path2->fp_nh_proto);
1528     }
1529     else
1530     {
1531         /*
1532          * both paths are of the same type.
1533          * consider each type and its attributes in turn.
1534          */
1535         switch (path1->fp_type)
1536         {
1537         case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
1538             res = ip46_address_cmp(&path1->attached_next_hop.fp_nh,
1539                                    &path2->attached_next_hop.fp_nh);
1540             if (0 == res) {
1541                 res = (path1->attached_next_hop.fp_interface -
1542                        path2->attached_next_hop.fp_interface);
1543             }
1544             break;
1545         case FIB_PATH_TYPE_ATTACHED:
1546             res = (path1->attached.fp_interface -
1547                    path2->attached.fp_interface);
1548             break;
1549         case FIB_PATH_TYPE_RECURSIVE:
1550             res = ip46_address_cmp(&path1->recursive.fp_nh,
1551                                    &path2->recursive.fp_nh);
1552  
1553             if (0 == res)
1554             {
1555                 res = (path1->recursive.fp_tbl_id - path2->recursive.fp_tbl_id);
1556             }
1557             break;
1558         case FIB_PATH_TYPE_BIER_FMASK:
1559             res = (path1->bier_fmask.fp_bier_fmask -
1560                    path2->bier_fmask.fp_bier_fmask);
1561             break;
1562         case FIB_PATH_TYPE_BIER_IMP:
1563             res = (path1->bier_imp.fp_bier_imp -
1564                    path2->bier_imp.fp_bier_imp);
1565             break;
1566         case FIB_PATH_TYPE_BIER_TABLE:
1567             res = bier_table_id_cmp(&path1->bier_table.fp_bier_tbl,
1568                                     &path2->bier_table.fp_bier_tbl);
1569             break;
1570         case FIB_PATH_TYPE_DEAG:
1571             res = (path1->deag.fp_tbl_id - path2->deag.fp_tbl_id);
1572             if (0 == res)
1573             {
1574                 res = (path1->deag.fp_rpf_id - path2->deag.fp_rpf_id);
1575             }
1576             break;
1577         case FIB_PATH_TYPE_INTF_RX:
1578             res = (path1->intf_rx.fp_interface - path2->intf_rx.fp_interface);
1579             break;
1580         case FIB_PATH_TYPE_UDP_ENCAP:
1581             res = (path1->udp_encap.fp_udp_encap_id - path2->udp_encap.fp_udp_encap_id);
1582             break;
1583         case FIB_PATH_TYPE_DVR:
1584             res = (path1->dvr.fp_interface - path2->dvr.fp_interface);
1585             break;
1586         case FIB_PATH_TYPE_EXCLUSIVE:
1587             res = dpo_cmp(&path1->exclusive.fp_ex_dpo, &path2->exclusive.fp_ex_dpo);
1588             break;
1589         case FIB_PATH_TYPE_SPECIAL:
1590         case FIB_PATH_TYPE_RECEIVE:
1591             res = 0;
1592             break;
1593         }
1594     }
1595     return (res);
1596 }
1597
1598 /*
1599  * fib_path_cmp_for_sort
1600  *
1601  * Compare two paths for equivalence. Used during path sorting.
1602  * As usual 0 means equal.
1603  */
1604 int
1605 fib_path_cmp_for_sort (void * v1,
1606                        void * v2)
1607 {
1608     fib_node_index_t *pi1 = v1, *pi2 = v2;
1609     fib_path_t *path1, *path2;
1610
1611     path1 = fib_path_get(*pi1);
1612     path2 = fib_path_get(*pi2);
1613
1614     /*
1615      * when sorting paths we want the highest preference paths
1616      * first, so that the choices set built is in prefernce order
1617      */
1618     if (path1->fp_preference != path2->fp_preference)
1619     {
1620         return (path1->fp_preference - path2->fp_preference);
1621     }
1622
1623     return (fib_path_cmp_i(path1, path2));
1624 }
1625
1626 /*
1627  * fib_path_cmp
1628  *
1629  * Compare two paths for equivalence.
1630  */
1631 int
1632 fib_path_cmp (fib_node_index_t pi1,
1633               fib_node_index_t pi2)
1634 {
1635     fib_path_t *path1, *path2;
1636
1637     path1 = fib_path_get(pi1);
1638     path2 = fib_path_get(pi2);
1639
1640     return (fib_path_cmp_i(path1, path2));
1641 }
1642
1643 int
1644 fib_path_cmp_w_route_path (fib_node_index_t path_index,
1645                            const fib_route_path_t *rpath)
1646 {
1647     fib_path_t *path;
1648     int res;
1649
1650     path = fib_path_get(path_index);
1651
1652     res = 1;
1653
1654     if (path->fp_weight != rpath->frp_weight)
1655     {
1656         res = (path->fp_weight - rpath->frp_weight);
1657     }
1658     else
1659     {
1660         /*
1661          * both paths are of the same type.
1662          * consider each type and its attributes in turn.
1663          */
1664         switch (path->fp_type)
1665         {
1666         case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
1667             res = ip46_address_cmp(&path->attached_next_hop.fp_nh,
1668                                    &rpath->frp_addr);
1669             if (0 == res)
1670             {
1671                 res = (path->attached_next_hop.fp_interface -
1672                        rpath->frp_sw_if_index);
1673             }
1674             break;
1675         case FIB_PATH_TYPE_ATTACHED:
1676             res = (path->attached.fp_interface - rpath->frp_sw_if_index);
1677             break;
1678         case FIB_PATH_TYPE_RECURSIVE:
1679             if (DPO_PROTO_MPLS == path->fp_nh_proto)
1680             {
1681                 res = path->recursive.fp_nh.fp_local_label - rpath->frp_local_label;
1682
1683                 if (res == 0)
1684                 {
1685                     res = path->recursive.fp_nh.fp_eos - rpath->frp_eos;
1686                 }
1687             }
1688             else
1689             {
1690                 res = ip46_address_cmp(&path->recursive.fp_nh.fp_ip,
1691                                        &rpath->frp_addr);
1692             }
1693
1694             if (0 == res)
1695             {
1696                 res = (path->recursive.fp_tbl_id - rpath->frp_fib_index);
1697             }
1698             break;
1699         case FIB_PATH_TYPE_BIER_FMASK:
1700             res = (path->bier_fmask.fp_bier_fmask - rpath->frp_bier_fmask);
1701             break;
1702         case FIB_PATH_TYPE_BIER_IMP:
1703             res = (path->bier_imp.fp_bier_imp - rpath->frp_bier_imp);
1704             break;
1705         case FIB_PATH_TYPE_BIER_TABLE:
1706             res = bier_table_id_cmp(&path->bier_table.fp_bier_tbl,
1707                                     &rpath->frp_bier_tbl);
1708             break;
1709         case FIB_PATH_TYPE_INTF_RX:
1710             res = (path->intf_rx.fp_interface - rpath->frp_sw_if_index);
1711             break;
1712         case FIB_PATH_TYPE_UDP_ENCAP:
1713             res = (path->udp_encap.fp_udp_encap_id - rpath->frp_udp_encap_id);
1714             break;
1715         case FIB_PATH_TYPE_DEAG:
1716             res = (path->deag.fp_tbl_id - rpath->frp_fib_index);
1717             if (0 == res)
1718             {
1719                 res = (path->deag.fp_rpf_id - rpath->frp_rpf_id);
1720             }
1721             break;
1722         case FIB_PATH_TYPE_DVR:
1723             res = (path->dvr.fp_interface - rpath->frp_sw_if_index);
1724             break;
1725         case FIB_PATH_TYPE_EXCLUSIVE:
1726             res = dpo_cmp(&path->exclusive.fp_ex_dpo, &rpath->dpo);
1727             break;
1728         case FIB_PATH_TYPE_SPECIAL:
1729         case FIB_PATH_TYPE_RECEIVE:
1730             res = 0;
1731             break;
1732         }
1733     }
1734     return (res);
1735 }
1736
1737 /*
1738  * fib_path_recursive_loop_detect
1739  *
1740  * A forward walk of the FIB object graph to detect for a cycle/loop. This
1741  * walk is initiated when an entry is linking to a new path list or from an old.
1742  * The entry vector passed contains all the FIB entrys that are children of this
1743  * path (it is all the entries encountered on the walk so far). If this vector
1744  * contains the entry this path resolve via, then a loop is about to form.
1745  * The loop must be allowed to form, since we need the dependencies in place
1746  * so that we can track when the loop breaks.
1747  * However, we MUST not produce a loop in the forwarding graph (else packets
1748  * would loop around the switch path until the loop breaks), so we mark recursive
1749  * paths as looped so that they do not contribute forwarding information.
1750  * By marking the path as looped, an etry such as;
1751  *    X/Y
1752  *     via a.a.a.a (looped)
1753  *     via b.b.b.b (not looped)
1754  * can still forward using the info provided by b.b.b.b only
1755  */
1756 int
1757 fib_path_recursive_loop_detect (fib_node_index_t path_index,
1758                                 fib_node_index_t **entry_indicies)
1759 {
1760     fib_path_t *path;
1761
1762     path = fib_path_get(path_index);
1763
1764     /*
1765      * the forced drop path is never looped, cos it is never resolved.
1766      */
1767     if (fib_path_is_permanent_drop(path))
1768     {
1769         return (0);
1770     }
1771
1772     switch (path->fp_type)
1773     {
1774     case FIB_PATH_TYPE_RECURSIVE:
1775     {
1776         fib_node_index_t *entry_index, *entries;
1777         int looped = 0;
1778         entries = *entry_indicies;
1779
1780         vec_foreach(entry_index, entries) {
1781             if (*entry_index == path->fp_via_fib)
1782             {
1783                 /*
1784                  * the entry that is about to link to this path-list (or
1785                  * one of this path-list's children) is the same entry that
1786                  * this recursive path resolves through. this is a cycle.
1787                  * abort the walk.
1788                  */
1789                 looped = 1;
1790                 break;
1791             }
1792         }
1793
1794         if (looped)
1795         {
1796             FIB_PATH_DBG(path, "recursive loop formed");
1797             path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RECURSIVE_LOOP;
1798
1799             dpo_copy(&path->fp_dpo, drop_dpo_get(path->fp_nh_proto));
1800         }
1801         else
1802         {
1803             /*
1804              * no loop here yet. keep forward walking the graph.
1805              */     
1806             if (fib_entry_recursive_loop_detect(path->fp_via_fib, entry_indicies))
1807             {
1808                 FIB_PATH_DBG(path, "recursive loop formed");
1809                 path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RECURSIVE_LOOP;
1810             }
1811             else
1812             {
1813                 FIB_PATH_DBG(path, "recursive loop cleared");
1814                 path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RECURSIVE_LOOP;
1815             }
1816         }
1817         break;
1818     }
1819     case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
1820     case FIB_PATH_TYPE_ATTACHED:
1821     case FIB_PATH_TYPE_SPECIAL:
1822     case FIB_PATH_TYPE_DEAG:
1823     case FIB_PATH_TYPE_DVR:
1824     case FIB_PATH_TYPE_RECEIVE:
1825     case FIB_PATH_TYPE_INTF_RX:
1826     case FIB_PATH_TYPE_UDP_ENCAP:
1827     case FIB_PATH_TYPE_EXCLUSIVE:
1828     case FIB_PATH_TYPE_BIER_FMASK:
1829     case FIB_PATH_TYPE_BIER_TABLE:
1830     case FIB_PATH_TYPE_BIER_IMP:
1831         /*
1832          * these path types cannot be part of a loop, since they are the leaves
1833          * of the graph.
1834          */
1835         break;
1836     }
1837
1838     return (fib_path_is_looped(path_index));
1839 }
1840
1841 int
1842 fib_path_resolve (fib_node_index_t path_index)
1843 {
1844     fib_path_t *path;
1845
1846     path = fib_path_get(path_index);
1847
1848     /*
1849      * hope for the best.
1850      */
1851     path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RESOLVED;
1852
1853     /*
1854      * the forced drop path resolves via the drop adj
1855      */
1856     if (fib_path_is_permanent_drop(path))
1857     {
1858         dpo_copy(&path->fp_dpo, drop_dpo_get(path->fp_nh_proto));
1859         path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
1860         return (fib_path_is_resolved(path_index));
1861     }
1862
1863     switch (path->fp_type)
1864     {
1865     case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
1866         fib_path_attached_next_hop_set(path);
1867         break;
1868     case FIB_PATH_TYPE_ATTACHED:
1869         /*
1870          * path->attached.fp_interface
1871          */
1872         if (!vnet_sw_interface_is_admin_up(vnet_get_main(),
1873                                            path->attached.fp_interface))
1874         {
1875             path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
1876         }
1877         dpo_set(&path->fp_dpo,
1878                 DPO_ADJACENCY,
1879                 path->fp_nh_proto,
1880                 fib_path_attached_get_adj(path,
1881                                           dpo_proto_to_link(path->fp_nh_proto)));
1882
1883         /*
1884          * become a child of the adjacency so we receive updates
1885          * when the interface state changes
1886          */
1887         path->fp_sibling = adj_child_add(path->fp_dpo.dpoi_index,
1888                                          FIB_NODE_TYPE_PATH,
1889                                          fib_path_get_index(path));
1890         break;
1891     case FIB_PATH_TYPE_RECURSIVE:
1892     {
1893         /*
1894          * Create a RR source entry in the table for the address
1895          * that this path recurses through.
1896          * This resolve action is recursive, hence we may create
1897          * more paths in the process. more creates mean maybe realloc
1898          * of this path.
1899          */
1900         fib_node_index_t fei;
1901         fib_prefix_t pfx;
1902
1903         ASSERT(FIB_NODE_INDEX_INVALID == path->fp_via_fib);
1904
1905         if (DPO_PROTO_MPLS == path->fp_nh_proto)
1906         {
1907             fib_prefix_from_mpls_label(path->recursive.fp_nh.fp_local_label,
1908                                        path->recursive.fp_nh.fp_eos,
1909                                        &pfx);
1910         }
1911         else
1912         {
1913             fib_prefix_from_ip46_addr(&path->recursive.fp_nh.fp_ip, &pfx);
1914         }
1915
1916         fib_table_lock(path->recursive.fp_tbl_id,
1917                        dpo_proto_to_fib(path->fp_nh_proto),
1918                        FIB_SOURCE_RR);
1919         fei = fib_table_entry_special_add(path->recursive.fp_tbl_id,
1920                                           &pfx,
1921                                           FIB_SOURCE_RR,
1922                                           FIB_ENTRY_FLAG_NONE);
1923
1924         path = fib_path_get(path_index);
1925         path->fp_via_fib = fei;
1926
1927         /*
1928          * become a dependent child of the entry so the path is 
1929          * informed when the forwarding for the entry changes.
1930          */
1931         path->fp_sibling = fib_entry_child_add(path->fp_via_fib,
1932                                                FIB_NODE_TYPE_PATH,
1933                                                fib_path_get_index(path));
1934
1935         /*
1936          * create and configure the IP DPO
1937          */
1938         fib_path_recursive_adj_update(
1939             path,
1940             fib_path_to_chain_type(path),
1941             &path->fp_dpo);
1942
1943         break;
1944     }
1945     case FIB_PATH_TYPE_BIER_FMASK:
1946     {
1947         /*
1948          * become a dependent child of the entry so the path is
1949          * informed when the forwarding for the entry changes.
1950          */
1951         path->fp_sibling = bier_fmask_child_add(path->bier_fmask.fp_bier_fmask,
1952                                                 FIB_NODE_TYPE_PATH,
1953                                                 fib_path_get_index(path));
1954
1955         path->fp_via_bier_fmask = path->bier_fmask.fp_bier_fmask;
1956         fib_path_bier_fmask_update(path, &path->fp_dpo);
1957
1958         break;
1959     }
1960     case FIB_PATH_TYPE_BIER_IMP:
1961         bier_imp_lock(path->bier_imp.fp_bier_imp);
1962         bier_imp_contribute_forwarding(path->bier_imp.fp_bier_imp,
1963                                        DPO_PROTO_IP4,
1964                                        &path->fp_dpo);
1965         break;
1966     case FIB_PATH_TYPE_BIER_TABLE:
1967     {
1968         /*
1969          * Find/create the BIER table to link to
1970          */
1971         ASSERT(FIB_NODE_INDEX_INVALID == path->fp_via_bier_tbl);
1972
1973         path->fp_via_bier_tbl =
1974             bier_table_ecmp_create_and_lock(&path->bier_table.fp_bier_tbl);
1975
1976         bier_table_contribute_forwarding(path->fp_via_bier_tbl,
1977                                          &path->fp_dpo);
1978         break;
1979     }
1980     case FIB_PATH_TYPE_SPECIAL:
1981         /*
1982          * Resolve via the drop
1983          */
1984         dpo_copy(&path->fp_dpo, drop_dpo_get(path->fp_nh_proto));
1985         break;
1986     case FIB_PATH_TYPE_DEAG:
1987     {
1988         if (DPO_PROTO_BIER == path->fp_nh_proto)
1989         {
1990             bier_disp_table_contribute_forwarding(path->deag.fp_tbl_id,
1991                                                   &path->fp_dpo);
1992         }
1993         else
1994         {
1995             /*
1996              * Resolve via a lookup DPO.
1997              * FIXME. control plane should add routes with a table ID
1998              */
1999             lookup_input_t input;
2000             lookup_cast_t cast;
2001
2002             cast = (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_RPF_ID ?
2003                     LOOKUP_MULTICAST :
2004                     LOOKUP_UNICAST);
2005             input = (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_DEAG_SRC ?
2006                      LOOKUP_INPUT_SRC_ADDR :
2007                      LOOKUP_INPUT_DST_ADDR);
2008
2009             lookup_dpo_add_or_lock_w_fib_index(path->deag.fp_tbl_id,
2010                                                path->fp_nh_proto,
2011                                                cast,
2012                                                input,
2013                                                LOOKUP_TABLE_FROM_CONFIG,
2014                                                &path->fp_dpo);
2015         }
2016         break;
2017     }
2018     case FIB_PATH_TYPE_DVR:
2019         dvr_dpo_add_or_lock(path->attached.fp_interface,
2020                             path->fp_nh_proto,
2021                             &path->fp_dpo);
2022         break;
2023     case FIB_PATH_TYPE_RECEIVE:
2024         /*
2025          * Resolve via a receive DPO.
2026          */
2027         receive_dpo_add_or_lock(path->fp_nh_proto,
2028                                 path->receive.fp_interface,
2029                                 &path->receive.fp_addr,
2030                                 &path->fp_dpo);
2031         break;
2032     case FIB_PATH_TYPE_UDP_ENCAP:
2033         udp_encap_lock(path->udp_encap.fp_udp_encap_id);
2034         udp_encap_contribute_forwarding(path->udp_encap.fp_udp_encap_id,
2035                                         path->fp_nh_proto,
2036                                         &path->fp_dpo);
2037         break;
2038     case FIB_PATH_TYPE_INTF_RX: {
2039         /*
2040          * Resolve via a receive DPO.
2041          */
2042         interface_rx_dpo_add_or_lock(path->fp_nh_proto,
2043                                      path->intf_rx.fp_interface,
2044                                      &path->fp_dpo);
2045         break;
2046     }
2047     case FIB_PATH_TYPE_EXCLUSIVE:
2048         /*
2049          * Resolve via the user provided DPO
2050          */
2051         dpo_copy(&path->fp_dpo, &path->exclusive.fp_ex_dpo);
2052         break;
2053     }
2054
2055     return (fib_path_is_resolved(path_index));
2056 }
2057
2058 u32
2059 fib_path_get_resolving_interface (fib_node_index_t path_index)
2060 {
2061     fib_path_t *path;
2062
2063     path = fib_path_get(path_index);
2064
2065     switch (path->fp_type)
2066     {
2067     case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
2068         return (path->attached_next_hop.fp_interface);
2069     case FIB_PATH_TYPE_ATTACHED:
2070         return (path->attached.fp_interface);
2071     case FIB_PATH_TYPE_RECEIVE:
2072         return (path->receive.fp_interface);
2073     case FIB_PATH_TYPE_RECURSIVE:
2074         if (fib_path_is_resolved(path_index))
2075         {
2076             return (fib_entry_get_resolving_interface(path->fp_via_fib));
2077         }
2078         break;
2079     case FIB_PATH_TYPE_DVR:
2080         return (path->dvr.fp_interface);
2081     case FIB_PATH_TYPE_INTF_RX:
2082     case FIB_PATH_TYPE_UDP_ENCAP:
2083     case FIB_PATH_TYPE_SPECIAL:
2084     case FIB_PATH_TYPE_DEAG:
2085     case FIB_PATH_TYPE_EXCLUSIVE:
2086     case FIB_PATH_TYPE_BIER_FMASK:
2087     case FIB_PATH_TYPE_BIER_TABLE:
2088     case FIB_PATH_TYPE_BIER_IMP:
2089         break;
2090     }
2091     return (dpo_get_urpf(&path->fp_dpo));
2092 }
2093
2094 index_t
2095 fib_path_get_resolving_index (fib_node_index_t path_index)
2096 {
2097     fib_path_t *path;
2098
2099     path = fib_path_get(path_index);
2100
2101     switch (path->fp_type)
2102     {
2103     case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
2104     case FIB_PATH_TYPE_ATTACHED:
2105     case FIB_PATH_TYPE_RECEIVE:
2106     case FIB_PATH_TYPE_INTF_RX:
2107     case FIB_PATH_TYPE_SPECIAL:
2108     case FIB_PATH_TYPE_DEAG:
2109     case FIB_PATH_TYPE_DVR:
2110     case FIB_PATH_TYPE_EXCLUSIVE:
2111         break;
2112     case FIB_PATH_TYPE_UDP_ENCAP:
2113         return (path->udp_encap.fp_udp_encap_id);
2114     case FIB_PATH_TYPE_RECURSIVE:
2115         return (path->fp_via_fib);
2116     case FIB_PATH_TYPE_BIER_FMASK:
2117         return (path->bier_fmask.fp_bier_fmask);
2118    case FIB_PATH_TYPE_BIER_TABLE:
2119        return (path->fp_via_bier_tbl);
2120    case FIB_PATH_TYPE_BIER_IMP:
2121        return (path->bier_imp.fp_bier_imp);
2122     }
2123     return (~0);
2124 }
2125
2126 adj_index_t
2127 fib_path_get_adj (fib_node_index_t path_index)
2128 {
2129     fib_path_t *path;
2130
2131     path = fib_path_get(path_index);
2132
2133     ASSERT(dpo_is_adj(&path->fp_dpo));
2134     if (dpo_is_adj(&path->fp_dpo))
2135     {
2136         return (path->fp_dpo.dpoi_index);
2137     }
2138     return (ADJ_INDEX_INVALID);
2139 }
2140
2141 u16
2142 fib_path_get_weight (fib_node_index_t path_index)
2143 {
2144     fib_path_t *path;
2145
2146     path = fib_path_get(path_index);
2147
2148     ASSERT(path);
2149
2150     return (path->fp_weight);
2151 }
2152
2153 u16
2154 fib_path_get_preference (fib_node_index_t path_index)
2155 {
2156     fib_path_t *path;
2157
2158     path = fib_path_get(path_index);
2159
2160     ASSERT(path);
2161
2162     return (path->fp_preference);
2163 }
2164
2165 u32
2166 fib_path_get_rpf_id (fib_node_index_t path_index)
2167 {
2168     fib_path_t *path;
2169
2170     path = fib_path_get(path_index);
2171
2172     ASSERT(path);
2173
2174     if (FIB_PATH_CFG_FLAG_RPF_ID & path->fp_cfg_flags)
2175     {
2176         return (path->deag.fp_rpf_id);
2177     }
2178
2179     return (~0);
2180 }
2181
2182 /**
2183  * @brief Contribute the path's adjacency to the list passed.
2184  * By calling this function over all paths, recursively, a child
2185  * can construct its full set of forwarding adjacencies, and hence its
2186  * uRPF list.
2187  */
2188 void
2189 fib_path_contribute_urpf (fib_node_index_t path_index,
2190                           index_t urpf)
2191 {
2192     fib_path_t *path;
2193
2194     path = fib_path_get(path_index);
2195
2196     /*
2197      * resolved and unresolved paths contribute to the RPF list.
2198      */
2199     switch (path->fp_type)
2200     {
2201     case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
2202         fib_urpf_list_append(urpf, path->attached_next_hop.fp_interface);
2203         break;
2204
2205     case FIB_PATH_TYPE_ATTACHED:
2206         fib_urpf_list_append(urpf, path->attached.fp_interface);
2207         break;
2208
2209     case FIB_PATH_TYPE_RECURSIVE:
2210         if (FIB_NODE_INDEX_INVALID != path->fp_via_fib &&
2211             !fib_path_is_looped(path_index))
2212         {
2213             /*
2214              * there's unresolved due to constraints, and there's unresolved
2215              * due to ain't got no via. can't do nowt w'out via.
2216              */
2217             fib_entry_contribute_urpf(path->fp_via_fib, urpf);
2218         }
2219         break;
2220
2221     case FIB_PATH_TYPE_EXCLUSIVE:
2222     case FIB_PATH_TYPE_SPECIAL:
2223     {
2224         /*
2225          * these path types may link to an adj, if that's what
2226          * the clinet gave
2227          */
2228         u32 rpf_sw_if_index;
2229
2230         rpf_sw_if_index = dpo_get_urpf(&path->fp_dpo);
2231
2232         if (~0 != rpf_sw_if_index)
2233         {
2234             fib_urpf_list_append(urpf, rpf_sw_if_index);
2235         }
2236         break;
2237     }
2238     case FIB_PATH_TYPE_DVR:
2239         fib_urpf_list_append(urpf, path->dvr.fp_interface);
2240         break;
2241     case FIB_PATH_TYPE_DEAG:
2242     case FIB_PATH_TYPE_RECEIVE:
2243     case FIB_PATH_TYPE_INTF_RX:
2244     case FIB_PATH_TYPE_UDP_ENCAP:
2245     case FIB_PATH_TYPE_BIER_FMASK:
2246     case FIB_PATH_TYPE_BIER_TABLE:
2247     case FIB_PATH_TYPE_BIER_IMP:
2248         /*
2249          * these path types don't link to an adj
2250          */
2251         break;
2252     }
2253 }
2254
2255 void
2256 fib_path_stack_mpls_disp (fib_node_index_t path_index,
2257                           dpo_proto_t payload_proto,
2258                           fib_mpls_lsp_mode_t mode,
2259                           dpo_id_t *dpo)
2260 {
2261     fib_path_t *path;
2262
2263     path = fib_path_get(path_index);
2264
2265     ASSERT(path);
2266
2267     switch (path->fp_type)
2268     {
2269     case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
2270     {
2271         dpo_id_t tmp = DPO_INVALID;
2272
2273         dpo_copy(&tmp, dpo);
2274
2275         mpls_disp_dpo_create(payload_proto, ~0, mode, &tmp, dpo);
2276         dpo_reset(&tmp);
2277         break;
2278     }                
2279     case FIB_PATH_TYPE_DEAG:
2280     {
2281         dpo_id_t tmp = DPO_INVALID;
2282
2283         dpo_copy(&tmp, dpo);
2284
2285         mpls_disp_dpo_create(payload_proto,
2286                              path->deag.fp_rpf_id,
2287                              mode, &tmp, dpo);
2288         dpo_reset(&tmp);
2289         break;
2290     }
2291     case FIB_PATH_TYPE_RECEIVE:
2292     case FIB_PATH_TYPE_ATTACHED:
2293     case FIB_PATH_TYPE_RECURSIVE:
2294     case FIB_PATH_TYPE_INTF_RX:
2295     case FIB_PATH_TYPE_UDP_ENCAP:
2296     case FIB_PATH_TYPE_EXCLUSIVE:
2297     case FIB_PATH_TYPE_SPECIAL:
2298     case FIB_PATH_TYPE_BIER_FMASK:
2299     case FIB_PATH_TYPE_BIER_TABLE:
2300     case FIB_PATH_TYPE_BIER_IMP:
2301     case FIB_PATH_TYPE_DVR:
2302         break;
2303     }
2304 }
2305
2306 void
2307 fib_path_contribute_forwarding (fib_node_index_t path_index,
2308                                 fib_forward_chain_type_t fct,
2309                                 dpo_id_t *dpo)
2310 {
2311     fib_path_t *path;
2312
2313     path = fib_path_get(path_index);
2314
2315     ASSERT(path);
2316     ASSERT(FIB_FORW_CHAIN_TYPE_MPLS_EOS != fct);
2317
2318     /*
2319      * The DPO stored in the path was created when the path was resolved.
2320      * This then represents the path's 'native' protocol; IP.
2321      * For all others will need to go find something else.
2322      */
2323     if (fib_path_to_chain_type(path) == fct)
2324     {
2325         dpo_copy(dpo, &path->fp_dpo);
2326     }
2327     else
2328     {
2329         switch (path->fp_type)
2330         {
2331         case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
2332             switch (fct)
2333             {
2334             case FIB_FORW_CHAIN_TYPE_UNICAST_IP4:
2335             case FIB_FORW_CHAIN_TYPE_UNICAST_IP6:
2336             case FIB_FORW_CHAIN_TYPE_MPLS_EOS:
2337             case FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS:
2338             case FIB_FORW_CHAIN_TYPE_ETHERNET:
2339             case FIB_FORW_CHAIN_TYPE_NSH:
2340             case FIB_FORW_CHAIN_TYPE_MCAST_IP4:
2341             case FIB_FORW_CHAIN_TYPE_MCAST_IP6:
2342             {
2343                 adj_index_t ai;
2344
2345                 /*
2346                  * get a appropriate link type adj.
2347                  */
2348                 ai = fib_path_attached_next_hop_get_adj(
2349                          path,
2350                          fib_forw_chain_type_to_link_type(fct));
2351                 dpo_set(dpo, DPO_ADJACENCY,
2352                         fib_forw_chain_type_to_dpo_proto(fct), ai);
2353                 adj_unlock(ai);
2354
2355                 break;
2356             }
2357             case FIB_FORW_CHAIN_TYPE_BIER:
2358                 break;
2359             }
2360             break;
2361         case FIB_PATH_TYPE_RECURSIVE:
2362             switch (fct)
2363             {
2364             case FIB_FORW_CHAIN_TYPE_MPLS_EOS:
2365             case FIB_FORW_CHAIN_TYPE_UNICAST_IP4:
2366             case FIB_FORW_CHAIN_TYPE_UNICAST_IP6:
2367             case FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS:
2368             case FIB_FORW_CHAIN_TYPE_MCAST_IP4:
2369             case FIB_FORW_CHAIN_TYPE_MCAST_IP6:
2370             case FIB_FORW_CHAIN_TYPE_BIER:
2371                 fib_path_recursive_adj_update(path, fct, dpo);
2372                 break;
2373             case FIB_FORW_CHAIN_TYPE_ETHERNET:
2374             case FIB_FORW_CHAIN_TYPE_NSH:
2375                 ASSERT(0);
2376                 break;
2377             }
2378             break;
2379         case FIB_PATH_TYPE_BIER_TABLE:
2380             switch (fct)
2381             {
2382             case FIB_FORW_CHAIN_TYPE_BIER:
2383                 bier_table_contribute_forwarding(path->fp_via_bier_tbl, dpo);
2384                 break;
2385             case FIB_FORW_CHAIN_TYPE_MPLS_EOS:
2386             case FIB_FORW_CHAIN_TYPE_UNICAST_IP4:
2387             case FIB_FORW_CHAIN_TYPE_UNICAST_IP6:
2388             case FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS:
2389             case FIB_FORW_CHAIN_TYPE_MCAST_IP4:
2390             case FIB_FORW_CHAIN_TYPE_MCAST_IP6:
2391             case FIB_FORW_CHAIN_TYPE_ETHERNET:
2392             case FIB_FORW_CHAIN_TYPE_NSH:
2393                 ASSERT(0);
2394                 break;
2395             }
2396             break;
2397         case FIB_PATH_TYPE_BIER_FMASK:
2398             switch (fct)
2399             {
2400             case FIB_FORW_CHAIN_TYPE_BIER:
2401                 fib_path_bier_fmask_update(path, dpo);
2402                 break;
2403             case FIB_FORW_CHAIN_TYPE_MPLS_EOS:
2404             case FIB_FORW_CHAIN_TYPE_UNICAST_IP4:
2405             case FIB_FORW_CHAIN_TYPE_UNICAST_IP6:
2406             case FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS:
2407             case FIB_FORW_CHAIN_TYPE_MCAST_IP4:
2408             case FIB_FORW_CHAIN_TYPE_MCAST_IP6:
2409             case FIB_FORW_CHAIN_TYPE_ETHERNET:
2410             case FIB_FORW_CHAIN_TYPE_NSH:
2411                 ASSERT(0);
2412                 break;
2413             }
2414             break;
2415         case FIB_PATH_TYPE_BIER_IMP:
2416             bier_imp_contribute_forwarding(path->bier_imp.fp_bier_imp,
2417                                            fib_forw_chain_type_to_dpo_proto(fct),
2418                                            dpo);
2419             break;
2420         case FIB_PATH_TYPE_DEAG:
2421             switch (fct)
2422             {
2423             case FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS:
2424                 lookup_dpo_add_or_lock_w_table_id(MPLS_FIB_DEFAULT_TABLE_ID,
2425                                                   DPO_PROTO_MPLS,
2426                                                   LOOKUP_UNICAST,
2427                                                   LOOKUP_INPUT_DST_ADDR,
2428                                                   LOOKUP_TABLE_FROM_CONFIG,
2429                                                   dpo);
2430                 break;
2431             case FIB_FORW_CHAIN_TYPE_MPLS_EOS:
2432             case FIB_FORW_CHAIN_TYPE_UNICAST_IP4:
2433             case FIB_FORW_CHAIN_TYPE_UNICAST_IP6:
2434                 dpo_copy(dpo, &path->fp_dpo);
2435                 break;
2436             case FIB_FORW_CHAIN_TYPE_MCAST_IP4:
2437             case FIB_FORW_CHAIN_TYPE_MCAST_IP6:
2438             case FIB_FORW_CHAIN_TYPE_BIER:
2439                 break;
2440             case FIB_FORW_CHAIN_TYPE_ETHERNET:
2441             case FIB_FORW_CHAIN_TYPE_NSH:
2442                 ASSERT(0);
2443                 break;
2444             }
2445             break;
2446         case FIB_PATH_TYPE_EXCLUSIVE:
2447             dpo_copy(dpo, &path->exclusive.fp_ex_dpo);
2448             break;
2449         case FIB_PATH_TYPE_ATTACHED:
2450             switch (fct)
2451             {
2452             case FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS:
2453             case FIB_FORW_CHAIN_TYPE_UNICAST_IP4:
2454             case FIB_FORW_CHAIN_TYPE_UNICAST_IP6:
2455             case FIB_FORW_CHAIN_TYPE_MPLS_EOS:
2456             case FIB_FORW_CHAIN_TYPE_ETHERNET:
2457             case FIB_FORW_CHAIN_TYPE_NSH:
2458             case FIB_FORW_CHAIN_TYPE_BIER:
2459                 {
2460                     adj_index_t ai;
2461
2462                     /*
2463                      * get a appropriate link type adj.
2464                      */
2465                     ai = fib_path_attached_get_adj(
2466                             path,
2467                             fib_forw_chain_type_to_link_type(fct));
2468                     dpo_set(dpo, DPO_ADJACENCY,
2469                             fib_forw_chain_type_to_dpo_proto(fct), ai);
2470                     adj_unlock(ai);
2471                     break;
2472                 }
2473             case FIB_FORW_CHAIN_TYPE_MCAST_IP4:
2474             case FIB_FORW_CHAIN_TYPE_MCAST_IP6:
2475                 {
2476                     adj_index_t ai;
2477
2478                     /*
2479                      * Create the adj needed for sending IP multicast traffic
2480                      */
2481                     ai = adj_mcast_add_or_lock(dpo_proto_to_fib(path->fp_nh_proto),
2482                                                fib_forw_chain_type_to_link_type(fct),
2483                                                path->attached.fp_interface);
2484                     dpo_set(dpo, DPO_ADJACENCY,
2485                             fib_forw_chain_type_to_dpo_proto(fct),
2486                             ai);
2487                     adj_unlock(ai);
2488                 }
2489                 break;
2490             }
2491             break;
2492         case FIB_PATH_TYPE_INTF_RX:
2493             /*
2494              * Create the adj needed for sending IP multicast traffic
2495              */
2496             interface_rx_dpo_add_or_lock(fib_forw_chain_type_to_dpo_proto(fct),
2497                                          path->attached.fp_interface,
2498                                          dpo);
2499             break;
2500         case FIB_PATH_TYPE_UDP_ENCAP:
2501             udp_encap_contribute_forwarding(path->udp_encap.fp_udp_encap_id,
2502                                             path->fp_nh_proto,
2503                                             dpo);
2504             break;
2505         case FIB_PATH_TYPE_RECEIVE:
2506         case FIB_PATH_TYPE_SPECIAL:
2507         case FIB_PATH_TYPE_DVR:
2508             dpo_copy(dpo, &path->fp_dpo);
2509             break;
2510         }
2511     }
2512 }
2513
2514 load_balance_path_t *
2515 fib_path_append_nh_for_multipath_hash (fib_node_index_t path_index,
2516                                        fib_forward_chain_type_t fct,
2517                                        load_balance_path_t *hash_key)
2518 {
2519     load_balance_path_t *mnh;
2520     fib_path_t *path;
2521
2522     path = fib_path_get(path_index);
2523
2524     ASSERT(path);
2525
2526     vec_add2(hash_key, mnh, 1);
2527
2528     mnh->path_weight = path->fp_weight;
2529     mnh->path_index = path_index;
2530
2531     if (fib_path_is_resolved(path_index))
2532     {
2533         fib_path_contribute_forwarding(path_index, fct, &mnh->path_dpo);
2534     }
2535     else
2536     {
2537         dpo_copy(&mnh->path_dpo,
2538                  drop_dpo_get(fib_forw_chain_type_to_dpo_proto(fct)));
2539     }
2540     return (hash_key);
2541 }
2542
2543 int
2544 fib_path_is_recursive_constrained (fib_node_index_t path_index)
2545 {
2546     fib_path_t *path;
2547
2548     path = fib_path_get(path_index);
2549
2550     return ((FIB_PATH_TYPE_RECURSIVE == path->fp_type) &&
2551             ((path->fp_cfg_flags & FIB_PATH_CFG_FLAG_RESOLVE_ATTACHED) ||
2552              (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_RESOLVE_HOST)));
2553 }
2554
2555 int
2556 fib_path_is_exclusive (fib_node_index_t path_index)
2557 {
2558     fib_path_t *path;
2559
2560     path = fib_path_get(path_index);
2561
2562     return (FIB_PATH_TYPE_EXCLUSIVE == path->fp_type);
2563 }
2564
2565 int
2566 fib_path_is_deag (fib_node_index_t path_index)
2567 {
2568     fib_path_t *path;
2569
2570     path = fib_path_get(path_index);
2571
2572     return (FIB_PATH_TYPE_DEAG == path->fp_type);
2573 }
2574
2575 int
2576 fib_path_is_resolved (fib_node_index_t path_index)
2577 {
2578     fib_path_t *path;
2579
2580     path = fib_path_get(path_index);
2581
2582     return (dpo_id_is_valid(&path->fp_dpo) &&
2583             (path->fp_oper_flags & FIB_PATH_OPER_FLAG_RESOLVED) &&
2584             !fib_path_is_looped(path_index) &&
2585             !fib_path_is_permanent_drop(path));
2586 }
2587
2588 int
2589 fib_path_is_looped (fib_node_index_t path_index)
2590 {
2591     fib_path_t *path;
2592
2593     path = fib_path_get(path_index);
2594
2595     return (path->fp_oper_flags & FIB_PATH_OPER_FLAG_RECURSIVE_LOOP);
2596 }
2597
2598 fib_path_list_walk_rc_t
2599 fib_path_encode (fib_node_index_t path_list_index,
2600                  fib_node_index_t path_index,
2601                  void *ctx)
2602 {
2603     fib_route_path_encode_t **api_rpaths = ctx;
2604     fib_route_path_encode_t *api_rpath;
2605     fib_path_t *path;
2606
2607     path = fib_path_get(path_index);
2608     if (!path)
2609       return (FIB_PATH_LIST_WALK_CONTINUE);
2610     vec_add2(*api_rpaths, api_rpath, 1);
2611     api_rpath->rpath.frp_weight = path->fp_weight;
2612     api_rpath->rpath.frp_preference = path->fp_preference;
2613     api_rpath->rpath.frp_proto = path->fp_nh_proto;
2614     api_rpath->rpath.frp_sw_if_index = ~0;
2615     api_rpath->rpath.frp_fib_index = 0;
2616     api_rpath->dpo = path->fp_dpo;
2617
2618     switch (path->fp_type)
2619       {
2620       case FIB_PATH_TYPE_RECEIVE:
2621         api_rpath->rpath.frp_addr = path->receive.fp_addr;
2622         api_rpath->rpath.frp_sw_if_index = path->receive.fp_interface;
2623         break;
2624       case FIB_PATH_TYPE_ATTACHED:
2625         api_rpath->rpath.frp_sw_if_index = path->attached.fp_interface;
2626         break;
2627       case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
2628         api_rpath->rpath.frp_sw_if_index = path->attached_next_hop.fp_interface;
2629         api_rpath->rpath.frp_addr = path->attached_next_hop.fp_nh;
2630         break;
2631       case FIB_PATH_TYPE_BIER_FMASK:
2632         api_rpath->rpath.frp_bier_fmask = path->bier_fmask.fp_bier_fmask;
2633         break;
2634       case FIB_PATH_TYPE_SPECIAL:
2635         break;
2636       case FIB_PATH_TYPE_DEAG:
2637         api_rpath->rpath.frp_fib_index = path->deag.fp_tbl_id;
2638         break;
2639       case FIB_PATH_TYPE_RECURSIVE:
2640         api_rpath->rpath.frp_addr = path->recursive.fp_nh.fp_ip;
2641         api_rpath->rpath.frp_fib_index = path->recursive.fp_tbl_id;
2642         break;
2643       case FIB_PATH_TYPE_DVR:
2644           api_rpath->rpath.frp_sw_if_index = path->dvr.fp_interface;
2645           api_rpath->rpath.frp_flags |= FIB_ROUTE_PATH_DVR;
2646           break;
2647       case FIB_PATH_TYPE_UDP_ENCAP:
2648           api_rpath->rpath.frp_udp_encap_id = path->udp_encap.fp_udp_encap_id;
2649           api_rpath->rpath.frp_flags |= FIB_ROUTE_PATH_UDP_ENCAP;
2650           break;
2651       default:
2652         break;
2653       }
2654
2655     return (FIB_PATH_LIST_WALK_CONTINUE);
2656 }
2657
2658 dpo_proto_t
2659 fib_path_get_proto (fib_node_index_t path_index)
2660 {
2661     fib_path_t *path;
2662
2663     path = fib_path_get(path_index);
2664
2665     return (path->fp_nh_proto);
2666 }
2667
2668 void
2669 fib_path_module_init (void)
2670 {
2671     fib_node_register_type (FIB_NODE_TYPE_PATH, &fib_path_vft);
2672     fib_path_logger = vlib_log_register_class ("fib", "path");
2673 }
2674
2675 static clib_error_t *
2676 show_fib_path_command (vlib_main_t * vm,
2677                         unformat_input_t * input,
2678                         vlib_cli_command_t * cmd)
2679 {
2680     fib_node_index_t pi;
2681     fib_path_t *path;
2682
2683     if (unformat (input, "%d", &pi))
2684     {
2685         /*
2686          * show one in detail
2687          */
2688         if (!pool_is_free_index(fib_path_pool, pi))
2689         {
2690             path = fib_path_get(pi);
2691             u8 *s = format(NULL, "%U", format_fib_path, pi, 1,
2692                            FIB_PATH_FORMAT_FLAGS_NONE);
2693             s = format(s, "children:");
2694             s = fib_node_children_format(path->fp_node.fn_children, s);
2695             vlib_cli_output (vm, "%s", s);
2696             vec_free(s);
2697         }
2698         else
2699         {
2700             vlib_cli_output (vm, "path %d invalid", pi);
2701         }
2702     }
2703     else
2704     {
2705         vlib_cli_output (vm, "FIB Paths");
2706         pool_foreach_index (pi, fib_path_pool,
2707         ({
2708             vlib_cli_output (vm, "%U", format_fib_path, pi, 0,
2709                              FIB_PATH_FORMAT_FLAGS_NONE);
2710         }));
2711     }
2712
2713     return (NULL);
2714 }
2715
2716 VLIB_CLI_COMMAND (show_fib_path, static) = {
2717   .path = "show fib paths",
2718   .function = show_fib_path_command,
2719   .short_help = "show fib paths",
2720 };