src/vnet/fib/fib_path.c [vpp.git]
1 /*
2  * Copyright (c) 2016 Cisco and/or its affiliates.
3  * Licensed under the Apache License, Version 2.0 (the "License");
4  * you may not use this file except in compliance with the License.
5  * You may obtain a copy of the License at:
6  *
7  *     http://www.apache.org/licenses/LICENSE-2.0
8  *
9  * Unless required by applicable law or agreed to in writing, software
10  * distributed under the License is distributed on an "AS IS" BASIS,
11  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12  * See the License for the specific language governing permissions and
13  * limitations under the License.
14  */
15
16 #include <vlib/vlib.h>
17 #include <vnet/vnet.h>
18 #include <vnet/ip/format.h>
19 #include <vnet/ip/ip.h>
20 #include <vnet/dpo/drop_dpo.h>
21 #include <vnet/dpo/receive_dpo.h>
22 #include <vnet/dpo/load_balance_map.h>
23 #include <vnet/dpo/lookup_dpo.h>
24 #include <vnet/dpo/interface_rx_dpo.h>
25 #include <vnet/dpo/mpls_disposition.h>
26 #include <vnet/dpo/dvr_dpo.h>
27 #include <vnet/dpo/ip_null_dpo.h>
28 #include <vnet/dpo/classify_dpo.h>
29 #include <vnet/dpo/pw_cw.h>
30
31 #include <vnet/adj/adj.h>
32 #include <vnet/adj/adj_mcast.h>
33
34 #include <vnet/fib/fib_path.h>
35 #include <vnet/fib/fib_node.h>
36 #include <vnet/fib/fib_table.h>
37 #include <vnet/fib/fib_entry.h>
38 #include <vnet/fib/fib_path_list.h>
39 #include <vnet/fib/fib_internal.h>
40 #include <vnet/fib/fib_urpf_list.h>
41 #include <vnet/fib/mpls_fib.h>
42 #include <vnet/fib/fib_path_ext.h>
43 #include <vnet/udp/udp_encap.h>
44 #include <vnet/bier/bier_fmask.h>
45 #include <vnet/bier/bier_table.h>
46 #include <vnet/bier/bier_imp.h>
47 #include <vnet/bier/bier_disp_table.h>
48
49 /**
50  * Enumeration of path types
51  */
52 typedef enum fib_path_type_t_ {
53     /**
54      * Marker. Add new types after this one.
55      */
56     FIB_PATH_TYPE_FIRST = 0,
57     /**
58      * Attached-nexthop. An interface and a nexthop are known.
59      */
60     FIB_PATH_TYPE_ATTACHED_NEXT_HOP = FIB_PATH_TYPE_FIRST,
61     /**
62      * attached. Only the interface is known.
63      */
64     FIB_PATH_TYPE_ATTACHED,
65     /**
66      * recursive. Only the next-hop is known.
67      */
68     FIB_PATH_TYPE_RECURSIVE,
69     /**
70      * special. nothing is known. so we drop.
71      */
72     FIB_PATH_TYPE_SPECIAL,
73     /**
74      * exclusive. user provided adj.
75      */
76     FIB_PATH_TYPE_EXCLUSIVE,
77     /**
78      * deag. Link to a lookup adj in the next table
79      */
80     FIB_PATH_TYPE_DEAG,
81     /**
82      * interface receive.
83      */
84     FIB_PATH_TYPE_INTF_RX,
85     /**
86      * Path resolves via a UDP encap object.
87      */
88     FIB_PATH_TYPE_UDP_ENCAP,
89     /**
90      * receive. it's for-us.
91      */
92     FIB_PATH_TYPE_RECEIVE,
93     /**
94      * bier-imp. it's via a BIER imposition.
95      */
96     FIB_PATH_TYPE_BIER_IMP,
97     /**
98      * bier-table. it's via a BIER ECMP-table.
99      */
100     FIB_PATH_TYPE_BIER_TABLE,
101     /**
102      * bier-fmask. it's via a BIER f-mask.
103      */
104     FIB_PATH_TYPE_BIER_FMASK,
105     /**
106      * via a DVR.
107      */
108     FIB_PATH_TYPE_DVR,
109     /**
110      * Marker. Add new types before this one, then update it.
111      */
112     FIB_PATH_TYPE_LAST = FIB_PATH_TYPE_DVR,
113 } __attribute__ ((packed)) fib_path_type_t;
114
115 /**
116  * The maximum number of path_types
117  */
118 #define FIB_PATH_TYPE_MAX (FIB_PATH_TYPE_LAST + 1)
119
120 #define FIB_PATH_TYPES {                                        \
121     [FIB_PATH_TYPE_ATTACHED_NEXT_HOP] = "attached-nexthop",     \
122     [FIB_PATH_TYPE_ATTACHED]          = "attached",             \
123     [FIB_PATH_TYPE_RECURSIVE]         = "recursive",            \
124     [FIB_PATH_TYPE_SPECIAL]           = "special",              \
125     [FIB_PATH_TYPE_EXCLUSIVE]         = "exclusive",            \
126     [FIB_PATH_TYPE_DEAG]              = "deag",                 \
127     [FIB_PATH_TYPE_INTF_RX]           = "intf-rx",              \
128     [FIB_PATH_TYPE_UDP_ENCAP]         = "udp-encap",            \
129     [FIB_PATH_TYPE_RECEIVE]           = "receive",              \
130     [FIB_PATH_TYPE_BIER_IMP]          = "bier-imp",             \
131     [FIB_PATH_TYPE_BIER_TABLE]        = "bier-table",           \
132     [FIB_PATH_TYPE_BIER_FMASK]        = "bier-fmask",           \
133     [FIB_PATH_TYPE_DVR]               = "dvr",                  \
134 }
135
136 #define FOR_EACH_FIB_PATH_TYPE(_item)           \
137     for (_item = FIB_PATH_TYPE_FIRST;           \
138          _item <= FIB_PATH_TYPE_LAST;           \
139          _item++)
140
141 /**
142  * Enumeration of path operational (i.e. derived) attributes
143  */
144 typedef enum fib_path_oper_attribute_t_ {
145     /**
146      * Marker. Add new types after this one.
147      */
148     FIB_PATH_OPER_ATTRIBUTE_FIRST = 0,
149     /**
150      * The path forms part of a recursive loop.
151      */
152     FIB_PATH_OPER_ATTRIBUTE_RECURSIVE_LOOP = FIB_PATH_OPER_ATTRIBUTE_FIRST,
153     /**
154      * The path is resolved
155      */
156     FIB_PATH_OPER_ATTRIBUTE_RESOLVED,
157     /**
158      * The path is attached, despite what the next-hop may say.
159      */
160     FIB_PATH_OPER_ATTRIBUTE_ATTACHED,
161     /**
162      * The path has become a permanent drop.
163      */
164     FIB_PATH_OPER_ATTRIBUTE_DROP,
165     /**
166      * Marker. Add new types before this one, then update it.
167      */
168     FIB_PATH_OPER_ATTRIBUTE_LAST = FIB_PATH_OPER_ATTRIBUTE_DROP,
169 } __attribute__ ((packed)) fib_path_oper_attribute_t;
170
171 /**
172  * The maximum number of path operational attributes
173  */
174 #define FIB_PATH_OPER_ATTRIBUTE_MAX (FIB_PATH_OPER_ATTRIBUTE_LAST + 1)
175
176 #define FIB_PATH_OPER_ATTRIBUTES {                                      \
177     [FIB_PATH_OPER_ATTRIBUTE_RECURSIVE_LOOP] = "recursive-loop",        \
178     [FIB_PATH_OPER_ATTRIBUTE_RESOLVED]       = "resolved",              \
    [FIB_PATH_OPER_ATTRIBUTE_ATTACHED]       = "attached",              \
179     [FIB_PATH_OPER_ATTRIBUTE_DROP]           = "drop",                  \
180 }
181
182 #define FOR_EACH_FIB_PATH_OPER_ATTRIBUTE(_item) \
183     for (_item = FIB_PATH_OPER_ATTRIBUTE_FIRST; \
184          _item <= FIB_PATH_OPER_ATTRIBUTE_LAST; \
185          _item++)
186
187 /**
188  * Path flags from the attributes
189  */
190 typedef enum fib_path_oper_flags_t_ {
191     FIB_PATH_OPER_FLAG_NONE = 0,
192     FIB_PATH_OPER_FLAG_RECURSIVE_LOOP = (1 << FIB_PATH_OPER_ATTRIBUTE_RECURSIVE_LOOP),
193     FIB_PATH_OPER_FLAG_DROP = (1 << FIB_PATH_OPER_ATTRIBUTE_DROP),
194     FIB_PATH_OPER_FLAG_RESOLVED = (1 << FIB_PATH_OPER_ATTRIBUTE_RESOLVED),
195     FIB_PATH_OPER_FLAG_ATTACHED = (1 << FIB_PATH_OPER_ATTRIBUTE_ATTACHED),
196 } __attribute__ ((packed)) fib_path_oper_flags_t;
197
198 /**
199  * A FIB path
200  */
201 typedef struct fib_path_t_ {
202     /**
203      * A path is a node in the FIB graph.
204      */
205     fib_node_t fp_node;
206
207     /**
208      * The index of the path-list to which this path belongs
209      */
210     u32 fp_pl_index;
211
212     /**
213      * This marks the start of the memory area used to hash
214      * the path
215      */
216     STRUCT_MARK(path_hash_start);
217
218     /**
219      * Configuration Flags
220      */
221     fib_path_cfg_flags_t fp_cfg_flags;
222
223     /**
224      * The type of the path. This is the selector for the union
225      */
226     fib_path_type_t fp_type;
227
228     /**
229      * The protocol of the next-hop, i.e. the address family of the
230      * next-hop's address. We can't derive this from the address itself
231      * since the address can be all zeros
232      */
233     dpo_proto_t fp_nh_proto;
234
235     /**
236      * UCMP [unnormalised] weight
237      */
238     u8 fp_weight;
239
240     /**
241      * A path preference. 0 is the best.
242      * Only paths of the best preference, that are 'up', are considered
243      * for forwarding.
244      */
245     u8 fp_preference;
246
247     /**
248      * per-type union of the data required to resolve the path
249      */
250     union {
251         struct {
252             /**
253              * The next-hop
254              */
255             ip46_address_t fp_nh;
256             /**
257              * The interface
258              */
259             u32 fp_interface;
260         } attached_next_hop;
261         struct {
262             /**
263              * The interface
264              */
265             u32 fp_interface;
266         } attached;
267         struct {
268             union
269             {
270                 /**
271                  * The next-hop
272                  */
273                 ip46_address_t fp_ip;
274                 struct {
275                     /**
276                      * The local label to resolve through.
277                      */
278                     mpls_label_t fp_local_label;
279                     /**
280                      * The EOS bit of the resolving label
281                      */
282                     mpls_eos_bit_t fp_eos;
283                 };
284             } fp_nh;
285             union {
286                 /**
287                  * The FIB table index in which to find the next-hop.
288                  */
289                 fib_node_index_t fp_tbl_id;
290                 /**
291                  * The BIER FIB the fmask is in
292                  */
293                 index_t fp_bier_fib;
294             };
295         } recursive;
296         struct {
297             /**
298              * BIER FMask ID
299              */
300             index_t fp_bier_fmask;
301         } bier_fmask;
302         struct {
303             /**
304              * The BIER table's ID
305              */
306             bier_table_id_t fp_bier_tbl;
307         } bier_table;
308         struct {
309             /**
310              * The BIER imposition object
311              * this is part of the path's key, since the index_t
312              * of an imposition object is the object's key.
313              */
314             index_t fp_bier_imp;
315         } bier_imp;
316         struct {
317             /**
318      * The FIB index in which to perform the next lookup
319              */
320             fib_node_index_t fp_tbl_id;
321             /**
322              * The RPF-ID to tag the packets with
323              */
324             fib_rpf_id_t fp_rpf_id;
325         } deag;
326         struct {
327         } special;
328         struct {
329             /**
330              * The user provided 'exclusive' DPO
331              */
332             dpo_id_t fp_ex_dpo;
333         } exclusive;
334         struct {
335             /**
336              * The interface on which the local address is configured
337              */
338             u32 fp_interface;
339             /**
340              * The next-hop
341              */
342             ip46_address_t fp_addr;
343         } receive;
344         struct {
345             /**
346              * The interface on which the packets will be input.
347              */
348             u32 fp_interface;
349         } intf_rx;
350         struct {
351             /**
352              * The UDP Encap object this path resolves through
353              */
354             u32 fp_udp_encap_id;
355         } udp_encap;
356         struct {
357             /**
358              * The classify table this path resolves through
359              */
360             u32 fp_classify_table_id;
361         } classify;
362         struct {
363             /**
364              * The interface
365              */
366             u32 fp_interface;
367         } dvr;
368     };
369     STRUCT_MARK(path_hash_end);
370
371     /**
372      * Members in this last section represent information that is
373      * derived during resolution. It should not be copied to new paths
374      * nor compared.
375      */
376
377     /**
378      * Operational Flags
379      */
380     fib_path_oper_flags_t fp_oper_flags;
381
382     union {
383         /**
384          * the resolving via fib. not part of the per-type union above,
385          * since it is not part of the path's hash.
386          */
387         fib_node_index_t fp_via_fib;
388         /**
389          * the resolving bier-table
390          */
391         index_t fp_via_bier_tbl;
392         /**
393          * the resolving bier-fmask
394          */
395         index_t fp_via_bier_fmask;
396     };
397
398     /**
399      * The Data-path objects through which this path resolves for IP.
400      */
401     dpo_id_t fp_dpo;
402
403     /**
404      * the index of this path in the parent's child list.
405      */
406     u32 fp_sibling;
407 } fib_path_t;
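/*
 * only the members between the path_hash_start and path_hash_end markers
 * form a path's key; fib_path_hash() below hashes exactly that region.
 */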
408
409 /*
410  * Array of strings/names for the path types and attributes
411  */
412 static const char *fib_path_type_names[] = FIB_PATH_TYPES;
413 static const char *fib_path_oper_attribute_names[] = FIB_PATH_OPER_ATTRIBUTES;
414 static const char *fib_path_cfg_attribute_names[]  = FIB_PATH_CFG_ATTRIBUTES;
415
416 /*
417  * The memory pool from which we allocate all the paths
418  */
419 static fib_path_t *fib_path_pool;
420
421 /**
422  * the logger
423  */
424 vlib_log_class_t fib_path_logger;
425
426 /*
427  * Debug macro
428  */
429 #define FIB_PATH_DBG(_p, _fmt, _args...)                                \
430 {                                                                       \
431     vlib_log_debug (fib_path_logger,                                    \
432                     "[%U]: " _fmt,                                      \
433                     format_fib_path, fib_path_get_index(_p), 0,         \
434                     FIB_PATH_FORMAT_FLAGS_ONE_LINE,                     \
435                     ##_args);                                           \
436 }
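/*
 * typical usage, as seen later in this file:
 *   FIB_PATH_DBG(path, "create");
 * this logs the one-line formatted path followed by the message.
 */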
437
438 static fib_path_t *
439 fib_path_get (fib_node_index_t index)
440 {
441     return (pool_elt_at_index(fib_path_pool, index));
442 }
443
444 static fib_node_index_t 
445 fib_path_get_index (fib_path_t *path)
446 {
447     return (path - fib_path_pool);
448 }
449
450 static fib_node_t *
451 fib_path_get_node (fib_node_index_t index)
452 {
453     return ((fib_node_t*)fib_path_get(index));
454 }
455
456 static fib_path_t*
457 fib_path_from_fib_node (fib_node_t *node)
458 {
459     ASSERT(FIB_NODE_TYPE_PATH == node->fn_type);
460     return ((fib_path_t*)node);
461 }
462
463 u8 *
464 format_fib_path (u8 * s, va_list * args)
465 {
466     fib_node_index_t path_index = va_arg (*args, fib_node_index_t);
467     u32 indent = va_arg (*args, u32);
468     fib_format_path_flags_t flags =  va_arg (*args, fib_format_path_flags_t);
469     vnet_main_t * vnm = vnet_get_main();
470     fib_path_oper_attribute_t oattr;
471     fib_path_cfg_attribute_t cattr;
472     fib_path_t *path;
473     const char *eol;
474
475     if (flags & FIB_PATH_FORMAT_FLAGS_ONE_LINE)
476     {
477         eol = "";
478     }
479     else
480     {
481         eol = "\n";
482     }
483
484     path = fib_path_get(path_index);
485
486     s = format (s, "%Upath:[%d] ", format_white_space, indent,
487                 fib_path_get_index(path));
488     s = format (s, "pl-index:%d ", path->fp_pl_index);
489     s = format (s, "%U ", format_dpo_proto, path->fp_nh_proto);
490     s = format (s, "weight=%d ", path->fp_weight);
491     s = format (s, "pref=%d ", path->fp_preference);
492     s = format (s, "%s: ", fib_path_type_names[path->fp_type]);
493     if (FIB_PATH_OPER_FLAG_NONE != path->fp_oper_flags) {
494         s = format(s, " oper-flags:");
495         FOR_EACH_FIB_PATH_OPER_ATTRIBUTE(oattr) {
496             if ((1<<oattr) & path->fp_oper_flags) {
497                 s = format (s, "%s,", fib_path_oper_attribute_names[oattr]);
498             }
499         }
500     }
501     if (FIB_PATH_CFG_FLAG_NONE != path->fp_cfg_flags) {
502         s = format(s, " cfg-flags:");
503         FOR_EACH_FIB_PATH_CFG_ATTRIBUTE(cattr) {
504             if ((1<<cattr) & path->fp_cfg_flags) {
505                 s = format (s, "%s,", fib_path_cfg_attribute_names[cattr]);
506             }
507         }
508     }
509     if (!(flags & FIB_PATH_FORMAT_FLAGS_ONE_LINE))
510         s = format(s, "\n%U", format_white_space, indent+2);
511
512     switch (path->fp_type)
513     {
514     case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
515         s = format (s, "%U", format_ip46_address,
516                     &path->attached_next_hop.fp_nh,
517                     IP46_TYPE_ANY);
518         if (path->fp_oper_flags & FIB_PATH_OPER_FLAG_DROP)
519         {
520             s = format (s, " if_index:%d", path->attached_next_hop.fp_interface);
521         }
522         else
523         {
524             s = format (s, " %U",
525                         format_vnet_sw_interface_name,
526                         vnm,
527                         vnet_get_sw_interface(
528                             vnm,
529                             path->attached_next_hop.fp_interface));
530             if (vnet_sw_interface_is_p2p(vnet_get_main(),
531                                          path->attached_next_hop.fp_interface))
532             {
533                 s = format (s, " (p2p)");
534             }
535         }
536         if (!dpo_id_is_valid(&path->fp_dpo))
537         {
538             s = format(s, "%s%Uunresolved", eol, format_white_space, indent+2);
539         }
540         else
541         {
542             s = format(s, "%s%U%U", eol,
543                        format_white_space, indent,
544                        format_dpo_id,
545                        &path->fp_dpo, 13);
546         }
547         break;
548     case FIB_PATH_TYPE_ATTACHED:
549         if (path->fp_oper_flags & FIB_PATH_OPER_FLAG_DROP)
550         {
551             s = format (s, "if_index:%d", path->attached.fp_interface);
552         }
553         else
554         {
555             s = format (s, " %U",
556                         format_vnet_sw_interface_name,
557                         vnm,
558                         vnet_get_sw_interface(
559                             vnm,
560                             path->attached.fp_interface));
561         }
562         break;
563     case FIB_PATH_TYPE_RECURSIVE:
564         if (DPO_PROTO_MPLS == path->fp_nh_proto)
565         {
566             s = format (s, "via %U %U",
567                         format_mpls_unicast_label,
568                         path->recursive.fp_nh.fp_local_label,
569                         format_mpls_eos_bit,
570                         path->recursive.fp_nh.fp_eos);
571         }
572         else
573         {
574             s = format (s, "via %U",
575                         format_ip46_address,
576                         &path->recursive.fp_nh.fp_ip,
577                         IP46_TYPE_ANY);
578         }
579         s = format (s, " in fib:%d",
580                     path->recursive.fp_tbl_id);
581         s = format (s, " via-fib:%d",
582                     path->fp_via_fib);
583         s = format (s, " via-dpo:[%U:%d]",
584                     format_dpo_type, path->fp_dpo.dpoi_type, 
585                     path->fp_dpo.dpoi_index);
586
587         break;
588     case FIB_PATH_TYPE_UDP_ENCAP:
589         s = format (s, "UDP-encap ID:%d", path->udp_encap.fp_udp_encap_id);
590         break;
591     case FIB_PATH_TYPE_BIER_TABLE:
592         s = format (s, "via bier-table:[%U]",
593                     format_bier_table_id,
594                     &path->bier_table.fp_bier_tbl);
595         s = format (s, " via-dpo:[%U:%d]",
596                     format_dpo_type, path->fp_dpo.dpoi_type,
597                     path->fp_dpo.dpoi_index);
598         break;
599     case FIB_PATH_TYPE_BIER_FMASK:
600         s = format (s, "via-fmask:%d", path->bier_fmask.fp_bier_fmask); 
601         s = format (s, " via-dpo:[%U:%d]",
602                     format_dpo_type, path->fp_dpo.dpoi_type, 
603                     path->fp_dpo.dpoi_index);
604         break;
605     case FIB_PATH_TYPE_BIER_IMP:
606         s = format (s, "via %U", format_bier_imp,
607                     path->bier_imp.fp_bier_imp, 0, BIER_SHOW_BRIEF);
608         break;
609     case FIB_PATH_TYPE_DVR:
610         s = format (s, " %U",
611                     format_vnet_sw_interface_name,
612                     vnm,
613                     vnet_get_sw_interface(
614                         vnm,
615                         path->dvr.fp_interface));
616         break;
617     case FIB_PATH_TYPE_DEAG:
618         s = format (s, " %sfib-index:%d",
619                     (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_RPF_ID ?  "m" : ""),
620                     path->deag.fp_tbl_id);
621         break;
622     case FIB_PATH_TYPE_RECEIVE:
623     case FIB_PATH_TYPE_INTF_RX:
624     case FIB_PATH_TYPE_SPECIAL:
625     case FIB_PATH_TYPE_EXCLUSIVE:
626         if (dpo_id_is_valid(&path->fp_dpo))
627         {
628             s = format(s, "%U", format_dpo_id,
629                        &path->fp_dpo, indent+2);
630         }
631         break;
632     }
633     return (s);
634 }
635
636 /*
637  * fib_path_last_lock_gone
638  *
639  * We don't share paths, we share path lists, so the [un]lock functions
640  * are no-ops
641  */
642 static void
643 fib_path_last_lock_gone (fib_node_t *node)
644 {
645     ASSERT(0);
646 }
647
648 static void
649 fib_path_attached_next_hop_get_adj (fib_path_t *path,
650                                     vnet_link_t link,
651                                     dpo_id_t *dpo)
652 {
653     fib_protocol_t nh_proto;
654     adj_index_t ai;
655
656     nh_proto = dpo_proto_to_fib(path->fp_nh_proto);
657
658     if (vnet_sw_interface_is_p2p(vnet_get_main(),
659                                  path->attached_next_hop.fp_interface))
660     {
661         /*
662          * if the interface is p2p then the adj for the specific
663          * neighbour on that link will never exist. on p2p links
664          * the subnet address (the attached route) links to the
665          * auto-adj (see below), we want that adj here too.
666          */
667         ai = adj_nbr_add_or_lock(nh_proto, link, &zero_addr,
668                                  path->attached_next_hop.fp_interface);
669     }
670     else
671     {
672         ai = adj_nbr_add_or_lock(nh_proto, link,
673                                  &path->attached_next_hop.fp_nh,
674                                  path->attached_next_hop.fp_interface);
675     }
676
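    /*
     * dpo_set() takes its own lock on the adj, so the temporary lock
     * taken by adj_nbr_add_or_lock() is dropped straight after.
     */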
677     dpo_set(dpo, DPO_ADJACENCY, vnet_link_to_dpo_proto(link), ai);
678     adj_unlock(ai);
679 }
680
681 static void
682 fib_path_attached_next_hop_set (fib_path_t *path)
683 {
684     /*
685      * resolve directly via the adjacency described by the
686      * interface and next-hop
687      */
688     fib_path_attached_next_hop_get_adj(path,
689                                        dpo_proto_to_link(path->fp_nh_proto),
690                                        &path->fp_dpo);
691
692     ASSERT(dpo_is_adj(&path->fp_dpo));
693
694     /*
695      * become a child of the adjacency so we receive updates
696      * when its rewrite changes
697      */
698     path->fp_sibling = adj_child_add(path->fp_dpo.dpoi_index,
699                                      FIB_NODE_TYPE_PATH,
700                                      fib_path_get_index(path));
701
702     if (!vnet_sw_interface_is_up(vnet_get_main(),
703                                  path->attached_next_hop.fp_interface) ||
704         !adj_is_up(path->fp_dpo.dpoi_index))
705     {
706         path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
707     }
708 }
709
710 static void
711 fib_path_attached_get_adj (fib_path_t *path,
712                            vnet_link_t link,
713                            dpo_id_t *dpo)
714 {
715     fib_protocol_t nh_proto;
716
717     nh_proto = dpo_proto_to_fib(path->fp_nh_proto);
718
719     if (vnet_sw_interface_is_p2p(vnet_get_main(),
720                                  path->attached.fp_interface))
721     {
722         /*
723          * point-2-point interfaces do not require a glean, since
724          * there is nothing to ARP. Install a rewrite/nbr adj instead
725          */
726         adj_index_t ai;
727
728         ai = adj_nbr_add_or_lock(nh_proto, link, &zero_addr,
729                                  path->attached.fp_interface);
730
731         dpo_set(dpo, DPO_ADJACENCY, vnet_link_to_dpo_proto(link), ai);
732         adj_unlock(ai);
733     }
734     else if (vnet_sw_interface_is_nbma(vnet_get_main(),
735                                        path->attached.fp_interface))
736     {
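        /*
         * NBMA: there is no broadcast capability, hence nothing to
         * ARP/glean on, so an attached path contributes a drop.
         */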
737         dpo_copy(dpo, drop_dpo_get(path->fp_nh_proto));
738     }
739     else
740     {
741         adj_index_t ai;
742
743         ai = adj_glean_add_or_lock(nh_proto, link,
744                                    path->attached.fp_interface,
745                                    NULL);
746         dpo_set(dpo, DPO_ADJACENCY_GLEAN, vnet_link_to_dpo_proto(link), ai);
747         adj_unlock(ai);
748     }
749 }
750
751 /*
752  * create or update the path's recursive adj
753  */
754 static void
755 fib_path_recursive_adj_update (fib_path_t *path,
756                                fib_forward_chain_type_t fct,
757                                dpo_id_t *dpo)
758 {
759     dpo_id_t via_dpo = DPO_INVALID;
760
761     /*
762      * get the DPO to resolve through from the via-entry
763      */
764     fib_entry_contribute_forwarding(path->fp_via_fib,
765                                     fct,
766                                     &via_dpo);
767
768
769     /*
770      * hope for the best - clear if restrictions apply.
771      */
772     path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RESOLVED;
773
774     /*
775      * Validate any recursion constraints and over-ride the via
776      * adj if not met
777      */
778     if (path->fp_oper_flags & FIB_PATH_OPER_FLAG_RECURSIVE_LOOP)
779     {
780         path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
781         dpo_copy(&via_dpo, drop_dpo_get(path->fp_nh_proto));
782     }
783     else if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_RESOLVE_HOST)
784     {
785         /*
786          * the via FIB must be a host route.
787          * note the via FIB just added will always be a host route
788          * since it is an RR source added host route. So what we need to
789          * check is whether the route has other sources. If it does then
790          * some other source has added it as a host route. If it doesn't
791          * then it was added only here and inherits forwarding from a cover.
792          * the cover is not a host route.
793          * The RR source is the lowest priority source, so we check if it
794          * is the best. if it is there are no other sources.
795          */
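        /*
         * in effect: a recursive path via, e.g., 192.0.2.1 that is flagged
         * resolve-via-host only resolves once some source other than the
         * RR back-fill has installed 192.0.2.1 as a host route.
         */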
796         if (fib_entry_get_best_source(path->fp_via_fib) >= FIB_SOURCE_RR)
797         {
798             path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
799             dpo_copy(&via_dpo, drop_dpo_get(path->fp_nh_proto));
800
801             /*
802              * PIC edge trigger. let the load-balance maps know
803              */
804             load_balance_map_path_state_change(fib_path_get_index(path));
805         }
806     }
807     else if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_RESOLVE_ATTACHED)
808     {
809         /*
810          * RR source entries inherit the flags from the cover, so
811          * we can check the via directly
812          */
813         if (!(FIB_ENTRY_FLAG_ATTACHED & fib_entry_get_flags(path->fp_via_fib)))
814         {
815             path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
816             dpo_copy(&via_dpo, drop_dpo_get(path->fp_nh_proto));
817
818             /*
819              * PIC edge trigger. let the load-balance maps know
820              */
821             load_balance_map_path_state_change(fib_path_get_index(path));
822         }
823     }
824     /*
825      * check for over-riding factors on the FIB entry itself
826      */
827     if (!fib_entry_is_resolved(path->fp_via_fib))
828     {
829         path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
830         dpo_copy(&via_dpo, drop_dpo_get(path->fp_nh_proto));
831
832         /*
833          * PIC edge trigger. let the load-balance maps know
834          */
835         load_balance_map_path_state_change(fib_path_get_index(path));
836     }
837
838     /*
839      * If this path is contributing a drop, then it's not resolved
840      */
841     if (dpo_is_drop(&via_dpo) || load_balance_is_drop(&via_dpo))
842     {
843         path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
844     }
845
846     /*
847      * update the path's contributed DPO
848      */
849     dpo_copy(dpo, &via_dpo);
850
851     FIB_PATH_DBG(path, "recursive update:");
852
853     dpo_reset(&via_dpo);
854 }
855
856 /*
857  * re-evaluate the forwarding state for a via fmask path
858  */
859 static void
860 fib_path_bier_fmask_update (fib_path_t *path,
861                             dpo_id_t *dpo)
862 {
863     bier_fmask_contribute_forwarding(path->bier_fmask.fp_bier_fmask, dpo);
864
865     /*
866      * if we are stacking on the drop, then the path is not resolved
867      */
868     if (dpo_is_drop(dpo))
869     {
870         path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
871     }
872     else
873     {
874         path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RESOLVED;
875     }
876 }
877
878 /*
879  * fib_path_is_permanent_drop
880  *
881  * Return !0 if the path is configured to permanently drop,
882  * despite other attributes.
883  */
884 static int
885 fib_path_is_permanent_drop (fib_path_t *path)
886 {
887     return ((path->fp_cfg_flags & FIB_PATH_CFG_FLAG_DROP) ||
888             (path->fp_oper_flags & FIB_PATH_OPER_FLAG_DROP));
889 }
890
891 /*
892  * fib_path_unresolve
893  *
894  * Remove our dependency on the resolution target
895  */
896 static void
897 fib_path_unresolve (fib_path_t *path)
898 {
899     /*
900      * the forced drop path does not need unresolving
901      */
902     if (fib_path_is_permanent_drop(path))
903     {
904         return;
905     }
906
907     switch (path->fp_type)
908     {
909     case FIB_PATH_TYPE_RECURSIVE:
910         if (FIB_NODE_INDEX_INVALID != path->fp_via_fib)
911         {
912             fib_entry_child_remove(path->fp_via_fib,
913                                    path->fp_sibling);
914             fib_table_entry_special_remove(path->recursive.fp_tbl_id,
915                                            fib_entry_get_prefix(path->fp_via_fib),
916                                            FIB_SOURCE_RR);
917             fib_table_unlock(path->recursive.fp_tbl_id,
918                              dpo_proto_to_fib(path->fp_nh_proto),
919                              FIB_SOURCE_RR);
920             path->fp_via_fib = FIB_NODE_INDEX_INVALID;
921         }
922         break;
923     case FIB_PATH_TYPE_BIER_FMASK:
924         bier_fmask_child_remove(path->fp_via_bier_fmask,
925                                 path->fp_sibling);
926         break;
927     case FIB_PATH_TYPE_BIER_IMP:
928         bier_imp_unlock(path->fp_dpo.dpoi_index);
929         break;
930     case FIB_PATH_TYPE_BIER_TABLE:
931         bier_table_ecmp_unlock(path->fp_via_bier_tbl);
932         break;
933     case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
934     case FIB_PATH_TYPE_ATTACHED:
935         if (dpo_is_adj(&path->fp_dpo))
936             adj_child_remove(path->fp_dpo.dpoi_index,
937                              path->fp_sibling);
938         break;
939     case FIB_PATH_TYPE_UDP_ENCAP:
940         udp_encap_unlock(path->fp_dpo.dpoi_index);
941         break;
942     case FIB_PATH_TYPE_EXCLUSIVE:
943         dpo_reset(&path->exclusive.fp_ex_dpo);
944         break;
945     case FIB_PATH_TYPE_SPECIAL:
946     case FIB_PATH_TYPE_RECEIVE:
947     case FIB_PATH_TYPE_INTF_RX:
948     case FIB_PATH_TYPE_DEAG:
949     case FIB_PATH_TYPE_DVR:
950         /*
951          * these hold only the path's DPO, which is reset below.
952          */
953         break;
954     }
955
956     /*
957      * release the adj we were holding and pick up the
958      * drop just in case.
959      */
960     dpo_reset(&path->fp_dpo);
961     path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
962
963     return;
964 }
965
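/*
 * the forwarding chain a path contributes follows its next-hop protocol;
 * e.g. a recursive MPLS path through an EOS label contributes an MPLS-EOS
 * chain, a non-EOS label a non-EOS chain; otherwise the chain matches the
 * next-hop's DPO protocol.
 */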
966 static fib_forward_chain_type_t
967 fib_path_to_chain_type (const fib_path_t *path)
968 {
969     if (DPO_PROTO_MPLS == path->fp_nh_proto)
970     {
971         if (FIB_PATH_TYPE_RECURSIVE == path->fp_type &&
972             MPLS_EOS == path->recursive.fp_nh.fp_eos)
973         {
974             return (FIB_FORW_CHAIN_TYPE_MPLS_EOS);
975         }
976         else
977         {
978             return (FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS);
979         }
980     }
981     else
982     {
983         return (fib_forw_chain_type_from_dpo_proto(path->fp_nh_proto));
984     }
985 }
986
987 /*
988  * fib_path_back_walk_notify
989  *
990  * A back walk has reached this path.
991  */
992 static fib_node_back_walk_rc_t
993 fib_path_back_walk_notify (fib_node_t *node,
994                            fib_node_back_walk_ctx_t *ctx)
995 {
996     fib_path_t *path;
997
998     path = fib_path_from_fib_node(node);
999
1000     FIB_PATH_DBG(path, "bw:%U",
1001                  format_fib_node_bw_reason, ctx->fnbw_reason);
1002
1003     switch (path->fp_type)
1004     {
1005     case FIB_PATH_TYPE_RECURSIVE:
1006         if (FIB_NODE_BW_REASON_FLAG_EVALUATE & ctx->fnbw_reason)
1007         {
1008             /*
1009              * modify the recursive adjacency to use the new forwarding
1010              * of the via-fib.
1011              * this update is visible to packets in flight in the DP.
1012              */
1013             fib_path_recursive_adj_update(
1014                 path,
1015                 fib_path_to_chain_type(path),
1016                 &path->fp_dpo);
1017         }
1018         if ((FIB_NODE_BW_REASON_FLAG_ADJ_UPDATE & ctx->fnbw_reason) ||
1019             (FIB_NODE_BW_REASON_FLAG_ADJ_DOWN   & ctx->fnbw_reason))
1020         {
1021             /*
1022              * ADJ updates (complete<->incomplete) do not need to propagate to
1023              * recursive entries.
1024              * The only reason it's needed as far back as here is that the adj
1025              * and the incomplete adj are a different DPO type, so the LBs need
1026              * to re-stack.
1027              * If this walk was quashed in the fib_entry, then any non-fib_path
1028              * children (like tunnels that collapse out the LB when they stack)
1029              * would not see the update.
1030              */
1031             return (FIB_NODE_BACK_WALK_CONTINUE);
1032         }
1033         break;
1034     case FIB_PATH_TYPE_BIER_FMASK:
1035         if (FIB_NODE_BW_REASON_FLAG_EVALUATE & ctx->fnbw_reason)
1036         {
1037             /*
1038              * update to use the BIER fmask's new forwarding
1039              */
1040             fib_path_bier_fmask_update(path, &path->fp_dpo);
1041         }
1042         if ((FIB_NODE_BW_REASON_FLAG_ADJ_UPDATE & ctx->fnbw_reason) ||
1043             (FIB_NODE_BW_REASON_FLAG_ADJ_DOWN   & ctx->fnbw_reason))
1044         {
1045             /*
1046              * ADJ updates (complete<->incomplete) do not need to propagate to
1047              * recursive entries.
1048              * The only reason it's needed as far back as here is that the adj
1049              * and the incomplete adj are a different DPO type, so the LBs need
1050              * to re-stack.
1051              * If this walk was quashed in the fib_entry, then any non-fib_path
1052              * children (like tunnels that collapse out the LB when they stack)
1053              * would not see the update.
1054              */
1055             return (FIB_NODE_BACK_WALK_CONTINUE);
1056         }
1057         break;
1058     case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
1059         /*
1060          * ADJ_UPDATE backwalks pass silently through here and up to
1061          * the path-list when the multipath adj collapse occurs.
1062          * The reason we do this is that the assumption is that VPP
1064          * runs in an environment where the Control-Plane is remote
1065          * and hence reacts slowly to link up down. In order to remove
1066          * this down link from the ECMP set quickly, we back-walk.
1067          * VPP also has dedicated CPUs, so we are not stealing resources
1068          * from the CP to do so.
1069          */
1070         if (FIB_NODE_BW_REASON_FLAG_INTERFACE_UP & ctx->fnbw_reason)
1071         {
1072             if (path->fp_oper_flags & FIB_PATH_OPER_FLAG_RESOLVED)
1073             {
1074                 /*
1075                  * already resolved. no need to walk back again
1076                  */
1077                 return (FIB_NODE_BACK_WALK_CONTINUE);
1078             }
1079             path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RESOLVED;
1080         }
1081         if (FIB_NODE_BW_REASON_FLAG_INTERFACE_DOWN & ctx->fnbw_reason)
1082         {
1083             if (!(path->fp_oper_flags & FIB_PATH_OPER_FLAG_RESOLVED))
1084             {
1085                 /*
1086                  * alreday unresolved. no need to walk back again
1087                  */
1088                 return (FIB_NODE_BACK_WALK_CONTINUE);
1089             }
1090             path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
1091         }
1092         if (FIB_NODE_BW_REASON_FLAG_INTERFACE_DELETE & ctx->fnbw_reason)
1093         {
1094             /*
1095              * The interface this path resolves through has been deleted.
1096              * This will leave the path in a permanent drop state. The route
1097              * needs to be removed and readded (and hence the path-list deleted)
1098              * before it can forward again.
1099              */
1100             fib_path_unresolve(path);
1101             path->fp_oper_flags |= FIB_PATH_OPER_FLAG_DROP;
1102         }
1103         if (FIB_NODE_BW_REASON_FLAG_ADJ_UPDATE & ctx->fnbw_reason)
1104         {
1105             /*
1106              * restack the DPO to pick up the correct DPO sub-type
1107              */
1108             uword if_is_up;
1109
1110             if_is_up = vnet_sw_interface_is_up(
1111                            vnet_get_main(),
1112                            path->attached_next_hop.fp_interface);
1113
1114             fib_path_attached_next_hop_get_adj(
1115                 path,
1116                 dpo_proto_to_link(path->fp_nh_proto),
1117                 &path->fp_dpo);
1118
1119             path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
1120             if (if_is_up && adj_is_up(path->fp_dpo.dpoi_index))
1121             {
1122                 path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RESOLVED;
1123             }
1124
1125             if (!if_is_up)
1126             {
1127                 /*
1128                  * If the interface is not up there is no reason to walk
1129                  * back to children. if we did they would only evaluate
1130                  * that this path is unresolved and hence it would
1131                  * not contribute the adjacency - so it would be wasted
1132                  * CPU time.
1133                  */
1134                 return (FIB_NODE_BACK_WALK_CONTINUE);
1135             }
1136         }
1137         if (FIB_NODE_BW_REASON_FLAG_ADJ_DOWN & ctx->fnbw_reason)
1138         {
1139             if (!(path->fp_oper_flags & FIB_PATH_OPER_FLAG_RESOLVED))
1140             {
1141                 /*
1142                  * already unresolved. no need to walk back again
1143                  */
1144                 return (FIB_NODE_BACK_WALK_CONTINUE);
1145             }
1146             /*
1147              * the adj has gone down. the path is no longer resolved.
1148              */
1149             path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
1150         }
1151         break;
1152     case FIB_PATH_TYPE_ATTACHED:
1153     case FIB_PATH_TYPE_DVR:
1154         /*
1155          * FIXME; this could schedule a lower priority walk, since attached
1156          * routes are not usually in ECMP configurations so the backwalk to
1157          * the FIB entry does not need to be high priority
1158          */
1159         if (FIB_NODE_BW_REASON_FLAG_INTERFACE_UP & ctx->fnbw_reason)
1160         {
1161             path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RESOLVED;
1162         }
1163         if (FIB_NODE_BW_REASON_FLAG_INTERFACE_DOWN & ctx->fnbw_reason)
1164         {
1165             path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
1166         }
1167         if (FIB_NODE_BW_REASON_FLAG_INTERFACE_DELETE & ctx->fnbw_reason)
1168         {
1169             fib_path_unresolve(path);
1170             path->fp_oper_flags |= FIB_PATH_OPER_FLAG_DROP;
1171         }
1172         break;
1173     case FIB_PATH_TYPE_UDP_ENCAP:
1174     {
1175         dpo_id_t via_dpo = DPO_INVALID;
1176
1177         /*
1178          * hope for the best - clear if restrictions apply.
1179          */
1180         path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RESOLVED;
1181
1182         udp_encap_contribute_forwarding(path->udp_encap.fp_udp_encap_id,
1183                                         path->fp_nh_proto,
1184                                         &via_dpo);
1185         /*
1186          * If this path is contributing a drop, then it's not resolved
1187          */
1188         if (dpo_is_drop(&via_dpo) || load_balance_is_drop(&via_dpo))
1189         {
1190             path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
1191         }
1192
1193         /*
1194          * update the path's contributed DPO
1195          */
1196         dpo_copy(&path->fp_dpo, &via_dpo);
1197         dpo_reset(&via_dpo);
1198         break;
1199     }
1200     case FIB_PATH_TYPE_INTF_RX:
1201         ASSERT(0);
1202     case FIB_PATH_TYPE_DEAG:
1203         /*
1204          * FIXME When VRF delete is allowed this will need a poke.
1205          */
1206     case FIB_PATH_TYPE_SPECIAL:
1207     case FIB_PATH_TYPE_RECEIVE:
1208     case FIB_PATH_TYPE_EXCLUSIVE:
1209     case FIB_PATH_TYPE_BIER_TABLE:
1210     case FIB_PATH_TYPE_BIER_IMP:
1211         /*
1212          * these path types have no parents. so to be
1213          * walked from one is unexpected.
1214          */
1215         ASSERT(0);
1216         break;
1217     }
1218
1219     /*
1220      * propagate the backwalk further to the path-list
1221      */
1222     fib_path_list_back_walk(path->fp_pl_index, ctx);
1223
1224     return (FIB_NODE_BACK_WALK_CONTINUE);
1225 }
1226
1227 static void
1228 fib_path_memory_show (void)
1229 {
1230     fib_show_memory_usage("Path",
1231                           pool_elts(fib_path_pool),
1232                           pool_len(fib_path_pool),
1233                           sizeof(fib_path_t));
1234 }
1235
1236 /*
1237  * The FIB path's graph node virtual function table
1238  */
1239 static const fib_node_vft_t fib_path_vft = {
1240     .fnv_get = fib_path_get_node,
1241     .fnv_last_lock = fib_path_last_lock_gone,
1242     .fnv_back_walk = fib_path_back_walk_notify,
1243     .fnv_mem_show = fib_path_memory_show,
1244 };
1245
1246 static fib_path_cfg_flags_t
1247 fib_path_route_flags_to_cfg_flags (const fib_route_path_t *rpath)
1248 {
1249     fib_path_cfg_flags_t cfg_flags = FIB_PATH_CFG_FLAG_NONE;
1250
1251     if (rpath->frp_flags & FIB_ROUTE_PATH_POP_PW_CW)
1252         cfg_flags |= FIB_PATH_CFG_FLAG_POP_PW_CW;
1253     if (rpath->frp_flags & FIB_ROUTE_PATH_RESOLVE_VIA_HOST)
1254         cfg_flags |= FIB_PATH_CFG_FLAG_RESOLVE_HOST;
1255     if (rpath->frp_flags & FIB_ROUTE_PATH_RESOLVE_VIA_ATTACHED)
1256         cfg_flags |= FIB_PATH_CFG_FLAG_RESOLVE_ATTACHED;
1257     if (rpath->frp_flags & FIB_ROUTE_PATH_LOCAL)
1258         cfg_flags |= FIB_PATH_CFG_FLAG_LOCAL;
1259     if (rpath->frp_flags & FIB_ROUTE_PATH_ATTACHED)
1260         cfg_flags |= FIB_PATH_CFG_FLAG_ATTACHED;
1261     if (rpath->frp_flags & FIB_ROUTE_PATH_INTF_RX)
1262         cfg_flags |= FIB_PATH_CFG_FLAG_INTF_RX;
1263     if (rpath->frp_flags & FIB_ROUTE_PATH_RPF_ID)
1264         cfg_flags |= FIB_PATH_CFG_FLAG_RPF_ID;
1265     if (rpath->frp_flags & FIB_ROUTE_PATH_EXCLUSIVE)
1266         cfg_flags |= FIB_PATH_CFG_FLAG_EXCLUSIVE;
1267     if (rpath->frp_flags & FIB_ROUTE_PATH_DROP)
1268         cfg_flags |= FIB_PATH_CFG_FLAG_DROP;
1269     if (rpath->frp_flags & FIB_ROUTE_PATH_SOURCE_LOOKUP)
1270         cfg_flags |= FIB_PATH_CFG_FLAG_DEAG_SRC;
1271     if (rpath->frp_flags & FIB_ROUTE_PATH_ICMP_UNREACH)
1272         cfg_flags |= FIB_PATH_CFG_FLAG_ICMP_UNREACH;
1273     if (rpath->frp_flags & FIB_ROUTE_PATH_ICMP_PROHIBIT)
1274         cfg_flags |= FIB_PATH_CFG_FLAG_ICMP_PROHIBIT;
1275
1276     return (cfg_flags);
1277 }
1278
1279 /*
1280  * fib_path_create
1281  *
1282  * Create and initialise a new path object.
1283  * return the index of the path.
1284  */
1285 fib_node_index_t
1286 fib_path_create (fib_node_index_t pl_index,
1287                  const fib_route_path_t *rpath)
1288 {
1289     fib_path_t *path;
1290
1291     pool_get(fib_path_pool, path);
1292     clib_memset(path, 0, sizeof(*path));
1293
1294     fib_node_init(&path->fp_node,
1295                   FIB_NODE_TYPE_PATH);
1296
1297     dpo_reset(&path->fp_dpo);
1298     path->fp_pl_index = pl_index;
1299     path->fp_nh_proto = rpath->frp_proto;
1300     path->fp_via_fib = FIB_NODE_INDEX_INVALID;
1301     path->fp_weight = rpath->frp_weight;
1302     if (0 == path->fp_weight)
1303     {
1304         /*
1305          * a weight of 0 is a meaningless value. We could either reject it, and thus force
1306          * clients to always use 1, or we can accept it and fix it up appropriately.
1307          */
1308         path->fp_weight = 1;
1309     }
1310     path->fp_preference = rpath->frp_preference;
1311     path->fp_cfg_flags = fib_path_route_flags_to_cfg_flags(rpath);
1312
1313     /*
1314      * deduce the path's type from the parameters and save what is needed.
1315      */
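    /*
     * for example: an rpath with both frp_sw_if_index and frp_addr set
     * becomes attached-nexthop; with only the interface it is attached;
     * with only the address it is recursive in frp_fib_index.
     */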
1316     if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_LOCAL)
1317     {
1318         path->fp_type = FIB_PATH_TYPE_RECEIVE;
1319         path->receive.fp_interface = rpath->frp_sw_if_index;
1320         path->receive.fp_addr = rpath->frp_addr;
1321     }
1322     else if (rpath->frp_flags & FIB_ROUTE_PATH_UDP_ENCAP)
1323     {
1324         path->fp_type = FIB_PATH_TYPE_UDP_ENCAP;
1325         path->udp_encap.fp_udp_encap_id = rpath->frp_udp_encap_id;
1326     }
1327     else if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_INTF_RX)
1328     {
1329         path->fp_type = FIB_PATH_TYPE_INTF_RX;
1330         path->intf_rx.fp_interface = rpath->frp_sw_if_index;
1331     }
1332     else if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_RPF_ID)
1333     {
1334         path->fp_type = FIB_PATH_TYPE_DEAG;
1335         path->deag.fp_tbl_id = rpath->frp_fib_index;
1336         path->deag.fp_rpf_id = rpath->frp_rpf_id;
1337     }
1338     else if (rpath->frp_flags & FIB_ROUTE_PATH_BIER_FMASK)
1339     {
1340         path->fp_type = FIB_PATH_TYPE_BIER_FMASK;
1341         path->bier_fmask.fp_bier_fmask = rpath->frp_bier_fmask;
1342     }
1343     else if (rpath->frp_flags & FIB_ROUTE_PATH_BIER_IMP)
1344     {
1345         path->fp_type = FIB_PATH_TYPE_BIER_IMP;
1346         path->bier_imp.fp_bier_imp = rpath->frp_bier_imp;
1347     }
1348     else if (rpath->frp_flags & FIB_ROUTE_PATH_BIER_TABLE)
1349     {
1350         path->fp_type = FIB_PATH_TYPE_BIER_TABLE;
1351         path->bier_table.fp_bier_tbl = rpath->frp_bier_tbl;
1352     }
1353     else if (rpath->frp_flags & FIB_ROUTE_PATH_DEAG)
1354     {
1355         path->fp_type = FIB_PATH_TYPE_DEAG;
1356         path->deag.fp_tbl_id = rpath->frp_fib_index;
1357     }
1358     else if (rpath->frp_flags & FIB_ROUTE_PATH_DVR)
1359     {
1360         path->fp_type = FIB_PATH_TYPE_DVR;
1361         path->dvr.fp_interface = rpath->frp_sw_if_index;
1362     }
1363     else if (rpath->frp_flags & FIB_ROUTE_PATH_EXCLUSIVE)
1364     {
1365         path->fp_type = FIB_PATH_TYPE_EXCLUSIVE;
1366         dpo_copy(&path->exclusive.fp_ex_dpo, &rpath->dpo);
1367     }
1368     else if ((path->fp_cfg_flags & FIB_PATH_CFG_FLAG_ICMP_PROHIBIT) ||
1369         (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_ICMP_UNREACH))
1370     {
1371         path->fp_type = FIB_PATH_TYPE_SPECIAL;
1372     }
1373     else if ((path->fp_cfg_flags & FIB_PATH_CFG_FLAG_CLASSIFY))
1374     {
1375         path->fp_type = FIB_PATH_TYPE_SPECIAL;
1376         path->classify.fp_classify_table_id = rpath->frp_classify_table_id;
1377     }
1378     else if (~0 != rpath->frp_sw_if_index)
1379     {
1380         if (ip46_address_is_zero(&rpath->frp_addr))
1381         {
1382             path->fp_type = FIB_PATH_TYPE_ATTACHED;
1383             path->attached.fp_interface = rpath->frp_sw_if_index;
1384         }
1385         else
1386         {
1387             path->fp_type = FIB_PATH_TYPE_ATTACHED_NEXT_HOP;
1388             path->attached_next_hop.fp_interface = rpath->frp_sw_if_index;
1389             path->attached_next_hop.fp_nh = rpath->frp_addr;
1390         }
1391     }
1392     else
1393     {
1394         if (ip46_address_is_zero(&rpath->frp_addr))
1395         {
1396             if (~0 == rpath->frp_fib_index)
1397             {
1398                 path->fp_type = FIB_PATH_TYPE_SPECIAL;
1399             }
1400             else
1401             {
1402                 path->fp_type = FIB_PATH_TYPE_DEAG;
1403                 path->deag.fp_tbl_id = rpath->frp_fib_index;
1404                 path->deag.fp_rpf_id = ~0;
1405             }
1406         }
1407         else
1408         {
1409             path->fp_type = FIB_PATH_TYPE_RECURSIVE;
1410             if (DPO_PROTO_MPLS == path->fp_nh_proto)
1411             {
1412                 path->recursive.fp_nh.fp_local_label = rpath->frp_local_label;
1413                 path->recursive.fp_nh.fp_eos = rpath->frp_eos;
1414             }
1415             else
1416             {
1417                 path->recursive.fp_nh.fp_ip = rpath->frp_addr;
1418             }
1419             path->recursive.fp_tbl_id = rpath->frp_fib_index;
1420         }
1421     }
1422
1423     FIB_PATH_DBG(path, "create");
1424
1425     return (fib_path_get_index(path));
1426 }
1427
1428 /*
1429  * fib_path_create_special
1430  *
1431  * Create and initialise a new path object.
1432  * return the index of the path.
1433  */
1434 fib_node_index_t
1435 fib_path_create_special (fib_node_index_t pl_index,
1436                          dpo_proto_t nh_proto,
1437                          fib_path_cfg_flags_t flags,
1438                          const dpo_id_t *dpo)
1439 {
1440     fib_path_t *path;
1441
1442     pool_get(fib_path_pool, path);
1443     clib_memset(path, 0, sizeof(*path));
1444
1445     fib_node_init(&path->fp_node,
1446                   FIB_NODE_TYPE_PATH);
1447     dpo_reset(&path->fp_dpo);
1448
1449     path->fp_pl_index = pl_index;
1450     path->fp_weight = 1;
1451     path->fp_preference = 0;
1452     path->fp_nh_proto = nh_proto;
1453     path->fp_via_fib = FIB_NODE_INDEX_INVALID;
1454     path->fp_cfg_flags = flags;
1455
1456     if (FIB_PATH_CFG_FLAG_DROP & flags)
1457     {
1458         path->fp_type = FIB_PATH_TYPE_SPECIAL;
1459     }
1460     else if (FIB_PATH_CFG_FLAG_LOCAL & flags)
1461     {
1462         path->fp_type = FIB_PATH_TYPE_RECEIVE;
1463         path->attached.fp_interface = FIB_NODE_INDEX_INVALID;
1464     }
1465     else
1466     {
1467         path->fp_type = FIB_PATH_TYPE_EXCLUSIVE;
1468         ASSERT(NULL != dpo);
1469         dpo_copy(&path->exclusive.fp_ex_dpo, dpo);
1470     }
1471
1472     return (fib_path_get_index(path));
1473 }
1474
1475 /*
1476  * fib_path_copy
1477  *
1478  * Copy a path. return index of new path.
1479  */
1480 fib_node_index_t
1481 fib_path_copy (fib_node_index_t path_index,
1482                fib_node_index_t path_list_index)
1483 {
1484     fib_path_t *path, *orig_path;
1485
1486     pool_get(fib_path_pool, path);
1487
1488     orig_path = fib_path_get(path_index);
1489     ASSERT(NULL != orig_path);
1490
1491     clib_memcpy(path, orig_path, sizeof(*path));
1492
1493     FIB_PATH_DBG(path, "create-copy:%d", path_index);
1494
1495     /*
1496      * reset the dynamic section
1497      */
1498     fib_node_init(&path->fp_node, FIB_NODE_TYPE_PATH);
1499     path->fp_oper_flags     = FIB_PATH_OPER_FLAG_NONE;
1500     path->fp_pl_index  = path_list_index;
1501     path->fp_via_fib   = FIB_NODE_INDEX_INVALID;
1502     clib_memset(&path->fp_dpo, 0, sizeof(path->fp_dpo));
1503     dpo_reset(&path->fp_dpo);
1504
1505     return (fib_path_get_index(path));
1506 }
1507
1508 /*
1509  * fib_path_destroy
1510  *
1511  * destroy a path that is no longer required
1512  */
1513 void
1514 fib_path_destroy (fib_node_index_t path_index)
1515 {
1516     fib_path_t *path;
1517
1518     path = fib_path_get(path_index);
1519
1520     ASSERT(NULL != path);
1521     FIB_PATH_DBG(path, "destroy");
1522
1523     fib_path_unresolve(path);
1524
1525     fib_node_deinit(&path->fp_node);
1526     pool_put(fib_path_pool, path);
1527 }
1528
1529 /*
1530  * fib_path_hash
1531  *
1532  * compute the hash of a path
1533  */
1534 uword
1535 fib_path_hash (fib_node_index_t path_index)
1536 {
1537     fib_path_t *path;
1538
1539     path = fib_path_get(path_index);
1540
1541     return (hash_memory(STRUCT_MARK_PTR(path, path_hash_start),
1542                         (STRUCT_OFFSET_OF(fib_path_t, path_hash_end) -
1543                          STRUCT_OFFSET_OF(fib_path_t, path_hash_start)),
1544                         0));
1545 }
1546
1547 /*
1548  * fib_path_cmp_i
1549  *
1550  * Compare two paths for equivalence.
1551  */
1552 static int
1553 fib_path_cmp_i (const fib_path_t *path1,
1554                 const fib_path_t *path2)
1555 {
1556     int res;
1557
1558     res = 1;
1559
1560     /*
1561      * paths of different types and protocol are not equal.
1562      * paths that differ only in weight and/or preference are the same path.
1563      */
1564     if (path1->fp_type != path2->fp_type)
1565     {
1566         res = (path1->fp_type - path2->fp_type);
1567     }
1568     else if (path1->fp_nh_proto != path2->fp_nh_proto)
1569     {
1570         res = (path1->fp_nh_proto - path2->fp_nh_proto);
1571     }
1572     else
1573     {
1574         /*
1575          * both paths are of the same type.
1576          * consider each type and its attributes in turn.
1577          */
1578         switch (path1->fp_type)
1579         {
1580         case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
1581             res = ip46_address_cmp(&path1->attached_next_hop.fp_nh,
1582                                    &path2->attached_next_hop.fp_nh);
1583             if (0 == res) {
1584                 res = (path1->attached_next_hop.fp_interface -
1585                        path2->attached_next_hop.fp_interface);
1586             }
1587             break;
1588         case FIB_PATH_TYPE_ATTACHED:
1589             res = (path1->attached.fp_interface -
1590                    path2->attached.fp_interface);
1591             break;
1592         case FIB_PATH_TYPE_RECURSIVE:
1593             res = ip46_address_cmp(&path1->recursive.fp_nh.fp_ip,
1594                                    &path2->recursive.fp_nh.fp_ip);
1595  
1596             if (0 == res)
1597             {
1598                 res = (path1->recursive.fp_tbl_id - path2->recursive.fp_tbl_id);
1599             }
1600             break;
1601         case FIB_PATH_TYPE_BIER_FMASK:
1602             res = (path1->bier_fmask.fp_bier_fmask -
1603                    path2->bier_fmask.fp_bier_fmask);
1604             break;
1605         case FIB_PATH_TYPE_BIER_IMP:
1606             res = (path1->bier_imp.fp_bier_imp -
1607                    path2->bier_imp.fp_bier_imp);
1608             break;
1609         case FIB_PATH_TYPE_BIER_TABLE:
1610             res = bier_table_id_cmp(&path1->bier_table.fp_bier_tbl,
1611                                     &path2->bier_table.fp_bier_tbl);
1612             break;
1613         case FIB_PATH_TYPE_DEAG:
1614             res = (path1->deag.fp_tbl_id - path2->deag.fp_tbl_id);
1615             if (0 == res)
1616             {
1617                 res = (path1->deag.fp_rpf_id - path2->deag.fp_rpf_id);
1618             }
1619             break;
1620         case FIB_PATH_TYPE_INTF_RX:
1621             res = (path1->intf_rx.fp_interface - path2->intf_rx.fp_interface);
1622             break;
1623         case FIB_PATH_TYPE_UDP_ENCAP:
1624             res = (path1->udp_encap.fp_udp_encap_id - path2->udp_encap.fp_udp_encap_id);
1625             break;
1626         case FIB_PATH_TYPE_DVR:
1627             res = (path1->dvr.fp_interface - path2->dvr.fp_interface);
1628             break;
1629         case FIB_PATH_TYPE_EXCLUSIVE:
1630             res = dpo_cmp(&path1->exclusive.fp_ex_dpo, &path2->exclusive.fp_ex_dpo);
1631             break;
1632         case FIB_PATH_TYPE_SPECIAL:
1633         case FIB_PATH_TYPE_RECEIVE:
1634             res = 0;
1635             break;
1636         }
1637     }
1638     return (res);
1639 }
1640
1641 /*
1642  * fib_path_cmp_for_sort
1643  *
1644  * Compare two paths for equivalence. Used during path sorting.
1645  * As usual 0 means equal.
1646  */
1647 int
1648 fib_path_cmp_for_sort (void * v1,
1649                        void * v2)
1650 {
1651     fib_node_index_t *pi1 = v1, *pi2 = v2;
1652     fib_path_t *path1, *path2;
1653
1654     path1 = fib_path_get(*pi1);
1655     path2 = fib_path_get(*pi2);
1656
1657     /*
1658      * when sorting paths we want the most preferred paths (numerically
1659      * lowest fp_preference) first, so that the choice set built is in preference order
1660      */
1661     if (path1->fp_preference != path2->fp_preference)
1662     {
1663         return (path1->fp_preference - path2->fp_preference);
1664     }
1665
1666     return (fib_path_cmp_i(path1, path2));
1667 }
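
/*
 * Illustrative sketch (not part of the original source): since
 * fib_path_cmp_for_sort() takes pointers to path indices, it can be used
 * directly with the vppinfra vector sort macro. A caller holding a vector of
 * path indices (here assumed to be 'path_indices', a vec of fib_node_index_t)
 * might order its paths like this:
 *
 *     vec_sort_with_function(path_indices, fib_path_cmp_for_sort);
 *
 * After the sort, equivalent paths of the same preference are adjacent,
 * with the most preferred paths first.
 */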
1668
1669 /*
1670  * fib_path_cmp
1671  *
1672  * Compare two paths for equivalence.
1673  */
1674 int
1675 fib_path_cmp (fib_node_index_t pi1,
1676               fib_node_index_t pi2)
1677 {
1678     fib_path_t *path1, *path2;
1679
1680     path1 = fib_path_get(pi1);
1681     path2 = fib_path_get(pi2);
1682
1683     return (fib_path_cmp_i(path1, path2));
1684 }
1685
1686 int
1687 fib_path_cmp_w_route_path (fib_node_index_t path_index,
1688                            const fib_route_path_t *rpath)
1689 {
1690     fib_path_t *path;
1691     int res;
1692
1693     path = fib_path_get(path_index);
1694
1695     res = 1;
1696
1697     if (path->fp_weight != rpath->frp_weight)
1698     {
1699         res = (path->fp_weight - rpath->frp_weight);
1700     }
1701     else
1702     {
1703         /*
1704          * both paths are of the same type.
1705          * consider each type and its attributes in turn.
1706          */
1707         switch (path->fp_type)
1708         {
1709         case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
1710             res = ip46_address_cmp(&path->attached_next_hop.fp_nh,
1711                                    &rpath->frp_addr);
1712             if (0 == res)
1713             {
1714                 res = (path->attached_next_hop.fp_interface -
1715                        rpath->frp_sw_if_index);
1716             }
1717             break;
1718         case FIB_PATH_TYPE_ATTACHED:
1719             res = (path->attached.fp_interface - rpath->frp_sw_if_index);
1720             break;
1721         case FIB_PATH_TYPE_RECURSIVE:
1722             if (DPO_PROTO_MPLS == path->fp_nh_proto)
1723             {
1724                 res = path->recursive.fp_nh.fp_local_label - rpath->frp_local_label;
1725
1726                 if (res == 0)
1727                 {
1728                     res = path->recursive.fp_nh.fp_eos - rpath->frp_eos;
1729                 }
1730             }
1731             else
1732             {
1733                 res = ip46_address_cmp(&path->recursive.fp_nh.fp_ip,
1734                                        &rpath->frp_addr);
1735             }
1736
1737             if (0 == res)
1738             {
1739                 res = (path->recursive.fp_tbl_id - rpath->frp_fib_index);
1740             }
1741             break;
1742         case FIB_PATH_TYPE_BIER_FMASK:
1743             res = (path->bier_fmask.fp_bier_fmask - rpath->frp_bier_fmask);
1744             break;
1745         case FIB_PATH_TYPE_BIER_IMP:
1746             res = (path->bier_imp.fp_bier_imp - rpath->frp_bier_imp);
1747             break;
1748         case FIB_PATH_TYPE_BIER_TABLE:
1749             res = bier_table_id_cmp(&path->bier_table.fp_bier_tbl,
1750                                     &rpath->frp_bier_tbl);
1751             break;
1752         case FIB_PATH_TYPE_INTF_RX:
1753             res = (path->intf_rx.fp_interface - rpath->frp_sw_if_index);
1754             break;
1755         case FIB_PATH_TYPE_UDP_ENCAP:
1756             res = (path->udp_encap.fp_udp_encap_id - rpath->frp_udp_encap_id);
1757             break;
1758         case FIB_PATH_TYPE_DEAG:
1759             res = (path->deag.fp_tbl_id - rpath->frp_fib_index);
1760             if (0 == res)
1761             {
1762                 res = (path->deag.fp_rpf_id - rpath->frp_rpf_id);
1763             }
1764             break;
1765         case FIB_PATH_TYPE_DVR:
1766             res = (path->dvr.fp_interface - rpath->frp_sw_if_index);
1767             break;
1768         case FIB_PATH_TYPE_EXCLUSIVE:
1769             res = dpo_cmp(&path->exclusive.fp_ex_dpo, &rpath->dpo);
1770             break;
1771         case FIB_PATH_TYPE_RECEIVE:
1772             if (rpath->frp_flags & FIB_ROUTE_PATH_LOCAL)
1773             {
1774                 res = 0;
1775             }
1776             else
1777             {
1778                 res = 1;
1779             }
1780             break;
1781         case FIB_PATH_TYPE_SPECIAL:
1782             res = 0;
1783             break;
1784         }
1785     }
1786     return (res);
1787 }
1788
1789 /*
1790  * fib_path_recursive_loop_detect
1791  *
1792  * A forward walk of the FIB object graph to detect a cycle/loop. This
1793  * walk is initiated when an entry is linked to a new path-list or unlinked from an old one.
1794  * The entry vector passed contains all the FIB entries that are children of this
1795  * path (i.e. all the entries encountered on the walk so far). If this vector
1796  * contains the entry this path resolves via, then a loop is about to form.
1797  * The loop must be allowed to form, since we need the dependencies in place
1798  * so that we can track when the loop breaks.
1799  * However, we MUST NOT produce a loop in the forwarding graph (else packets
1800  * would loop around the switch path until the loop breaks), so we mark recursive
1801  * paths as looped so that they do not contribute forwarding information.
1802  * By marking the path as looped, an entry such as:
1803  *    X/Y
1804  *     via a.a.a.a (looped)
1805  *     via b.b.b.b (not looped)
1806  * can still forward using the information provided by b.b.b.b only.
1807  */
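/*
 * For example (hypothetical addresses, for illustration only), the two
 * recursive routes:
 *
 *    1.1.1.0/24 via 2.2.2.2
 *    2.2.2.0/24 via 1.1.1.1
 *
 * resolve through each other. When the second route's path resolves via the
 * first, the walk finds the first route's entry already in the entry vector;
 * the path is then marked FIB_PATH_OPER_FLAG_RECURSIVE_LOOP and its DPO is
 * replaced with a drop until a route change breaks the loop.
 */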
1808 int
1809 fib_path_recursive_loop_detect (fib_node_index_t path_index,
1810                                 fib_node_index_t **entry_indicies)
1811 {
1812     fib_path_t *path;
1813
1814     path = fib_path_get(path_index);
1815
1816     /*
1817      * the forced drop path is never looped, since it is never resolved.
1818      */
1819     if (fib_path_is_permanent_drop(path))
1820     {
1821         return (0);
1822     }
1823
1824     switch (path->fp_type)
1825     {
1826     case FIB_PATH_TYPE_RECURSIVE:
1827     {
1828         fib_node_index_t *entry_index, *entries;
1829         int looped = 0;
1830         entries = *entry_indicies;
1831
1832         vec_foreach(entry_index, entries) {
1833             if (*entry_index == path->fp_via_fib)
1834             {
1835                 /*
1836                  * the entry that is about to link to this path-list (or
1837                  * one of this path-list's children) is the same entry that
1838                  * this recursive path resolves through. this is a cycle.
1839                  * abort the walk.
1840                  */
1841                 looped = 1;
1842                 break;
1843             }
1844         }
1845
1846         if (looped)
1847         {
1848             FIB_PATH_DBG(path, "recursive loop formed");
1849             path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RECURSIVE_LOOP;
1850
1851             dpo_copy(&path->fp_dpo, drop_dpo_get(path->fp_nh_proto));
1852         }
1853         else
1854         {
1855             /*
1856              * no loop here yet. keep forward walking the graph.
1857              */
1858             if (fib_entry_recursive_loop_detect(path->fp_via_fib, entry_indicies))
1859             {
1860                 FIB_PATH_DBG(path, "recursive loop formed");
1861                 path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RECURSIVE_LOOP;
1862             }
1863             else
1864             {
1865                 FIB_PATH_DBG(path, "recursive loop cleared");
1866                 path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RECURSIVE_LOOP;
1867             }
1868         }
1869         break;
1870     }
1871     case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
1872     case FIB_PATH_TYPE_ATTACHED:
1873         if (dpo_is_adj(&path->fp_dpo) &&
1874             adj_recursive_loop_detect(path->fp_dpo.dpoi_index,
1875                                       entry_indicies))
1876         {
1877             FIB_PATH_DBG(path, "recursive loop formed");
1878             path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RECURSIVE_LOOP;
1879         }
1880         else
1881         {
1882             FIB_PATH_DBG(path, "recursive loop cleared");
1883             path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RECURSIVE_LOOP;
1884         }
1885         break;
1886     case FIB_PATH_TYPE_SPECIAL:
1887     case FIB_PATH_TYPE_DEAG:
1888     case FIB_PATH_TYPE_DVR:
1889     case FIB_PATH_TYPE_RECEIVE:
1890     case FIB_PATH_TYPE_INTF_RX:
1891     case FIB_PATH_TYPE_UDP_ENCAP:
1892     case FIB_PATH_TYPE_EXCLUSIVE:
1893     case FIB_PATH_TYPE_BIER_FMASK:
1894     case FIB_PATH_TYPE_BIER_TABLE:
1895     case FIB_PATH_TYPE_BIER_IMP:
1896         /*
1897          * these path types cannot be part of a loop, since they are the leaves
1898          * of the graph.
1899          */
1900         break;
1901     }
1902
1903     return (fib_path_is_looped(path_index));
1904 }
1905
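/*
 * fib_path_resolve
 *
 * Resolve a path: construct the DPO through which the path will forward,
 * according to the path's type (attached next-hop, recursive, deag, BIER,
 * special, etc.), and link the path to the object it depends on (adjacency,
 * via-entry, BIER fmask, ...) so it is updated when that object changes.
 * Returns non-zero if the path is resolved (see fib_path_is_resolved()).
 */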
1906 int
1907 fib_path_resolve (fib_node_index_t path_index)
1908 {
1909     fib_path_t *path;
1910
1911     path = fib_path_get(path_index);
1912
1913     /*
1914      * hope for the best.
1915      */
1916     path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RESOLVED;
1917
1918     /*
1919      * the forced drop path resolves via the drop adj
1920      */
1921     if (fib_path_is_permanent_drop(path))
1922     {
1923         dpo_copy(&path->fp_dpo, drop_dpo_get(path->fp_nh_proto));
1924         path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
1925         return (fib_path_is_resolved(path_index));
1926     }
1927
1928     switch (path->fp_type)
1929     {
1930     case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
1931         fib_path_attached_next_hop_set(path);
1932         break;
1933     case FIB_PATH_TYPE_ATTACHED:
1934     {
1935         dpo_id_t tmp = DPO_INVALID;
1936
1937         /*
1938          * if the attached interface is not up, the path does not resolve
1939          */
1940         if (!vnet_sw_interface_is_up(vnet_get_main(),
1941                                      path->attached.fp_interface))
1942         {
1943             path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
1944         }
1945         fib_path_attached_get_adj(path,
1946                                   dpo_proto_to_link(path->fp_nh_proto),
1947                                   &tmp);
1948
1949         /*
1950          * re-fetch after possible mem realloc
1951          */
1952         path = fib_path_get(path_index);
1953         dpo_copy(&path->fp_dpo, &tmp);
1954
1955         /*
1956          * become a child of the adjacency so we receive updates
1957          * when the interface state changes
1958          */
1959         if (dpo_is_adj(&path->fp_dpo))
1960         {
1961             path->fp_sibling = adj_child_add(path->fp_dpo.dpoi_index,
1962                                              FIB_NODE_TYPE_PATH,
1963                                              fib_path_get_index(path));
1964         }
1965         dpo_reset(&tmp);
1966         break;
1967     }
1968     case FIB_PATH_TYPE_RECURSIVE:
1969     {
1970         /*
1971          * Create a RR source entry in the table for the address
1972          * that this path recurses through.
1973          * This resolve action is recursive, hence we may create
1974          * more paths in the process. more creates may mean a pool realloc,
1975          * so the path pointer is re-fetched afterwards.
1976          */
1977         fib_node_index_t fei;
1978         fib_prefix_t pfx;
1979
1980         ASSERT(FIB_NODE_INDEX_INVALID == path->fp_via_fib);
1981
1982         if (DPO_PROTO_MPLS == path->fp_nh_proto)
1983         {
1984             fib_prefix_from_mpls_label(path->recursive.fp_nh.fp_local_label,
1985                                        path->recursive.fp_nh.fp_eos,
1986                                        &pfx);
1987         }
1988         else
1989         {
1990             fib_prefix_from_ip46_addr(&path->recursive.fp_nh.fp_ip, &pfx);
1991         }
1992
1993         fib_table_lock(path->recursive.fp_tbl_id,
1994                        dpo_proto_to_fib(path->fp_nh_proto),
1995                        FIB_SOURCE_RR);
1996         fei = fib_table_entry_special_add(path->recursive.fp_tbl_id,
1997                                           &pfx,
1998                                           FIB_SOURCE_RR,
1999                                           FIB_ENTRY_FLAG_NONE);
2000
2001         path = fib_path_get(path_index);
2002         path->fp_via_fib = fei;
2003
2004         /*
2005          * become a dependent child of the entry so the path is 
2006          * informed when the forwarding for the entry changes.
2007          */
2008         path->fp_sibling = fib_entry_child_add(path->fp_via_fib,
2009                                                FIB_NODE_TYPE_PATH,
2010                                                fib_path_get_index(path));
2011
2012         /*
2013          * create and configure the IP DPO
2014          */
2015         fib_path_recursive_adj_update(
2016             path,
2017             fib_path_to_chain_type(path),
2018             &path->fp_dpo);
2019
2020         break;
2021     }
2022     case FIB_PATH_TYPE_BIER_FMASK:
2023     {
2024         /*
2025          * become a dependent child of the entry so the path is
2026          * informed when the forwarding for the entry changes.
2027          */
2028         path->fp_sibling = bier_fmask_child_add(path->bier_fmask.fp_bier_fmask,
2029                                                 FIB_NODE_TYPE_PATH,
2030                                                 fib_path_get_index(path));
2031
2032         path->fp_via_bier_fmask = path->bier_fmask.fp_bier_fmask;
2033         fib_path_bier_fmask_update(path, &path->fp_dpo);
2034
2035         break;
2036     }
2037     case FIB_PATH_TYPE_BIER_IMP:
2038         bier_imp_lock(path->bier_imp.fp_bier_imp);
2039         bier_imp_contribute_forwarding(path->bier_imp.fp_bier_imp,
2040                                        DPO_PROTO_IP4,
2041                                        &path->fp_dpo);
2042         break;
2043     case FIB_PATH_TYPE_BIER_TABLE:
2044     {
2045         /*
2046          * Find/create the BIER table to link to
2047          */
2048         ASSERT(FIB_NODE_INDEX_INVALID == path->fp_via_bier_tbl);
2049
2050         path->fp_via_bier_tbl =
2051             bier_table_ecmp_create_and_lock(&path->bier_table.fp_bier_tbl);
2052
2053         bier_table_contribute_forwarding(path->fp_via_bier_tbl,
2054                                          &path->fp_dpo);
2055         break;
2056     }
2057     case FIB_PATH_TYPE_SPECIAL:
2058         if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_ICMP_PROHIBIT)
2059         {
2060             ip_null_dpo_add_and_lock (path->fp_nh_proto,
2061                                       IP_NULL_ACTION_SEND_ICMP_PROHIBIT,
2062                                       &path->fp_dpo);
2063         }
2064         else if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_ICMP_UNREACH)
2065         {
2066             ip_null_dpo_add_and_lock (path->fp_nh_proto,
2067                                       IP_NULL_ACTION_SEND_ICMP_UNREACH,
2068                                       &path->fp_dpo);
2069         }
2070         else if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_CLASSIFY)
2071         {
2072             dpo_set (&path->fp_dpo, DPO_CLASSIFY,
2073                      path->fp_nh_proto,
2074                      classify_dpo_create (path->fp_nh_proto,
2075                                           path->classify.fp_classify_table_id));
2076         }
2077         else
2078         {
2079             /*
2080              * Resolve via the drop
2081              */
2082             dpo_copy(&path->fp_dpo, drop_dpo_get(path->fp_nh_proto));
2083         }
2084         break;
2085     case FIB_PATH_TYPE_DEAG:
2086     {
2087         if (DPO_PROTO_BIER == path->fp_nh_proto)
2088         {
2089             bier_disp_table_contribute_forwarding(path->deag.fp_tbl_id,
2090                                                   &path->fp_dpo);
2091         }
2092         else
2093         {
2094             /*
2095              * Resolve via a lookup DPO.
2096              * FIXME. control plane should add routes with a table ID
2097              */
2098             lookup_input_t input;
2099             lookup_cast_t cast;
2100
2101             cast = (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_RPF_ID ?
2102                     LOOKUP_MULTICAST :
2103                     LOOKUP_UNICAST);
2104             input = (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_DEAG_SRC ?
2105                      LOOKUP_INPUT_SRC_ADDR :
2106                      LOOKUP_INPUT_DST_ADDR);
2107
2108             lookup_dpo_add_or_lock_w_fib_index(path->deag.fp_tbl_id,
2109                                                path->fp_nh_proto,
2110                                                cast,
2111                                                input,
2112                                                LOOKUP_TABLE_FROM_CONFIG,
2113                                                &path->fp_dpo);
2114         }
2115         break;
2116     }
2117     case FIB_PATH_TYPE_DVR:
2118         dvr_dpo_add_or_lock(path->attached.fp_interface,
2119                             path->fp_nh_proto,
2120                             &path->fp_dpo);
2121         break;
2122     case FIB_PATH_TYPE_RECEIVE:
2123         /*
2124          * Resolve via a receive DPO.
2125          */
2126         receive_dpo_add_or_lock(path->fp_nh_proto,
2127                                 path->receive.fp_interface,
2128                                 &path->receive.fp_addr,
2129                                 &path->fp_dpo);
2130         break;
2131     case FIB_PATH_TYPE_UDP_ENCAP:
2132         udp_encap_lock(path->udp_encap.fp_udp_encap_id);
2133         udp_encap_contribute_forwarding(path->udp_encap.fp_udp_encap_id,
2134                                         path->fp_nh_proto,
2135                                         &path->fp_dpo);
2136         break;
2137     case FIB_PATH_TYPE_INTF_RX: {
2138         /*
2139          * Resolve via an interface receive DPO.
2140          */
2141         interface_rx_dpo_add_or_lock(path->fp_nh_proto,
2142                                      path->intf_rx.fp_interface,
2143                                      &path->fp_dpo);
2144         break;
2145     }
2146     case FIB_PATH_TYPE_EXCLUSIVE:
2147         /*
2148          * Resolve via the user provided DPO
2149          */
2150         dpo_copy(&path->fp_dpo, &path->exclusive.fp_ex_dpo);
2151         break;
2152     }
2153
2154     return (fib_path_is_resolved(path_index));
2155 }
2156
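/*
 * fib_path_get_resolving_interface
 *
 * Return the SW interface index through which the path resolves: the
 * configured interface for attached-nexthop/attached/receive/DVR paths,
 * the via-entry's resolving interface for resolved recursive paths, and
 * otherwise whatever uRPF interface the path's DPO reports (which may be ~0).
 */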
2157 u32
2158 fib_path_get_resolving_interface (fib_node_index_t path_index)
2159 {
2160     fib_path_t *path;
2161
2162     path = fib_path_get(path_index);
2163
2164     switch (path->fp_type)
2165     {
2166     case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
2167         return (path->attached_next_hop.fp_interface);
2168     case FIB_PATH_TYPE_ATTACHED:
2169         return (path->attached.fp_interface);
2170     case FIB_PATH_TYPE_RECEIVE:
2171         return (path->receive.fp_interface);
2172     case FIB_PATH_TYPE_RECURSIVE:
2173         if (fib_path_is_resolved(path_index))
2174         {
2175             return (fib_entry_get_resolving_interface(path->fp_via_fib));
2176         }
2177         break;
2178     case FIB_PATH_TYPE_DVR:
2179         return (path->dvr.fp_interface);
2180     case FIB_PATH_TYPE_INTF_RX:
2181     case FIB_PATH_TYPE_UDP_ENCAP:
2182     case FIB_PATH_TYPE_SPECIAL:
2183     case FIB_PATH_TYPE_DEAG:
2184     case FIB_PATH_TYPE_EXCLUSIVE:
2185     case FIB_PATH_TYPE_BIER_FMASK:
2186     case FIB_PATH_TYPE_BIER_TABLE:
2187     case FIB_PATH_TYPE_BIER_IMP:
2188         break;
2189     }
2190     return (dpo_get_urpf(&path->fp_dpo));
2191 }
2192
2193 index_t
2194 fib_path_get_resolving_index (fib_node_index_t path_index)
2195 {
2196     fib_path_t *path;
2197
2198     path = fib_path_get(path_index);
2199
2200     switch (path->fp_type)
2201     {
2202     case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
2203     case FIB_PATH_TYPE_ATTACHED:
2204     case FIB_PATH_TYPE_RECEIVE:
2205     case FIB_PATH_TYPE_INTF_RX:
2206     case FIB_PATH_TYPE_SPECIAL:
2207     case FIB_PATH_TYPE_DEAG:
2208     case FIB_PATH_TYPE_DVR:
2209     case FIB_PATH_TYPE_EXCLUSIVE:
2210         break;
2211     case FIB_PATH_TYPE_UDP_ENCAP:
2212         return (path->udp_encap.fp_udp_encap_id);
2213     case FIB_PATH_TYPE_RECURSIVE:
2214         return (path->fp_via_fib);
2215     case FIB_PATH_TYPE_BIER_FMASK:
2216         return (path->bier_fmask.fp_bier_fmask);
2217     case FIB_PATH_TYPE_BIER_TABLE:
2218         return (path->fp_via_bier_tbl);
2219     case FIB_PATH_TYPE_BIER_IMP:
2220         return (path->bier_imp.fp_bier_imp);
2221     }
2222     return (~0);
2223 }
2224
2225 adj_index_t
2226 fib_path_get_adj (fib_node_index_t path_index)
2227 {
2228     fib_path_t *path;
2229
2230     path = fib_path_get(path_index);
2231
2232     if (dpo_is_adj(&path->fp_dpo))
2233     {
2234         return (path->fp_dpo.dpoi_index);
2235     }
2236     return (ADJ_INDEX_INVALID);
2237 }
2238
2239 u16
2240 fib_path_get_weight (fib_node_index_t path_index)
2241 {
2242     fib_path_t *path;
2243
2244     path = fib_path_get(path_index);
2245
2246     ASSERT(path);
2247
2248     return (path->fp_weight);
2249 }
2250
2251 u16
2252 fib_path_get_preference (fib_node_index_t path_index)
2253 {
2254     fib_path_t *path;
2255
2256     path = fib_path_get(path_index);
2257
2258     ASSERT(path);
2259
2260     return (path->fp_preference);
2261 }
2262
2263 u32
2264 fib_path_get_rpf_id (fib_node_index_t path_index)
2265 {
2266     fib_path_t *path;
2267
2268     path = fib_path_get(path_index);
2269
2270     ASSERT(path);
2271
2272     if (FIB_PATH_CFG_FLAG_RPF_ID & path->fp_cfg_flags)
2273     {
2274         return (path->deag.fp_rpf_id);
2275     }
2276
2277     return (~0);
2278 }
2279
2280 /**
2281  * @brief Contribute the path's adjacency to the list passed.
2282  * By calling this function over all paths, recursively, a child
2283  * can construct its full set of forwarding adjacencies, and hence its
2284  * uRPF list.
2285  */
2286 void
2287 fib_path_contribute_urpf (fib_node_index_t path_index,
2288                           index_t urpf)
2289 {
2290     fib_path_t *path;
2291
2292     path = fib_path_get(path_index);
2293
2294     /*
2295      * resolved and unresolved paths contribute to the RPF list.
2296      */
2297     switch (path->fp_type)
2298     {
2299     case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
2300         fib_urpf_list_append(urpf, path->attached_next_hop.fp_interface);
2301         break;
2302
2303     case FIB_PATH_TYPE_ATTACHED:
2304         fib_urpf_list_append(urpf, path->attached.fp_interface);
2305         break;
2306
2307     case FIB_PATH_TYPE_RECURSIVE:
2308         if (FIB_NODE_INDEX_INVALID != path->fp_via_fib &&
2309             !fib_path_is_looped(path_index))
2310         {
2311             /*
2312              * a path may be unresolved because of resolution constraints, or
2313              * because it has no via-entry; with no via-entry there is nothing to contribute.
2314              */
2315             fib_entry_contribute_urpf(path->fp_via_fib, urpf);
2316         }
2317         break;
2318
2319     case FIB_PATH_TYPE_EXCLUSIVE:
2320     case FIB_PATH_TYPE_SPECIAL:
2321     {
2322         /*
2323          * these path types may link to an adj, if that's what
2324          * the client gave
2325          */
2326         u32 rpf_sw_if_index;
2327
2328         rpf_sw_if_index = dpo_get_urpf(&path->fp_dpo);
2329
2330         if (~0 != rpf_sw_if_index)
2331         {
2332             fib_urpf_list_append(urpf, rpf_sw_if_index);
2333         }
2334         break;
2335     }
2336     case FIB_PATH_TYPE_DVR:
2337         fib_urpf_list_append(urpf, path->dvr.fp_interface);
2338         break;
2339     case FIB_PATH_TYPE_DEAG:
2340     case FIB_PATH_TYPE_RECEIVE:
2341     case FIB_PATH_TYPE_INTF_RX:
2342     case FIB_PATH_TYPE_UDP_ENCAP:
2343     case FIB_PATH_TYPE_BIER_FMASK:
2344     case FIB_PATH_TYPE_BIER_TABLE:
2345     case FIB_PATH_TYPE_BIER_IMP:
2346         /*
2347          * these path types don't link to an adj
2348          */
2349         break;
2350     }
2351 }
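
/*
 * For example, an entry with two attached-next-hop paths, one out interface A
 * and one out interface B, contributes the uRPF list {A, B}, which its
 * children can then use for unicast-RPF checks.
 */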
2352
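/*
 * fib_path_stack_mpls_disp
 *
 * Stack an MPLS disposition DPO on top of the DPO passed in, for
 * attached-next-hop and deag paths (the deag path's RPF-ID is used in the
 * latter case). If the path is configured to pop a pseudo-wire control word,
 * a PW-CW DPO is stacked as well.
 */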
2353 void
2354 fib_path_stack_mpls_disp (fib_node_index_t path_index,
2355                           dpo_proto_t payload_proto,
2356                           fib_mpls_lsp_mode_t mode,
2357                           dpo_id_t *dpo)
2358 {
2359     fib_path_t *path;
2360
2361     path = fib_path_get(path_index);
2362
2363     ASSERT(path);
2364
2365     switch (path->fp_type)
2366     {
2367     case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
2368     {
2369         dpo_id_t tmp = DPO_INVALID;
2370
2371         dpo_copy(&tmp, dpo);
2372
2373         mpls_disp_dpo_create(payload_proto, ~0, mode, &tmp, dpo);
2374         dpo_reset(&tmp);
2375         break;
2376     }                
2377     case FIB_PATH_TYPE_DEAG:
2378     {
2379         dpo_id_t tmp = DPO_INVALID;
2380
2381         dpo_copy(&tmp, dpo);
2382
2383         mpls_disp_dpo_create(payload_proto,
2384                              path->deag.fp_rpf_id,
2385                              mode, &tmp, dpo);
2386         dpo_reset(&tmp);
2387         break;
2388     }
2389     case FIB_PATH_TYPE_RECEIVE:
2390     case FIB_PATH_TYPE_ATTACHED:
2391     case FIB_PATH_TYPE_RECURSIVE:
2392     case FIB_PATH_TYPE_INTF_RX:
2393     case FIB_PATH_TYPE_UDP_ENCAP:
2394     case FIB_PATH_TYPE_EXCLUSIVE:
2395     case FIB_PATH_TYPE_SPECIAL:
2396     case FIB_PATH_TYPE_BIER_FMASK:
2397     case FIB_PATH_TYPE_BIER_TABLE:
2398     case FIB_PATH_TYPE_BIER_IMP:
2399     case FIB_PATH_TYPE_DVR:
2400         break;
2401     }
2402
2403     if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_POP_PW_CW)
2404     {
2405         dpo_id_t tmp = DPO_INVALID;
2406
2407         dpo_copy(&tmp, dpo);
2408
2409         pw_cw_dpo_create(&tmp, dpo);
2410         dpo_reset(&tmp);
2411     }
2412 }
2413
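/*
 * fib_path_contribute_forwarding
 *
 * Fill in the DPO this path contributes for the requested forwarding chain
 * type. If the requested chain type matches the path's native type, the
 * cached fp_dpo is copied; otherwise a chain-type specific DPO (adjacency,
 * lookup, BIER, interface-RX, ...) is constructed on demand.
 */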
2414 void
2415 fib_path_contribute_forwarding (fib_node_index_t path_index,
2416                                 fib_forward_chain_type_t fct,
2417                                 dpo_id_t *dpo)
2418 {
2419     fib_path_t *path;
2420
2421     path = fib_path_get(path_index);
2422
2423     ASSERT(path);
2424     ASSERT(FIB_FORW_CHAIN_TYPE_MPLS_EOS != fct);
2425
2426     /*
2427      * The DPO stored in the path was created when the path was resolved.
2428      * It therefore represents the path's 'native' forwarding chain type.
2429      * For all other chain types we need to construct something else.
2430      */
2431     if (fib_path_to_chain_type(path) == fct)
2432     {
2433         dpo_copy(dpo, &path->fp_dpo);
2434     }
2435     else
2436     {
2437         switch (path->fp_type)
2438         {
2439         case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
2440             switch (fct)
2441             {
2442             case FIB_FORW_CHAIN_TYPE_UNICAST_IP4:
2443             case FIB_FORW_CHAIN_TYPE_UNICAST_IP6:
2444             case FIB_FORW_CHAIN_TYPE_MPLS_EOS:
2445             case FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS:
2446             case FIB_FORW_CHAIN_TYPE_ETHERNET:
2447             case FIB_FORW_CHAIN_TYPE_NSH:
2448             case FIB_FORW_CHAIN_TYPE_MCAST_IP4:
2449             case FIB_FORW_CHAIN_TYPE_MCAST_IP6:
2450                 fib_path_attached_next_hop_get_adj(
2451                          path,
2452                          fib_forw_chain_type_to_link_type(fct),
2453                          dpo);
2454                 break;
2455             case FIB_FORW_CHAIN_TYPE_BIER:
2456                 break;
2457             }
2458             break;
2459         case FIB_PATH_TYPE_RECURSIVE:
2460             switch (fct)
2461             {
2462             case FIB_FORW_CHAIN_TYPE_MPLS_EOS:
2463             case FIB_FORW_CHAIN_TYPE_UNICAST_IP4:
2464             case FIB_FORW_CHAIN_TYPE_UNICAST_IP6:
2465             case FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS:
2466             case FIB_FORW_CHAIN_TYPE_MCAST_IP4:
2467             case FIB_FORW_CHAIN_TYPE_MCAST_IP6:
2468             case FIB_FORW_CHAIN_TYPE_BIER:
2469                 fib_path_recursive_adj_update(path, fct, dpo);
2470                 break;
2471             case FIB_FORW_CHAIN_TYPE_ETHERNET:
2472             case FIB_FORW_CHAIN_TYPE_NSH:
2473                 ASSERT(0);
2474                 break;
2475             }
2476             break;
2477         case FIB_PATH_TYPE_BIER_TABLE:
2478             switch (fct)
2479             {
2480             case FIB_FORW_CHAIN_TYPE_BIER:
2481                 bier_table_contribute_forwarding(path->fp_via_bier_tbl, dpo);
2482                 break;
2483             case FIB_FORW_CHAIN_TYPE_MPLS_EOS:
2484             case FIB_FORW_CHAIN_TYPE_UNICAST_IP4:
2485             case FIB_FORW_CHAIN_TYPE_UNICAST_IP6:
2486             case FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS:
2487             case FIB_FORW_CHAIN_TYPE_MCAST_IP4:
2488             case FIB_FORW_CHAIN_TYPE_MCAST_IP6:
2489             case FIB_FORW_CHAIN_TYPE_ETHERNET:
2490             case FIB_FORW_CHAIN_TYPE_NSH:
2491                 ASSERT(0);
2492                 break;
2493             }
2494             break;
2495         case FIB_PATH_TYPE_BIER_FMASK:
2496             switch (fct)
2497             {
2498             case FIB_FORW_CHAIN_TYPE_BIER:
2499                 fib_path_bier_fmask_update(path, dpo);
2500                 break;
2501             case FIB_FORW_CHAIN_TYPE_MPLS_EOS:
2502             case FIB_FORW_CHAIN_TYPE_UNICAST_IP4:
2503             case FIB_FORW_CHAIN_TYPE_UNICAST_IP6:
2504             case FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS:
2505             case FIB_FORW_CHAIN_TYPE_MCAST_IP4:
2506             case FIB_FORW_CHAIN_TYPE_MCAST_IP6:
2507             case FIB_FORW_CHAIN_TYPE_ETHERNET:
2508             case FIB_FORW_CHAIN_TYPE_NSH:
2509                 ASSERT(0);
2510                 break;
2511             }
2512             break;
2513         case FIB_PATH_TYPE_BIER_IMP:
2514             bier_imp_contribute_forwarding(path->bier_imp.fp_bier_imp,
2515                                            fib_forw_chain_type_to_dpo_proto(fct),
2516                                            dpo);
2517             break;
2518         case FIB_PATH_TYPE_DEAG:
2519             switch (fct)
2520             {
2521             case FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS:
2522                 lookup_dpo_add_or_lock_w_table_id(MPLS_FIB_DEFAULT_TABLE_ID,
2523                                                   DPO_PROTO_MPLS,
2524                                                   LOOKUP_UNICAST,
2525                                                   LOOKUP_INPUT_DST_ADDR,
2526                                                   LOOKUP_TABLE_FROM_CONFIG,
2527                                                   dpo);
2528                 break;
2529             case FIB_FORW_CHAIN_TYPE_MPLS_EOS:
2530             case FIB_FORW_CHAIN_TYPE_UNICAST_IP4:
2531             case FIB_FORW_CHAIN_TYPE_UNICAST_IP6:
2532             case FIB_FORW_CHAIN_TYPE_MCAST_IP4:
2533             case FIB_FORW_CHAIN_TYPE_MCAST_IP6:
2534                 dpo_copy(dpo, &path->fp_dpo);
2535                 break;
2536             case FIB_FORW_CHAIN_TYPE_BIER:
2537                 break;
2538             case FIB_FORW_CHAIN_TYPE_ETHERNET:
2539             case FIB_FORW_CHAIN_TYPE_NSH:
2540                 ASSERT(0);
2541                 break;
2542             }
2543             break;
2544         case FIB_PATH_TYPE_EXCLUSIVE:
2545             dpo_copy(dpo, &path->exclusive.fp_ex_dpo);
2546             break;
2547         case FIB_PATH_TYPE_ATTACHED:
2548             switch (fct)
2549             {
2550             case FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS:
2551             case FIB_FORW_CHAIN_TYPE_UNICAST_IP4:
2552             case FIB_FORW_CHAIN_TYPE_UNICAST_IP6:
2553             case FIB_FORW_CHAIN_TYPE_MPLS_EOS:
2554             case FIB_FORW_CHAIN_TYPE_ETHERNET:
2555             case FIB_FORW_CHAIN_TYPE_NSH:
2556             case FIB_FORW_CHAIN_TYPE_BIER:
2557                 fib_path_attached_get_adj(path,
2558                                           fib_forw_chain_type_to_link_type(fct),
2559                                           dpo);
2560                 break;
2561             case FIB_FORW_CHAIN_TYPE_MCAST_IP4:
2562             case FIB_FORW_CHAIN_TYPE_MCAST_IP6:
2563                 {
2564                     adj_index_t ai;
2565
2566                     /*
2567                      * Create the adj needed for sending IP multicast traffic
2568                      */
2569                     if (vnet_sw_interface_is_p2p(vnet_get_main(),
2570                                                  path->attached.fp_interface))
2571                     {
2572                         /*
2573                          * point-2-point interfaces do not require a glean, since
2574                          * there is nothing to ARP. Install a rewrite/nbr adj instead
2575                          */
2576                         ai = adj_nbr_add_or_lock(dpo_proto_to_fib(path->fp_nh_proto),
2577                                                  fib_forw_chain_type_to_link_type(fct),
2578                                                  &zero_addr,
2579                                                  path->attached.fp_interface);
2580                     }
2581                     else
2582                     {
2583                         ai = adj_mcast_add_or_lock(dpo_proto_to_fib(path->fp_nh_proto),
2584                                                    fib_forw_chain_type_to_link_type(fct),
2585                                                    path->attached.fp_interface);
2586                     }
2587                     dpo_set(dpo, DPO_ADJACENCY,
2588                             fib_forw_chain_type_to_dpo_proto(fct),
2589                             ai);
2590                     adj_unlock(ai);
2591                 }
2592                 break;
2593             }
2594             break;
2595         case FIB_PATH_TYPE_INTF_RX:
2596             /*
2597              * Create the interface-RX DPO for the requested chain type's protocol
2598              */
2599             interface_rx_dpo_add_or_lock(fib_forw_chain_type_to_dpo_proto(fct),
2600                                          path->attached.fp_interface,
2601                                          dpo);
2602             break;
2603         case FIB_PATH_TYPE_UDP_ENCAP:
2604             udp_encap_contribute_forwarding(path->udp_encap.fp_udp_encap_id,
2605                                             path->fp_nh_proto,
2606                                             dpo);
2607             break;
2608         case FIB_PATH_TYPE_RECEIVE:
2609         case FIB_PATH_TYPE_SPECIAL:
2610         case FIB_PATH_TYPE_DVR:
2611             dpo_copy(dpo, &path->fp_dpo);
2612             break;
2613         }
2614     }
2615 }
2616
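/*
 * fib_path_append_nh_for_multipath_hash
 *
 * Append this path as a load_balance_path_t to the vector passed (hash_key),
 * carrying the path's weight, index and the DPO it contributes for the given
 * chain type; unresolved paths contribute a drop DPO. The (possibly
 * re-allocated) vector is returned.
 */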
2617 load_balance_path_t *
2618 fib_path_append_nh_for_multipath_hash (fib_node_index_t path_index,
2619                                        fib_forward_chain_type_t fct,
2620                                        load_balance_path_t *hash_key)
2621 {
2622     load_balance_path_t *mnh;
2623     fib_path_t *path;
2624
2625     path = fib_path_get(path_index);
2626
2627     ASSERT(path);
2628
2629     vec_add2(hash_key, mnh, 1);
2630
2631     mnh->path_weight = path->fp_weight;
2632     mnh->path_index = path_index;
2633
2634     if (fib_path_is_resolved(path_index))
2635     {
2636         fib_path_contribute_forwarding(path_index, fct, &mnh->path_dpo);
2637     }
2638     else
2639     {
2640         dpo_copy(&mnh->path_dpo,
2641                  drop_dpo_get(fib_forw_chain_type_to_dpo_proto(fct)));
2642     }
2643     return (hash_key);
2644 }
2645
2646 int
2647 fib_path_is_recursive_constrained (fib_node_index_t path_index)
2648 {
2649     fib_path_t *path;
2650
2651     path = fib_path_get(path_index);
2652
2653     return ((FIB_PATH_TYPE_RECURSIVE == path->fp_type) &&
2654             ((path->fp_cfg_flags & FIB_PATH_CFG_FLAG_RESOLVE_ATTACHED) ||
2655              (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_RESOLVE_HOST)));
2656 }
2657
2658 int
2659 fib_path_is_exclusive (fib_node_index_t path_index)
2660 {
2661     fib_path_t *path;
2662
2663     path = fib_path_get(path_index);
2664
2665     return (FIB_PATH_TYPE_EXCLUSIVE == path->fp_type);
2666 }
2667
2668 int
2669 fib_path_is_deag (fib_node_index_t path_index)
2670 {
2671     fib_path_t *path;
2672
2673     path = fib_path_get(path_index);
2674
2675     return (FIB_PATH_TYPE_DEAG == path->fp_type);
2676 }
2677
2678 int
2679 fib_path_is_resolved (fib_node_index_t path_index)
2680 {
2681     fib_path_t *path;
2682
2683     path = fib_path_get(path_index);
2684
2685     return (dpo_id_is_valid(&path->fp_dpo) &&
2686             (path->fp_oper_flags & FIB_PATH_OPER_FLAG_RESOLVED) &&
2687             !fib_path_is_looped(path_index) &&
2688             !fib_path_is_permanent_drop(path));
2689 }
2690
2691 int
2692 fib_path_is_looped (fib_node_index_t path_index)
2693 {
2694     fib_path_t *path;
2695
2696     path = fib_path_get(path_index);
2697
2698     return (path->fp_oper_flags & FIB_PATH_OPER_FLAG_RECURSIVE_LOOP);
2699 }
2700
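/*
 * fib_path_encode
 *
 * Path-list walk callback that encodes the internal fib_path_t into the
 * external fib_route_path_t representation, appended to ctx->rpaths; used,
 * for example, when dumping routes to clients.
 */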
2701 fib_path_list_walk_rc_t
2702 fib_path_encode (fib_node_index_t path_list_index,
2703                  fib_node_index_t path_index,
2704                  const fib_path_ext_t *path_ext,
2705                  void *args)
2706 {
2707     fib_path_encode_ctx_t *ctx = args;
2708     fib_route_path_t *rpath;
2709     fib_path_t *path;
2710
2711     path = fib_path_get(path_index);
2712     if (!path)
2713       return (FIB_PATH_LIST_WALK_CONTINUE);
2714
2715     vec_add2(ctx->rpaths, rpath, 1);
2716     rpath->frp_weight = path->fp_weight;
2717     rpath->frp_preference = path->fp_preference;
2718     rpath->frp_proto = path->fp_nh_proto;
2719     rpath->frp_sw_if_index = ~0;
2720     rpath->frp_fib_index = 0;
2721
2722     switch (path->fp_type)
2723     {
2724       case FIB_PATH_TYPE_RECEIVE:
2725         rpath->frp_addr = path->receive.fp_addr;
2726         rpath->frp_sw_if_index = path->receive.fp_interface;
2727         rpath->frp_flags |= FIB_ROUTE_PATH_LOCAL;
2728         break;
2729       case FIB_PATH_TYPE_ATTACHED:
2730         rpath->frp_sw_if_index = path->attached.fp_interface;
2731         break;
2732       case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
2733         rpath->frp_sw_if_index = path->attached_next_hop.fp_interface;
2734         rpath->frp_addr = path->attached_next_hop.fp_nh;
2735         break;
2736       case FIB_PATH_TYPE_BIER_FMASK:
2737         rpath->frp_bier_fmask = path->bier_fmask.fp_bier_fmask;
2738         break;
2739       case FIB_PATH_TYPE_SPECIAL:
2740         break;
2741       case FIB_PATH_TYPE_DEAG:
2742         rpath->frp_fib_index = path->deag.fp_tbl_id;
2743         if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_RPF_ID)
2744         {
2745             rpath->frp_flags |= FIB_ROUTE_PATH_RPF_ID;
2746         }
2747         break;
2748       case FIB_PATH_TYPE_RECURSIVE:
2749         rpath->frp_addr = path->recursive.fp_nh.fp_ip;
2750         rpath->frp_fib_index = path->recursive.fp_tbl_id;
2751         break;
2752       case FIB_PATH_TYPE_DVR:
2753         rpath->frp_sw_if_index = path->dvr.fp_interface;
2754         rpath->frp_flags |= FIB_ROUTE_PATH_DVR;
2755         break;
2756       case FIB_PATH_TYPE_UDP_ENCAP:
2757         rpath->frp_udp_encap_id = path->udp_encap.fp_udp_encap_id;
2758         rpath->frp_flags |= FIB_ROUTE_PATH_UDP_ENCAP;
2759         break;
2760       case FIB_PATH_TYPE_INTF_RX:
2761         rpath->frp_sw_if_index = path->intf_rx.fp_interface;
2762         rpath->frp_flags |= FIB_ROUTE_PATH_INTF_RX;
2763         break;
2764       case FIB_PATH_TYPE_EXCLUSIVE:
2765         rpath->frp_flags |= FIB_ROUTE_PATH_EXCLUSIVE;
2766       default:
2767         break;
2768     }
2769
2770     if (path_ext && path_ext->fpe_type == FIB_PATH_EXT_MPLS) 
2771     {
2772         rpath->frp_label_stack = path_ext->fpe_path.frp_label_stack;
2773     }
2774
2775     if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_DROP)
2776         rpath->frp_flags |= FIB_ROUTE_PATH_DROP;
2777     if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_ICMP_UNREACH)
2778         rpath->frp_flags |= FIB_ROUTE_PATH_ICMP_UNREACH;
2779     if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_ICMP_PROHIBIT)
2780         rpath->frp_flags |= FIB_ROUTE_PATH_ICMP_PROHIBIT;
2781
2782     return (FIB_PATH_LIST_WALK_CONTINUE);
2783 }
2784
2785 dpo_proto_t
2786 fib_path_get_proto (fib_node_index_t path_index)
2787 {
2788     fib_path_t *path;
2789
2790     path = fib_path_get(path_index);
2791
2792     return (path->fp_nh_proto);
2793 }
2794
2795 void
2796 fib_path_module_init (void)
2797 {
2798     fib_node_register_type (FIB_NODE_TYPE_PATH, &fib_path_vft);
2799     fib_path_logger = vlib_log_register_class ("fib", "path");
2800 }
2801
2802 static clib_error_t *
2803 show_fib_path_command (vlib_main_t * vm,
2804                         unformat_input_t * input,
2805                         vlib_cli_command_t * cmd)
2806 {
2807     fib_node_index_t pi;
2808     fib_path_t *path;
2809
2810     if (unformat (input, "%d", &pi))
2811     {
2812         /*
2813          * show one in detail
2814          */
2815         if (!pool_is_free_index(fib_path_pool, pi))
2816         {
2817             path = fib_path_get(pi);
2818             u8 *s = format(NULL, "%U", format_fib_path, pi, 1,
2819                            FIB_PATH_FORMAT_FLAGS_NONE);
2820             s = format(s, "\n  children:");
2821             s = fib_node_children_format(path->fp_node.fn_children, s);
2822             vlib_cli_output (vm, "%s", s);
2823             vec_free(s);
2824         }
2825         else
2826         {
2827             vlib_cli_output (vm, "path %d invalid", pi);
2828         }
2829     }
2830     else
2831     {
2832         vlib_cli_output (vm, "FIB Paths");
2833         pool_foreach_index (pi, fib_path_pool,
2834         ({
2835             vlib_cli_output (vm, "%U", format_fib_path, pi, 0,
2836                              FIB_PATH_FORMAT_FLAGS_NONE);
2837         }));
2838     }
2839
2840     return (NULL);
2841 }
2842
2843 VLIB_CLI_COMMAND (show_fib_path, static) = {
2844   .path = "show fib paths",
2845   .function = show_fib_path_command,
2846   .short_help = "show fib paths",
2847 };
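
/*
 * Example usage (a sketch; the exact output comes from format_fib_path):
 *
 *   vpp# show fib paths          - list every path in the pool
 *   vpp# show fib paths <index>  - show one path in detail, including its
 *                                  child dependents
 */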