vnet: allow format deleted swifidx
[vpp.git] / src / vnet / fib / fib_path.c
1 /*
2  * Copyright (c) 2016 Cisco and/or its affiliates.
3  * Licensed under the Apache License, Version 2.0 (the "License");
4  * you may not use this file except in compliance with the License.
5  * You may obtain a copy of the License at:
6  *
7  *     http://www.apache.org/licenses/LICENSE-2.0
8  *
9  * Unless required by applicable law or agreed to in writing, software
10  * distributed under the License is distributed on an "AS IS" BASIS,
11  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12  * See the License for the specific language governing permissions and
13  * limitations under the License.
14  */
15
16 #include <vlib/vlib.h>
17 #include <vnet/vnet.h>
18 #include <vnet/ip/format.h>
19 #include <vnet/ip/ip.h>
20 #include <vnet/dpo/drop_dpo.h>
21 #include <vnet/dpo/receive_dpo.h>
22 #include <vnet/dpo/load_balance_map.h>
23 #include <vnet/dpo/lookup_dpo.h>
24 #include <vnet/dpo/interface_rx_dpo.h>
25 #include <vnet/dpo/mpls_disposition.h>
26 #include <vnet/dpo/dvr_dpo.h>
27 #include <vnet/dpo/ip_null_dpo.h>
28 #include <vnet/dpo/classify_dpo.h>
29 #include <vnet/dpo/pw_cw.h>
30
31 #include <vnet/adj/adj.h>
32 #include <vnet/adj/adj_mcast.h>
33
34 #include <vnet/fib/fib_path.h>
35 #include <vnet/fib/fib_node.h>
36 #include <vnet/fib/fib_table.h>
37 #include <vnet/fib/fib_entry.h>
38 #include <vnet/fib/fib_path_list.h>
39 #include <vnet/fib/fib_internal.h>
40 #include <vnet/fib/fib_urpf_list.h>
41 #include <vnet/fib/mpls_fib.h>
42 #include <vnet/fib/fib_path_ext.h>
43 #include <vnet/udp/udp_encap.h>
44 #include <vnet/bier/bier_fmask.h>
45 #include <vnet/bier/bier_table.h>
46 #include <vnet/bier/bier_imp.h>
47 #include <vnet/bier/bier_disp_table.h>
48
49 /**
50  * Enumeration of path types
51  */
52 typedef enum fib_path_type_t_ {
53     /**
54      * Marker. Add new types after this one.
55      */
56     FIB_PATH_TYPE_FIRST = 0,
57     /**
58      * Attached-nexthop. An interface and a nexthop are known.
59      */
60     FIB_PATH_TYPE_ATTACHED_NEXT_HOP = FIB_PATH_TYPE_FIRST,
61     /**
62      * attached. Only the interface is known.
63      */
64     FIB_PATH_TYPE_ATTACHED,
65     /**
66      * recursive. Only the next-hop is known.
67      */
68     FIB_PATH_TYPE_RECURSIVE,
69     /**
70      * special. nothing is known. so we drop.
71      */
72     FIB_PATH_TYPE_SPECIAL,
73     /**
74      * exclusive. user provided adj.
75      */
76     FIB_PATH_TYPE_EXCLUSIVE,
77     /**
78      * deag. Link to a lookup adj in the next table
79      */
80     FIB_PATH_TYPE_DEAG,
81     /**
82      * interface receive.
83      */
84     FIB_PATH_TYPE_INTF_RX,
85     /**
86      * Path resolves via a UDP encap object.
87      */
88     FIB_PATH_TYPE_UDP_ENCAP,
89     /**
90      * receive. it's for-us.
91      */
92     FIB_PATH_TYPE_RECEIVE,
93     /**
94      * bier-imp. it's via a BIER imposition.
95      */
96     FIB_PATH_TYPE_BIER_IMP,
97     /**
98      * bier-table. it's via a BIER ECMP-table.
99      */
100     FIB_PATH_TYPE_BIER_TABLE,
101     /**
102      * bier-fmask. it's via a BIER f-mask.
103      */
104     FIB_PATH_TYPE_BIER_FMASK,
105     /**
106      * via a DVR.
107      */
108     FIB_PATH_TYPE_DVR,
109 } __attribute__ ((packed)) fib_path_type_t;
110
111 #define FIB_PATH_TYPES {                                        \
112     [FIB_PATH_TYPE_ATTACHED_NEXT_HOP] = "attached-nexthop",     \
113     [FIB_PATH_TYPE_ATTACHED]          = "attached",             \
114     [FIB_PATH_TYPE_RECURSIVE]         = "recursive",            \
115     [FIB_PATH_TYPE_SPECIAL]           = "special",              \
116     [FIB_PATH_TYPE_EXCLUSIVE]         = "exclusive",            \
117     [FIB_PATH_TYPE_DEAG]              = "deag",                 \
118     [FIB_PATH_TYPE_INTF_RX]           = "intf-rx",              \
119     [FIB_PATH_TYPE_UDP_ENCAP]         = "udp-encap",            \
120     [FIB_PATH_TYPE_RECEIVE]           = "receive",              \
121     [FIB_PATH_TYPE_BIER_IMP]          = "bier-imp",             \
122     [FIB_PATH_TYPE_BIER_TABLE]        = "bier-table",           \
123     [FIB_PATH_TYPE_BIER_FMASK]        = "bier-fmask",           \
124     [FIB_PATH_TYPE_DVR]               = "dvr",                  \
125 }
126
127 /**
128  * Enumeration of path operational (i.e. derived) attributes
129  */
130 typedef enum fib_path_oper_attribute_t_ {
131     /**
132      * Marker. Add new types after this one.
133      */
134     FIB_PATH_OPER_ATTRIBUTE_FIRST = 0,
135     /**
136      * The path forms part of a recursive loop.
137      */
138     FIB_PATH_OPER_ATTRIBUTE_RECURSIVE_LOOP = FIB_PATH_OPER_ATTRIBUTE_FIRST,
139     /**
140      * The path is resolved
141      */
142     FIB_PATH_OPER_ATTRIBUTE_RESOLVED,
143     /**
144      * The path has become a permanent drop.
145      */
146     FIB_PATH_OPER_ATTRIBUTE_DROP,
147     /**
148      * Marker. Add new types before this one, then update it.
149      */
150     FIB_PATH_OPER_ATTRIBUTE_LAST = FIB_PATH_OPER_ATTRIBUTE_DROP,
151 } __attribute__ ((packed)) fib_path_oper_attribute_t;
152
153 /**
154  * The maximum number of path operational attributes
155  */
156 #define FIB_PATH_OPER_ATTRIBUTE_MAX (FIB_PATH_OPER_ATTRIBUTE_LAST + 1)
157
158 #define FIB_PATH_OPER_ATTRIBUTES {                                      \
159     [FIB_PATH_OPER_ATTRIBUTE_RECURSIVE_LOOP] = "recursive-loop",        \
160     [FIB_PATH_OPER_ATTRIBUTE_RESOLVED]       = "resolved",              \
161     [FIB_PATH_OPER_ATTRIBUTE_DROP]           = "drop",                  \
162 }
163
164 #define FOR_EACH_FIB_PATH_OPER_ATTRIBUTE(_item) \
165     for (_item = FIB_PATH_OPER_ATTRIBUTE_FIRST; \
166          _item <= FIB_PATH_OPER_ATTRIBUTE_LAST; \
167          _item++)
168
169 /**
170  * Path flags from the attributes
171  */
172 typedef enum fib_path_oper_flags_t_ {
173     FIB_PATH_OPER_FLAG_NONE = 0,
174     FIB_PATH_OPER_FLAG_RECURSIVE_LOOP = (1 << FIB_PATH_OPER_ATTRIBUTE_RECURSIVE_LOOP),
175     FIB_PATH_OPER_FLAG_DROP = (1 << FIB_PATH_OPER_ATTRIBUTE_DROP),
176     FIB_PATH_OPER_FLAG_RESOLVED = (1 << FIB_PATH_OPER_ATTRIBUTE_RESOLVED),
177 } __attribute__ ((packed)) fib_path_oper_flags_t;
178
179 /**
180  * A FIB path
181  */
182 typedef struct fib_path_t_ {
183     /**
184      * A path is a node in the FIB graph.
185      */
186     fib_node_t fp_node;
187
188     /**
189      * The index of the path-list to which this path belongs
190      */
191     u32 fp_pl_index;
192
193     /**
194      * This marks the start of the memory area used to hash
195      * the path
196      */
197     STRUCT_MARK(path_hash_start);
198
199     /**
200      * Configuration Flags
201      */
202     fib_path_cfg_flags_t fp_cfg_flags;
203
204     /**
205      * The type of the path. This is the selector for the union
206      */
207     fib_path_type_t fp_type;
208
209     /**
210      * The protocol of the next-hop, i.e. the address family of the
211      * next-hop's address. We can't derive this from the address itself
212      * since the address can be all zeros
213      */
214     dpo_proto_t fp_nh_proto;
215
216     /**
217      * UCMP [unnormalised] weight
218      */
219     u8 fp_weight;
220
221     /**
222      * A path preference. 0 is the best.
223      * Only paths of the best preference, that are 'up', are considered
224      * for forwarding.
225      */
226     u8 fp_preference;
227
228     /**
229      * per-type union of the data required to resolve the path
230      */
231     union {
232         struct {
233             /**
234              * The next-hop
235              */
236             ip46_address_t fp_nh;
237             /**
238              * The interface
239              */
240             u32 fp_interface;
241         } attached_next_hop;
242         struct {
243             /**
244              * The Connected local address
245              */
246             fib_prefix_t fp_connected;
247             /**
248              * The interface
249              */
250             u32 fp_interface;
251         } attached;
252         struct {
253             union
254             {
255                 /**
256                  * The next-hop
257                  */
258                 ip46_address_t fp_ip;
259                 struct {
260                     /**
261                      * The local label to resolve through.
262                      */
263                     mpls_label_t fp_local_label;
264                     /**
265                      * The EOS bit of the resolving label
266                      */
267                     mpls_eos_bit_t fp_eos;
268                 };
269             } fp_nh;
270             /**
271              * The FIB table index in which to find the next-hop.
272              */
273             fib_node_index_t fp_tbl_id;
274         } recursive;
275         struct {
276             /**
277              * BIER FMask ID
278              */
279             index_t fp_bier_fmask;
280         } bier_fmask;
281         struct {
282             /**
283              * The BIER table's ID
284              */
285             bier_table_id_t fp_bier_tbl;
286         } bier_table;
287         struct {
288             /**
289              * The BIER imposition object
290              * this is part of the path's key, since the index_t
291              * of an imposition object is the object's key.
292              */
293             index_t fp_bier_imp;
294         } bier_imp;
295         struct {
296             /**
297              * The FIB index in which to perform the next lookup
298              */
299             fib_node_index_t fp_tbl_id;
300             /**
301              * The RPF-ID to tag the packets with
302              */
303             fib_rpf_id_t fp_rpf_id;
304         } deag;
305         struct {
306         } special;
307         struct {
308             /**
309              * The user provided 'exclusive' DPO
310              */
311             dpo_id_t fp_ex_dpo;
312         } exclusive;
313         struct {
314             /**
315              * The interface on which the local address is configured
316              */
317             u32 fp_interface;
318             /**
319              * The next-hop
320              */
321             ip46_address_t fp_addr;
322         } receive;
323         struct {
324             /**
325              * The interface on which the packets will be input.
326              */
327             u32 fp_interface;
328         } intf_rx;
329         struct {
330             /**
331              * The UDP Encap object this path resolves through
332              */
333             u32 fp_udp_encap_id;
334         } udp_encap;
335         struct {
336             /**
337              * The classify table through which this path resolves
338              */
339             u32 fp_classify_table_id;
340         } classify;
341         struct {
342             /**
343              * The interface
344              */
345             u32 fp_interface;
346         } dvr;
347     };
348     STRUCT_MARK(path_hash_end);
349
350     /**
351      * Members in this last section represent information that is
352      * derived during resolution. It should not be copied to new paths
353      * nor compared.
354      */
355
356     /**
357      * Operational Flags
358      */
359     fib_path_oper_flags_t fp_oper_flags;
360
361     union {
362         /**
363          * the resolving via fib. not part of the union, since it is not part
364          * of the path's hash.
365          */
366         fib_node_index_t fp_via_fib;
367         /**
368          * the resolving bier-table
369          */
370         index_t fp_via_bier_tbl;
371         /**
372          * the resolving bier-fmask
373          */
374         index_t fp_via_bier_fmask;
375     };
376
377     /**
378      * The Data-path objects through which this path resolves for IP.
379      */
380     dpo_id_t fp_dpo;
381
382     /**
383      * the index of this path in the parent's child list.
384      */
385     u32 fp_sibling;
386 } fib_path_t;
387
388 /*
389  * Array of strings/names for the path types and attributes
390  */
391 static const char *fib_path_type_names[] = FIB_PATH_TYPES;
392 static const char *fib_path_oper_attribute_names[] = FIB_PATH_OPER_ATTRIBUTES;
393 static const char *fib_path_cfg_attribute_names[]  = FIB_PATH_CFG_ATTRIBUTES;
394
395 /*
396  * The memory pool from which we allocate all the paths
397  */
398 static fib_path_t *fib_path_pool;
399
400 /**
401  * the logger
402  */
403 vlib_log_class_t fib_path_logger;
404
405 /*
406  * Debug macro
407  */
408 #define FIB_PATH_DBG(_p, _fmt, _args...)                                \
409 {                                                                       \
410     vlib_log_debug (fib_path_logger,                                    \
411                     "[%U]: " _fmt,                                      \
412                     format_fib_path, fib_path_get_index(_p), 0,         \
413                     FIB_PATH_FORMAT_FLAGS_ONE_LINE,                     \
414                     ##_args);                                           \
415 }
416
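/**
 * Get a path object from its index in the path pool
 */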
417 static fib_path_t *
418 fib_path_get (fib_node_index_t index)
419 {
420     return (pool_elt_at_index(fib_path_pool, index));
421 }
422
423 static fib_node_index_t 
424 fib_path_get_index (fib_path_t *path)
425 {
426     return (path - fib_path_pool);
427 }
428
429 static fib_node_t *
430 fib_path_get_node (fib_node_index_t index)
431 {
432     return ((fib_node_t*)fib_path_get(index));
433 }
434
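/**
 * Downcast a FIB graph node to the path that contains it.
 * The fp_node member is the first in the path struct, so the cast is safe.
 */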
435 static fib_path_t*
436 fib_path_from_fib_node (fib_node_t *node)
437 {
438     ASSERT(FIB_NODE_TYPE_PATH == node->fn_type);
439     return ((fib_path_t*)node);
440 }
441
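/**
 * Format a path for show output.
 * The va_args are: the path's index, an indent and the format flags.
 */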
442 u8 *
443 format_fib_path (u8 * s, va_list * args)
444 {
445     fib_node_index_t path_index = va_arg (*args, fib_node_index_t);
446     u32 indent = va_arg (*args, u32);
447     fib_format_path_flags_t flags =  va_arg (*args, fib_format_path_flags_t);
448     vnet_main_t * vnm = vnet_get_main();
449     fib_path_oper_attribute_t oattr;
450     fib_path_cfg_attribute_t cattr;
451     fib_path_t *path;
452     const char *eol;
453
454     if (flags & FIB_PATH_FORMAT_FLAGS_ONE_LINE)
455     {
456         eol = "";
457     }
458     else
459     {
460         eol = "\n";
461     }
462
463     path = fib_path_get(path_index);
464
465     s = format (s, "%Upath:[%d] ", format_white_space, indent,
466                 fib_path_get_index(path));
467     s = format (s, "pl-index:%d ", path->fp_pl_index);
468     s = format (s, "%U ", format_dpo_proto, path->fp_nh_proto);
469     s = format (s, "weight=%d ", path->fp_weight);
470     s = format (s, "pref=%d ", path->fp_preference);
471     s = format (s, "%s: ", fib_path_type_names[path->fp_type]);
472     if (FIB_PATH_OPER_FLAG_NONE != path->fp_oper_flags) {
473         s = format(s, " oper-flags:");
474         FOR_EACH_FIB_PATH_OPER_ATTRIBUTE(oattr) {
475             if ((1<<oattr) & path->fp_oper_flags) {
476                 s = format (s, "%s,", fib_path_oper_attribute_names[oattr]);
477             }
478         }
479     }
480     if (FIB_PATH_CFG_FLAG_NONE != path->fp_cfg_flags) {
481         s = format(s, " cfg-flags:");
482         FOR_EACH_FIB_PATH_CFG_ATTRIBUTE(cattr) {
483             if ((1<<cattr) & path->fp_cfg_flags) {
484                 s = format (s, "%s,", fib_path_cfg_attribute_names[cattr]);
485             }
486         }
487     }
488     if (!(flags & FIB_PATH_FORMAT_FLAGS_ONE_LINE))
489         s = format(s, "\n%U", format_white_space, indent+2);
490
491     switch (path->fp_type)
492     {
493     case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
494         s = format (s, "%U", format_ip46_address,
495                     &path->attached_next_hop.fp_nh,
496                     IP46_TYPE_ANY);
497         if (path->fp_oper_flags & FIB_PATH_OPER_FLAG_DROP)
498         {
499             s = format (s, " if_index:%d", path->attached_next_hop.fp_interface);
500         }
501         else
502         {
503             s = format (s, " %U",
504                         format_vnet_sw_if_index_name,
505                         vnm,
506                         path->attached_next_hop.fp_interface);
507             if (vnet_sw_interface_is_p2p(vnet_get_main(),
508                                          path->attached_next_hop.fp_interface))
509             {
510                 s = format (s, " (p2p)");
511             }
512         }
513         if (!dpo_id_is_valid(&path->fp_dpo))
514         {
515             s = format(s, "%s%Uunresolved", eol, format_white_space, indent+2);
516         }
517         else
518         {
519             s = format(s, "%s%U%U", eol,
520                        format_white_space, indent,
521                        format_dpo_id,
522                        &path->fp_dpo, 13);
523         }
524         break;
525     case FIB_PATH_TYPE_ATTACHED:
526         if (path->fp_oper_flags & FIB_PATH_OPER_FLAG_DROP)
527         {
528             s = format (s, "if_index:%d", path->attached_next_hop.fp_interface);
529         }
530         else
531         {
532             s = format (s, " %U",
533                         format_vnet_sw_interface_name,
534                         vnm,
535                         vnet_get_sw_interface(
536                             vnm,
537                             path->attached.fp_interface));
538         }
539         break;
540     case FIB_PATH_TYPE_RECURSIVE:
541         if (DPO_PROTO_MPLS == path->fp_nh_proto)
542         {
543             s = format (s, "via %U %U",
544                         format_mpls_unicast_label,
545                         path->recursive.fp_nh.fp_local_label,
546                         format_mpls_eos_bit,
547                         path->recursive.fp_nh.fp_eos);
548         }
549         else
550         {
551             s = format (s, "via %U",
552                         format_ip46_address,
553                         &path->recursive.fp_nh.fp_ip,
554                         IP46_TYPE_ANY);
555         }
556         s = format (s, " in fib:%d",
557                     path->recursive.fp_tbl_id,
558                     path->fp_via_fib); 
559         s = format (s, " via-fib:%d", path->fp_via_fib); 
560         s = format (s, " via-dpo:[%U:%d]",
561                     format_dpo_type, path->fp_dpo.dpoi_type, 
562                     path->fp_dpo.dpoi_index);
563
564         break;
565     case FIB_PATH_TYPE_UDP_ENCAP:
566         s = format (s, "UDP-encap ID:%d", path->udp_encap.fp_udp_encap_id);
567         break;
568     case FIB_PATH_TYPE_BIER_TABLE:
569         s = format (s, "via bier-table:[%U}",
570                     format_bier_table_id,
571                     &path->bier_table.fp_bier_tbl);
572         s = format (s, " via-dpo:[%U:%d]",
573                     format_dpo_type, path->fp_dpo.dpoi_type,
574                     path->fp_dpo.dpoi_index);
575         break;
576     case FIB_PATH_TYPE_BIER_FMASK:
577         s = format (s, "via-fmask:%d", path->bier_fmask.fp_bier_fmask); 
578         s = format (s, " via-dpo:[%U:%d]",
579                     format_dpo_type, path->fp_dpo.dpoi_type, 
580                     path->fp_dpo.dpoi_index);
581         break;
582     case FIB_PATH_TYPE_BIER_IMP:
583         s = format (s, "via %U", format_bier_imp,
584                     path->bier_imp.fp_bier_imp, 0, BIER_SHOW_BRIEF);
585         break;
586     case FIB_PATH_TYPE_DVR:
587         s = format (s, " %U",
588                     format_vnet_sw_interface_name,
589                     vnm,
590                     vnet_get_sw_interface(
591                         vnm,
592                         path->dvr.fp_interface));
593         break;
594     case FIB_PATH_TYPE_DEAG:
595         s = format (s, " %sfib-index:%d",
596                     (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_RPF_ID ?  "m" : ""),
597                     path->deag.fp_tbl_id);
598         break;
599     case FIB_PATH_TYPE_RECEIVE:
600     case FIB_PATH_TYPE_INTF_RX:
601     case FIB_PATH_TYPE_SPECIAL:
602     case FIB_PATH_TYPE_EXCLUSIVE:
603         if (dpo_id_is_valid(&path->fp_dpo))
604         {
605             s = format(s, "%U", format_dpo_id,
606                        &path->fp_dpo, indent+2);
607         }
608         break;
609     }
610     return (s);
611 }
612
613 /*
614  * fib_path_last_lock_gone
615  *
616  * We don't share paths, we share path lists, so the [un]lock functions
617  * are no-ops
618  */
619 static void
620 fib_path_last_lock_gone (fib_node_t *node)
621 {
622     ASSERT(0);
623 }
624
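/**
 * Find or create the neighbour adjacency through which an attached
 * next-hop path resolves and store it in the passed DPO.
 * The path is re-fetched by index before returning, in case the path
 * pool was reallocated while the adjacency was being created.
 */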
625 static fib_path_t*
626 fib_path_attached_next_hop_get_adj (fib_path_t *path,
627                                     vnet_link_t link,
628                                     dpo_id_t *dpo)
629 {
630     fib_node_index_t fib_path_index;
631     fib_protocol_t nh_proto;
632     adj_index_t ai;
633
634     fib_path_index = fib_path_get_index(path);
635     nh_proto = dpo_proto_to_fib(path->fp_nh_proto);
636
637     if (vnet_sw_interface_is_p2p(vnet_get_main(),
638                                  path->attached_next_hop.fp_interface))
639     {
640         /*
641          * if the interface is p2p then the adj for the specific
642          * neighbour on that link will never exist. on p2p links
643          * the subnet address (the attached route) links to the
644          * auto-adj (see below), we want that adj here too.
645          */
646         ai = adj_nbr_add_or_lock(nh_proto, link, &zero_addr,
647                                  path->attached_next_hop.fp_interface);
648     }
649     else
650     {
651         ai = adj_nbr_add_or_lock(nh_proto, link,
652                                  &path->attached_next_hop.fp_nh,
653                                  path->attached_next_hop.fp_interface);
654     }
655
656     dpo_set(dpo, DPO_ADJACENCY, vnet_link_to_dpo_proto(link), ai);
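    /* dpo_set() takes its own lock on the adjacency, so the reference
     * taken by adj_nbr_add_or_lock() above can be released */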
657     adj_unlock(ai);
658
659     return (fib_path_get(fib_path_index));
660 }
661
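/**
 * Resolve an attached next-hop path: stack the path on its neighbour
 * adjacency, become a child of that adjacency so rewrite updates are
 * seen, and clear the resolved flag if the interface or adjacency is down.
 */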
662 static void
663 fib_path_attached_next_hop_set (fib_path_t *path)
664 {
665     dpo_id_t tmp = DPO_INVALID;
666
667     /*
668      * resolve directly via the adjacency described by the
669      * interface and next-hop
670      */
671     dpo_copy (&tmp, &path->fp_dpo);
672     path = fib_path_attached_next_hop_get_adj(path,
673                                               dpo_proto_to_link(path->fp_nh_proto),
674                                               &tmp);
675     dpo_copy(&path->fp_dpo, &tmp);
676     dpo_reset(&tmp);
677     ASSERT(dpo_is_adj(&path->fp_dpo));
678
679     /*
680      * become a child of the adjacency so we receive updates
681      * when its rewrite changes
682      */
683     path->fp_sibling = adj_child_add(path->fp_dpo.dpoi_index,
684                                      FIB_NODE_TYPE_PATH,
685                                      fib_path_get_index(path));
686
687     if (!vnet_sw_interface_is_up(vnet_get_main(),
688                                  path->attached_next_hop.fp_interface) ||
689         !adj_is_up(path->fp_dpo.dpoi_index))
690     {
691         path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
692     }
693 }
694
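/**
 * Get the adjacency for an attached (interface only) path:
 * a neighbour adjacency on point-to-point links, a drop on NBMA links,
 * otherwise a glean adjacency for the connected prefix.
 */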
695 static void
696 fib_path_attached_get_adj (fib_path_t *path,
697                            vnet_link_t link,
698                            dpo_id_t *dpo)
699 {
700     fib_protocol_t nh_proto;
701
702     nh_proto = dpo_proto_to_fib(path->fp_nh_proto);
703
704     if (vnet_sw_interface_is_p2p(vnet_get_main(),
705                                  path->attached.fp_interface))
706     {
707         /*
708          * point-2-point interfaces do not require a glean, since
709          * there is nothing to ARP. Install a rewrite/nbr adj instead
710          */
711         adj_index_t ai;
712
713         ai = adj_nbr_add_or_lock(nh_proto, link, &zero_addr,
714                                  path->attached.fp_interface);
715
716         dpo_set(dpo, DPO_ADJACENCY, vnet_link_to_dpo_proto(link), ai);
717         adj_unlock(ai);
718     }
719     else if (vnet_sw_interface_is_nbma(vnet_get_main(),
720                                        path->attached.fp_interface))
721     {
722         dpo_copy(dpo, drop_dpo_get(path->fp_nh_proto));
723     }
724     else
725     {
726         adj_index_t ai;
727
728         ai = adj_glean_add_or_lock(nh_proto, link,
729                                    path->attached.fp_interface,
730                                    &path->attached.fp_connected);
731         dpo_set(dpo, DPO_ADJACENCY_GLEAN, vnet_link_to_dpo_proto(link), ai);
732         adj_unlock(ai);
733     }
734 }
735
736 /*
737  * create or update the path's recursive adj
738  */
739 static void
740 fib_path_recursive_adj_update (fib_path_t *path,
741                                fib_forward_chain_type_t fct,
742                                dpo_id_t *dpo)
743 {
744     dpo_id_t via_dpo = DPO_INVALID;
745
746     /*
747      * get the DPO to resolve through from the via-entry
748      */
749     fib_entry_contribute_forwarding(path->fp_via_fib,
750                                     fct,
751                                     &via_dpo);
752
753
754     /*
755      * hope for the best - clear if restrictions apply.
756      */
757     path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RESOLVED;
758
759     /*
760      * Validate any recursion constraints and over-ride the via
761      * adj if not met
762      */
763     if (path->fp_oper_flags & FIB_PATH_OPER_FLAG_RECURSIVE_LOOP)
764     {
765         path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
766         dpo_copy(&via_dpo, drop_dpo_get(path->fp_nh_proto));
767     }
768     else if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_RESOLVE_HOST)
769     {
770         /*
771          * the via FIB must be a host route.
772          * note the via FIB just added will always be a host route
773          * since it is an RR source added host route. So what we need to
774          * check is whether the route has other sources. If it does then
775          * some other source has added it as a host route. If it doesn't
776          * then it was added only here and inherits forwarding from a cover.
777          * the cover is not a host route.
778          * The RR source is the lowest priority source, so we check if it
779          * is the best. If it is, there are no other sources.
780          */
781         if (fib_entry_get_best_source(path->fp_via_fib) >= FIB_SOURCE_RR)
782         {
783             path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
784             dpo_copy(&via_dpo, drop_dpo_get(path->fp_nh_proto));
785
786             /*
787              * PIC edge trigger. let the load-balance maps know
788              */
789             load_balance_map_path_state_change(fib_path_get_index(path));
790         }
791     }
792     else if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_RESOLVE_ATTACHED)
793     {
794         /*
795          * RR source entries inherit the flags from the cover, so
796          * we can check the via directly
797          */
798         if (!(FIB_ENTRY_FLAG_ATTACHED & fib_entry_get_flags(path->fp_via_fib)))
799         {
800             path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
801             dpo_copy(&via_dpo, drop_dpo_get(path->fp_nh_proto));
802
803             /*
804              * PIC edge trigger. let the load-balance maps know
805              */
806             load_balance_map_path_state_change(fib_path_get_index(path));
807         }
808     }
809     /*
810      * check for over-riding factors on the FIB entry itself
811      */
812     if (!fib_entry_is_resolved(path->fp_via_fib))
813     {
814         path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
815         dpo_copy(&via_dpo, drop_dpo_get(path->fp_nh_proto));
816
817         /*
818          * PIC edge trigger. let the load-balance maps know
819          */
820         load_balance_map_path_state_change(fib_path_get_index(path));
821     }
822
823     /*
824      * If this path is contributing a drop, then it's not resolved
825      */
826     if (dpo_is_drop(&via_dpo) || load_balance_is_drop(&via_dpo))
827     {
828         path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
829     }
830
831     /*
832      * update the path's contributed DPO
833      */
834     dpo_copy(dpo, &via_dpo);
835
836     FIB_PATH_DBG(path, "recursive update:");
837
838     dpo_reset(&via_dpo);
839 }
840
841 /*
842  * re-evaluate the forwarding state for a via fmask path
843  */
844 static void
845 fib_path_bier_fmask_update (fib_path_t *path,
846                             dpo_id_t *dpo)
847 {
848     bier_fmask_contribute_forwarding(path->bier_fmask.fp_bier_fmask, dpo);
849
850     /*
851      * if we are stacking on the drop, then the path is not resolved
852      */
853     if (dpo_is_drop(dpo))
854     {
855         path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
856     }
857     else
858     {
859         path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RESOLVED;
860     }
861 }
862
863 /*
864  * fib_path_is_permanent_drop
865  *
866  * Return !0 if the path is configured to permanently drop,
867  * despite other attributes.
868  */
869 static int
870 fib_path_is_permanent_drop (fib_path_t *path)
871 {
872     return ((path->fp_cfg_flags & FIB_PATH_CFG_FLAG_DROP) ||
873             (path->fp_oper_flags & FIB_PATH_OPER_FLAG_DROP));
874 }
875
876 /*
877  * fib_path_unresolve
878  *
879  * Remove our dependency on the resolution target
880  */
881 static void
882 fib_path_unresolve (fib_path_t *path)
883 {
884     /*
885      * the forced drop path does not need unresolving
886      */
887     if (fib_path_is_permanent_drop(path))
888     {
889         return;
890     }
891
892     switch (path->fp_type)
893     {
894     case FIB_PATH_TYPE_RECURSIVE:
895         if (FIB_NODE_INDEX_INVALID != path->fp_via_fib)
896         {
897             fib_entry_child_remove(path->fp_via_fib,
898                                    path->fp_sibling);
899             fib_table_entry_special_remove(path->recursive.fp_tbl_id,
900                                            fib_entry_get_prefix(path->fp_via_fib),
901                                            FIB_SOURCE_RR);
902             fib_table_unlock(path->recursive.fp_tbl_id,
903                              dpo_proto_to_fib(path->fp_nh_proto),
904                              FIB_SOURCE_RR);
905             path->fp_via_fib = FIB_NODE_INDEX_INVALID;
906         }
907         break;
908     case FIB_PATH_TYPE_BIER_FMASK:
909         bier_fmask_child_remove(path->fp_via_bier_fmask,
910                                 path->fp_sibling);
911         break;
912     case FIB_PATH_TYPE_BIER_IMP:
913         bier_imp_unlock(path->fp_dpo.dpoi_index);
914         break;
915     case FIB_PATH_TYPE_BIER_TABLE:
916         bier_table_ecmp_unlock(path->fp_via_bier_tbl);
917         break;
918     case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
919     case FIB_PATH_TYPE_ATTACHED:
920         if (dpo_is_adj(&path->fp_dpo))
921             adj_child_remove(path->fp_dpo.dpoi_index,
922                              path->fp_sibling);
923         break;
924     case FIB_PATH_TYPE_UDP_ENCAP:
925         udp_encap_unlock(path->fp_dpo.dpoi_index);
926         break;
927     case FIB_PATH_TYPE_EXCLUSIVE:
928         dpo_reset(&path->exclusive.fp_ex_dpo);
929         break;
930     case FIB_PATH_TYPE_SPECIAL:
931     case FIB_PATH_TYPE_RECEIVE:
932     case FIB_PATH_TYPE_INTF_RX:
933     case FIB_PATH_TYPE_DEAG:
934     case FIB_PATH_TYPE_DVR:
935         /*
936          * these hold only the path's DPO, which is reset below.
937          */
938         break;
939     }
940
941     /*
942      * release the adj we were holding and pick up the
943      * drop just in case.
944      */
945     dpo_reset(&path->fp_dpo);
946     path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
947
948     return;
949 }
950
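/**
 * Determine the forwarding chain type this path contributes.
 * For MPLS next-hops this depends on the EOS bit of a recursive path;
 * otherwise it is derived from the next-hop's DPO protocol.
 */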
951 static fib_forward_chain_type_t
952 fib_path_to_chain_type (const fib_path_t *path)
953 {
954     if (DPO_PROTO_MPLS == path->fp_nh_proto)
955     {
956         if (FIB_PATH_TYPE_RECURSIVE == path->fp_type &&
957             MPLS_EOS == path->recursive.fp_nh.fp_eos)
958         {
959             return (FIB_FORW_CHAIN_TYPE_MPLS_EOS);
960         }
961         else
962         {
963             return (FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS);
964         }
965     }
966     else
967     {
968         return (fib_forw_chain_type_from_dpo_proto(path->fp_nh_proto));
969     }
970 }
971
972 /*
973  * fib_path_back_walk_notify
974  *
975  * A back walk has reached this path.
976  */
977 static fib_node_back_walk_rc_t
978 fib_path_back_walk_notify (fib_node_t *node,
979                            fib_node_back_walk_ctx_t *ctx)
980 {
981     fib_path_t *path;
982
983     path = fib_path_from_fib_node(node);
984
985     FIB_PATH_DBG(path, "bw:%U",
986                  format_fib_node_bw_reason, ctx->fnbw_reason);
987
988     switch (path->fp_type)
989     {
990     case FIB_PATH_TYPE_RECURSIVE:
991         if (FIB_NODE_BW_REASON_FLAG_EVALUATE & ctx->fnbw_reason)
992         {
993             /*
994              * modify the recursive adjacency to use the new forwarding
995              * of the via-fib.
996              * this update is visible to packets in flight in the DP.
997              */
998             fib_path_recursive_adj_update(
999                 path,
1000                 fib_path_to_chain_type(path),
1001                 &path->fp_dpo);
1002         }
1003         if ((FIB_NODE_BW_REASON_FLAG_ADJ_UPDATE & ctx->fnbw_reason) ||
1004             (FIB_NODE_BW_REASON_FLAG_ADJ_MTU    & ctx->fnbw_reason) ||
1005             (FIB_NODE_BW_REASON_FLAG_ADJ_DOWN   & ctx->fnbw_reason))
1006         {
1007             /*
1008              * ADJ updates (complete<->incomplete) do not need to propagate to
1009              * recursive entries.
1010              * The only reason it's needed as far back as here is that the adj
1011              * and the incomplete adj are a different DPO type, so the LBs need
1012              * to re-stack.
1013              * If this walk was quashed in the fib_entry, then any non-fib_path
1014              * children (like tunnels that collapse out the LB when they stack)
1015              * would not see the update.
1016              */
1017             return (FIB_NODE_BACK_WALK_CONTINUE);
1018         }
1019         break;
1020     case FIB_PATH_TYPE_BIER_FMASK:
1021         if (FIB_NODE_BW_REASON_FLAG_EVALUATE & ctx->fnbw_reason)
1022         {
1023             /*
1024              * update to use the BIER fmask's new forwarding
1025              */
1026             fib_path_bier_fmask_update(path, &path->fp_dpo);
1027         }
1028         if ((FIB_NODE_BW_REASON_FLAG_ADJ_UPDATE & ctx->fnbw_reason) ||
1029             (FIB_NODE_BW_REASON_FLAG_ADJ_DOWN   & ctx->fnbw_reason))
1030         {
1031             /*
1032              * ADJ updates (complete<->incomplete) do not need to propagate to
1033              * recursive entries.
1034              * The only reason it's needed as far back as here is that the adj
1035              * and the incomplete adj are a different DPO type, so the LBs need
1036              * to re-stack.
1037              * If this walk was quashed in the fib_entry, then any non-fib_path
1038              * children (like tunnels that collapse out the LB when they stack)
1039              * would not see the update.
1040              */
1041             return (FIB_NODE_BACK_WALK_CONTINUE);
1042         }
1043         break;
1044     case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
1045         /*
1046 FIXME comment
1047          * ADJ_UPDATE backwalks pass silently through here and up to
1048          * the path-list when the multipath adj collapse occurs.
1049          * The reason we do this is that the assumption is that VPP
1050          * runs in an environment where the Control-Plane is remote
1051          * and hence reacts slowly to link up/down. In order to remove
1052          * this down link from the ECMP set quickly, we back-walk.
1053          * VPP also has dedicated CPUs, so we are not stealing resources
1054          * from the CP to do so.
1055          */
1056         if (FIB_NODE_BW_REASON_FLAG_INTERFACE_UP & ctx->fnbw_reason)
1057         {
1058             if (path->fp_oper_flags & FIB_PATH_OPER_FLAG_RESOLVED)
1059             {
1060                 /*
1061                  * already resolved. no need to walk back again
1062                  */
1063                 return (FIB_NODE_BACK_WALK_CONTINUE);
1064             }
1065             path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RESOLVED;
1066         }
1067         if (FIB_NODE_BW_REASON_FLAG_INTERFACE_DOWN & ctx->fnbw_reason)
1068         {
1069             if (!(path->fp_oper_flags & FIB_PATH_OPER_FLAG_RESOLVED))
1070             {
1071                 /*
1072                  * already unresolved. no need to walk back again
1073                  */
1074                 return (FIB_NODE_BACK_WALK_CONTINUE);
1075             }
1076             path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
1077         }
1078         if (FIB_NODE_BW_REASON_FLAG_INTERFACE_DELETE & ctx->fnbw_reason)
1079         {
1080             /*
1081              * The interface this path resolves through has been deleted.
1082              * This will leave the path in a permanent drop state. The route
1083              * needs to be removed and readded (and hence the path-list deleted)
1084              * before it can forward again.
1085              */
1086             fib_path_unresolve(path);
1087             path->fp_oper_flags |= FIB_PATH_OPER_FLAG_DROP;
1088         }
1089         if (FIB_NODE_BW_REASON_FLAG_ADJ_UPDATE & ctx->fnbw_reason)
1090         {
1091             /*
1092              * restack the DPO to pick up the correct DPO sub-type
1093              */
1094             dpo_id_t tmp = DPO_INVALID;
1095             uword if_is_up;
1096
1097             if_is_up = vnet_sw_interface_is_up(
1098                            vnet_get_main(),
1099                            path->attached_next_hop.fp_interface);
1100
1101             dpo_copy (&tmp, &path->fp_dpo);
1102             path = fib_path_attached_next_hop_get_adj(
1103                 path,
1104                 dpo_proto_to_link(path->fp_nh_proto),
1105                 &tmp);
1106             dpo_copy(&path->fp_dpo, &tmp);
1107             dpo_reset(&tmp);
1108
1109             path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
1110             if (if_is_up && adj_is_up(path->fp_dpo.dpoi_index))
1111             {
1112                 path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RESOLVED;
1113             }
1114
1115             if (!if_is_up)
1116             {
1117                 /*
1118                  * If the interface is not up there is no reason to walk
1119                  * back to children. if we did they would only evaluate
1120                  * that this path is unresolved and hence it would
1121                  * not contribute the adjacency - so it would be wasted
1122                  * CPU time.
1123                  */
1124                 return (FIB_NODE_BACK_WALK_CONTINUE);
1125             }
1126         }
1127         if (FIB_NODE_BW_REASON_FLAG_ADJ_DOWN & ctx->fnbw_reason)
1128         {
1129             if (!(path->fp_oper_flags & FIB_PATH_OPER_FLAG_RESOLVED))
1130             {
1131                 /*
1132                  * already unresolved. no need to walk back again
1133                  */
1134                 return (FIB_NODE_BACK_WALK_CONTINUE);
1135             }
1136             /*
1137              * the adj has gone down. the path is no longer resolved.
1138              */
1139             path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
1140         }
1141         break;
1142     case FIB_PATH_TYPE_ATTACHED:
1143     case FIB_PATH_TYPE_DVR:
1144         /*
1145          * FIXME; this could schedule a lower priority walk, since attached
1146          * routes are not usually in ECMP configurations so the backwalk to
1147          * the FIB entry does not need to be high priority
1148          */
1149         if (FIB_NODE_BW_REASON_FLAG_INTERFACE_UP & ctx->fnbw_reason)
1150         {
1151             path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RESOLVED;
1152         }
1153         if (FIB_NODE_BW_REASON_FLAG_INTERFACE_DOWN & ctx->fnbw_reason)
1154         {
1155             path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
1156         }
1157         if (FIB_NODE_BW_REASON_FLAG_INTERFACE_DELETE & ctx->fnbw_reason)
1158         {
1159             fib_path_unresolve(path);
1160             path->fp_oper_flags |= FIB_PATH_OPER_FLAG_DROP;
1161         }
1162         if (FIB_NODE_BW_REASON_FLAG_INTERFACE_BIND & ctx->fnbw_reason)
1163         {
1164             /* bind walks should appear here and pass silently up to
1165              * the fib_entry */
1166         }
1167         break;
1168     case FIB_PATH_TYPE_UDP_ENCAP:
1169     {
1170         dpo_id_t via_dpo = DPO_INVALID;
1171
1172         /*
1173          * hope for the best - clear if restrictions apply.
1174          */
1175         path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RESOLVED;
1176
1177         udp_encap_contribute_forwarding(path->udp_encap.fp_udp_encap_id,
1178                                         path->fp_nh_proto,
1179                                         &via_dpo);
1180         /*
1181          * If this path is contributing a drop, then it's not resolved
1182          */
1183         if (dpo_is_drop(&via_dpo) || load_balance_is_drop(&via_dpo))
1184         {
1185             path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
1186         }
1187
1188         /*
1189          * update the path's contributed DPO
1190          */
1191         dpo_copy(&path->fp_dpo, &via_dpo);
1192         dpo_reset(&via_dpo);
1193         break;
1194     }
1195     case FIB_PATH_TYPE_INTF_RX:
1196         ASSERT(0);
1197     case FIB_PATH_TYPE_DEAG:
1198         /*
1199          * FIXME When VRF delete is allowed this will need a poke.
1200          */
1201     case FIB_PATH_TYPE_SPECIAL:
1202     case FIB_PATH_TYPE_RECEIVE:
1203     case FIB_PATH_TYPE_EXCLUSIVE:
1204     case FIB_PATH_TYPE_BIER_TABLE:
1205     case FIB_PATH_TYPE_BIER_IMP:
1206         /*
1207          * these path types have no parents. so to be
1208          * walked from one is unexpected.
1209          */
1210         ASSERT(0);
1211         break;
1212     }
1213
1214     /*
1215      * propagate the backwalk further to the path-list
1216      */
1217     fib_path_list_back_walk(path->fp_pl_index, ctx);
1218
1219     return (FIB_NODE_BACK_WALK_CONTINUE);
1220 }
1221
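/**
 * Report the memory usage of the path pool
 */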
1222 static void
1223 fib_path_memory_show (void)
1224 {
1225     fib_show_memory_usage("Path",
1226                           pool_elts(fib_path_pool),
1227                           pool_len(fib_path_pool),
1228                           sizeof(fib_path_t));
1229 }
1230
1231 /*
1232  * The FIB path's graph node virtual function table
1233  */
1234 static const fib_node_vft_t fib_path_vft = {
1235     .fnv_get = fib_path_get_node,
1236     .fnv_last_lock = fib_path_last_lock_gone,
1237     .fnv_back_walk = fib_path_back_walk_notify,
1238     .fnv_mem_show = fib_path_memory_show,
1239 };
1240
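/**
 * Map the FIB_ROUTE_PATH_* flags supplied on a route-path to the
 * path's internal FIB_PATH_CFG_FLAG_* configuration flags
 */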
1241 static fib_path_cfg_flags_t
1242 fib_path_route_flags_to_cfg_flags (const fib_route_path_t *rpath)
1243 {
1244     fib_path_cfg_flags_t cfg_flags = FIB_PATH_CFG_FLAG_NONE;
1245
1246     if (rpath->frp_flags & FIB_ROUTE_PATH_POP_PW_CW)
1247         cfg_flags |= FIB_PATH_CFG_FLAG_POP_PW_CW;
1248     if (rpath->frp_flags & FIB_ROUTE_PATH_RESOLVE_VIA_HOST)
1249         cfg_flags |= FIB_PATH_CFG_FLAG_RESOLVE_HOST;
1250     if (rpath->frp_flags & FIB_ROUTE_PATH_RESOLVE_VIA_ATTACHED)
1251         cfg_flags |= FIB_PATH_CFG_FLAG_RESOLVE_ATTACHED;
1252     if (rpath->frp_flags & FIB_ROUTE_PATH_LOCAL)
1253         cfg_flags |= FIB_PATH_CFG_FLAG_LOCAL;
1254     if (rpath->frp_flags & FIB_ROUTE_PATH_ATTACHED)
1255         cfg_flags |= FIB_PATH_CFG_FLAG_ATTACHED;
1256     if (rpath->frp_flags & FIB_ROUTE_PATH_INTF_RX)
1257         cfg_flags |= FIB_PATH_CFG_FLAG_INTF_RX;
1258     if (rpath->frp_flags & FIB_ROUTE_PATH_RPF_ID)
1259         cfg_flags |= FIB_PATH_CFG_FLAG_RPF_ID;
1260     if (rpath->frp_flags & FIB_ROUTE_PATH_EXCLUSIVE)
1261         cfg_flags |= FIB_PATH_CFG_FLAG_EXCLUSIVE;
1262     if (rpath->frp_flags & FIB_ROUTE_PATH_DROP)
1263         cfg_flags |= FIB_PATH_CFG_FLAG_DROP;
1264     if (rpath->frp_flags & FIB_ROUTE_PATH_SOURCE_LOOKUP)
1265         cfg_flags |= FIB_PATH_CFG_FLAG_DEAG_SRC;
1266     if (rpath->frp_flags & FIB_ROUTE_PATH_ICMP_UNREACH)
1267         cfg_flags |= FIB_PATH_CFG_FLAG_ICMP_UNREACH;
1268     if (rpath->frp_flags & FIB_ROUTE_PATH_ICMP_PROHIBIT)
1269         cfg_flags |= FIB_PATH_CFG_FLAG_ICMP_PROHIBIT;
1270     if (rpath->frp_flags & FIB_ROUTE_PATH_GLEAN)
1271         cfg_flags |= FIB_PATH_CFG_FLAG_GLEAN;
1272
1273     return (cfg_flags);
1274 }
1275
1276 /*
1277  * fib_path_create
1278  *
1279  * Create and initialise a new path object.
1280  * return the index of the path.
1281  */
1282 fib_node_index_t
1283 fib_path_create (fib_node_index_t pl_index,
1284                  const fib_route_path_t *rpath)
1285 {
1286     fib_path_t *path;
1287
1288     pool_get(fib_path_pool, path);
1289     clib_memset(path, 0, sizeof(*path));
1290
1291     fib_node_init(&path->fp_node,
1292                   FIB_NODE_TYPE_PATH);
1293
1294     dpo_reset(&path->fp_dpo);
1295     path->fp_pl_index = pl_index;
1296     path->fp_nh_proto = rpath->frp_proto;
1297     path->fp_via_fib = FIB_NODE_INDEX_INVALID;
1298     path->fp_weight = rpath->frp_weight;
1299     if (0 == path->fp_weight)
1300     {
1301         /*
1302          * a weight of 0 is a meaningless value. We could either reject it, and thus force
1303          * clients to always use 1, or we can accept it and fix it up appropriately.
1304          */
1305         path->fp_weight = 1;
1306     }
1307     path->fp_preference = rpath->frp_preference;
1308     path->fp_cfg_flags = fib_path_route_flags_to_cfg_flags(rpath);
1309
1310     /*
1311      * deduce the path's type from the parameters and save what is needed.
1312      */
1313     if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_LOCAL)
1314     {
1315         path->fp_type = FIB_PATH_TYPE_RECEIVE;
1316         path->receive.fp_interface = rpath->frp_sw_if_index;
1317         path->receive.fp_addr = rpath->frp_addr;
1318     }
1319     else if (rpath->frp_flags & FIB_ROUTE_PATH_UDP_ENCAP)
1320     {
1321         path->fp_type = FIB_PATH_TYPE_UDP_ENCAP;
1322         path->udp_encap.fp_udp_encap_id = rpath->frp_udp_encap_id;
1323     }
1324     else if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_INTF_RX)
1325     {
1326         path->fp_type = FIB_PATH_TYPE_INTF_RX;
1327         path->intf_rx.fp_interface = rpath->frp_sw_if_index;
1328     }
1329     else if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_RPF_ID)
1330     {
1331         path->fp_type = FIB_PATH_TYPE_DEAG;
1332         path->deag.fp_tbl_id = rpath->frp_fib_index;
1333         path->deag.fp_rpf_id = rpath->frp_rpf_id;
1334     }
1335     else if (rpath->frp_flags & FIB_ROUTE_PATH_BIER_FMASK)
1336     {
1337         path->fp_type = FIB_PATH_TYPE_BIER_FMASK;
1338         path->bier_fmask.fp_bier_fmask = rpath->frp_bier_fmask;
1339     }
1340     else if (rpath->frp_flags & FIB_ROUTE_PATH_BIER_IMP)
1341     {
1342         path->fp_type = FIB_PATH_TYPE_BIER_IMP;
1343         path->bier_imp.fp_bier_imp = rpath->frp_bier_imp;
1344     }
1345     else if (rpath->frp_flags & FIB_ROUTE_PATH_BIER_TABLE)
1346     {
1347         path->fp_type = FIB_PATH_TYPE_BIER_TABLE;
1348         path->bier_table.fp_bier_tbl = rpath->frp_bier_tbl;
1349     }
1350     else if (rpath->frp_flags & FIB_ROUTE_PATH_DEAG)
1351     {
1352         path->fp_type = FIB_PATH_TYPE_DEAG;
1353         path->deag.fp_tbl_id = rpath->frp_fib_index;
1354     }
1355     else if (rpath->frp_flags & FIB_ROUTE_PATH_DVR)
1356     {
1357         path->fp_type = FIB_PATH_TYPE_DVR;
1358         path->dvr.fp_interface = rpath->frp_sw_if_index;
1359     }
1360     else if (rpath->frp_flags & FIB_ROUTE_PATH_EXCLUSIVE)
1361     {
1362         path->fp_type = FIB_PATH_TYPE_EXCLUSIVE;
1363         dpo_copy(&path->exclusive.fp_ex_dpo, &rpath->dpo);
1364     }
1365     else if ((path->fp_cfg_flags & FIB_PATH_CFG_FLAG_ICMP_PROHIBIT) ||
1366         (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_ICMP_UNREACH) ||
1367         (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_DROP))
1368     {
1369         path->fp_type = FIB_PATH_TYPE_SPECIAL;
1370     }
1371     else if ((path->fp_cfg_flags & FIB_PATH_CFG_FLAG_CLASSIFY))
1372     {
1373         path->fp_type = FIB_PATH_TYPE_SPECIAL;
1374         path->classify.fp_classify_table_id = rpath->frp_classify_table_id;
1375     }
1376     else if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_GLEAN)
1377     {
1378         path->fp_type = FIB_PATH_TYPE_ATTACHED;
1379         path->attached.fp_interface = rpath->frp_sw_if_index;
1380         path->attached.fp_connected = rpath->frp_connected;
1381     }
1382     else if (~0 != rpath->frp_sw_if_index)
1383     {
1384         if (ip46_address_is_zero(&rpath->frp_addr))
1385         {
1386             path->fp_type = FIB_PATH_TYPE_ATTACHED;
1387             path->attached.fp_interface = rpath->frp_sw_if_index;
1388         }
1389         else
1390         {
1391             path->fp_type = FIB_PATH_TYPE_ATTACHED_NEXT_HOP;
1392             path->attached_next_hop.fp_interface = rpath->frp_sw_if_index;
1393             path->attached_next_hop.fp_nh = rpath->frp_addr;
1394         }
1395     }
1396     else
1397     {
1398         if (ip46_address_is_zero(&rpath->frp_addr))
1399         {
1400             if (~0 == rpath->frp_fib_index)
1401             {
1402                 path->fp_type = FIB_PATH_TYPE_SPECIAL;
1403             }
1404             else
1405             {
1406                 path->fp_type = FIB_PATH_TYPE_DEAG;
1407                 path->deag.fp_tbl_id = rpath->frp_fib_index;
1408                 path->deag.fp_rpf_id = ~0;
1409             }
1410         }
1411         else
1412         {
1413             path->fp_type = FIB_PATH_TYPE_RECURSIVE;
1414             if (DPO_PROTO_MPLS == path->fp_nh_proto)
1415             {
1416                 path->recursive.fp_nh.fp_local_label = rpath->frp_local_label;
1417                 path->recursive.fp_nh.fp_eos = rpath->frp_eos;
1418             }
1419             else
1420             {
1421                 path->recursive.fp_nh.fp_ip = rpath->frp_addr;
1422             }
1423             path->recursive.fp_tbl_id = rpath->frp_fib_index;
1424         }
1425     }
1426
1427     FIB_PATH_DBG(path, "create");
1428
1429     return (fib_path_get_index(path));
1430 }
1431
1432 /*
1433  * fib_path_create_special
1434  *
1435  * Create and initialise a new path object.
1436  * return the index of the path.
1437  */
1438 fib_node_index_t
1439 fib_path_create_special (fib_node_index_t pl_index,
1440                          dpo_proto_t nh_proto,
1441                          fib_path_cfg_flags_t flags,
1442                          const dpo_id_t *dpo)
1443 {
1444     fib_path_t *path;
1445
1446     pool_get(fib_path_pool, path);
1447     clib_memset(path, 0, sizeof(*path));
1448
1449     fib_node_init(&path->fp_node,
1450                   FIB_NODE_TYPE_PATH);
1451     dpo_reset(&path->fp_dpo);
1452
1453     path->fp_pl_index = pl_index;
1454     path->fp_weight = 1;
1455     path->fp_preference = 0;
1456     path->fp_nh_proto = nh_proto;
1457     path->fp_via_fib = FIB_NODE_INDEX_INVALID;
1458     path->fp_cfg_flags = flags;
1459
1460     if (FIB_PATH_CFG_FLAG_DROP & flags)
1461     {
1462         path->fp_type = FIB_PATH_TYPE_SPECIAL;
1463     }
1464     else if (FIB_PATH_CFG_FLAG_LOCAL & flags)
1465     {
1466         path->fp_type = FIB_PATH_TYPE_RECEIVE;
1467         path->attached.fp_interface = FIB_NODE_INDEX_INVALID;
1468     }
1469     else
1470     {
1471         path->fp_type = FIB_PATH_TYPE_EXCLUSIVE;
1472         ASSERT(NULL != dpo);
1473         dpo_copy(&path->exclusive.fp_ex_dpo, dpo);
1474     }
1475
1476     return (fib_path_get_index(path));
1477 }
1478
1479 /*
1480  * fib_path_copy
1481  *
1482  * Copy a path. return index of new path.
1483  */
1484 fib_node_index_t
1485 fib_path_copy (fib_node_index_t path_index,
1486                fib_node_index_t path_list_index)
1487 {
1488     fib_path_t *path, *orig_path;
1489
1490     pool_get(fib_path_pool, path);
1491
1492     orig_path = fib_path_get(path_index);
1493     ASSERT(NULL != orig_path);
1494
1495     clib_memcpy(path, orig_path, sizeof(*path));
1496
1497     FIB_PATH_DBG(path, "create-copy:%d", path_index);
1498
1499     /*
1500      * reset the dynamic section
1501      */
1502     fib_node_init(&path->fp_node, FIB_NODE_TYPE_PATH);
1503     path->fp_oper_flags     = FIB_PATH_OPER_FLAG_NONE;
1504     path->fp_pl_index  = path_list_index;
1505     path->fp_via_fib   = FIB_NODE_INDEX_INVALID;
1506     clib_memset(&path->fp_dpo, 0, sizeof(path->fp_dpo));
1507     dpo_reset(&path->fp_dpo);
1508
1509     if (path->fp_type == FIB_PATH_TYPE_EXCLUSIVE)
1510     {
1511         clib_memset(&path->exclusive.fp_ex_dpo, 0, sizeof(dpo_id_t));
1512         dpo_copy(&path->exclusive.fp_ex_dpo, &orig_path->exclusive.fp_ex_dpo);
1513     }
1514
1515     return (fib_path_get_index(path));
1516 }
1517
1518 /*
1519  * fib_path_destroy
1520  *
1521  * destroy a path that is no longer required
1522  */
1523 void
1524 fib_path_destroy (fib_node_index_t path_index)
1525 {
1526     fib_path_t *path;
1527
1528     path = fib_path_get(path_index);
1529
1530     ASSERT(NULL != path);
1531     FIB_PATH_DBG(path, "destroy");
1532
1533     fib_path_unresolve(path);
1534
1535     fib_node_deinit(&path->fp_node);
1536     pool_put(fib_path_pool, path);
1537 }
1538
1539 /*
1540  * fib_path_hash
1541  *
1542  * compute the hash of a path over the fields between its hash markers
1543  */
1544 uword
1545 fib_path_hash (fib_node_index_t path_index)
1546 {
1547     fib_path_t *path;
1548
1549     path = fib_path_get(path_index);
1550
1551     return (hash_memory(STRUCT_MARK_PTR(path, path_hash_start),
1552                         (STRUCT_OFFSET_OF(fib_path_t, path_hash_end) -
1553                          STRUCT_OFFSET_OF(fib_path_t, path_hash_start)),
1554                         0));
1555 }
1556
1557 /*
1558  * fib_path_cmp_i
1559  *
1560  * Compare two paths for equivalence.
1561  */
1562 static int
1563 fib_path_cmp_i (const fib_path_t *path1,
1564                 const fib_path_t *path2)
1565 {
1566     int res;
1567
1568     res = 1;
1569
1570     /*
1571      * paths of different types and protocol are not equal.
1572      * paths that differ only in weight and/or preference compare as equal.
1573      */
1574     if (path1->fp_type != path2->fp_type)
1575     {
1576         res = (path1->fp_type - path2->fp_type);
1577     }
1578     else if (path1->fp_nh_proto != path2->fp_nh_proto)
1579     {
1580         res = (path1->fp_nh_proto - path2->fp_nh_proto);
1581     }
1582     else
1583     {
1584         /*
1585          * both paths are of the same type.
1586          * consider each type and its attributes in turn.
1587          */
1588         switch (path1->fp_type)
1589         {
1590         case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
1591             res = ip46_address_cmp(&path1->attached_next_hop.fp_nh,
1592                                    &path2->attached_next_hop.fp_nh);
1593             if (0 == res) {
1594                 res = (path1->attached_next_hop.fp_interface -
1595                        path2->attached_next_hop.fp_interface);
1596             }
1597             break;
1598         case FIB_PATH_TYPE_ATTACHED:
1599             res = (path1->attached.fp_interface -
1600                    path2->attached.fp_interface);
1601             break;
1602         case FIB_PATH_TYPE_RECURSIVE:
1603             res = ip46_address_cmp(&path1->recursive.fp_nh.fp_ip,
1604                                    &path2->recursive.fp_nh.fp_ip);
1605  
1606             if (0 == res)
1607             {
1608                 res = (path1->recursive.fp_tbl_id - path2->recursive.fp_tbl_id);
1609             }
1610             break;
1611         case FIB_PATH_TYPE_BIER_FMASK:
1612             res = (path1->bier_fmask.fp_bier_fmask -
1613                    path2->bier_fmask.fp_bier_fmask);
1614             break;
1615         case FIB_PATH_TYPE_BIER_IMP:
1616             res = (path1->bier_imp.fp_bier_imp -
1617                    path2->bier_imp.fp_bier_imp);
1618             break;
1619         case FIB_PATH_TYPE_BIER_TABLE:
1620             res = bier_table_id_cmp(&path1->bier_table.fp_bier_tbl,
1621                                     &path2->bier_table.fp_bier_tbl);
1622             break;
1623         case FIB_PATH_TYPE_DEAG:
1624             res = (path1->deag.fp_tbl_id - path2->deag.fp_tbl_id);
1625             if (0 == res)
1626             {
1627                 res = (path1->deag.fp_rpf_id - path2->deag.fp_rpf_id);
1628             }
1629             break;
1630         case FIB_PATH_TYPE_INTF_RX:
1631             res = (path1->intf_rx.fp_interface - path2->intf_rx.fp_interface);
1632             break;
1633         case FIB_PATH_TYPE_UDP_ENCAP:
1634             res = (path1->udp_encap.fp_udp_encap_id - path2->udp_encap.fp_udp_encap_id);
1635             break;
1636         case FIB_PATH_TYPE_DVR:
1637             res = (path1->dvr.fp_interface - path2->dvr.fp_interface);
1638             break;
1639         case FIB_PATH_TYPE_EXCLUSIVE:
1640             res = dpo_cmp(&path1->exclusive.fp_ex_dpo, &path2->exclusive.fp_ex_dpo);
1641             break;
1642         case FIB_PATH_TYPE_SPECIAL:
1643         case FIB_PATH_TYPE_RECEIVE:
1644             res = 0;
1645             break;
1646         }
1647     }
1648     return (res);
1649 }
1650
1651 /*
1652  * fib_path_cmp_for_sort
1653  *
1654  * Compare two paths for equivalence. Used during path sorting.
1655  * As usual 0 means equal.
1656  */
1657 int
1658 fib_path_cmp_for_sort (void * v1,
1659                        void * v2)
1660 {
1661     fib_node_index_t *pi1 = v1, *pi2 = v2;
1662     fib_path_t *path1, *path2;
1663
1664     path1 = fib_path_get(*pi1);
1665     path2 = fib_path_get(*pi2);
1666
1667     /*
1668      * when sorting paths we want the highest preference paths
1669      * first, so that the set of choices built is in preference order
1670      */
1671     if (path1->fp_preference != path2->fp_preference)
1672     {
1673         return (path1->fp_preference - path2->fp_preference);
1674     }
1675
1676     return (fib_path_cmp_i(path1, path2));
1677 }
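
/*
 * Usage sketch (illustrative; the 'paths' vector is hypothetical). The
 * comparator above is qsort-compatible, so a vector of path indices can be
 * ordered by preference with the vppinfra sort helper:
 *
 *   fib_node_index_t *paths;       (a vector of fib_path indices)
 *   ...
 *   vec_sort_with_function(paths, fib_path_cmp_for_sort);
 */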
1678
1679 /*
1680  * fib_path_cmp
1681  *
1682  * Compare two paths for equivalence.
1683  */
1684 int
1685 fib_path_cmp (fib_node_index_t pi1,
1686               fib_node_index_t pi2)
1687 {
1688     fib_path_t *path1, *path2;
1689
1690     path1 = fib_path_get(pi1);
1691     path2 = fib_path_get(pi2);
1692
1693     return (fib_path_cmp_i(path1, path2));
1694 }
1695
1696 int
1697 fib_path_cmp_w_route_path (fib_node_index_t path_index,
1698                            const fib_route_path_t *rpath)
1699 {
1700     fib_path_t *path;
1701     int res;
1702
1703     path = fib_path_get(path_index);
1704
1705     res = 1;
1706
1707     if (path->fp_weight != rpath->frp_weight)
1708     {
1709         res = (path->fp_weight - rpath->frp_weight);
1710     }
1711     else
1712     {
1713         /*
1714          * compare the path against the supplied route-path description.
1715          * consider each path type and its attributes in turn.
1716          */
1717         switch (path->fp_type)
1718         {
1719         case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
1720             res = ip46_address_cmp(&path->attached_next_hop.fp_nh,
1721                                    &rpath->frp_addr);
1722             if (0 == res)
1723             {
1724                 res = (path->attached_next_hop.fp_interface -
1725                        rpath->frp_sw_if_index);
1726             }
1727             break;
1728         case FIB_PATH_TYPE_ATTACHED:
1729             res = (path->attached.fp_interface - rpath->frp_sw_if_index);
1730             break;
1731         case FIB_PATH_TYPE_RECURSIVE:
1732             if (DPO_PROTO_MPLS == path->fp_nh_proto)
1733             {
1734                 res = path->recursive.fp_nh.fp_local_label - rpath->frp_local_label;
1735
1736                 if (res == 0)
1737                 {
1738                     res = path->recursive.fp_nh.fp_eos - rpath->frp_eos;
1739                 }
1740             }
1741             else
1742             {
1743                 res = ip46_address_cmp(&path->recursive.fp_nh.fp_ip,
1744                                        &rpath->frp_addr);
1745             }
1746
1747             if (0 == res)
1748             {
1749                 res = (path->recursive.fp_tbl_id - rpath->frp_fib_index);
1750             }
1751             break;
1752         case FIB_PATH_TYPE_BIER_FMASK:
1753             res = (path->bier_fmask.fp_bier_fmask - rpath->frp_bier_fmask);
1754             break;
1755         case FIB_PATH_TYPE_BIER_IMP:
1756             res = (path->bier_imp.fp_bier_imp - rpath->frp_bier_imp);
1757             break;
1758         case FIB_PATH_TYPE_BIER_TABLE:
1759             res = bier_table_id_cmp(&path->bier_table.fp_bier_tbl,
1760                                     &rpath->frp_bier_tbl);
1761             break;
1762         case FIB_PATH_TYPE_INTF_RX:
1763             res = (path->intf_rx.fp_interface - rpath->frp_sw_if_index);
1764             break;
1765         case FIB_PATH_TYPE_UDP_ENCAP:
1766             res = (path->udp_encap.fp_udp_encap_id - rpath->frp_udp_encap_id);
1767             break;
1768         case FIB_PATH_TYPE_DEAG:
1769             res = (path->deag.fp_tbl_id - rpath->frp_fib_index);
1770             if (0 == res)
1771             {
1772                 res = (path->deag.fp_rpf_id - rpath->frp_rpf_id);
1773             }
1774             break;
1775         case FIB_PATH_TYPE_DVR:
1776             res = (path->dvr.fp_interface - rpath->frp_sw_if_index);
1777             break;
1778         case FIB_PATH_TYPE_EXCLUSIVE:
1779             res = dpo_cmp(&path->exclusive.fp_ex_dpo, &rpath->dpo);
1780             break;
1781         case FIB_PATH_TYPE_RECEIVE:
1782             if (rpath->frp_flags & FIB_ROUTE_PATH_LOCAL)
1783             {
1784                 res = 0;
1785             }
1786             else
1787             {
1788                 res = 1;
1789             }
1790             break;
1791         case FIB_PATH_TYPE_SPECIAL:
1792             res = 0;
1793             break;
1794         }
1795     }
1796     return (res);
1797 }
1798
1799 /*
1800  * fib_path_recursive_loop_detect
1801  *
1802  * A forward walk of the FIB object graph to detect a cycle/loop. This
1803  * walk is initiated when an entry links to a new path-list or unlinks from an old one.
1804  * The entry vector passed contains all the FIB entries that are children of this
1805  * path (i.e. all the entries encountered on the walk so far). If this vector
1806  * contains the entry this path resolves via, then a loop is about to form.
1807  * The loop must be allowed to form, since we need the dependencies in place
1808  * so that we can track when the loop breaks.
1809  * However, we MUST NOT produce a loop in the forwarding graph (else packets
1810  * would loop around the switch path until the loop breaks), so we mark recursive
1811  * paths as looped so that they do not contribute forwarding information.
1812  * By marking the path as looped, an entry such as:
1813  *    X/Y
1814  *     via a.a.a.a (looped)
1815  *     via b.b.b.b (not looped)
1816  * can still forward using the info provided by b.b.b.b only.
1817  */
1818 int
1819 fib_path_recursive_loop_detect (fib_node_index_t path_index,
1820                                 fib_node_index_t **entry_indicies)
1821 {
1822     fib_path_t *path;
1823
1824     path = fib_path_get(path_index);
1825
1826     /*
1827      * the forced drop path is never looped, because it is never resolved.
1828      */
1829     if (fib_path_is_permanent_drop(path))
1830     {
1831         return (0);
1832     }
1833
1834     switch (path->fp_type)
1835     {
1836     case FIB_PATH_TYPE_RECURSIVE:
1837     {
1838         fib_node_index_t *entry_index, *entries;
1839         int looped = 0;
1840         entries = *entry_indicies;
1841
1842         vec_foreach(entry_index, entries) {
1843             if (*entry_index == path->fp_via_fib)
1844             {
1845                 /*
1846                  * the entry that is about to link to this path-list (or
1847                  * one of this path-list's children) is the same entry that
1848                  * this recursive path resolves through. this is a cycle.
1849                  * abort the walk.
1850                  */
1851                 looped = 1;
1852                 break;
1853             }
1854         }
1855
1856         if (looped)
1857         {
1858             FIB_PATH_DBG(path, "recursive loop formed");
1859             path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RECURSIVE_LOOP;
1860
1861             dpo_copy(&path->fp_dpo, drop_dpo_get(path->fp_nh_proto));
1862         }
1863         else
1864         {
1865             /*
1866              * no loop here yet. keep forward walking the graph.
1867              */
1868             if (fib_entry_recursive_loop_detect(path->fp_via_fib, entry_indicies))
1869             {
1870                 FIB_PATH_DBG(path, "recursive loop formed");
1871                 path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RECURSIVE_LOOP;
1872             }
1873             else
1874             {
1875                 FIB_PATH_DBG(path, "recursive loop cleared");
1876                 path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RECURSIVE_LOOP;
1877             }
1878         }
1879         break;
1880     }
1881     case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
1882     case FIB_PATH_TYPE_ATTACHED:
1883         if (dpo_is_adj(&path->fp_dpo) &&
1884             adj_recursive_loop_detect(path->fp_dpo.dpoi_index,
1885                                       entry_indicies))
1886         {
1887             FIB_PATH_DBG(path, "recursive loop formed");
1888             path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RECURSIVE_LOOP;
1889         }
1890         else
1891         {
1892             FIB_PATH_DBG(path, "recursive loop cleared");
1893             path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RECURSIVE_LOOP;
1894         }
1895         break;
1896     case FIB_PATH_TYPE_SPECIAL:
1897     case FIB_PATH_TYPE_DEAG:
1898     case FIB_PATH_TYPE_DVR:
1899     case FIB_PATH_TYPE_RECEIVE:
1900     case FIB_PATH_TYPE_INTF_RX:
1901     case FIB_PATH_TYPE_UDP_ENCAP:
1902     case FIB_PATH_TYPE_EXCLUSIVE:
1903     case FIB_PATH_TYPE_BIER_FMASK:
1904     case FIB_PATH_TYPE_BIER_TABLE:
1905     case FIB_PATH_TYPE_BIER_IMP:
1906         /*
1907          * these path types cannot be part of a loop, since they are the leaves
1908          * of the graph.
1909          */
1910         break;
1911     }
1912
1913     return (fib_path_is_looped(path_index));
1914 }
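
/*
 * Usage sketch (illustrative; 'entries' and 'start_entry_index' are
 * hypothetical). A caller driving the walk described above passes the
 * vector of entry indices accumulated so far:
 *
 *   fib_node_index_t *entries = NULL;
 *
 *   vec_add1(entries, start_entry_index);
 *   if (fib_path_recursive_loop_detect(path_index, &entries))
 *   {
 *       ... the path is marked looped and contributes a drop DPO ...
 *   }
 *   vec_free(entries);
 */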
1915
1916 int
1917 fib_path_resolve (fib_node_index_t path_index)
1918 {
1919     fib_path_t *path;
1920
1921     path = fib_path_get(path_index);
1922
1923     /*
1924      * hope for the best.
1925      */
1926     path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RESOLVED;
1927
1928     /*
1929      * the forced drop path resolves via the drop adj
1930      */
1931     if (fib_path_is_permanent_drop(path))
1932     {
1933         dpo_copy(&path->fp_dpo, drop_dpo_get(path->fp_nh_proto));
1934         path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
1935         return (fib_path_is_resolved(path_index));
1936     }
1937
1938     switch (path->fp_type)
1939     {
1940     case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
1941         fib_path_attached_next_hop_set(path);
1942         break;
1943     case FIB_PATH_TYPE_ATTACHED:
1944     {
1945         dpo_id_t tmp = DPO_INVALID;
1946
1947         /*
1948          * the path is unresolved unless path->attached.fp_interface is up
1949          */
1950         if (!vnet_sw_interface_is_up(vnet_get_main(),
1951                                      path->attached.fp_interface))
1952         {
1953             path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
1954         }
1955         fib_path_attached_get_adj(path,
1956                                   dpo_proto_to_link(path->fp_nh_proto),
1957                                   &tmp);
1958
1959         /*
1960          * re-fetch after possible mem realloc
1961          */
1962         path = fib_path_get(path_index);
1963         dpo_copy(&path->fp_dpo, &tmp);
1964
1965         /*
1966          * become a child of the adjacency so we receive updates
1967          * when the interface state changes
1968          */
1969         if (dpo_is_adj(&path->fp_dpo))
1970         {
1971             path->fp_sibling = adj_child_add(path->fp_dpo.dpoi_index,
1972                                              FIB_NODE_TYPE_PATH,
1973                                              fib_path_get_index(path));
1974         }
1975         dpo_reset(&tmp);
1976         break;
1977     }
1978     case FIB_PATH_TYPE_RECURSIVE:
1979     {
1980         /*
1981          * Create an RR source entry in the table for the address
1982          * that this path recurses through.
1983          * This resolve action is recursive, hence we may create
1984          * more paths in the process; more creates may realloc the path
1985          * pool and hence this path.
1986          */
1987         fib_node_index_t fei;
1988         fib_prefix_t pfx;
1989
1990         ASSERT(FIB_NODE_INDEX_INVALID == path->fp_via_fib);
1991
1992         if (DPO_PROTO_MPLS == path->fp_nh_proto)
1993         {
1994             fib_prefix_from_mpls_label(path->recursive.fp_nh.fp_local_label,
1995                                        path->recursive.fp_nh.fp_eos,
1996                                        &pfx);
1997         }
1998         else
1999         {
2000             ASSERT(!ip46_address_is_zero(&path->recursive.fp_nh.fp_ip));
2001
2002             fib_protocol_t fp = (ip46_address_is_ip4(&path->recursive.fp_nh.fp_ip) ?
2003                                         FIB_PROTOCOL_IP4 : FIB_PROTOCOL_IP6);
2004             fib_prefix_from_ip46_addr(fp, &path->recursive.fp_nh.fp_ip, &pfx);
2005         }
2006
2007         fib_table_lock(path->recursive.fp_tbl_id,
2008                        dpo_proto_to_fib(path->fp_nh_proto),
2009                        FIB_SOURCE_RR);
2010         fei = fib_table_entry_special_add(path->recursive.fp_tbl_id,
2011                                           &pfx,
2012                                           FIB_SOURCE_RR,
2013                                           FIB_ENTRY_FLAG_NONE);
2014
2015         path = fib_path_get(path_index);
2016         path->fp_via_fib = fei;
2017
2018         /*
2019          * become a dependent child of the entry so the path is 
2020          * informed when the forwarding for the entry changes.
2021          */
2022         path->fp_sibling = fib_entry_child_add(path->fp_via_fib,
2023                                                FIB_NODE_TYPE_PATH,
2024                                                fib_path_get_index(path));
2025
2026         /*
2027          * create and configure the IP DPO
2028          */
2029         fib_path_recursive_adj_update(
2030             path,
2031             fib_path_to_chain_type(path),
2032             &path->fp_dpo);
2033
2034         break;
2035     }
2036     case FIB_PATH_TYPE_BIER_FMASK:
2037     {
2038         /*
2039          * become a dependent child of the entry so the path is
2040          * informed when the forwarding for the entry changes.
2041          */
2042         path->fp_sibling = bier_fmask_child_add(path->bier_fmask.fp_bier_fmask,
2043                                                 FIB_NODE_TYPE_PATH,
2044                                                 fib_path_get_index(path));
2045
2046         path->fp_via_bier_fmask = path->bier_fmask.fp_bier_fmask;
2047         fib_path_bier_fmask_update(path, &path->fp_dpo);
2048
2049         break;
2050     }
2051     case FIB_PATH_TYPE_BIER_IMP:
2052         bier_imp_lock(path->bier_imp.fp_bier_imp);
2053         bier_imp_contribute_forwarding(path->bier_imp.fp_bier_imp,
2054                                        DPO_PROTO_IP4,
2055                                        &path->fp_dpo);
2056         break;
2057     case FIB_PATH_TYPE_BIER_TABLE:
2058     {
2059         /*
2060          * Find/create the BIER table to link to
2061          */
2062         ASSERT(FIB_NODE_INDEX_INVALID == path->fp_via_bier_tbl);
2063
2064         path->fp_via_bier_tbl =
2065             bier_table_ecmp_create_and_lock(&path->bier_table.fp_bier_tbl);
2066
2067         bier_table_contribute_forwarding(path->fp_via_bier_tbl,
2068                                          &path->fp_dpo);
2069         break;
2070     }
2071     case FIB_PATH_TYPE_SPECIAL:
2072         if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_ICMP_PROHIBIT)
2073         {
2074             ip_null_dpo_add_and_lock (path->fp_nh_proto,
2075                                       IP_NULL_ACTION_SEND_ICMP_PROHIBIT,
2076                                       &path->fp_dpo);
2077         }
2078         else if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_ICMP_UNREACH)
2079         {
2080             ip_null_dpo_add_and_lock (path->fp_nh_proto,
2081                                       IP_NULL_ACTION_SEND_ICMP_UNREACH,
2082                                       &path->fp_dpo);
2083         }
2084         else if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_CLASSIFY)
2085         {
2086             dpo_set (&path->fp_dpo, DPO_CLASSIFY,
2087                      path->fp_nh_proto,
2088                      classify_dpo_create (path->fp_nh_proto,
2089                                           path->classify.fp_classify_table_id));
2090         }
2091         else
2092         {
2093             /*
2094              * Resolve via the drop
2095              */
2096             dpo_copy(&path->fp_dpo, drop_dpo_get(path->fp_nh_proto));
2097         }
2098         break;
2099     case FIB_PATH_TYPE_DEAG:
2100     {
2101         if (DPO_PROTO_BIER == path->fp_nh_proto)
2102         {
2103             bier_disp_table_contribute_forwarding(path->deag.fp_tbl_id,
2104                                                   &path->fp_dpo);
2105         }
2106         else
2107         {
2108             /*
2109              * Resolve via a lookup DPO.
2110              * FIXME. control plane should add routes with a table ID
2111              */
2112             lookup_input_t input;
2113             lookup_cast_t cast;
2114
2115             cast = (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_RPF_ID ?
2116                     LOOKUP_MULTICAST :
2117                     LOOKUP_UNICAST);
2118             input = (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_DEAG_SRC ?
2119                      LOOKUP_INPUT_SRC_ADDR :
2120                      LOOKUP_INPUT_DST_ADDR);
2121
2122             lookup_dpo_add_or_lock_w_fib_index(path->deag.fp_tbl_id,
2123                                                path->fp_nh_proto,
2124                                                cast,
2125                                                input,
2126                                                LOOKUP_TABLE_FROM_CONFIG,
2127                                                &path->fp_dpo);
2128         }
2129         break;
2130     }
2131     case FIB_PATH_TYPE_DVR:
2132         dvr_dpo_add_or_lock(path->dvr.fp_interface,
2133                             path->fp_nh_proto,
2134                             &path->fp_dpo);
2135         break;
2136     case FIB_PATH_TYPE_RECEIVE:
2137         /*
2138          * Resolve via a receive DPO.
2139          */
2140         receive_dpo_add_or_lock(path->fp_nh_proto,
2141                                 path->receive.fp_interface,
2142                                 &path->receive.fp_addr,
2143                                 &path->fp_dpo);
2144         break;
2145     case FIB_PATH_TYPE_UDP_ENCAP:
2146         udp_encap_lock(path->udp_encap.fp_udp_encap_id);
2147         udp_encap_contribute_forwarding(path->udp_encap.fp_udp_encap_id,
2148                                         path->fp_nh_proto,
2149                                         &path->fp_dpo);
2150         break;
2151     case FIB_PATH_TYPE_INTF_RX: {
2152         /*
2153          * Resolve via an interface receive DPO.
2154          */
2155         interface_rx_dpo_add_or_lock(path->fp_nh_proto,
2156                                      path->intf_rx.fp_interface,
2157                                      &path->fp_dpo);
2158         break;
2159     }
2160     case FIB_PATH_TYPE_EXCLUSIVE:
2161         /*
2162          * Resolve via the user provided DPO
2163          */
2164         dpo_copy(&path->fp_dpo, &path->exclusive.fp_ex_dpo);
2165         break;
2166     }
2167
2168     return (fib_path_is_resolved(path_index));
2169 }
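
/*
 * Usage sketch (illustrative; 'path_index' is a hypothetical, previously
 * created path). The typical lifecycle is to resolve a path and then ask
 * it to contribute forwarding for a given chain type:
 *
 *   dpo_id_t dpo = DPO_INVALID;
 *
 *   fib_path_resolve(path_index);
 *   if (fib_path_is_resolved(path_index))
 *   {
 *       fib_path_contribute_forwarding(path_index,
 *                                      FIB_FORW_CHAIN_TYPE_UNICAST_IP4,
 *                                      DPO_PROTO_IP4,
 *                                      &dpo);
 *   }
 *   dpo_reset(&dpo);
 */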
2170
2171 u32
2172 fib_path_get_resolving_interface (fib_node_index_t path_index)
2173 {
2174     fib_path_t *path;
2175
2176     path = fib_path_get(path_index);
2177
2178     switch (path->fp_type)
2179     {
2180     case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
2181         return (path->attached_next_hop.fp_interface);
2182     case FIB_PATH_TYPE_ATTACHED:
2183         return (path->attached.fp_interface);
2184     case FIB_PATH_TYPE_RECEIVE:
2185         return (path->receive.fp_interface);
2186     case FIB_PATH_TYPE_RECURSIVE:
2187         if (fib_path_is_resolved(path_index))
2188         {
2189             return (fib_entry_get_resolving_interface(path->fp_via_fib));
2190         }
2191         break;
2192     case FIB_PATH_TYPE_DVR:
2193         return (path->dvr.fp_interface);
2194     case FIB_PATH_TYPE_INTF_RX:
2195     case FIB_PATH_TYPE_UDP_ENCAP:
2196     case FIB_PATH_TYPE_SPECIAL:
2197     case FIB_PATH_TYPE_DEAG:
2198     case FIB_PATH_TYPE_EXCLUSIVE:
2199     case FIB_PATH_TYPE_BIER_FMASK:
2200     case FIB_PATH_TYPE_BIER_TABLE:
2201     case FIB_PATH_TYPE_BIER_IMP:
2202         break;
2203     }
2204     return (dpo_get_urpf(&path->fp_dpo));
2205 }
2206
2207 index_t
2208 fib_path_get_resolving_index (fib_node_index_t path_index)
2209 {
2210     fib_path_t *path;
2211
2212     path = fib_path_get(path_index);
2213
2214     switch (path->fp_type)
2215     {
2216     case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
2217     case FIB_PATH_TYPE_ATTACHED:
2218     case FIB_PATH_TYPE_RECEIVE:
2219     case FIB_PATH_TYPE_INTF_RX:
2220     case FIB_PATH_TYPE_SPECIAL:
2221     case FIB_PATH_TYPE_DEAG:
2222     case FIB_PATH_TYPE_DVR:
2223     case FIB_PATH_TYPE_EXCLUSIVE:
2224         break;
2225     case FIB_PATH_TYPE_UDP_ENCAP:
2226         return (path->udp_encap.fp_udp_encap_id);
2227     case FIB_PATH_TYPE_RECURSIVE:
2228         return (path->fp_via_fib);
2229     case FIB_PATH_TYPE_BIER_FMASK:
2230         return (path->bier_fmask.fp_bier_fmask);
2231     case FIB_PATH_TYPE_BIER_TABLE:
2232         return (path->fp_via_bier_tbl);
2233     case FIB_PATH_TYPE_BIER_IMP:
2234         return (path->bier_imp.fp_bier_imp);
2235     }
2236     return (~0);
2237 }
2238
2239 adj_index_t
2240 fib_path_get_adj (fib_node_index_t path_index)
2241 {
2242     fib_path_t *path;
2243
2244     path = fib_path_get(path_index);
2245
2246     if (dpo_is_adj(&path->fp_dpo))
2247     {
2248         return (path->fp_dpo.dpoi_index);
2249     }
2250     return (ADJ_INDEX_INVALID);
2251 }
2252
2253 u16
2254 fib_path_get_weight (fib_node_index_t path_index)
2255 {
2256     fib_path_t *path;
2257
2258     path = fib_path_get(path_index);
2259
2260     ASSERT(path);
2261
2262     return (path->fp_weight);
2263 }
2264
2265 u16
2266 fib_path_get_preference (fib_node_index_t path_index)
2267 {
2268     fib_path_t *path;
2269
2270     path = fib_path_get(path_index);
2271
2272     ASSERT(path);
2273
2274     return (path->fp_preference);
2275 }
2276
2277 u32
2278 fib_path_get_rpf_id (fib_node_index_t path_index)
2279 {
2280     fib_path_t *path;
2281
2282     path = fib_path_get(path_index);
2283
2284     ASSERT(path);
2285
2286     if (FIB_PATH_CFG_FLAG_RPF_ID & path->fp_cfg_flags)
2287     {
2288         return (path->deag.fp_rpf_id);
2289     }
2290
2291     return (~0);
2292 }
2293
2294 /**
2295  * @brief Contribute the path's adjacency to the list passed.
2296  * By calling this function over all paths, recursively, a child
2297  * can construct its full set of forwarding adjacencies, and hence its
2298  * uRPF list.
2299  */
2300 void
2301 fib_path_contribute_urpf (fib_node_index_t path_index,
2302                           index_t urpf)
2303 {
2304     fib_path_t *path;
2305
2306     path = fib_path_get(path_index);
2307
2308     /*
2309      * resolved and unresolved paths contribute to the RPF list.
2310      */
2311     switch (path->fp_type)
2312     {
2313     case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
2314         fib_urpf_list_append(urpf, path->attached_next_hop.fp_interface);
2315         break;
2316
2317     case FIB_PATH_TYPE_ATTACHED:
2318         fib_urpf_list_append(urpf, path->attached.fp_interface);
2319         break;
2320
2321     case FIB_PATH_TYPE_RECURSIVE:
2322         if (FIB_NODE_INDEX_INVALID != path->fp_via_fib &&
2323             !fib_path_is_looped(path_index))
2324         {
2325             /*
2326              * a path can be unresolved due to resolution constraints, or
2327              * because it has no via-entry; without one there is nothing to contribute.
2328              */
2329             fib_entry_contribute_urpf(path->fp_via_fib, urpf);
2330         }
2331         break;
2332
2333     case FIB_PATH_TYPE_EXCLUSIVE:
2334     case FIB_PATH_TYPE_SPECIAL:
2335     {
2336         /*
2337          * these path types may link to an adj, if that's what
2338          * the client gave
2339          */
2340         u32 rpf_sw_if_index;
2341
2342         rpf_sw_if_index = dpo_get_urpf(&path->fp_dpo);
2343
2344         if (~0 != rpf_sw_if_index)
2345         {
2346             fib_urpf_list_append(urpf, rpf_sw_if_index);
2347         }
2348         break;
2349     }
2350     case FIB_PATH_TYPE_DVR:
2351         fib_urpf_list_append(urpf, path->dvr.fp_interface);
2352         break;
2353     case FIB_PATH_TYPE_UDP_ENCAP:
2354         fib_urpf_list_append(urpf, path->udp_encap.fp_udp_encap_id);
2355         break;
2356     case FIB_PATH_TYPE_DEAG:
2357     case FIB_PATH_TYPE_RECEIVE:
2358     case FIB_PATH_TYPE_INTF_RX:
2359     case FIB_PATH_TYPE_BIER_FMASK:
2360     case FIB_PATH_TYPE_BIER_TABLE:
2361     case FIB_PATH_TYPE_BIER_IMP:
2362         /*
2363          * these path types don't link to an adj
2364          */
2365         break;
2366     }
2367 }
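
/*
 * Usage sketch (illustrative; 'path_indices' is a hypothetical vector and
 * the urpf-list allocator is assumed from fib_urpf_list.h). An owner builds
 * its uRPF set by asking each of its paths to contribute:
 *
 *   index_t urpf = fib_urpf_list_alloc_and_lock();
 *   fib_node_index_t *path_index;
 *
 *   vec_foreach(path_index, path_indices)
 *   {
 *       fib_path_contribute_urpf(*path_index, urpf);
 *   }
 */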
2368
2369 void
2370 fib_path_stack_mpls_disp (fib_node_index_t path_index,
2371                           dpo_proto_t payload_proto,
2372                           fib_mpls_lsp_mode_t mode,
2373                           dpo_id_t *dpo)
2374 {
2375     fib_path_t *path;
2376
2377     path = fib_path_get(path_index);
2378
2379     ASSERT(path);
2380
2381     switch (path->fp_type)
2382     {
2383     case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
2384     {
2385         dpo_id_t tmp = DPO_INVALID;
2386
2387         dpo_copy(&tmp, dpo);
2388
2389         mpls_disp_dpo_create(payload_proto, ~0, mode, &tmp, dpo);
2390         dpo_reset(&tmp);
2391         break;
2392     }                
2393     case FIB_PATH_TYPE_DEAG:
2394     {
2395         dpo_id_t tmp = DPO_INVALID;
2396
2397         dpo_copy(&tmp, dpo);
2398
2399         mpls_disp_dpo_create(payload_proto,
2400                              path->deag.fp_rpf_id,
2401                              mode, &tmp, dpo);
2402         dpo_reset(&tmp);
2403         break;
2404     }
2405     case FIB_PATH_TYPE_RECEIVE:
2406     case FIB_PATH_TYPE_ATTACHED:
2407     case FIB_PATH_TYPE_RECURSIVE:
2408     case FIB_PATH_TYPE_INTF_RX:
2409     case FIB_PATH_TYPE_UDP_ENCAP:
2410     case FIB_PATH_TYPE_EXCLUSIVE:
2411     case FIB_PATH_TYPE_SPECIAL:
2412     case FIB_PATH_TYPE_BIER_FMASK:
2413     case FIB_PATH_TYPE_BIER_TABLE:
2414     case FIB_PATH_TYPE_BIER_IMP:
2415     case FIB_PATH_TYPE_DVR:
2416         break;
2417     }
2418
2419     if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_POP_PW_CW)
2420     {
2421         dpo_id_t tmp = DPO_INVALID;
2422
2423         dpo_copy(&tmp, dpo);
2424
2425         pw_cw_dpo_create(&tmp, dpo);
2426         dpo_reset(&tmp);
2427     }
2428 }
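
/*
 * Usage sketch (illustrative; 'path_index' is hypothetical and
 * FIB_MPLS_LSP_MODE_PIPE is assumed from fib_types.h). An MPLS EOS chain
 * built from this path can have a disposition DPO stacked on top, e.g. for
 * a pipe-mode LSP carrying IPv4 payload:
 *
 *   dpo_id_t dpo = DPO_INVALID;
 *
 *   fib_path_contribute_forwarding(path_index,
 *                                  FIB_FORW_CHAIN_TYPE_MPLS_EOS,
 *                                  DPO_PROTO_IP4, &dpo);
 *   fib_path_stack_mpls_disp(path_index, DPO_PROTO_IP4,
 *                            FIB_MPLS_LSP_MODE_PIPE, &dpo);
 *   dpo_reset(&dpo);
 */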
2429
2430 void
2431 fib_path_contribute_forwarding (fib_node_index_t path_index,
2432                                 fib_forward_chain_type_t fct,
2433                                 dpo_proto_t payload_proto,
2434                                 dpo_id_t *dpo)
2435 {
2436     fib_path_t *path;
2437
2438     path = fib_path_get(path_index);
2439
2440     ASSERT(path);
2441
2442     /*
2443      * The DPO stored in the path was created when the path was resolved.
2444      * It therefore represents the path's 'native' forwarding chain type.
2445      * For all other chain types we need to construct something else.
2446      */
2447     if (fib_path_to_chain_type(path) == fct)
2448     {
2449         dpo_copy(dpo, &path->fp_dpo);
2450     }
2451     else
2452     {
2453         switch (path->fp_type)
2454         {
2455         case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
2456             switch (fct)
2457             {
2458             case FIB_FORW_CHAIN_TYPE_MPLS_EOS: {
2459                     dpo_id_t tmp = DPO_INVALID;
2460                     dpo_copy (&tmp, dpo);
2461                     path = fib_path_attached_next_hop_get_adj(
2462                            path,
2463                            dpo_proto_to_link(payload_proto),
2464                            &tmp);
2465                     dpo_copy (dpo, &tmp);
2466                     dpo_reset(&tmp);
2467                     break;
2468             }
2469             case FIB_FORW_CHAIN_TYPE_UNICAST_IP4:
2470             case FIB_FORW_CHAIN_TYPE_UNICAST_IP6:
2471             case FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS:
2472             case FIB_FORW_CHAIN_TYPE_ETHERNET:
2473             case FIB_FORW_CHAIN_TYPE_NSH:
2474             case FIB_FORW_CHAIN_TYPE_MCAST_IP4:
2475             case FIB_FORW_CHAIN_TYPE_MCAST_IP6:
2476                 {
2477                     dpo_id_t tmp = DPO_INVALID;
2478                     dpo_copy (&tmp, dpo);
2479                     path = fib_path_attached_next_hop_get_adj(
2480                            path,
2481                            fib_forw_chain_type_to_link_type(fct),
2482                            &tmp);
2483                     dpo_copy (dpo, &tmp);
2484                     dpo_reset(&tmp);
2485                     break;
2486                 }
2487             case FIB_FORW_CHAIN_TYPE_BIER:
2488                 break;
2489             }
2490             break;
2491         case FIB_PATH_TYPE_RECURSIVE:
2492             switch (fct)
2493             {
2494             case FIB_FORW_CHAIN_TYPE_MPLS_EOS:
2495             case FIB_FORW_CHAIN_TYPE_UNICAST_IP4:
2496             case FIB_FORW_CHAIN_TYPE_UNICAST_IP6:
2497             case FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS:
2498             case FIB_FORW_CHAIN_TYPE_MCAST_IP4:
2499             case FIB_FORW_CHAIN_TYPE_MCAST_IP6:
2500             case FIB_FORW_CHAIN_TYPE_BIER:
2501                 fib_path_recursive_adj_update(path, fct, dpo);
2502                 break;
2503             case FIB_FORW_CHAIN_TYPE_ETHERNET:
2504             case FIB_FORW_CHAIN_TYPE_NSH:
2505                 ASSERT(0);
2506                 break;
2507             }
2508             break;
2509         case FIB_PATH_TYPE_BIER_TABLE:
2510             switch (fct)
2511             {
2512             case FIB_FORW_CHAIN_TYPE_BIER:
2513                 bier_table_contribute_forwarding(path->fp_via_bier_tbl, dpo);
2514                 break;
2515             case FIB_FORW_CHAIN_TYPE_MPLS_EOS:
2516             case FIB_FORW_CHAIN_TYPE_UNICAST_IP4:
2517             case FIB_FORW_CHAIN_TYPE_UNICAST_IP6:
2518             case FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS:
2519             case FIB_FORW_CHAIN_TYPE_MCAST_IP4:
2520             case FIB_FORW_CHAIN_TYPE_MCAST_IP6:
2521             case FIB_FORW_CHAIN_TYPE_ETHERNET:
2522             case FIB_FORW_CHAIN_TYPE_NSH:
2523                 ASSERT(0);
2524                 break;
2525             }
2526             break;
2527         case FIB_PATH_TYPE_BIER_FMASK:
2528             switch (fct)
2529             {
2530             case FIB_FORW_CHAIN_TYPE_BIER:
2531                 fib_path_bier_fmask_update(path, dpo);
2532                 break;
2533             case FIB_FORW_CHAIN_TYPE_MPLS_EOS:
2534             case FIB_FORW_CHAIN_TYPE_UNICAST_IP4:
2535             case FIB_FORW_CHAIN_TYPE_UNICAST_IP6:
2536             case FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS:
2537             case FIB_FORW_CHAIN_TYPE_MCAST_IP4:
2538             case FIB_FORW_CHAIN_TYPE_MCAST_IP6:
2539             case FIB_FORW_CHAIN_TYPE_ETHERNET:
2540             case FIB_FORW_CHAIN_TYPE_NSH:
2541                 ASSERT(0);
2542                 break;
2543             }
2544             break;
2545         case FIB_PATH_TYPE_BIER_IMP:
2546             bier_imp_contribute_forwarding(path->bier_imp.fp_bier_imp,
2547                                            fib_forw_chain_type_to_dpo_proto(fct),
2548                                            dpo);
2549             break;
2550         case FIB_PATH_TYPE_DEAG:
2551             switch (fct)
2552             {
2553             case FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS:
2554                 lookup_dpo_add_or_lock_w_table_id(MPLS_FIB_DEFAULT_TABLE_ID,
2555                                                   DPO_PROTO_MPLS,
2556                                                   LOOKUP_UNICAST,
2557                                                   LOOKUP_INPUT_DST_ADDR,
2558                                                   LOOKUP_TABLE_FROM_CONFIG,
2559                                                   dpo);
2560                 break;
2561             case FIB_FORW_CHAIN_TYPE_MPLS_EOS:
2562             case FIB_FORW_CHAIN_TYPE_UNICAST_IP4:
2563             case FIB_FORW_CHAIN_TYPE_UNICAST_IP6:
2564             case FIB_FORW_CHAIN_TYPE_MCAST_IP4:
2565             case FIB_FORW_CHAIN_TYPE_MCAST_IP6:
2566                 dpo_copy(dpo, &path->fp_dpo);
2567                 break;
2568             case FIB_FORW_CHAIN_TYPE_BIER:
2569                 break;
2570             case FIB_FORW_CHAIN_TYPE_ETHERNET:
2571             case FIB_FORW_CHAIN_TYPE_NSH:
2572                 ASSERT(0);
2573                 break;
2574             }
2575             break;
2576         case FIB_PATH_TYPE_EXCLUSIVE:
2577             dpo_copy(dpo, &path->exclusive.fp_ex_dpo);
2578             break;
2579         case FIB_PATH_TYPE_ATTACHED:
2580             switch (fct)
2581             {
2582             case FIB_FORW_CHAIN_TYPE_MPLS_EOS:
2583                 /*
2584                  * End of stack traffic via an attached path (a glean)
2585                  * must force an IP lookup so that the IP packet can
2586                  * match against any installed adj-fibs
2587                  */
2588                 lookup_dpo_add_or_lock_w_fib_index(
2589                     fib_table_get_index_for_sw_if_index(
2590                         dpo_proto_to_fib(payload_proto),
2591                         path->attached.fp_interface),
2592                     payload_proto,
2593                     LOOKUP_UNICAST,
2594                     LOOKUP_INPUT_DST_ADDR,
2595                     LOOKUP_TABLE_FROM_CONFIG,
2596                     dpo);
2597                 break;
2598             case FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS:
2599             case FIB_FORW_CHAIN_TYPE_UNICAST_IP4:
2600             case FIB_FORW_CHAIN_TYPE_UNICAST_IP6:
2601             case FIB_FORW_CHAIN_TYPE_ETHERNET:
2602             case FIB_FORW_CHAIN_TYPE_NSH:
2603             case FIB_FORW_CHAIN_TYPE_BIER:
2604                 fib_path_attached_get_adj(path,
2605                                           fib_forw_chain_type_to_link_type(fct),
2606                                           dpo);
2607                 break;
2608             case FIB_FORW_CHAIN_TYPE_MCAST_IP4:
2609             case FIB_FORW_CHAIN_TYPE_MCAST_IP6:
2610                 {
2611                     adj_index_t ai;
2612
2613                     /*
2614                      * Create the adj needed for sending IP multicast traffic
2615                      */
2616                     if (vnet_sw_interface_is_p2p(vnet_get_main(),
2617                                                  path->attached.fp_interface))
2618                     {
2619                         /*
2620                          * point-2-point interfaces do not require a glean, since
2621                          * there is nothing to ARP. Install a rewrite/nbr adj instead
2622                          */
2623                         ai = adj_nbr_add_or_lock(dpo_proto_to_fib(path->fp_nh_proto),
2624                                                  fib_forw_chain_type_to_link_type(fct),
2625                                                  &zero_addr,
2626                                                  path->attached.fp_interface);
2627                     }
2628                     else
2629                     {
2630                         ai = adj_mcast_add_or_lock(dpo_proto_to_fib(path->fp_nh_proto),
2631                                                    fib_forw_chain_type_to_link_type(fct),
2632                                                    path->attached.fp_interface);
2633                     }
2634                     dpo_set(dpo, DPO_ADJACENCY,
2635                             fib_forw_chain_type_to_dpo_proto(fct),
2636                             ai);
2637                     adj_unlock(ai);
2638                 }
2639                 break;
2640             }
2641             break;
2642         case FIB_PATH_TYPE_INTF_RX:
2643             /*
2644              * Resolve via an interface receive DPO
2645              */
2646             interface_rx_dpo_add_or_lock(payload_proto,
2647                                          path->intf_rx.fp_interface,
2648                                          dpo);
2649             break;
2650         case FIB_PATH_TYPE_UDP_ENCAP:
2651             udp_encap_contribute_forwarding(path->udp_encap.fp_udp_encap_id,
2652                                             path->fp_nh_proto,
2653                                             dpo);
2654             break;
2655         case FIB_PATH_TYPE_RECEIVE:
2656         case FIB_PATH_TYPE_SPECIAL:
2657         case FIB_PATH_TYPE_DVR:
2658             dpo_copy(dpo, &path->fp_dpo);
2659             break;
2660         }
2661     }
2662 }
2663
2664 load_balance_path_t *
2665 fib_path_append_nh_for_multipath_hash (fib_node_index_t path_index,
2666                                        fib_forward_chain_type_t fct,
2667                                        dpo_proto_t payload_proto,
2668                                        load_balance_path_t *hash_key)
2669 {
2670     load_balance_path_t *mnh;
2671     fib_path_t *path;
2672
2673     path = fib_path_get(path_index);
2674
2675     ASSERT(path);
2676
2677     vec_add2(hash_key, mnh, 1);
2678
2679     mnh->path_weight = path->fp_weight;
2680     mnh->path_index = path_index;
2681
2682     if (fib_path_is_resolved(path_index))
2683     {
2684         fib_path_contribute_forwarding(path_index, fct, payload_proto, &mnh->path_dpo);
2685     }
2686     else
2687     {
2688         dpo_copy(&mnh->path_dpo,
2689                  drop_dpo_get(fib_forw_chain_type_to_dpo_proto(fct)));
2690     }
2691     return (hash_key);
2692 }
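
/*
 * Usage sketch (illustrative; 'lb_dpo' is a hypothetical dpo_id_t for a
 * load-balance object and the update API is assumed from
 * vnet/dpo/load_balance.h). A path-list collects one load_balance_path_t
 * per path and hands the vector to its load-balance:
 *
 *   load_balance_path_t *nhs = NULL;
 *
 *   nhs = fib_path_append_nh_for_multipath_hash(path_index,
 *                                               FIB_FORW_CHAIN_TYPE_UNICAST_IP4,
 *                                               DPO_PROTO_IP4,
 *                                               nhs);
 *   load_balance_multipath_update(&lb_dpo, nhs, LOAD_BALANCE_FLAG_NONE);
 *   vec_free(nhs);
 */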
2693
2694 int
2695 fib_path_is_recursive_constrained (fib_node_index_t path_index)
2696 {
2697     fib_path_t *path;
2698
2699     path = fib_path_get(path_index);
2700
2701     return ((FIB_PATH_TYPE_RECURSIVE == path->fp_type) &&
2702             ((path->fp_cfg_flags & FIB_PATH_CFG_FLAG_RESOLVE_ATTACHED) ||
2703              (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_RESOLVE_HOST)));
2704 }
2705
2706 int
2707 fib_path_is_exclusive (fib_node_index_t path_index)
2708 {
2709     fib_path_t *path;
2710
2711     path = fib_path_get(path_index);
2712
2713     return (FIB_PATH_TYPE_EXCLUSIVE == path->fp_type);
2714 }
2715
2716 int
2717 fib_path_is_deag (fib_node_index_t path_index)
2718 {
2719     fib_path_t *path;
2720
2721     path = fib_path_get(path_index);
2722
2723     return (FIB_PATH_TYPE_DEAG == path->fp_type);
2724 }
2725
2726 int
2727 fib_path_is_resolved (fib_node_index_t path_index)
2728 {
2729     fib_path_t *path;
2730
2731     path = fib_path_get(path_index);
2732
2733     return (dpo_id_is_valid(&path->fp_dpo) &&
2734             (path->fp_oper_flags & FIB_PATH_OPER_FLAG_RESOLVED) &&
2735             !fib_path_is_looped(path_index) &&
2736             !fib_path_is_permanent_drop(path));
2737 }
2738
2739 int
2740 fib_path_is_looped (fib_node_index_t path_index)
2741 {
2742     fib_path_t *path;
2743
2744     path = fib_path_get(path_index);
2745
2746     return (path->fp_oper_flags & FIB_PATH_OPER_FLAG_RECURSIVE_LOOP);
2747 }
2748
2749 fib_path_list_walk_rc_t
2750 fib_path_encode (fib_node_index_t path_list_index,
2751                  fib_node_index_t path_index,
2752                  const fib_path_ext_t *path_ext,
2753                  void *args)
2754 {
2755     fib_path_encode_ctx_t *ctx = args;
2756     fib_route_path_t *rpath;
2757     fib_path_t *path;
2758
2759     path = fib_path_get(path_index);
2760     if (!path)
2761       return (FIB_PATH_LIST_WALK_CONTINUE);
2762
2763     vec_add2(ctx->rpaths, rpath, 1);
2764     rpath->frp_weight = path->fp_weight;
2765     rpath->frp_preference = path->fp_preference;
2766     rpath->frp_proto = path->fp_nh_proto;
2767     rpath->frp_sw_if_index = ~0;
2768     rpath->frp_fib_index = 0;
2769
2770     switch (path->fp_type)
2771     {
2772       case FIB_PATH_TYPE_RECEIVE:
2773         rpath->frp_addr = path->receive.fp_addr;
2774         rpath->frp_sw_if_index = path->receive.fp_interface;
2775         rpath->frp_flags |= FIB_ROUTE_PATH_LOCAL;
2776         break;
2777       case FIB_PATH_TYPE_ATTACHED:
2778         rpath->frp_sw_if_index = path->attached.fp_interface;
2779         break;
2780       case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
2781         rpath->frp_sw_if_index = path->attached_next_hop.fp_interface;
2782         rpath->frp_addr = path->attached_next_hop.fp_nh;
2783         break;
2784       case FIB_PATH_TYPE_BIER_FMASK:
2785         rpath->frp_bier_fmask = path->bier_fmask.fp_bier_fmask;
2786         break;
2787       case FIB_PATH_TYPE_SPECIAL:
2788         break;
2789       case FIB_PATH_TYPE_DEAG:
2790         rpath->frp_fib_index = path->deag.fp_tbl_id;
2791         if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_RPF_ID)
2792         {
2793             rpath->frp_flags |= FIB_ROUTE_PATH_RPF_ID;
2794         }
2795         break;
2796       case FIB_PATH_TYPE_RECURSIVE:
2797         rpath->frp_addr = path->recursive.fp_nh.fp_ip;
2798         rpath->frp_fib_index = path->recursive.fp_tbl_id;
2799         break;
2800       case FIB_PATH_TYPE_DVR:
2801           rpath->frp_sw_if_index = path->dvr.fp_interface;
2802           rpath->frp_flags |= FIB_ROUTE_PATH_DVR;
2803           break;
2804       case FIB_PATH_TYPE_UDP_ENCAP:
2805           rpath->frp_udp_encap_id = path->udp_encap.fp_udp_encap_id;
2806           rpath->frp_flags |= FIB_ROUTE_PATH_UDP_ENCAP;
2807           break;
2808       case FIB_PATH_TYPE_INTF_RX:
2809           rpath->frp_sw_if_index = path->receive.fp_interface;
2810           rpath->frp_flags |= FIB_ROUTE_PATH_INTF_RX;
2811           break;
2812       case FIB_PATH_TYPE_EXCLUSIVE:
2813         rpath->frp_flags |= FIB_ROUTE_PATH_EXCLUSIVE;
2814       default:
2815         break;
2816     }
2817
2818     if (path_ext && path_ext->fpe_type == FIB_PATH_EXT_MPLS) 
2819     {
2820         rpath->frp_label_stack = path_ext->fpe_path.frp_label_stack;
2821     }
2822
2823     if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_DROP)
2824         rpath->frp_flags |= FIB_ROUTE_PATH_DROP;
2825     if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_ICMP_UNREACH)
2826         rpath->frp_flags |= FIB_ROUTE_PATH_ICMP_UNREACH;
2827     if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_ICMP_PROHIBIT)
2828         rpath->frp_flags |= FIB_ROUTE_PATH_ICMP_PROHIBIT;
2829
2830     return (FIB_PATH_LIST_WALK_CONTINUE);
2831 }
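
/*
 * Usage sketch (illustrative; the walk API and the exact shape of
 * fib_path_encode_ctx_t are assumed from fib_path_list.h). fib_path_encode
 * is intended as a path-list walk callback that appends one fib_route_path_t
 * per path into the context:
 *
 *   fib_path_encode_ctx_t ctx = {
 *       .rpaths = NULL,
 *   };
 *
 *   fib_path_list_walk_w_ext(path_list_index, NULL, fib_path_encode, &ctx);
 *   ... consume ctx.rpaths ...
 *   vec_free(ctx.rpaths);
 */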
2832
2833 dpo_proto_t
2834 fib_path_get_proto (fib_node_index_t path_index)
2835 {
2836     fib_path_t *path;
2837
2838     path = fib_path_get(path_index);
2839
2840     return (path->fp_nh_proto);
2841 }
2842
2843 void
2844 fib_path_module_init (void)
2845 {
2846     fib_node_register_type (FIB_NODE_TYPE_PATH, &fib_path_vft);
2847     fib_path_logger = vlib_log_register_class ("fib", "path");
2848 }
2849
2850 static clib_error_t *
2851 show_fib_path_command (vlib_main_t * vm,
2852                         unformat_input_t * input,
2853                         vlib_cli_command_t * cmd)
2854 {
2855     fib_node_index_t pi;
2856     fib_path_t *path;
2857
2858     if (unformat (input, "%d", &pi))
2859     {
2860         /*
2861          * show one in detail
2862          */
2863         if (!pool_is_free_index(fib_path_pool, pi))
2864         {
2865             path = fib_path_get(pi);
2866             u8 *s = format(NULL, "%U", format_fib_path, pi, 1,
2867                            FIB_PATH_FORMAT_FLAGS_NONE);
2868             s = format(s, "\n  children:");
2869             s = fib_node_children_format(path->fp_node.fn_children, s);
2870             vlib_cli_output (vm, "%v", s);
2871             vec_free(s);
2872         }
2873         else
2874         {
2875             vlib_cli_output (vm, "path %d invalid", pi);
2876         }
2877     }
2878     else
2879     {
2880         vlib_cli_output (vm, "FIB Paths");
2881         pool_foreach_index (pi, fib_path_pool)
2882          {
2883             vlib_cli_output (vm, "%U", format_fib_path, pi, 0,
2884                              FIB_PATH_FORMAT_FLAGS_NONE);
2885         }
2886     }
2887
2888     return (NULL);
2889 }
2890
2891 VLIB_CLI_COMMAND (show_fib_path, static) = {
2892   .path = "show fib paths",
2893   .function = show_fib_path_command,
2894   .short_help = "show fib paths",
2895 };
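
/*
 * Example CLI usage (illustrative; output format depends on the build):
 *
 *   vpp# show fib paths        - list all paths in summary form
 *   vpp# show fib paths 12     - show the path with index 12 in detail
 */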