[vpp.git] / src / vnet / fib / fib_path.c
1 /*
2  * Copyright (c) 2016 Cisco and/or its affiliates.
3  * Licensed under the Apache License, Version 2.0 (the "License");
4  * you may not use this file except in compliance with the License.
5  * You may obtain a copy of the License at:
6  *
7  *     http://www.apache.org/licenses/LICENSE-2.0
8  *
9  * Unless required by applicable law or agreed to in writing, software
10  * distributed under the License is distributed on an "AS IS" BASIS,
11  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12  * See the License for the specific language governing permissions and
13  * limitations under the License.
14  */
15
16 #include <vlib/vlib.h>
17 #include <vnet/vnet.h>
18 #include <vnet/ip/format.h>
19 #include <vnet/ip/ip.h>
20 #include <vnet/dpo/drop_dpo.h>
21 #include <vnet/dpo/receive_dpo.h>
22 #include <vnet/dpo/load_balance_map.h>
23 #include <vnet/dpo/lookup_dpo.h>
24 #include <vnet/dpo/interface_rx_dpo.h>
25 #include <vnet/dpo/mpls_disposition.h>
26 #include <vnet/dpo/dvr_dpo.h>
27 #include <vnet/dpo/ip_null_dpo.h>
28 #include <vnet/dpo/classify_dpo.h>
29 #include <vnet/dpo/pw_cw.h>
30
31 #include <vnet/adj/adj.h>
32 #include <vnet/adj/adj_mcast.h>
33
34 #include <vnet/fib/fib_path.h>
35 #include <vnet/fib/fib_node.h>
36 #include <vnet/fib/fib_table.h>
37 #include <vnet/fib/fib_entry.h>
38 #include <vnet/fib/fib_path_list.h>
39 #include <vnet/fib/fib_internal.h>
40 #include <vnet/fib/fib_urpf_list.h>
41 #include <vnet/fib/mpls_fib.h>
42 #include <vnet/fib/fib_path_ext.h>
43 #include <vnet/udp/udp_encap.h>
44 #include <vnet/bier/bier_fmask.h>
45 #include <vnet/bier/bier_table.h>
46 #include <vnet/bier/bier_imp.h>
47 #include <vnet/bier/bier_disp_table.h>
48
49 /**
50  * Enumeration of path types
51  */
52 typedef enum fib_path_type_t_ {
53     /**
54      * Marker. Add new types after this one.
55      */
56     FIB_PATH_TYPE_FIRST = 0,
57     /**
58      * Attached-nexthop. An interface and a nexthop are known.
59      */
60     FIB_PATH_TYPE_ATTACHED_NEXT_HOP = FIB_PATH_TYPE_FIRST,
61     /**
62      * attached. Only the interface is known.
63      */
64     FIB_PATH_TYPE_ATTACHED,
65     /**
66      * recursive. Only the next-hop is known.
67      */
68     FIB_PATH_TYPE_RECURSIVE,
69     /**
70      * special. nothing is known. so we drop.
71      */
72     FIB_PATH_TYPE_SPECIAL,
73     /**
74      * exclusive. user provided adj.
75      */
76     FIB_PATH_TYPE_EXCLUSIVE,
77     /**
78      * deag. Link to a lookup adj in the next table
79      */
80     FIB_PATH_TYPE_DEAG,
81     /**
82      * interface receive.
83      */
84     FIB_PATH_TYPE_INTF_RX,
85     /**
86      * Path resolves via a UDP encap object.
87      */
88     FIB_PATH_TYPE_UDP_ENCAP,
89     /**
90      * receive. it's for-us.
91      */
92     FIB_PATH_TYPE_RECEIVE,
93     /**
94      * bier-imp. it's via a BIER imposition.
95      */
96     FIB_PATH_TYPE_BIER_IMP,
97     /**
98      * bier-table. it's via a BIER ECMP-table.
99      */
100     FIB_PATH_TYPE_BIER_TABLE,
101     /**
102      * bier-fmask. it's via a BIER f-mask.
103      */
104     FIB_PATH_TYPE_BIER_FMASK,
105     /**
106      * via a DVR.
107      */
108     FIB_PATH_TYPE_DVR,
109 } __attribute__ ((packed)) fib_path_type_t;
110
111 #define FIB_PATH_TYPES {                                        \
112     [FIB_PATH_TYPE_ATTACHED_NEXT_HOP] = "attached-nexthop",     \
113     [FIB_PATH_TYPE_ATTACHED]          = "attached",             \
114     [FIB_PATH_TYPE_RECURSIVE]         = "recursive",            \
115     [FIB_PATH_TYPE_SPECIAL]           = "special",              \
116     [FIB_PATH_TYPE_EXCLUSIVE]         = "exclusive",            \
117     [FIB_PATH_TYPE_DEAG]              = "deag",                 \
118     [FIB_PATH_TYPE_INTF_RX]           = "intf-rx",              \
119     [FIB_PATH_TYPE_UDP_ENCAP]         = "udp-encap",            \
120     [FIB_PATH_TYPE_RECEIVE]           = "receive",              \
121     [FIB_PATH_TYPE_BIER_IMP]          = "bier-imp",             \
122     [FIB_PATH_TYPE_BIER_TABLE]        = "bier-table",           \
123     [FIB_PATH_TYPE_BIER_FMASK]        = "bier-fmask",           \
124     [FIB_PATH_TYPE_DVR]               = "dvr",                  \
125 }
126
127 /**
128  * Enumeration of path operational (i.e. derived) attributes
129  */
130 typedef enum fib_path_oper_attribute_t_ {
131     /**
132      * Marker. Add new types after this one.
133      */
134     FIB_PATH_OPER_ATTRIBUTE_FIRST = 0,
135     /**
136      * The path forms part of a recursive loop.
137      */
138     FIB_PATH_OPER_ATTRIBUTE_RECURSIVE_LOOP = FIB_PATH_OPER_ATTRIBUTE_FIRST,
139     /**
140      * The path is resolved
141      */
142     FIB_PATH_OPER_ATTRIBUTE_RESOLVED,
143     /**
144      * The path has become a permanent drop.
145      */
146     FIB_PATH_OPER_ATTRIBUTE_DROP,
147     /**
148      * Marker. Add new types before this one, then update it.
149      */
150     FIB_PATH_OPER_ATTRIBUTE_LAST = FIB_PATH_OPER_ATTRIBUTE_DROP,
151 } __attribute__ ((packed)) fib_path_oper_attribute_t;
152
153 /**
154  * The maximum number of path operational attributes
155  */
156 #define FIB_PATH_OPER_ATTRIBUTE_MAX (FIB_PATH_OPER_ATTRIBUTE_LAST + 1)
157
158 #define FIB_PATH_OPER_ATTRIBUTES {                                      \
159     [FIB_PATH_OPER_ATTRIBUTE_RECURSIVE_LOOP] = "recursive-loop",        \
160     [FIB_PATH_OPER_ATTRIBUTE_RESOLVED]       = "resolved",              \
161     [FIB_PATH_OPER_ATTRIBUTE_DROP]           = "drop",                  \
162 }
163
164 #define FOR_EACH_FIB_PATH_OPER_ATTRIBUTE(_item) \
165     for (_item = FIB_PATH_OPER_ATTRIBUTE_FIRST; \
166          _item <= FIB_PATH_OPER_ATTRIBUTE_LAST; \
167          _item++)
168
169 /**
170  * Path flags from the attributes
171  */
172 typedef enum fib_path_oper_flags_t_ {
173     FIB_PATH_OPER_FLAG_NONE = 0,
174     FIB_PATH_OPER_FLAG_RECURSIVE_LOOP = (1 << FIB_PATH_OPER_ATTRIBUTE_RECURSIVE_LOOP),
175     FIB_PATH_OPER_FLAG_DROP = (1 << FIB_PATH_OPER_ATTRIBUTE_DROP),
176     FIB_PATH_OPER_FLAG_RESOLVED = (1 << FIB_PATH_OPER_ATTRIBUTE_RESOLVED),
177 } __attribute__ ((packed)) fib_path_oper_flags_t;
178
179 /**
180  * A FIB path
181  */
182 typedef struct fib_path_t_ {
183     /**
184      * A path is a node in the FIB graph.
185      */
186     fib_node_t fp_node;
187
188     /**
189      * The index of the path-list to which this path belongs
190      */
191     u32 fp_pl_index;
192
193     /**
194      * This marks the start of the memory area used to hash
195      * the path
196      */
197     STRUCT_MARK(path_hash_start);
198
199     /**
200      * Configuration Flags
201      */
202     fib_path_cfg_flags_t fp_cfg_flags;
203
204     /**
205      * The type of the path. This is the selector for the union
206      */
207     fib_path_type_t fp_type;
208
209     /**
210      * The protocol of the next-hop, i.e. the address family of the
211      * next-hop's address. We can't derive this from the address itself
212      * since the address can be all zeros
213      */
214     dpo_proto_t fp_nh_proto;
215
216     /**
217      * UCMP [unnormalised] weight
218      */
219     u8 fp_weight;
220
221     /**
222      * A path preference. 0 is the best.
223      * Only paths of the best preference, that are 'up', are considered
224      * for forwarding.
225      */
226     u8 fp_preference;
227
228     /**
229      * per-type union of the data required to resolve the path
230      */
231     union {
232         struct {
233             /**
234              * The next-hop
235              */
236             ip46_address_t fp_nh;
237             /**
238              * The interface
239              */
240             u32 fp_interface;
241         } attached_next_hop;
242         struct {
243             /**
244              * The Connected local address
245              */
246             fib_prefix_t fp_connected;
247             /**
248              * The interface
249              */
250             u32 fp_interface;
251         } attached;
252         struct {
253             union
254             {
255                 /**
256                  * The next-hop
257                  */
258                 ip46_address_t fp_ip;
259                 struct {
260                     /**
261                      * The local label to resolve through.
262                      */
263                     mpls_label_t fp_local_label;
264                     /**
265                      * The EOS bit of the resolving label
266                      */
267                     mpls_eos_bit_t fp_eos;
268                 };
269             } fp_nh;
270             /**
271              * The FIB table index in which to find the next-hop.
272              */
273             fib_node_index_t fp_tbl_id;
274         } recursive;
275         struct {
276             /**
277              * BIER FMask ID
278              */
279             index_t fp_bier_fmask;
280         } bier_fmask;
281         struct {
282             /**
283              * The BIER table's ID
284              */
285             bier_table_id_t fp_bier_tbl;
286         } bier_table;
287         struct {
288             /**
289              * The BIER imposition object
290              * this is part of the path's key, since the index_t
291              * of an imposition object is the object's key.
292              */
293             index_t fp_bier_imp;
294         } bier_imp;
295         struct {
296             /**
297              * The FIB index in which to perform the next lookup
298              */
299             fib_node_index_t fp_tbl_id;
300             /**
301              * The RPF-ID to tag the packets with
302              */
303             fib_rpf_id_t fp_rpf_id;
304         } deag;
305         struct {
306         } special;
307         struct {
308             /**
309              * The user provided 'exclusive' DPO
310              */
311             dpo_id_t fp_ex_dpo;
312         } exclusive;
313         struct {
314             /**
315              * The interface on which the local address is configured
316              */
317             u32 fp_interface;
318             /**
319              * The next-hop
320              */
321             ip46_address_t fp_addr;
322         } receive;
323         struct {
324             /**
325              * The interface on which the packets will be input.
326              */
327             u32 fp_interface;
328         } intf_rx;
329         struct {
330             /**
331              * The UDP Encap object this path resolves through
332              */
333             u32 fp_udp_encap_id;
334         } udp_encap;
335         struct {
336             /**
337              * The classify table this path resolves through
338              */
339             u32 fp_classify_table_id;
340         } classify;
341         struct {
342             /**
343              * The interface
344              */
345             u32 fp_interface;
346         } dvr;
347     };
348     STRUCT_MARK(path_hash_end);
349
350     /**
351      * Members in this last section represent information that is
352      * derived during resolution. It should not be copied to new paths
353      * nor compared.
354      */
355
356     /**
357      * Operational Flags
358      */
359     fib_path_oper_flags_t fp_oper_flags;
360
361     union {
362         /**
363          * the resolving via fib. not part of the hashed region, since it
364          * is not part of the path's hash.
365          */
366         fib_node_index_t fp_via_fib;
367         /**
368          * the resolving bier-table
369          */
370         index_t fp_via_bier_tbl;
371         /**
372          * the resolving bier-fmask
373          */
374         index_t fp_via_bier_fmask;
375     };
376
377     /**
378      * The Data-path objects through which this path resolves for IP.
379      */
380     dpo_id_t fp_dpo;
381
382     /**
383      * the index of this path in the parent's child list.
384      */
385     u32 fp_sibling;
386 } fib_path_t;
387
388 /*
389  * Array of strings/names for the path types and attributes
390  */
391 static const char *fib_path_type_names[] = FIB_PATH_TYPES;
392 static const char *fib_path_oper_attribute_names[] = FIB_PATH_OPER_ATTRIBUTES;
393 static const char *fib_path_cfg_attribute_names[]  = FIB_PATH_CFG_ATTRIBUTES;
394
395 /*
396  * The memory pool from which we allocate all the paths
397  */
398 static fib_path_t *fib_path_pool;
399
400 /**
401  * the logger
402  */
403 vlib_log_class_t fib_path_logger;
404
405 /*
406  * Debug macro
407  */
408 #define FIB_PATH_DBG(_p, _fmt, _args...)                                \
409 {                                                                       \
410     vlib_log_debug (fib_path_logger,                                    \
411                     "[%U]: " _fmt,                                      \
412                     format_fib_path, fib_path_get_index(_p), 0,         \
413                     FIB_PATH_FORMAT_FLAGS_ONE_LINE,                     \
414                     ##_args);                                           \
415 }
416
417 static fib_path_t *
418 fib_path_get (fib_node_index_t index)
419 {
420     return (pool_elt_at_index(fib_path_pool, index));
421 }
422
423 static fib_node_index_t 
424 fib_path_get_index (fib_path_t *path)
425 {
426     return (path - fib_path_pool);
427 }
428
429 static fib_node_t *
430 fib_path_get_node (fib_node_index_t index)
431 {
432     return ((fib_node_t*)fib_path_get(index));
433 }
434
435 static fib_path_t*
436 fib_path_from_fib_node (fib_node_t *node)
437 {
438     ASSERT(FIB_NODE_TYPE_PATH == node->fn_type);
439     return ((fib_path_t*)node);
440 }
441
442 u8 *
443 format_fib_path (u8 * s, va_list * args)
444 {
445     fib_node_index_t path_index = va_arg (*args, fib_node_index_t);
446     u32 indent = va_arg (*args, u32);
447     fib_format_path_flags_t flags =  va_arg (*args, fib_format_path_flags_t);
448     vnet_main_t * vnm = vnet_get_main();
449     fib_path_oper_attribute_t oattr;
450     fib_path_cfg_attribute_t cattr;
451     fib_path_t *path;
452     const char *eol;
453
454     if (flags & FIB_PATH_FORMAT_FLAGS_ONE_LINE)
455     {
456         eol = "";
457     }
458     else
459     {
460         eol = "\n";
461     }
462
463     path = fib_path_get(path_index);
464
465     s = format (s, "%Upath:[%d] ", format_white_space, indent,
466                 fib_path_get_index(path));
467     s = format (s, "pl-index:%d ", path->fp_pl_index);
468     s = format (s, "%U ", format_dpo_proto, path->fp_nh_proto);
469     s = format (s, "weight=%d ", path->fp_weight);
470     s = format (s, "pref=%d ", path->fp_preference);
471     s = format (s, "%s: ", fib_path_type_names[path->fp_type]);
472     if (FIB_PATH_OPER_FLAG_NONE != path->fp_oper_flags) {
473         s = format(s, " oper-flags:");
474         FOR_EACH_FIB_PATH_OPER_ATTRIBUTE(oattr) {
475             if ((1<<oattr) & path->fp_oper_flags) {
476                 s = format (s, "%s,", fib_path_oper_attribute_names[oattr]);
477             }
478         }
479     }
480     if (FIB_PATH_CFG_FLAG_NONE != path->fp_cfg_flags) {
481         s = format(s, " cfg-flags:");
482         FOR_EACH_FIB_PATH_CFG_ATTRIBUTE(cattr) {
483             if ((1<<cattr) & path->fp_cfg_flags) {
484                 s = format (s, "%s,", fib_path_cfg_attribute_names[cattr]);
485             }
486         }
487     }
488     if (!(flags & FIB_PATH_FORMAT_FLAGS_ONE_LINE))
489         s = format(s, "\n%U", format_white_space, indent+2);
490
491     switch (path->fp_type)
492     {
493     case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
494         s = format (s, "%U", format_ip46_address,
495                     &path->attached_next_hop.fp_nh,
496                     IP46_TYPE_ANY);
497         if (path->fp_oper_flags & FIB_PATH_OPER_FLAG_DROP)
498         {
499             s = format (s, " if_index:%d", path->attached_next_hop.fp_interface);
500         }
501         else
502         {
503             s = format (s, " %U",
504                         format_vnet_sw_interface_name,
505                         vnm,
506                         vnet_get_sw_interface(
507                             vnm,
508                             path->attached_next_hop.fp_interface));
509             if (vnet_sw_interface_is_p2p(vnet_get_main(),
510                                          path->attached_next_hop.fp_interface))
511             {
512                 s = format (s, " (p2p)");
513             }
514         }
515         if (!dpo_id_is_valid(&path->fp_dpo))
516         {
517             s = format(s, "%s%Uunresolved", eol, format_white_space, indent+2);
518         }
519         else
520         {
521             s = format(s, "%s%U%U", eol,
522                        format_white_space, indent,
523                        format_dpo_id,
524                        &path->fp_dpo, 13);
525         }
526         break;
527     case FIB_PATH_TYPE_ATTACHED:
528         if (path->fp_oper_flags & FIB_PATH_OPER_FLAG_DROP)
529         {
530             s = format (s, "if_index:%d", path->attached.fp_interface);
531         }
532         else
533         {
534             s = format (s, " %U",
535                         format_vnet_sw_interface_name,
536                         vnm,
537                         vnet_get_sw_interface(
538                             vnm,
539                             path->attached.fp_interface));
540         }
541         break;
542     case FIB_PATH_TYPE_RECURSIVE:
543         if (DPO_PROTO_MPLS == path->fp_nh_proto)
544         {
545             s = format (s, "via %U %U",
546                         format_mpls_unicast_label,
547                         path->recursive.fp_nh.fp_local_label,
548                         format_mpls_eos_bit,
549                         path->recursive.fp_nh.fp_eos);
550         }
551         else
552         {
553             s = format (s, "via %U",
554                         format_ip46_address,
555                         &path->recursive.fp_nh.fp_ip,
556                         IP46_TYPE_ANY);
557         }
558         s = format (s, " in fib:%d",
559                     path->recursive.fp_tbl_id);
560         s = format (s, " via-fib:%d",
561                     path->fp_via_fib);
562         s = format (s, " via-dpo:[%U:%d]",
563                     format_dpo_type, path->fp_dpo.dpoi_type, 
564                     path->fp_dpo.dpoi_index);
565
566         break;
567     case FIB_PATH_TYPE_UDP_ENCAP:
568         s = format (s, "UDP-encap ID:%d", path->udp_encap.fp_udp_encap_id);
569         break;
570     case FIB_PATH_TYPE_BIER_TABLE:
571         s = format (s, "via bier-table:[%U]",
572                     format_bier_table_id,
573                     &path->bier_table.fp_bier_tbl);
574         s = format (s, " via-dpo:[%U:%d]",
575                     format_dpo_type, path->fp_dpo.dpoi_type,
576                     path->fp_dpo.dpoi_index);
577         break;
578     case FIB_PATH_TYPE_BIER_FMASK:
579         s = format (s, "via-fmask:%d", path->bier_fmask.fp_bier_fmask); 
580         s = format (s, " via-dpo:[%U:%d]",
581                     format_dpo_type, path->fp_dpo.dpoi_type, 
582                     path->fp_dpo.dpoi_index);
583         break;
584     case FIB_PATH_TYPE_BIER_IMP:
585         s = format (s, "via %U", format_bier_imp,
586                     path->bier_imp.fp_bier_imp, 0, BIER_SHOW_BRIEF);
587         break;
588     case FIB_PATH_TYPE_DVR:
589         s = format (s, " %U",
590                     format_vnet_sw_interface_name,
591                     vnm,
592                     vnet_get_sw_interface(
593                         vnm,
594                         path->dvr.fp_interface));
595         break;
596     case FIB_PATH_TYPE_DEAG:
597         s = format (s, " %sfib-index:%d",
598                     (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_RPF_ID ?  "m" : ""),
599                     path->deag.fp_tbl_id);
600         break;
601     case FIB_PATH_TYPE_RECEIVE:
602     case FIB_PATH_TYPE_INTF_RX:
603     case FIB_PATH_TYPE_SPECIAL:
604     case FIB_PATH_TYPE_EXCLUSIVE:
605         if (dpo_id_is_valid(&path->fp_dpo))
606         {
607             s = format(s, "%U", format_dpo_id,
608                        &path->fp_dpo, indent+2);
609         }
610         break;
611     }
612     return (s);
613 }
614
615 /*
616  * fib_path_last_lock_gone
617  *
618  * We don't share paths, we share path lists, so the [un]lock functions
619  * are no-ops
620  */
621 static void
622 fib_path_last_lock_gone (fib_node_t *node)
623 {
624     ASSERT(0);
625 }
626
627 static fib_path_t*
628 fib_path_attached_next_hop_get_adj (fib_path_t *path,
629                                     vnet_link_t link,
630                                     dpo_id_t *dpo)
631 {
632     fib_node_index_t fib_path_index;
633     fib_protocol_t nh_proto;
634     adj_index_t ai;
635
636     fib_path_index = fib_path_get_index(path);
637     nh_proto = dpo_proto_to_fib(path->fp_nh_proto);
638
639     if (vnet_sw_interface_is_p2p(vnet_get_main(),
640                                  path->attached_next_hop.fp_interface))
641     {
642         /*
643          * if the interface is p2p then the adj for the specific
644          * neighbour on that link will never exist. on p2p links
645          * the subnet address (the attached route) links to the
646          * auto-adj (see below); we want that adj here too.
647          */
648         ai = adj_nbr_add_or_lock(nh_proto, link, &zero_addr,
649                                  path->attached_next_hop.fp_interface);
650     }
651     else
652     {
653         ai = adj_nbr_add_or_lock(nh_proto, link,
654                                  &path->attached_next_hop.fp_nh,
655                                  path->attached_next_hop.fp_interface);
656     }
657
658     dpo_set(dpo, DPO_ADJACENCY, vnet_link_to_dpo_proto(link), ai);
659     adj_unlock(ai);
660
661     return (fib_path_get(fib_path_index));
662 }
663
664 static void
665 fib_path_attached_next_hop_set (fib_path_t *path)
666 {
667     /*
668      * resolve directly via the adjacency described by the
669      * interface and next-hop
670      */
671     path = fib_path_attached_next_hop_get_adj(path,
672                                               dpo_proto_to_link(path->fp_nh_proto),
673                                               &path->fp_dpo);
674
675     ASSERT(dpo_is_adj(&path->fp_dpo));
676
677     /*
678      * become a child of the adjacency so we receive updates
679      * when its rewrite changes
680      */
681     path->fp_sibling = adj_child_add(path->fp_dpo.dpoi_index,
682                                      FIB_NODE_TYPE_PATH,
683                                      fib_path_get_index(path));
684
685     if (!vnet_sw_interface_is_up(vnet_get_main(),
686                                  path->attached_next_hop.fp_interface) ||
687         !adj_is_up(path->fp_dpo.dpoi_index))
688     {
689         path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
690     }
691 }
692
693 static void
694 fib_path_attached_get_adj (fib_path_t *path,
695                            vnet_link_t link,
696                            dpo_id_t *dpo)
697 {
698     fib_protocol_t nh_proto;
699
700     nh_proto = dpo_proto_to_fib(path->fp_nh_proto);
701
702     if (vnet_sw_interface_is_p2p(vnet_get_main(),
703                                  path->attached.fp_interface))
704     {
705         /*
706          * point-2-point interfaces do not require a glean, since
707          * there is nothing to ARP. Install a rewrite/nbr adj instead
708          */
709         adj_index_t ai;
710
711         ai = adj_nbr_add_or_lock(nh_proto, link, &zero_addr,
712                                  path->attached.fp_interface);
713
714         dpo_set(dpo, DPO_ADJACENCY, vnet_link_to_dpo_proto(link), ai);
715         adj_unlock(ai);
716     }
717     else if (vnet_sw_interface_is_nbma(vnet_get_main(),
718                                        path->attached.fp_interface))
719     {
720         dpo_copy(dpo, drop_dpo_get(path->fp_nh_proto));
721     }
722     else
723     {
724         adj_index_t ai;
725
726         ai = adj_glean_add_or_lock(nh_proto, link,
727                                    path->attached.fp_interface,
728                                    &path->attached.fp_connected);
729         dpo_set(dpo, DPO_ADJACENCY_GLEAN, vnet_link_to_dpo_proto(link), ai);
730         adj_unlock(ai);
731     }
732 }
733
734 /*
735  * create or update the path's recursive adj
736  */
737 static void
738 fib_path_recursive_adj_update (fib_path_t *path,
739                                fib_forward_chain_type_t fct,
740                                dpo_id_t *dpo)
741 {
742     dpo_id_t via_dpo = DPO_INVALID;
743
744     /*
745      * get the DPO to resolve through from the via-entry
746      */
747     fib_entry_contribute_forwarding(path->fp_via_fib,
748                                     fct,
749                                     &via_dpo);
750
751
752     /*
753      * hope for the best - clear if restrictions apply.
754      */
755     path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RESOLVED;
756
757     /*
758      * Validate any recursion constraints and over-ride the via
759      * adj if not met
760      */
761     if (path->fp_oper_flags & FIB_PATH_OPER_FLAG_RECURSIVE_LOOP)
762     {
763         path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
764         dpo_copy(&via_dpo, drop_dpo_get(path->fp_nh_proto));
765     }
766     else if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_RESOLVE_HOST)
767     {
768         /*
769          * the via FIB must be a host route.
770          * note the via FIB just added will always be a host route
771          * since it is an RR source added host route. So what we need to
772          * check is whether the route has other sources. If it does then
773          * some other source has added it as a host route. If it doesn't
774          * then it was added only here and inherits forwarding from a cover.
775          * the cover is not a host route.
776          * The RR source is the lowest priority source, so we check if it
777          * is the best. If it is, there are no other sources.
778          */
779         if (fib_entry_get_best_source(path->fp_via_fib) >= FIB_SOURCE_RR)
780         {
781             path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
782             dpo_copy(&via_dpo, drop_dpo_get(path->fp_nh_proto));
783
784             /*
785              * PIC edge trigger. let the load-balance maps know
786              */
787             load_balance_map_path_state_change(fib_path_get_index(path));
788         }
789     }
790     else if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_RESOLVE_ATTACHED)
791     {
792         /*
793          * RR source entries inherit the flags from the cover, so
794          * we can check the via directly
795          */
796         if (!(FIB_ENTRY_FLAG_ATTACHED & fib_entry_get_flags(path->fp_via_fib)))
797         {
798             path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
799             dpo_copy(&via_dpo, drop_dpo_get(path->fp_nh_proto));
800
801             /*
802              * PIC edge trigger. let the load-balance maps know
803              */
804             load_balance_map_path_state_change(fib_path_get_index(path));
805         }
806     }
807     /*
808      * check for over-riding factors on the FIB entry itself
809      */
810     if (!fib_entry_is_resolved(path->fp_via_fib))
811     {
812         path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
813         dpo_copy(&via_dpo, drop_dpo_get(path->fp_nh_proto));
814
815         /*
816          * PIC edge trigger. let the load-balance maps know
817          */
818         load_balance_map_path_state_change(fib_path_get_index(path));
819     }
820
821     /*
822      * If this path is contributing a drop, then it's not resolved
823      */
824     if (dpo_is_drop(&via_dpo) || load_balance_is_drop(&via_dpo))
825     {
826         path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
827     }
828
829     /*
830      * update the path's contributed DPO
831      */
832     dpo_copy(dpo, &via_dpo);
833
834     FIB_PATH_DBG(path, "recursive update:");
835
836     dpo_reset(&via_dpo);
837 }
838
839 /*
840  * re-evaluate the forwarding state for a via fmask path
841  */
842 static void
843 fib_path_bier_fmask_update (fib_path_t *path,
844                             dpo_id_t *dpo)
845 {
846     bier_fmask_contribute_forwarding(path->bier_fmask.fp_bier_fmask, dpo);
847
848     /*
849      * if we are stacking on the drop, then the path is not resolved
850      */
851     if (dpo_is_drop(dpo))
852     {
853         path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
854     }
855     else
856     {
857         path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RESOLVED;
858     }
859 }
860
861 /*
862  * fib_path_is_permanent_drop
863  *
864  * Return !0 if the path is configured to permanently drop,
865  * despite other attributes.
866  */
867 static int
868 fib_path_is_permanent_drop (fib_path_t *path)
869 {
870     return ((path->fp_cfg_flags & FIB_PATH_CFG_FLAG_DROP) ||
871             (path->fp_oper_flags & FIB_PATH_OPER_FLAG_DROP));
872 }
873
874 /*
875  * fib_path_unresolve
876  *
877  * Remove our dependency on the resolution target
878  */
879 static void
880 fib_path_unresolve (fib_path_t *path)
881 {
882     /*
883      * the forced drop path does not need unresolving
884      */
885     if (fib_path_is_permanent_drop(path))
886     {
887         return;
888     }
889
890     switch (path->fp_type)
891     {
892     case FIB_PATH_TYPE_RECURSIVE:
893         if (FIB_NODE_INDEX_INVALID != path->fp_via_fib)
894         {
895             fib_entry_child_remove(path->fp_via_fib,
896                                    path->fp_sibling);
897             fib_table_entry_special_remove(path->recursive.fp_tbl_id,
898                                            fib_entry_get_prefix(path->fp_via_fib),
899                                            FIB_SOURCE_RR);
900             fib_table_unlock(path->recursive.fp_tbl_id,
901                              dpo_proto_to_fib(path->fp_nh_proto),
902                              FIB_SOURCE_RR);
903             path->fp_via_fib = FIB_NODE_INDEX_INVALID;
904         }
905         break;
906     case FIB_PATH_TYPE_BIER_FMASK:
907         bier_fmask_child_remove(path->fp_via_bier_fmask,
908                                 path->fp_sibling);
909         break;
910     case FIB_PATH_TYPE_BIER_IMP:
911         bier_imp_unlock(path->fp_dpo.dpoi_index);
912         break;
913     case FIB_PATH_TYPE_BIER_TABLE:
914         bier_table_ecmp_unlock(path->fp_via_bier_tbl);
915         break;
916     case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
917     case FIB_PATH_TYPE_ATTACHED:
918         if (dpo_is_adj(&path->fp_dpo))
919             adj_child_remove(path->fp_dpo.dpoi_index,
920                              path->fp_sibling);
921         break;
922     case FIB_PATH_TYPE_UDP_ENCAP:
923         udp_encap_unlock(path->fp_dpo.dpoi_index);
924         break;
925     case FIB_PATH_TYPE_EXCLUSIVE:
926         dpo_reset(&path->exclusive.fp_ex_dpo);
927         break;
928     case FIB_PATH_TYPE_SPECIAL:
929     case FIB_PATH_TYPE_RECEIVE:
930     case FIB_PATH_TYPE_INTF_RX:
931     case FIB_PATH_TYPE_DEAG:
932     case FIB_PATH_TYPE_DVR:
933         /*
934          * these hold only the path's DPO, which is reset below.
935          */
936         break;
937     }
938
939     /*
940      * release the adj we were holding and pick up the
941      * drop just in case.
942      */
943     dpo_reset(&path->fp_dpo);
944     path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
945
946     return;
947 }
948
949 static fib_forward_chain_type_t
950 fib_path_to_chain_type (const fib_path_t *path)
951 {
952     if (DPO_PROTO_MPLS == path->fp_nh_proto)
953     {
954         if (FIB_PATH_TYPE_RECURSIVE == path->fp_type &&
955             MPLS_EOS == path->recursive.fp_nh.fp_eos)
956         {
957             return (FIB_FORW_CHAIN_TYPE_MPLS_EOS);
958         }
959         else
960         {
961             return (FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS);
962         }
963     }
964     else
965     {
966         return (fib_forw_chain_type_from_dpo_proto(path->fp_nh_proto));
967     }
968 }
969
970 /*
971  * fib_path_back_walk_notify
972  *
973  * A back walk has reached this path.
974  */
975 static fib_node_back_walk_rc_t
976 fib_path_back_walk_notify (fib_node_t *node,
977                            fib_node_back_walk_ctx_t *ctx)
978 {
979     fib_path_t *path;
980
981     path = fib_path_from_fib_node(node);
982
983     FIB_PATH_DBG(path, "bw:%U",
984                  format_fib_node_bw_reason, ctx->fnbw_reason);
985
986     switch (path->fp_type)
987     {
988     case FIB_PATH_TYPE_RECURSIVE:
989         if (FIB_NODE_BW_REASON_FLAG_EVALUATE & ctx->fnbw_reason)
990         {
991             /*
992              * modify the recursive adjacency to use the new forwarding
993              * of the via-fib.
994              * this update is visible to packets in flight in the DP.
995              */
996             fib_path_recursive_adj_update(
997                 path,
998                 fib_path_to_chain_type(path),
999                 &path->fp_dpo);
1000         }
1001         if ((FIB_NODE_BW_REASON_FLAG_ADJ_UPDATE & ctx->fnbw_reason) ||
1002             (FIB_NODE_BW_REASON_FLAG_ADJ_MTU    & ctx->fnbw_reason) ||
1003             (FIB_NODE_BW_REASON_FLAG_ADJ_DOWN   & ctx->fnbw_reason))
1004         {
1005             /*
1006              * ADJ updates (complete<->incomplete) do not need to propagate to
1007              * recursive entries.
1008              * The only reason it's needed as far back as here is that the adj
1009              * and the incomplete adj are a different DPO type, so the LBs need
1010              * to re-stack.
1011              * If this walk was quashed in the fib_entry, then any non-fib_path
1012              * children (like tunnels that collapse out the LB when they stack)
1013              * would not see the update.
1014              */
1015             return (FIB_NODE_BACK_WALK_CONTINUE);
1016         }
1017         break;
1018     case FIB_PATH_TYPE_BIER_FMASK:
1019         if (FIB_NODE_BW_REASON_FLAG_EVALUATE & ctx->fnbw_reason)
1020         {
1021             /*
1022              * update to use the BIER fmask's new forwarding
1023              */
1024             fib_path_bier_fmask_update(path, &path->fp_dpo);
1025         }
1026         if ((FIB_NODE_BW_REASON_FLAG_ADJ_UPDATE & ctx->fnbw_reason) ||
1027             (FIB_NODE_BW_REASON_FLAG_ADJ_DOWN   & ctx->fnbw_reason))
1028         {
1029             /*
1030              * ADJ updates (complete<->incomplete) do not need to propagate to
1031              * recursive entries.
1032              * The only reason it's needed as far back as here is that the adj
1033              * and the incomplete adj are a different DPO type, so the LBs need
1034              * to re-stack.
1035              * If this walk was quashed in the fib_entry, then any non-fib_path
1036              * children (like tunnels that collapse out the LB when they stack)
1037              * would not see the update.
1038              */
1039             return (FIB_NODE_BACK_WALK_CONTINUE);
1040         }
1041         break;
1042     case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
1043         /*
1044 FIXME comment
1045          * ADJ_UPDATE backwalks pass silently through here and up to
1046          * the path-list when the multipath adj collapse occurs.
1047          * The reason we do this is the assumption that VPP
1048          * runs in an environment where the Control-Plane is remote
1049          * and hence reacts slowly to link up/down. In order to remove
1050          * this down link from the ECMP set quickly, we back-walk.
1051          * VPP also has dedicated CPUs, so we are not stealing resources
1052          * from the CP to do so.
1053          */
1054         if (FIB_NODE_BW_REASON_FLAG_INTERFACE_UP & ctx->fnbw_reason)
1055         {
1056             if (path->fp_oper_flags & FIB_PATH_OPER_FLAG_RESOLVED)
1057             {
1058                 /*
1059                  * already resolved. no need to walk back again
1060                  */
1061                 return (FIB_NODE_BACK_WALK_CONTINUE);
1062             }
1063             path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RESOLVED;
1064         }
1065         if (FIB_NODE_BW_REASON_FLAG_INTERFACE_DOWN & ctx->fnbw_reason)
1066         {
1067             if (!(path->fp_oper_flags & FIB_PATH_OPER_FLAG_RESOLVED))
1068             {
1069                 /*
1070                  * already unresolved. no need to walk back again
1071                  */
1072                 return (FIB_NODE_BACK_WALK_CONTINUE);
1073             }
1074             path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
1075         }
1076         if (FIB_NODE_BW_REASON_FLAG_INTERFACE_DELETE & ctx->fnbw_reason)
1077         {
1078             /*
1079              * The interface this path resolves through has been deleted.
1080              * This will leave the path in a permanent drop state. The route
1081              * needs to be removed and readded (and hence the path-list deleted)
1082              * before it can forward again.
1083              */
1084             fib_path_unresolve(path);
1085             path->fp_oper_flags |= FIB_PATH_OPER_FLAG_DROP;
1086         }
1087         if (FIB_NODE_BW_REASON_FLAG_ADJ_UPDATE & ctx->fnbw_reason)
1088         {
1089             /*
1090              * restack the DPO to pick up the correct DPO sub-type
1091              */
1092             uword if_is_up;
1093
1094             if_is_up = vnet_sw_interface_is_up(
1095                            vnet_get_main(),
1096                            path->attached_next_hop.fp_interface);
1097
1098             path = fib_path_attached_next_hop_get_adj(
1099                 path,
1100                 dpo_proto_to_link(path->fp_nh_proto),
1101                 &path->fp_dpo);
1102
1103             path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
1104             if (if_is_up && adj_is_up(path->fp_dpo.dpoi_index))
1105             {
1106                 path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RESOLVED;
1107             }
1108
1109             if (!if_is_up)
1110             {
1111                 /*
1112                  * If the interface is not up there is no reason to walk
1113                  * back to children. if we did they would only evalute
1114                  * that this path is unresolved and hence it would
1115                  * not contribute the adjacency - so it would be wasted
1116                  * CPU time.
1117                  */
1118                 return (FIB_NODE_BACK_WALK_CONTINUE);
1119             }
1120         }
1121         if (FIB_NODE_BW_REASON_FLAG_ADJ_DOWN & ctx->fnbw_reason)
1122         {
1123             if (!(path->fp_oper_flags & FIB_PATH_OPER_FLAG_RESOLVED))
1124             {
1125                 /*
1126                  * already unresolved. no need to walk back again
1127                  */
1128                 return (FIB_NODE_BACK_WALK_CONTINUE);
1129             }
1130             /*
1131              * the adj has gone down. the path is no longer resolved.
1132              */
1133             path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
1134         }
1135         break;
1136     case FIB_PATH_TYPE_ATTACHED:
1137     case FIB_PATH_TYPE_DVR:
1138         /*
1139          * FIXME; this could schedule a lower priority walk, since attached
1140          * routes are not usually in ECMP configurations so the backwalk to
1141          * the FIB entry does not need to be high priority
1142          */
1143         if (FIB_NODE_BW_REASON_FLAG_INTERFACE_UP & ctx->fnbw_reason)
1144         {
1145             path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RESOLVED;
1146         }
1147         if (FIB_NODE_BW_REASON_FLAG_INTERFACE_DOWN & ctx->fnbw_reason)
1148         {
1149             path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
1150         }
1151         if (FIB_NODE_BW_REASON_FLAG_INTERFACE_DELETE & ctx->fnbw_reason)
1152         {
1153             fib_path_unresolve(path);
1154             path->fp_oper_flags |= FIB_PATH_OPER_FLAG_DROP;
1155         }
1156         break;
1157     case FIB_PATH_TYPE_UDP_ENCAP:
1158     {
1159         dpo_id_t via_dpo = DPO_INVALID;
1160
1161         /*
1162          * hope for the best - clear if restrictions apply.
1163          */
1164         path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RESOLVED;
1165
1166         udp_encap_contribute_forwarding(path->udp_encap.fp_udp_encap_id,
1167                                         path->fp_nh_proto,
1168                                         &via_dpo);
1169         /*
1170          * If this path is contributing a drop, then it's not resolved
1171          */
1172         if (dpo_is_drop(&via_dpo) || load_balance_is_drop(&via_dpo))
1173         {
1174             path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
1175         }
1176
1177         /*
1178          * update the path's contributed DPO
1179          */
1180         dpo_copy(&path->fp_dpo, &via_dpo);
1181         dpo_reset(&via_dpo);
1182         break;
1183     }
1184     case FIB_PATH_TYPE_INTF_RX:
1185         ASSERT(0);
1186     case FIB_PATH_TYPE_DEAG:
1187         /*
1188          * FIXME When VRF delete is allowed this will need a poke.
1189          */
1190     case FIB_PATH_TYPE_SPECIAL:
1191     case FIB_PATH_TYPE_RECEIVE:
1192     case FIB_PATH_TYPE_EXCLUSIVE:
1193     case FIB_PATH_TYPE_BIER_TABLE:
1194     case FIB_PATH_TYPE_BIER_IMP:
1195         /*
1196          * these path types have no parents. so to be
1197          * walked from one is unexpected.
1198          */
1199         ASSERT(0);
1200         break;
1201     }
1202
1203     /*
1204      * propagate the backwalk further to the path-list
1205      */
1206     fib_path_list_back_walk(path->fp_pl_index, ctx);
1207
1208     return (FIB_NODE_BACK_WALK_CONTINUE);
1209 }
1210
1211 static void
1212 fib_path_memory_show (void)
1213 {
1214     fib_show_memory_usage("Path",
1215                           pool_elts(fib_path_pool),
1216                           pool_len(fib_path_pool),
1217                           sizeof(fib_path_t));
1218 }
1219
1220 /*
1221  * The FIB path's graph node virtual function table
1222  */
1223 static const fib_node_vft_t fib_path_vft = {
1224     .fnv_get = fib_path_get_node,
1225     .fnv_last_lock = fib_path_last_lock_gone,
1226     .fnv_back_walk = fib_path_back_walk_notify,
1227     .fnv_mem_show = fib_path_memory_show,
1228 };
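/*
 * Editor's note (not part of the original source): this VFT is what the
 * FIB graph uses to dispatch back-walks, last-lock and mem-show calls to
 * path objects. It is registered during module initialisation (not shown
 * in this excerpt) along the lines of the sketch below; the exact call
 * site is assumed here.
 *
 *   fib_node_register_type(FIB_NODE_TYPE_PATH, &fib_path_vft);
 *
 * After registration, a back-walk arriving at a FIB_NODE_TYPE_PATH node
 * lands in fib_path_back_walk_notify() above.
 */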
1229
1230 static fib_path_cfg_flags_t
1231 fib_path_route_flags_to_cfg_flags (const fib_route_path_t *rpath)
1232 {
1233     fib_path_cfg_flags_t cfg_flags = FIB_PATH_CFG_FLAG_NONE;
1234
1235     if (rpath->frp_flags & FIB_ROUTE_PATH_POP_PW_CW)
1236         cfg_flags |= FIB_PATH_CFG_FLAG_POP_PW_CW;
1237     if (rpath->frp_flags & FIB_ROUTE_PATH_RESOLVE_VIA_HOST)
1238         cfg_flags |= FIB_PATH_CFG_FLAG_RESOLVE_HOST;
1239     if (rpath->frp_flags & FIB_ROUTE_PATH_RESOLVE_VIA_ATTACHED)
1240         cfg_flags |= FIB_PATH_CFG_FLAG_RESOLVE_ATTACHED;
1241     if (rpath->frp_flags & FIB_ROUTE_PATH_LOCAL)
1242         cfg_flags |= FIB_PATH_CFG_FLAG_LOCAL;
1243     if (rpath->frp_flags & FIB_ROUTE_PATH_ATTACHED)
1244         cfg_flags |= FIB_PATH_CFG_FLAG_ATTACHED;
1245     if (rpath->frp_flags & FIB_ROUTE_PATH_INTF_RX)
1246         cfg_flags |= FIB_PATH_CFG_FLAG_INTF_RX;
1247     if (rpath->frp_flags & FIB_ROUTE_PATH_RPF_ID)
1248         cfg_flags |= FIB_PATH_CFG_FLAG_RPF_ID;
1249     if (rpath->frp_flags & FIB_ROUTE_PATH_EXCLUSIVE)
1250         cfg_flags |= FIB_PATH_CFG_FLAG_EXCLUSIVE;
1251     if (rpath->frp_flags & FIB_ROUTE_PATH_DROP)
1252         cfg_flags |= FIB_PATH_CFG_FLAG_DROP;
1253     if (rpath->frp_flags & FIB_ROUTE_PATH_SOURCE_LOOKUP)
1254         cfg_flags |= FIB_PATH_CFG_FLAG_DEAG_SRC;
1255     if (rpath->frp_flags & FIB_ROUTE_PATH_ICMP_UNREACH)
1256         cfg_flags |= FIB_PATH_CFG_FLAG_ICMP_UNREACH;
1257     if (rpath->frp_flags & FIB_ROUTE_PATH_ICMP_PROHIBIT)
1258         cfg_flags |= FIB_PATH_CFG_FLAG_ICMP_PROHIBIT;
1259     if (rpath->frp_flags & FIB_ROUTE_PATH_GLEAN)
1260         cfg_flags |= FIB_PATH_CFG_FLAG_GLEAN;
1261
1262     return (cfg_flags);
1263 }
1264
1265 /*
1266  * fib_path_create
1267  *
1268  * Create and initialise a new path object.
1269  * return the index of the path.
1270  */
1271 fib_node_index_t
1272 fib_path_create (fib_node_index_t pl_index,
1273                  const fib_route_path_t *rpath)
1274 {
1275     fib_path_t *path;
1276
1277     pool_get(fib_path_pool, path);
1278     clib_memset(path, 0, sizeof(*path));
1279
1280     fib_node_init(&path->fp_node,
1281                   FIB_NODE_TYPE_PATH);
1282
1283     dpo_reset(&path->fp_dpo);
1284     path->fp_pl_index = pl_index;
1285     path->fp_nh_proto = rpath->frp_proto;
1286     path->fp_via_fib = FIB_NODE_INDEX_INVALID;
1287     path->fp_weight = rpath->frp_weight;
1288     if (0 == path->fp_weight)
1289     {
1290         /*
1291          * a weight of 0 is a meaningless value. We could either reject it, and thus force
1292          * clients to always use 1, or accept it and fix it up appropriately.
1293          */
1294         path->fp_weight = 1;
1295     }
1296     path->fp_preference = rpath->frp_preference;
1297     path->fp_cfg_flags = fib_path_route_flags_to_cfg_flags(rpath);
1298
1299     /*
1300      * deduce the path's type from the parameters and save what is needed.
1301      */
1302     if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_LOCAL)
1303     {
1304         path->fp_type = FIB_PATH_TYPE_RECEIVE;
1305         path->receive.fp_interface = rpath->frp_sw_if_index;
1306         path->receive.fp_addr = rpath->frp_addr;
1307     }
1308     else if (rpath->frp_flags & FIB_ROUTE_PATH_UDP_ENCAP)
1309     {
1310         path->fp_type = FIB_PATH_TYPE_UDP_ENCAP;
1311         path->udp_encap.fp_udp_encap_id = rpath->frp_udp_encap_id;
1312     }
1313     else if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_INTF_RX)
1314     {
1315         path->fp_type = FIB_PATH_TYPE_INTF_RX;
1316         path->intf_rx.fp_interface = rpath->frp_sw_if_index;
1317     }
1318     else if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_RPF_ID)
1319     {
1320         path->fp_type = FIB_PATH_TYPE_DEAG;
1321         path->deag.fp_tbl_id = rpath->frp_fib_index;
1322         path->deag.fp_rpf_id = rpath->frp_rpf_id;
1323     }
1324     else if (rpath->frp_flags & FIB_ROUTE_PATH_BIER_FMASK)
1325     {
1326         path->fp_type = FIB_PATH_TYPE_BIER_FMASK;
1327         path->bier_fmask.fp_bier_fmask = rpath->frp_bier_fmask;
1328     }
1329     else if (rpath->frp_flags & FIB_ROUTE_PATH_BIER_IMP)
1330     {
1331         path->fp_type = FIB_PATH_TYPE_BIER_IMP;
1332         path->bier_imp.fp_bier_imp = rpath->frp_bier_imp;
1333     }
1334     else if (rpath->frp_flags & FIB_ROUTE_PATH_BIER_TABLE)
1335     {
1336         path->fp_type = FIB_PATH_TYPE_BIER_TABLE;
1337         path->bier_table.fp_bier_tbl = rpath->frp_bier_tbl;
1338     }
1339     else if (rpath->frp_flags & FIB_ROUTE_PATH_DEAG)
1340     {
1341         path->fp_type = FIB_PATH_TYPE_DEAG;
1342         path->deag.fp_tbl_id = rpath->frp_fib_index;
1343     }
1344     else if (rpath->frp_flags & FIB_ROUTE_PATH_DVR)
1345     {
1346         path->fp_type = FIB_PATH_TYPE_DVR;
1347         path->dvr.fp_interface = rpath->frp_sw_if_index;
1348     }
1349     else if (rpath->frp_flags & FIB_ROUTE_PATH_EXCLUSIVE)
1350     {
1351         path->fp_type = FIB_PATH_TYPE_EXCLUSIVE;
1352         dpo_copy(&path->exclusive.fp_ex_dpo, &rpath->dpo);
1353     }
1354     else if ((path->fp_cfg_flags & FIB_PATH_CFG_FLAG_ICMP_PROHIBIT) ||
1355         (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_ICMP_UNREACH))
1356     {
1357         path->fp_type = FIB_PATH_TYPE_SPECIAL;
1358     }
1359     else if ((path->fp_cfg_flags & FIB_PATH_CFG_FLAG_CLASSIFY))
1360     {
1361         path->fp_type = FIB_PATH_TYPE_SPECIAL;
1362         path->classify.fp_classify_table_id = rpath->frp_classify_table_id;
1363     }
1364     else if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_GLEAN)
1365     {
1366         path->fp_type = FIB_PATH_TYPE_ATTACHED;
1367         path->attached.fp_interface = rpath->frp_sw_if_index;
1368         path->attached.fp_connected = rpath->frp_connected;
1369     }
1370     else if (~0 != rpath->frp_sw_if_index)
1371     {
1372         if (ip46_address_is_zero(&rpath->frp_addr))
1373         {
1374             path->fp_type = FIB_PATH_TYPE_ATTACHED;
1375             path->attached.fp_interface = rpath->frp_sw_if_index;
1376         }
1377         else
1378         {
1379             path->fp_type = FIB_PATH_TYPE_ATTACHED_NEXT_HOP;
1380             path->attached_next_hop.fp_interface = rpath->frp_sw_if_index;
1381             path->attached_next_hop.fp_nh = rpath->frp_addr;
1382         }
1383     }
1384     else
1385     {
1386         if (ip46_address_is_zero(&rpath->frp_addr))
1387         {
1388             if (~0 == rpath->frp_fib_index)
1389             {
1390                 path->fp_type = FIB_PATH_TYPE_SPECIAL;
1391             }
1392             else
1393             {
1394                 path->fp_type = FIB_PATH_TYPE_DEAG;
1395                 path->deag.fp_tbl_id = rpath->frp_fib_index;
1396                 path->deag.fp_rpf_id = ~0;
1397             }
1398         }
1399         else
1400         {
1401             path->fp_type = FIB_PATH_TYPE_RECURSIVE;
1402             if (DPO_PROTO_MPLS == path->fp_nh_proto)
1403             {
1404                 path->recursive.fp_nh.fp_local_label = rpath->frp_local_label;
1405                 path->recursive.fp_nh.fp_eos = rpath->frp_eos;
1406             }
1407             else
1408             {
1409                 path->recursive.fp_nh.fp_ip = rpath->frp_addr;
1410             }
1411             path->recursive.fp_tbl_id = rpath->frp_fib_index;
1412         }
1413     }
1414
1415     FIB_PATH_DBG(path, "create");
1416
1417     return (fib_path_get_index(path));
1418 }
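/*
 * Editor's illustrative sketch (not part of the original source): paths
 * are normally created by their owning path-list, but the type-deduction
 * above can be seen with a hypothetical fib_route_path_t. The field
 * values below are examples only:
 *
 *   fib_route_path_t rpath = {
 *       .frp_proto = DPO_PROTO_IP4,
 *       .frp_addr.ip4.as_u32 = clib_host_to_net_u32(0x0a000001),
 *       .frp_sw_if_index = 1,
 *       .frp_fib_index = ~0,
 *       .frp_weight = 1,
 *       .frp_preference = 0,
 *       .frp_flags = FIB_ROUTE_PATH_FLAG_NONE,
 *   };
 *   fib_node_index_t pi = fib_path_create(pl_index, &rpath);
 *
 * A non-zero next-hop plus a valid sw_if_index selects the
 * FIB_PATH_TYPE_ATTACHED_NEXT_HOP branch; zeroing frp_addr instead would
 * yield FIB_PATH_TYPE_ATTACHED.
 */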
1419
1420 /*
1421  * fib_path_create_special
1422  *
1423  * Create and initialise a new path object.
1424  * return the index of the path.
1425  */
1426 fib_node_index_t
1427 fib_path_create_special (fib_node_index_t pl_index,
1428                          dpo_proto_t nh_proto,
1429                          fib_path_cfg_flags_t flags,
1430                          const dpo_id_t *dpo)
1431 {
1432     fib_path_t *path;
1433
1434     pool_get(fib_path_pool, path);
1435     clib_memset(path, 0, sizeof(*path));
1436
1437     fib_node_init(&path->fp_node,
1438                   FIB_NODE_TYPE_PATH);
1439     dpo_reset(&path->fp_dpo);
1440
1441     path->fp_pl_index = pl_index;
1442     path->fp_weight = 1;
1443     path->fp_preference = 0;
1444     path->fp_nh_proto = nh_proto;
1445     path->fp_via_fib = FIB_NODE_INDEX_INVALID;
1446     path->fp_cfg_flags = flags;
1447
1448     if (FIB_PATH_CFG_FLAG_DROP & flags)
1449     {
1450         path->fp_type = FIB_PATH_TYPE_SPECIAL;
1451     }
1452     else if (FIB_PATH_CFG_FLAG_LOCAL & flags)
1453     {
1454         path->fp_type = FIB_PATH_TYPE_RECEIVE;
1455         path->receive.fp_interface = FIB_NODE_INDEX_INVALID;
1456     }
1457     else
1458     {
1459         path->fp_type = FIB_PATH_TYPE_EXCLUSIVE;
1460         ASSERT(NULL != dpo);
1461         dpo_copy(&path->exclusive.fp_ex_dpo, dpo);
1462     }
1463
1464     return (fib_path_get_index(path));
1465 }
1466
1467 /*
1468  * fib_path_copy
1469  *
1470  * Copy a path. return index of new path.
1471  */
1472 fib_node_index_t
1473 fib_path_copy (fib_node_index_t path_index,
1474                fib_node_index_t path_list_index)
1475 {
1476     fib_path_t *path, *orig_path;
1477
1478     pool_get(fib_path_pool, path);
1479
1480     orig_path = fib_path_get(path_index);
1481     ASSERT(NULL != orig_path);
1482
1483     clib_memcpy(path, orig_path, sizeof(*path));
1484
1485     FIB_PATH_DBG(path, "create-copy:%d", path_index);
1486
1487     /*
1488      * reset the dynamic section
1489      */
1490     fib_node_init(&path->fp_node, FIB_NODE_TYPE_PATH);
1491     path->fp_oper_flags     = FIB_PATH_OPER_FLAG_NONE;
1492     path->fp_pl_index  = path_list_index;
1493     path->fp_via_fib   = FIB_NODE_INDEX_INVALID;
1494     clib_memset(&path->fp_dpo, 0, sizeof(path->fp_dpo));
1495     dpo_reset(&path->fp_dpo);
1496
1497     return (fib_path_get_index(path));
1498 }
1499
1500 /*
1501  * fib_path_destroy
1502  *
1503  * destroy a path that is no longer required
1504  */
1505 void
1506 fib_path_destroy (fib_node_index_t path_index)
1507 {
1508     fib_path_t *path;
1509
1510     path = fib_path_get(path_index);
1511
1512     ASSERT(NULL != path);
1513     FIB_PATH_DBG(path, "destroy");
1514
1515     fib_path_unresolve(path);
1516
1517     fib_node_deinit(&path->fp_node);
1518     pool_put(fib_path_pool, path);
1519 }
1520
1521 /*
1522  * fib_path_hash
1523  *
1524  * compute the hash of a path over its configured (not derived) members
1525  */
1526 uword
1527 fib_path_hash (fib_node_index_t path_index)
1528 {
1529     fib_path_t *path;
1530
1531     path = fib_path_get(path_index);
1532
1533     return (hash_memory(STRUCT_MARK_PTR(path, path_hash_start),
1534                         (STRUCT_OFFSET_OF(fib_path_t, path_hash_end) -
1535                          STRUCT_OFFSET_OF(fib_path_t, path_hash_start)),
1536                         0));
1537 }
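/*
 * Editor's note (not part of the original source): the hash covers only
 * the region between the path_hash_start and path_hash_end markers in
 * fib_path_t, i.e. the configured identity of the path (cfg-flags, type,
 * proto, weight, preference and the per-type union). Derived state such
 * as fp_oper_flags and fp_dpo sits outside the markers, so, as a
 * hypothetical invariant, a path's hash is unchanged by resolution:
 *
 *   uword before = fib_path_hash(path_index);
 *   // ... back-walks resolve/unresolve the path ...
 *   ASSERT(before == fib_path_hash(path_index));
 */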
1538
1539 /*
1540  * fib_path_cmp_i
1541  *
1542  * Compare two paths for equivalence.
1543  */
1544 static int
1545 fib_path_cmp_i (const fib_path_t *path1,
1546                 const fib_path_t *path2)
1547 {
1548     int res;
1549
1550     res = 1;
1551
1552     /*
1553      * paths of different types or protocols are not equal.
1554      * paths that differ only in weight and/or preference are the same path.
1555      */
1556     if (path1->fp_type != path2->fp_type)
1557     {
1558         res = (path1->fp_type - path2->fp_type);
1559     }
1560     else if (path1->fp_nh_proto != path2->fp_nh_proto)
1561     {
1562         res = (path1->fp_nh_proto - path2->fp_nh_proto);
1563     }
1564     else
1565     {
1566         /*
1567          * both paths are of the same type.
1568          * consider each type and its attributes in turn.
1569          */
1570         switch (path1->fp_type)
1571         {
1572         case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
1573             res = ip46_address_cmp(&path1->attached_next_hop.fp_nh,
1574                                    &path2->attached_next_hop.fp_nh);
1575             if (0 == res) {
1576                 res = (path1->attached_next_hop.fp_interface -
1577                        path2->attached_next_hop.fp_interface);
1578             }
1579             break;
1580         case FIB_PATH_TYPE_ATTACHED:
1581             res = (path1->attached.fp_interface -
1582                    path2->attached.fp_interface);
1583             break;
1584         case FIB_PATH_TYPE_RECURSIVE:
1585             res = ip46_address_cmp(&path1->recursive.fp_nh.fp_ip,
1586                                    &path2->recursive.fp_nh.fp_ip);
1587  
1588             if (0 == res)
1589             {
1590                 res = (path1->recursive.fp_tbl_id - path2->recursive.fp_tbl_id);
1591             }
1592             break;
1593         case FIB_PATH_TYPE_BIER_FMASK:
1594             res = (path1->bier_fmask.fp_bier_fmask -
1595                    path2->bier_fmask.fp_bier_fmask);
1596             break;
1597         case FIB_PATH_TYPE_BIER_IMP:
1598             res = (path1->bier_imp.fp_bier_imp -
1599                    path2->bier_imp.fp_bier_imp);
1600             break;
1601         case FIB_PATH_TYPE_BIER_TABLE:
1602             res = bier_table_id_cmp(&path1->bier_table.fp_bier_tbl,
1603                                     &path2->bier_table.fp_bier_tbl);
1604             break;
1605         case FIB_PATH_TYPE_DEAG:
1606             res = (path1->deag.fp_tbl_id - path2->deag.fp_tbl_id);
1607             if (0 == res)
1608             {
1609                 res = (path1->deag.fp_rpf_id - path2->deag.fp_rpf_id);
1610             }
1611             break;
1612         case FIB_PATH_TYPE_INTF_RX:
1613             res = (path1->intf_rx.fp_interface - path2->intf_rx.fp_interface);
1614             break;
1615         case FIB_PATH_TYPE_UDP_ENCAP:
1616             res = (path1->udp_encap.fp_udp_encap_id - path2->udp_encap.fp_udp_encap_id);
1617             break;
1618         case FIB_PATH_TYPE_DVR:
1619             res = (path1->dvr.fp_interface - path2->dvr.fp_interface);
1620             break;
1621         case FIB_PATH_TYPE_EXCLUSIVE:
1622             res = dpo_cmp(&path1->exclusive.fp_ex_dpo, &path2->exclusive.fp_ex_dpo);
1623             break;
1624         case FIB_PATH_TYPE_SPECIAL:
1625         case FIB_PATH_TYPE_RECEIVE:
1626             res = 0;
1627             break;
1628         }
1629     }
1630     return (res);
1631 }
1632
1633 /*
1634  * fib_path_cmp_for_sort
1635  *
1636  * Compare two paths for equivalence. Used during path sorting.
1637  * As usual 0 means equal.
1638  */
1639 int
1640 fib_path_cmp_for_sort (void * v1,
1641                        void * v2)
1642 {
1643     fib_node_index_t *pi1 = v1, *pi2 = v2;
1644     fib_path_t *path1, *path2;
1645
1646     path1 = fib_path_get(*pi1);
1647     path2 = fib_path_get(*pi2);
1648
1649     /*
1650      * when sorting paths we want the highest preference paths
1651      * first, so that the set of choices built is in preference order
1652      */
1653     if (path1->fp_preference != path2->fp_preference)
1654     {
1655         return (path1->fp_preference - path2->fp_preference);
1656     }
1657
1658     return (fib_path_cmp_i(path1, path2));
1659 }
1660
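/*
 * Usage sketch (illustrative, not from this file): the owning path-list would
 * typically sort its vector of path indices with this comparator, e.g. using
 * vppinfra's vec_sort_with_function:
 *
 *   fib_node_index_t *path_indices;   // vector of path indices (assumed)
 *
 *   vec_sort_with_function(path_indices, fib_path_cmp_for_sort);
 *
 * after which equivalent paths are adjacent and grouped by preference.
 */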
1661 /*
1662  * fib_path_cmp
1663  *
1664  * Compare two paths for equivalence.
1665  */
1666 int
1667 fib_path_cmp (fib_node_index_t pi1,
1668               fib_node_index_t pi2)
1669 {
1670     fib_path_t *path1, *path2;
1671
1672     path1 = fib_path_get(pi1);
1673     path2 = fib_path_get(pi2);
1674
1675     return (fib_path_cmp_i(path1, path2));
1676 }
1677
1678 int
1679 fib_path_cmp_w_route_path (fib_node_index_t path_index,
1680                            const fib_route_path_t *rpath)
1681 {
1682     fib_path_t *path;
1683     int res;
1684
1685     path = fib_path_get(path_index);
1686
1687     res = 1;
1688
1689     if (path->fp_weight != rpath->frp_weight)
1690     {
1691         res = (path->fp_weight - rpath->frp_weight);
1692     }
1693     else
1694     {
1695         /*
1696          * the weights are equal. compare the path against the route-path
1697          * based on the path's type and its attributes in turn.
1698          */
1699         switch (path->fp_type)
1700         {
1701         case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
1702             res = ip46_address_cmp(&path->attached_next_hop.fp_nh,
1703                                    &rpath->frp_addr);
1704             if (0 == res)
1705             {
1706                 res = (path->attached_next_hop.fp_interface -
1707                        rpath->frp_sw_if_index);
1708             }
1709             break;
1710         case FIB_PATH_TYPE_ATTACHED:
1711             res = (path->attached.fp_interface - rpath->frp_sw_if_index);
1712             break;
1713         case FIB_PATH_TYPE_RECURSIVE:
1714             if (DPO_PROTO_MPLS == path->fp_nh_proto)
1715             {
1716                 res = path->recursive.fp_nh.fp_local_label - rpath->frp_local_label;
1717
1718                 if (res == 0)
1719                 {
1720                     res = path->recursive.fp_nh.fp_eos - rpath->frp_eos;
1721                 }
1722             }
1723             else
1724             {
1725                 res = ip46_address_cmp(&path->recursive.fp_nh.fp_ip,
1726                                        &rpath->frp_addr);
1727             }
1728
1729             if (0 == res)
1730             {
1731                 res = (path->recursive.fp_tbl_id - rpath->frp_fib_index);
1732             }
1733             break;
1734         case FIB_PATH_TYPE_BIER_FMASK:
1735             res = (path->bier_fmask.fp_bier_fmask - rpath->frp_bier_fmask);
1736             break;
1737         case FIB_PATH_TYPE_BIER_IMP:
1738             res = (path->bier_imp.fp_bier_imp - rpath->frp_bier_imp);
1739             break;
1740         case FIB_PATH_TYPE_BIER_TABLE:
1741             res = bier_table_id_cmp(&path->bier_table.fp_bier_tbl,
1742                                     &rpath->frp_bier_tbl);
1743             break;
1744         case FIB_PATH_TYPE_INTF_RX:
1745             res = (path->intf_rx.fp_interface - rpath->frp_sw_if_index);
1746             break;
1747         case FIB_PATH_TYPE_UDP_ENCAP:
1748             res = (path->udp_encap.fp_udp_encap_id - rpath->frp_udp_encap_id);
1749             break;
1750         case FIB_PATH_TYPE_DEAG:
1751             res = (path->deag.fp_tbl_id - rpath->frp_fib_index);
1752             if (0 == res)
1753             {
1754                 res = (path->deag.fp_rpf_id - rpath->frp_rpf_id);
1755             }
1756             break;
1757         case FIB_PATH_TYPE_DVR:
1758             res = (path->dvr.fp_interface - rpath->frp_sw_if_index);
1759             break;
1760         case FIB_PATH_TYPE_EXCLUSIVE:
1761             res = dpo_cmp(&path->exclusive.fp_ex_dpo, &rpath->dpo);
1762             break;
1763         case FIB_PATH_TYPE_RECEIVE:
1764             if (rpath->frp_flags & FIB_ROUTE_PATH_LOCAL)
1765             {
1766                 res = 0;
1767             }
1768             else
1769             {
1770                 res = 1;
1771             }
1772             break;
1773         case FIB_PATH_TYPE_SPECIAL:
1774             res = 0;
1775             break;
1776         }
1777     }
1778     return (res);
1779 }
1780
1781 /*
1782  * fib_path_recursive_loop_detect
1783  *
1784  * A forward walk of the FIB object graph to detect a cycle/loop. This
1785  * walk is initiated when an entry is linked to a new path-list or unlinked from an old one.
1786  * The entry vector passed contains all the FIB entries that are children of this
1787  * path (i.e. all the entries encountered on the walk so far). If this vector
1788  * contains the entry this path resolves via, then a loop is about to form.
1789  * The loop must be allowed to form, since we need the dependencies in place
1790  * so that we can track when the loop breaks.
1791  * However, we MUST not produce a loop in the forwarding graph (else packets
1792  * would loop around the switch path until the loop breaks), so we mark recursive
1793  * paths as looped so that they do not contribute forwarding information.
1794  * By marking the path as looped, an entry such as:
1795  *    X/Y
1796  *     via a.a.a.a (looped)
1797  *     via b.b.b.b (not looped)
1798  * can still forward using the info provided by b.b.b.b only
1799  */
1800 int
1801 fib_path_recursive_loop_detect (fib_node_index_t path_index,
1802                                 fib_node_index_t **entry_indicies)
1803 {
1804     fib_path_t *path;
1805
1806     path = fib_path_get(path_index);
1807
1808     /*
1809      * the forced drop path is never looped, because it is never resolved.
1810      */
1811     if (fib_path_is_permanent_drop(path))
1812     {
1813         return (0);
1814     }
1815
1816     switch (path->fp_type)
1817     {
1818     case FIB_PATH_TYPE_RECURSIVE:
1819     {
1820         fib_node_index_t *entry_index, *entries;
1821         int looped = 0;
1822         entries = *entry_indicies;
1823
1824         vec_foreach(entry_index, entries) {
1825             if (*entry_index == path->fp_via_fib)
1826             {
1827                 /*
1828                  * the entry that is about to link to this path-list (or
1829                  * one of this path-list's children) is the same entry that
1830                  * this recursive path resolves through. this is a cycle.
1831                  * abort the walk.
1832                  */
1833                 looped = 1;
1834                 break;
1835             }
1836         }
1837
1838         if (looped)
1839         {
1840             FIB_PATH_DBG(path, "recursive loop formed");
1841             path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RECURSIVE_LOOP;
1842
1843             dpo_copy(&path->fp_dpo, drop_dpo_get(path->fp_nh_proto));
1844         }
1845         else
1846         {
1847             /*
1848              * no loop here yet. keep forward walking the graph.
1849              */
1850             if (fib_entry_recursive_loop_detect(path->fp_via_fib, entry_indicies))
1851             {
1852                 FIB_PATH_DBG(path, "recursive loop formed");
1853                 path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RECURSIVE_LOOP;
1854             }
1855             else
1856             {
1857                 FIB_PATH_DBG(path, "recursive loop cleared");
1858                 path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RECURSIVE_LOOP;
1859             }
1860         }
1861         break;
1862     }
1863     case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
1864     case FIB_PATH_TYPE_ATTACHED:
1865         if (dpo_is_adj(&path->fp_dpo) &&
1866             adj_recursive_loop_detect(path->fp_dpo.dpoi_index,
1867                                       entry_indicies))
1868         {
1869             FIB_PATH_DBG(path, "recursive loop formed");
1870             path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RECURSIVE_LOOP;
1871         }
1872         else
1873         {
1874             FIB_PATH_DBG(path, "recursive loop cleared");
1875             path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RECURSIVE_LOOP;
1876         }
1877         break;
1878     case FIB_PATH_TYPE_SPECIAL:
1879     case FIB_PATH_TYPE_DEAG:
1880     case FIB_PATH_TYPE_DVR:
1881     case FIB_PATH_TYPE_RECEIVE:
1882     case FIB_PATH_TYPE_INTF_RX:
1883     case FIB_PATH_TYPE_UDP_ENCAP:
1884     case FIB_PATH_TYPE_EXCLUSIVE:
1885     case FIB_PATH_TYPE_BIER_FMASK:
1886     case FIB_PATH_TYPE_BIER_TABLE:
1887     case FIB_PATH_TYPE_BIER_IMP:
1888         /*
1889          * these path types cannot be part of a loop, since they are the leaves
1890          * of the graph.
1891          */
1892         break;
1893     }
1894
1895     return (fib_path_is_looped(path_index));
1896 }
1897
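/*
 * Calling sketch (illustrative, not from this file): the walk is seeded with the
 * entry (or entries) about to (un)link, and the vector grows as the walk recurses
 * through fib_entry_recursive_loop_detect(). A minimal caller, assuming an entry
 * index 'fei' is being linked:
 *
 *   fib_node_index_t *entries = NULL;
 *
 *   vec_add1(entries, fei);
 *   if (fib_path_recursive_loop_detect(path_index, &entries))
 *   {
 *       // the path is now marked looped and resolves via the drop
 *   }
 *   vec_free(entries);
 */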
1898 int
1899 fib_path_resolve (fib_node_index_t path_index)
1900 {
1901     fib_path_t *path;
1902
1903     path = fib_path_get(path_index);
1904
1905     /*
1906      * hope for the best.
1907      */
1908     path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RESOLVED;
1909
1910     /*
1911      * the forced drop path resolves via the drop adj
1912      */
1913     if (fib_path_is_permanent_drop(path))
1914     {
1915         dpo_copy(&path->fp_dpo, drop_dpo_get(path->fp_nh_proto));
1916         path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
1917         return (fib_path_is_resolved(path_index));
1918     }
1919
1920     switch (path->fp_type)
1921     {
1922     case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
1923         fib_path_attached_next_hop_set(path);
1924         break;
1925     case FIB_PATH_TYPE_ATTACHED:
1926     {
1927         dpo_id_t tmp = DPO_INVALID;
1928
1929         /*
1930          * path->attached.fp_interface
1931          */
1932         if (!vnet_sw_interface_is_up(vnet_get_main(),
1933                                      path->attached.fp_interface))
1934         {
1935             path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
1936         }
1937         fib_path_attached_get_adj(path,
1938                                   dpo_proto_to_link(path->fp_nh_proto),
1939                                   &tmp);
1940
1941         /*
1942          * re-fetch after possible mem realloc
1943          */
1944         path = fib_path_get(path_index);
1945         dpo_copy(&path->fp_dpo, &tmp);
1946
1947         /*
1948          * become a child of the adjacency so we receive updates
1949          * when the interface state changes
1950          */
1951         if (dpo_is_adj(&path->fp_dpo))
1952         {
1953             path->fp_sibling = adj_child_add(path->fp_dpo.dpoi_index,
1954                                              FIB_NODE_TYPE_PATH,
1955                                              fib_path_get_index(path));
1956         }
1957         dpo_reset(&tmp);
1958         break;
1959     }
1960     case FIB_PATH_TYPE_RECURSIVE:
1961     {
1962         /*
1963          * Create a RR source entry in the table for the address
1964          * that this path recurses through.
1965          * This resolve action is recursive, hence we may create
1966          * more paths in the process. more creates mean maybe realloc
1967          * of this path.
1968          */
1969         fib_node_index_t fei;
1970         fib_prefix_t pfx;
1971
1972         ASSERT(FIB_NODE_INDEX_INVALID == path->fp_via_fib);
1973
1974         if (DPO_PROTO_MPLS == path->fp_nh_proto)
1975         {
1976             fib_prefix_from_mpls_label(path->recursive.fp_nh.fp_local_label,
1977                                        path->recursive.fp_nh.fp_eos,
1978                                        &pfx);
1979         }
1980         else
1981         {
1982             fib_prefix_from_ip46_addr(&path->recursive.fp_nh.fp_ip, &pfx);
1983         }
1984
1985         fib_table_lock(path->recursive.fp_tbl_id,
1986                        dpo_proto_to_fib(path->fp_nh_proto),
1987                        FIB_SOURCE_RR);
1988         fei = fib_table_entry_special_add(path->recursive.fp_tbl_id,
1989                                           &pfx,
1990                                           FIB_SOURCE_RR,
1991                                           FIB_ENTRY_FLAG_NONE);
1992
1993         path = fib_path_get(path_index);
1994         path->fp_via_fib = fei;
1995
1996         /*
1997          * become a dependent child of the entry so the path is 
1998          * informed when the forwarding for the entry changes.
1999          */
2000         path->fp_sibling = fib_entry_child_add(path->fp_via_fib,
2001                                                FIB_NODE_TYPE_PATH,
2002                                                fib_path_get_index(path));
2003
2004         /*
2005          * create and configure the IP DPO
2006          */
2007         fib_path_recursive_adj_update(
2008             path,
2009             fib_path_to_chain_type(path),
2010             &path->fp_dpo);
2011
2012         break;
2013     }
2014     case FIB_PATH_TYPE_BIER_FMASK:
2015     {
2016         /*
2017          * become a dependent child of the entry so the path is
2018          * informed when the forwarding for the entry changes.
2019          */
2020         path->fp_sibling = bier_fmask_child_add(path->bier_fmask.fp_bier_fmask,
2021                                                 FIB_NODE_TYPE_PATH,
2022                                                 fib_path_get_index(path));
2023
2024         path->fp_via_bier_fmask = path->bier_fmask.fp_bier_fmask;
2025         fib_path_bier_fmask_update(path, &path->fp_dpo);
2026
2027         break;
2028     }
2029     case FIB_PATH_TYPE_BIER_IMP:
2030         bier_imp_lock(path->bier_imp.fp_bier_imp);
2031         bier_imp_contribute_forwarding(path->bier_imp.fp_bier_imp,
2032                                        DPO_PROTO_IP4,
2033                                        &path->fp_dpo);
2034         break;
2035     case FIB_PATH_TYPE_BIER_TABLE:
2036     {
2037         /*
2038          * Find/create the BIER table to link to
2039          */
2040         ASSERT(FIB_NODE_INDEX_INVALID == path->fp_via_bier_tbl);
2041
2042         path->fp_via_bier_tbl =
2043             bier_table_ecmp_create_and_lock(&path->bier_table.fp_bier_tbl);
2044
2045         bier_table_contribute_forwarding(path->fp_via_bier_tbl,
2046                                          &path->fp_dpo);
2047         break;
2048     }
2049     case FIB_PATH_TYPE_SPECIAL:
2050         if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_ICMP_PROHIBIT)
2051         {
2052             ip_null_dpo_add_and_lock (path->fp_nh_proto,
2053                                       IP_NULL_ACTION_SEND_ICMP_PROHIBIT,
2054                                       &path->fp_dpo);
2055         }
2056         else if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_ICMP_UNREACH)
2057         {
2058             ip_null_dpo_add_and_lock (path->fp_nh_proto,
2059                                       IP_NULL_ACTION_SEND_ICMP_UNREACH,
2060                                       &path->fp_dpo);
2061         }
2062         else if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_CLASSIFY)
2063         {
2064             dpo_set (&path->fp_dpo, DPO_CLASSIFY,
2065                      path->fp_nh_proto,
2066                      classify_dpo_create (path->fp_nh_proto,
2067                                           path->classify.fp_classify_table_id));
2068         }
2069         else
2070         {
2071             /*
2072              * Resolve via the drop
2073              */
2074             dpo_copy(&path->fp_dpo, drop_dpo_get(path->fp_nh_proto));
2075         }
2076         break;
2077     case FIB_PATH_TYPE_DEAG:
2078     {
2079         if (DPO_PROTO_BIER == path->fp_nh_proto)
2080         {
2081             bier_disp_table_contribute_forwarding(path->deag.fp_tbl_id,
2082                                                   &path->fp_dpo);
2083         }
2084         else
2085         {
2086             /*
2087              * Resolve via a lookup DPO.
2088              * FIXME. control plane should add routes with a table ID
2089              */
2090             lookup_input_t input;
2091             lookup_cast_t cast;
2092
2093             cast = (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_RPF_ID ?
2094                     LOOKUP_MULTICAST :
2095                     LOOKUP_UNICAST);
2096             input = (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_DEAG_SRC ?
2097                      LOOKUP_INPUT_SRC_ADDR :
2098                      LOOKUP_INPUT_DST_ADDR);
2099
2100             lookup_dpo_add_or_lock_w_fib_index(path->deag.fp_tbl_id,
2101                                                path->fp_nh_proto,
2102                                                cast,
2103                                                input,
2104                                                LOOKUP_TABLE_FROM_CONFIG,
2105                                                &path->fp_dpo);
2106         }
2107         break;
2108     }
2109     case FIB_PATH_TYPE_DVR:
2110         dvr_dpo_add_or_lock(path->dvr.fp_interface,
2111                             path->fp_nh_proto,
2112                             &path->fp_dpo);
2113         break;
2114     case FIB_PATH_TYPE_RECEIVE:
2115         /*
2116          * Resolve via a receive DPO.
2117          */
2118         receive_dpo_add_or_lock(path->fp_nh_proto,
2119                                 path->receive.fp_interface,
2120                                 &path->receive.fp_addr,
2121                                 &path->fp_dpo);
2122         break;
2123     case FIB_PATH_TYPE_UDP_ENCAP:
2124         udp_encap_lock(path->udp_encap.fp_udp_encap_id);
2125         udp_encap_contribute_forwarding(path->udp_encap.fp_udp_encap_id,
2126                                         path->fp_nh_proto,
2127                                         &path->fp_dpo);
2128         break;
2129     case FIB_PATH_TYPE_INTF_RX: {
2130         /*
2131          * Resolve via an interface-rx DPO.
2132          */
2133         interface_rx_dpo_add_or_lock(path->fp_nh_proto,
2134                                      path->intf_rx.fp_interface,
2135                                      &path->fp_dpo);
2136         break;
2137     }
2138     case FIB_PATH_TYPE_EXCLUSIVE:
2139         /*
2140          * Resolve via the user provided DPO
2141          */
2142         dpo_copy(&path->fp_dpo, &path->exclusive.fp_ex_dpo);
2143         break;
2144     }
2145
2146     return (fib_path_is_resolved(path_index));
2147 }
2148
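/*
 * Lifecycle sketch (illustrative, not from this file): a path is typically
 * created from a fib_route_path_t, resolved, and then queried for forwarding.
 * Assuming fib_path_create's signature (path-list index + route-path) and that
 * path_list_index, rpath and fct come from the caller:
 *
 *   fib_node_index_t pi;
 *   dpo_id_t dpo = DPO_INVALID;
 *
 *   pi = fib_path_create(path_list_index, rpath);
 *   fib_path_resolve(pi);
 *   if (fib_path_is_resolved(pi))
 *       fib_path_contribute_forwarding(pi, fct, &dpo);
 *   dpo_reset(&dpo);
 */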
2149 u32
2150 fib_path_get_resolving_interface (fib_node_index_t path_index)
2151 {
2152     fib_path_t *path;
2153
2154     path = fib_path_get(path_index);
2155
2156     switch (path->fp_type)
2157     {
2158     case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
2159         return (path->attached_next_hop.fp_interface);
2160     case FIB_PATH_TYPE_ATTACHED:
2161         return (path->attached.fp_interface);
2162     case FIB_PATH_TYPE_RECEIVE:
2163         return (path->receive.fp_interface);
2164     case FIB_PATH_TYPE_RECURSIVE:
2165         if (fib_path_is_resolved(path_index))
2166         {
2167             return (fib_entry_get_resolving_interface(path->fp_via_fib));
2168         }
2169         break;
2170     case FIB_PATH_TYPE_DVR:
2171         return (path->dvr.fp_interface);
2172     case FIB_PATH_TYPE_INTF_RX:
2173     case FIB_PATH_TYPE_UDP_ENCAP:
2174     case FIB_PATH_TYPE_SPECIAL:
2175     case FIB_PATH_TYPE_DEAG:
2176     case FIB_PATH_TYPE_EXCLUSIVE:
2177     case FIB_PATH_TYPE_BIER_FMASK:
2178     case FIB_PATH_TYPE_BIER_TABLE:
2179     case FIB_PATH_TYPE_BIER_IMP:
2180         break;
2181     }
2182     return (dpo_get_urpf(&path->fp_dpo));
2183 }
2184
2185 index_t
2186 fib_path_get_resolving_index (fib_node_index_t path_index)
2187 {
2188     fib_path_t *path;
2189
2190     path = fib_path_get(path_index);
2191
2192     switch (path->fp_type)
2193     {
2194     case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
2195     case FIB_PATH_TYPE_ATTACHED:
2196     case FIB_PATH_TYPE_RECEIVE:
2197     case FIB_PATH_TYPE_INTF_RX:
2198     case FIB_PATH_TYPE_SPECIAL:
2199     case FIB_PATH_TYPE_DEAG:
2200     case FIB_PATH_TYPE_DVR:
2201     case FIB_PATH_TYPE_EXCLUSIVE:
2202         break;
2203     case FIB_PATH_TYPE_UDP_ENCAP:
2204         return (path->udp_encap.fp_udp_encap_id);
2205     case FIB_PATH_TYPE_RECURSIVE:
2206         return (path->fp_via_fib);
2207     case FIB_PATH_TYPE_BIER_FMASK:
2208         return (path->bier_fmask.fp_bier_fmask);
2209     case FIB_PATH_TYPE_BIER_TABLE:
2210         return (path->fp_via_bier_tbl);
2211     case FIB_PATH_TYPE_BIER_IMP:
2212         return (path->bier_imp.fp_bier_imp);
2213     }
2214     return (~0);
2215 }
2216
2217 adj_index_t
2218 fib_path_get_adj (fib_node_index_t path_index)
2219 {
2220     fib_path_t *path;
2221
2222     path = fib_path_get(path_index);
2223
2224     if (dpo_is_adj(&path->fp_dpo))
2225     {
2226         return (path->fp_dpo.dpoi_index);
2227     }
2228     return (ADJ_INDEX_INVALID);
2229 }
2230
2231 u16
2232 fib_path_get_weight (fib_node_index_t path_index)
2233 {
2234     fib_path_t *path;
2235
2236     path = fib_path_get(path_index);
2237
2238     ASSERT(path);
2239
2240     return (path->fp_weight);
2241 }
2242
2243 u16
2244 fib_path_get_preference (fib_node_index_t path_index)
2245 {
2246     fib_path_t *path;
2247
2248     path = fib_path_get(path_index);
2249
2250     ASSERT(path);
2251
2252     return (path->fp_preference);
2253 }
2254
2255 u32
2256 fib_path_get_rpf_id (fib_node_index_t path_index)
2257 {
2258     fib_path_t *path;
2259
2260     path = fib_path_get(path_index);
2261
2262     ASSERT(path);
2263
2264     if (FIB_PATH_CFG_FLAG_RPF_ID & path->fp_cfg_flags)
2265     {
2266         return (path->deag.fp_rpf_id);
2267     }
2268
2269     return (~0);
2270 }
2271
2272 /**
2273  * @brief Contribute the path's adjacency to the list passed.
2274  * By calling this function over all paths, recursively, a child
2275  * can construct its full set of forwarding adjacencies, and hence its
2276  * uRPF list.
2277  */
2278 void
2279 fib_path_contribute_urpf (fib_node_index_t path_index,
2280                           index_t urpf)
2281 {
2282     fib_path_t *path;
2283
2284     path = fib_path_get(path_index);
2285
2286     /*
2287      * resolved and unresolved paths contribute to the RPF list.
2288      */
2289     switch (path->fp_type)
2290     {
2291     case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
2292         fib_urpf_list_append(urpf, path->attached_next_hop.fp_interface);
2293         break;
2294
2295     case FIB_PATH_TYPE_ATTACHED:
2296         fib_urpf_list_append(urpf, path->attached.fp_interface);
2297         break;
2298
2299     case FIB_PATH_TYPE_RECURSIVE:
2300         if (FIB_NODE_INDEX_INVALID != path->fp_via_fib &&
2301             !fib_path_is_looped(path_index))
2302         {
2303             /*
2304              * a path can be unresolved due to constraints, or unresolved
2305              * because it has no via-entry. with no via-entry there is nothing to contribute.
2306              */
2307             fib_entry_contribute_urpf(path->fp_via_fib, urpf);
2308         }
2309         break;
2310
2311     case FIB_PATH_TYPE_EXCLUSIVE:
2312     case FIB_PATH_TYPE_SPECIAL:
2313     {
2314         /*
2315          * these path types may link to an adj, if that's what
2316          * the client gave
2317          */
2318         u32 rpf_sw_if_index;
2319
2320         rpf_sw_if_index = dpo_get_urpf(&path->fp_dpo);
2321
2322         if (~0 != rpf_sw_if_index)
2323         {
2324             fib_urpf_list_append(urpf, rpf_sw_if_index);
2325         }
2326         break;
2327     }
2328     case FIB_PATH_TYPE_DVR:
2329         fib_urpf_list_append(urpf, path->dvr.fp_interface);
2330         break;
2331     case FIB_PATH_TYPE_UDP_ENCAP:
2332         fib_urpf_list_append(urpf, path->udp_encap.fp_udp_encap_id);
2333         break;
2334     case FIB_PATH_TYPE_DEAG:
2335     case FIB_PATH_TYPE_RECEIVE:
2336     case FIB_PATH_TYPE_INTF_RX:
2337     case FIB_PATH_TYPE_BIER_FMASK:
2338     case FIB_PATH_TYPE_BIER_TABLE:
2339     case FIB_PATH_TYPE_BIER_IMP:
2340         /*
2341          * these path types don't link to an adj
2342          */
2343         break;
2344     }
2345 }
2346
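/*
 * Usage sketch (illustrative, not from this file): a child builds its uRPF list
 * by walking all of its paths, assuming the fib_urpf_list API
 * (fib_urpf_list_alloc_and_lock/_append/_bake):
 *
 *   index_t ui;
 *   fib_node_index_t *path_index;
 *
 *   ui = fib_urpf_list_alloc_and_lock();
 *   vec_foreach(path_index, path_indices)
 *       fib_path_contribute_urpf(*path_index, ui);
 *   fib_urpf_list_bake(ui);   // de-duplicate the collected interfaces
 */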
2347 void
2348 fib_path_stack_mpls_disp (fib_node_index_t path_index,
2349                           dpo_proto_t payload_proto,
2350                           fib_mpls_lsp_mode_t mode,
2351                           dpo_id_t *dpo)
2352 {
2353     fib_path_t *path;
2354
2355     path = fib_path_get(path_index);
2356
2357     ASSERT(path);
2358
2359     switch (path->fp_type)
2360     {
2361     case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
2362     {
2363         dpo_id_t tmp = DPO_INVALID;
2364
2365         dpo_copy(&tmp, dpo);
2366
2367         mpls_disp_dpo_create(payload_proto, ~0, mode, &tmp, dpo);
2368         dpo_reset(&tmp);
2369         break;
2370     }                
2371     case FIB_PATH_TYPE_DEAG:
2372     {
2373         dpo_id_t tmp = DPO_INVALID;
2374
2375         dpo_copy(&tmp, dpo);
2376
2377         mpls_disp_dpo_create(payload_proto,
2378                              path->deag.fp_rpf_id,
2379                              mode, &tmp, dpo);
2380         dpo_reset(&tmp);
2381         break;
2382     }
2383     case FIB_PATH_TYPE_RECEIVE:
2384     case FIB_PATH_TYPE_ATTACHED:
2385     case FIB_PATH_TYPE_RECURSIVE:
2386     case FIB_PATH_TYPE_INTF_RX:
2387     case FIB_PATH_TYPE_UDP_ENCAP:
2388     case FIB_PATH_TYPE_EXCLUSIVE:
2389     case FIB_PATH_TYPE_SPECIAL:
2390     case FIB_PATH_TYPE_BIER_FMASK:
2391     case FIB_PATH_TYPE_BIER_TABLE:
2392     case FIB_PATH_TYPE_BIER_IMP:
2393     case FIB_PATH_TYPE_DVR:
2394         break;
2395     }
2396
2397     if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_POP_PW_CW)
2398     {
2399         dpo_id_t tmp = DPO_INVALID;
2400
2401         dpo_copy(&tmp, dpo);
2402
2403         pw_cw_dpo_create(&tmp, dpo);
2404         dpo_reset(&tmp);
2405     }
2406 }
2407
2408 void
2409 fib_path_contribute_forwarding (fib_node_index_t path_index,
2410                                 fib_forward_chain_type_t fct,
2411                                 dpo_id_t *dpo)
2412 {
2413     fib_path_t *path;
2414
2415     path = fib_path_get(path_index);
2416
2417     ASSERT(path);
2418     ASSERT(FIB_FORW_CHAIN_TYPE_MPLS_EOS != fct);
2419
2420     /*
2421      * The DPO stored in the path was created when the path was resolved.
2422      * It therefore represents the path's 'native' protocol, i.e. IP.
2423      * For all other chain types we need to go and find something else.
2424      */
2425     if (fib_path_to_chain_type(path) == fct)
2426     {
2427         dpo_copy(dpo, &path->fp_dpo);
2428     }
2429     else
2430     {
2431         switch (path->fp_type)
2432         {
2433         case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
2434             switch (fct)
2435             {
2436             case FIB_FORW_CHAIN_TYPE_UNICAST_IP4:
2437             case FIB_FORW_CHAIN_TYPE_UNICAST_IP6:
2438             case FIB_FORW_CHAIN_TYPE_MPLS_EOS:
2439             case FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS:
2440             case FIB_FORW_CHAIN_TYPE_ETHERNET:
2441             case FIB_FORW_CHAIN_TYPE_NSH:
2442             case FIB_FORW_CHAIN_TYPE_MCAST_IP4:
2443             case FIB_FORW_CHAIN_TYPE_MCAST_IP6:
2444                 path = fib_path_attached_next_hop_get_adj(
2445                     path,
2446                     fib_forw_chain_type_to_link_type(fct),
2447                     dpo);
2448                 break;
2449             case FIB_FORW_CHAIN_TYPE_BIER:
2450                 break;
2451             }
2452             break;
2453         case FIB_PATH_TYPE_RECURSIVE:
2454             switch (fct)
2455             {
2456             case FIB_FORW_CHAIN_TYPE_MPLS_EOS:
2457             case FIB_FORW_CHAIN_TYPE_UNICAST_IP4:
2458             case FIB_FORW_CHAIN_TYPE_UNICAST_IP6:
2459             case FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS:
2460             case FIB_FORW_CHAIN_TYPE_MCAST_IP4:
2461             case FIB_FORW_CHAIN_TYPE_MCAST_IP6:
2462             case FIB_FORW_CHAIN_TYPE_BIER:
2463                 fib_path_recursive_adj_update(path, fct, dpo);
2464                 break;
2465             case FIB_FORW_CHAIN_TYPE_ETHERNET:
2466             case FIB_FORW_CHAIN_TYPE_NSH:
2467                 ASSERT(0);
2468                 break;
2469             }
2470             break;
2471         case FIB_PATH_TYPE_BIER_TABLE:
2472             switch (fct)
2473             {
2474             case FIB_FORW_CHAIN_TYPE_BIER:
2475                 bier_table_contribute_forwarding(path->fp_via_bier_tbl, dpo);
2476                 break;
2477             case FIB_FORW_CHAIN_TYPE_MPLS_EOS:
2478             case FIB_FORW_CHAIN_TYPE_UNICAST_IP4:
2479             case FIB_FORW_CHAIN_TYPE_UNICAST_IP6:
2480             case FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS:
2481             case FIB_FORW_CHAIN_TYPE_MCAST_IP4:
2482             case FIB_FORW_CHAIN_TYPE_MCAST_IP6:
2483             case FIB_FORW_CHAIN_TYPE_ETHERNET:
2484             case FIB_FORW_CHAIN_TYPE_NSH:
2485                 ASSERT(0);
2486                 break;
2487             }
2488             break;
2489         case FIB_PATH_TYPE_BIER_FMASK:
2490             switch (fct)
2491             {
2492             case FIB_FORW_CHAIN_TYPE_BIER:
2493                 fib_path_bier_fmask_update(path, dpo);
2494                 break;
2495             case FIB_FORW_CHAIN_TYPE_MPLS_EOS:
2496             case FIB_FORW_CHAIN_TYPE_UNICAST_IP4:
2497             case FIB_FORW_CHAIN_TYPE_UNICAST_IP6:
2498             case FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS:
2499             case FIB_FORW_CHAIN_TYPE_MCAST_IP4:
2500             case FIB_FORW_CHAIN_TYPE_MCAST_IP6:
2501             case FIB_FORW_CHAIN_TYPE_ETHERNET:
2502             case FIB_FORW_CHAIN_TYPE_NSH:
2503                 ASSERT(0);
2504                 break;
2505             }
2506             break;
2507         case FIB_PATH_TYPE_BIER_IMP:
2508             bier_imp_contribute_forwarding(path->bier_imp.fp_bier_imp,
2509                                            fib_forw_chain_type_to_dpo_proto(fct),
2510                                            dpo);
2511             break;
2512         case FIB_PATH_TYPE_DEAG:
2513             switch (fct)
2514             {
2515             case FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS:
2516                 lookup_dpo_add_or_lock_w_table_id(MPLS_FIB_DEFAULT_TABLE_ID,
2517                                                   DPO_PROTO_MPLS,
2518                                                   LOOKUP_UNICAST,
2519                                                   LOOKUP_INPUT_DST_ADDR,
2520                                                   LOOKUP_TABLE_FROM_CONFIG,
2521                                                   dpo);
2522                 break;
2523             case FIB_FORW_CHAIN_TYPE_MPLS_EOS:
2524             case FIB_FORW_CHAIN_TYPE_UNICAST_IP4:
2525             case FIB_FORW_CHAIN_TYPE_UNICAST_IP6:
2526             case FIB_FORW_CHAIN_TYPE_MCAST_IP4:
2527             case FIB_FORW_CHAIN_TYPE_MCAST_IP6:
2528                 dpo_copy(dpo, &path->fp_dpo);
2529                 break;
2530             case FIB_FORW_CHAIN_TYPE_BIER:
2531                 break;
2532             case FIB_FORW_CHAIN_TYPE_ETHERNET:
2533             case FIB_FORW_CHAIN_TYPE_NSH:
2534                 ASSERT(0);
2535                 break;
2536             }
2537             break;
2538         case FIB_PATH_TYPE_EXCLUSIVE:
2539             dpo_copy(dpo, &path->exclusive.fp_ex_dpo);
2540             break;
2541         case FIB_PATH_TYPE_ATTACHED:
2542             switch (fct)
2543             {
2544             case FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS:
2545             case FIB_FORW_CHAIN_TYPE_UNICAST_IP4:
2546             case FIB_FORW_CHAIN_TYPE_UNICAST_IP6:
2547             case FIB_FORW_CHAIN_TYPE_MPLS_EOS:
2548             case FIB_FORW_CHAIN_TYPE_ETHERNET:
2549             case FIB_FORW_CHAIN_TYPE_NSH:
2550             case FIB_FORW_CHAIN_TYPE_BIER:
2551                 fib_path_attached_get_adj(path,
2552                                           fib_forw_chain_type_to_link_type(fct),
2553                                           dpo);
2554                 break;
2555             case FIB_FORW_CHAIN_TYPE_MCAST_IP4:
2556             case FIB_FORW_CHAIN_TYPE_MCAST_IP6:
2557                 {
2558                     adj_index_t ai;
2559
2560                     /*
2561                      * Create the adj needed for sending IP multicast traffic
2562                      */
2563                     if (vnet_sw_interface_is_p2p(vnet_get_main(),
2564                                                  path->attached.fp_interface))
2565                     {
2566                         /*
2567                          * point-2-point interfaces do not require a glean, since
2568                          * there is nothing to ARP. Install a rewrite/nbr adj instead
2569                          */
2570                         ai = adj_nbr_add_or_lock(dpo_proto_to_fib(path->fp_nh_proto),
2571                                                  fib_forw_chain_type_to_link_type(fct),
2572                                                  &zero_addr,
2573                                                  path->attached.fp_interface);
2574                     }
2575                     else
2576                     {
2577                         ai = adj_mcast_add_or_lock(dpo_proto_to_fib(path->fp_nh_proto),
2578                                                    fib_forw_chain_type_to_link_type(fct),
2579                                                    path->attached.fp_interface);
2580                     }
2581                     dpo_set(dpo, DPO_ADJACENCY,
2582                             fib_forw_chain_type_to_dpo_proto(fct),
2583                             ai);
2584                     adj_unlock(ai);
2585                 }
2586                 break;
2587             }
2588             break;
2589         case FIB_PATH_TYPE_INTF_RX:
2590             /*
2591              * Create the interface-rx DPO so packets are received on this interface
2592              */
2593             interface_rx_dpo_add_or_lock(fib_forw_chain_type_to_dpo_proto(fct),
2594                                          path->attached.fp_interface,
2595                                          dpo);
2596             break;
2597         case FIB_PATH_TYPE_UDP_ENCAP:
2598             udp_encap_contribute_forwarding(path->udp_encap.fp_udp_encap_id,
2599                                             path->fp_nh_proto,
2600                                             dpo);
2601             break;
2602         case FIB_PATH_TYPE_RECEIVE:
2603         case FIB_PATH_TYPE_SPECIAL:
2604         case FIB_PATH_TYPE_DVR:
2605             dpo_copy(dpo, &path->fp_dpo);
2606             break;
2607         }
2608     }
2609 }
2610
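/*
 * Usage sketch (illustrative, not from this file): a consumer asks the path for a
 * DPO of the forwarding-chain type it needs and then stacks on it, e.g. filling
 * one bucket of a load-balance; lb_index and bucket are assumed to come from the
 * caller:
 *
 *   dpo_id_t via_dpo = DPO_INVALID;
 *
 *   fib_path_contribute_forwarding(path_index,
 *                                  FIB_FORW_CHAIN_TYPE_UNICAST_IP4,
 *                                  &via_dpo);
 *   load_balance_set_bucket(lb_index, bucket, &via_dpo);
 *   dpo_reset(&via_dpo);
 */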
2611 load_balance_path_t *
2612 fib_path_append_nh_for_multipath_hash (fib_node_index_t path_index,
2613                                        fib_forward_chain_type_t fct,
2614                                        load_balance_path_t *hash_key)
2615 {
2616     load_balance_path_t *mnh;
2617     fib_path_t *path;
2618
2619     path = fib_path_get(path_index);
2620
2621     ASSERT(path);
2622
2623     vec_add2(hash_key, mnh, 1);
2624
2625     mnh->path_weight = path->fp_weight;
2626     mnh->path_index = path_index;
2627
2628     if (fib_path_is_resolved(path_index))
2629     {
2630         fib_path_contribute_forwarding(path_index, fct, &mnh->path_dpo);
2631     }
2632     else
2633     {
2634         dpo_copy(&mnh->path_dpo,
2635                  drop_dpo_get(fib_forw_chain_type_to_dpo_proto(fct)));
2636     }
2637     return (hash_key);
2638 }
2639
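/*
 * Usage sketch (illustrative, not from this file): the per-path hash-key
 * contributions are accumulated into a vector and handed to the load-balance,
 * e.g.:
 *
 *   load_balance_path_t *nhs = NULL;
 *   fib_node_index_t *path_index;
 *
 *   vec_foreach(path_index, path_indices)
 *       nhs = fib_path_append_nh_for_multipath_hash(*path_index, fct, nhs);
 *   load_balance_multipath_update(dpo, nhs, LOAD_BALANCE_FLAG_NONE);
 *   // the caller then resets each nhs[i].path_dpo and frees the vector
 */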
2640 int
2641 fib_path_is_recursive_constrained (fib_node_index_t path_index)
2642 {
2643     fib_path_t *path;
2644
2645     path = fib_path_get(path_index);
2646
2647     return ((FIB_PATH_TYPE_RECURSIVE == path->fp_type) &&
2648             ((path->fp_cfg_flags & FIB_PATH_CFG_FLAG_RESOLVE_ATTACHED) ||
2649              (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_RESOLVE_HOST)));
2650 }
2651
2652 int
2653 fib_path_is_exclusive (fib_node_index_t path_index)
2654 {
2655     fib_path_t *path;
2656
2657     path = fib_path_get(path_index);
2658
2659     return (FIB_PATH_TYPE_EXCLUSIVE == path->fp_type);
2660 }
2661
2662 int
2663 fib_path_is_deag (fib_node_index_t path_index)
2664 {
2665     fib_path_t *path;
2666
2667     path = fib_path_get(path_index);
2668
2669     return (FIB_PATH_TYPE_DEAG == path->fp_type);
2670 }
2671
2672 int
2673 fib_path_is_resolved (fib_node_index_t path_index)
2674 {
2675     fib_path_t *path;
2676
2677     path = fib_path_get(path_index);
2678
2679     return (dpo_id_is_valid(&path->fp_dpo) &&
2680             (path->fp_oper_flags & FIB_PATH_OPER_FLAG_RESOLVED) &&
2681             !fib_path_is_looped(path_index) &&
2682             !fib_path_is_permanent_drop(path));
2683 }
2684
2685 int
2686 fib_path_is_looped (fib_node_index_t path_index)
2687 {
2688     fib_path_t *path;
2689
2690     path = fib_path_get(path_index);
2691
2692     return (path->fp_oper_flags & FIB_PATH_OPER_FLAG_RECURSIVE_LOOP);
2693 }
2694
2695 fib_path_list_walk_rc_t
2696 fib_path_encode (fib_node_index_t path_list_index,
2697                  fib_node_index_t path_index,
2698                  const fib_path_ext_t *path_ext,
2699                  void *args)
2700 {
2701     fib_path_encode_ctx_t *ctx = args;
2702     fib_route_path_t *rpath;
2703     fib_path_t *path;
2704
2705     path = fib_path_get(path_index);
2706     if (!path)
2707       return (FIB_PATH_LIST_WALK_CONTINUE);
2708
2709     vec_add2(ctx->rpaths, rpath, 1);
2710     rpath->frp_weight = path->fp_weight;
2711     rpath->frp_preference = path->fp_preference;
2712     rpath->frp_proto = path->fp_nh_proto;
2713     rpath->frp_sw_if_index = ~0;
2714     rpath->frp_fib_index = 0;
2715
2716     switch (path->fp_type)
2717     {
2718       case FIB_PATH_TYPE_RECEIVE:
2719         rpath->frp_addr = path->receive.fp_addr;
2720         rpath->frp_sw_if_index = path->receive.fp_interface;
2721         rpath->frp_flags |= FIB_ROUTE_PATH_LOCAL;
2722         break;
2723       case FIB_PATH_TYPE_ATTACHED:
2724         rpath->frp_sw_if_index = path->attached.fp_interface;
2725         break;
2726       case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
2727         rpath->frp_sw_if_index = path->attached_next_hop.fp_interface;
2728         rpath->frp_addr = path->attached_next_hop.fp_nh;
2729         break;
2730       case FIB_PATH_TYPE_BIER_FMASK:
2731         rpath->frp_bier_fmask = path->bier_fmask.fp_bier_fmask;
2732         break;
2733       case FIB_PATH_TYPE_SPECIAL:
2734         break;
2735       case FIB_PATH_TYPE_DEAG:
2736         rpath->frp_fib_index = path->deag.fp_tbl_id;
2737         if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_RPF_ID)
2738         {
2739             rpath->frp_flags |= FIB_ROUTE_PATH_RPF_ID;
2740         }
2741         break;
2742       case FIB_PATH_TYPE_RECURSIVE:
2743         rpath->frp_addr = path->recursive.fp_nh.fp_ip;
2744         rpath->frp_fib_index = path->recursive.fp_tbl_id;
2745         break;
2746       case FIB_PATH_TYPE_DVR:
2747         rpath->frp_sw_if_index = path->dvr.fp_interface;
2748         rpath->frp_flags |= FIB_ROUTE_PATH_DVR;
2749         break;
2750       case FIB_PATH_TYPE_UDP_ENCAP:
2751         rpath->frp_udp_encap_id = path->udp_encap.fp_udp_encap_id;
2752         rpath->frp_flags |= FIB_ROUTE_PATH_UDP_ENCAP;
2753         break;
2754       case FIB_PATH_TYPE_INTF_RX:
2755         rpath->frp_sw_if_index = path->receive.fp_interface;
2756         rpath->frp_flags |= FIB_ROUTE_PATH_INTF_RX;
2757         break;
2758       case FIB_PATH_TYPE_EXCLUSIVE:
2759         rpath->frp_flags |= FIB_ROUTE_PATH_EXCLUSIVE;
2760       default:
2761         break;
2762     }
2763
2764     if (path_ext && path_ext->fpe_type == FIB_PATH_EXT_MPLS) 
2765     {
2766         rpath->frp_label_stack = path_ext->fpe_path.frp_label_stack;
2767     }
2768
2769     if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_DROP)
2770         rpath->frp_flags |= FIB_ROUTE_PATH_DROP;
2771     if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_ICMP_UNREACH)
2772         rpath->frp_flags |= FIB_ROUTE_PATH_ICMP_UNREACH;
2773     if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_ICMP_PROHIBIT)
2774         rpath->frp_flags |= FIB_ROUTE_PATH_ICMP_PROHIBIT;
2775
2776     return (FIB_PATH_LIST_WALK_CONTINUE);
2777 }
2778
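/*
 * Usage sketch (illustrative, not from this file): this function is a path-list
 * walk callback used to export paths as fib_route_path_t, assuming the walk
 * helper fib_path_list_walk_w_ext; NULL is passed here for the MPLS path
 * extensions:
 *
 *   fib_path_encode_ctx_t ctx = {
 *       .rpaths = NULL,
 *   };
 *
 *   fib_path_list_walk_w_ext(path_list_index, NULL, fib_path_encode, &ctx);
 *   // ctx.rpaths now holds one entry per path; vec_free it when done
 */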
2779 dpo_proto_t
2780 fib_path_get_proto (fib_node_index_t path_index)
2781 {
2782     fib_path_t *path;
2783
2784     path = fib_path_get(path_index);
2785
2786     return (path->fp_nh_proto);
2787 }
2788
2789 void
2790 fib_path_module_init (void)
2791 {
2792     fib_node_register_type (FIB_NODE_TYPE_PATH, &fib_path_vft);
2793     fib_path_logger = vlib_log_register_class ("fib", "path");
2794 }
2795
2796 static clib_error_t *
2797 show_fib_path_command (vlib_main_t * vm,
2798                         unformat_input_t * input,
2799                         vlib_cli_command_t * cmd)
2800 {
2801     fib_node_index_t pi;
2802     fib_path_t *path;
2803
2804     if (unformat (input, "%d", &pi))
2805     {
2806         /*
2807          * show one in detail
2808          */
2809         if (!pool_is_free_index(fib_path_pool, pi))
2810         {
2811             path = fib_path_get(pi);
2812             u8 *s = format(NULL, "%U", format_fib_path, pi, 1,
2813                            FIB_PATH_FORMAT_FLAGS_NONE);
2814             s = format(s, "\n  children:");
2815             s = fib_node_children_format(path->fp_node.fn_children, s);
2816             vlib_cli_output (vm, "%v", s);
2817             vec_free(s);
2818         }
2819         else
2820         {
2821             vlib_cli_output (vm, "path %d invalid", pi);
2822         }
2823     }
2824     else
2825     {
2826         vlib_cli_output (vm, "FIB Paths");
2827         pool_foreach_index (pi, fib_path_pool)
2828          {
2829             vlib_cli_output (vm, "%U", format_fib_path, pi, 0,
2830                              FIB_PATH_FORMAT_FLAGS_NONE);
2831         }
2832     }
2833
2834     return (NULL);
2835 }
2836
2837 VLIB_CLI_COMMAND (show_fib_path, static) = {
2838   .path = "show fib paths",
2839   .function = show_fib_path_command,
2840   .short_help = "show fib paths",
2841 };
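/*
 * CLI usage (illustrative):
 *   show fib paths           - list all paths in summary form
 *   show fib paths <index>   - show one path in detail, including its children
 */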