[vpp.git] / src / vnet / fib / fib_path.c
1 /*
2  * Copyright (c) 2016 Cisco and/or its affiliates.
3  * Licensed under the Apache License, Version 2.0 (the "License");
4  * you may not use this file except in compliance with the License.
5  * You may obtain a copy of the License at:
6  *
7  *     http://www.apache.org/licenses/LICENSE-2.0
8  *
9  * Unless required by applicable law or agreed to in writing, software
10  * distributed under the License is distributed on an "AS IS" BASIS,
11  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12  * See the License for the specific language governing permissions and
13  * limitations under the License.
14  */
15
16 #include <vlib/vlib.h>
17 #include <vnet/vnet.h>
18 #include <vnet/ip/format.h>
19 #include <vnet/ip/ip.h>
20 #include <vnet/dpo/drop_dpo.h>
21 #include <vnet/dpo/receive_dpo.h>
22 #include <vnet/dpo/load_balance_map.h>
23 #include <vnet/dpo/lookup_dpo.h>
24 #include <vnet/dpo/interface_rx_dpo.h>
25 #include <vnet/dpo/mpls_disposition.h>
26 #include <vnet/dpo/dvr_dpo.h>
27 #include <vnet/dpo/ip_null_dpo.h>
28 #include <vnet/dpo/classify_dpo.h>
29 #include <vnet/dpo/pw_cw.h>
30
31 #include <vnet/adj/adj.h>
32 #include <vnet/adj/adj_mcast.h>
33
34 #include <vnet/fib/fib_path.h>
35 #include <vnet/fib/fib_node.h>
36 #include <vnet/fib/fib_table.h>
37 #include <vnet/fib/fib_entry.h>
38 #include <vnet/fib/fib_path_list.h>
39 #include <vnet/fib/fib_internal.h>
40 #include <vnet/fib/fib_urpf_list.h>
41 #include <vnet/fib/mpls_fib.h>
42 #include <vnet/fib/fib_path_ext.h>
43 #include <vnet/udp/udp_encap.h>
44 #include <vnet/bier/bier_fmask.h>
45 #include <vnet/bier/bier_table.h>
46 #include <vnet/bier/bier_imp.h>
47 #include <vnet/bier/bier_disp_table.h>
48
49 /**
50  * Enumeration of path types
51  */
52 typedef enum fib_path_type_t_ {
53     /**
54      * Marker. Add new types after this one.
55      */
56     FIB_PATH_TYPE_FIRST = 0,
57     /**
58      * Attached-nexthop. An interface and a nexthop are known.
59      */
60     FIB_PATH_TYPE_ATTACHED_NEXT_HOP = FIB_PATH_TYPE_FIRST,
61     /**
62      * attached. Only the interface is known.
63      */
64     FIB_PATH_TYPE_ATTACHED,
65     /**
66      * recursive. Only the next-hop is known.
67      */
68     FIB_PATH_TYPE_RECURSIVE,
69     /**
70      * special. nothing is known. so we drop.
71      */
72     FIB_PATH_TYPE_SPECIAL,
73     /**
74      * exclusive. user provided adj.
75      */
76     FIB_PATH_TYPE_EXCLUSIVE,
77     /**
78      * deag. Link to a lookup adj in the next table
79      */
80     FIB_PATH_TYPE_DEAG,
81     /**
82      * interface receive.
83      */
84     FIB_PATH_TYPE_INTF_RX,
85     /**
86      * Path resolves via a UDP encap object.
87      */
88     FIB_PATH_TYPE_UDP_ENCAP,
89     /**
90      * receive. it's for-us.
91      */
92     FIB_PATH_TYPE_RECEIVE,
93     /**
94      * bier-imp. it's via a BIER imposition.
95      */
96     FIB_PATH_TYPE_BIER_IMP,
97     /**
98      * bier-table. it's via a BIER ECMP-table.
99      */
100     FIB_PATH_TYPE_BIER_TABLE,
101     /**
102      * bier-fmask. it's via a BIER f-mask.
103      */
104     FIB_PATH_TYPE_BIER_FMASK,
105     /**
106      * via a DVR.
107      */
108     FIB_PATH_TYPE_DVR,
109 } __attribute__ ((packed)) fib_path_type_t;
110
111 #define FIB_PATH_TYPES {                                        \
112     [FIB_PATH_TYPE_ATTACHED_NEXT_HOP] = "attached-nexthop",     \
113     [FIB_PATH_TYPE_ATTACHED]          = "attached",             \
114     [FIB_PATH_TYPE_RECURSIVE]         = "recursive",            \
115     [FIB_PATH_TYPE_SPECIAL]           = "special",              \
116     [FIB_PATH_TYPE_EXCLUSIVE]         = "exclusive",            \
117     [FIB_PATH_TYPE_DEAG]              = "deag",                 \
118     [FIB_PATH_TYPE_INTF_RX]           = "intf-rx",              \
119     [FIB_PATH_TYPE_UDP_ENCAP]         = "udp-encap",            \
120     [FIB_PATH_TYPE_RECEIVE]           = "receive",              \
121     [FIB_PATH_TYPE_BIER_IMP]          = "bier-imp",             \
122     [FIB_PATH_TYPE_BIER_TABLE]        = "bier-table",           \
123     [FIB_PATH_TYPE_BIER_FMASK]        = "bier-fmask",           \
124     [FIB_PATH_TYPE_DVR]               = "dvr",                  \
125 }
126
127 /**
128  * Enumeration of path operational (i.e. derived) attributes
129  */
130 typedef enum fib_path_oper_attribute_t_ {
131     /**
132      * Marker. Add new types after this one.
133      */
134     FIB_PATH_OPER_ATTRIBUTE_FIRST = 0,
135     /**
136      * The path forms part of a recursive loop.
137      */
138     FIB_PATH_OPER_ATTRIBUTE_RECURSIVE_LOOP = FIB_PATH_OPER_ATTRIBUTE_FIRST,
139     /**
140      * The path is resolved
141      */
142     FIB_PATH_OPER_ATTRIBUTE_RESOLVED,
143     /**
144      * The path has become a permanent drop.
145      */
146     FIB_PATH_OPER_ATTRIBUTE_DROP,
147     /**
148      * Marker. Add new types before this one, then update it.
149      */
150     FIB_PATH_OPER_ATTRIBUTE_LAST = FIB_PATH_OPER_ATTRIBUTE_DROP,
151 } __attribute__ ((packed)) fib_path_oper_attribute_t;
152
153 /**
154  * The maximum number of path operational attributes
155  */
156 #define FIB_PATH_OPER_ATTRIBUTE_MAX (FIB_PATH_OPER_ATTRIBUTE_LAST + 1)
157
158 #define FIB_PATH_OPER_ATTRIBUTES {                                      \
159     [FIB_PATH_OPER_ATTRIBUTE_RECURSIVE_LOOP] = "recursive-loop",        \
160     [FIB_PATH_OPER_ATTRIBUTE_RESOLVED]       = "resolved",              \
161     [FIB_PATH_OPER_ATTRIBUTE_DROP]           = "drop",                  \
162 }
163
164 #define FOR_EACH_FIB_PATH_OPER_ATTRIBUTE(_item) \
165     for (_item = FIB_PATH_OPER_ATTRIBUTE_FIRST; \
166          _item <= FIB_PATH_OPER_ATTRIBUTE_LAST; \
167          _item++)
168
169 /**
170  * Path flags from the attributes
171  */
172 typedef enum fib_path_oper_flags_t_ {
173     FIB_PATH_OPER_FLAG_NONE = 0,
174     FIB_PATH_OPER_FLAG_RECURSIVE_LOOP = (1 << FIB_PATH_OPER_ATTRIBUTE_RECURSIVE_LOOP),
175     FIB_PATH_OPER_FLAG_DROP = (1 << FIB_PATH_OPER_ATTRIBUTE_DROP),
176     FIB_PATH_OPER_FLAG_RESOLVED = (1 << FIB_PATH_OPER_ATTRIBUTE_RESOLVED),
177 } __attribute__ ((packed)) fib_path_oper_flags_t;
178
179 /**
180  * A FIB path
181  */
182 typedef struct fib_path_t_ {
183     /**
184      * A path is a node in the FIB graph.
185      */
186     fib_node_t fp_node;
187
188     /**
189      * The index of the path-list to which this path belongs
190      */
191     u32 fp_pl_index;
192
193     /**
194      * This marks the start of the memory area used to hash
195      * the path
196      */
197     STRUCT_MARK(path_hash_start);
198
199     /**
200      * Configuration Flags
201      */
202     fib_path_cfg_flags_t fp_cfg_flags;
203
204     /**
205      * The type of the path. This is the selector for the union
206      */
207     fib_path_type_t fp_type;
208
209     /**
210      * The protocol of the next-hop, i.e. the address family of the
211      * next-hop's address. We can't derive this from the address itself
212      * since the address can be all zeros
213      */
214     dpo_proto_t fp_nh_proto;
215
216     /**
217      * UCMP [unnormalised] weight
218      */
219     u8 fp_weight;
220
221     /**
222      * A path preference. 0 is the best.
223      * Only paths of the best preference, that are 'up', are considered
224      * for forwarding.
225      */
226     u8 fp_preference;
227
228     /**
229      * per-type union of the data required to resolve the path
230      */
231     union {
232         struct {
233             /**
234              * The next-hop
235              */
236             ip46_address_t fp_nh;
237             /**
238              * The interface
239              */
240             u32 fp_interface;
241         } attached_next_hop;
242         struct {
243             /**
244              * The Connected local address
245              */
246             fib_prefix_t fp_connected;
247             /**
248              * The interface
249              */
250             u32 fp_interface;
251         } attached;
252         struct {
253             union
254             {
255                 /**
256                  * The next-hop
257                  */
258                 ip46_address_t fp_ip;
259                 struct {
260                     /**
261                      * The local label to resolve through.
262                      */
263                     mpls_label_t fp_local_label;
264                     /**
265                      * The EOS bit of the resolving label
266                      */
267                     mpls_eos_bit_t fp_eos;
268                 };
269             } fp_nh;
270             /**
271              * The FIB table index in which to find the next-hop.
272              */
273             fib_node_index_t fp_tbl_id;
274         } recursive;
275         struct {
276             /**
277              * BIER FMask ID
278              */
279             index_t fp_bier_fmask;
280         } bier_fmask;
281         struct {
282             /**
283              * The BIER table's ID
284              */
285             bier_table_id_t fp_bier_tbl;
286         } bier_table;
287         struct {
288             /**
289              * The BIER imposition object
290              * this is part of the path's key, since the index_t
291              * of an imposition object is the object's key.
292              */
293             index_t fp_bier_imp;
294         } bier_imp;
295         struct {
296             /**
297              * The FIB index in which to perform the next lookup
298              */
299             fib_node_index_t fp_tbl_id;
300             /**
301              * The RPF-ID to tag the packets with
302              */
303             fib_rpf_id_t fp_rpf_id;
304         } deag;
305         struct {
306         } special;
307         struct {
308             /**
309              * The user provided 'exclusive' DPO
310              */
311             dpo_id_t fp_ex_dpo;
312         } exclusive;
313         struct {
314             /**
315              * The interface on which the local address is configured
316              */
317             u32 fp_interface;
318             /**
319              * The next-hop
320              */
321             ip46_address_t fp_addr;
322         } receive;
323         struct {
324             /**
325              * The interface on which the packets will be input.
326              */
327             u32 fp_interface;
328         } intf_rx;
329         struct {
330             /**
331              * The UDP Encap object this path resolves through
332              */
333             u32 fp_udp_encap_id;
334         } udp_encap;
335         struct {
336             /**
337              * The classify table this path resolves through
338              */
339             u32 fp_classify_table_id;
340         } classify;
341         struct {
342             /**
343              * The interface
344              */
345             u32 fp_interface;
346         } dvr;
347     };
348     STRUCT_MARK(path_hash_end);
349
350     /**
351      * Members in this last section represent information that is
352      * derived during resolution. It should not be copied to new paths
353      * nor compared.
354      */
355
356     /**
357      * Operational Flags
358      */
359     fib_path_oper_flags_t fp_oper_flags;
360
361     union {
362         /**
363          * the resolving via fib. not part of the union, since it is not part
364          * of the path's hash.
365          */
366         fib_node_index_t fp_via_fib;
367         /**
368          * the resolving bier-table
369          */
370         index_t fp_via_bier_tbl;
371         /**
372          * the resolving bier-fmask
373          */
374         index_t fp_via_bier_fmask;
375     };
376
377     /**
378      * The Data-path objects through which this path resolves for IP.
379      */
380     dpo_id_t fp_dpo;
381
382     /**
383      * the index of this path in the parent's child list.
384      */
385     u32 fp_sibling;
386 } fib_path_t;
387
388 /*
389  * Array of strings/names for the path types and attributes
390  */
391 static const char *fib_path_type_names[] = FIB_PATH_TYPES;
392 static const char *fib_path_oper_attribute_names[] = FIB_PATH_OPER_ATTRIBUTES;
393 static const char *fib_path_cfg_attribute_names[]  = FIB_PATH_CFG_ATTRIBUTES;
394
395 /*
396  * The memory pool from which we allocate all the paths
397  */
398 static fib_path_t *fib_path_pool;
399
400 /**
401  * the logger
402  */
403 vlib_log_class_t fib_path_logger;
404
405 /*
406  * Debug macro
407  */
408 #define FIB_PATH_DBG(_p, _fmt, _args...)                                \
409 {                                                                       \
410     vlib_log_debug (fib_path_logger,                                    \
411                     "[%U]: " _fmt,                                      \
412                     format_fib_path, fib_path_get_index(_p), 0,         \
413                     FIB_PATH_FORMAT_FLAGS_ONE_LINE,                     \
414                     ##_args);                                           \
415 }
416
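/*
 * fib_path_get
 *
 * Get a path object from its index in the path pool
 */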
417 static fib_path_t *
418 fib_path_get (fib_node_index_t index)
419 {
420     return (pool_elt_at_index(fib_path_pool, index));
421 }
422
423 static fib_node_index_t 
424 fib_path_get_index (fib_path_t *path)
425 {
426     return (path - fib_path_pool);
427 }
428
429 static fib_node_t *
430 fib_path_get_node (fib_node_index_t index)
431 {
432     return ((fib_node_t*)fib_path_get(index));
433 }
434
435 static fib_path_t*
436 fib_path_from_fib_node (fib_node_t *node)
437 {
438     ASSERT(FIB_NODE_TYPE_PATH == node->fn_type);
439     return ((fib_path_t*)node);
440 }
441
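/*
 * format_fib_path
 *
 * Format/display a path. The va_args are: the path's index, the
 * indent and the fib_format_path_flags_t flags.
 */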
442 u8 *
443 format_fib_path (u8 * s, va_list * args)
444 {
445     fib_node_index_t path_index = va_arg (*args, fib_node_index_t);
446     u32 indent = va_arg (*args, u32);
447     fib_format_path_flags_t flags =  va_arg (*args, fib_format_path_flags_t);
448     vnet_main_t * vnm = vnet_get_main();
449     fib_path_oper_attribute_t oattr;
450     fib_path_cfg_attribute_t cattr;
451     fib_path_t *path;
452     const char *eol;
453
454     if (flags & FIB_PATH_FORMAT_FLAGS_ONE_LINE)
455     {
456         eol = "";
457     }
458     else
459     {
460         eol = "\n";
461     }
462
463     path = fib_path_get(path_index);
464
465     s = format (s, "%Upath:[%d] ", format_white_space, indent,
466                 fib_path_get_index(path));
467     s = format (s, "pl-index:%d ", path->fp_pl_index);
468     s = format (s, "%U ", format_dpo_proto, path->fp_nh_proto);
469     s = format (s, "weight=%d ", path->fp_weight);
470     s = format (s, "pref=%d ", path->fp_preference);
471     s = format (s, "%s: ", fib_path_type_names[path->fp_type]);
472     if (FIB_PATH_OPER_FLAG_NONE != path->fp_oper_flags) {
473         s = format(s, " oper-flags:");
474         FOR_EACH_FIB_PATH_OPER_ATTRIBUTE(oattr) {
475             if ((1<<oattr) & path->fp_oper_flags) {
476                 s = format (s, "%s,", fib_path_oper_attribute_names[oattr]);
477             }
478         }
479     }
480     if (FIB_PATH_CFG_FLAG_NONE != path->fp_cfg_flags) {
481         s = format(s, " cfg-flags:");
482         FOR_EACH_FIB_PATH_CFG_ATTRIBUTE(cattr) {
483             if ((1<<cattr) & path->fp_cfg_flags) {
484                 s = format (s, "%s,", fib_path_cfg_attribute_names[cattr]);
485             }
486         }
487     }
488     if (!(flags & FIB_PATH_FORMAT_FLAGS_ONE_LINE))
489         s = format(s, "\n%U", format_white_space, indent+2);
490
491     switch (path->fp_type)
492     {
493     case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
494         s = format (s, "%U", format_ip46_address,
495                     &path->attached_next_hop.fp_nh,
496                     IP46_TYPE_ANY);
497         if (path->fp_oper_flags & FIB_PATH_OPER_FLAG_DROP)
498         {
499             s = format (s, " if_index:%d", path->attached_next_hop.fp_interface);
500         }
501         else
502         {
503             s = format (s, " %U",
504                         format_vnet_sw_interface_name,
505                         vnm,
506                         vnet_get_sw_interface(
507                             vnm,
508                             path->attached_next_hop.fp_interface));
509             if (vnet_sw_interface_is_p2p(vnet_get_main(),
510                                          path->attached_next_hop.fp_interface))
511             {
512                 s = format (s, " (p2p)");
513             }
514         }
515         if (!dpo_id_is_valid(&path->fp_dpo))
516         {
517             s = format(s, "%s%Uunresolved", eol, format_white_space, indent+2);
518         }
519         else
520         {
521             s = format(s, "%s%U%U", eol,
522                        format_white_space, indent,
523                        format_dpo_id,
524                        &path->fp_dpo, 13);
525         }
526         break;
527     case FIB_PATH_TYPE_ATTACHED:
528         if (path->fp_oper_flags & FIB_PATH_OPER_FLAG_DROP)
529         {
530             s = format (s, "if_index:%d", path->attached_next_hop.fp_interface);
531         }
532         else
533         {
534             s = format (s, " %U",
535                         format_vnet_sw_interface_name,
536                         vnm,
537                         vnet_get_sw_interface(
538                             vnm,
539                             path->attached.fp_interface));
540         }
541         break;
542     case FIB_PATH_TYPE_RECURSIVE:
543         if (DPO_PROTO_MPLS == path->fp_nh_proto)
544         {
545             s = format (s, "via %U %U",
546                         format_mpls_unicast_label,
547                         path->recursive.fp_nh.fp_local_label,
548                         format_mpls_eos_bit,
549                         path->recursive.fp_nh.fp_eos);
550         }
551         else
552         {
553             s = format (s, "via %U",
554                         format_ip46_address,
555                         &path->recursive.fp_nh.fp_ip,
556                         IP46_TYPE_ANY);
557         }
558         s = format (s, " in fib:%d",
559                     path->recursive.fp_tbl_id);
560         s = format (s, " via-fib:%d",
561                     path->fp_via_fib);
562         s = format (s, " via-dpo:[%U:%d]",
563                     format_dpo_type, path->fp_dpo.dpoi_type, 
564                     path->fp_dpo.dpoi_index);
565
566         break;
567     case FIB_PATH_TYPE_UDP_ENCAP:
568         s = format (s, "UDP-encap ID:%d", path->udp_encap.fp_udp_encap_id);
569         break;
570     case FIB_PATH_TYPE_BIER_TABLE:
571         s = format (s, "via bier-table:[%U]",
572                     format_bier_table_id,
573                     &path->bier_table.fp_bier_tbl);
574         s = format (s, " via-dpo:[%U:%d]",
575                     format_dpo_type, path->fp_dpo.dpoi_type,
576                     path->fp_dpo.dpoi_index);
577         break;
578     case FIB_PATH_TYPE_BIER_FMASK:
579         s = format (s, "via-fmask:%d", path->bier_fmask.fp_bier_fmask); 
580         s = format (s, " via-dpo:[%U:%d]",
581                     format_dpo_type, path->fp_dpo.dpoi_type, 
582                     path->fp_dpo.dpoi_index);
583         break;
584     case FIB_PATH_TYPE_BIER_IMP:
585         s = format (s, "via %U", format_bier_imp,
586                     path->bier_imp.fp_bier_imp, 0, BIER_SHOW_BRIEF);
587         break;
588     case FIB_PATH_TYPE_DVR:
589         s = format (s, " %U",
590                     format_vnet_sw_interface_name,
591                     vnm,
592                     vnet_get_sw_interface(
593                         vnm,
594                         path->dvr.fp_interface));
595         break;
596     case FIB_PATH_TYPE_DEAG:
597         s = format (s, " %sfib-index:%d",
598                     (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_RPF_ID ?  "m" : ""),
599                     path->deag.fp_tbl_id);
600         break;
601     case FIB_PATH_TYPE_RECEIVE:
602     case FIB_PATH_TYPE_INTF_RX:
603     case FIB_PATH_TYPE_SPECIAL:
604     case FIB_PATH_TYPE_EXCLUSIVE:
605         if (dpo_id_is_valid(&path->fp_dpo))
606         {
607             s = format(s, "%U", format_dpo_id,
608                        &path->fp_dpo, indent+2);
609         }
610         break;
611     }
612     return (s);
613 }
614
615 /*
616  * fib_path_last_lock_gone
617  *
618  * We don't share paths, we share path lists, so the [un]lock functions
619  * are no-ops and a path's last lock should never go away
620  */
621 static void
622 fib_path_last_lock_gone (fib_node_t *node)
623 {
624     ASSERT(0);
625 }
626
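/*
 * fib_path_attached_next_hop_get_adj
 *
 * Find or create the neighbour adjacency for the path's interface and
 * next-hop and set the passed DPO to use it. The path is re-read from
 * the pool before returning, in case the pool was reallocated while
 * the adjacency was being created.
 */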
627 static fib_path_t*
628 fib_path_attached_next_hop_get_adj (fib_path_t *path,
629                                     vnet_link_t link,
630                                     dpo_id_t *dpo)
631 {
632     fib_node_index_t fib_path_index;
633     fib_protocol_t nh_proto;
634     adj_index_t ai;
635
636     fib_path_index = fib_path_get_index(path);
637     nh_proto = dpo_proto_to_fib(path->fp_nh_proto);
638
639     if (vnet_sw_interface_is_p2p(vnet_get_main(),
640                                  path->attached_next_hop.fp_interface))
641     {
642         /*
643          * if the interface is p2p then the adj for the specific
644          * neighbour on that link will never exist. on p2p links
645          * the subnet address (the attached route) links to the
646          * auto-adj (see below), we want that adj here too.
647          */
648         ai = adj_nbr_add_or_lock(nh_proto, link, &zero_addr,
649                                  path->attached_next_hop.fp_interface);
650     }
651     else
652     {
653         ai = adj_nbr_add_or_lock(nh_proto, link,
654                                  &path->attached_next_hop.fp_nh,
655                                  path->attached_next_hop.fp_interface);
656     }
657
658     dpo_set(dpo, DPO_ADJACENCY, vnet_link_to_dpo_proto(link), ai);
659     adj_unlock(ai);
660
661     return (fib_path_get(fib_path_index));
662 }
663
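/*
 * fib_path_attached_next_hop_set
 *
 * Resolve an attached-next-hop path directly via its adjacency and
 * become a child of that adjacency so rewrite changes are signalled.
 * The path is left unresolved if the interface or the adjacency is down.
 */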
664 static void
665 fib_path_attached_next_hop_set (fib_path_t *path)
666 {
667     dpo_id_t tmp = DPO_INVALID;
668
669     /*
670      * resolve directly via the adjacency described by the
671      * interface and next-hop
672      */
673     dpo_copy (&tmp, &path->fp_dpo);
674     path = fib_path_attached_next_hop_get_adj(path,
675                                               dpo_proto_to_link(path->fp_nh_proto),
676                                               &tmp);
677     dpo_copy(&path->fp_dpo, &tmp);
678     dpo_reset(&tmp);
679     ASSERT(dpo_is_adj(&path->fp_dpo));
680
681     /*
682      * become a child of the adjacency so we receive updates
683      * when its rewrite changes
684      */
685     path->fp_sibling = adj_child_add(path->fp_dpo.dpoi_index,
686                                      FIB_NODE_TYPE_PATH,
687                                      fib_path_get_index(path));
688
689     if (!vnet_sw_interface_is_up(vnet_get_main(),
690                                  path->attached_next_hop.fp_interface) ||
691         !adj_is_up(path->fp_dpo.dpoi_index))
692     {
693         path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
694     }
695 }
696
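/*
 * fib_path_attached_get_adj
 *
 * Set the passed DPO to the adjacency an attached path resolves via:
 * a neighbour adj on p2p links, a drop on NBMA links and a glean
 * otherwise.
 */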
697 static void
698 fib_path_attached_get_adj (fib_path_t *path,
699                            vnet_link_t link,
700                            dpo_id_t *dpo)
701 {
702     fib_protocol_t nh_proto;
703
704     nh_proto = dpo_proto_to_fib(path->fp_nh_proto);
705
706     if (vnet_sw_interface_is_p2p(vnet_get_main(),
707                                  path->attached.fp_interface))
708     {
709         /*
710          * point-2-point interfaces do not require a glean, since
711          * there is nothing to ARP. Install a rewrite/nbr adj instead
712          */
713         adj_index_t ai;
714
715         ai = adj_nbr_add_or_lock(nh_proto, link, &zero_addr,
716                                  path->attached.fp_interface);
717
718         dpo_set(dpo, DPO_ADJACENCY, vnet_link_to_dpo_proto(link), ai);
719         adj_unlock(ai);
720     }
721     else if (vnet_sw_interface_is_nbma(vnet_get_main(),
722                                        path->attached.fp_interface))
723     {
724         dpo_copy(dpo, drop_dpo_get(path->fp_nh_proto));
725     }
726     else
727     {
728         adj_index_t ai;
729
730         ai = adj_glean_add_or_lock(nh_proto, link,
731                                    path->attached.fp_interface,
732                                    &path->attached.fp_connected);
733         dpo_set(dpo, DPO_ADJACENCY_GLEAN, vnet_link_to_dpo_proto(link), ai);
734         adj_unlock(ai);
735     }
736 }
737
738 /*
739  * create or update the path's recursive adj
740  */
741 static void
742 fib_path_recursive_adj_update (fib_path_t *path,
743                                fib_forward_chain_type_t fct,
744                                dpo_id_t *dpo)
745 {
746     dpo_id_t via_dpo = DPO_INVALID;
747
748     /*
749      * get the DPO to resolve through from the via-entry
750      */
751     fib_entry_contribute_forwarding(path->fp_via_fib,
752                                     fct,
753                                     &via_dpo);
754
755
756     /*
757      * hope for the best - clear if restrictions apply.
758      */
759     path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RESOLVED;
760
761     /*
762      * Validate any recursion constraints and over-ride the via
763      * adj if not met
764      */
765     if (path->fp_oper_flags & FIB_PATH_OPER_FLAG_RECURSIVE_LOOP)
766     {
767         path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
768         dpo_copy(&via_dpo, drop_dpo_get(path->fp_nh_proto));
769     }
770     else if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_RESOLVE_HOST)
771     {
772         /*
773          * the via FIB must be a host route.
774          * note the via FIB just added will always be a host route
775          * since it is an RR source added host route. So what we need to
776          * check is whether the route has other sources. If it does then
777          * some other source has added it as a host route. If it doesn't
778          * then it was added only here and inherits forwarding from a cover.
779          * the cover is not a host route.
780          * The RR source is the lowest priority source, so we check if it
781          * is the best. if it is, there are no other sources.
782          */
783         if (fib_entry_get_best_source(path->fp_via_fib) >= FIB_SOURCE_RR)
784         {
785             path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
786             dpo_copy(&via_dpo, drop_dpo_get(path->fp_nh_proto));
787
788             /*
789              * PIC edge trigger. let the load-balance maps know
790              */
791             load_balance_map_path_state_change(fib_path_get_index(path));
792         }
793     }
794     else if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_RESOLVE_ATTACHED)
795     {
796         /*
797          * RR source entries inherit the flags from the cover, so
798          * we can check the via directly
799          */
800         if (!(FIB_ENTRY_FLAG_ATTACHED & fib_entry_get_flags(path->fp_via_fib)))
801         {
802             path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
803             dpo_copy(&via_dpo, drop_dpo_get(path->fp_nh_proto));
804
805             /*
806              * PIC edge trigger. let the load-balance maps know
807              */
808             load_balance_map_path_state_change(fib_path_get_index(path));
809         }
810     }
811     /*
812      * check for over-riding factors on the FIB entry itself
813      */
814     if (!fib_entry_is_resolved(path->fp_via_fib))
815     {
816         path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
817         dpo_copy(&via_dpo, drop_dpo_get(path->fp_nh_proto));
818
819         /*
820          * PIC edge trigger. let the load-balance maps know
821          */
822         load_balance_map_path_state_change(fib_path_get_index(path));
823     }
824
825     /*
826      * If this path is contributing a drop, then it's not resolved
827      */
828     if (dpo_is_drop(&via_dpo) || load_balance_is_drop(&via_dpo))
829     {
830         path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
831     }
832
833     /*
834      * update the path's contributed DPO
835      */
836     dpo_copy(dpo, &via_dpo);
837
838     FIB_PATH_DBG(path, "recursive update:");
839
840     dpo_reset(&via_dpo);
841 }
842
843 /*
844  * re-evaluate the forwarding state for a via-fmask path
845  */
846 static void
847 fib_path_bier_fmask_update (fib_path_t *path,
848                             dpo_id_t *dpo)
849 {
850     bier_fmask_contribute_forwarding(path->bier_fmask.fp_bier_fmask, dpo);
851
852     /*
853      * if we are stacking on the drop, then the path is not resolved
854      */
855     if (dpo_is_drop(dpo))
856     {
857         path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
858     }
859     else
860     {
861         path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RESOLVED;
862     }
863 }
864
865 /*
866  * fib_path_is_permanent_drop
867  *
868  * Return !0 if the path is configured to permanently drop,
869  * despite other attributes.
870  */
871 static int
872 fib_path_is_permanent_drop (fib_path_t *path)
873 {
874     return ((path->fp_cfg_flags & FIB_PATH_CFG_FLAG_DROP) ||
875             (path->fp_oper_flags & FIB_PATH_OPER_FLAG_DROP));
876 }
877
878 /*
879  * fib_path_unresolve
880  *
881  * Remove our dependency on the resolution target
882  */
883 static void
884 fib_path_unresolve (fib_path_t *path)
885 {
886     /*
887      * the forced drop path does not need unresolving
888      */
889     if (fib_path_is_permanent_drop(path))
890     {
891         return;
892     }
893
894     switch (path->fp_type)
895     {
896     case FIB_PATH_TYPE_RECURSIVE:
897         if (FIB_NODE_INDEX_INVALID != path->fp_via_fib)
898         {
899             fib_entry_child_remove(path->fp_via_fib,
900                                    path->fp_sibling);
901             fib_table_entry_special_remove(path->recursive.fp_tbl_id,
902                                            fib_entry_get_prefix(path->fp_via_fib),
903                                            FIB_SOURCE_RR);
904             fib_table_unlock(path->recursive.fp_tbl_id,
905                              dpo_proto_to_fib(path->fp_nh_proto),
906                              FIB_SOURCE_RR);
907             path->fp_via_fib = FIB_NODE_INDEX_INVALID;
908         }
909         break;
910     case FIB_PATH_TYPE_BIER_FMASK:
911         bier_fmask_child_remove(path->fp_via_bier_fmask,
912                                 path->fp_sibling);
913         break;
914     case FIB_PATH_TYPE_BIER_IMP:
915         bier_imp_unlock(path->fp_dpo.dpoi_index);
916         break;
917     case FIB_PATH_TYPE_BIER_TABLE:
918         bier_table_ecmp_unlock(path->fp_via_bier_tbl);
919         break;
920     case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
921     case FIB_PATH_TYPE_ATTACHED:
922         if (dpo_is_adj(&path->fp_dpo))
923             adj_child_remove(path->fp_dpo.dpoi_index,
924                              path->fp_sibling);
925         break;
926     case FIB_PATH_TYPE_UDP_ENCAP:
927         udp_encap_unlock(path->fp_dpo.dpoi_index);
928         break;
929     case FIB_PATH_TYPE_EXCLUSIVE:
930         dpo_reset(&path->exclusive.fp_ex_dpo);
931         break;
932     case FIB_PATH_TYPE_SPECIAL:
933     case FIB_PATH_TYPE_RECEIVE:
934     case FIB_PATH_TYPE_INTF_RX:
935     case FIB_PATH_TYPE_DEAG:
936     case FIB_PATH_TYPE_DVR:
937         /*
938          * these hold only the path's DPO, which is reset below.
939          */
940         break;
941     }
942
943     /*
944      * release the adj we were holding and pick up the
945      * drop just in case.
946      */
947     dpo_reset(&path->fp_dpo);
948     path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
949
950     return;
951 }
952
953 static fib_forward_chain_type_t
954 fib_path_to_chain_type (const fib_path_t *path)
955 {
956     if (DPO_PROTO_MPLS == path->fp_nh_proto)
957     {
958         if (FIB_PATH_TYPE_RECURSIVE == path->fp_type &&
959             MPLS_EOS == path->recursive.fp_nh.fp_eos)
960         {
961             return (FIB_FORW_CHAIN_TYPE_MPLS_EOS);
962         }
963         else
964         {
965             return (FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS);
966         }
967     }
968     else
969     {
970         return (fib_forw_chain_type_from_dpo_proto(path->fp_nh_proto));
971     }
972 }
973
974 /*
975  * fib_path_back_walk_notify
976  *
977  * A back walk has reached this path.
978  */
979 static fib_node_back_walk_rc_t
980 fib_path_back_walk_notify (fib_node_t *node,
981                            fib_node_back_walk_ctx_t *ctx)
982 {
983     fib_path_t *path;
984
985     path = fib_path_from_fib_node(node);
986
987     FIB_PATH_DBG(path, "bw:%U",
988                  format_fib_node_bw_reason, ctx->fnbw_reason);
989
990     switch (path->fp_type)
991     {
992     case FIB_PATH_TYPE_RECURSIVE:
993         if (FIB_NODE_BW_REASON_FLAG_EVALUATE & ctx->fnbw_reason)
994         {
995             /*
996              * modify the recursive adjacency to use the new forwarding
997              * of the via-fib.
998              * this update is visible to packets in flight in the DP.
999              */
1000             fib_path_recursive_adj_update(
1001                 path,
1002                 fib_path_to_chain_type(path),
1003                 &path->fp_dpo);
1004         }
1005         if ((FIB_NODE_BW_REASON_FLAG_ADJ_UPDATE & ctx->fnbw_reason) ||
1006             (FIB_NODE_BW_REASON_FLAG_ADJ_MTU    & ctx->fnbw_reason) ||
1007             (FIB_NODE_BW_REASON_FLAG_ADJ_DOWN   & ctx->fnbw_reason))
1008         {
1009             /*
1010              * ADJ updates (complete<->incomplete) do not need to propagate to
1011              * recursive entries.
1012              * The only reason it's needed as far back as here is that the adj
1013              * and the incomplete adj are a different DPO type, so the LBs need
1014              * to re-stack.
1015              * If this walk was quashed in the fib_entry, then any non-fib_path
1016              * children (like tunnels that collapse out the LB when they stack)
1017              * would not see the update.
1018              */
1019             return (FIB_NODE_BACK_WALK_CONTINUE);
1020         }
1021         break;
1022     case FIB_PATH_TYPE_BIER_FMASK:
1023         if (FIB_NODE_BW_REASON_FLAG_EVALUATE & ctx->fnbw_reason)
1024         {
1025             /*
1026              * update to use the BIER fmask's new forwarding
1027              */
1028             fib_path_bier_fmask_update(path, &path->fp_dpo);
1029         }
1030         if ((FIB_NODE_BW_REASON_FLAG_ADJ_UPDATE & ctx->fnbw_reason) ||
1031             (FIB_NODE_BW_REASON_FLAG_ADJ_DOWN   & ctx->fnbw_reason))
1032         {
1033             /*
1034              * ADJ updates (complete<->incomplete) do not need to propagate to
1035              * recursive entries.
1036              * The only reason it's needed as far back as here is that the adj
1037              * and the incomplete adj are a different DPO type, so the LBs need
1038              * to re-stack.
1039              * If this walk was quashed in the fib_entry, then any non-fib_path
1040              * children (like tunnels that collapse out the LB when they stack)
1041              * would not see the update.
1042              */
1043             return (FIB_NODE_BACK_WALK_CONTINUE);
1044         }
1045         break;
1046     case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
1047         /*
1048          * FIXME comment
1049          * ADJ_UPDATE backwalks pass silently through here and up to
1050          * the path-list when the multipath adj collapse occurs.
1051          * The reason we do this is that the assumption is that VPP
1052          * runs in an environment where the Control-Plane is remote
1053          * and hence reacts slowly to link up down. In order to remove
1054          * this down link from the ECMP set quickly, we back-walk.
1055          * VPP also has dedicated CPUs, so we are not stealing resources
1056          * from the CP to do so.
1057          */
1058         if (FIB_NODE_BW_REASON_FLAG_INTERFACE_UP & ctx->fnbw_reason)
1059         {
1060             if (path->fp_oper_flags & FIB_PATH_OPER_FLAG_RESOLVED)
1061             {
1062                 /*
1063                  * already resolved. no need to walk back again
1064                  */
1065                 return (FIB_NODE_BACK_WALK_CONTINUE);
1066             }
1067             path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RESOLVED;
1068         }
1069         if (FIB_NODE_BW_REASON_FLAG_INTERFACE_DOWN & ctx->fnbw_reason)
1070         {
1071             if (!(path->fp_oper_flags & FIB_PATH_OPER_FLAG_RESOLVED))
1072             {
1073                 /*
1074                  * already unresolved. no need to walk back again
1075                  */
1076                 return (FIB_NODE_BACK_WALK_CONTINUE);
1077             }
1078             path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
1079         }
1080         if (FIB_NODE_BW_REASON_FLAG_INTERFACE_DELETE & ctx->fnbw_reason)
1081         {
1082             /*
1083              * The interface this path resolves through has been deleted.
1084              * This will leave the path in a permanent drop state. The route
1085              * needs to be removed and readded (and hence the path-list deleted)
1086              * before it can forward again.
1087              */
1088             fib_path_unresolve(path);
1089             path->fp_oper_flags |= FIB_PATH_OPER_FLAG_DROP;
1090         }
1091         if (FIB_NODE_BW_REASON_FLAG_ADJ_UPDATE & ctx->fnbw_reason)
1092         {
1093             /*
1094              * restack the DPO to pick up the correct DPO sub-type
1095              */
1096             dpo_id_t tmp = DPO_INVALID;
1097             uword if_is_up;
1098
1099             if_is_up = vnet_sw_interface_is_up(
1100                            vnet_get_main(),
1101                            path->attached_next_hop.fp_interface);
1102
1103             dpo_copy (&tmp, &path->fp_dpo);
1104             path = fib_path_attached_next_hop_get_adj(
1105                 path,
1106                 dpo_proto_to_link(path->fp_nh_proto),
1107                 &tmp);
1108             dpo_copy(&path->fp_dpo, &tmp);
1109             dpo_reset(&tmp);
1110
1111             path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
1112             if (if_is_up && adj_is_up(path->fp_dpo.dpoi_index))
1113             {
1114                 path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RESOLVED;
1115             }
1116
1117             if (!if_is_up)
1118             {
1119                 /*
1120                  * If the interface is not up there is no reason to walk
1121                  * back to children. if we did they would only evaluate
1122                  * that this path is unresolved and hence it would
1123                  * not contribute the adjacency - so it would be wasted
1124                  * CPU time.
1125                  */
1126                 return (FIB_NODE_BACK_WALK_CONTINUE);
1127             }
1128         }
1129         if (FIB_NODE_BW_REASON_FLAG_ADJ_DOWN & ctx->fnbw_reason)
1130         {
1131             if (!(path->fp_oper_flags & FIB_PATH_OPER_FLAG_RESOLVED))
1132             {
1133                 /*
1134                  * already unresolved. no need to walk back again
1135                  */
1136                 return (FIB_NODE_BACK_WALK_CONTINUE);
1137             }
1138             /*
1139              * the adj has gone down. the path is no longer resolved.
1140              */
1141             path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
1142         }
1143         break;
1144     case FIB_PATH_TYPE_ATTACHED:
1145     case FIB_PATH_TYPE_DVR:
1146         /*
1147          * FIXME; this could schedule a lower priority walk, since attached
1148          * routes are not usually in ECMP configurations so the backwalk to
1149          * the FIB entry does not need to be high priority
1150          */
1151         if (FIB_NODE_BW_REASON_FLAG_INTERFACE_UP & ctx->fnbw_reason)
1152         {
1153             path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RESOLVED;
1154         }
1155         if (FIB_NODE_BW_REASON_FLAG_INTERFACE_DOWN & ctx->fnbw_reason)
1156         {
1157             path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
1158         }
1159         if (FIB_NODE_BW_REASON_FLAG_INTERFACE_DELETE & ctx->fnbw_reason)
1160         {
1161             fib_path_unresolve(path);
1162             path->fp_oper_flags |= FIB_PATH_OPER_FLAG_DROP;
1163         }
1164         if (FIB_NODE_BW_REASON_FLAG_INTERFACE_BIND & ctx->fnbw_reason)
1165         {
1166             /* bind walks should appear here and pass silently up
1167              * to the fib_entry */
1168         }
1169         break;
1170     case FIB_PATH_TYPE_UDP_ENCAP:
1171     {
1172         dpo_id_t via_dpo = DPO_INVALID;
1173
1174         /*
1175          * hope for the best - clear if restrictions apply.
1176          */
1177         path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RESOLVED;
1178
1179         udp_encap_contribute_forwarding(path->udp_encap.fp_udp_encap_id,
1180                                         path->fp_nh_proto,
1181                                         &via_dpo);
1182         /*
1183          * If this path is contributing a drop, then it's not resolved
1184          */
1185         if (dpo_is_drop(&via_dpo) || load_balance_is_drop(&via_dpo))
1186         {
1187             path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
1188         }
1189
1190         /*
1191          * update the path's contributed DPO
1192          */
1193         dpo_copy(&path->fp_dpo, &via_dpo);
1194         dpo_reset(&via_dpo);
1195         break;
1196     }
1197     case FIB_PATH_TYPE_INTF_RX:
1198         ASSERT(0);
1199     case FIB_PATH_TYPE_DEAG:
1200         /*
1201          * FIXME When VRF delete is allowed this will need a poke.
1202          */
1203     case FIB_PATH_TYPE_SPECIAL:
1204     case FIB_PATH_TYPE_RECEIVE:
1205     case FIB_PATH_TYPE_EXCLUSIVE:
1206     case FIB_PATH_TYPE_BIER_TABLE:
1207     case FIB_PATH_TYPE_BIER_IMP:
1208         /*
1209          * these path types have no parents. so to be
1210          * walked from one is unexpected.
1211          */
1212         ASSERT(0);
1213         break;
1214     }
1215
1216     /*
1217      * propagate the backwalk further to the path-list
1218      */
1219     fib_path_list_back_walk(path->fp_pl_index, ctx);
1220
1221     return (FIB_NODE_BACK_WALK_CONTINUE);
1222 }
1223
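/*
 * fib_path_memory_show
 *
 * Show the memory usage of the path pool
 */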
1224 static void
1225 fib_path_memory_show (void)
1226 {
1227     fib_show_memory_usage("Path",
1228                           pool_elts(fib_path_pool),
1229                           pool_len(fib_path_pool),
1230                           sizeof(fib_path_t));
1231 }
1232
1233 /*
1234  * The FIB path's graph node virtual function table
1235  */
1236 static const fib_node_vft_t fib_path_vft = {
1237     .fnv_get = fib_path_get_node,
1238     .fnv_last_lock = fib_path_last_lock_gone,
1239     .fnv_back_walk = fib_path_back_walk_notify,
1240     .fnv_mem_show = fib_path_memory_show,
1241 };
1242
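/*
 * fib_path_route_flags_to_cfg_flags
 *
 * Convert the route-path flags supplied by the client into the path's
 * internal configuration flags
 */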
1243 static fib_path_cfg_flags_t
1244 fib_path_route_flags_to_cfg_flags (const fib_route_path_t *rpath)
1245 {
1246     fib_path_cfg_flags_t cfg_flags = FIB_PATH_CFG_FLAG_NONE;
1247
1248     if (rpath->frp_flags & FIB_ROUTE_PATH_POP_PW_CW)
1249         cfg_flags |= FIB_PATH_CFG_FLAG_POP_PW_CW;
1250     if (rpath->frp_flags & FIB_ROUTE_PATH_RESOLVE_VIA_HOST)
1251         cfg_flags |= FIB_PATH_CFG_FLAG_RESOLVE_HOST;
1252     if (rpath->frp_flags & FIB_ROUTE_PATH_RESOLVE_VIA_ATTACHED)
1253         cfg_flags |= FIB_PATH_CFG_FLAG_RESOLVE_ATTACHED;
1254     if (rpath->frp_flags & FIB_ROUTE_PATH_LOCAL)
1255         cfg_flags |= FIB_PATH_CFG_FLAG_LOCAL;
1256     if (rpath->frp_flags & FIB_ROUTE_PATH_ATTACHED)
1257         cfg_flags |= FIB_PATH_CFG_FLAG_ATTACHED;
1258     if (rpath->frp_flags & FIB_ROUTE_PATH_INTF_RX)
1259         cfg_flags |= FIB_PATH_CFG_FLAG_INTF_RX;
1260     if (rpath->frp_flags & FIB_ROUTE_PATH_RPF_ID)
1261         cfg_flags |= FIB_PATH_CFG_FLAG_RPF_ID;
1262     if (rpath->frp_flags & FIB_ROUTE_PATH_EXCLUSIVE)
1263         cfg_flags |= FIB_PATH_CFG_FLAG_EXCLUSIVE;
1264     if (rpath->frp_flags & FIB_ROUTE_PATH_DROP)
1265         cfg_flags |= FIB_PATH_CFG_FLAG_DROP;
1266     if (rpath->frp_flags & FIB_ROUTE_PATH_SOURCE_LOOKUP)
1267         cfg_flags |= FIB_PATH_CFG_FLAG_DEAG_SRC;
1268     if (rpath->frp_flags & FIB_ROUTE_PATH_ICMP_UNREACH)
1269         cfg_flags |= FIB_PATH_CFG_FLAG_ICMP_UNREACH;
1270     if (rpath->frp_flags & FIB_ROUTE_PATH_ICMP_PROHIBIT)
1271         cfg_flags |= FIB_PATH_CFG_FLAG_ICMP_PROHIBIT;
1272     if (rpath->frp_flags & FIB_ROUTE_PATH_GLEAN)
1273         cfg_flags |= FIB_PATH_CFG_FLAG_GLEAN;
1274
1275     return (cfg_flags);
1276 }
1277
1278 /*
1279  * fib_path_create
1280  *
1281  * Create and initialise a new path object.
1282  * return the index of the path.
1283  */
1284 fib_node_index_t
1285 fib_path_create (fib_node_index_t pl_index,
1286                  const fib_route_path_t *rpath)
1287 {
1288     fib_path_t *path;
1289
1290     pool_get(fib_path_pool, path);
1291     clib_memset(path, 0, sizeof(*path));
1292
1293     fib_node_init(&path->fp_node,
1294                   FIB_NODE_TYPE_PATH);
1295
1296     dpo_reset(&path->fp_dpo);
1297     path->fp_pl_index = pl_index;
1298     path->fp_nh_proto = rpath->frp_proto;
1299     path->fp_via_fib = FIB_NODE_INDEX_INVALID;
1300     path->fp_weight = rpath->frp_weight;
1301     if (0 == path->fp_weight)
1302     {
1303         /*
1304          * a weight of 0 is a meaningless value. We could either reject it, and thus force
1305          * clients to always use 1, or we can accept it and fix it up appropriately.
1306          */
1307         path->fp_weight = 1;
1308     }
1309     path->fp_preference = rpath->frp_preference;
1310     path->fp_cfg_flags = fib_path_route_flags_to_cfg_flags(rpath);
1311
1312     /*
1313      * deduce the path's type from the parameters and save what is needed.
1314      */
1315     if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_LOCAL)
1316     {
1317         path->fp_type = FIB_PATH_TYPE_RECEIVE;
1318         path->receive.fp_interface = rpath->frp_sw_if_index;
1319         path->receive.fp_addr = rpath->frp_addr;
1320     }
1321     else if (rpath->frp_flags & FIB_ROUTE_PATH_UDP_ENCAP)
1322     {
1323         path->fp_type = FIB_PATH_TYPE_UDP_ENCAP;
1324         path->udp_encap.fp_udp_encap_id = rpath->frp_udp_encap_id;
1325     }
1326     else if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_INTF_RX)
1327     {
1328         path->fp_type = FIB_PATH_TYPE_INTF_RX;
1329         path->intf_rx.fp_interface = rpath->frp_sw_if_index;
1330     }
1331     else if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_RPF_ID)
1332     {
1333         path->fp_type = FIB_PATH_TYPE_DEAG;
1334         path->deag.fp_tbl_id = rpath->frp_fib_index;
1335         path->deag.fp_rpf_id = rpath->frp_rpf_id;
1336     }
1337     else if (rpath->frp_flags & FIB_ROUTE_PATH_BIER_FMASK)
1338     {
1339         path->fp_type = FIB_PATH_TYPE_BIER_FMASK;
1340         path->bier_fmask.fp_bier_fmask = rpath->frp_bier_fmask;
1341     }
1342     else if (rpath->frp_flags & FIB_ROUTE_PATH_BIER_IMP)
1343     {
1344         path->fp_type = FIB_PATH_TYPE_BIER_IMP;
1345         path->bier_imp.fp_bier_imp = rpath->frp_bier_imp;
1346     }
1347     else if (rpath->frp_flags & FIB_ROUTE_PATH_BIER_TABLE)
1348     {
1349         path->fp_type = FIB_PATH_TYPE_BIER_TABLE;
1350         path->bier_table.fp_bier_tbl = rpath->frp_bier_tbl;
1351     }
1352     else if (rpath->frp_flags & FIB_ROUTE_PATH_DEAG)
1353     {
1354         path->fp_type = FIB_PATH_TYPE_DEAG;
1355         path->deag.fp_tbl_id = rpath->frp_fib_index;
1356     }
1357     else if (rpath->frp_flags & FIB_ROUTE_PATH_DVR)
1358     {
1359         path->fp_type = FIB_PATH_TYPE_DVR;
1360         path->dvr.fp_interface = rpath->frp_sw_if_index;
1361     }
1362     else if (rpath->frp_flags & FIB_ROUTE_PATH_EXCLUSIVE)
1363     {
1364         path->fp_type = FIB_PATH_TYPE_EXCLUSIVE;
1365         dpo_copy(&path->exclusive.fp_ex_dpo, &rpath->dpo);
1366     }
1367     else if ((path->fp_cfg_flags & FIB_PATH_CFG_FLAG_ICMP_PROHIBIT) ||
1368         (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_ICMP_UNREACH) ||
1369         (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_DROP))
1370     {
1371         path->fp_type = FIB_PATH_TYPE_SPECIAL;
1372     }
1373     else if ((path->fp_cfg_flags & FIB_PATH_CFG_FLAG_CLASSIFY))
1374     {
1375         path->fp_type = FIB_PATH_TYPE_SPECIAL;
1376         path->classify.fp_classify_table_id = rpath->frp_classify_table_id;
1377     }
1378     else if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_GLEAN)
1379     {
1380         path->fp_type = FIB_PATH_TYPE_ATTACHED;
1381         path->attached.fp_interface = rpath->frp_sw_if_index;
1382         path->attached.fp_connected = rpath->frp_connected;
1383     }
1384     else if (~0 != rpath->frp_sw_if_index)
1385     {
1386         if (ip46_address_is_zero(&rpath->frp_addr))
1387         {
1388             path->fp_type = FIB_PATH_TYPE_ATTACHED;
1389             path->attached.fp_interface = rpath->frp_sw_if_index;
1390         }
1391         else
1392         {
1393             path->fp_type = FIB_PATH_TYPE_ATTACHED_NEXT_HOP;
1394             path->attached_next_hop.fp_interface = rpath->frp_sw_if_index;
1395             path->attached_next_hop.fp_nh = rpath->frp_addr;
1396         }
1397     }
1398     else
1399     {
1400         if (ip46_address_is_zero(&rpath->frp_addr))
1401         {
1402             if (~0 == rpath->frp_fib_index)
1403             {
1404                 path->fp_type = FIB_PATH_TYPE_SPECIAL;
1405             }
1406             else
1407             {
1408                 path->fp_type = FIB_PATH_TYPE_DEAG;
1409                 path->deag.fp_tbl_id = rpath->frp_fib_index;
1410                 path->deag.fp_rpf_id = ~0;
1411             }
1412         }
1413         else
1414         {
1415             path->fp_type = FIB_PATH_TYPE_RECURSIVE;
1416             if (DPO_PROTO_MPLS == path->fp_nh_proto)
1417             {
1418                 path->recursive.fp_nh.fp_local_label = rpath->frp_local_label;
1419                 path->recursive.fp_nh.fp_eos = rpath->frp_eos;
1420             }
1421             else
1422             {
1423                 path->recursive.fp_nh.fp_ip = rpath->frp_addr;
1424             }
1425             path->recursive.fp_tbl_id = rpath->frp_fib_index;
1426         }
1427     }
1428
1429     FIB_PATH_DBG(path, "create");
1430
1431     return (fib_path_get_index(path));
1432 }
1433
1434 /*
1435  * fib_path_create_special
1436  *
1437  * Create and initialise a new path object.
1438  * return the index of the path.
1439  */
1440 fib_node_index_t
1441 fib_path_create_special (fib_node_index_t pl_index,
1442                          dpo_proto_t nh_proto,
1443                          fib_path_cfg_flags_t flags,
1444                          const dpo_id_t *dpo)
1445 {
1446     fib_path_t *path;
1447
1448     pool_get(fib_path_pool, path);
1449     clib_memset(path, 0, sizeof(*path));
1450
1451     fib_node_init(&path->fp_node,
1452                   FIB_NODE_TYPE_PATH);
1453     dpo_reset(&path->fp_dpo);
1454
1455     path->fp_pl_index = pl_index;
1456     path->fp_weight = 1;
1457     path->fp_preference = 0;
1458     path->fp_nh_proto = nh_proto;
1459     path->fp_via_fib = FIB_NODE_INDEX_INVALID;
1460     path->fp_cfg_flags = flags;
1461
1462     if (FIB_PATH_CFG_FLAG_DROP & flags)
1463     {
1464         path->fp_type = FIB_PATH_TYPE_SPECIAL;
1465     }
1466     else if (FIB_PATH_CFG_FLAG_LOCAL & flags)
1467     {
1468         path->fp_type = FIB_PATH_TYPE_RECEIVE;
1469         path->attached.fp_interface = FIB_NODE_INDEX_INVALID;
1470     }
1471     else
1472     {
1473         path->fp_type = FIB_PATH_TYPE_EXCLUSIVE;
1474         ASSERT(NULL != dpo);
1475         dpo_copy(&path->exclusive.fp_ex_dpo, dpo);
1476     }
1477
1478     return (fib_path_get_index(path));
1479 }
1480
1481 /*
1482  * fib_path_copy
1483  *
1484  * Copy a path. return index of new path.
1485  */
1486 fib_node_index_t
1487 fib_path_copy (fib_node_index_t path_index,
1488                fib_node_index_t path_list_index)
1489 {
1490     fib_path_t *path, *orig_path;
1491
1492     pool_get(fib_path_pool, path);
1493
1494     orig_path = fib_path_get(path_index);
1495     ASSERT(NULL != orig_path);
1496
1497     clib_memcpy(path, orig_path, sizeof(*path));
1498
1499     FIB_PATH_DBG(path, "create-copy:%d", path_index);
1500
1501     /*
1502      * reset the dynamic section
1503      */
1504     fib_node_init(&path->fp_node, FIB_NODE_TYPE_PATH);
1505     path->fp_oper_flags     = FIB_PATH_OPER_FLAG_NONE;
1506     path->fp_pl_index  = path_list_index;
1507     path->fp_via_fib   = FIB_NODE_INDEX_INVALID;
1508     clib_memset(&path->fp_dpo, 0, sizeof(path->fp_dpo));
1509     dpo_reset(&path->fp_dpo);
1510
1511     if (path->fp_type == FIB_PATH_TYPE_EXCLUSIVE)
1512     {
1513         clib_memset(&path->exclusive.fp_ex_dpo, 0, sizeof(dpo_id_t));
1514         dpo_copy(&path->exclusive.fp_ex_dpo, &orig_path->exclusive.fp_ex_dpo);
1515     }
1516
1517     return (fib_path_get_index(path));
1518 }
1519
1520 /*
1521  * fib_path_destroy
1522  *
1523  * destroy a path that is no longer required
1524  */
1525 void
1526 fib_path_destroy (fib_node_index_t path_index)
1527 {
1528     fib_path_t *path;
1529
1530     path = fib_path_get(path_index);
1531
1532     ASSERT(NULL != path);
1533     FIB_PATH_DBG(path, "destroy");
1534
1535     fib_path_unresolve(path);
1536
1537     fib_node_deinit(&path->fp_node);
1538     pool_put(fib_path_pool, path);
1539 }
1540
1541 /*
1542  * fib_path_hash
1543  *
1544  * compute the hash of a path over the hashable region of the structure
1545  */
1546 uword
1547 fib_path_hash (fib_node_index_t path_index)
1548 {
1549     fib_path_t *path;
1550
1551     path = fib_path_get(path_index);
1552
1553     return (hash_memory(STRUCT_MARK_PTR(path, path_hash_start),
1554                         (STRUCT_OFFSET_OF(fib_path_t, path_hash_end) -
1555                          STRUCT_OFFSET_OF(fib_path_t, path_hash_start)),
1556                         0));
1557 }
1558
1559 /*
1560  * fib_path_cmp_i
1561  *
1562  * Compare two paths for equivalence.
1563  */
1564 static int
1565 fib_path_cmp_i (const fib_path_t *path1,
1566                 const fib_path_t *path2)
1567 {
1568     int res;
1569
1570     res = 1;
1571
1572     /*
1573      * paths of different types and protocol are not equal.
1574      * paths that differ only in weight and/or preference are the same path.
1575      */
1576     if (path1->fp_type != path2->fp_type)
1577     {
1578         res = (path1->fp_type - path2->fp_type);
1579     }
1580     else if (path1->fp_nh_proto != path2->fp_nh_proto)
1581     {
1582         res = (path1->fp_nh_proto - path2->fp_nh_proto);
1583     }
1584     else
1585     {
1586         /*
1587          * both paths are of the same type.
1588          * consider each type and its attributes in turn.
1589          */
1590         switch (path1->fp_type)
1591         {
1592         case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
1593             res = ip46_address_cmp(&path1->attached_next_hop.fp_nh,
1594                                    &path2->attached_next_hop.fp_nh);
1595             if (0 == res) {
1596                 res = (path1->attached_next_hop.fp_interface -
1597                        path2->attached_next_hop.fp_interface);
1598             }
1599             break;
1600         case FIB_PATH_TYPE_ATTACHED:
1601             res = (path1->attached.fp_interface -
1602                    path2->attached.fp_interface);
1603             break;
1604         case FIB_PATH_TYPE_RECURSIVE:
1605             res = ip46_address_cmp(&path1->recursive.fp_nh.fp_ip,
1606                                    &path2->recursive.fp_nh.fp_ip);
1607  
1608             if (0 == res)
1609             {
1610                 res = (path1->recursive.fp_tbl_id - path2->recursive.fp_tbl_id);
1611             }
1612             break;
1613         case FIB_PATH_TYPE_BIER_FMASK:
1614             res = (path1->bier_fmask.fp_bier_fmask -
1615                    path2->bier_fmask.fp_bier_fmask);
1616             break;
1617         case FIB_PATH_TYPE_BIER_IMP:
1618             res = (path1->bier_imp.fp_bier_imp -
1619                    path2->bier_imp.fp_bier_imp);
1620             break;
1621         case FIB_PATH_TYPE_BIER_TABLE:
1622             res = bier_table_id_cmp(&path1->bier_table.fp_bier_tbl,
1623                                     &path2->bier_table.fp_bier_tbl);
1624             break;
1625         case FIB_PATH_TYPE_DEAG:
1626             res = (path1->deag.fp_tbl_id - path2->deag.fp_tbl_id);
1627             if (0 == res)
1628             {
1629                 res = (path1->deag.fp_rpf_id - path2->deag.fp_rpf_id);
1630             }
1631             break;
1632         case FIB_PATH_TYPE_INTF_RX:
1633             res = (path1->intf_rx.fp_interface - path2->intf_rx.fp_interface);
1634             break;
1635         case FIB_PATH_TYPE_UDP_ENCAP:
1636             res = (path1->udp_encap.fp_udp_encap_id - path2->udp_encap.fp_udp_encap_id);
1637             break;
1638         case FIB_PATH_TYPE_DVR:
1639             res = (path1->dvr.fp_interface - path2->dvr.fp_interface);
1640             break;
1641         case FIB_PATH_TYPE_EXCLUSIVE:
1642             res = dpo_cmp(&path1->exclusive.fp_ex_dpo, &path2->exclusive.fp_ex_dpo);
1643             break;
1644         case FIB_PATH_TYPE_SPECIAL:
1645         case FIB_PATH_TYPE_RECEIVE:
1646             res = 0;
1647             break;
1648         }
1649     }
1650     return (res);
1651 }
1652
1653 /*
1654  * fib_path_cmp_for_sort
1655  *
1656  * Compare two paths for equivalence. Used during path sorting.
1657  * As usual 0 means equal.
1658  */
1659 int
1660 fib_path_cmp_for_sort (void * v1,
1661                        void * v2)
1662 {
1663     fib_node_index_t *pi1 = v1, *pi2 = v2;
1664     fib_path_t *path1, *path2;
1665
1666     path1 = fib_path_get(*pi1);
1667     path2 = fib_path_get(*pi2);
1668
1669     /*
1670      * when sorting paths we want the highest preference paths
1671      * first, so that the set of choices built is in preference order
1672      */
1673     if (path1->fp_preference != path2->fp_preference)
1674     {
1675         return (path1->fp_preference - path2->fp_preference);
1676     }
1677
1678     return (fib_path_cmp_i(path1, path2));
1679 }
1680
1681 /*
1682  * fib_path_cmp
1683  *
1684  * Compare two paths for equivalence.
1685  */
1686 int
1687 fib_path_cmp (fib_node_index_t pi1,
1688               fib_node_index_t pi2)
1689 {
1690     fib_path_t *path1, *path2;
1691
1692     path1 = fib_path_get(pi1);
1693     path2 = fib_path_get(pi2);
1694
1695     return (fib_path_cmp_i(path1, path2));
1696 }
1697
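/*
 * fib_path_cmp_w_route_path
 *
 * Compare a path against the description in a route-path. 0 means they match.
 */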
1698 int
1699 fib_path_cmp_w_route_path (fib_node_index_t path_index,
1700                            const fib_route_path_t *rpath)
1701 {
1702     fib_path_t *path;
1703     int res;
1704
1705     path = fib_path_get(path_index);
1706
1707     res = 1;
1708
1709     if (path->fp_weight != rpath->frp_weight)
1710     {
1711         res = (path->fp_weight - rpath->frp_weight);
1712     }
1713     else
1714     {
1715         /*
1716          * the weights match.
1717          * compare the path's type-specific attributes against the route-path.
1718          */
1719         switch (path->fp_type)
1720         {
1721         case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
1722             res = ip46_address_cmp(&path->attached_next_hop.fp_nh,
1723                                    &rpath->frp_addr);
1724             if (0 == res)
1725             {
1726                 res = (path->attached_next_hop.fp_interface -
1727                        rpath->frp_sw_if_index);
1728             }
1729             break;
1730         case FIB_PATH_TYPE_ATTACHED:
1731             res = (path->attached.fp_interface - rpath->frp_sw_if_index);
1732             break;
1733         case FIB_PATH_TYPE_RECURSIVE:
1734             if (DPO_PROTO_MPLS == path->fp_nh_proto)
1735             {
1736                 res = path->recursive.fp_nh.fp_local_label - rpath->frp_local_label;
1737
1738                 if (res == 0)
1739                 {
1740                     res = path->recursive.fp_nh.fp_eos - rpath->frp_eos;
1741                 }
1742             }
1743             else
1744             {
1745                 res = ip46_address_cmp(&path->recursive.fp_nh.fp_ip,
1746                                        &rpath->frp_addr);
1747             }
1748
1749             if (0 == res)
1750             {
1751                 res = (path->recursive.fp_tbl_id - rpath->frp_fib_index);
1752             }
1753             break;
1754         case FIB_PATH_TYPE_BIER_FMASK:
1755             res = (path->bier_fmask.fp_bier_fmask - rpath->frp_bier_fmask);
1756             break;
1757         case FIB_PATH_TYPE_BIER_IMP:
1758             res = (path->bier_imp.fp_bier_imp - rpath->frp_bier_imp);
1759             break;
1760         case FIB_PATH_TYPE_BIER_TABLE:
1761             res = bier_table_id_cmp(&path->bier_table.fp_bier_tbl,
1762                                     &rpath->frp_bier_tbl);
1763             break;
1764         case FIB_PATH_TYPE_INTF_RX:
1765             res = (path->intf_rx.fp_interface - rpath->frp_sw_if_index);
1766             break;
1767         case FIB_PATH_TYPE_UDP_ENCAP:
1768             res = (path->udp_encap.fp_udp_encap_id - rpath->frp_udp_encap_id);
1769             break;
1770         case FIB_PATH_TYPE_DEAG:
1771             res = (path->deag.fp_tbl_id - rpath->frp_fib_index);
1772             if (0 == res)
1773             {
1774                 res = (path->deag.fp_rpf_id - rpath->frp_rpf_id);
1775             }
1776             break;
1777         case FIB_PATH_TYPE_DVR:
1778             res = (path->dvr.fp_interface - rpath->frp_sw_if_index);
1779             break;
1780         case FIB_PATH_TYPE_EXCLUSIVE:
1781             res = dpo_cmp(&path->exclusive.fp_ex_dpo, &rpath->dpo);
1782             break;
1783         case FIB_PATH_TYPE_RECEIVE:
1784             if (rpath->frp_flags & FIB_ROUTE_PATH_LOCAL)
1785             {
1786                 res = 0;
1787             }
1788             else
1789             {
1790                 res = 1;
1791             }
1792             break;
1793         case FIB_PATH_TYPE_SPECIAL:
1794             res = 0;
1795             break;
1796         }
1797     }
1798     return (res);
1799 }
1800
1801 /*
1802  * fib_path_recursive_loop_detect
1803  *
1804  * A forward walk of the FIB object graph to detect a cycle/loop. This
1805  * walk is initiated when an entry is linked to a new path-list or unlinked from an old one.
1806  * The entry vector passed contains all the FIB entries that are children of this
1807  * path (i.e. all the entries encountered on the walk so far). If this vector
1808  * contains the entry this path resolves via, then a loop is about to form.
1809  * The loop must be allowed to form, since we need the dependencies in place
1810  * so that we can track when the loop breaks.
1811  * However, we MUST not produce a loop in the forwarding graph (else packets
1812  * would loop around the switch path until the loop breaks), so we mark recursive
1813  * paths as looped so that they do not contribute forwarding information.
1814  * By marking the path as looped, an entry such as:
1815  *    X/Y
1816  *     via a.a.a.a (looped)
1817  *     via b.b.b.b (not looped)
1818  * can still forward using the info provided by b.b.b.b only
1819  */
1820 int
1821 fib_path_recursive_loop_detect (fib_node_index_t path_index,
1822                                 fib_node_index_t **entry_indicies)
1823 {
1824     fib_path_t *path;
1825
1826     path = fib_path_get(path_index);
1827
1828     /*
1829      * the forced drop path is never looped, since it is never resolved.
1830      */
1831     if (fib_path_is_permanent_drop(path))
1832     {
1833         return (0);
1834     }
1835
1836     switch (path->fp_type)
1837     {
1838     case FIB_PATH_TYPE_RECURSIVE:
1839     {
1840         fib_node_index_t *entry_index, *entries;
1841         int looped = 0;
1842         entries = *entry_indicies;
1843
1844         vec_foreach(entry_index, entries) {
1845             if (*entry_index == path->fp_via_fib)
1846             {
1847                 /*
1848                  * the entry that is about to link to this path-list (or
1849                  * one of this path-list's children) is the same entry that
1850                  * this recursive path resolves through. this is a cycle.
1851                  * abort the walk.
1852                  */
1853                 looped = 1;
1854                 break;
1855             }
1856         }
1857
1858         if (looped)
1859         {
1860             FIB_PATH_DBG(path, "recursive loop formed");
1861             path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RECURSIVE_LOOP;
1862
1863             dpo_copy(&path->fp_dpo, drop_dpo_get(path->fp_nh_proto));
1864         }
1865         else
1866         {
1867             /*
1868              * no loop here yet. keep forward walking the graph.
1869              */
1870             if (fib_entry_recursive_loop_detect(path->fp_via_fib, entry_indicies))
1871             {
1872                 FIB_PATH_DBG(path, "recursive loop formed");
1873                 path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RECURSIVE_LOOP;
1874             }
1875             else
1876             {
1877                 FIB_PATH_DBG(path, "recursive loop cleared");
1878                 path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RECURSIVE_LOOP;
1879             }
1880         }
1881         break;
1882     }
1883     case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
1884     case FIB_PATH_TYPE_ATTACHED:
1885         if (dpo_is_adj(&path->fp_dpo) &&
1886             adj_recursive_loop_detect(path->fp_dpo.dpoi_index,
1887                                       entry_indicies))
1888         {
1889             FIB_PATH_DBG(path, "recursive loop formed");
1890             path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RECURSIVE_LOOP;
1891         }
1892         else
1893         {
1894             FIB_PATH_DBG(path, "recursive loop cleared");
1895             path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RECURSIVE_LOOP;
1896         }
1897         break;
1898     case FIB_PATH_TYPE_SPECIAL:
1899     case FIB_PATH_TYPE_DEAG:
1900     case FIB_PATH_TYPE_DVR:
1901     case FIB_PATH_TYPE_RECEIVE:
1902     case FIB_PATH_TYPE_INTF_RX:
1903     case FIB_PATH_TYPE_UDP_ENCAP:
1904     case FIB_PATH_TYPE_EXCLUSIVE:
1905     case FIB_PATH_TYPE_BIER_FMASK:
1906     case FIB_PATH_TYPE_BIER_TABLE:
1907     case FIB_PATH_TYPE_BIER_IMP:
1908         /*
1909          * these path types cannot be part of a loop, since they are the leaves
1910          * of the graph.
1911          */
1912         break;
1913     }
1914
1915     return (fib_path_is_looped(path_index));
1916 }
1917
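/*
 * fib_path_resolve
 *
 * Resolve the path; construct the DPO it forwards through and, where
 * required, become a child of the object (adjacency, FIB entry, BIER
 * object, ...) it resolves via so the path is updated when that object
 * changes. Returns whether the path is now resolved.
 */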
1918 int
1919 fib_path_resolve (fib_node_index_t path_index)
1920 {
1921     fib_path_t *path;
1922
1923     path = fib_path_get(path_index);
1924
1925     /*
1926      * hope for the best.
1927      */
1928     path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RESOLVED;
1929
1930     /*
1931      * the forced drop path resolves via the drop adj
1932      */
1933     if (fib_path_is_permanent_drop(path))
1934     {
1935         dpo_copy(&path->fp_dpo, drop_dpo_get(path->fp_nh_proto));
1936         path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
1937         return (fib_path_is_resolved(path_index));
1938     }
1939
1940     switch (path->fp_type)
1941     {
1942     case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
1943         fib_path_attached_next_hop_set(path);
1944         break;
1945     case FIB_PATH_TYPE_ATTACHED:
1946     {
1947         dpo_id_t tmp = DPO_INVALID;
1948
1949         /*
1950          * path->attached.fp_interface
1951          */
1952         if (!vnet_sw_interface_is_up(vnet_get_main(),
1953                                      path->attached.fp_interface))
1954         {
1955             path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
1956         }
1957         fib_path_attached_get_adj(path,
1958                                   dpo_proto_to_link(path->fp_nh_proto),
1959                                   &tmp);
1960
1961         /*
1962          * re-fetch after possible mem realloc
1963          */
1964         path = fib_path_get(path_index);
1965         dpo_copy(&path->fp_dpo, &tmp);
1966
1967         /*
1968          * become a child of the adjacency so we receive updates
1969          * when the interface state changes
1970          */
1971         if (dpo_is_adj(&path->fp_dpo))
1972         {
1973             path->fp_sibling = adj_child_add(path->fp_dpo.dpoi_index,
1974                                              FIB_NODE_TYPE_PATH,
1975                                              fib_path_get_index(path));
1976         }
1977         dpo_reset(&tmp);
1978         break;
1979     }
1980     case FIB_PATH_TYPE_RECURSIVE:
1981     {
1982         /*
1983          * Create a RR source entry in the table for the address
1984          * that this path recurses through.
1985          * This resolve action is recursive, hence we may create
1986          * more paths in the process. more creates mean maybe realloc
1987          * of this path.
1988          */
1989         fib_node_index_t fei;
1990         fib_prefix_t pfx;
1991
1992         ASSERT(FIB_NODE_INDEX_INVALID == path->fp_via_fib);
1993
1994         if (DPO_PROTO_MPLS == path->fp_nh_proto)
1995         {
1996             fib_prefix_from_mpls_label(path->recursive.fp_nh.fp_local_label,
1997                                        path->recursive.fp_nh.fp_eos,
1998                                        &pfx);
1999         }
2000         else
2001         {
2002             ASSERT(!ip46_address_is_zero(&path->recursive.fp_nh.fp_ip));
2003
2004             fib_protocol_t fp = (ip46_address_is_ip4(&path->recursive.fp_nh.fp_ip) ?
2005                                         FIB_PROTOCOL_IP4 : FIB_PROTOCOL_IP6);
2006             fib_prefix_from_ip46_addr(fp, &path->recursive.fp_nh.fp_ip, &pfx);
2007         }
2008
2009         fib_table_lock(path->recursive.fp_tbl_id,
2010                        dpo_proto_to_fib(path->fp_nh_proto),
2011                        FIB_SOURCE_RR);
2012         fei = fib_table_entry_special_add(path->recursive.fp_tbl_id,
2013                                           &pfx,
2014                                           FIB_SOURCE_RR,
2015                                           FIB_ENTRY_FLAG_NONE);
2016
2017         path = fib_path_get(path_index);
2018         path->fp_via_fib = fei;
2019
2020         /*
2021          * become a dependent child of the entry so the path is 
2022          * informed when the forwarding for the entry changes.
2023          */
2024         path->fp_sibling = fib_entry_child_add(path->fp_via_fib,
2025                                                FIB_NODE_TYPE_PATH,
2026                                                fib_path_get_index(path));
2027
2028         /*
2029          * create and configure the IP DPO
2030          */
2031         fib_path_recursive_adj_update(
2032             path,
2033             fib_path_to_chain_type(path),
2034             &path->fp_dpo);
2035
2036         break;
2037     }
2038     case FIB_PATH_TYPE_BIER_FMASK:
2039     {
2040         /*
2041          * become a dependent child of the entry so the path is
2042          * informed when the forwarding for the entry changes.
2043          */
2044         path->fp_sibling = bier_fmask_child_add(path->bier_fmask.fp_bier_fmask,
2045                                                 FIB_NODE_TYPE_PATH,
2046                                                 fib_path_get_index(path));
2047
2048         path->fp_via_bier_fmask = path->bier_fmask.fp_bier_fmask;
2049         fib_path_bier_fmask_update(path, &path->fp_dpo);
2050
2051         break;
2052     }
2053     case FIB_PATH_TYPE_BIER_IMP:
2054         bier_imp_lock(path->bier_imp.fp_bier_imp);
2055         bier_imp_contribute_forwarding(path->bier_imp.fp_bier_imp,
2056                                        DPO_PROTO_IP4,
2057                                        &path->fp_dpo);
2058         break;
2059     case FIB_PATH_TYPE_BIER_TABLE:
2060     {
2061         /*
2062          * Find/create the BIER table to link to
2063          */
2064         ASSERT(FIB_NODE_INDEX_INVALID == path->fp_via_bier_tbl);
2065
2066         path->fp_via_bier_tbl =
2067             bier_table_ecmp_create_and_lock(&path->bier_table.fp_bier_tbl);
2068
2069         bier_table_contribute_forwarding(path->fp_via_bier_tbl,
2070                                          &path->fp_dpo);
2071         break;
2072     }
2073     case FIB_PATH_TYPE_SPECIAL:
2074         if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_ICMP_PROHIBIT)
2075         {
2076             ip_null_dpo_add_and_lock (path->fp_nh_proto,
2077                                       IP_NULL_ACTION_SEND_ICMP_PROHIBIT,
2078                                       &path->fp_dpo);
2079         }
2080         else if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_ICMP_UNREACH)
2081         {
2082             ip_null_dpo_add_and_lock (path->fp_nh_proto,
2083                                       IP_NULL_ACTION_SEND_ICMP_UNREACH,
2084                                       &path->fp_dpo);
2085         }
2086         else if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_CLASSIFY)
2087         {
2088             dpo_set (&path->fp_dpo, DPO_CLASSIFY,
2089                      path->fp_nh_proto,
2090                      classify_dpo_create (path->fp_nh_proto,
2091                                           path->classify.fp_classify_table_id));
2092         }
2093         else
2094         {
2095             /*
2096              * Resolve via the drop
2097              */
2098             dpo_copy(&path->fp_dpo, drop_dpo_get(path->fp_nh_proto));
2099         }
2100         break;
2101     case FIB_PATH_TYPE_DEAG:
2102     {
2103         if (DPO_PROTO_BIER == path->fp_nh_proto)
2104         {
2105             bier_disp_table_contribute_forwarding(path->deag.fp_tbl_id,
2106                                                   &path->fp_dpo);
2107         }
2108         else
2109         {
2110             /*
2111              * Resolve via a lookup DPO.
2112              * FIXME. control plane should add routes with a table ID
2113              */
2114             lookup_input_t input;
2115             lookup_cast_t cast;
2116
2117             cast = (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_RPF_ID ?
2118                     LOOKUP_MULTICAST :
2119                     LOOKUP_UNICAST);
2120             input = (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_DEAG_SRC ?
2121                      LOOKUP_INPUT_SRC_ADDR :
2122                      LOOKUP_INPUT_DST_ADDR);
2123
2124             lookup_dpo_add_or_lock_w_fib_index(path->deag.fp_tbl_id,
2125                                                path->fp_nh_proto,
2126                                                cast,
2127                                                input,
2128                                                LOOKUP_TABLE_FROM_CONFIG,
2129                                                &path->fp_dpo);
2130         }
2131         break;
2132     }
2133     case FIB_PATH_TYPE_DVR:
2134         dvr_dpo_add_or_lock(path->dvr.fp_interface,
2135                             path->fp_nh_proto,
2136                             &path->fp_dpo);
2137         break;
2138     case FIB_PATH_TYPE_RECEIVE:
2139         /*
2140          * Resolve via a receive DPO.
2141          */
2142         receive_dpo_add_or_lock(path->fp_nh_proto,
2143                                 path->receive.fp_interface,
2144                                 &path->receive.fp_addr,
2145                                 &path->fp_dpo);
2146         break;
2147     case FIB_PATH_TYPE_UDP_ENCAP:
2148         udp_encap_lock(path->udp_encap.fp_udp_encap_id);
2149         udp_encap_contribute_forwarding(path->udp_encap.fp_udp_encap_id,
2150                                         path->fp_nh_proto,
2151                                         &path->fp_dpo);
2152         break;
2153     case FIB_PATH_TYPE_INTF_RX: {
2154         /*
2155          * Resolve via an interface-receive DPO.
2156          */
2157         interface_rx_dpo_add_or_lock(path->fp_nh_proto,
2158                                      path->intf_rx.fp_interface,
2159                                      &path->fp_dpo);
2160         break;
2161     }
2162     case FIB_PATH_TYPE_EXCLUSIVE:
2163         /*
2164          * Resolve via the user provided DPO
2165          */
2166         dpo_copy(&path->fp_dpo, &path->exclusive.fp_ex_dpo);
2167         break;
2168     }
2169
2170     return (fib_path_is_resolved(path_index));
2171 }
2172
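/*
 * fib_path_get_resolving_interface
 *
 * Return the sw_if_index the path resolves through; recursive paths ask
 * their via-entry, otherwise fall back to the uRPF interface of the DPO.
 */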
2173 u32
2174 fib_path_get_resolving_interface (fib_node_index_t path_index)
2175 {
2176     fib_path_t *path;
2177
2178     path = fib_path_get(path_index);
2179
2180     switch (path->fp_type)
2181     {
2182     case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
2183         return (path->attached_next_hop.fp_interface);
2184     case FIB_PATH_TYPE_ATTACHED:
2185         return (path->attached.fp_interface);
2186     case FIB_PATH_TYPE_RECEIVE:
2187         return (path->receive.fp_interface);
2188     case FIB_PATH_TYPE_RECURSIVE:
2189         if (fib_path_is_resolved(path_index))
2190         {
2191             return (fib_entry_get_resolving_interface(path->fp_via_fib));
2192         }
2193         break;
2194     case FIB_PATH_TYPE_DVR:
2195         return (path->dvr.fp_interface);
2196     case FIB_PATH_TYPE_INTF_RX:
2197     case FIB_PATH_TYPE_UDP_ENCAP:
2198     case FIB_PATH_TYPE_SPECIAL:
2199     case FIB_PATH_TYPE_DEAG:
2200     case FIB_PATH_TYPE_EXCLUSIVE:
2201     case FIB_PATH_TYPE_BIER_FMASK:
2202     case FIB_PATH_TYPE_BIER_TABLE:
2203     case FIB_PATH_TYPE_BIER_IMP:
2204         break;
2205     }
2206     return (dpo_get_urpf(&path->fp_dpo));
2207 }
2208
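/*
 * fib_path_get_resolving_index
 *
 * Return the index of the object (FIB entry, BIER object, UDP encap, ...)
 * the path resolves through, or ~0 if there is none.
 */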
2209 index_t
2210 fib_path_get_resolving_index (fib_node_index_t path_index)
2211 {
2212     fib_path_t *path;
2213
2214     path = fib_path_get(path_index);
2215
2216     switch (path->fp_type)
2217     {
2218     case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
2219     case FIB_PATH_TYPE_ATTACHED:
2220     case FIB_PATH_TYPE_RECEIVE:
2221     case FIB_PATH_TYPE_INTF_RX:
2222     case FIB_PATH_TYPE_SPECIAL:
2223     case FIB_PATH_TYPE_DEAG:
2224     case FIB_PATH_TYPE_DVR:
2225     case FIB_PATH_TYPE_EXCLUSIVE:
2226         break;
2227     case FIB_PATH_TYPE_UDP_ENCAP:
2228         return (path->udp_encap.fp_udp_encap_id);
2229     case FIB_PATH_TYPE_RECURSIVE:
2230         return (path->fp_via_fib);
2231     case FIB_PATH_TYPE_BIER_FMASK:
2232         return (path->bier_fmask.fp_bier_fmask);
2233     case FIB_PATH_TYPE_BIER_TABLE:
2234         return (path->fp_via_bier_tbl);
2235     case FIB_PATH_TYPE_BIER_IMP:
2236         return (path->bier_imp.fp_bier_imp);
2237     }
2238     return (~0);
2239 }
2240
2241 adj_index_t
2242 fib_path_get_adj (fib_node_index_t path_index)
2243 {
2244     fib_path_t *path;
2245
2246     path = fib_path_get(path_index);
2247
2248     if (dpo_is_adj(&path->fp_dpo))
2249     {
2250         return (path->fp_dpo.dpoi_index);
2251     }
2252     return (ADJ_INDEX_INVALID);
2253 }
2254
2255 u16
2256 fib_path_get_weight (fib_node_index_t path_index)
2257 {
2258     fib_path_t *path;
2259
2260     path = fib_path_get(path_index);
2261
2262     ASSERT(path);
2263
2264     return (path->fp_weight);
2265 }
2266
2267 u16
2268 fib_path_get_preference (fib_node_index_t path_index)
2269 {
2270     fib_path_t *path;
2271
2272     path = fib_path_get(path_index);
2273
2274     ASSERT(path);
2275
2276     return (path->fp_preference);
2277 }
2278
2279 u32
2280 fib_path_get_rpf_id (fib_node_index_t path_index)
2281 {
2282     fib_path_t *path;
2283
2284     path = fib_path_get(path_index);
2285
2286     ASSERT(path);
2287
2288     if (FIB_PATH_CFG_FLAG_RPF_ID & path->fp_cfg_flags)
2289     {
2290         return (path->deag.fp_rpf_id);
2291     }
2292
2293     return (~0);
2294 }
2295
2296 /**
2297  * @brief Contribute the path's adjacency to the list passed.
2298  * By calling this function over all paths, recursively, a child
2299  * can construct its full set of forwarding adjacencies, and hence its
2300  * uRPF list.
2301  */
2302 void
2303 fib_path_contribute_urpf (fib_node_index_t path_index,
2304                           index_t urpf)
2305 {
2306     fib_path_t *path;
2307
2308     path = fib_path_get(path_index);
2309
2310     /*
2311      * resolved and unresolved paths contribute to the RPF list.
2312      */
2313     switch (path->fp_type)
2314     {
2315     case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
2316         fib_urpf_list_append(urpf, path->attached_next_hop.fp_interface);
2317         break;
2318
2319     case FIB_PATH_TYPE_ATTACHED:
2320         fib_urpf_list_append(urpf, path->attached.fp_interface);
2321         break;
2322
2323     case FIB_PATH_TYPE_RECURSIVE:
2324         if (FIB_NODE_INDEX_INVALID != path->fp_via_fib &&
2325             !fib_path_is_looped(path_index))
2326         {
2327             /*
2328              * a path may be unresolved because of resolution constraints, or
2329              * because it has no via-entry; with no via-entry there is nothing to contribute.
2330              */
2331             fib_entry_contribute_urpf(path->fp_via_fib, urpf);
2332         }
2333         break;
2334
2335     case FIB_PATH_TYPE_EXCLUSIVE:
2336     case FIB_PATH_TYPE_SPECIAL:
2337     {
2338         /*
2339          * these path types may link to an adj, if that's what
2340          * the client gave
2341          */
2342         u32 rpf_sw_if_index;
2343
2344         rpf_sw_if_index = dpo_get_urpf(&path->fp_dpo);
2345
2346         if (~0 != rpf_sw_if_index)
2347         {
2348             fib_urpf_list_append(urpf, rpf_sw_if_index);
2349         }
2350         break;
2351     }
2352     case FIB_PATH_TYPE_DVR:
2353         fib_urpf_list_append(urpf, path->dvr.fp_interface);
2354         break;
2355     case FIB_PATH_TYPE_UDP_ENCAP:
2356         fib_urpf_list_append(urpf, path->udp_encap.fp_udp_encap_id);
2357         break;
2358     case FIB_PATH_TYPE_DEAG:
2359     case FIB_PATH_TYPE_RECEIVE:
2360     case FIB_PATH_TYPE_INTF_RX:
2361     case FIB_PATH_TYPE_BIER_FMASK:
2362     case FIB_PATH_TYPE_BIER_TABLE:
2363     case FIB_PATH_TYPE_BIER_IMP:
2364         /*
2365          * these path types don't link to an adj
2366          */
2367         break;
2368     }
2369 }
2370
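/*
 * fib_path_stack_mpls_disp
 *
 * For path types that terminate an LSP, stack an MPLS disposition DPO
 * (and, if configured, a PW-CW pop DPO) on the DPO passed.
 */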
2371 void
2372 fib_path_stack_mpls_disp (fib_node_index_t path_index,
2373                           dpo_proto_t payload_proto,
2374                           fib_mpls_lsp_mode_t mode,
2375                           dpo_id_t *dpo)
2376 {
2377     fib_path_t *path;
2378
2379     path = fib_path_get(path_index);
2380
2381     ASSERT(path);
2382
2383     switch (path->fp_type)
2384     {
2385     case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
2386     {
2387         dpo_id_t tmp = DPO_INVALID;
2388
2389         dpo_copy(&tmp, dpo);
2390
2391         mpls_disp_dpo_create(payload_proto, ~0, mode, &tmp, dpo);
2392         dpo_reset(&tmp);
2393         break;
2394     }                
2395     case FIB_PATH_TYPE_DEAG:
2396     {
2397         dpo_id_t tmp = DPO_INVALID;
2398
2399         dpo_copy(&tmp, dpo);
2400
2401         mpls_disp_dpo_create(payload_proto,
2402                              path->deag.fp_rpf_id,
2403                              mode, &tmp, dpo);
2404         dpo_reset(&tmp);
2405         break;
2406     }
2407     case FIB_PATH_TYPE_RECEIVE:
2408     case FIB_PATH_TYPE_ATTACHED:
2409     case FIB_PATH_TYPE_RECURSIVE:
2410     case FIB_PATH_TYPE_INTF_RX:
2411     case FIB_PATH_TYPE_UDP_ENCAP:
2412     case FIB_PATH_TYPE_EXCLUSIVE:
2413     case FIB_PATH_TYPE_SPECIAL:
2414     case FIB_PATH_TYPE_BIER_FMASK:
2415     case FIB_PATH_TYPE_BIER_TABLE:
2416     case FIB_PATH_TYPE_BIER_IMP:
2417     case FIB_PATH_TYPE_DVR:
2418         break;
2419     }
2420
2421     if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_POP_PW_CW)
2422     {
2423         dpo_id_t tmp = DPO_INVALID;
2424
2425         dpo_copy(&tmp, dpo);
2426
2427         pw_cw_dpo_create(&tmp, dpo);
2428         dpo_reset(&tmp);
2429     }
2430 }
2431
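/*
 * fib_path_contribute_forwarding
 *
 * Provide a DPO for the requested forwarding chain type. If the chain type
 * matches the path's native type, the stored DPO is copied; otherwise a
 * type-specific DPO is constructed.
 */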
2432 void
2433 fib_path_contribute_forwarding (fib_node_index_t path_index,
2434                                 fib_forward_chain_type_t fct,
2435                                 dpo_proto_t payload_proto,
2436                                 dpo_id_t *dpo)
2437 {
2438     fib_path_t *path;
2439
2440     path = fib_path_get(path_index);
2441
2442     ASSERT(path);
2443
2444     /*
2445      * The DPO stored in the path was created when the path was resolved.
2446      * This represents the path's 'native' forwarding chain type.
2447      * For all other chain types we need to go and find something else.
2448      */
2449     if (fib_path_to_chain_type(path) == fct)
2450     {
2451         dpo_copy(dpo, &path->fp_dpo);
2452     }
2453     else
2454     {
2455         switch (path->fp_type)
2456         {
2457         case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
2458             switch (fct)
2459             {
2460             case FIB_FORW_CHAIN_TYPE_MPLS_EOS: {
2461                     dpo_id_t tmp = DPO_INVALID;
2462                     dpo_copy (&tmp, dpo);
2463                     path = fib_path_attached_next_hop_get_adj(
2464                            path,
2465                            dpo_proto_to_link(payload_proto),
2466                            &tmp);
2467                     dpo_copy (dpo, &tmp);
2468                     dpo_reset(&tmp);
2469                     break;
2470             }
2471             case FIB_FORW_CHAIN_TYPE_UNICAST_IP4:
2472             case FIB_FORW_CHAIN_TYPE_UNICAST_IP6:
2473             case FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS:
2474             case FIB_FORW_CHAIN_TYPE_ETHERNET:
2475             case FIB_FORW_CHAIN_TYPE_NSH:
2476             case FIB_FORW_CHAIN_TYPE_MCAST_IP4:
2477             case FIB_FORW_CHAIN_TYPE_MCAST_IP6:
2478                 {
2479                     dpo_id_t tmp = DPO_INVALID;
2480                     dpo_copy (&tmp, dpo);
2481                     path = fib_path_attached_next_hop_get_adj(
2482                            path,
2483                            fib_forw_chain_type_to_link_type(fct),
2484                            &tmp);
2485                     dpo_copy (dpo, &tmp);
2486                     dpo_reset(&tmp);
2487                     break;
2488                 }
2489             case FIB_FORW_CHAIN_TYPE_BIER:
2490                 break;
2491             }
2492             break;
2493         case FIB_PATH_TYPE_RECURSIVE:
2494             switch (fct)
2495             {
2496             case FIB_FORW_CHAIN_TYPE_MPLS_EOS:
2497             case FIB_FORW_CHAIN_TYPE_UNICAST_IP4:
2498             case FIB_FORW_CHAIN_TYPE_UNICAST_IP6:
2499             case FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS:
2500             case FIB_FORW_CHAIN_TYPE_MCAST_IP4:
2501             case FIB_FORW_CHAIN_TYPE_MCAST_IP6:
2502             case FIB_FORW_CHAIN_TYPE_BIER:
2503                 fib_path_recursive_adj_update(path, fct, dpo);
2504                 break;
2505             case FIB_FORW_CHAIN_TYPE_ETHERNET:
2506             case FIB_FORW_CHAIN_TYPE_NSH:
2507                 ASSERT(0);
2508                 break;
2509             }
2510             break;
2511         case FIB_PATH_TYPE_BIER_TABLE:
2512             switch (fct)
2513             {
2514             case FIB_FORW_CHAIN_TYPE_BIER:
2515                 bier_table_contribute_forwarding(path->fp_via_bier_tbl, dpo);
2516                 break;
2517             case FIB_FORW_CHAIN_TYPE_MPLS_EOS:
2518             case FIB_FORW_CHAIN_TYPE_UNICAST_IP4:
2519             case FIB_FORW_CHAIN_TYPE_UNICAST_IP6:
2520             case FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS:
2521             case FIB_FORW_CHAIN_TYPE_MCAST_IP4:
2522             case FIB_FORW_CHAIN_TYPE_MCAST_IP6:
2523             case FIB_FORW_CHAIN_TYPE_ETHERNET:
2524             case FIB_FORW_CHAIN_TYPE_NSH:
2525                 ASSERT(0);
2526                 break;
2527             }
2528             break;
2529         case FIB_PATH_TYPE_BIER_FMASK:
2530             switch (fct)
2531             {
2532             case FIB_FORW_CHAIN_TYPE_BIER:
2533                 fib_path_bier_fmask_update(path, dpo);
2534                 break;
2535             case FIB_FORW_CHAIN_TYPE_MPLS_EOS:
2536             case FIB_FORW_CHAIN_TYPE_UNICAST_IP4:
2537             case FIB_FORW_CHAIN_TYPE_UNICAST_IP6:
2538             case FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS:
2539             case FIB_FORW_CHAIN_TYPE_MCAST_IP4:
2540             case FIB_FORW_CHAIN_TYPE_MCAST_IP6:
2541             case FIB_FORW_CHAIN_TYPE_ETHERNET:
2542             case FIB_FORW_CHAIN_TYPE_NSH:
2543                 ASSERT(0);
2544                 break;
2545             }
2546             break;
2547         case FIB_PATH_TYPE_BIER_IMP:
2548             bier_imp_contribute_forwarding(path->bier_imp.fp_bier_imp,
2549                                            fib_forw_chain_type_to_dpo_proto(fct),
2550                                            dpo);
2551             break;
2552         case FIB_PATH_TYPE_DEAG:
2553             switch (fct)
2554             {
2555             case FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS:
2556                 lookup_dpo_add_or_lock_w_table_id(MPLS_FIB_DEFAULT_TABLE_ID,
2557                                                   DPO_PROTO_MPLS,
2558                                                   LOOKUP_UNICAST,
2559                                                   LOOKUP_INPUT_DST_ADDR,
2560                                                   LOOKUP_TABLE_FROM_CONFIG,
2561                                                   dpo);
2562                 break;
2563             case FIB_FORW_CHAIN_TYPE_MPLS_EOS:
2564             case FIB_FORW_CHAIN_TYPE_UNICAST_IP4:
2565             case FIB_FORW_CHAIN_TYPE_UNICAST_IP6:
2566             case FIB_FORW_CHAIN_TYPE_MCAST_IP4:
2567             case FIB_FORW_CHAIN_TYPE_MCAST_IP6:
2568                 dpo_copy(dpo, &path->fp_dpo);
2569                 break;
2570             case FIB_FORW_CHAIN_TYPE_BIER:
2571                 break;
2572             case FIB_FORW_CHAIN_TYPE_ETHERNET:
2573             case FIB_FORW_CHAIN_TYPE_NSH:
2574                 ASSERT(0);
2575                 break;
2576             }
2577             break;
2578         case FIB_PATH_TYPE_EXCLUSIVE:
2579             dpo_copy(dpo, &path->exclusive.fp_ex_dpo);
2580             break;
2581         case FIB_PATH_TYPE_ATTACHED:
2582             switch (fct)
2583             {
2584             case FIB_FORW_CHAIN_TYPE_MPLS_EOS:
2585                 /*
2586                  * End of stack traffic via an attached path (a glean)
2587                  * must force an IP lookup so that the IP packet can
2588                  * match against any installed adj-fibs
2589                  */
2590                 lookup_dpo_add_or_lock_w_fib_index(
2591                     fib_table_get_index_for_sw_if_index(
2592                         dpo_proto_to_fib(payload_proto),
2593                         path->attached.fp_interface),
2594                     payload_proto,
2595                     LOOKUP_UNICAST,
2596                     LOOKUP_INPUT_DST_ADDR,
2597                     LOOKUP_TABLE_FROM_CONFIG,
2598                     dpo);
2599                 break;
2600             case FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS:
2601             case FIB_FORW_CHAIN_TYPE_UNICAST_IP4:
2602             case FIB_FORW_CHAIN_TYPE_UNICAST_IP6:
2603             case FIB_FORW_CHAIN_TYPE_ETHERNET:
2604             case FIB_FORW_CHAIN_TYPE_NSH:
2605             case FIB_FORW_CHAIN_TYPE_BIER:
2606                 fib_path_attached_get_adj(path,
2607                                           fib_forw_chain_type_to_link_type(fct),
2608                                           dpo);
2609                 break;
2610             case FIB_FORW_CHAIN_TYPE_MCAST_IP4:
2611             case FIB_FORW_CHAIN_TYPE_MCAST_IP6:
2612                 {
2613                     adj_index_t ai;
2614
2615                     /*
2616                      * Create the adj needed for sending IP multicast traffic
2617                      */
2618                     if (vnet_sw_interface_is_p2p(vnet_get_main(),
2619                                                  path->attached.fp_interface))
2620                     {
2621                         /*
2622                          * point-2-point interfaces do not require a glean, since
2623                          * there is nothing to ARP. Install a rewrite/nbr adj instead
2624                          */
2625                         ai = adj_nbr_add_or_lock(dpo_proto_to_fib(path->fp_nh_proto),
2626                                                  fib_forw_chain_type_to_link_type(fct),
2627                                                  &zero_addr,
2628                                                  path->attached.fp_interface);
2629                     }
2630                     else
2631                     {
2632                         ai = adj_mcast_add_or_lock(dpo_proto_to_fib(path->fp_nh_proto),
2633                                                    fib_forw_chain_type_to_link_type(fct),
2634                                                    path->attached.fp_interface);
2635                     }
2636                     dpo_set(dpo, DPO_ADJACENCY,
2637                             fib_forw_chain_type_to_dpo_proto(fct),
2638                             ai);
2639                     adj_unlock(ai);
2640                 }
2641                 break;
2642             }
2643             break;
2644         case FIB_PATH_TYPE_INTF_RX:
2645             /*
2646              * Resolve via an interface-receive DPO on the given interface
2647              */
2648             interface_rx_dpo_add_or_lock(payload_proto,
2649                                          path->intf_rx.fp_interface,
2650                                          dpo);
2651             break;
2652         case FIB_PATH_TYPE_UDP_ENCAP:
2653             udp_encap_contribute_forwarding(path->udp_encap.fp_udp_encap_id,
2654                                             path->fp_nh_proto,
2655                                             dpo);
2656             break;
2657         case FIB_PATH_TYPE_RECEIVE:
2658         case FIB_PATH_TYPE_SPECIAL:
2659         case FIB_PATH_TYPE_DVR:
2660             dpo_copy(dpo, &path->fp_dpo);
2661             break;
2662         }
2663     }
2664 }
2665
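/*
 * fib_path_append_nh_for_multipath_hash
 *
 * Append this path's contribution (weight, index and forwarding DPO) to the
 * vector used to build a load-balance; unresolved paths contribute a drop.
 */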
2666 load_balance_path_t *
2667 fib_path_append_nh_for_multipath_hash (fib_node_index_t path_index,
2668                                        fib_forward_chain_type_t fct,
2669                                        dpo_proto_t payload_proto,
2670                                        load_balance_path_t *hash_key)
2671 {
2672     load_balance_path_t *mnh;
2673     fib_path_t *path;
2674
2675     path = fib_path_get(path_index);
2676
2677     ASSERT(path);
2678
2679     vec_add2(hash_key, mnh, 1);
2680
2681     mnh->path_weight = path->fp_weight;
2682     mnh->path_index = path_index;
2683
2684     if (fib_path_is_resolved(path_index))
2685     {
2686         fib_path_contribute_forwarding(path_index, fct, payload_proto, &mnh->path_dpo);
2687     }
2688     else
2689     {
2690         dpo_copy(&mnh->path_dpo,
2691                  drop_dpo_get(fib_forw_chain_type_to_dpo_proto(fct)));
2692     }
2693     return (hash_key);
2694 }
2695
2696 int
2697 fib_path_is_recursive_constrained (fib_node_index_t path_index)
2698 {
2699     fib_path_t *path;
2700
2701     path = fib_path_get(path_index);
2702
2703     return ((FIB_PATH_TYPE_RECURSIVE == path->fp_type) &&
2704             ((path->fp_cfg_flags & FIB_PATH_CFG_FLAG_RESOLVE_ATTACHED) ||
2705              (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_RESOLVE_HOST)));
2706 }
2707
2708 int
2709 fib_path_is_exclusive (fib_node_index_t path_index)
2710 {
2711     fib_path_t *path;
2712
2713     path = fib_path_get(path_index);
2714
2715     return (FIB_PATH_TYPE_EXCLUSIVE == path->fp_type);
2716 }
2717
2718 int
2719 fib_path_is_deag (fib_node_index_t path_index)
2720 {
2721     fib_path_t *path;
2722
2723     path = fib_path_get(path_index);
2724
2725     return (FIB_PATH_TYPE_DEAG == path->fp_type);
2726 }
2727
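/*
 * fib_path_is_resolved
 *
 * A path is resolved when it has a valid DPO, its resolved flag is set,
 * and it is neither looped nor a permanent drop.
 */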
2728 int
2729 fib_path_is_resolved (fib_node_index_t path_index)
2730 {
2731     fib_path_t *path;
2732
2733     path = fib_path_get(path_index);
2734
2735     return (dpo_id_is_valid(&path->fp_dpo) &&
2736             (path->fp_oper_flags & FIB_PATH_OPER_FLAG_RESOLVED) &&
2737             !fib_path_is_looped(path_index) &&
2738             !fib_path_is_permanent_drop(path));
2739 }
2740
2741 int
2742 fib_path_is_looped (fib_node_index_t path_index)
2743 {
2744     fib_path_t *path;
2745
2746     path = fib_path_get(path_index);
2747
2748     return (path->fp_oper_flags & FIB_PATH_OPER_FLAG_RECURSIVE_LOOP);
2749 }
2750
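/*
 * fib_path_encode
 *
 * Path-list walk callback; encode the path as a fib_route_path_t appended
 * to the context's vector.
 */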
2751 fib_path_list_walk_rc_t
2752 fib_path_encode (fib_node_index_t path_list_index,
2753                  fib_node_index_t path_index,
2754                  const fib_path_ext_t *path_ext,
2755                  void *args)
2756 {
2757     fib_path_encode_ctx_t *ctx = args;
2758     fib_route_path_t *rpath;
2759     fib_path_t *path;
2760
2761     path = fib_path_get(path_index);
2762     if (!path)
2763       return (FIB_PATH_LIST_WALK_CONTINUE);
2764
2765     vec_add2(ctx->rpaths, rpath, 1);
2766     rpath->frp_weight = path->fp_weight;
2767     rpath->frp_preference = path->fp_preference;
2768     rpath->frp_proto = path->fp_nh_proto;
2769     rpath->frp_sw_if_index = ~0;
2770     rpath->frp_fib_index = 0;
2771
2772     switch (path->fp_type)
2773     {
2774       case FIB_PATH_TYPE_RECEIVE:
2775         rpath->frp_addr = path->receive.fp_addr;
2776         rpath->frp_sw_if_index = path->receive.fp_interface;
2777         rpath->frp_flags |= FIB_ROUTE_PATH_LOCAL;
2778         break;
2779       case FIB_PATH_TYPE_ATTACHED:
2780         rpath->frp_sw_if_index = path->attached.fp_interface;
2781         break;
2782       case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
2783         rpath->frp_sw_if_index = path->attached_next_hop.fp_interface;
2784         rpath->frp_addr = path->attached_next_hop.fp_nh;
2785         break;
2786       case FIB_PATH_TYPE_BIER_FMASK:
2787         rpath->frp_bier_fmask = path->bier_fmask.fp_bier_fmask;
2788         break;
2789       case FIB_PATH_TYPE_SPECIAL:
2790         break;
2791       case FIB_PATH_TYPE_DEAG:
2792         rpath->frp_fib_index = path->deag.fp_tbl_id;
2793         if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_RPF_ID)
2794         {
2795             rpath->frp_flags |= FIB_ROUTE_PATH_RPF_ID;
2796         }
2797         break;
2798       case FIB_PATH_TYPE_RECURSIVE:
2799         rpath->frp_addr = path->recursive.fp_nh.fp_ip;
2800         rpath->frp_fib_index = path->recursive.fp_tbl_id;
2801         break;
2802       case FIB_PATH_TYPE_DVR:
2803           rpath->frp_sw_if_index = path->dvr.fp_interface;
2804           rpath->frp_flags |= FIB_ROUTE_PATH_DVR;
2805           break;
2806       case FIB_PATH_TYPE_UDP_ENCAP:
2807           rpath->frp_udp_encap_id = path->udp_encap.fp_udp_encap_id;
2808           rpath->frp_flags |= FIB_ROUTE_PATH_UDP_ENCAP;
2809           break;
2810       case FIB_PATH_TYPE_INTF_RX:
2811           rpath->frp_sw_if_index = path->receive.fp_interface;
2812           rpath->frp_flags |= FIB_ROUTE_PATH_INTF_RX;
2813           break;
2814       case FIB_PATH_TYPE_EXCLUSIVE:
2815         rpath->frp_flags |= FIB_ROUTE_PATH_EXCLUSIVE;
2816       default:
2817         break;
2818     }
2819
2820     if (path_ext && path_ext->fpe_type == FIB_PATH_EXT_MPLS) 
2821     {
2822         rpath->frp_label_stack = path_ext->fpe_path.frp_label_stack;
2823     }
2824
2825     if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_DROP)
2826         rpath->frp_flags |= FIB_ROUTE_PATH_DROP;
2827     if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_ICMP_UNREACH)
2828         rpath->frp_flags |= FIB_ROUTE_PATH_ICMP_UNREACH;
2829     if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_ICMP_PROHIBIT)
2830         rpath->frp_flags |= FIB_ROUTE_PATH_ICMP_PROHIBIT;
2831
2832     return (FIB_PATH_LIST_WALK_CONTINUE);
2833 }
2834
2835 dpo_proto_t
2836 fib_path_get_proto (fib_node_index_t path_index)
2837 {
2838     fib_path_t *path;
2839
2840     path = fib_path_get(path_index);
2841
2842     return (path->fp_nh_proto);
2843 }
2844
2845 void
2846 fib_path_module_init (void)
2847 {
2848     fib_node_register_type (FIB_NODE_TYPE_PATH, &fib_path_vft);
2849     fib_path_logger = vlib_log_register_class ("fib", "path");
2850 }
2851
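/*
 * show_fib_path_command
 *
 * CLI handler for 'show fib paths'; with an index argument show that path
 * in detail, otherwise list all paths in the pool.
 */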
2852 static clib_error_t *
2853 show_fib_path_command (vlib_main_t * vm,
2854                         unformat_input_t * input,
2855                         vlib_cli_command_t * cmd)
2856 {
2857     fib_node_index_t pi;
2858     fib_path_t *path;
2859
2860     if (unformat (input, "%d", &pi))
2861     {
2862         /*
2863          * show one in detail
2864          */
2865         if (!pool_is_free_index(fib_path_pool, pi))
2866         {
2867             path = fib_path_get(pi);
2868             u8 *s = format(NULL, "%U", format_fib_path, pi, 1,
2869                            FIB_PATH_FORMAT_FLAGS_NONE);
2870             s = format(s, "\n  children:");
2871             s = fib_node_children_format(path->fp_node.fn_children, s);
2872             vlib_cli_output (vm, "%v", s);
2873             vec_free(s);
2874         }
2875         else
2876         {
2877             vlib_cli_output (vm, "path %d invalid", pi);
2878         }
2879     }
2880     else
2881     {
2882         vlib_cli_output (vm, "FIB Paths");
2883         pool_foreach_index (pi, fib_path_pool)
2884          {
2885             vlib_cli_output (vm, "%U", format_fib_path, pi, 0,
2886                              FIB_PATH_FORMAT_FLAGS_NONE);
2887         }
2888     }
2889
2890     return (NULL);
2891 }
2892
2893 VLIB_CLI_COMMAND (show_fib_path, static) = {
2894   .path = "show fib paths",
2895   .function = show_fib_path_command,
2896   .short_help = "show fib paths",
2897 };