1 /*
2  * Copyright (c) 2016 Cisco and/or its affiliates.
3  * Licensed under the Apache License, Version 2.0 (the "License");
4  * you may not use this file except in compliance with the License.
5  * You may obtain a copy of the License at:
6  *
7  *     http://www.apache.org/licenses/LICENSE-2.0
8  *
9  * Unless required by applicable law or agreed to in writing, software
10  * distributed under the License is distributed on an "AS IS" BASIS,
11  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12  * See the License for the specific language governing permissions and
13  * limitations under the License.
14  */
15
16 #include <vlib/vlib.h>
17 #include <vnet/vnet.h>
18 #include <vnet/ip/format.h>
19 #include <vnet/ip/ip.h>
20 #include <vnet/dpo/drop_dpo.h>
21 #include <vnet/dpo/receive_dpo.h>
22 #include <vnet/dpo/load_balance_map.h>
23 #include <vnet/dpo/lookup_dpo.h>
24 #include <vnet/dpo/interface_rx_dpo.h>
25 #include <vnet/dpo/mpls_disposition.h>
26 #include <vnet/dpo/dvr_dpo.h>
27 #include <vnet/dpo/ip_null_dpo.h>
28 #include <vnet/dpo/classify_dpo.h>
29 #include <vnet/dpo/pw_cw.h>
30
31 #include <vnet/adj/adj.h>
32 #include <vnet/adj/adj_mcast.h>
33
34 #include <vnet/fib/fib_path.h>
35 #include <vnet/fib/fib_node.h>
36 #include <vnet/fib/fib_table.h>
37 #include <vnet/fib/fib_entry.h>
38 #include <vnet/fib/fib_path_list.h>
39 #include <vnet/fib/fib_internal.h>
40 #include <vnet/fib/fib_urpf_list.h>
41 #include <vnet/fib/mpls_fib.h>
42 #include <vnet/fib/fib_path_ext.h>
43 #include <vnet/udp/udp_encap.h>
44 #include <vnet/bier/bier_fmask.h>
45 #include <vnet/bier/bier_table.h>
46 #include <vnet/bier/bier_imp.h>
47 #include <vnet/bier/bier_disp_table.h>
48
49 /**
50  * Enumeration of path types
51  */
52 typedef enum fib_path_type_t_ {
53     /**
54      * Marker. Add new types after this one.
55      */
56     FIB_PATH_TYPE_FIRST = 0,
57     /**
58      * Attached-nexthop. An interface and a nexthop are known.
59      */
60     FIB_PATH_TYPE_ATTACHED_NEXT_HOP = FIB_PATH_TYPE_FIRST,
61     /**
62      * attached. Only the interface is known.
63      */
64     FIB_PATH_TYPE_ATTACHED,
65     /**
66      * recursive. Only the next-hop is known.
67      */
68     FIB_PATH_TYPE_RECURSIVE,
69     /**
70      * special. nothing is known. so we drop.
71      */
72     FIB_PATH_TYPE_SPECIAL,
73     /**
74      * exclusive. user provided adj.
75      */
76     FIB_PATH_TYPE_EXCLUSIVE,
77     /**
78      * deag. Link to a lookup adj in the next table
79      */
80     FIB_PATH_TYPE_DEAG,
81     /**
82      * interface receive.
83      */
84     FIB_PATH_TYPE_INTF_RX,
85     /**
86      * Path resolves via a UDP encap object.
87      */
88     FIB_PATH_TYPE_UDP_ENCAP,
89     /**
90      * receive. it's for-us.
91      */
92     FIB_PATH_TYPE_RECEIVE,
93     /**
94      * bier-imp. it's via a BIER imposition.
95      */
96     FIB_PATH_TYPE_BIER_IMP,
97     /**
98      * bier-table. it's via a BIER ECMP-table.
99      */
100     FIB_PATH_TYPE_BIER_TABLE,
101     /**
102      * bier-fmask. it's via a BIER f-mask.
103      */
104     FIB_PATH_TYPE_BIER_FMASK,
105     /**
106      * via a DVR.
107      */
108     FIB_PATH_TYPE_DVR,
109 } __attribute__ ((packed)) fib_path_type_t;
110
111 #define FIB_PATH_TYPES {                                        \
112     [FIB_PATH_TYPE_ATTACHED_NEXT_HOP] = "attached-nexthop",     \
113     [FIB_PATH_TYPE_ATTACHED]          = "attached",             \
114     [FIB_PATH_TYPE_RECURSIVE]         = "recursive",            \
115     [FIB_PATH_TYPE_SPECIAL]           = "special",              \
116     [FIB_PATH_TYPE_EXCLUSIVE]         = "exclusive",            \
117     [FIB_PATH_TYPE_DEAG]              = "deag",                 \
118     [FIB_PATH_TYPE_INTF_RX]           = "intf-rx",              \
119     [FIB_PATH_TYPE_UDP_ENCAP]         = "udp-encap",            \
120     [FIB_PATH_TYPE_RECEIVE]           = "receive",              \
121     [FIB_PATH_TYPE_BIER_IMP]          = "bier-imp",             \
122     [FIB_PATH_TYPE_BIER_TABLE]        = "bier-table",           \
123     [FIB_PATH_TYPE_BIER_FMASK]        = "bier-fmask",           \
124     [FIB_PATH_TYPE_DVR]               = "dvr",                  \
125 }
126
127 /**
128  * Enumeration of path operational (i.e. derived) attributes
129  */
130 typedef enum fib_path_oper_attribute_t_ {
131     /**
132      * Marker. Add new types after this one.
133      */
134     FIB_PATH_OPER_ATTRIBUTE_FIRST = 0,
135     /**
136      * The path forms part of a recursive loop.
137      */
138     FIB_PATH_OPER_ATTRIBUTE_RECURSIVE_LOOP = FIB_PATH_OPER_ATTRIBUTE_FIRST,
139     /**
140      * The path is resolved
141      */
142     FIB_PATH_OPER_ATTRIBUTE_RESOLVED,
143     /**
144      * The path is attached, despite what the next-hop may say.
145      */
146     FIB_PATH_OPER_ATTRIBUTE_ATTACHED,
147     /**
148      * The path has become a permanent drop.
149      */
150     FIB_PATH_OPER_ATTRIBUTE_DROP,
151     /**
152      * Marker. Add new types before this one, then update it.
153      */
154     FIB_PATH_OPER_ATTRIBUTE_LAST = FIB_PATH_OPER_ATTRIBUTE_DROP,
155 } __attribute__ ((packed)) fib_path_oper_attribute_t;
156
157 /**
158  * The maximum number of path operational attributes
159  */
160 #define FIB_PATH_OPER_ATTRIBUTE_MAX (FIB_PATH_OPER_ATTRIBUTE_LAST + 1)
161
162 #define FIB_PATH_OPER_ATTRIBUTES {                                      \
163     [FIB_PATH_OPER_ATTRIBUTE_RECURSIVE_LOOP] = "recursive-loop",        \
164     [FIB_PATH_OPER_ATTRIBUTE_RESOLVED]       = "resolved",              \
165     [FIB_PATH_OPER_ATTRIBUTE_DROP]           = "drop",                  \
166 }
167
168 #define FOR_EACH_FIB_PATH_OPER_ATTRIBUTE(_item) \
169     for (_item = FIB_PATH_OPER_ATTRIBUTE_FIRST; \
170          _item <= FIB_PATH_OPER_ATTRIBUTE_LAST; \
171          _item++)
172
173 /**
174  * Path flags from the attributes
175  */
176 typedef enum fib_path_oper_flags_t_ {
177     FIB_PATH_OPER_FLAG_NONE = 0,
178     FIB_PATH_OPER_FLAG_RECURSIVE_LOOP = (1 << FIB_PATH_OPER_ATTRIBUTE_RECURSIVE_LOOP),
179     FIB_PATH_OPER_FLAG_DROP = (1 << FIB_PATH_OPER_ATTRIBUTE_DROP),
180     FIB_PATH_OPER_FLAG_RESOLVED = (1 << FIB_PATH_OPER_ATTRIBUTE_RESOLVED),
181     FIB_PATH_OPER_FLAG_ATTACHED = (1 << FIB_PATH_OPER_ATTRIBUTE_ATTACHED),
182 } __attribute__ ((packed)) fib_path_oper_flags_t;
183
184 /**
185  * A FIB path
186  */
187 typedef struct fib_path_t_ {
188     /**
189      * A path is a node in the FIB graph.
190      */
191     fib_node_t fp_node;
192
193     /**
194      * The index of the path-list to which this path belongs
195      */
196     u32 fp_pl_index;
197
198     /**
199      * This marks the start of the memory area used to hash
200      * the path
201      */
202     STRUCT_MARK(path_hash_start);
203
204     /**
205      * Configuration Flags
206      */
207     fib_path_cfg_flags_t fp_cfg_flags;
208
209     /**
210      * The type of the path. This is the selector for the union
211      */
212     fib_path_type_t fp_type;
213
214     /**
215      * The protocol of the next-hop, i.e. the address family of the
216      * next-hop's address. We can't derive this from the address itself
217      * since the address can be all zeros
218      */
219     dpo_proto_t fp_nh_proto;
220
221     /**
222      * UCMP [unnormalised] weight
223      */
224     u8 fp_weight;
225
226     /**
227      * A path preference. 0 is the best.
228      * Only paths of the best preference, that are 'up', are considered
229      * for forwarding.
230      */
231     u8 fp_preference;
232
233     /**
234      * per-type union of the data required to resolve the path
235      */
236     union {
237         struct {
238             /**
239              * The next-hop
240              */
241             ip46_address_t fp_nh;
242             /**
243              * The interface
244              */
245             u32 fp_interface;
246         } attached_next_hop;
247         struct {
248             /**
249              * The interface
250              */
251             u32 fp_interface;
252         } attached;
253         struct {
254             union
255             {
256                 /**
257                  * The next-hop
258                  */
259                 ip46_address_t fp_ip;
260                 struct {
261                     /**
262                      * The local label to resolve through.
263                      */
264                     mpls_label_t fp_local_label;
265                     /**
266                      * The EOS bit of the resolving label
267                      */
268                     mpls_eos_bit_t fp_eos;
269                 };
270             } fp_nh;
271             union {
272                 /**
273                  * The FIB table index in which to find the next-hop.
274                  */
275                 fib_node_index_t fp_tbl_id;
276                 /**
277                  * The BIER FIB the fmask is in
278                  */
279                 index_t fp_bier_fib;
280             };
281         } recursive;
282         struct {
283             /**
284              * BIER FMask ID
285              */
286             index_t fp_bier_fmask;
287         } bier_fmask;
288         struct {
289             /**
290              * The BIER table's ID
291              */
292             bier_table_id_t fp_bier_tbl;
293         } bier_table;
294         struct {
295             /**
296              * The BIER imposition object
297              * this is part of the path's key, since the index_t
298              * of an imposition object is the object's key.
299              */
300             index_t fp_bier_imp;
301         } bier_imp;
302         struct {
303             /**
304              * The FIB index in which to perform the next lookup
305              */
306             fib_node_index_t fp_tbl_id;
307             /**
308              * The RPF-ID to tag the packets with
309              */
310             fib_rpf_id_t fp_rpf_id;
311         } deag;
312         struct {
313         } special;
314         struct {
315             /**
316              * The user provided 'exclusive' DPO
317              */
318             dpo_id_t fp_ex_dpo;
319         } exclusive;
320         struct {
321             /**
322              * The interface on which the local address is configured
323              */
324             u32 fp_interface;
325             /**
326              * The next-hop
327              */
328             ip46_address_t fp_addr;
329         } receive;
330         struct {
331             /**
332              * The interface on which the packets will be input.
333              */
334             u32 fp_interface;
335         } intf_rx;
336         struct {
337             /**
338              * The UDP Encap object this path resolves through
339              */
340             u32 fp_udp_encap_id;
341         } udp_encap;
342         struct {
343             /**
344              * The classify table this path resolves through
345              */
346             u32 fp_classify_table_id;
347         } classify;
348         struct {
349             /**
350              * The interface
351              */
352             u32 fp_interface;
353         } dvr;
354     };
355     STRUCT_MARK(path_hash_end);
356
357     /**
358      * Members in this last section represent information that is
359      * derived during resolution. It should not be copied to new paths
360      * nor compared.
361      */
362
363     /**
364      * Operational Flags
365      */
366     fib_path_oper_flags_t fp_oper_flags;
367
368     union {
369         /**
370          * the resolving via fib. not part of the key union above, since it is
371          * not part of the path's hash.
372          */
373         fib_node_index_t fp_via_fib;
374         /**
375          * the resolving bier-table
376          */
377         index_t fp_via_bier_tbl;
378         /**
379          * the resolving bier-fmask
380          */
381         index_t fp_via_bier_fmask;
382     };
383
384     /**
385      * The Data-path objects through which this path resolves for IP.
386      */
387     dpo_id_t fp_dpo;
388
389     /**
390      * the index of this path in the parent's child list.
391      */
392     u32 fp_sibling;
393 } fib_path_t;
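/*
 * Note: the fields between path_hash_start and path_hash_end form the
 * path's key: fib_path_hash() hashes exactly this region and
 * fib_path_cmp_i() compares the same per-type data. Everything after
 * path_hash_end is state derived during resolution and is deliberately
 * excluded from the key.
 */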
394
395 /*
396  * Array of strings/names for the path types and attributes
397  */
398 static const char *fib_path_type_names[] = FIB_PATH_TYPES;
399 static const char *fib_path_oper_attribute_names[] = FIB_PATH_OPER_ATTRIBUTES;
400 static const char *fib_path_cfg_attribute_names[]  = FIB_PATH_CFG_ATTRIBUTES;
401
402 /*
403  * The memory pool from which we allocate all the paths
404  */
405 static fib_path_t *fib_path_pool;
406
407 /**
408  * the logger
409  */
410 vlib_log_class_t fib_path_logger;
411
412 /*
413  * Debug macro
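 * Logs at debug level, prefixing the message with the path's one-line
 * format, e.g. FIB_PATH_DBG(path, "create-copy:%d", path_index);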
414  */
415 #define FIB_PATH_DBG(_p, _fmt, _args...)                                \
416 {                                                                       \
417     vlib_log_debug (fib_path_logger,                                    \
418                     "[%U]: " _fmt,                                      \
419                     format_fib_path, fib_path_get_index(_p), 0,         \
420                     FIB_PATH_FORMAT_FLAGS_ONE_LINE,                     \
421                     ##_args);                                           \
422 }
423
424 static fib_path_t *
425 fib_path_get (fib_node_index_t index)
426 {
427     return (pool_elt_at_index(fib_path_pool, index));
428 }
429
430 static fib_node_index_t 
431 fib_path_get_index (fib_path_t *path)
432 {
433     return (path - fib_path_pool);
434 }
435
436 static fib_node_t *
437 fib_path_get_node (fib_node_index_t index)
438 {
439     return ((fib_node_t*)fib_path_get(index));
440 }
441
442 static fib_path_t*
443 fib_path_from_fib_node (fib_node_t *node)
444 {
445     ASSERT(FIB_NODE_TYPE_PATH == node->fn_type);
446     return ((fib_path_t*)node);
447 }
448
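/**
 * Format a path for show output. The varargs are: the path index, the
 * indent (u32) and fib_format_path_flags_t flags
 * (FIB_PATH_FORMAT_FLAGS_ONE_LINE selects single-line output).
 */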
449 u8 *
450 format_fib_path (u8 * s, va_list * args)
451 {
452     fib_node_index_t path_index = va_arg (*args, fib_node_index_t);
453     u32 indent = va_arg (*args, u32);
454     fib_format_path_flags_t flags =  va_arg (*args, fib_format_path_flags_t);
455     vnet_main_t * vnm = vnet_get_main();
456     fib_path_oper_attribute_t oattr;
457     fib_path_cfg_attribute_t cattr;
458     fib_path_t *path;
459     const char *eol;
460
461     if (flags & FIB_PATH_FORMAT_FLAGS_ONE_LINE)
462     {
463         eol = "";
464     }
465     else
466     {
467         eol = "\n";
468     }
469
470     path = fib_path_get(path_index);
471
472     s = format (s, "%Upath:[%d] ", format_white_space, indent,
473                 fib_path_get_index(path));
474     s = format (s, "pl-index:%d ", path->fp_pl_index);
475     s = format (s, "%U ", format_dpo_proto, path->fp_nh_proto);
476     s = format (s, "weight=%d ", path->fp_weight);
477     s = format (s, "pref=%d ", path->fp_preference);
478     s = format (s, "%s: ", fib_path_type_names[path->fp_type]);
479     if (FIB_PATH_OPER_FLAG_NONE != path->fp_oper_flags) {
480         s = format(s, " oper-flags:");
481         FOR_EACH_FIB_PATH_OPER_ATTRIBUTE(oattr) {
482             if ((1<<oattr) & path->fp_oper_flags) {
483                 s = format (s, "%s,", fib_path_oper_attribute_names[oattr]);
484             }
485         }
486     }
487     if (FIB_PATH_CFG_FLAG_NONE != path->fp_cfg_flags) {
488         s = format(s, " cfg-flags:");
489         FOR_EACH_FIB_PATH_CFG_ATTRIBUTE(cattr) {
490             if ((1<<cattr) & path->fp_cfg_flags) {
491                 s = format (s, "%s,", fib_path_cfg_attribute_names[cattr]);
492             }
493         }
494     }
495     if (!(flags & FIB_PATH_FORMAT_FLAGS_ONE_LINE))
496         s = format(s, "\n%U", format_white_space, indent+2);
497
498     switch (path->fp_type)
499     {
500     case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
501         s = format (s, "%U", format_ip46_address,
502                     &path->attached_next_hop.fp_nh,
503                     IP46_TYPE_ANY);
504         if (path->fp_oper_flags & FIB_PATH_OPER_FLAG_DROP)
505         {
506             s = format (s, " if_index:%d", path->attached_next_hop.fp_interface);
507         }
508         else
509         {
510             s = format (s, " %U",
511                         format_vnet_sw_interface_name,
512                         vnm,
513                         vnet_get_sw_interface(
514                             vnm,
515                             path->attached_next_hop.fp_interface));
516             if (vnet_sw_interface_is_p2p(vnet_get_main(),
517                                          path->attached_next_hop.fp_interface))
518             {
519                 s = format (s, " (p2p)");
520             }
521         }
522         if (!dpo_id_is_valid(&path->fp_dpo))
523         {
524             s = format(s, "%s%Uunresolved", eol, format_white_space, indent+2);
525         }
526         else
527         {
528             s = format(s, "%s%U%U", eol,
529                        format_white_space, indent,
530                        format_dpo_id,
531                        &path->fp_dpo, 13);
532         }
533         break;
534     case FIB_PATH_TYPE_ATTACHED:
535         if (path->fp_oper_flags & FIB_PATH_OPER_FLAG_DROP)
536         {
537             s = format (s, "if_index:%d", path->attached.fp_interface);
538         }
539         else
540         {
541             s = format (s, " %U",
542                         format_vnet_sw_interface_name,
543                         vnm,
544                         vnet_get_sw_interface(
545                             vnm,
546                             path->attached.fp_interface));
547         }
548         break;
549     case FIB_PATH_TYPE_RECURSIVE:
550         if (DPO_PROTO_MPLS == path->fp_nh_proto)
551         {
552             s = format (s, "via %U %U",
553                         format_mpls_unicast_label,
554                         path->recursive.fp_nh.fp_local_label,
555                         format_mpls_eos_bit,
556                         path->recursive.fp_nh.fp_eos);
557         }
558         else
559         {
560             s = format (s, "via %U",
561                         format_ip46_address,
562                         &path->recursive.fp_nh.fp_ip,
563                         IP46_TYPE_ANY);
564         }
565         s = format (s, " in fib:%d",
566                     path->recursive.fp_tbl_id);
567         s = format (s, " via-fib:%d",
568                     path->fp_via_fib);
569         s = format (s, " via-dpo:[%U:%d]",
570                     format_dpo_type, path->fp_dpo.dpoi_type, 
571                     path->fp_dpo.dpoi_index);
572
573         break;
574     case FIB_PATH_TYPE_UDP_ENCAP:
575         s = format (s, "UDP-encap ID:%d", path->udp_encap.fp_udp_encap_id);
576         break;
577     case FIB_PATH_TYPE_BIER_TABLE:
578         s = format (s, "via bier-table:[%U]",
579                     format_bier_table_id,
580                     &path->bier_table.fp_bier_tbl);
581         s = format (s, " via-dpo:[%U:%d]",
582                     format_dpo_type, path->fp_dpo.dpoi_type,
583                     path->fp_dpo.dpoi_index);
584         break;
585     case FIB_PATH_TYPE_BIER_FMASK:
586         s = format (s, "via-fmask:%d", path->bier_fmask.fp_bier_fmask); 
587         s = format (s, " via-dpo:[%U:%d]",
588                     format_dpo_type, path->fp_dpo.dpoi_type, 
589                     path->fp_dpo.dpoi_index);
590         break;
591     case FIB_PATH_TYPE_BIER_IMP:
592         s = format (s, "via %U", format_bier_imp,
593                     path->bier_imp.fp_bier_imp, 0, BIER_SHOW_BRIEF);
594         break;
595     case FIB_PATH_TYPE_DVR:
596         s = format (s, " %U",
597                     format_vnet_sw_interface_name,
598                     vnm,
599                     vnet_get_sw_interface(
600                         vnm,
601                         path->dvr.fp_interface));
602         break;
603     case FIB_PATH_TYPE_DEAG:
604         s = format (s, " %sfib-index:%d",
605                     (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_RPF_ID ?  "m" : ""),
606                     path->deag.fp_tbl_id);
607         break;
608     case FIB_PATH_TYPE_RECEIVE:
609     case FIB_PATH_TYPE_INTF_RX:
610     case FIB_PATH_TYPE_SPECIAL:
611     case FIB_PATH_TYPE_EXCLUSIVE:
612         if (dpo_id_is_valid(&path->fp_dpo))
613         {
614             s = format(s, "%U", format_dpo_id,
615                        &path->fp_dpo, indent+2);
616         }
617         break;
618     }
619     return (s);
620 }
621
622 /*
623  * fib_path_last_lock_gone
624  *
625  * We don't share paths, we share path lists, so the [un]lock functions
626  * are no-ops
627  */
628 static void
629 fib_path_last_lock_gone (fib_node_t *node)
630 {
631     ASSERT(0);
632 }
633
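/*
 * fib_path_attached_next_hop_get_adj
 *
 * Find or create the neighbour adjacency for the path's
 * {interface, next-hop} and set the supplied DPO to it. The path index
 * is saved and re-fetched around adj_nbr_add_or_lock(), presumably
 * because creating the adjacency can re-allocate the path pool and
 * invalidate the input pointer; callers must use the returned pointer.
 */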
634 static fib_path_t*
635 fib_path_attached_next_hop_get_adj (fib_path_t *path,
636                                     vnet_link_t link,
637                                     dpo_id_t *dpo)
638 {
639     fib_node_index_t fib_path_index;
640     fib_protocol_t nh_proto;
641     adj_index_t ai;
642
643     fib_path_index = fib_path_get_index(path);
644     nh_proto = dpo_proto_to_fib(path->fp_nh_proto);
645
646     if (vnet_sw_interface_is_p2p(vnet_get_main(),
647                                  path->attached_next_hop.fp_interface))
648     {
649         /*
650          * if the interface is p2p then the adj for the specific
651          * neighbour on that link will never exist. on p2p links
652          * the subnet address (the attached route) links to the
653          * auto-adj (see below), we want that adj here too.
654          */
655         ai = adj_nbr_add_or_lock(nh_proto, link, &zero_addr,
656                                  path->attached_next_hop.fp_interface);
657     }
658     else
659     {
660         ai = adj_nbr_add_or_lock(nh_proto, link,
661                                  &path->attached_next_hop.fp_nh,
662                                  path->attached_next_hop.fp_interface);
663     }
664
665     dpo_set(dpo, DPO_ADJACENCY, vnet_link_to_dpo_proto(link), ai);
666     adj_unlock(ai);
667
668     return (fib_path_get(fib_path_index));
669 }
670
671 static void
672 fib_path_attached_next_hop_set (fib_path_t *path)
673 {
674     /*
675      * resolve directly via the adjacency described by the
676      * interface and next-hop
677      */
678     path = fib_path_attached_next_hop_get_adj(path,
679                                               dpo_proto_to_link(path->fp_nh_proto),
680                                               &path->fp_dpo);
681
682     ASSERT(dpo_is_adj(&path->fp_dpo));
683
684     /*
685      * become a child of the adjacency so we receive updates
686      * when its rewrite changes
687      */
688     path->fp_sibling = adj_child_add(path->fp_dpo.dpoi_index,
689                                      FIB_NODE_TYPE_PATH,
690                                      fib_path_get_index(path));
691
692     if (!vnet_sw_interface_is_up(vnet_get_main(),
693                                  path->attached_next_hop.fp_interface) ||
694         !adj_is_up(path->fp_dpo.dpoi_index))
695     {
696         path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
697     }
698 }
699
700 static void
701 fib_path_attached_get_adj (fib_path_t *path,
702                            vnet_link_t link,
703                            dpo_id_t *dpo)
704 {
705     fib_protocol_t nh_proto;
706
707     nh_proto = dpo_proto_to_fib(path->fp_nh_proto);
708
709     if (vnet_sw_interface_is_p2p(vnet_get_main(),
710                                  path->attached.fp_interface))
711     {
712         /*
713          * point-2-point interfaces do not require a glean, since
714          * there is nothing to ARP. Install a rewrite/nbr adj instead
715          */
716         adj_index_t ai;
717
718         ai = adj_nbr_add_or_lock(nh_proto, link, &zero_addr,
719                                  path->attached.fp_interface);
720
721         dpo_set(dpo, DPO_ADJACENCY, vnet_link_to_dpo_proto(link), ai);
722         adj_unlock(ai);
723     }
724     else if (vnet_sw_interface_is_nbma(vnet_get_main(),
725                                        path->attached.fp_interface))
726     {
727         dpo_copy(dpo, drop_dpo_get(path->fp_nh_proto));
728     }
729     else
730     {
731         adj_index_t ai;
732
733         ai = adj_glean_add_or_lock(nh_proto, link,
734                                    path->attached.fp_interface,
735                                    NULL);
736         dpo_set(dpo, DPO_ADJACENCY_GLEAN, vnet_link_to_dpo_proto(link), ai);
737         adj_unlock(ai);
738     }
739 }
740
741 /*
742  * create or update the path's recursive adj
743  */
744 static void
745 fib_path_recursive_adj_update (fib_path_t *path,
746                                fib_forward_chain_type_t fct,
747                                dpo_id_t *dpo)
748 {
749     dpo_id_t via_dpo = DPO_INVALID;
750
751     /*
752      * get the DPO to resolve through from the via-entry
753      */
754     fib_entry_contribute_forwarding(path->fp_via_fib,
755                                     fct,
756                                     &via_dpo);
757
758
759     /*
760      * hope for the best - clear if restrictions apply.
761      */
762     path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RESOLVED;
763
764     /*
765      * Validate any recursion constraints and over-ride the via
766      * adj if not met
767      */
768     if (path->fp_oper_flags & FIB_PATH_OPER_FLAG_RECURSIVE_LOOP)
769     {
770         path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
771         dpo_copy(&via_dpo, drop_dpo_get(path->fp_nh_proto));
772     }
773     else if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_RESOLVE_HOST)
774     {
775         /*
776          * the via FIB must be a host route.
777          * note the via FIB just added will always be a host route
778          * since it is an RR source added host route. So what we need to
779          * check is whether the route has other sources. If it does then
780          * some other source has added it as a host route. If it doesn't
781          * then it was added only here and inherits forwarding from a cover.
782          * the cover is not a host route.
783          * The RR source is the lowest priority source, so we check if it
784          * is the best. if it is there are no other sources.
785          */
786         if (fib_entry_get_best_source(path->fp_via_fib) >= FIB_SOURCE_RR)
787         {
788             path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
789             dpo_copy(&via_dpo, drop_dpo_get(path->fp_nh_proto));
790
791             /*
792              * PIC edge trigger. let the load-balance maps know
793              */
794             load_balance_map_path_state_change(fib_path_get_index(path));
795         }
796     }
797     else if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_RESOLVE_ATTACHED)
798     {
799         /*
800          * RR source entries inherit the flags from the cover, so
801          * we can check the via directly
802          */
803         if (!(FIB_ENTRY_FLAG_ATTACHED & fib_entry_get_flags(path->fp_via_fib)))
804         {
805             path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
806             dpo_copy(&via_dpo, drop_dpo_get(path->fp_nh_proto));
807
808             /*
809              * PIC edge trigger. let the load-balance maps know
810              */
811             load_balance_map_path_state_change(fib_path_get_index(path));
812         }
813     }
814     /*
815      * check for over-riding factors on the FIB entry itself
816      */
817     if (!fib_entry_is_resolved(path->fp_via_fib))
818     {
819         path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
820         dpo_copy(&via_dpo, drop_dpo_get(path->fp_nh_proto));
821
822         /*
823          * PIC edge trigger. let the load-balance maps know
824          */
825         load_balance_map_path_state_change(fib_path_get_index(path));
826     }
827
828     /*
829      * If this path is contributing a drop, then it's not resolved
830      */
831     if (dpo_is_drop(&via_dpo) || load_balance_is_drop(&via_dpo))
832     {
833         path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
834     }
835
836     /*
837      * update the path's contributed DPO
838      */
839     dpo_copy(dpo, &via_dpo);
840
841     FIB_PATH_DBG(path, "recursive update:");
842
843     dpo_reset(&via_dpo);
844 }
845
846 /*
847  * re-evaluate the forwarding state for a via fmask path
848  */
849 static void
850 fib_path_bier_fmask_update (fib_path_t *path,
851                             dpo_id_t *dpo)
852 {
853     bier_fmask_contribute_forwarding(path->bier_fmask.fp_bier_fmask, dpo);
854
855     /*
856      * if we are stacking on the drop, then the path is not resolved
857      */
858     if (dpo_is_drop(dpo))
859     {
860         path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
861     }
862     else
863     {
864         path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RESOLVED;
865     }
866 }
867
868 /*
869  * fib_path_is_permanent_drop
870  *
871  * Return !0 if the path is configured to permanently drop,
872  * despite other attributes.
873  */
874 static int
875 fib_path_is_permanent_drop (fib_path_t *path)
876 {
877     return ((path->fp_cfg_flags & FIB_PATH_CFG_FLAG_DROP) ||
878             (path->fp_oper_flags & FIB_PATH_OPER_FLAG_DROP));
879 }
880
881 /*
882  * fib_path_unresolve
883  *
884  * Remove our dependency on the resolution target
885  */
886 static void
887 fib_path_unresolve (fib_path_t *path)
888 {
889     /*
890      * the forced drop path does not need unresolving
891      */
892     if (fib_path_is_permanent_drop(path))
893     {
894         return;
895     }
896
897     switch (path->fp_type)
898     {
899     case FIB_PATH_TYPE_RECURSIVE:
900         if (FIB_NODE_INDEX_INVALID != path->fp_via_fib)
901         {
902             fib_entry_child_remove(path->fp_via_fib,
903                                    path->fp_sibling);
904             fib_table_entry_special_remove(path->recursive.fp_tbl_id,
905                                            fib_entry_get_prefix(path->fp_via_fib),
906                                            FIB_SOURCE_RR);
907             fib_table_unlock(path->recursive.fp_tbl_id,
908                              dpo_proto_to_fib(path->fp_nh_proto),
909                              FIB_SOURCE_RR);
910             path->fp_via_fib = FIB_NODE_INDEX_INVALID;
911         }
912         break;
913     case FIB_PATH_TYPE_BIER_FMASK:
914         bier_fmask_child_remove(path->fp_via_bier_fmask,
915                                 path->fp_sibling);
916         break;
917     case FIB_PATH_TYPE_BIER_IMP:
918         bier_imp_unlock(path->fp_dpo.dpoi_index);
919         break;
920     case FIB_PATH_TYPE_BIER_TABLE:
921         bier_table_ecmp_unlock(path->fp_via_bier_tbl);
922         break;
923     case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
924     case FIB_PATH_TYPE_ATTACHED:
925         if (dpo_is_adj(&path->fp_dpo))
926             adj_child_remove(path->fp_dpo.dpoi_index,
927                              path->fp_sibling);
928         break;
929     case FIB_PATH_TYPE_UDP_ENCAP:
930         udp_encap_unlock(path->fp_dpo.dpoi_index);
931         break;
932     case FIB_PATH_TYPE_EXCLUSIVE:
933         dpo_reset(&path->exclusive.fp_ex_dpo);
934         break;
935     case FIB_PATH_TYPE_SPECIAL:
936     case FIB_PATH_TYPE_RECEIVE:
937     case FIB_PATH_TYPE_INTF_RX:
938     case FIB_PATH_TYPE_DEAG:
939     case FIB_PATH_TYPE_DVR:
940         /*
941          * these hold only the path's DPO, which is reset below.
942          */
943         break;
944     }
945
946     /*
947      * release the adj we were holding and pick up the
948      * drop just in case.
949      */
950     dpo_reset(&path->fp_dpo);
951     path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
952
953     return;
954 }
955
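/*
 * Map a path to the forwarding chain type it contributes: recursive MPLS
 * paths via an EOS label yield MPLS_EOS, other MPLS paths MPLS_NON_EOS;
 * for all other protocols the chain type follows the next-hop's DPO proto.
 */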
956 static fib_forward_chain_type_t
957 fib_path_to_chain_type (const fib_path_t *path)
958 {
959     if (DPO_PROTO_MPLS == path->fp_nh_proto)
960     {
961         if (FIB_PATH_TYPE_RECURSIVE == path->fp_type &&
962             MPLS_EOS == path->recursive.fp_nh.fp_eos)
963         {
964             return (FIB_FORW_CHAIN_TYPE_MPLS_EOS);
965         }
966         else
967         {
968             return (FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS);
969         }
970     }
971     else
972     {
973         return (fib_forw_chain_type_from_dpo_proto(path->fp_nh_proto));
974     }
975 }
976
977 /*
978  * fib_path_back_walk_notify
979  *
980  * A back walk has reached this path.
981  */
982 static fib_node_back_walk_rc_t
983 fib_path_back_walk_notify (fib_node_t *node,
984                            fib_node_back_walk_ctx_t *ctx)
985 {
986     fib_path_t *path;
987
988     path = fib_path_from_fib_node(node);
989
990     FIB_PATH_DBG(path, "bw:%U",
991                  format_fib_node_bw_reason, ctx->fnbw_reason);
992
993     switch (path->fp_type)
994     {
995     case FIB_PATH_TYPE_RECURSIVE:
996         if (FIB_NODE_BW_REASON_FLAG_EVALUATE & ctx->fnbw_reason)
997         {
998             /*
999              * modify the recursive adjacency to use the new forwarding
1000              * of the via-fib.
1001              * this update is visible to packets in flight in the DP.
1002              */
1003             fib_path_recursive_adj_update(
1004                 path,
1005                 fib_path_to_chain_type(path),
1006                 &path->fp_dpo);
1007         }
1008         if ((FIB_NODE_BW_REASON_FLAG_ADJ_UPDATE & ctx->fnbw_reason) ||
1009             (FIB_NODE_BW_REASON_FLAG_ADJ_DOWN   & ctx->fnbw_reason))
1010         {
1011             /*
1012              * ADJ updates (complete<->incomplete) do not need to propagate to
1013              * recursive entries.
1014              * The only reason it's needed as far back as here is that the adj
1015              * and the incomplete adj are a different DPO type, so the LBs need
1016              * to re-stack.
1017              * If this walk was quashed in the fib_entry, then any non-fib_path
1018              * children (like tunnels that collapse out the LB when they stack)
1019              * would not see the update.
1020              */
1021             return (FIB_NODE_BACK_WALK_CONTINUE);
1022         }
1023         break;
1024     case FIB_PATH_TYPE_BIER_FMASK:
1025         if (FIB_NODE_BW_REASON_FLAG_EVALUATE & ctx->fnbw_reason)
1026         {
1027             /*
1028              * update to use the BIER fmask's new forwarding
1029              */
1030             fib_path_bier_fmask_update(path, &path->fp_dpo);
1031         }
1032         if ((FIB_NODE_BW_REASON_FLAG_ADJ_UPDATE & ctx->fnbw_reason) ||
1033             (FIB_NODE_BW_REASON_FLAG_ADJ_DOWN   & ctx->fnbw_reason))
1034         {
1035             /*
1036              * ADJ updates (complete<->incomplete) do not need to propagate to
1037              * recursive entries.
1038              * The only reason it's needed as far back as here is that the adj
1039              * and the incomplete adj are a different DPO type, so the LBs need
1040              * to re-stack.
1041              * If this walk was quashed in the fib_entry, then any non-fib_path
1042              * children (like tunnels that collapse out the LB when they stack)
1043              * would not see the update.
1044              */
1045             return (FIB_NODE_BACK_WALK_CONTINUE);
1046         }
1047         break;
1048     case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
1049         /*
1050          * FIXME comment
1051          * ADJ_UPDATE backwalks pass silently through here and up to
1052          * the path-list when the multipath adj collapse occurs.
1053          * The reason we do this is the assumption that VPP
1054          * runs in an environment where the Control-Plane is remote
1055          * and hence reacts slowly to link up down. In order to remove
1056          * this down link from the ECMP set quickly, we back-walk.
1057          * VPP also has dedicated CPUs, so we are not stealing resources
1058          * from the CP to do so.
1059          */
1060         if (FIB_NODE_BW_REASON_FLAG_INTERFACE_UP & ctx->fnbw_reason)
1061         {
1062             if (path->fp_oper_flags & FIB_PATH_OPER_FLAG_RESOLVED)
1063             {
1064                 /*
1065                  * already resolved. no need to walk back again
1066                  */
1067                 return (FIB_NODE_BACK_WALK_CONTINUE);
1068             }
1069             path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RESOLVED;
1070         }
1071         if (FIB_NODE_BW_REASON_FLAG_INTERFACE_DOWN & ctx->fnbw_reason)
1072         {
1073             if (!(path->fp_oper_flags & FIB_PATH_OPER_FLAG_RESOLVED))
1074             {
1075                 /*
1076                  * already unresolved. no need to walk back again
1077                  */
1078                 return (FIB_NODE_BACK_WALK_CONTINUE);
1079             }
1080             path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
1081         }
1082         if (FIB_NODE_BW_REASON_FLAG_INTERFACE_DELETE & ctx->fnbw_reason)
1083         {
1084             /*
1085              * The interface this path resolves through has been deleted.
1086              * This will leave the path in a permanent drop state. The route
1087              * needs to be removed and readded (and hence the path-list deleted)
1088              * before it can forward again.
1089              */
1090             fib_path_unresolve(path);
1091             path->fp_oper_flags |= FIB_PATH_OPER_FLAG_DROP;
1092         }
1093         if (FIB_NODE_BW_REASON_FLAG_ADJ_UPDATE & ctx->fnbw_reason)
1094         {
1095             /*
1096              * restack the DPO to pick up the correct DPO sub-type
1097              */
1098             uword if_is_up;
1099
1100             if_is_up = vnet_sw_interface_is_up(
1101                            vnet_get_main(),
1102                            path->attached_next_hop.fp_interface);
1103
1104             path = fib_path_attached_next_hop_get_adj(
1105                 path,
1106                 dpo_proto_to_link(path->fp_nh_proto),
1107                 &path->fp_dpo);
1108
1109             path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
1110             if (if_is_up && adj_is_up(path->fp_dpo.dpoi_index))
1111             {
1112                 path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RESOLVED;
1113             }
1114
1115             if (!if_is_up)
1116             {
1117                 /*
1118                  * If the interface is not up there is no reason to walk
1119                  * back to children. if we did they would only evaluate
1120                  * that this path is unresolved and hence it would
1121                  * not contribute the adjacency - so it would be wasted
1122                  * CPU time.
1123                  */
1124                 return (FIB_NODE_BACK_WALK_CONTINUE);
1125             }
1126         }
1127         if (FIB_NODE_BW_REASON_FLAG_ADJ_DOWN & ctx->fnbw_reason)
1128         {
1129             if (!(path->fp_oper_flags & FIB_PATH_OPER_FLAG_RESOLVED))
1130             {
1131                 /*
1132                  * already unresolved. no need to walk back again
1133                  */
1134                 return (FIB_NODE_BACK_WALK_CONTINUE);
1135             }
1136             /*
1137              * the adj has gone down. the path is no longer resolved.
1138              */
1139             path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
1140         }
1141         break;
1142     case FIB_PATH_TYPE_ATTACHED:
1143     case FIB_PATH_TYPE_DVR:
1144         /*
1145          * FIXME; this could schedule a lower priority walk, since attached
1146          * routes are not usually in ECMP configurations so the backwalk to
1147          * the FIB entry does not need to be high priority
1148          */
1149         if (FIB_NODE_BW_REASON_FLAG_INTERFACE_UP & ctx->fnbw_reason)
1150         {
1151             path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RESOLVED;
1152         }
1153         if (FIB_NODE_BW_REASON_FLAG_INTERFACE_DOWN & ctx->fnbw_reason)
1154         {
1155             path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
1156         }
1157         if (FIB_NODE_BW_REASON_FLAG_INTERFACE_DELETE & ctx->fnbw_reason)
1158         {
1159             fib_path_unresolve(path);
1160             path->fp_oper_flags |= FIB_PATH_OPER_FLAG_DROP;
1161         }
1162         break;
1163     case FIB_PATH_TYPE_UDP_ENCAP:
1164     {
1165         dpo_id_t via_dpo = DPO_INVALID;
1166
1167         /*
1168          * hope for the best - clear if restrictions apply.
1169          */
1170         path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RESOLVED;
1171
1172         udp_encap_contribute_forwarding(path->udp_encap.fp_udp_encap_id,
1173                                         path->fp_nh_proto,
1174                                         &via_dpo);
1175         /*
1176          * If this path is contributing a drop, then it's not resolved
1177          */
1178         if (dpo_is_drop(&via_dpo) || load_balance_is_drop(&via_dpo))
1179         {
1180             path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
1181         }
1182
1183         /*
1184          * update the path's contributed DPO
1185          */
1186         dpo_copy(&path->fp_dpo, &via_dpo);
1187         dpo_reset(&via_dpo);
1188         break;
1189     }
1190     case FIB_PATH_TYPE_INTF_RX:
1191         ASSERT(0);
1192     case FIB_PATH_TYPE_DEAG:
1193         /*
1194          * FIXME When VRF delete is allowed this will need a poke.
1195          */
1196     case FIB_PATH_TYPE_SPECIAL:
1197     case FIB_PATH_TYPE_RECEIVE:
1198     case FIB_PATH_TYPE_EXCLUSIVE:
1199     case FIB_PATH_TYPE_BIER_TABLE:
1200     case FIB_PATH_TYPE_BIER_IMP:
1201         /*
1202          * these path types have no parents. so to be
1203          * walked from one is unexpected.
1204          */
1205         ASSERT(0);
1206         break;
1207     }
1208
1209     /*
1210      * propagate the backwalk further to the path-list
1211      */
1212     fib_path_list_back_walk(path->fp_pl_index, ctx);
1213
1214     return (FIB_NODE_BACK_WALK_CONTINUE);
1215 }
1216
1217 static void
1218 fib_path_memory_show (void)
1219 {
1220     fib_show_memory_usage("Path",
1221                           pool_elts(fib_path_pool),
1222                           pool_len(fib_path_pool),
1223                           sizeof(fib_path_t));
1224 }
1225
1226 /*
1227  * The FIB path's graph node virtual function table
1228  */
1229 static const fib_node_vft_t fib_path_vft = {
1230     .fnv_get = fib_path_get_node,
1231     .fnv_last_lock = fib_path_last_lock_gone,
1232     .fnv_back_walk = fib_path_back_walk_notify,
1233     .fnv_mem_show = fib_path_memory_show,
1234 };
1235
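/*
 * Translate a route-path's FIB_ROUTE_PATH_* flags into the path's
 * internal FIB_PATH_CFG_FLAG_* configuration flags.
 */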
1236 static fib_path_cfg_flags_t
1237 fib_path_route_flags_to_cfg_flags (const fib_route_path_t *rpath)
1238 {
1239     fib_path_cfg_flags_t cfg_flags = FIB_PATH_CFG_FLAG_NONE;
1240
1241     if (rpath->frp_flags & FIB_ROUTE_PATH_POP_PW_CW)
1242         cfg_flags |= FIB_PATH_CFG_FLAG_POP_PW_CW;
1243     if (rpath->frp_flags & FIB_ROUTE_PATH_RESOLVE_VIA_HOST)
1244         cfg_flags |= FIB_PATH_CFG_FLAG_RESOLVE_HOST;
1245     if (rpath->frp_flags & FIB_ROUTE_PATH_RESOLVE_VIA_ATTACHED)
1246         cfg_flags |= FIB_PATH_CFG_FLAG_RESOLVE_ATTACHED;
1247     if (rpath->frp_flags & FIB_ROUTE_PATH_LOCAL)
1248         cfg_flags |= FIB_PATH_CFG_FLAG_LOCAL;
1249     if (rpath->frp_flags & FIB_ROUTE_PATH_ATTACHED)
1250         cfg_flags |= FIB_PATH_CFG_FLAG_ATTACHED;
1251     if (rpath->frp_flags & FIB_ROUTE_PATH_INTF_RX)
1252         cfg_flags |= FIB_PATH_CFG_FLAG_INTF_RX;
1253     if (rpath->frp_flags & FIB_ROUTE_PATH_RPF_ID)
1254         cfg_flags |= FIB_PATH_CFG_FLAG_RPF_ID;
1255     if (rpath->frp_flags & FIB_ROUTE_PATH_EXCLUSIVE)
1256         cfg_flags |= FIB_PATH_CFG_FLAG_EXCLUSIVE;
1257     if (rpath->frp_flags & FIB_ROUTE_PATH_DROP)
1258         cfg_flags |= FIB_PATH_CFG_FLAG_DROP;
1259     if (rpath->frp_flags & FIB_ROUTE_PATH_SOURCE_LOOKUP)
1260         cfg_flags |= FIB_PATH_CFG_FLAG_DEAG_SRC;
1261     if (rpath->frp_flags & FIB_ROUTE_PATH_ICMP_UNREACH)
1262         cfg_flags |= FIB_PATH_CFG_FLAG_ICMP_UNREACH;
1263     if (rpath->frp_flags & FIB_ROUTE_PATH_ICMP_PROHIBIT)
1264         cfg_flags |= FIB_PATH_CFG_FLAG_ICMP_PROHIBIT;
1265
1266     return (cfg_flags);
1267 }
1268
1269 /*
1270  * fib_path_create
1271  *
1272  * Create and initialise a new path object.
1273  * return the index of the path.
1274  */
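/*
 * Illustrative caller sketch (not from this file; 'nh_addr', 'sw_if_index'
 * and 'pl_index' are hypothetical). An attached-nexthop path is described
 * by a fib_route_path_t carrying a next-hop address and an interface:
 *
 *   fib_route_path_t rpath = {
 *       .frp_proto = DPO_PROTO_IP4,
 *       .frp_addr = nh_addr,
 *       .frp_sw_if_index = sw_if_index,
 *       .frp_weight = 1,
 *       .frp_preference = 0,
 *   };
 *   fib_node_index_t pi = fib_path_create(pl_index, &rpath);
 *
 * In practice paths are typically created by their owning path-list
 * rather than directly.
 */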
1275 fib_node_index_t
1276 fib_path_create (fib_node_index_t pl_index,
1277                  const fib_route_path_t *rpath)
1278 {
1279     fib_path_t *path;
1280
1281     pool_get(fib_path_pool, path);
1282     clib_memset(path, 0, sizeof(*path));
1283
1284     fib_node_init(&path->fp_node,
1285                   FIB_NODE_TYPE_PATH);
1286
1287     dpo_reset(&path->fp_dpo);
1288     path->fp_pl_index = pl_index;
1289     path->fp_nh_proto = rpath->frp_proto;
1290     path->fp_via_fib = FIB_NODE_INDEX_INVALID;
1291     path->fp_weight = rpath->frp_weight;
1292     if (0 == path->fp_weight)
1293     {
1294         /*
1295          * a weight of 0 is a meaningless value. We could either reject it, and thus force
1296          * clients to always use 1, or we can accept it and fix it up appropriately.
1297          */
1298         path->fp_weight = 1;
1299     }
1300     path->fp_preference = rpath->frp_preference;
1301     path->fp_cfg_flags = fib_path_route_flags_to_cfg_flags(rpath);
1302
1303     /*
1304      * deduce the path's type from the parameters and save what is needed.
1305      */
1306     if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_LOCAL)
1307     {
1308         path->fp_type = FIB_PATH_TYPE_RECEIVE;
1309         path->receive.fp_interface = rpath->frp_sw_if_index;
1310         path->receive.fp_addr = rpath->frp_addr;
1311     }
1312     else if (rpath->frp_flags & FIB_ROUTE_PATH_UDP_ENCAP)
1313     {
1314         path->fp_type = FIB_PATH_TYPE_UDP_ENCAP;
1315         path->udp_encap.fp_udp_encap_id = rpath->frp_udp_encap_id;
1316     }
1317     else if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_INTF_RX)
1318     {
1319         path->fp_type = FIB_PATH_TYPE_INTF_RX;
1320         path->intf_rx.fp_interface = rpath->frp_sw_if_index;
1321     }
1322     else if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_RPF_ID)
1323     {
1324         path->fp_type = FIB_PATH_TYPE_DEAG;
1325         path->deag.fp_tbl_id = rpath->frp_fib_index;
1326         path->deag.fp_rpf_id = rpath->frp_rpf_id;
1327     }
1328     else if (rpath->frp_flags & FIB_ROUTE_PATH_BIER_FMASK)
1329     {
1330         path->fp_type = FIB_PATH_TYPE_BIER_FMASK;
1331         path->bier_fmask.fp_bier_fmask = rpath->frp_bier_fmask;
1332     }
1333     else if (rpath->frp_flags & FIB_ROUTE_PATH_BIER_IMP)
1334     {
1335         path->fp_type = FIB_PATH_TYPE_BIER_IMP;
1336         path->bier_imp.fp_bier_imp = rpath->frp_bier_imp;
1337     }
1338     else if (rpath->frp_flags & FIB_ROUTE_PATH_BIER_TABLE)
1339     {
1340         path->fp_type = FIB_PATH_TYPE_BIER_TABLE;
1341         path->bier_table.fp_bier_tbl = rpath->frp_bier_tbl;
1342     }
1343     else if (rpath->frp_flags & FIB_ROUTE_PATH_DEAG)
1344     {
1345         path->fp_type = FIB_PATH_TYPE_DEAG;
1346         path->deag.fp_tbl_id = rpath->frp_fib_index;
1347     }
1348     else if (rpath->frp_flags & FIB_ROUTE_PATH_DVR)
1349     {
1350         path->fp_type = FIB_PATH_TYPE_DVR;
1351         path->dvr.fp_interface = rpath->frp_sw_if_index;
1352     }
1353     else if (rpath->frp_flags & FIB_ROUTE_PATH_EXCLUSIVE)
1354     {
1355         path->fp_type = FIB_PATH_TYPE_EXCLUSIVE;
1356         dpo_copy(&path->exclusive.fp_ex_dpo, &rpath->dpo);
1357     }
1358     else if ((path->fp_cfg_flags & FIB_PATH_CFG_FLAG_ICMP_PROHIBIT) ||
1359         (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_ICMP_UNREACH))
1360     {
1361         path->fp_type = FIB_PATH_TYPE_SPECIAL;
1362     }
1363     else if ((path->fp_cfg_flags & FIB_PATH_CFG_FLAG_CLASSIFY))
1364     {
1365         path->fp_type = FIB_PATH_TYPE_SPECIAL;
1366         path->classify.fp_classify_table_id = rpath->frp_classify_table_id;
1367     }
1368     else if (~0 != rpath->frp_sw_if_index)
1369     {
1370         if (ip46_address_is_zero(&rpath->frp_addr))
1371         {
1372             path->fp_type = FIB_PATH_TYPE_ATTACHED;
1373             path->attached.fp_interface = rpath->frp_sw_if_index;
1374         }
1375         else
1376         {
1377             path->fp_type = FIB_PATH_TYPE_ATTACHED_NEXT_HOP;
1378             path->attached_next_hop.fp_interface = rpath->frp_sw_if_index;
1379             path->attached_next_hop.fp_nh = rpath->frp_addr;
1380         }
1381     }
1382     else
1383     {
1384         if (ip46_address_is_zero(&rpath->frp_addr))
1385         {
1386             if (~0 == rpath->frp_fib_index)
1387             {
1388                 path->fp_type = FIB_PATH_TYPE_SPECIAL;
1389             }
1390             else
1391             {
1392                 path->fp_type = FIB_PATH_TYPE_DEAG;
1393                 path->deag.fp_tbl_id = rpath->frp_fib_index;
1394                 path->deag.fp_rpf_id = ~0;
1395             }
1396         }
1397         else
1398         {
1399             path->fp_type = FIB_PATH_TYPE_RECURSIVE;
1400             if (DPO_PROTO_MPLS == path->fp_nh_proto)
1401             {
1402                 path->recursive.fp_nh.fp_local_label = rpath->frp_local_label;
1403                 path->recursive.fp_nh.fp_eos = rpath->frp_eos;
1404             }
1405             else
1406             {
1407                 path->recursive.fp_nh.fp_ip = rpath->frp_addr;
1408             }
1409             path->recursive.fp_tbl_id = rpath->frp_fib_index;
1410         }
1411     }
1412
1413     FIB_PATH_DBG(path, "create");
1414
1415     return (fib_path_get_index(path));
1416 }
1417
1418 /*
1419  * fib_path_create_special
1420  *
1421  * Create and initialise a new path object.
1422  * return the index of the path.
1423  */
1424 fib_node_index_t
1425 fib_path_create_special (fib_node_index_t pl_index,
1426                          dpo_proto_t nh_proto,
1427                          fib_path_cfg_flags_t flags,
1428                          const dpo_id_t *dpo)
1429 {
1430     fib_path_t *path;
1431
1432     pool_get(fib_path_pool, path);
1433     clib_memset(path, 0, sizeof(*path));
1434
1435     fib_node_init(&path->fp_node,
1436                   FIB_NODE_TYPE_PATH);
1437     dpo_reset(&path->fp_dpo);
1438
1439     path->fp_pl_index = pl_index;
1440     path->fp_weight = 1;
1441     path->fp_preference = 0;
1442     path->fp_nh_proto = nh_proto;
1443     path->fp_via_fib = FIB_NODE_INDEX_INVALID;
1444     path->fp_cfg_flags = flags;
1445
1446     if (FIB_PATH_CFG_FLAG_DROP & flags)
1447     {
1448         path->fp_type = FIB_PATH_TYPE_SPECIAL;
1449     }
1450     else if (FIB_PATH_CFG_FLAG_LOCAL & flags)
1451     {
1452         path->fp_type = FIB_PATH_TYPE_RECEIVE;
1453         path->attached.fp_interface = FIB_NODE_INDEX_INVALID;
1454     }
1455     else
1456     {
1457         path->fp_type = FIB_PATH_TYPE_EXCLUSIVE;
1458         ASSERT(NULL != dpo);
1459         dpo_copy(&path->exclusive.fp_ex_dpo, dpo);
1460     }
1461
1462     return (fib_path_get_index(path));
1463 }
1464
1465 /*
1466  * fib_path_copy
1467  *
1468  * Copy a path. return index of new path.
1469  */
1470 fib_node_index_t
1471 fib_path_copy (fib_node_index_t path_index,
1472                fib_node_index_t path_list_index)
1473 {
1474     fib_path_t *path, *orig_path;
1475
1476     pool_get(fib_path_pool, path);
1477
1478     orig_path = fib_path_get(path_index);
1479     ASSERT(NULL != orig_path);
1480
1481     clib_memcpy(path, orig_path, sizeof(*path));
1482
1483     FIB_PATH_DBG(path, "create-copy:%d", path_index);
1484
1485     /*
1486      * reset the dynamic section
1487      */
1488     fib_node_init(&path->fp_node, FIB_NODE_TYPE_PATH);
1489     path->fp_oper_flags     = FIB_PATH_OPER_FLAG_NONE;
1490     path->fp_pl_index  = path_list_index;
1491     path->fp_via_fib   = FIB_NODE_INDEX_INVALID;
1492     clib_memset(&path->fp_dpo, 0, sizeof(path->fp_dpo));
1493     dpo_reset(&path->fp_dpo);
1494
1495     return (fib_path_get_index(path));
1496 }
1497
1498 /*
1499  * fib_path_destroy
1500  *
1501  * destroy a path that is no longer required
1502  */
1503 void
1504 fib_path_destroy (fib_node_index_t path_index)
1505 {
1506     fib_path_t *path;
1507
1508     path = fib_path_get(path_index);
1509
1510     ASSERT(NULL != path);
1511     FIB_PATH_DBG(path, "destroy");
1512
1513     fib_path_unresolve(path);
1514
1515     fib_node_deinit(&path->fp_node);
1516     pool_put(fib_path_pool, path);
1517 }
1518
1519 /*
1520  * fib_path_hash
1521  *
1522  * compute the path's hash over its key fields (path_hash_start..path_hash_end)
1523  */
1524 uword
1525 fib_path_hash (fib_node_index_t path_index)
1526 {
1527     fib_path_t *path;
1528
1529     path = fib_path_get(path_index);
1530
1531     return (hash_memory(STRUCT_MARK_PTR(path, path_hash_start),
1532                         (STRUCT_OFFSET_OF(fib_path_t, path_hash_end) -
1533                          STRUCT_OFFSET_OF(fib_path_t, path_hash_start)),
1534                         0));
1535 }
1536
1537 /*
1538  * fib_path_cmp_i
1539  *
1540  * Compare two paths for equivalence.
1541  */
1542 static int
1543 fib_path_cmp_i (const fib_path_t *path1,
1544                 const fib_path_t *path2)
1545 {
1546     int res;
1547
1548     res = 1;
1549
1550     /*
1551      * paths of different types and protocol are not equal.
1552      * paths that differ only in weight and/or preference are considered equal.
1553      */
1554     if (path1->fp_type != path2->fp_type)
1555     {
1556         res = (path1->fp_type - path2->fp_type);
1557     }
1558     else if (path1->fp_nh_proto != path2->fp_nh_proto)
1559     {
1560         res = (path1->fp_nh_proto - path2->fp_nh_proto);
1561     }
1562     else
1563     {
1564         /*
1565          * both paths are of the same type.
1566          * consider each type and its attributes in turn.
1567          */
1568         switch (path1->fp_type)
1569         {
1570         case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
1571             res = ip46_address_cmp(&path1->attached_next_hop.fp_nh,
1572                                    &path2->attached_next_hop.fp_nh);
1573             if (0 == res) {
1574                 res = (path1->attached_next_hop.fp_interface -
1575                        path2->attached_next_hop.fp_interface);
1576             }
1577             break;
1578         case FIB_PATH_TYPE_ATTACHED:
1579             res = (path1->attached.fp_interface -
1580                    path2->attached.fp_interface);
1581             break;
1582         case FIB_PATH_TYPE_RECURSIVE:
1583             res = ip46_address_cmp(&path1->recursive.fp_nh.fp_ip,
1584                                    &path2->recursive.fp_nh.fp_ip);
1585  
1586             if (0 == res)
1587             {
1588                 res = (path1->recursive.fp_tbl_id - path2->recursive.fp_tbl_id);
1589             }
1590             break;
1591         case FIB_PATH_TYPE_BIER_FMASK:
1592             res = (path1->bier_fmask.fp_bier_fmask -
1593                    path2->bier_fmask.fp_bier_fmask);
1594             break;
1595         case FIB_PATH_TYPE_BIER_IMP:
1596             res = (path1->bier_imp.fp_bier_imp -
1597                    path2->bier_imp.fp_bier_imp);
1598             break;
1599         case FIB_PATH_TYPE_BIER_TABLE:
1600             res = bier_table_id_cmp(&path1->bier_table.fp_bier_tbl,
1601                                     &path2->bier_table.fp_bier_tbl);
1602             break;
1603         case FIB_PATH_TYPE_DEAG:
1604             res = (path1->deag.fp_tbl_id - path2->deag.fp_tbl_id);
1605             if (0 == res)
1606             {
1607                 res = (path1->deag.fp_rpf_id - path2->deag.fp_rpf_id);
1608             }
1609             break;
1610         case FIB_PATH_TYPE_INTF_RX:
1611             res = (path1->intf_rx.fp_interface - path2->intf_rx.fp_interface);
1612             break;
1613         case FIB_PATH_TYPE_UDP_ENCAP:
1614             res = (path1->udp_encap.fp_udp_encap_id - path2->udp_encap.fp_udp_encap_id);
1615             break;
1616         case FIB_PATH_TYPE_DVR:
1617             res = (path1->dvr.fp_interface - path2->dvr.fp_interface);
1618             break;
1619         case FIB_PATH_TYPE_EXCLUSIVE:
1620             res = dpo_cmp(&path1->exclusive.fp_ex_dpo, &path2->exclusive.fp_ex_dpo);
1621             break;
1622         case FIB_PATH_TYPE_SPECIAL:
1623         case FIB_PATH_TYPE_RECEIVE:
1624             res = 0;
1625             break;
1626         }
1627     }
1628     return (res);
1629 }
1630
1631 /*
1632  * fib_path_cmp_for_sort
1633  *
1634  * Compare two paths for equivalence. Used during path sorting.
1635  * As usual 0 means equal.
1636  */
1637 int
1638 fib_path_cmp_for_sort (void * v1,
1639                        void * v2)
1640 {
1641     fib_node_index_t *pi1 = v1, *pi2 = v2;
1642     fib_path_t *path1, *path2;
1643
1644     path1 = fib_path_get(*pi1);
1645     path2 = fib_path_get(*pi2);
1646
1647     /*
1648      * when sorting paths we want the highest preference paths
1649      * first, so that the set of choices built is in preference order
1650      */
1651     if (path1->fp_preference != path2->fp_preference)
1652     {
1653         return (path1->fp_preference - path2->fp_preference);
1654     }
1655
1656     return (fib_path_cmp_i(path1, path2));
1657 }
1658
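/*
 * Illustrative sketch (assumed caller): the owning path-list keeps a
 * vector of path indices and can order it with this comparator using
 * something like:
 *
 *   vec_sort_with_function (path_indices, fib_path_cmp_for_sort);
 *
 * where 'path_indices' stands for that vector of fib_node_index_t.
 */
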
1659 /*
1660  * fib_path_cmp
1661  *
1662  * Compare two paths for equivalence.
1663  */
1664 int
1665 fib_path_cmp (fib_node_index_t pi1,
1666               fib_node_index_t pi2)
1667 {
1668     fib_path_t *path1, *path2;
1669
1670     path1 = fib_path_get(pi1);
1671     path2 = fib_path_get(pi2);
1672
1673     return (fib_path_cmp_i(path1, path2));
1674 }
1675
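/*
 * fib_path_cmp_w_route_path
 *
 * Compare a path with the route-path description from which it could
 * have been created. As with the other comparisons, 0 means a match.
 */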
1676 int
1677 fib_path_cmp_w_route_path (fib_node_index_t path_index,
1678                            const fib_route_path_t *rpath)
1679 {
1680     fib_path_t *path;
1681     int res;
1682
1683     path = fib_path_get(path_index);
1684
1685     res = 1;
1686
1687     if (path->fp_weight != rpath->frp_weight)
1688     {
1689         res = (path->fp_weight - rpath->frp_weight);
1690     }
1691     else
1692     {
1693         /*
1694          * compare the path's type-specific attributes against
1695          * those supplied in the route-path description.
1696          */
1697         switch (path->fp_type)
1698         {
1699         case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
1700             res = ip46_address_cmp(&path->attached_next_hop.fp_nh,
1701                                    &rpath->frp_addr);
1702             if (0 == res)
1703             {
1704                 res = (path->attached_next_hop.fp_interface -
1705                        rpath->frp_sw_if_index);
1706             }
1707             break;
1708         case FIB_PATH_TYPE_ATTACHED:
1709             res = (path->attached.fp_interface - rpath->frp_sw_if_index);
1710             break;
1711         case FIB_PATH_TYPE_RECURSIVE:
1712             if (DPO_PROTO_MPLS == path->fp_nh_proto)
1713             {
1714                 res = path->recursive.fp_nh.fp_local_label - rpath->frp_local_label;
1715
1716                 if (res == 0)
1717                 {
1718                     res = path->recursive.fp_nh.fp_eos - rpath->frp_eos;
1719                 }
1720             }
1721             else
1722             {
1723                 res = ip46_address_cmp(&path->recursive.fp_nh.fp_ip,
1724                                        &rpath->frp_addr);
1725             }
1726
1727             if (0 == res)
1728             {
1729                 res = (path->recursive.fp_tbl_id - rpath->frp_fib_index);
1730             }
1731             break;
1732         case FIB_PATH_TYPE_BIER_FMASK:
1733             res = (path->bier_fmask.fp_bier_fmask - rpath->frp_bier_fmask);
1734             break;
1735         case FIB_PATH_TYPE_BIER_IMP:
1736             res = (path->bier_imp.fp_bier_imp - rpath->frp_bier_imp);
1737             break;
1738         case FIB_PATH_TYPE_BIER_TABLE:
1739             res = bier_table_id_cmp(&path->bier_table.fp_bier_tbl,
1740                                     &rpath->frp_bier_tbl);
1741             break;
1742         case FIB_PATH_TYPE_INTF_RX:
1743             res = (path->intf_rx.fp_interface - rpath->frp_sw_if_index);
1744             break;
1745         case FIB_PATH_TYPE_UDP_ENCAP:
1746             res = (path->udp_encap.fp_udp_encap_id - rpath->frp_udp_encap_id);
1747             break;
1748         case FIB_PATH_TYPE_DEAG:
1749             res = (path->deag.fp_tbl_id - rpath->frp_fib_index);
1750             if (0 == res)
1751             {
1752                 res = (path->deag.fp_rpf_id - rpath->frp_rpf_id);
1753             }
1754             break;
1755         case FIB_PATH_TYPE_DVR:
1756             res = (path->dvr.fp_interface - rpath->frp_sw_if_index);
1757             break;
1758         case FIB_PATH_TYPE_EXCLUSIVE:
1759             res = dpo_cmp(&path->exclusive.fp_ex_dpo, &rpath->dpo);
1760             break;
1761         case FIB_PATH_TYPE_RECEIVE:
1762             if (rpath->frp_flags & FIB_ROUTE_PATH_LOCAL)
1763             {
1764                 res = 0;
1765             }
1766             else
1767             {
1768                 res = 1;
1769             }
1770             break;
1771         case FIB_PATH_TYPE_SPECIAL:
1772             res = 0;
1773             break;
1774         }
1775     }
1776     return (res);
1777 }
1778
1779 /*
1780  * fib_path_recursive_loop_detect
1781  *
1782  * A forward walk of the FIB object graph to detect a cycle/loop. This walk
1783  * is initiated when an entry is linked to a new path-list or unlinked from an old.
1784  * The entry vector passed contains all the FIB entries that are children of this
1785  * path (i.e. all the entries encountered on the walk so far). If this vector
1786  * contains the entry this path resolves via, then a loop is about to form.
1787  * The loop must be allowed to form, since we need the dependencies in place
1788  * so that we can track when the loop breaks.
1789  * However, we MUST NOT produce a loop in the forwarding graph (else packets
1790  * would loop around the switch path until the loop breaks), so we mark recursive
1791  * paths as looped so that they do not contribute forwarding information.
1792  * By marking the path as looped, an entry such as:
1793  *    X/Y
1794  *     via a.a.a.a (looped)
1795  *     via b.b.b.b (not looped)
1796  * can still forward using the info provided by b.b.b.b only.
1797  */
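/*
 * For example (illustrative addresses), two recursive routes that
 * resolve via each other form such a loop:
 *
 *   ip route add 1.1.1.0/28 via 10.10.10.10
 *   ip route add 10.10.10.0/28 via 1.1.1.1
 *
 * both recursive paths are then marked looped and contribute the drop
 * until one of the prefixes gains a non-recursive path.
 */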
1798 int
1799 fib_path_recursive_loop_detect (fib_node_index_t path_index,
1800                                 fib_node_index_t **entry_indicies)
1801 {
1802     fib_path_t *path;
1803
1804     path = fib_path_get(path_index);
1805
1806     /*
1807      * the forced drop path is never looped, because it is never resolved.
1808      */
1809     if (fib_path_is_permanent_drop(path))
1810     {
1811         return (0);
1812     }
1813
1814     switch (path->fp_type)
1815     {
1816     case FIB_PATH_TYPE_RECURSIVE:
1817     {
1818         fib_node_index_t *entry_index, *entries;
1819         int looped = 0;
1820         entries = *entry_indicies;
1821
1822         vec_foreach(entry_index, entries) {
1823             if (*entry_index == path->fp_via_fib)
1824             {
1825                 /*
1826                  * the entry that is about to link to this path-list (or
1827                  * one of this path-list's children) is the same entry that
1828                  * this recursive path resolves through. this is a cycle.
1829                  * abort the walk.
1830                  */
1831                 looped = 1;
1832                 break;
1833             }
1834         }
1835
1836         if (looped)
1837         {
1838             FIB_PATH_DBG(path, "recursive loop formed");
1839             path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RECURSIVE_LOOP;
1840
1841             dpo_copy(&path->fp_dpo, drop_dpo_get(path->fp_nh_proto));
1842         }
1843         else
1844         {
1845             /*
1846              * no loop here yet. keep forward walking the graph.
1847              */
1848             if (fib_entry_recursive_loop_detect(path->fp_via_fib, entry_indicies))
1849             {
1850                 FIB_PATH_DBG(path, "recursive loop formed");
1851                 path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RECURSIVE_LOOP;
1852             }
1853             else
1854             {
1855                 FIB_PATH_DBG(path, "recursive loop cleared");
1856                 path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RECURSIVE_LOOP;
1857             }
1858         }
1859         break;
1860     }
1861     case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
1862     case FIB_PATH_TYPE_ATTACHED:
1863         if (dpo_is_adj(&path->fp_dpo) &&
1864             adj_recursive_loop_detect(path->fp_dpo.dpoi_index,
1865                                       entry_indicies))
1866         {
1867             FIB_PATH_DBG(path, "recursive loop formed");
1868             path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RECURSIVE_LOOP;
1869         }
1870         else
1871         {
1872             FIB_PATH_DBG(path, "recursive loop cleared");
1873             path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RECURSIVE_LOOP;
1874         }
1875         break;
1876     case FIB_PATH_TYPE_SPECIAL:
1877     case FIB_PATH_TYPE_DEAG:
1878     case FIB_PATH_TYPE_DVR:
1879     case FIB_PATH_TYPE_RECEIVE:
1880     case FIB_PATH_TYPE_INTF_RX:
1881     case FIB_PATH_TYPE_UDP_ENCAP:
1882     case FIB_PATH_TYPE_EXCLUSIVE:
1883     case FIB_PATH_TYPE_BIER_FMASK:
1884     case FIB_PATH_TYPE_BIER_TABLE:
1885     case FIB_PATH_TYPE_BIER_IMP:
1886         /*
1887          * these path types cannot be part of a loop, since they are the leaves
1888          * of the graph.
1889          */
1890         break;
1891     }
1892
1893     return (fib_path_is_looped(path_index));
1894 }
1895
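/*
 * fib_path_resolve
 *
 * Attempt to resolve the path: build the DPO through which it forwards,
 * according to its type, and, where required, link the path into the
 * object graph (the adjacency, via-entry, BIER object, etc.) so that it
 * is updated when that object changes. Returns non-zero if the path
 * ends up resolved.
 */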
1896 int
1897 fib_path_resolve (fib_node_index_t path_index)
1898 {
1899     fib_path_t *path;
1900
1901     path = fib_path_get(path_index);
1902
1903     /*
1904      * hope for the best.
1905      */
1906     path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RESOLVED;
1907
1908     /*
1909      * the forced drop path resolves via the drop DPO
1910      */
1911     if (fib_path_is_permanent_drop(path))
1912     {
1913         dpo_copy(&path->fp_dpo, drop_dpo_get(path->fp_nh_proto));
1914         path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
1915         return (fib_path_is_resolved(path_index));
1916     }
1917
1918     switch (path->fp_type)
1919     {
1920     case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
1921         fib_path_attached_next_hop_set(path);
1922         break;
1923     case FIB_PATH_TYPE_ATTACHED:
1924     {
1925         dpo_id_t tmp = DPO_INVALID;
1926
1927         /*
1928          * an attached path is not resolved if its interface is not up
1929          */
1930         if (!vnet_sw_interface_is_up(vnet_get_main(),
1931                                      path->attached.fp_interface))
1932         {
1933             path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
1934         }
1935         fib_path_attached_get_adj(path,
1936                                   dpo_proto_to_link(path->fp_nh_proto),
1937                                   &tmp);
1938
1939         /*
1940          * re-fetch after possible mem realloc
1941          */
1942         path = fib_path_get(path_index);
1943         dpo_copy(&path->fp_dpo, &tmp);
1944
1945         /*
1946          * become a child of the adjacency so we receive updates
1947          * when the interface state changes
1948          */
1949         if (dpo_is_adj(&path->fp_dpo))
1950         {
1951             path->fp_sibling = adj_child_add(path->fp_dpo.dpoi_index,
1952                                              FIB_NODE_TYPE_PATH,
1953                                              fib_path_get_index(path));
1954         }
1955         dpo_reset(&tmp);
1956         break;
1957     }
1958     case FIB_PATH_TYPE_RECURSIVE:
1959     {
1960         /*
1961          * Create a RR source entry in the table for the address
1962          * that this path recurses through.
1963          * This resolve action is recursive, hence we may create
1964          * more paths in the process. more creates may mean a realloc
1965          * of the path pool, so this path must be re-fetched afterwards.
1966          */
1967         fib_node_index_t fei;
1968         fib_prefix_t pfx;
1969
1970         ASSERT(FIB_NODE_INDEX_INVALID == path->fp_via_fib);
1971
1972         if (DPO_PROTO_MPLS == path->fp_nh_proto)
1973         {
1974             fib_prefix_from_mpls_label(path->recursive.fp_nh.fp_local_label,
1975                                        path->recursive.fp_nh.fp_eos,
1976                                        &pfx);
1977         }
1978         else
1979         {
1980             fib_prefix_from_ip46_addr(&path->recursive.fp_nh.fp_ip, &pfx);
1981         }
1982
1983         fib_table_lock(path->recursive.fp_tbl_id,
1984                        dpo_proto_to_fib(path->fp_nh_proto),
1985                        FIB_SOURCE_RR);
1986         fei = fib_table_entry_special_add(path->recursive.fp_tbl_id,
1987                                           &pfx,
1988                                           FIB_SOURCE_RR,
1989                                           FIB_ENTRY_FLAG_NONE);
1990
1991         path = fib_path_get(path_index);
1992         path->fp_via_fib = fei;
1993
1994         /*
1995          * become a dependent child of the entry so the path is 
1996          * informed when the forwarding for the entry changes.
1997          */
1998         path->fp_sibling = fib_entry_child_add(path->fp_via_fib,
1999                                                FIB_NODE_TYPE_PATH,
2000                                                fib_path_get_index(path));
2001
2002         /*
2003          * create and configure the IP DPO
2004          */
2005         fib_path_recursive_adj_update(
2006             path,
2007             fib_path_to_chain_type(path),
2008             &path->fp_dpo);
2009
2010         break;
2011     }
2012     case FIB_PATH_TYPE_BIER_FMASK:
2013     {
2014         /*
2015          * become a dependent child of the entry so the path is
2016          * informed when the forwarding for the entry changes.
2017          */
2018         path->fp_sibling = bier_fmask_child_add(path->bier_fmask.fp_bier_fmask,
2019                                                 FIB_NODE_TYPE_PATH,
2020                                                 fib_path_get_index(path));
2021
2022         path->fp_via_bier_fmask = path->bier_fmask.fp_bier_fmask;
2023         fib_path_bier_fmask_update(path, &path->fp_dpo);
2024
2025         break;
2026     }
2027     case FIB_PATH_TYPE_BIER_IMP:
2028         bier_imp_lock(path->bier_imp.fp_bier_imp);
2029         bier_imp_contribute_forwarding(path->bier_imp.fp_bier_imp,
2030                                        DPO_PROTO_IP4,
2031                                        &path->fp_dpo);
2032         break;
2033     case FIB_PATH_TYPE_BIER_TABLE:
2034     {
2035         /*
2036          * Find/create the BIER table to link to
2037          */
2038         ASSERT(FIB_NODE_INDEX_INVALID == path->fp_via_bier_tbl);
2039
2040         path->fp_via_bier_tbl =
2041             bier_table_ecmp_create_and_lock(&path->bier_table.fp_bier_tbl);
2042
2043         bier_table_contribute_forwarding(path->fp_via_bier_tbl,
2044                                          &path->fp_dpo);
2045         break;
2046     }
2047     case FIB_PATH_TYPE_SPECIAL:
2048         if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_ICMP_PROHIBIT)
2049         {
2050             ip_null_dpo_add_and_lock (path->fp_nh_proto,
2051                                       IP_NULL_ACTION_SEND_ICMP_PROHIBIT,
2052                                       &path->fp_dpo);
2053         }
2054         else if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_ICMP_UNREACH)
2055         {
2056             ip_null_dpo_add_and_lock (path->fp_nh_proto,
2057                                       IP_NULL_ACTION_SEND_ICMP_UNREACH,
2058                                       &path->fp_dpo);
2059         }
2060         else if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_CLASSIFY)
2061         {
2062             dpo_set (&path->fp_dpo, DPO_CLASSIFY,
2063                      path->fp_nh_proto,
2064                      classify_dpo_create (path->fp_nh_proto,
2065                                           path->classify.fp_classify_table_id));
2066         }
2067         else
2068         {
2069             /*
2070              * Resolve via the drop
2071              */
2072             dpo_copy(&path->fp_dpo, drop_dpo_get(path->fp_nh_proto));
2073         }
2074         break;
2075     case FIB_PATH_TYPE_DEAG:
2076     {
2077         if (DPO_PROTO_BIER == path->fp_nh_proto)
2078         {
2079             bier_disp_table_contribute_forwarding(path->deag.fp_tbl_id,
2080                                                   &path->fp_dpo);
2081         }
2082         else
2083         {
2084             /*
2085              * Resolve via a lookup DPO.
2086              * FIXME. control plane should add routes with a table ID
2087              */
2088             lookup_input_t input;
2089             lookup_cast_t cast;
2090
2091             cast = (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_RPF_ID ?
2092                     LOOKUP_MULTICAST :
2093                     LOOKUP_UNICAST);
2094             input = (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_DEAG_SRC ?
2095                      LOOKUP_INPUT_SRC_ADDR :
2096                      LOOKUP_INPUT_DST_ADDR);
2097
2098             lookup_dpo_add_or_lock_w_fib_index(path->deag.fp_tbl_id,
2099                                                path->fp_nh_proto,
2100                                                cast,
2101                                                input,
2102                                                LOOKUP_TABLE_FROM_CONFIG,
2103                                                &path->fp_dpo);
2104         }
2105         break;
2106     }
2107     case FIB_PATH_TYPE_DVR:
2108         dvr_dpo_add_or_lock(path->attached.fp_interface,
2109                             path->fp_nh_proto,
2110                             &path->fp_dpo);
2111         break;
2112     case FIB_PATH_TYPE_RECEIVE:
2113         /*
2114          * Resolve via a receive DPO.
2115          */
2116         receive_dpo_add_or_lock(path->fp_nh_proto,
2117                                 path->receive.fp_interface,
2118                                 &path->receive.fp_addr,
2119                                 &path->fp_dpo);
2120         break;
2121     case FIB_PATH_TYPE_UDP_ENCAP:
2122         udp_encap_lock(path->udp_encap.fp_udp_encap_id);
2123         udp_encap_contribute_forwarding(path->udp_encap.fp_udp_encap_id,
2124                                         path->fp_nh_proto,
2125                                         &path->fp_dpo);
2126         break;
2127     case FIB_PATH_TYPE_INTF_RX: {
2128         /*
2129          * Resolve via an interface-rx DPO.
2130          */
2131         interface_rx_dpo_add_or_lock(path->fp_nh_proto,
2132                                      path->intf_rx.fp_interface,
2133                                      &path->fp_dpo);
2134         break;
2135     }
2136     case FIB_PATH_TYPE_EXCLUSIVE:
2137         /*
2138          * Resolve via the user provided DPO
2139          */
2140         dpo_copy(&path->fp_dpo, &path->exclusive.fp_ex_dpo);
2141         break;
2142     }
2143
2144     return (fib_path_is_resolved(path_index));
2145 }
2146
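/*
 * fib_path_get_resolving_interface
 *
 * Return the SW interface index through which the path resolves, falling
 * back to the uRPF interface of the path's DPO (which may be ~0) for
 * types with no directly associated interface.
 */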
2147 u32
2148 fib_path_get_resolving_interface (fib_node_index_t path_index)
2149 {
2150     fib_path_t *path;
2151
2152     path = fib_path_get(path_index);
2153
2154     switch (path->fp_type)
2155     {
2156     case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
2157         return (path->attached_next_hop.fp_interface);
2158     case FIB_PATH_TYPE_ATTACHED:
2159         return (path->attached.fp_interface);
2160     case FIB_PATH_TYPE_RECEIVE:
2161         return (path->receive.fp_interface);
2162     case FIB_PATH_TYPE_RECURSIVE:
2163         if (fib_path_is_resolved(path_index))
2164         {
2165             return (fib_entry_get_resolving_interface(path->fp_via_fib));
2166         }
2167         break;
2168     case FIB_PATH_TYPE_DVR:
2169         return (path->dvr.fp_interface);
2170     case FIB_PATH_TYPE_INTF_RX:
2171     case FIB_PATH_TYPE_UDP_ENCAP:
2172     case FIB_PATH_TYPE_SPECIAL:
2173     case FIB_PATH_TYPE_DEAG:
2174     case FIB_PATH_TYPE_EXCLUSIVE:
2175     case FIB_PATH_TYPE_BIER_FMASK:
2176     case FIB_PATH_TYPE_BIER_TABLE:
2177     case FIB_PATH_TYPE_BIER_IMP:
2178         break;
2179     }
2180     return (dpo_get_urpf(&path->fp_dpo));
2181 }
2182
2183 index_t
2184 fib_path_get_resolving_index (fib_node_index_t path_index)
2185 {
2186     fib_path_t *path;
2187
2188     path = fib_path_get(path_index);
2189
2190     switch (path->fp_type)
2191     {
2192     case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
2193     case FIB_PATH_TYPE_ATTACHED:
2194     case FIB_PATH_TYPE_RECEIVE:
2195     case FIB_PATH_TYPE_INTF_RX:
2196     case FIB_PATH_TYPE_SPECIAL:
2197     case FIB_PATH_TYPE_DEAG:
2198     case FIB_PATH_TYPE_DVR:
2199     case FIB_PATH_TYPE_EXCLUSIVE:
2200         break;
2201     case FIB_PATH_TYPE_UDP_ENCAP:
2202         return (path->udp_encap.fp_udp_encap_id);
2203     case FIB_PATH_TYPE_RECURSIVE:
2204         return (path->fp_via_fib);
2205     case FIB_PATH_TYPE_BIER_FMASK:
2206         return (path->bier_fmask.fp_bier_fmask);
2207     case FIB_PATH_TYPE_BIER_TABLE:
2208         return (path->fp_via_bier_tbl);
2209     case FIB_PATH_TYPE_BIER_IMP:
2210         return (path->bier_imp.fp_bier_imp);
2211     }
2212     return (~0);
2213 }
2214
2215 adj_index_t
2216 fib_path_get_adj (fib_node_index_t path_index)
2217 {
2218     fib_path_t *path;
2219
2220     path = fib_path_get(path_index);
2221
2222     if (dpo_is_adj(&path->fp_dpo))
2223     {
2224         return (path->fp_dpo.dpoi_index);
2225     }
2226     return (ADJ_INDEX_INVALID);
2227 }
2228
2229 u16
2230 fib_path_get_weight (fib_node_index_t path_index)
2231 {
2232     fib_path_t *path;
2233
2234     path = fib_path_get(path_index);
2235
2236     ASSERT(path);
2237
2238     return (path->fp_weight);
2239 }
2240
2241 u16
2242 fib_path_get_preference (fib_node_index_t path_index)
2243 {
2244     fib_path_t *path;
2245
2246     path = fib_path_get(path_index);
2247
2248     ASSERT(path);
2249
2250     return (path->fp_preference);
2251 }
2252
2253 u32
2254 fib_path_get_rpf_id (fib_node_index_t path_index)
2255 {
2256     fib_path_t *path;
2257
2258     path = fib_path_get(path_index);
2259
2260     ASSERT(path);
2261
2262     if (FIB_PATH_CFG_FLAG_RPF_ID & path->fp_cfg_flags)
2263     {
2264         return (path->deag.fp_rpf_id);
2265     }
2266
2267     return (~0);
2268 }
2269
2270 /**
2271  * @brief Contribute the path's resolving interface to the uRPF list passed.
2272  * By calling this function over all paths, recursively, a child
2273  * can construct the full set of interfaces its forwarding resolves
2274  * through, and hence its uRPF list.
2275  */
2276 void
2277 fib_path_contribute_urpf (fib_node_index_t path_index,
2278                           index_t urpf)
2279 {
2280     fib_path_t *path;
2281
2282     path = fib_path_get(path_index);
2283
2284     /*
2285      * resolved and unresolved paths contribute to the RPF list.
2286      */
2287     switch (path->fp_type)
2288     {
2289     case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
2290         fib_urpf_list_append(urpf, path->attached_next_hop.fp_interface);
2291         break;
2292
2293     case FIB_PATH_TYPE_ATTACHED:
2294         fib_urpf_list_append(urpf, path->attached.fp_interface);
2295         break;
2296
2297     case FIB_PATH_TYPE_RECURSIVE:
2298         if (FIB_NODE_INDEX_INVALID != path->fp_via_fib &&
2299             !fib_path_is_looped(path_index))
2300         {
2301             /*
2302              * a path can be unresolved due to constraints, or because it
2303              * has no via-entry; without a via-entry there is nothing to contribute.
2304              */
2305             fib_entry_contribute_urpf(path->fp_via_fib, urpf);
2306         }
2307         break;
2308
2309     case FIB_PATH_TYPE_EXCLUSIVE:
2310     case FIB_PATH_TYPE_SPECIAL:
2311     {
2312         /*
2313          * these path types may link to an adj, if that's what
2314          * the client gave
2315          */
2316         u32 rpf_sw_if_index;
2317
2318         rpf_sw_if_index = dpo_get_urpf(&path->fp_dpo);
2319
2320         if (~0 != rpf_sw_if_index)
2321         {
2322             fib_urpf_list_append(urpf, rpf_sw_if_index);
2323         }
2324         break;
2325     }
2326     case FIB_PATH_TYPE_DVR:
2327         fib_urpf_list_append(urpf, path->dvr.fp_interface);
2328         break;
2329     case FIB_PATH_TYPE_DEAG:
2330     case FIB_PATH_TYPE_RECEIVE:
2331     case FIB_PATH_TYPE_INTF_RX:
2332     case FIB_PATH_TYPE_UDP_ENCAP:
2333     case FIB_PATH_TYPE_BIER_FMASK:
2334     case FIB_PATH_TYPE_BIER_TABLE:
2335     case FIB_PATH_TYPE_BIER_IMP:
2336         /*
2337          * these path types don't link to an adj
2338          */
2339         break;
2340     }
2341 }
2342
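/*
 * A rough sketch (assumed caller, using the companion APIs declared in
 * fib_urpf_list.h) of how a child could build its uRPF list from its
 * paths:
 *
 *   index_t ui = fib_urpf_list_alloc_and_lock ();
 *
 *   vec_foreach (path_index, path_indices)
 *     fib_path_contribute_urpf (*path_index, ui);
 *
 *   fib_urpf_list_bake (ui);
 *
 * where 'path_indices' stands for the child's vector of path indices.
 */
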
2343 void
2344 fib_path_stack_mpls_disp (fib_node_index_t path_index,
2345                           dpo_proto_t payload_proto,
2346                           fib_mpls_lsp_mode_t mode,
2347                           dpo_id_t *dpo)
2348 {
2349     fib_path_t *path;
2350
2351     path = fib_path_get(path_index);
2352
2353     ASSERT(path);
2354
2355     switch (path->fp_type)
2356     {
2357     case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
2358     {
2359         dpo_id_t tmp = DPO_INVALID;
2360
2361         dpo_copy(&tmp, dpo);
2362
2363         mpls_disp_dpo_create(payload_proto, ~0, mode, &tmp, dpo);
2364         dpo_reset(&tmp);
2365         break;
2366     }                
2367     case FIB_PATH_TYPE_DEAG:
2368     {
2369         dpo_id_t tmp = DPO_INVALID;
2370
2371         dpo_copy(&tmp, dpo);
2372
2373         mpls_disp_dpo_create(payload_proto,
2374                              path->deag.fp_rpf_id,
2375                              mode, &tmp, dpo);
2376         dpo_reset(&tmp);
2377         break;
2378     }
2379     case FIB_PATH_TYPE_RECEIVE:
2380     case FIB_PATH_TYPE_ATTACHED:
2381     case FIB_PATH_TYPE_RECURSIVE:
2382     case FIB_PATH_TYPE_INTF_RX:
2383     case FIB_PATH_TYPE_UDP_ENCAP:
2384     case FIB_PATH_TYPE_EXCLUSIVE:
2385     case FIB_PATH_TYPE_SPECIAL:
2386     case FIB_PATH_TYPE_BIER_FMASK:
2387     case FIB_PATH_TYPE_BIER_TABLE:
2388     case FIB_PATH_TYPE_BIER_IMP:
2389     case FIB_PATH_TYPE_DVR:
2390         break;
2391     }
2392
2393     if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_POP_PW_CW)
2394     {
2395         dpo_id_t tmp = DPO_INVALID;
2396
2397         dpo_copy(&tmp, dpo);
2398
2399         pw_cw_dpo_create(&tmp, dpo);
2400         dpo_reset(&tmp);
2401     }
2402 }
2403
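/*
 * fib_path_contribute_forwarding
 *
 * Fill in the DPO to use when forwarding via this path for the requested
 * forwarding chain type. If the requested type matches the path's native
 * chain type the cached DPO is copied, otherwise a suitable DPO
 * (adjacency, lookup, BIER object, etc.) is constructed.
 */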
2404 void
2405 fib_path_contribute_forwarding (fib_node_index_t path_index,
2406                                 fib_forward_chain_type_t fct,
2407                                 dpo_id_t *dpo)
2408 {
2409     fib_path_t *path;
2410
2411     path = fib_path_get(path_index);
2412
2413     ASSERT(path);
2414     ASSERT(FIB_FORW_CHAIN_TYPE_MPLS_EOS != fct);
2415
2416     /*
2417      * The DPO stored in the path was created when the path was resolved.
2418      * It therefore represents the path's 'native' protocol, i.e. IP.
2419      * For all other chain types we need to go find something else.
2420      */
2421     if (fib_path_to_chain_type(path) == fct)
2422     {
2423         dpo_copy(dpo, &path->fp_dpo);
2424     }
2425     else
2426     {
2427         switch (path->fp_type)
2428         {
2429         case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
2430             switch (fct)
2431             {
2432             case FIB_FORW_CHAIN_TYPE_UNICAST_IP4:
2433             case FIB_FORW_CHAIN_TYPE_UNICAST_IP6:
2434             case FIB_FORW_CHAIN_TYPE_MPLS_EOS:
2435             case FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS:
2436             case FIB_FORW_CHAIN_TYPE_ETHERNET:
2437             case FIB_FORW_CHAIN_TYPE_NSH:
2438             case FIB_FORW_CHAIN_TYPE_MCAST_IP4:
2439             case FIB_FORW_CHAIN_TYPE_MCAST_IP6:
2440                 path = fib_path_attached_next_hop_get_adj(
2441                     path,
2442                     fib_forw_chain_type_to_link_type(fct),
2443                     dpo);
2444                 break;
2445             case FIB_FORW_CHAIN_TYPE_BIER:
2446                 break;
2447             }
2448             break;
2449         case FIB_PATH_TYPE_RECURSIVE:
2450             switch (fct)
2451             {
2452             case FIB_FORW_CHAIN_TYPE_MPLS_EOS:
2453             case FIB_FORW_CHAIN_TYPE_UNICAST_IP4:
2454             case FIB_FORW_CHAIN_TYPE_UNICAST_IP6:
2455             case FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS:
2456             case FIB_FORW_CHAIN_TYPE_MCAST_IP4:
2457             case FIB_FORW_CHAIN_TYPE_MCAST_IP6:
2458             case FIB_FORW_CHAIN_TYPE_BIER:
2459                 fib_path_recursive_adj_update(path, fct, dpo);
2460                 break;
2461             case FIB_FORW_CHAIN_TYPE_ETHERNET:
2462             case FIB_FORW_CHAIN_TYPE_NSH:
2463                 ASSERT(0);
2464                 break;
2465             }
2466             break;
2467         case FIB_PATH_TYPE_BIER_TABLE:
2468             switch (fct)
2469             {
2470             case FIB_FORW_CHAIN_TYPE_BIER:
2471                 bier_table_contribute_forwarding(path->fp_via_bier_tbl, dpo);
2472                 break;
2473             case FIB_FORW_CHAIN_TYPE_MPLS_EOS:
2474             case FIB_FORW_CHAIN_TYPE_UNICAST_IP4:
2475             case FIB_FORW_CHAIN_TYPE_UNICAST_IP6:
2476             case FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS:
2477             case FIB_FORW_CHAIN_TYPE_MCAST_IP4:
2478             case FIB_FORW_CHAIN_TYPE_MCAST_IP6:
2479             case FIB_FORW_CHAIN_TYPE_ETHERNET:
2480             case FIB_FORW_CHAIN_TYPE_NSH:
2481                 ASSERT(0);
2482                 break;
2483             }
2484             break;
2485         case FIB_PATH_TYPE_BIER_FMASK:
2486             switch (fct)
2487             {
2488             case FIB_FORW_CHAIN_TYPE_BIER:
2489                 fib_path_bier_fmask_update(path, dpo);
2490                 break;
2491             case FIB_FORW_CHAIN_TYPE_MPLS_EOS:
2492             case FIB_FORW_CHAIN_TYPE_UNICAST_IP4:
2493             case FIB_FORW_CHAIN_TYPE_UNICAST_IP6:
2494             case FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS:
2495             case FIB_FORW_CHAIN_TYPE_MCAST_IP4:
2496             case FIB_FORW_CHAIN_TYPE_MCAST_IP6:
2497             case FIB_FORW_CHAIN_TYPE_ETHERNET:
2498             case FIB_FORW_CHAIN_TYPE_NSH:
2499                 ASSERT(0);
2500                 break;
2501             }
2502             break;
2503         case FIB_PATH_TYPE_BIER_IMP:
2504             bier_imp_contribute_forwarding(path->bier_imp.fp_bier_imp,
2505                                            fib_forw_chain_type_to_dpo_proto(fct),
2506                                            dpo);
2507             break;
2508         case FIB_PATH_TYPE_DEAG:
2509             switch (fct)
2510             {
2511             case FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS:
2512                 lookup_dpo_add_or_lock_w_table_id(MPLS_FIB_DEFAULT_TABLE_ID,
2513                                                   DPO_PROTO_MPLS,
2514                                                   LOOKUP_UNICAST,
2515                                                   LOOKUP_INPUT_DST_ADDR,
2516                                                   LOOKUP_TABLE_FROM_CONFIG,
2517                                                   dpo);
2518                 break;
2519             case FIB_FORW_CHAIN_TYPE_MPLS_EOS:
2520             case FIB_FORW_CHAIN_TYPE_UNICAST_IP4:
2521             case FIB_FORW_CHAIN_TYPE_UNICAST_IP6:
2522             case FIB_FORW_CHAIN_TYPE_MCAST_IP4:
2523             case FIB_FORW_CHAIN_TYPE_MCAST_IP6:
2524                 dpo_copy(dpo, &path->fp_dpo);
2525                 break;
2526             case FIB_FORW_CHAIN_TYPE_BIER:
2527                 break;
2528             case FIB_FORW_CHAIN_TYPE_ETHERNET:
2529             case FIB_FORW_CHAIN_TYPE_NSH:
2530                 ASSERT(0);
2531                 break;
2532             }
2533             break;
2534         case FIB_PATH_TYPE_EXCLUSIVE:
2535             dpo_copy(dpo, &path->exclusive.fp_ex_dpo);
2536             break;
2537         case FIB_PATH_TYPE_ATTACHED:
2538             switch (fct)
2539             {
2540             case FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS:
2541             case FIB_FORW_CHAIN_TYPE_UNICAST_IP4:
2542             case FIB_FORW_CHAIN_TYPE_UNICAST_IP6:
2543             case FIB_FORW_CHAIN_TYPE_MPLS_EOS:
2544             case FIB_FORW_CHAIN_TYPE_ETHERNET:
2545             case FIB_FORW_CHAIN_TYPE_NSH:
2546             case FIB_FORW_CHAIN_TYPE_BIER:
2547                 fib_path_attached_get_adj(path,
2548                                           fib_forw_chain_type_to_link_type(fct),
2549                                           dpo);
2550                 break;
2551             case FIB_FORW_CHAIN_TYPE_MCAST_IP4:
2552             case FIB_FORW_CHAIN_TYPE_MCAST_IP6:
2553                 {
2554                     adj_index_t ai;
2555
2556                     /*
2557                      * Create the adj needed for sending IP multicast traffic
2558                      */
2559                     if (vnet_sw_interface_is_p2p(vnet_get_main(),
2560                                                  path->attached.fp_interface))
2561                     {
2562                         /*
2563                          * point-2-point interfaces do not require a glean, since
2564                          * there is nothing to ARP. Install a rewrite/nbr adj instead
2565                          */
2566                         ai = adj_nbr_add_or_lock(dpo_proto_to_fib(path->fp_nh_proto),
2567                                                  fib_forw_chain_type_to_link_type(fct),
2568                                                  &zero_addr,
2569                                                  path->attached.fp_interface);
2570                     }
2571                     else
2572                     {
2573                         ai = adj_mcast_add_or_lock(dpo_proto_to_fib(path->fp_nh_proto),
2574                                                    fib_forw_chain_type_to_link_type(fct),
2575                                                    path->attached.fp_interface);
2576                     }
2577                     dpo_set(dpo, DPO_ADJACENCY,
2578                             fib_forw_chain_type_to_dpo_proto(fct),
2579                             ai);
2580                     adj_unlock(ai);
2581                 }
2582                 break;
2583             }
2584             break;
2585         case FIB_PATH_TYPE_INTF_RX:
2586             /*
2587              * Create the interface-rx DPO to rewrite the packet's RX interface
2588              */
2589             interface_rx_dpo_add_or_lock(fib_forw_chain_type_to_dpo_proto(fct),
2590                                          path->intf_rx.fp_interface,
2591                                          dpo);
2592             break;
2593         case FIB_PATH_TYPE_UDP_ENCAP:
2594             udp_encap_contribute_forwarding(path->udp_encap.fp_udp_encap_id,
2595                                             path->fp_nh_proto,
2596                                             dpo);
2597             break;
2598         case FIB_PATH_TYPE_RECEIVE:
2599         case FIB_PATH_TYPE_SPECIAL:
2600         case FIB_PATH_TYPE_DVR:
2601             dpo_copy(dpo, &path->fp_dpo);
2602             break;
2603         }
2604     }
2605 }
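
/*
 * Illustrative sketch: a caller that wants the IPv4 unicast forwarding
 * contributed by a path would do something like:
 *
 *   dpo_id_t dpo = DPO_INVALID;
 *
 *   fib_path_contribute_forwarding (path_index,
 *                                   FIB_FORW_CHAIN_TYPE_UNICAST_IP4,
 *                                   &dpo);
 *   ...
 *   dpo_reset (&dpo);
 */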
2606
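/*
 * fib_path_append_nh_for_multipath_hash
 *
 * Append this path's contribution (weight, index and DPO) to the vector
 * of buckets used to build a load-balance; an unresolved path contributes
 * the drop. Returns the (possibly re-allocated) vector.
 */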
2607 load_balance_path_t *
2608 fib_path_append_nh_for_multipath_hash (fib_node_index_t path_index,
2609                                        fib_forward_chain_type_t fct,
2610                                        load_balance_path_t *hash_key)
2611 {
2612     load_balance_path_t *mnh;
2613     fib_path_t *path;
2614
2615     path = fib_path_get(path_index);
2616
2617     ASSERT(path);
2618
2619     vec_add2(hash_key, mnh, 1);
2620
2621     mnh->path_weight = path->fp_weight;
2622     mnh->path_index = path_index;
2623
2624     if (fib_path_is_resolved(path_index))
2625     {
2626         fib_path_contribute_forwarding(path_index, fct, &mnh->path_dpo);
2627     }
2628     else
2629     {
2630         dpo_copy(&mnh->path_dpo,
2631                  drop_dpo_get(fib_forw_chain_type_to_dpo_proto(fct)));
2632     }
2633     return (hash_key);
2634 }
2635
2636 int
2637 fib_path_is_recursive_constrained (fib_node_index_t path_index)
2638 {
2639     fib_path_t *path;
2640
2641     path = fib_path_get(path_index);
2642
2643     return ((FIB_PATH_TYPE_RECURSIVE == path->fp_type) &&
2644             ((path->fp_cfg_flags & FIB_PATH_CFG_FLAG_RESOLVE_ATTACHED) ||
2645              (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_RESOLVE_HOST)));
2646 }
2647
2648 int
2649 fib_path_is_exclusive (fib_node_index_t path_index)
2650 {
2651     fib_path_t *path;
2652
2653     path = fib_path_get(path_index);
2654
2655     return (FIB_PATH_TYPE_EXCLUSIVE == path->fp_type);
2656 }
2657
2658 int
2659 fib_path_is_deag (fib_node_index_t path_index)
2660 {
2661     fib_path_t *path;
2662
2663     path = fib_path_get(path_index);
2664
2665     return (FIB_PATH_TYPE_DEAG == path->fp_type);
2666 }
2667
2668 int
2669 fib_path_is_resolved (fib_node_index_t path_index)
2670 {
2671     fib_path_t *path;
2672
2673     path = fib_path_get(path_index);
2674
2675     return (dpo_id_is_valid(&path->fp_dpo) &&
2676             (path->fp_oper_flags & FIB_PATH_OPER_FLAG_RESOLVED) &&
2677             !fib_path_is_looped(path_index) &&
2678             !fib_path_is_permanent_drop(path));
2679 }
2680
2681 int
2682 fib_path_is_looped (fib_node_index_t path_index)
2683 {
2684     fib_path_t *path;
2685
2686     path = fib_path_get(path_index);
2687
2688     return (path->fp_oper_flags & FIB_PATH_OPER_FLAG_RECURSIVE_LOOP);
2689 }
2690
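/*
 * fib_path_encode
 *
 * A fib_path_list walk callback that encodes each path as a
 * fib_route_path_t and appends it to the vector in the walk context.
 */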
2691 fib_path_list_walk_rc_t
2692 fib_path_encode (fib_node_index_t path_list_index,
2693                  fib_node_index_t path_index,
2694                  const fib_path_ext_t *path_ext,
2695                  void *args)
2696 {
2697     fib_path_encode_ctx_t *ctx = args;
2698     fib_route_path_t *rpath;
2699     fib_path_t *path;
2700
2701     path = fib_path_get(path_index);
2702     if (!path)
2703       return (FIB_PATH_LIST_WALK_CONTINUE);
2704
2705     vec_add2(ctx->rpaths, rpath, 1);
2706     rpath->frp_weight = path->fp_weight;
2707     rpath->frp_preference = path->fp_preference;
2708     rpath->frp_proto = path->fp_nh_proto;
2709     rpath->frp_sw_if_index = ~0;
2710     rpath->frp_fib_index = 0;
2711
2712     switch (path->fp_type)
2713     {
2714       case FIB_PATH_TYPE_RECEIVE:
2715         rpath->frp_addr = path->receive.fp_addr;
2716         rpath->frp_sw_if_index = path->receive.fp_interface;
2717         rpath->frp_flags |= FIB_ROUTE_PATH_LOCAL;
2718         break;
2719       case FIB_PATH_TYPE_ATTACHED:
2720         rpath->frp_sw_if_index = path->attached.fp_interface;
2721         break;
2722       case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
2723         rpath->frp_sw_if_index = path->attached_next_hop.fp_interface;
2724         rpath->frp_addr = path->attached_next_hop.fp_nh;
2725         break;
2726       case FIB_PATH_TYPE_BIER_FMASK:
2727         rpath->frp_bier_fmask = path->bier_fmask.fp_bier_fmask;
2728         break;
2729       case FIB_PATH_TYPE_SPECIAL:
2730         break;
2731       case FIB_PATH_TYPE_DEAG:
2732         rpath->frp_fib_index = path->deag.fp_tbl_id;
2733         if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_RPF_ID)
2734         {
2735             rpath->frp_flags |= FIB_ROUTE_PATH_RPF_ID;
2736         }
2737         break;
2738       case FIB_PATH_TYPE_RECURSIVE:
2739         rpath->frp_addr = path->recursive.fp_nh.fp_ip;
2740         rpath->frp_fib_index = path->recursive.fp_tbl_id;
2741         break;
2742       case FIB_PATH_TYPE_DVR:
2743           rpath->frp_sw_if_index = path->dvr.fp_interface;
2744           rpath->frp_flags |= FIB_ROUTE_PATH_DVR;
2745           break;
2746       case FIB_PATH_TYPE_UDP_ENCAP:
2747           rpath->frp_udp_encap_id = path->udp_encap.fp_udp_encap_id;
2748           rpath->frp_flags |= FIB_ROUTE_PATH_UDP_ENCAP;
2749           break;
2750       case FIB_PATH_TYPE_INTF_RX:
2751           rpath->frp_sw_if_index = path->intf_rx.fp_interface;
2752           rpath->frp_flags |= FIB_ROUTE_PATH_INTF_RX;
2753           break;
2754       case FIB_PATH_TYPE_EXCLUSIVE:
2755         rpath->frp_flags |= FIB_ROUTE_PATH_EXCLUSIVE;
2756       default:
2757         break;
2758     }
2759
2760     if (path_ext && path_ext->fpe_type == FIB_PATH_EXT_MPLS) 
2761     {
2762         rpath->frp_label_stack = path_ext->fpe_path.frp_label_stack;
2763     }
2764
2765     if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_DROP)
2766         rpath->frp_flags |= FIB_ROUTE_PATH_DROP;
2767     if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_ICMP_UNREACH)
2768         rpath->frp_flags |= FIB_ROUTE_PATH_ICMP_UNREACH;
2769     if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_ICMP_PROHIBIT)
2770         rpath->frp_flags |= FIB_ROUTE_PATH_ICMP_PROHIBIT;
2771
2772     return (FIB_PATH_LIST_WALK_CONTINUE);
2773 }
2774
2775 dpo_proto_t
2776 fib_path_get_proto (fib_node_index_t path_index)
2777 {
2778     fib_path_t *path;
2779
2780     path = fib_path_get(path_index);
2781
2782     return (path->fp_nh_proto);
2783 }
2784
2785 void
2786 fib_path_module_init (void)
2787 {
2788     fib_node_register_type (FIB_NODE_TYPE_PATH, &fib_path_vft);
2789     fib_path_logger = vlib_log_register_class ("fib", "path");
2790 }
2791
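/*
 * CLI usage (the path index below is illustrative):
 *
 *   show fib paths      - brief listing of all paths
 *   show fib paths 13   - detailed dump of path 13, including its children
 */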
2792 static clib_error_t *
2793 show_fib_path_command (vlib_main_t * vm,
2794                         unformat_input_t * input,
2795                         vlib_cli_command_t * cmd)
2796 {
2797     fib_node_index_t pi;
2798     fib_path_t *path;
2799
2800     if (unformat (input, "%d", &pi))
2801     {
2802         /*
2803          * show one in detail
2804          */
2805         if (!pool_is_free_index(fib_path_pool, pi))
2806         {
2807             path = fib_path_get(pi);
2808             u8 *s = format(NULL, "%U", format_fib_path, pi, 1,
2809                            FIB_PATH_FORMAT_FLAGS_NONE);
2810             s = format(s, "\n  children:");
2811             s = fib_node_children_format(path->fp_node.fn_children, s);
2812             vlib_cli_output (vm, "%v", s);
2813             vec_free(s);
2814         }
2815         else
2816         {
2817             vlib_cli_output (vm, "path %d invalid", pi);
2818         }
2819     }
2820     else
2821     {
2822         vlib_cli_output (vm, "FIB Paths");
2823         pool_foreach_index (pi, fib_path_pool,
2824         ({
2825             vlib_cli_output (vm, "%U", format_fib_path, pi, 0,
2826                              FIB_PATH_FORMAT_FLAGS_NONE);
2827         }));
2828     }
2829
2830     return (NULL);
2831 }
2832
2833 VLIB_CLI_COMMAND (show_fib_path, static) = {
2834   .path = "show fib paths",
2835   .function = show_fib_path_command,
2836   .short_help = "show fib paths",
2837 };