fib: fib path realloc during midchain stack
1 /*
2  * Copyright (c) 2016 Cisco and/or its affiliates.
3  * Licensed under the Apache License, Version 2.0 (the "License");
4  * you may not use this file except in compliance with the License.
5  * You may obtain a copy of the License at:
6  *
7  *     http://www.apache.org/licenses/LICENSE-2.0
8  *
9  * Unless required by applicable law or agreed to in writing, software
10  * distributed under the License is distributed on an "AS IS" BASIS,
11  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12  * See the License for the specific language governing permissions and
13  * limitations under the License.
14  */
15
16 #include <vlib/vlib.h>
17 #include <vnet/vnet.h>
18 #include <vnet/ip/format.h>
19 #include <vnet/ip/ip.h>
20 #include <vnet/dpo/drop_dpo.h>
21 #include <vnet/dpo/receive_dpo.h>
22 #include <vnet/dpo/load_balance_map.h>
23 #include <vnet/dpo/lookup_dpo.h>
24 #include <vnet/dpo/interface_rx_dpo.h>
25 #include <vnet/dpo/mpls_disposition.h>
26 #include <vnet/dpo/dvr_dpo.h>
27 #include <vnet/dpo/ip_null_dpo.h>
28 #include <vnet/dpo/classify_dpo.h>
29 #include <vnet/dpo/pw_cw.h>
30
31 #include <vnet/adj/adj.h>
32 #include <vnet/adj/adj_mcast.h>
33
34 #include <vnet/fib/fib_path.h>
35 #include <vnet/fib/fib_node.h>
36 #include <vnet/fib/fib_table.h>
37 #include <vnet/fib/fib_entry.h>
38 #include <vnet/fib/fib_path_list.h>
39 #include <vnet/fib/fib_internal.h>
40 #include <vnet/fib/fib_urpf_list.h>
41 #include <vnet/fib/mpls_fib.h>
42 #include <vnet/fib/fib_path_ext.h>
43 #include <vnet/udp/udp_encap.h>
44 #include <vnet/bier/bier_fmask.h>
45 #include <vnet/bier/bier_table.h>
46 #include <vnet/bier/bier_imp.h>
47 #include <vnet/bier/bier_disp_table.h>
48
49 /**
50  * Enumeration of path types
51  */
52 typedef enum fib_path_type_t_ {
53     /**
54      * Marker. Add new types after this one.
55      */
56     FIB_PATH_TYPE_FIRST = 0,
57     /**
58      * Attached-nexthop. An interface and a nexthop are known.
59      */
60     FIB_PATH_TYPE_ATTACHED_NEXT_HOP = FIB_PATH_TYPE_FIRST,
61     /**
62      * attached. Only the interface is known.
63      */
64     FIB_PATH_TYPE_ATTACHED,
65     /**
66      * recursive. Only the next-hop is known.
67      */
68     FIB_PATH_TYPE_RECURSIVE,
69     /**
70      * special. nothing is known. so we drop.
71      */
72     FIB_PATH_TYPE_SPECIAL,
73     /**
74      * exclusive. user provided adj.
75      */
76     FIB_PATH_TYPE_EXCLUSIVE,
77     /**
78      * deag. Link to a lookup adj in the next table
79      */
80     FIB_PATH_TYPE_DEAG,
81     /**
82      * interface receive.
83      */
84     FIB_PATH_TYPE_INTF_RX,
85     /**
86      * Path resolves via a UDP encap object.
87      */
88     FIB_PATH_TYPE_UDP_ENCAP,
89     /**
90      * receive. it's for-us.
91      */
92     FIB_PATH_TYPE_RECEIVE,
93     /**
94      * bier-imp. it's via a BIER imposition.
95      */
96     FIB_PATH_TYPE_BIER_IMP,
97     /**
98      * bier-table. it's via a BIER ECMP-table.
99      */
100     FIB_PATH_TYPE_BIER_TABLE,
101     /**
102      * bier-fmask. it's via a BIER f-mask.
103      */
104     FIB_PATH_TYPE_BIER_FMASK,
105     /**
106      * via a DVR.
107      */
108     FIB_PATH_TYPE_DVR,
109     /**
110      * Marker. Add new types before this one, then update it.
111      */
112     FIB_PATH_TYPE_LAST = FIB_PATH_TYPE_BIER_FMASK,
113 } __attribute__ ((packed)) fib_path_type_t;
114
115 /**
116  * The maximum number of path_types
117  */
118 #define FIB_PATH_TYPE_MAX (FIB_PATH_TYPE_LAST + 1)
119
120 #define FIB_PATH_TYPES {                                        \
121     [FIB_PATH_TYPE_ATTACHED_NEXT_HOP] = "attached-nexthop",     \
122     [FIB_PATH_TYPE_ATTACHED]          = "attached",             \
123     [FIB_PATH_TYPE_RECURSIVE]         = "recursive",            \
124     [FIB_PATH_TYPE_SPECIAL]           = "special",              \
125     [FIB_PATH_TYPE_EXCLUSIVE]         = "exclusive",            \
126     [FIB_PATH_TYPE_DEAG]              = "deag",                 \
127     [FIB_PATH_TYPE_INTF_RX]           = "intf-rx",              \
128     [FIB_PATH_TYPE_UDP_ENCAP]         = "udp-encap",            \
129     [FIB_PATH_TYPE_RECEIVE]           = "receive",              \
130     [FIB_PATH_TYPE_BIER_IMP]          = "bier-imp",             \
131     [FIB_PATH_TYPE_BIER_TABLE]        = "bier-table",           \
132     [FIB_PATH_TYPE_BIER_FMASK]        = "bier-fmask",           \
133     [FIB_PATH_TYPE_DVR]               = "dvr",                  \
134 }
135
136 #define FOR_EACH_FIB_PATH_TYPE(_item)           \
137     for (_item = FIB_PATH_TYPE_FIRST;           \
138          _item <= FIB_PATH_TYPE_LAST;           \
139          _item++)
140
141 /**
142  * Enumeration of path operational (i.e. derived) attributes
143  */
144 typedef enum fib_path_oper_attribute_t_ {
145     /**
146      * Marker. Add new types after this one.
147      */
148     FIB_PATH_OPER_ATTRIBUTE_FIRST = 0,
149     /**
150      * The path forms part of a recursive loop.
151      */
152     FIB_PATH_OPER_ATTRIBUTE_RECURSIVE_LOOP = FIB_PATH_OPER_ATTRIBUTE_FIRST,
153     /**
154      * The path is resolved
155      */
156     FIB_PATH_OPER_ATTRIBUTE_RESOLVED,
157     /**
158      * The path is attached, despite what the next-hop may say.
159      */
160     FIB_PATH_OPER_ATTRIBUTE_ATTACHED,
161     /**
162      * The path has become a permanent drop.
163      */
164     FIB_PATH_OPER_ATTRIBUTE_DROP,
165     /**
166      * Marker. Add new types before this one, then update it.
167      */
168     FIB_PATH_OPER_ATTRIBUTE_LAST = FIB_PATH_OPER_ATTRIBUTE_DROP,
169 } __attribute__ ((packed)) fib_path_oper_attribute_t;
170
171 /**
172  * The maximum number of path operational attributes
173  */
174 #define FIB_PATH_OPER_ATTRIBUTE_MAX (FIB_PATH_OPER_ATTRIBUTE_LAST + 1)
175
176 #define FIB_PATH_OPER_ATTRIBUTES {                                      \
177     [FIB_PATH_OPER_ATTRIBUTE_RECURSIVE_LOOP] = "recursive-loop",        \
178     [FIB_PATH_OPER_ATTRIBUTE_RESOLVED]       = "resolved",              \
    [FIB_PATH_OPER_ATTRIBUTE_ATTACHED]       = "attached",              \
179     [FIB_PATH_OPER_ATTRIBUTE_DROP]           = "drop",                  \
180 }
181
182 #define FOR_EACH_FIB_PATH_OPER_ATTRIBUTE(_item) \
183     for (_item = FIB_PATH_OPER_ATTRIBUTE_FIRST; \
184          _item <= FIB_PATH_OPER_ATTRIBUTE_LAST; \
185          _item++)
186
187 /**
188  * Path flags from the attributes
189  */
190 typedef enum fib_path_oper_flags_t_ {
191     FIB_PATH_OPER_FLAG_NONE = 0,
192     FIB_PATH_OPER_FLAG_RECURSIVE_LOOP = (1 << FIB_PATH_OPER_ATTRIBUTE_RECURSIVE_LOOP),
193     FIB_PATH_OPER_FLAG_DROP = (1 << FIB_PATH_OPER_ATTRIBUTE_DROP),
194     FIB_PATH_OPER_FLAG_RESOLVED = (1 << FIB_PATH_OPER_ATTRIBUTE_RESOLVED),
195     FIB_PATH_OPER_FLAG_ATTACHED = (1 << FIB_PATH_OPER_ATTRIBUTE_ATTACHED),
196 } __attribute__ ((packed)) fib_path_oper_flags_t;
197
198 /**
199  * A FIB path
200  */
201 typedef struct fib_path_t_ {
202     /**
203      * A path is a node in the FIB graph.
204      */
205     fib_node_t fp_node;
206
207     /**
208      * The index of the path-list to which this path belongs
209      */
210     u32 fp_pl_index;
211
212     /**
213      * This marks the start of the memory area used to hash
214      * the path
215      */
216     STRUCT_MARK(path_hash_start);
217
218     /**
219      * Configuration Flags
220      */
221     fib_path_cfg_flags_t fp_cfg_flags;
222
223     /**
224      * The type of the path. This is the selector for the union
225      */
226     fib_path_type_t fp_type;
227
228     /**
229      * The protocol of the next-hop, i.e. the address family of the
230      * next-hop's address. We can't derive this from the address itself
231      * since the address can be all zeros
232      */
233     dpo_proto_t fp_nh_proto;
234
235     /**
236      * UCMP [unnormalised] weight
237      */
238     u8 fp_weight;
239
240     /**
241      * A path preference. 0 is the best.
242      * Only paths of the best preference, that are 'up', are considered
243      * for forwarding.
244      */
245     u8 fp_preference;
246
247     /**
248      * per-type union of the data required to resolve the path
249      */
250     union {
251         struct {
252             /**
253              * The next-hop
254              */
255             ip46_address_t fp_nh;
256             /**
257              * The interface
258              */
259             u32 fp_interface;
260         } attached_next_hop;
261         struct {
262             /**
263              * The interface
264              */
265             u32 fp_interface;
266         } attached;
267         struct {
268             union
269             {
270                 /**
271                  * The next-hop
272                  */
273                 ip46_address_t fp_ip;
274                 struct {
275                     /**
276                      * The local label to resolve through.
277                      */
278                     mpls_label_t fp_local_label;
279                     /**
280                      * The EOS bit of the resolving label
281                      */
282                     mpls_eos_bit_t fp_eos;
283                 };
284             } fp_nh;
285             union {
286                 /**
287                  * The FIB table index in which to find the next-hop.
288                  */
289                 fib_node_index_t fp_tbl_id;
290                 /**
291                  * The BIER FIB the fmask is in
292                  */
293                 index_t fp_bier_fib;
294             };
295         } recursive;
296         struct {
297             /**
298              * BIER FMask ID
299              */
300             index_t fp_bier_fmask;
301         } bier_fmask;
302         struct {
303             /**
304              * The BIER table's ID
305              */
306             bier_table_id_t fp_bier_tbl;
307         } bier_table;
308         struct {
309             /**
310              * The BIER imposition object
311              * this is part of the path's key, since the index_t
312              * of an imposition object is the object's key.
313              */
314             index_t fp_bier_imp;
315         } bier_imp;
316         struct {
317             /**
318              * The FIB index in which to perform the next lookup
319              */
320             fib_node_index_t fp_tbl_id;
321             /**
322              * The RPF-ID to tag the packets with
323              */
324             fib_rpf_id_t fp_rpf_id;
325         } deag;
326         struct {
327         } special;
328         struct {
329             /**
330              * The user provided 'exclusive' DPO
331              */
332             dpo_id_t fp_ex_dpo;
333         } exclusive;
334         struct {
335             /**
336              * The interface on which the local address is configured
337              */
338             u32 fp_interface;
339             /**
340              * The next-hop
341              */
342             ip46_address_t fp_addr;
343         } receive;
344         struct {
345             /**
346              * The interface on which the packets will be input.
347              */
348             u32 fp_interface;
349         } intf_rx;
350         struct {
351             /**
352              * The UDP Encap object this path resolves through
353              */
354             u32 fp_udp_encap_id;
355         } udp_encap;
356         struct {
357             /**
358              * The classify table this path resolves through
359              */
360             u32 fp_classify_table_id;
361         } classify;
362         struct {
363             /**
364              * The interface
365              */
366             u32 fp_interface;
367         } dvr;
368     };
369     STRUCT_MARK(path_hash_end);
370
371     /**
372      * Members in this last section represent information that is
373      * derived during resolution. It should not be copied to new paths
374      * nor compared.
375      */
376
377     /**
378      * Operational Flags
379      */
380     fib_path_oper_flags_t fp_oper_flags;
381
382     union {
383         /**
384          * the resolving via fib. not part of the union, since it is not part
385          * of the path's hash.
386          */
387         fib_node_index_t fp_via_fib;
388         /**
389          * the resolving bier-table
390          */
391         index_t fp_via_bier_tbl;
392         /**
393          * the resolving bier-fmask
394          */
395         index_t fp_via_bier_fmask;
396     };
397
398     /**
399      * The Data-path objects through which this path resolves for IP.
400      */
401     dpo_id_t fp_dpo;
402
403     /**
404      * the index of this path in the parent's child list.
405      */
406     u32 fp_sibling;
407 } fib_path_t;
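/*
 * Note: only the members between the path_hash_start and path_hash_end
 * markers above form a path's key; they are what fib_path_hash() and the
 * comparison functions operate on. Everything after the end marker is
 * per-resolution state.
 */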
408
409 /*
410  * Array of strings/names for the path types and attributes
411  */
412 static const char *fib_path_type_names[] = FIB_PATH_TYPES;
413 static const char *fib_path_oper_attribute_names[] = FIB_PATH_OPER_ATTRIBUTES;
414 static const char *fib_path_cfg_attribute_names[]  = FIB_PATH_CFG_ATTRIBUTES;
415
416 /*
417  * The memory pool from which we allocate all the paths
418  */
419 static fib_path_t *fib_path_pool;
420
421 /**
422  * the logger
423  */
424 vlib_log_class_t fib_path_logger;
425
426 /*
427  * Debug macro
428  */
429 #define FIB_PATH_DBG(_p, _fmt, _args...)                                \
430 {                                                                       \
431     vlib_log_debug (fib_path_logger,                                    \
432                     "[%U]: " _fmt,                                      \
433                     format_fib_path, fib_path_get_index(_p), 0,         \
434                     FIB_PATH_FORMAT_FLAGS_ONE_LINE,                     \
435                     ##_args);                                           \
436 }
437
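/*
 * Pool accessors: convert between a path's pool index and its object
 * pointer. The pool may be reallocated when new paths are created, so a
 * path pointer must not be held across a call that can allocate paths;
 * hold the index instead and re-fetch.
 */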
438 static fib_path_t *
439 fib_path_get (fib_node_index_t index)
440 {
441     return (pool_elt_at_index(fib_path_pool, index));
442 }
443
444 static fib_node_index_t 
445 fib_path_get_index (fib_path_t *path)
446 {
447     return (path - fib_path_pool);
448 }
449
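/*
 * Graph-node helpers: up-cast a path to its embedded fib_node_t and back.
 */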
450 static fib_node_t *
451 fib_path_get_node (fib_node_index_t index)
452 {
453     return ((fib_node_t*)fib_path_get(index));
454 }
455
456 static fib_path_t*
457 fib_path_from_fib_node (fib_node_t *node)
458 {
459     ASSERT(FIB_NODE_TYPE_PATH == node->fn_type);
460     return ((fib_path_t*)node);
461 }
462
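/*
 * format_fib_path
 *
 * Format a path for 'show' output. The va_args are: the path's index,
 * the indent to use, and the fib_format_path_flags_t (e.g. one-line
 * output as used by the debug log macro).
 */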
463 u8 *
464 format_fib_path (u8 * s, va_list * args)
465 {
466     fib_node_index_t path_index = va_arg (*args, fib_node_index_t);
467     u32 indent = va_arg (*args, u32);
468     fib_format_path_flags_t flags =  va_arg (*args, fib_format_path_flags_t);
469     vnet_main_t * vnm = vnet_get_main();
470     fib_path_oper_attribute_t oattr;
471     fib_path_cfg_attribute_t cattr;
472     fib_path_t *path;
473     const char *eol;
474
475     if (flags & FIB_PATH_FORMAT_FLAGS_ONE_LINE)
476     {
477         eol = "";
478     }
479     else
480     {
481         eol = "\n";
482     }
483
484     path = fib_path_get(path_index);
485
486     s = format (s, "%Upath:[%d] ", format_white_space, indent,
487                 fib_path_get_index(path));
488     s = format (s, "pl-index:%d ", path->fp_pl_index);
489     s = format (s, "%U ", format_dpo_proto, path->fp_nh_proto);
490     s = format (s, "weight=%d ", path->fp_weight);
491     s = format (s, "pref=%d ", path->fp_preference);
492     s = format (s, "%s: ", fib_path_type_names[path->fp_type]);
493     if (FIB_PATH_OPER_FLAG_NONE != path->fp_oper_flags) {
494         s = format(s, " oper-flags:");
495         FOR_EACH_FIB_PATH_OPER_ATTRIBUTE(oattr) {
496             if ((1<<oattr) & path->fp_oper_flags) {
497                 s = format (s, "%s,", fib_path_oper_attribute_names[oattr]);
498             }
499         }
500     }
501     if (FIB_PATH_CFG_FLAG_NONE != path->fp_cfg_flags) {
502         s = format(s, " cfg-flags:");
503         FOR_EACH_FIB_PATH_CFG_ATTRIBUTE(cattr) {
504             if ((1<<cattr) & path->fp_cfg_flags) {
505                 s = format (s, "%s,", fib_path_cfg_attribute_names[cattr]);
506             }
507         }
508     }
509     if (!(flags & FIB_PATH_FORMAT_FLAGS_ONE_LINE))
510         s = format(s, "\n%U", format_white_space, indent+2);
511
512     switch (path->fp_type)
513     {
514     case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
515         s = format (s, "%U", format_ip46_address,
516                     &path->attached_next_hop.fp_nh,
517                     IP46_TYPE_ANY);
518         if (path->fp_oper_flags & FIB_PATH_OPER_FLAG_DROP)
519         {
520             s = format (s, " if_index:%d", path->attached_next_hop.fp_interface);
521         }
522         else
523         {
524             s = format (s, " %U",
525                         format_vnet_sw_interface_name,
526                         vnm,
527                         vnet_get_sw_interface(
528                             vnm,
529                             path->attached_next_hop.fp_interface));
530             if (vnet_sw_interface_is_p2p(vnet_get_main(),
531                                          path->attached_next_hop.fp_interface))
532             {
533                 s = format (s, " (p2p)");
534             }
535         }
536         if (!dpo_id_is_valid(&path->fp_dpo))
537         {
538             s = format(s, "%s%Uunresolved", eol, format_white_space, indent+2);
539         }
540         else
541         {
542             s = format(s, "%s%U%U", eol,
543                        format_white_space, indent,
544                        format_dpo_id,
545                        &path->fp_dpo, 13);
546         }
547         break;
548     case FIB_PATH_TYPE_ATTACHED:
549         if (path->fp_oper_flags & FIB_PATH_OPER_FLAG_DROP)
550         {
551             s = format (s, "if_index:%d", path->attached.fp_interface);
552         }
553         else
554         {
555             s = format (s, " %U",
556                         format_vnet_sw_interface_name,
557                         vnm,
558                         vnet_get_sw_interface(
559                             vnm,
560                             path->attached.fp_interface));
561         }
562         break;
563     case FIB_PATH_TYPE_RECURSIVE:
564         if (DPO_PROTO_MPLS == path->fp_nh_proto)
565         {
566             s = format (s, "via %U %U",
567                         format_mpls_unicast_label,
568                         path->recursive.fp_nh.fp_local_label,
569                         format_mpls_eos_bit,
570                         path->recursive.fp_nh.fp_eos);
571         }
572         else
573         {
574             s = format (s, "via %U",
575                         format_ip46_address,
576                         &path->recursive.fp_nh.fp_ip,
577                         IP46_TYPE_ANY);
578         }
579         s = format (s, " in fib:%d",
580                     path->recursive.fp_tbl_id);
581         s = format (s, " via-fib:%d",
582                     path->fp_via_fib);
583         s = format (s, " via-dpo:[%U:%d]",
584                     format_dpo_type, path->fp_dpo.dpoi_type, 
585                     path->fp_dpo.dpoi_index);
586
587         break;
588     case FIB_PATH_TYPE_UDP_ENCAP:
589         s = format (s, "UDP-encap ID:%d", path->udp_encap.fp_udp_encap_id);
590         break;
591     case FIB_PATH_TYPE_BIER_TABLE:
592         s = format (s, "via bier-table:[%U]",
593                     format_bier_table_id,
594                     &path->bier_table.fp_bier_tbl);
595         s = format (s, " via-dpo:[%U:%d]",
596                     format_dpo_type, path->fp_dpo.dpoi_type,
597                     path->fp_dpo.dpoi_index);
598         break;
599     case FIB_PATH_TYPE_BIER_FMASK:
600         s = format (s, "via-fmask:%d", path->bier_fmask.fp_bier_fmask); 
601         s = format (s, " via-dpo:[%U:%d]",
602                     format_dpo_type, path->fp_dpo.dpoi_type, 
603                     path->fp_dpo.dpoi_index);
604         break;
605     case FIB_PATH_TYPE_BIER_IMP:
606         s = format (s, "via %U", format_bier_imp,
607                     path->bier_imp.fp_bier_imp, 0, BIER_SHOW_BRIEF);
608         break;
609     case FIB_PATH_TYPE_DVR:
610         s = format (s, " %U",
611                     format_vnet_sw_interface_name,
612                     vnm,
613                     vnet_get_sw_interface(
614                         vnm,
615                         path->dvr.fp_interface));
616         break;
617     case FIB_PATH_TYPE_DEAG:
618         s = format (s, " %sfib-index:%d",
619                     (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_RPF_ID ?  "m" : ""),
620                     path->deag.fp_tbl_id);
621         break;
622     case FIB_PATH_TYPE_RECEIVE:
623     case FIB_PATH_TYPE_INTF_RX:
624     case FIB_PATH_TYPE_SPECIAL:
625     case FIB_PATH_TYPE_EXCLUSIVE:
626         if (dpo_id_is_valid(&path->fp_dpo))
627         {
628             s = format(s, "%U", format_dpo_id,
629                        &path->fp_dpo, indent+2);
630         }
631         break;
632     }
633     return (s);
634 }
635
636 /*
637  * fib_path_last_lock_gone
638  *
639  * We don't share paths, we share path lists, so the [un]lock functions
640  * are no-ops
641  */
642 static void
643 fib_path_last_lock_gone (fib_node_t *node)
644 {
645     ASSERT(0);
646 }
647
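/*
 * fib_path_attached_next_hop_get_adj
 *
 * Fetch (or create) the adjacency for the path's {next-hop, interface}
 * pair and set the passed DPO to it. Creating the adjacency can cause
 * other paths to be created (e.g. when a midchain re-stacks), which may
 * reallocate the path pool and invalidate the caller's path pointer.
 * Hence the path's index is saved up-front and the (possibly moved)
 * path object is returned to the caller.
 */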
648 static fib_path_t*
649 fib_path_attached_next_hop_get_adj (fib_path_t *path,
650                                     vnet_link_t link,
651                                     dpo_id_t *dpo)
652 {
653     fib_node_index_t fib_path_index;
654     fib_protocol_t nh_proto;
655     adj_index_t ai;
656
657     fib_path_index = fib_path_get_index(path);
658     nh_proto = dpo_proto_to_fib(path->fp_nh_proto);
659
660     if (vnet_sw_interface_is_p2p(vnet_get_main(),
661                                  path->attached_next_hop.fp_interface))
662     {
663         /*
664          * if the interface is p2p then the adj for the specific
665          * neighbour on that link will never exist. on p2p links
666          * the subnet address (the attached route) links to the
667          * auto-adj (see below), we want that adj here too.
668          */
669         ai = adj_nbr_add_or_lock(nh_proto, link, &zero_addr,
670                                  path->attached_next_hop.fp_interface);
671     }
672     else
673     {
674         ai = adj_nbr_add_or_lock(nh_proto, link,
675                                  &path->attached_next_hop.fp_nh,
676                                  path->attached_next_hop.fp_interface);
677     }
678
679     dpo_set(dpo, DPO_ADJACENCY, vnet_link_to_dpo_proto(link), ai);
680     adj_unlock(ai);
681
682     return (fib_path_get(fib_path_index));
683 }
684
685 static void
686 fib_path_attached_next_hop_set (fib_path_t *path)
687 {
688     /*
689      * resolve directly via the adjacency described by the
690      * interface and next-hop
691      */
692     path = fib_path_attached_next_hop_get_adj(path,
693                                               dpo_proto_to_link(path->fp_nh_proto),
694                                               &path->fp_dpo);
695
696     ASSERT(dpo_is_adj(&path->fp_dpo));
697
698     /*
699      * become a child of the adjacency so we receive updates
700      * when its rewrite changes
701      */
702     path->fp_sibling = adj_child_add(path->fp_dpo.dpoi_index,
703                                      FIB_NODE_TYPE_PATH,
704                                      fib_path_get_index(path));
705
706     if (!vnet_sw_interface_is_up(vnet_get_main(),
707                                  path->attached_next_hop.fp_interface) ||
708         !adj_is_up(path->fp_dpo.dpoi_index))
709     {
710         path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
711     }
712 }
713
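/*
 * fib_path_attached_get_adj
 *
 * Contribute the adjacency for an attached path: a neighbour adj on
 * point-to-point links, a drop on NBMA links, and a glean (ARP/ND)
 * adj otherwise.
 */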
714 static void
715 fib_path_attached_get_adj (fib_path_t *path,
716                            vnet_link_t link,
717                            dpo_id_t *dpo)
718 {
719     fib_protocol_t nh_proto;
720
721     nh_proto = dpo_proto_to_fib(path->fp_nh_proto);
722
723     if (vnet_sw_interface_is_p2p(vnet_get_main(),
724                                  path->attached.fp_interface))
725     {
726         /*
727          * point-2-point interfaces do not require a glean, since
728          * there is nothing to ARP. Install a rewrite/nbr adj instead
729          */
730         adj_index_t ai;
731
732         ai = adj_nbr_add_or_lock(nh_proto, link, &zero_addr,
733                                  path->attached.fp_interface);
734
735         dpo_set(dpo, DPO_ADJACENCY, vnet_link_to_dpo_proto(link), ai);
736         adj_unlock(ai);
737     }
738     else if (vnet_sw_interface_is_nbma(vnet_get_main(),
739                                        path->attached.fp_interface))
740     {
741         dpo_copy(dpo, drop_dpo_get(path->fp_nh_proto));
742     }
743     else
744     {
745         adj_index_t ai;
746
747         ai = adj_glean_add_or_lock(nh_proto, link,
748                                    path->attached.fp_interface,
749                                    NULL);
750         dpo_set(dpo, DPO_ADJACENCY_GLEAN, vnet_link_to_dpo_proto(link), ai);
751         adj_unlock(ai);
752     }
753 }
754
755 /*
756  * create or update the path's recursive adj
757  */
758 static void
759 fib_path_recursive_adj_update (fib_path_t *path,
760                                fib_forward_chain_type_t fct,
761                                dpo_id_t *dpo)
762 {
763     dpo_id_t via_dpo = DPO_INVALID;
764
765     /*
766      * get the DPO to resolve through from the via-entry
767      */
768     fib_entry_contribute_forwarding(path->fp_via_fib,
769                                     fct,
770                                     &via_dpo);
771
772
773     /*
774      * hope for the best - clear if restrictions apply.
775      */
776     path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RESOLVED;
777
778     /*
779      * Validate any recursion constraints and over-ride the via
780      * adj if not met
781      */
782     if (path->fp_oper_flags & FIB_PATH_OPER_FLAG_RECURSIVE_LOOP)
783     {
784         path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
785         dpo_copy(&via_dpo, drop_dpo_get(path->fp_nh_proto));
786     }
787     else if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_RESOLVE_HOST)
788     {
789         /*
790          * the via FIB must be a host route.
791          * note the via FIB just added will always be a host route
792          * since it is an RR source added host route. So what we need to
793          * check is whether the route has other sources. If it does then
794          * some other source has added it as a host route. If it doesn't
795          * then it was added only here and inherits forwarding from a cover.
796          * the cover is not a host route.
797          * The RR source is the lowest priority source, so we check if it
798          * is the best. if it is there are no other sources.
799          */
800         if (fib_entry_get_best_source(path->fp_via_fib) >= FIB_SOURCE_RR)
801         {
802             path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
803             dpo_copy(&via_dpo, drop_dpo_get(path->fp_nh_proto));
804
805             /*
806              * PIC edge trigger. let the load-balance maps know
807              */
808             load_balance_map_path_state_change(fib_path_get_index(path));
809         }
810     }
811     else if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_RESOLVE_ATTACHED)
812     {
813         /*
814          * RR source entries inherit the flags from the cover, so
815          * we can check the via directly
816          */
817         if (!(FIB_ENTRY_FLAG_ATTACHED & fib_entry_get_flags(path->fp_via_fib)))
818         {
819             path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
820             dpo_copy(&via_dpo, drop_dpo_get(path->fp_nh_proto));
821
822             /*
823              * PIC edge trigger. let the load-balance maps know
824              */
825             load_balance_map_path_state_change(fib_path_get_index(path));
826         }
827     }
828     /*
829      * check for over-riding factors on the FIB entry itself
830      */
831     if (!fib_entry_is_resolved(path->fp_via_fib))
832     {
833         path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
834         dpo_copy(&via_dpo, drop_dpo_get(path->fp_nh_proto));
835
836         /*
837          * PIC edge trigger. let the load-balance maps know
838          */
839         load_balance_map_path_state_change(fib_path_get_index(path));
840     }
841
842     /*
843      * If this path is contributing a drop, then it's not resolved
844      */
845     if (dpo_is_drop(&via_dpo) || load_balance_is_drop(&via_dpo))
846     {
847         path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
848     }
849
850     /*
851      * update the path's contributed DPO
852      */
853     dpo_copy(dpo, &via_dpo);
854
855     FIB_PATH_DBG(path, "recursive update:");
856
857     dpo_reset(&via_dpo);
858 }
859
860 /*
861  * re-evaluate the forwarding state for a via-fmask path
862  */
863 static void
864 fib_path_bier_fmask_update (fib_path_t *path,
865                             dpo_id_t *dpo)
866 {
867     bier_fmask_contribute_forwarding(path->bier_fmask.fp_bier_fmask, dpo);
868
869     /*
870      * if we are stacking on the drop, then the path is not resolved
871      */
872     if (dpo_is_drop(dpo))
873     {
874         path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
875     }
876     else
877     {
878         path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RESOLVED;
879     }
880 }
881
882 /*
883  * fib_path_is_permanent_drop
884  *
885  * Return !0 if the path is configured to permanently drop,
886  * despite other attributes.
887  */
888 static int
889 fib_path_is_permanent_drop (fib_path_t *path)
890 {
891     return ((path->fp_cfg_flags & FIB_PATH_CFG_FLAG_DROP) ||
892             (path->fp_oper_flags & FIB_PATH_OPER_FLAG_DROP));
893 }
894
895 /*
896  * fib_path_unresolve
897  *
898  * Remove our dependency on the resolution target
899  */
900 static void
901 fib_path_unresolve (fib_path_t *path)
902 {
903     /*
904      * the forced drop path does not need unresolving
905      */
906     if (fib_path_is_permanent_drop(path))
907     {
908         return;
909     }
910
911     switch (path->fp_type)
912     {
913     case FIB_PATH_TYPE_RECURSIVE:
914         if (FIB_NODE_INDEX_INVALID != path->fp_via_fib)
915         {
916             fib_entry_child_remove(path->fp_via_fib,
917                                    path->fp_sibling);
918             fib_table_entry_special_remove(path->recursive.fp_tbl_id,
919                                            fib_entry_get_prefix(path->fp_via_fib),
920                                            FIB_SOURCE_RR);
921             fib_table_unlock(path->recursive.fp_tbl_id,
922                              dpo_proto_to_fib(path->fp_nh_proto),
923                              FIB_SOURCE_RR);
924             path->fp_via_fib = FIB_NODE_INDEX_INVALID;
925         }
926         break;
927     case FIB_PATH_TYPE_BIER_FMASK:
928         bier_fmask_child_remove(path->fp_via_bier_fmask,
929                                 path->fp_sibling);
930         break;
931     case FIB_PATH_TYPE_BIER_IMP:
932         bier_imp_unlock(path->fp_dpo.dpoi_index);
933         break;
934     case FIB_PATH_TYPE_BIER_TABLE:
935         bier_table_ecmp_unlock(path->fp_via_bier_tbl);
936         break;
937     case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
938     case FIB_PATH_TYPE_ATTACHED:
939         if (dpo_is_adj(&path->fp_dpo))
940             adj_child_remove(path->fp_dpo.dpoi_index,
941                              path->fp_sibling);
942         break;
943     case FIB_PATH_TYPE_UDP_ENCAP:
944         udp_encap_unlock(path->fp_dpo.dpoi_index);
945         break;
946     case FIB_PATH_TYPE_EXCLUSIVE:
947         dpo_reset(&path->exclusive.fp_ex_dpo);
948         break;
949     case FIB_PATH_TYPE_SPECIAL:
950     case FIB_PATH_TYPE_RECEIVE:
951     case FIB_PATH_TYPE_INTF_RX:
952     case FIB_PATH_TYPE_DEAG:
953     case FIB_PATH_TYPE_DVR:
954         /*
955          * these hold only the path's DPO, which is reset below.
956          */
957         break;
958     }
959
960     /*
961      * release the adj we were holding and pick up the
962      * drop just in case.
963      */
964     dpo_reset(&path->fp_dpo);
965     path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
966
967     return;
968 }
969
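/*
 * fib_path_to_chain_type
 *
 * Determine the forwarding chain type to request from the path's
 * resolving object: MPLS paths choose EOS/non-EOS based on the
 * resolving label's EOS bit; all others follow the next-hop protocol.
 */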
970 static fib_forward_chain_type_t
971 fib_path_to_chain_type (const fib_path_t *path)
972 {
973     if (DPO_PROTO_MPLS == path->fp_nh_proto)
974     {
975         if (FIB_PATH_TYPE_RECURSIVE == path->fp_type &&
976             MPLS_EOS == path->recursive.fp_nh.fp_eos)
977         {
978             return (FIB_FORW_CHAIN_TYPE_MPLS_EOS);
979         }
980         else
981         {
982             return (FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS);
983         }
984     }
985     else
986     {
987         return (fib_forw_chain_type_from_dpo_proto(path->fp_nh_proto));
988     }
989 }
990
991 /*
992  * fib_path_back_walk_notify
993  *
994  * A back walk has reached this path.
995  */
996 static fib_node_back_walk_rc_t
997 fib_path_back_walk_notify (fib_node_t *node,
998                            fib_node_back_walk_ctx_t *ctx)
999 {
1000     fib_path_t *path;
1001
1002     path = fib_path_from_fib_node(node);
1003
1004     FIB_PATH_DBG(path, "bw:%U",
1005                  format_fib_node_bw_reason, ctx->fnbw_reason);
1006
1007     switch (path->fp_type)
1008     {
1009     case FIB_PATH_TYPE_RECURSIVE:
1010         if (FIB_NODE_BW_REASON_FLAG_EVALUATE & ctx->fnbw_reason)
1011         {
1012             /*
1013              * modify the recursive adjacency to use the new forwarding
1014              * of the via-fib.
1015              * this update is visible to packets in flight in the DP.
1016              */
1017             fib_path_recursive_adj_update(
1018                 path,
1019                 fib_path_to_chain_type(path),
1020                 &path->fp_dpo);
1021         }
1022         if ((FIB_NODE_BW_REASON_FLAG_ADJ_UPDATE & ctx->fnbw_reason) ||
1023             (FIB_NODE_BW_REASON_FLAG_ADJ_DOWN   & ctx->fnbw_reason))
1024         {
1025             /*
1026              * ADJ updates (complete<->incomplete) do not need to propagate to
1027              * recursive entries.
1028              * The only reason it's needed as far back as here is that the adj
1029              * and the incomplete adj are a different DPO type, so the LBs need
1030              * to re-stack.
1031              * If this walk was quashed in the fib_entry, then any non-fib_path
1032              * children (like tunnels that collapse out the LB when they stack)
1033              * would not see the update.
1034              */
1035             return (FIB_NODE_BACK_WALK_CONTINUE);
1036         }
1037         break;
1038     case FIB_PATH_TYPE_BIER_FMASK:
1039         if (FIB_NODE_BW_REASON_FLAG_EVALUATE & ctx->fnbw_reason)
1040         {
1041             /*
1042              * update to use the BIER fmask's new forwarding
1043              */
1044             fib_path_bier_fmask_update(path, &path->fp_dpo);
1045         }
1046         if ((FIB_NODE_BW_REASON_FLAG_ADJ_UPDATE & ctx->fnbw_reason) ||
1047             (FIB_NODE_BW_REASON_FLAG_ADJ_DOWN   & ctx->fnbw_reason))
1048         {
1049             /*
1050              * ADJ updates (complete<->incomplete) do not need to propagate to
1051              * recursive entries.
1052              * The only reason it's needed as far back as here is that the adj
1053              * and the incomplete adj are a different DPO type, so the LBs need
1054              * to re-stack.
1055              * If this walk was quashed in the fib_entry, then any non-fib_path
1056              * children (like tunnels that collapse out the LB when they stack)
1057              * would not see the update.
1058              */
1059             return (FIB_NODE_BACK_WALK_CONTINUE);
1060         }
1061         break;
1062     case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
1063         /*
1064          * FIXME comment
1065          * ADJ_UPDATE backwalks pass silently through here and up to
1066          * the path-list when the multipath adj collapse occurs.
1067          * The reason we do this is that the assumption is that VPP
1068          * runs in an environment where the Control-Plane is remote
1069          * and hence reacts slowly to link up down. In order to remove
1070          * this down link from the ECMP set quickly, we back-walk.
1071          * VPP also has dedicated CPUs, so we are not stealing resources
1072          * from the CP to do so.
1073          */
1074         if (FIB_NODE_BW_REASON_FLAG_INTERFACE_UP & ctx->fnbw_reason)
1075         {
1076             if (path->fp_oper_flags & FIB_PATH_OPER_FLAG_RESOLVED)
1077             {
1078                 /*
1079                  * already resolved. no need to walk back again
1080                  */
1081                 return (FIB_NODE_BACK_WALK_CONTINUE);
1082             }
1083             path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RESOLVED;
1084         }
1085         if (FIB_NODE_BW_REASON_FLAG_INTERFACE_DOWN & ctx->fnbw_reason)
1086         {
1087             if (!(path->fp_oper_flags & FIB_PATH_OPER_FLAG_RESOLVED))
1088             {
1089                 /*
1090                  * already unresolved. no need to walk back again
1091                  */
1092                 return (FIB_NODE_BACK_WALK_CONTINUE);
1093             }
1094             path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
1095         }
1096         if (FIB_NODE_BW_REASON_FLAG_INTERFACE_DELETE & ctx->fnbw_reason)
1097         {
1098             /*
1099              * The interface this path resolves through has been deleted.
1100              * This will leave the path in a permanent drop state. The route
1101              * needs to be removed and readded (and hence the path-list deleted)
1102              * before it can forward again.
1103              */
1104             fib_path_unresolve(path);
1105             path->fp_oper_flags |= FIB_PATH_OPER_FLAG_DROP;
1106         }
1107         if (FIB_NODE_BW_REASON_FLAG_ADJ_UPDATE & ctx->fnbw_reason)
1108         {
1109             /*
1110              * restack the DPO to pick up the correct DPO sub-type
1111              */
1112             uword if_is_up;
1113
1114             if_is_up = vnet_sw_interface_is_up(
1115                            vnet_get_main(),
1116                            path->attached_next_hop.fp_interface);
1117
1118             path = fib_path_attached_next_hop_get_adj(
1119                 path,
1120                 dpo_proto_to_link(path->fp_nh_proto),
1121                 &path->fp_dpo);
1122
1123             path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
1124             if (if_is_up && adj_is_up(path->fp_dpo.dpoi_index))
1125             {
1126                 path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RESOLVED;
1127             }
1128
1129             if (!if_is_up)
1130             {
1131                 /*
1132                  * If the interface is not up there is no reason to walk
1133                  * back to children. if we did they would only evaluate
1134                  * that this path is unresolved and hence it would
1135                  * not contribute the adjacency - so it would be wasted
1136                  * CPU time.
1137                  */
1138                 return (FIB_NODE_BACK_WALK_CONTINUE);
1139             }
1140         }
1141         if (FIB_NODE_BW_REASON_FLAG_ADJ_DOWN & ctx->fnbw_reason)
1142         {
1143             if (!(path->fp_oper_flags & FIB_PATH_OPER_FLAG_RESOLVED))
1144             {
1145                 /*
1146                  * already unresolved. no need to walk back again
1147                  */
1148                 return (FIB_NODE_BACK_WALK_CONTINUE);
1149             }
1150             /*
1151              * the adj has gone down. the path is no longer resolved.
1152              */
1153             path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
1154         }
1155         break;
1156     case FIB_PATH_TYPE_ATTACHED:
1157     case FIB_PATH_TYPE_DVR:
1158         /*
1159          * FIXME; this could schedule a lower priority walk, since attached
1160          * routes are not usually in ECMP configurations so the backwalk to
1161          * the FIB entry does not need to be high priority
1162          */
1163         if (FIB_NODE_BW_REASON_FLAG_INTERFACE_UP & ctx->fnbw_reason)
1164         {
1165             path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RESOLVED;
1166         }
1167         if (FIB_NODE_BW_REASON_FLAG_INTERFACE_DOWN & ctx->fnbw_reason)
1168         {
1169             path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
1170         }
1171         if (FIB_NODE_BW_REASON_FLAG_INTERFACE_DELETE & ctx->fnbw_reason)
1172         {
1173             fib_path_unresolve(path);
1174             path->fp_oper_flags |= FIB_PATH_OPER_FLAG_DROP;
1175         }
1176         break;
1177     case FIB_PATH_TYPE_UDP_ENCAP:
1178     {
1179         dpo_id_t via_dpo = DPO_INVALID;
1180
1181         /*
1182          * hope for the best - clear if restrictions apply.
1183          */
1184         path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RESOLVED;
1185
1186         udp_encap_contribute_forwarding(path->udp_encap.fp_udp_encap_id,
1187                                         path->fp_nh_proto,
1188                                         &via_dpo);
1189         /*
1190          * If this path is contributing a drop, then it's not resolved
1191          */
1192         if (dpo_is_drop(&via_dpo) || load_balance_is_drop(&via_dpo))
1193         {
1194             path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
1195         }
1196
1197         /*
1198          * update the path's contributed DPO
1199          */
1200         dpo_copy(&path->fp_dpo, &via_dpo);
1201         dpo_reset(&via_dpo);
1202         break;
1203     }
1204     case FIB_PATH_TYPE_INTF_RX:
1205         ASSERT(0);
1206     case FIB_PATH_TYPE_DEAG:
1207         /*
1208          * FIXME When VRF delete is allowed this will need a poke.
1209          */
1210     case FIB_PATH_TYPE_SPECIAL:
1211     case FIB_PATH_TYPE_RECEIVE:
1212     case FIB_PATH_TYPE_EXCLUSIVE:
1213     case FIB_PATH_TYPE_BIER_TABLE:
1214     case FIB_PATH_TYPE_BIER_IMP:
1215         /*
1216          * these path types have no parents. so to be
1217          * walked from one is unexpected.
1218          */
1219         ASSERT(0);
1220         break;
1221     }
1222
1223     /*
1224      * propagate the backwalk further to the path-list
1225      */
1226     fib_path_list_back_walk(path->fp_pl_index, ctx);
1227
1228     return (FIB_NODE_BACK_WALK_CONTINUE);
1229 }
1230
1231 static void
1232 fib_path_memory_show (void)
1233 {
1234     fib_show_memory_usage("Path",
1235                           pool_elts(fib_path_pool),
1236                           pool_len(fib_path_pool),
1237                           sizeof(fib_path_t));
1238 }
1239
1240 /*
1241  * The FIB path's graph node virtual function table
1242  */
1243 static const fib_node_vft_t fib_path_vft = {
1244     .fnv_get = fib_path_get_node,
1245     .fnv_last_lock = fib_path_last_lock_gone,
1246     .fnv_back_walk = fib_path_back_walk_notify,
1247     .fnv_mem_show = fib_path_memory_show,
1248 };
1249
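/*
 * fib_path_route_flags_to_cfg_flags
 *
 * Translate the route-path flags supplied via the API/CLI into the
 * path's internal configuration flags.
 */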
1250 static fib_path_cfg_flags_t
1251 fib_path_route_flags_to_cfg_flags (const fib_route_path_t *rpath)
1252 {
1253     fib_path_cfg_flags_t cfg_flags = FIB_PATH_CFG_FLAG_NONE;
1254
1255     if (rpath->frp_flags & FIB_ROUTE_PATH_POP_PW_CW)
1256         cfg_flags |= FIB_PATH_CFG_FLAG_POP_PW_CW;
1257     if (rpath->frp_flags & FIB_ROUTE_PATH_RESOLVE_VIA_HOST)
1258         cfg_flags |= FIB_PATH_CFG_FLAG_RESOLVE_HOST;
1259     if (rpath->frp_flags & FIB_ROUTE_PATH_RESOLVE_VIA_ATTACHED)
1260         cfg_flags |= FIB_PATH_CFG_FLAG_RESOLVE_ATTACHED;
1261     if (rpath->frp_flags & FIB_ROUTE_PATH_LOCAL)
1262         cfg_flags |= FIB_PATH_CFG_FLAG_LOCAL;
1263     if (rpath->frp_flags & FIB_ROUTE_PATH_ATTACHED)
1264         cfg_flags |= FIB_PATH_CFG_FLAG_ATTACHED;
1265     if (rpath->frp_flags & FIB_ROUTE_PATH_INTF_RX)
1266         cfg_flags |= FIB_PATH_CFG_FLAG_INTF_RX;
1267     if (rpath->frp_flags & FIB_ROUTE_PATH_RPF_ID)
1268         cfg_flags |= FIB_PATH_CFG_FLAG_RPF_ID;
1269     if (rpath->frp_flags & FIB_ROUTE_PATH_EXCLUSIVE)
1270         cfg_flags |= FIB_PATH_CFG_FLAG_EXCLUSIVE;
1271     if (rpath->frp_flags & FIB_ROUTE_PATH_DROP)
1272         cfg_flags |= FIB_PATH_CFG_FLAG_DROP;
1273     if (rpath->frp_flags & FIB_ROUTE_PATH_SOURCE_LOOKUP)
1274         cfg_flags |= FIB_PATH_CFG_FLAG_DEAG_SRC;
1275     if (rpath->frp_flags & FIB_ROUTE_PATH_ICMP_UNREACH)
1276         cfg_flags |= FIB_PATH_CFG_FLAG_ICMP_UNREACH;
1277     if (rpath->frp_flags & FIB_ROUTE_PATH_ICMP_PROHIBIT)
1278         cfg_flags |= FIB_PATH_CFG_FLAG_ICMP_PROHIBIT;
1279
1280     return (cfg_flags);
1281 }
1282
1283 /*
1284  * fib_path_create
1285  *
1286  * Create and initialise a new path object.
1287  * return the index of the path.
1288  */
1289 fib_node_index_t
1290 fib_path_create (fib_node_index_t pl_index,
1291                  const fib_route_path_t *rpath)
1292 {
1293     fib_path_t *path;
1294
1295     pool_get(fib_path_pool, path);
1296     clib_memset(path, 0, sizeof(*path));
1297
1298     fib_node_init(&path->fp_node,
1299                   FIB_NODE_TYPE_PATH);
1300
1301     dpo_reset(&path->fp_dpo);
1302     path->fp_pl_index = pl_index;
1303     path->fp_nh_proto = rpath->frp_proto;
1304     path->fp_via_fib = FIB_NODE_INDEX_INVALID;
1305     path->fp_weight = rpath->frp_weight;
1306     if (0 == path->fp_weight)
1307     {
1308         /*
1309          * a weight of 0 is a meaningless value. We could either reject it, and thus force
1310          * clients to always use 1, or we can accept it and fix it up appropriately.
1311          */
1312         path->fp_weight = 1;
1313     }
1314     path->fp_preference = rpath->frp_preference;
1315     path->fp_cfg_flags = fib_path_route_flags_to_cfg_flags(rpath);
1316
1317     /*
1318      * deduce the path's type from the parameters and save what is needed.
1319      */
1320     if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_LOCAL)
1321     {
1322         path->fp_type = FIB_PATH_TYPE_RECEIVE;
1323         path->receive.fp_interface = rpath->frp_sw_if_index;
1324         path->receive.fp_addr = rpath->frp_addr;
1325     }
1326     else if (rpath->frp_flags & FIB_ROUTE_PATH_UDP_ENCAP)
1327     {
1328         path->fp_type = FIB_PATH_TYPE_UDP_ENCAP;
1329         path->udp_encap.fp_udp_encap_id = rpath->frp_udp_encap_id;
1330     }
1331     else if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_INTF_RX)
1332     {
1333         path->fp_type = FIB_PATH_TYPE_INTF_RX;
1334         path->intf_rx.fp_interface = rpath->frp_sw_if_index;
1335     }
1336     else if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_RPF_ID)
1337     {
1338         path->fp_type = FIB_PATH_TYPE_DEAG;
1339         path->deag.fp_tbl_id = rpath->frp_fib_index;
1340         path->deag.fp_rpf_id = rpath->frp_rpf_id;
1341     }
1342     else if (rpath->frp_flags & FIB_ROUTE_PATH_BIER_FMASK)
1343     {
1344         path->fp_type = FIB_PATH_TYPE_BIER_FMASK;
1345         path->bier_fmask.fp_bier_fmask = rpath->frp_bier_fmask;
1346     }
1347     else if (rpath->frp_flags & FIB_ROUTE_PATH_BIER_IMP)
1348     {
1349         path->fp_type = FIB_PATH_TYPE_BIER_IMP;
1350         path->bier_imp.fp_bier_imp = rpath->frp_bier_imp;
1351     }
1352     else if (rpath->frp_flags & FIB_ROUTE_PATH_BIER_TABLE)
1353     {
1354         path->fp_type = FIB_PATH_TYPE_BIER_TABLE;
1355         path->bier_table.fp_bier_tbl = rpath->frp_bier_tbl;
1356     }
1357     else if (rpath->frp_flags & FIB_ROUTE_PATH_DEAG)
1358     {
1359         path->fp_type = FIB_PATH_TYPE_DEAG;
1360         path->deag.fp_tbl_id = rpath->frp_fib_index;
1361     }
1362     else if (rpath->frp_flags & FIB_ROUTE_PATH_DVR)
1363     {
1364         path->fp_type = FIB_PATH_TYPE_DVR;
1365         path->dvr.fp_interface = rpath->frp_sw_if_index;
1366     }
1367     else if (rpath->frp_flags & FIB_ROUTE_PATH_EXCLUSIVE)
1368     {
1369         path->fp_type = FIB_PATH_TYPE_EXCLUSIVE;
1370         dpo_copy(&path->exclusive.fp_ex_dpo, &rpath->dpo);
1371     }
1372     else if ((path->fp_cfg_flags & FIB_PATH_CFG_FLAG_ICMP_PROHIBIT) ||
1373         (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_ICMP_UNREACH))
1374     {
1375         path->fp_type = FIB_PATH_TYPE_SPECIAL;
1376     }
1377     else if ((path->fp_cfg_flags & FIB_PATH_CFG_FLAG_CLASSIFY))
1378     {
1379         path->fp_type = FIB_PATH_TYPE_SPECIAL;
1380         path->classify.fp_classify_table_id = rpath->frp_classify_table_id;
1381     }
1382     else if (~0 != rpath->frp_sw_if_index)
1383     {
1384         if (ip46_address_is_zero(&rpath->frp_addr))
1385         {
1386             path->fp_type = FIB_PATH_TYPE_ATTACHED;
1387             path->attached.fp_interface = rpath->frp_sw_if_index;
1388         }
1389         else
1390         {
1391             path->fp_type = FIB_PATH_TYPE_ATTACHED_NEXT_HOP;
1392             path->attached_next_hop.fp_interface = rpath->frp_sw_if_index;
1393             path->attached_next_hop.fp_nh = rpath->frp_addr;
1394         }
1395     }
1396     else
1397     {
1398         if (ip46_address_is_zero(&rpath->frp_addr))
1399         {
1400             if (~0 == rpath->frp_fib_index)
1401             {
1402                 path->fp_type = FIB_PATH_TYPE_SPECIAL;
1403             }
1404             else
1405             {
1406                 path->fp_type = FIB_PATH_TYPE_DEAG;
1407                 path->deag.fp_tbl_id = rpath->frp_fib_index;
1408                 path->deag.fp_rpf_id = ~0;
1409             }
1410         }
1411         else
1412         {
1413             path->fp_type = FIB_PATH_TYPE_RECURSIVE;
1414             if (DPO_PROTO_MPLS == path->fp_nh_proto)
1415             {
1416                 path->recursive.fp_nh.fp_local_label = rpath->frp_local_label;
1417                 path->recursive.fp_nh.fp_eos = rpath->frp_eos;
1418             }
1419             else
1420             {
1421                 path->recursive.fp_nh.fp_ip = rpath->frp_addr;
1422             }
1423             path->recursive.fp_tbl_id = rpath->frp_fib_index;
1424         }
1425     }
1426
1427     FIB_PATH_DBG(path, "create");
1428
1429     return (fib_path_get_index(path));
1430 }
1431
1432 /*
1433  * fib_path_create_special
1434  *
1435  * Create and initialise a new special path object (drop, receive or
1436  * exclusive). return the index of the path.
1437  */
1438 fib_node_index_t
1439 fib_path_create_special (fib_node_index_t pl_index,
1440                          dpo_proto_t nh_proto,
1441                          fib_path_cfg_flags_t flags,
1442                          const dpo_id_t *dpo)
1443 {
1444     fib_path_t *path;
1445
1446     pool_get(fib_path_pool, path);
1447     clib_memset(path, 0, sizeof(*path));
1448
1449     fib_node_init(&path->fp_node,
1450                   FIB_NODE_TYPE_PATH);
1451     dpo_reset(&path->fp_dpo);
1452
1453     path->fp_pl_index = pl_index;
1454     path->fp_weight = 1;
1455     path->fp_preference = 0;
1456     path->fp_nh_proto = nh_proto;
1457     path->fp_via_fib = FIB_NODE_INDEX_INVALID;
1458     path->fp_cfg_flags = flags;
1459
1460     if (FIB_PATH_CFG_FLAG_DROP & flags)
1461     {
1462         path->fp_type = FIB_PATH_TYPE_SPECIAL;
1463     }
1464     else if (FIB_PATH_CFG_FLAG_LOCAL & flags)
1465     {
1466         path->fp_type = FIB_PATH_TYPE_RECEIVE;
1467         path->attached.fp_interface = FIB_NODE_INDEX_INVALID;
1468     }
1469     else
1470     {
1471         path->fp_type = FIB_PATH_TYPE_EXCLUSIVE;
1472         ASSERT(NULL != dpo);
1473         dpo_copy(&path->exclusive.fp_ex_dpo, dpo);
1474     }
1475
1476     return (fib_path_get_index(path));
1477 }
1478
1479 /*
1480  * fib_path_copy
1481  *
1482  * Copy a path. return index of new path.
1483  */
1484 fib_node_index_t
1485 fib_path_copy (fib_node_index_t path_index,
1486                fib_node_index_t path_list_index)
1487 {
1488     fib_path_t *path, *orig_path;
1489
1490     pool_get(fib_path_pool, path);
1491
1492     orig_path = fib_path_get(path_index);
1493     ASSERT(NULL != orig_path);
1494
1495     clib_memcpy(path, orig_path, sizeof(*path));
1496
1497     FIB_PATH_DBG(path, "create-copy:%d", path_index);
1498
1499     /*
1500      * reset the dynamic section
1501      */
1502     fib_node_init(&path->fp_node, FIB_NODE_TYPE_PATH);
1503     path->fp_oper_flags     = FIB_PATH_OPER_FLAG_NONE;
1504     path->fp_pl_index  = path_list_index;
1505     path->fp_via_fib   = FIB_NODE_INDEX_INVALID;
1506     clib_memset(&path->fp_dpo, 0, sizeof(path->fp_dpo));
1507     dpo_reset(&path->fp_dpo);
1508
1509     return (fib_path_get_index(path));
1510 }
1511
1512 /*
1513  * fib_path_destroy
1514  *
1515  * destroy a path that is no longer required
1516  */
1517 void
1518 fib_path_destroy (fib_node_index_t path_index)
1519 {
1520     fib_path_t *path;
1521
1522     path = fib_path_get(path_index);
1523
1524     ASSERT(NULL != path);
1525     FIB_PATH_DBG(path, "destroy");
1526
1527     fib_path_unresolve(path);
1528
1529     fib_node_deinit(&path->fp_node);
1530     pool_put(fib_path_pool, path);
1531 }
1532
1533 /*
1534  * fib_path_hash
1535  *
1536  * compute the hash of a path over its key (hash-marked) fields
1537  */
1538 uword
1539 fib_path_hash (fib_node_index_t path_index)
1540 {
1541     fib_path_t *path;
1542
1543     path = fib_path_get(path_index);
1544
1545     return (hash_memory(STRUCT_MARK_PTR(path, path_hash_start),
1546                         (STRUCT_OFFSET_OF(fib_path_t, path_hash_end) -
1547                          STRUCT_OFFSET_OF(fib_path_t, path_hash_start)),
1548                         0));
1549 }
1550
1551 /*
1552  * fib_path_cmp_i
1553  *
1554  * Compare two paths for equivalence.
1555  */
1556 static int
1557 fib_path_cmp_i (const fib_path_t *path1,
1558                 const fib_path_t *path2)
1559 {
1560     int res;
1561
1562     res = 1;
1563
1564     /*
1565      * paths of different types and protocol are not equal.
1566      * paths that differ only in weight and/or preference are considered the same path.
1567      */
1568     if (path1->fp_type != path2->fp_type)
1569     {
1570         res = (path1->fp_type - path2->fp_type);
1571     }
1572     else if (path1->fp_nh_proto != path2->fp_nh_proto)
1573     {
1574         res = (path1->fp_nh_proto - path2->fp_nh_proto);
1575     }
1576     else
1577     {
1578         /*
1579          * both paths are of the same type.
1580          * consider each type and its attributes in turn.
1581          */
1582         switch (path1->fp_type)
1583         {
1584         case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
1585             res = ip46_address_cmp(&path1->attached_next_hop.fp_nh,
1586                                    &path2->attached_next_hop.fp_nh);
1587             if (0 == res) {
1588                 res = (path1->attached_next_hop.fp_interface -
1589                        path2->attached_next_hop.fp_interface);
1590             }
1591             break;
1592         case FIB_PATH_TYPE_ATTACHED:
1593             res = (path1->attached.fp_interface -
1594                    path2->attached.fp_interface);
1595             break;
1596         case FIB_PATH_TYPE_RECURSIVE:
1597             res = ip46_address_cmp(&path1->recursive.fp_nh.fp_ip,
1598                                    &path2->recursive.fp_nh.fp_ip);
1599  
1600             if (0 == res)
1601             {
1602                 res = (path1->recursive.fp_tbl_id - path2->recursive.fp_tbl_id);
1603             }
1604             break;
1605         case FIB_PATH_TYPE_BIER_FMASK:
1606             res = (path1->bier_fmask.fp_bier_fmask -
1607                    path2->bier_fmask.fp_bier_fmask);
1608             break;
1609         case FIB_PATH_TYPE_BIER_IMP:
1610             res = (path1->bier_imp.fp_bier_imp -
1611                    path2->bier_imp.fp_bier_imp);
1612             break;
1613         case FIB_PATH_TYPE_BIER_TABLE:
1614             res = bier_table_id_cmp(&path1->bier_table.fp_bier_tbl,
1615                                     &path2->bier_table.fp_bier_tbl);
1616             break;
1617         case FIB_PATH_TYPE_DEAG:
1618             res = (path1->deag.fp_tbl_id - path2->deag.fp_tbl_id);
1619             if (0 == res)
1620             {
1621                 res = (path1->deag.fp_rpf_id - path2->deag.fp_rpf_id);
1622             }
1623             break;
1624         case FIB_PATH_TYPE_INTF_RX:
1625             res = (path1->intf_rx.fp_interface - path2->intf_rx.fp_interface);
1626             break;
1627         case FIB_PATH_TYPE_UDP_ENCAP:
1628             res = (path1->udp_encap.fp_udp_encap_id - path2->udp_encap.fp_udp_encap_id);
1629             break;
1630         case FIB_PATH_TYPE_DVR:
1631             res = (path1->dvr.fp_interface - path2->dvr.fp_interface);
1632             break;
1633         case FIB_PATH_TYPE_EXCLUSIVE:
1634             res = dpo_cmp(&path1->exclusive.fp_ex_dpo, &path2->exclusive.fp_ex_dpo);
1635             break;
1636         case FIB_PATH_TYPE_SPECIAL:
1637         case FIB_PATH_TYPE_RECEIVE:
1638             res = 0;
1639             break;
1640         }
1641     }
1642     return (res);
1643 }
1644
1645 /*
1646  * fib_path_cmp_for_sort
1647  *
1648  * Compare two paths for equivalence. Used during path sorting.
1649  * As usual 0 means equal.
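 *
 * A minimal usage sketch; the vector name 'path_indices' is hypothetical,
 * the comparator fits vec_sort_with_function():
 *
 *   vec_sort_with_function(path_indices, fib_path_cmp_for_sort);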
1650  */
1651 int
1652 fib_path_cmp_for_sort (void * v1,
1653                        void * v2)
1654 {
1655     fib_node_index_t *pi1 = v1, *pi2 = v2;
1656     fib_path_t *path1, *path2;
1657
1658     path1 = fib_path_get(*pi1);
1659     path2 = fib_path_get(*pi2);
1660
1661     /*
1662      * when sorting paths we want the highest preference paths
1663      * first, so that the set of choices built is in preference order
1664      */
1665     if (path1->fp_preference != path2->fp_preference)
1666     {
1667         return (path1->fp_preference - path2->fp_preference);
1668     }
1669
1670     return (fib_path_cmp_i(path1, path2));
1671 }
1672
1673 /*
1674  * fib_path_cmp
1675  *
1676  * Compare two paths for equivalence.
1677  */
1678 int
1679 fib_path_cmp (fib_node_index_t pi1,
1680               fib_node_index_t pi2)
1681 {
1682     fib_path_t *path1, *path2;
1683
1684     path1 = fib_path_get(pi1);
1685     path2 = fib_path_get(pi2);
1686
1687     return (fib_path_cmp_i(path1, path2));
1688 }
1689
1690 int
1691 fib_path_cmp_w_route_path (fib_node_index_t path_index,
1692                            const fib_route_path_t *rpath)
1693 {
1694     fib_path_t *path;
1695     int res;
1696
1697     path = fib_path_get(path_index);
1698
1699     res = 1;
1700
1701     if (path->fp_weight != rpath->frp_weight)
1702     {
1703         res = (path->fp_weight - rpath->frp_weight);
1704     }
1705     else
1706     {
1707         /*
1708          * the weights are equal.
1709          * compare the path to the route-path according to the path's type.
1710          */
1711         switch (path->fp_type)
1712         {
1713         case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
1714             res = ip46_address_cmp(&path->attached_next_hop.fp_nh,
1715                                    &rpath->frp_addr);
1716             if (0 == res)
1717             {
1718                 res = (path->attached_next_hop.fp_interface -
1719                        rpath->frp_sw_if_index);
1720             }
1721             break;
1722         case FIB_PATH_TYPE_ATTACHED:
1723             res = (path->attached.fp_interface - rpath->frp_sw_if_index);
1724             break;
1725         case FIB_PATH_TYPE_RECURSIVE:
1726             if (DPO_PROTO_MPLS == path->fp_nh_proto)
1727             {
1728                 res = path->recursive.fp_nh.fp_local_label - rpath->frp_local_label;
1729
1730                 if (res == 0)
1731                 {
1732                     res = path->recursive.fp_nh.fp_eos - rpath->frp_eos;
1733                 }
1734             }
1735             else
1736             {
1737                 res = ip46_address_cmp(&path->recursive.fp_nh.fp_ip,
1738                                        &rpath->frp_addr);
1739             }
1740
1741             if (0 == res)
1742             {
1743                 res = (path->recursive.fp_tbl_id - rpath->frp_fib_index);
1744             }
1745             break;
1746         case FIB_PATH_TYPE_BIER_FMASK:
1747             res = (path->bier_fmask.fp_bier_fmask - rpath->frp_bier_fmask);
1748             break;
1749         case FIB_PATH_TYPE_BIER_IMP:
1750             res = (path->bier_imp.fp_bier_imp - rpath->frp_bier_imp);
1751             break;
1752         case FIB_PATH_TYPE_BIER_TABLE:
1753             res = bier_table_id_cmp(&path->bier_table.fp_bier_tbl,
1754                                     &rpath->frp_bier_tbl);
1755             break;
1756         case FIB_PATH_TYPE_INTF_RX:
1757             res = (path->intf_rx.fp_interface - rpath->frp_sw_if_index);
1758             break;
1759         case FIB_PATH_TYPE_UDP_ENCAP:
1760             res = (path->udp_encap.fp_udp_encap_id - rpath->frp_udp_encap_id);
1761             break;
1762         case FIB_PATH_TYPE_DEAG:
1763             res = (path->deag.fp_tbl_id - rpath->frp_fib_index);
1764             if (0 == res)
1765             {
1766                 res = (path->deag.fp_rpf_id - rpath->frp_rpf_id);
1767             }
1768             break;
1769         case FIB_PATH_TYPE_DVR:
1770             res = (path->dvr.fp_interface - rpath->frp_sw_if_index);
1771             break;
1772         case FIB_PATH_TYPE_EXCLUSIVE:
1773             res = dpo_cmp(&path->exclusive.fp_ex_dpo, &rpath->dpo);
1774             break;
1775         case FIB_PATH_TYPE_RECEIVE:
1776             if (rpath->frp_flags & FIB_ROUTE_PATH_LOCAL)
1777             {
1778                 res = 0;
1779             }
1780             else
1781             {
1782                 res = 1;
1783             }
1784             break;
1785         case FIB_PATH_TYPE_SPECIAL:
1786             res = 0;
1787             break;
1788         }
1789     }
1790     return (res);
1791 }
1792
1793 /*
1794  * fib_path_recursive_loop_detect
1795  *
1796  * A forward walk of the FIB object graph to detect for a cycle/loop. This
1797  * walk is initiated when an entry is linking to a new path list or from an old.
1798  * The entry vector passed contains all the FIB entries that are children of this
1799  * path (it is all the entries encountered on the walk so far). If this vector
1800  * contains the entry this path resolves via, then a loop is about to form.
1801  * The loop must be allowed to form, since we need the dependencies in place
1802  * so that we can track when the loop breaks.
1803  * However, we MUST not produce a loop in the forwarding graph (else packets
1804  * would loop around the switch path until the loop breaks), so we mark recursive
1805  * paths as looped so that they do not contribute forwarding information.
1806  * By marking the path as looped, an entry such as:
1807  *    X/Y
1808  *     via a.a.a.a (looped)
1809  *     via b.b.b.b (not looped)
1810  * can still forward using the info provided by b.b.b.b only
1811  */
1812 int
1813 fib_path_recursive_loop_detect (fib_node_index_t path_index,
1814                                 fib_node_index_t **entry_indicies)
1815 {
1816     fib_path_t *path;
1817
1818     path = fib_path_get(path_index);
1819
1820     /*
1821      * the forced drop path is never looped, because it is never resolved.
1822      */
1823     if (fib_path_is_permanent_drop(path))
1824     {
1825         return (0);
1826     }
1827
1828     switch (path->fp_type)
1829     {
1830     case FIB_PATH_TYPE_RECURSIVE:
1831     {
1832         fib_node_index_t *entry_index, *entries;
1833         int looped = 0;
1834         entries = *entry_indicies;
1835
1836         vec_foreach(entry_index, entries) {
1837             if (*entry_index == path->fp_via_fib)
1838             {
1839                 /*
1840                  * the entry that is about to link to this path-list (or
1841                  * one of this path-list's children) is the same entry that
1842                  * this recursive path resolves through. this is a cycle.
1843                  * abort the walk.
1844                  */
1845                 looped = 1;
1846                 break;
1847             }
1848         }
1849
1850         if (looped)
1851         {
1852             FIB_PATH_DBG(path, "recursive loop formed");
1853             path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RECURSIVE_LOOP;
1854
1855             dpo_copy(&path->fp_dpo, drop_dpo_get(path->fp_nh_proto));
1856         }
1857         else
1858         {
1859             /*
1860              * no loop here yet. keep forward walking the graph.
1861              */
1862             if (fib_entry_recursive_loop_detect(path->fp_via_fib, entry_indicies))
1863             {
1864                 FIB_PATH_DBG(path, "recursive loop formed");
1865                 path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RECURSIVE_LOOP;
1866             }
1867             else
1868             {
1869                 FIB_PATH_DBG(path, "recursive loop cleared");
1870                 path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RECURSIVE_LOOP;
1871             }
1872         }
1873         break;
1874     }
1875     case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
1876     case FIB_PATH_TYPE_ATTACHED:
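        /*
         * if the path resolves via an adjacency (e.g. a midchain whose
         * stacking recurses back through the FIB) continue the loop
         * detection walk into that adjacency.
         */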
1877         if (dpo_is_adj(&path->fp_dpo) &&
1878             adj_recursive_loop_detect(path->fp_dpo.dpoi_index,
1879                                       entry_indicies))
1880         {
1881             FIB_PATH_DBG(path, "recursive loop formed");
1882             path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RECURSIVE_LOOP;
1883         }
1884         else
1885         {
1886             FIB_PATH_DBG(path, "recursive loop cleared");
1887             path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RECURSIVE_LOOP;
1888         }
1889         break;
1890     case FIB_PATH_TYPE_SPECIAL:
1891     case FIB_PATH_TYPE_DEAG:
1892     case FIB_PATH_TYPE_DVR:
1893     case FIB_PATH_TYPE_RECEIVE:
1894     case FIB_PATH_TYPE_INTF_RX:
1895     case FIB_PATH_TYPE_UDP_ENCAP:
1896     case FIB_PATH_TYPE_EXCLUSIVE:
1897     case FIB_PATH_TYPE_BIER_FMASK:
1898     case FIB_PATH_TYPE_BIER_TABLE:
1899     case FIB_PATH_TYPE_BIER_IMP:
1900         /*
1901          * these path types cannot be part of a loop, since they are the leaves
1902          * of the graph.
1903          */
1904         break;
1905     }
1906
1907     return (fib_path_is_looped(path_index));
1908 }
1909
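/**
 * @brief Resolve the path: construct the DPO through which it forwards
 * and, where applicable, become a child of the object it resolves through
 * (adjacency, via FIB entry, BIER object, ...) so the path is updated when
 * that object changes.
 */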
1910 int
1911 fib_path_resolve (fib_node_index_t path_index)
1912 {
1913     fib_path_t *path;
1914
1915     path = fib_path_get(path_index);
1916
1917     /*
1918      * hope for the best.
1919      */
1920     path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RESOLVED;
1921
1922     /*
1923      * the forced drop path resolves via the drop adj
1924      */
1925     if (fib_path_is_permanent_drop(path))
1926     {
1927         dpo_copy(&path->fp_dpo, drop_dpo_get(path->fp_nh_proto));
1928         path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
1929         return (fib_path_is_resolved(path_index));
1930     }
1931
1932     switch (path->fp_type)
1933     {
1934     case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
1935         fib_path_attached_next_hop_set(path);
1936         break;
1937     case FIB_PATH_TYPE_ATTACHED:
1938     {
1939         dpo_id_t tmp = DPO_INVALID;
1940
1941         /*
1942          * path->attached.fp_interface
1943          */
1944         if (!vnet_sw_interface_is_up(vnet_get_main(),
1945                                      path->attached.fp_interface))
1946         {
1947             path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
1948         }
1949         fib_path_attached_get_adj(path,
1950                                   dpo_proto_to_link(path->fp_nh_proto),
1951                                   &tmp);
1952
1953         /*
1954          * re-fetch after possible mem realloc
1955          */
1956         path = fib_path_get(path_index);
1957         dpo_copy(&path->fp_dpo, &tmp);
1958
1959         /*
1960          * become a child of the adjacency so we receive updates
1961          * when the interface state changes
1962          */
1963         if (dpo_is_adj(&path->fp_dpo))
1964         {
1965             path->fp_sibling = adj_child_add(path->fp_dpo.dpoi_index,
1966                                              FIB_NODE_TYPE_PATH,
1967                                              fib_path_get_index(path));
1968         }
1969         dpo_reset(&tmp);
1970         break;
1971     }
1972     case FIB_PATH_TYPE_RECURSIVE:
1973     {
1974         /*
1975          * Create a RR source entry in the table for the address
1976          * that this path recurses through.
1977          * This resolve action is recursive, hence we may create
1978          * more paths in the process. creating more paths may trigger a
1979          * realloc of the path pool and so invalidate this path pointer.
1980          */
1981         fib_node_index_t fei;
1982         fib_prefix_t pfx;
1983
1984         ASSERT(FIB_NODE_INDEX_INVALID == path->fp_via_fib);
1985
1986         if (DPO_PROTO_MPLS == path->fp_nh_proto)
1987         {
1988             fib_prefix_from_mpls_label(path->recursive.fp_nh.fp_local_label,
1989                                        path->recursive.fp_nh.fp_eos,
1990                                        &pfx);
1991         }
1992         else
1993         {
1994             fib_prefix_from_ip46_addr(&path->recursive.fp_nh.fp_ip, &pfx);
1995         }
1996
1997         fib_table_lock(path->recursive.fp_tbl_id,
1998                        dpo_proto_to_fib(path->fp_nh_proto),
1999                        FIB_SOURCE_RR);
2000         fei = fib_table_entry_special_add(path->recursive.fp_tbl_id,
2001                                           &pfx,
2002                                           FIB_SOURCE_RR,
2003                                           FIB_ENTRY_FLAG_NONE);
2004
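        /*
         * adding the special entry above may have created more paths and
         * hence reallocated the path pool; re-fetch this path by index
         * before it is used again.
         */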
2005         path = fib_path_get(path_index);
2006         path->fp_via_fib = fei;
2007
2008         /*
2009          * become a dependent child of the entry so the path is 
2010          * informed when the forwarding for the entry changes.
2011          */
2012         path->fp_sibling = fib_entry_child_add(path->fp_via_fib,
2013                                                FIB_NODE_TYPE_PATH,
2014                                                fib_path_get_index(path));
2015
2016         /*
2017          * create and configure the IP DPO
2018          */
2019         fib_path_recursive_adj_update(
2020             path,
2021             fib_path_to_chain_type(path),
2022             &path->fp_dpo);
2023
2024         break;
2025     }
2026     case FIB_PATH_TYPE_BIER_FMASK:
2027     {
2028         /*
2029          * become a dependent child of the entry so the path is
2030          * informed when the forwarding for the entry changes.
2031          */
2032         path->fp_sibling = bier_fmask_child_add(path->bier_fmask.fp_bier_fmask,
2033                                                 FIB_NODE_TYPE_PATH,
2034                                                 fib_path_get_index(path));
2035
2036         path->fp_via_bier_fmask = path->bier_fmask.fp_bier_fmask;
2037         fib_path_bier_fmask_update(path, &path->fp_dpo);
2038
2039         break;
2040     }
2041     case FIB_PATH_TYPE_BIER_IMP:
2042         bier_imp_lock(path->bier_imp.fp_bier_imp);
2043         bier_imp_contribute_forwarding(path->bier_imp.fp_bier_imp,
2044                                        DPO_PROTO_IP4,
2045                                        &path->fp_dpo);
2046         break;
2047     case FIB_PATH_TYPE_BIER_TABLE:
2048     {
2049         /*
2050          * Find/create the BIER table to link to
2051          */
2052         ASSERT(FIB_NODE_INDEX_INVALID == path->fp_via_bier_tbl);
2053
2054         path->fp_via_bier_tbl =
2055             bier_table_ecmp_create_and_lock(&path->bier_table.fp_bier_tbl);
2056
2057         bier_table_contribute_forwarding(path->fp_via_bier_tbl,
2058                                          &path->fp_dpo);
2059         break;
2060     }
2061     case FIB_PATH_TYPE_SPECIAL:
2062         if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_ICMP_PROHIBIT)
2063         {
2064             ip_null_dpo_add_and_lock (path->fp_nh_proto,
2065                                       IP_NULL_ACTION_SEND_ICMP_PROHIBIT,
2066                                       &path->fp_dpo);
2067         }
2068         else if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_ICMP_UNREACH)
2069         {
2070             ip_null_dpo_add_and_lock (path->fp_nh_proto,
2071                                       IP_NULL_ACTION_SEND_ICMP_UNREACH,
2072                                       &path->fp_dpo);
2073         }
2074         else if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_CLASSIFY)
2075         {
2076             dpo_set (&path->fp_dpo, DPO_CLASSIFY,
2077                      path->fp_nh_proto,
2078                      classify_dpo_create (path->fp_nh_proto,
2079                                           path->classify.fp_classify_table_id));
2080         }
2081         else
2082         {
2083             /*
2084              * Resolve via the drop
2085              */
2086             dpo_copy(&path->fp_dpo, drop_dpo_get(path->fp_nh_proto));
2087         }
2088         break;
2089     case FIB_PATH_TYPE_DEAG:
2090     {
2091         if (DPO_PROTO_BIER == path->fp_nh_proto)
2092         {
2093             bier_disp_table_contribute_forwarding(path->deag.fp_tbl_id,
2094                                                   &path->fp_dpo);
2095         }
2096         else
2097         {
2098             /*
2099              * Resolve via a lookup DPO.
2100              * FIXME. control plane should add routes with a table ID
2101              */
2102             lookup_input_t input;
2103             lookup_cast_t cast;
2104
2105             cast = (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_RPF_ID ?
2106                     LOOKUP_MULTICAST :
2107                     LOOKUP_UNICAST);
2108             input = (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_DEAG_SRC ?
2109                      LOOKUP_INPUT_SRC_ADDR :
2110                      LOOKUP_INPUT_DST_ADDR);
2111
2112             lookup_dpo_add_or_lock_w_fib_index(path->deag.fp_tbl_id,
2113                                                path->fp_nh_proto,
2114                                                cast,
2115                                                input,
2116                                                LOOKUP_TABLE_FROM_CONFIG,
2117                                                &path->fp_dpo);
2118         }
2119         break;
2120     }
2121     case FIB_PATH_TYPE_DVR:
2122         dvr_dpo_add_or_lock(path->attached.fp_interface,
2123                             path->fp_nh_proto,
2124                             &path->fp_dpo);
2125         break;
2126     case FIB_PATH_TYPE_RECEIVE:
2127         /*
2128          * Resolve via a receive DPO.
2129          */
2130         receive_dpo_add_or_lock(path->fp_nh_proto,
2131                                 path->receive.fp_interface,
2132                                 &path->receive.fp_addr,
2133                                 &path->fp_dpo);
2134         break;
2135     case FIB_PATH_TYPE_UDP_ENCAP:
2136         udp_encap_lock(path->udp_encap.fp_udp_encap_id);
2137         udp_encap_contribute_forwarding(path->udp_encap.fp_udp_encap_id,
2138                                         path->fp_nh_proto,
2139                                         &path->fp_dpo);
2140         break;
2141     case FIB_PATH_TYPE_INTF_RX: {
2142         /*
2143          * Resolve via an interface receive DPO.
2144          */
2145         interface_rx_dpo_add_or_lock(path->fp_nh_proto,
2146                                      path->intf_rx.fp_interface,
2147                                      &path->fp_dpo);
2148         break;
2149     }
2150     case FIB_PATH_TYPE_EXCLUSIVE:
2151         /*
2152          * Resolve via the user provided DPO
2153          */
2154         dpo_copy(&path->fp_dpo, &path->exclusive.fp_ex_dpo);
2155         break;
2156     }
2157
2158     return (fib_path_is_resolved(path_index));
2159 }
2160
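/**
 * @brief Return the SW interface through which the path resolves.
 * Paths configured with an interface return it directly, recursive paths
 * defer to their via-entry, and the remaining types fall back to the
 * uRPF interface of the path's DPO.
 */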
2161 u32
2162 fib_path_get_resolving_interface (fib_node_index_t path_index)
2163 {
2164     fib_path_t *path;
2165
2166     path = fib_path_get(path_index);
2167
2168     switch (path->fp_type)
2169     {
2170     case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
2171         return (path->attached_next_hop.fp_interface);
2172     case FIB_PATH_TYPE_ATTACHED:
2173         return (path->attached.fp_interface);
2174     case FIB_PATH_TYPE_RECEIVE:
2175         return (path->receive.fp_interface);
2176     case FIB_PATH_TYPE_RECURSIVE:
2177         if (fib_path_is_resolved(path_index))
2178         {
2179             return (fib_entry_get_resolving_interface(path->fp_via_fib));
2180         }
2181         break;
2182     case FIB_PATH_TYPE_DVR:
2183         return (path->dvr.fp_interface);
2184     case FIB_PATH_TYPE_INTF_RX:
2185     case FIB_PATH_TYPE_UDP_ENCAP:
2186     case FIB_PATH_TYPE_SPECIAL:
2187     case FIB_PATH_TYPE_DEAG:
2188     case FIB_PATH_TYPE_EXCLUSIVE:
2189     case FIB_PATH_TYPE_BIER_FMASK:
2190     case FIB_PATH_TYPE_BIER_TABLE:
2191     case FIB_PATH_TYPE_BIER_IMP:
2192         break;
2193     }
2194     return (dpo_get_urpf(&path->fp_dpo));
2195 }
2196
2197 index_t
2198 fib_path_get_resolving_index (fib_node_index_t path_index)
2199 {
2200     fib_path_t *path;
2201
2202     path = fib_path_get(path_index);
2203
2204     switch (path->fp_type)
2205     {
2206     case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
2207     case FIB_PATH_TYPE_ATTACHED:
2208     case FIB_PATH_TYPE_RECEIVE:
2209     case FIB_PATH_TYPE_INTF_RX:
2210     case FIB_PATH_TYPE_SPECIAL:
2211     case FIB_PATH_TYPE_DEAG:
2212     case FIB_PATH_TYPE_DVR:
2213     case FIB_PATH_TYPE_EXCLUSIVE:
2214         break;
2215     case FIB_PATH_TYPE_UDP_ENCAP:
2216         return (path->udp_encap.fp_udp_encap_id);
2217     case FIB_PATH_TYPE_RECURSIVE:
2218         return (path->fp_via_fib);
2219     case FIB_PATH_TYPE_BIER_FMASK:
2220         return (path->bier_fmask.fp_bier_fmask);
2221     case FIB_PATH_TYPE_BIER_TABLE:
2222         return (path->fp_via_bier_tbl);
2223     case FIB_PATH_TYPE_BIER_IMP:
2224         return (path->bier_imp.fp_bier_imp);
2225     }
2226     return (~0);
2227 }
2228
2229 adj_index_t
2230 fib_path_get_adj (fib_node_index_t path_index)
2231 {
2232     fib_path_t *path;
2233
2234     path = fib_path_get(path_index);
2235
2236     if (dpo_is_adj(&path->fp_dpo))
2237     {
2238         return (path->fp_dpo.dpoi_index);
2239     }
2240     return (ADJ_INDEX_INVALID);
2241 }
2242
2243 u16
2244 fib_path_get_weight (fib_node_index_t path_index)
2245 {
2246     fib_path_t *path;
2247
2248     path = fib_path_get(path_index);
2249
2250     ASSERT(path);
2251
2252     return (path->fp_weight);
2253 }
2254
2255 u16
2256 fib_path_get_preference (fib_node_index_t path_index)
2257 {
2258     fib_path_t *path;
2259
2260     path = fib_path_get(path_index);
2261
2262     ASSERT(path);
2263
2264     return (path->fp_preference);
2265 }
2266
2267 u32
2268 fib_path_get_rpf_id (fib_node_index_t path_index)
2269 {
2270     fib_path_t *path;
2271
2272     path = fib_path_get(path_index);
2273
2274     ASSERT(path);
2275
2276     if (FIB_PATH_CFG_FLAG_RPF_ID & path->fp_cfg_flags)
2277     {
2278         return (path->deag.fp_rpf_id);
2279     }
2280
2281     return (~0);
2282 }
2283
2284 /**
2285  * @brief Contribute the path's adjacency to the list passed.
2286  * By calling this function over all paths, recursively, a child
2287  * can construct its full set of forwarding adjacencies, and hence its
2288  * uRPF list.
2289  */
2290 void
2291 fib_path_contribute_urpf (fib_node_index_t path_index,
2292                           index_t urpf)
2293 {
2294     fib_path_t *path;
2295
2296     path = fib_path_get(path_index);
2297
2298     /*
2299      * resolved and unresolved paths contribute to the RPF list.
2300      */
2301     switch (path->fp_type)
2302     {
2303     case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
2304         fib_urpf_list_append(urpf, path->attached_next_hop.fp_interface);
2305         break;
2306
2307     case FIB_PATH_TYPE_ATTACHED:
2308         fib_urpf_list_append(urpf, path->attached.fp_interface);
2309         break;
2310
2311     case FIB_PATH_TYPE_RECURSIVE:
2312         if (FIB_NODE_INDEX_INVALID != path->fp_via_fib &&
2313             !fib_path_is_looped(path_index))
2314         {
2315             /*
2316              * a path may be unresolved due to resolution constraints, or
2317              * because it has no via entry; without one there is nothing to contribute.
2318              */
2319             fib_entry_contribute_urpf(path->fp_via_fib, urpf);
2320         }
2321         break;
2322
2323     case FIB_PATH_TYPE_EXCLUSIVE:
2324     case FIB_PATH_TYPE_SPECIAL:
2325     {
2326         /*
2327          * these path types may link to an adj, if that's what
2328          * the client gave
2329          */
2330         u32 rpf_sw_if_index;
2331
2332         rpf_sw_if_index = dpo_get_urpf(&path->fp_dpo);
2333
2334         if (~0 != rpf_sw_if_index)
2335         {
2336             fib_urpf_list_append(urpf, rpf_sw_if_index);
2337         }
2338         break;
2339     }
2340     case FIB_PATH_TYPE_DVR:
2341         fib_urpf_list_append(urpf, path->dvr.fp_interface);
2342         break;
2343     case FIB_PATH_TYPE_DEAG:
2344     case FIB_PATH_TYPE_RECEIVE:
2345     case FIB_PATH_TYPE_INTF_RX:
2346     case FIB_PATH_TYPE_UDP_ENCAP:
2347     case FIB_PATH_TYPE_BIER_FMASK:
2348     case FIB_PATH_TYPE_BIER_TABLE:
2349     case FIB_PATH_TYPE_BIER_IMP:
2350         /*
2351          * these path types don't link to an adj
2352          */
2353         break;
2354     }
2355 }
2356
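/**
 * @brief Stack an MPLS disposition on the forwarding provided in 'dpo'.
 * Attached-next-hop and deag paths push an MPLS disposition DPO (the deag
 * path supplying its RPF-ID); if the path is configured to pop a
 * pseudo-wire control-word a PW-CW DPO is pushed as well.
 */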
2357 void
2358 fib_path_stack_mpls_disp (fib_node_index_t path_index,
2359                           dpo_proto_t payload_proto,
2360                           fib_mpls_lsp_mode_t mode,
2361                           dpo_id_t *dpo)
2362 {
2363     fib_path_t *path;
2364
2365     path = fib_path_get(path_index);
2366
2367     ASSERT(path);
2368
2369     switch (path->fp_type)
2370     {
2371     case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
2372     {
2373         dpo_id_t tmp = DPO_INVALID;
2374
2375         dpo_copy(&tmp, dpo);
2376
2377         mpls_disp_dpo_create(payload_proto, ~0, mode, &tmp, dpo);
2378         dpo_reset(&tmp);
2379         break;
2380     }
2381     case FIB_PATH_TYPE_DEAG:
2382     {
2383         dpo_id_t tmp = DPO_INVALID;
2384
2385         dpo_copy(&tmp, dpo);
2386
2387         mpls_disp_dpo_create(payload_proto,
2388                              path->deag.fp_rpf_id,
2389                              mode, &tmp, dpo);
2390         dpo_reset(&tmp);
2391         break;
2392     }
2393     case FIB_PATH_TYPE_RECEIVE:
2394     case FIB_PATH_TYPE_ATTACHED:
2395     case FIB_PATH_TYPE_RECURSIVE:
2396     case FIB_PATH_TYPE_INTF_RX:
2397     case FIB_PATH_TYPE_UDP_ENCAP:
2398     case FIB_PATH_TYPE_EXCLUSIVE:
2399     case FIB_PATH_TYPE_SPECIAL:
2400     case FIB_PATH_TYPE_BIER_FMASK:
2401     case FIB_PATH_TYPE_BIER_TABLE:
2402     case FIB_PATH_TYPE_BIER_IMP:
2403     case FIB_PATH_TYPE_DVR:
2404         break;
2405     }
2406
2407     if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_POP_PW_CW)
2408     {
2409         dpo_id_t tmp = DPO_INVALID;
2410
2411         dpo_copy(&tmp, dpo);
2412
2413         pw_cw_dpo_create(&tmp, dpo);
2414         dpo_reset(&tmp);
2415     }
2416 }
2417
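/**
 * @brief Contribute a DPO for this path for the requested forwarding chain
 * type. If the requested type matches the path's native chain type the DPO
 * built at resolve time is copied; otherwise a DPO appropriate to the
 * requested chain type is constructed.
 */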
2418 void
2419 fib_path_contribute_forwarding (fib_node_index_t path_index,
2420                                 fib_forward_chain_type_t fct,
2421                                 dpo_id_t *dpo)
2422 {
2423     fib_path_t *path;
2424
2425     path = fib_path_get(path_index);
2426
2427     ASSERT(path);
2428     ASSERT(FIB_FORW_CHAIN_TYPE_MPLS_EOS != fct);
2429
2430     /*
2431      * The DPO stored in the path was created when the path was resolved.
2432      * This then represents the path's 'native' protocol, IP.
2433      * For all other chain types we will need to find something else.
2434      */
2435     if (fib_path_to_chain_type(path) == fct)
2436     {
2437         dpo_copy(dpo, &path->fp_dpo);
2438     }
2439     else
2440     {
2441         switch (path->fp_type)
2442         {
2443         case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
2444             switch (fct)
2445             {
2446             case FIB_FORW_CHAIN_TYPE_UNICAST_IP4:
2447             case FIB_FORW_CHAIN_TYPE_UNICAST_IP6:
2448             case FIB_FORW_CHAIN_TYPE_MPLS_EOS:
2449             case FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS:
2450             case FIB_FORW_CHAIN_TYPE_ETHERNET:
2451             case FIB_FORW_CHAIN_TYPE_NSH:
2452             case FIB_FORW_CHAIN_TYPE_MCAST_IP4:
2453             case FIB_FORW_CHAIN_TYPE_MCAST_IP6:
2454                 path = fib_path_attached_next_hop_get_adj(
2455                     path,
2456                     fib_forw_chain_type_to_link_type(fct),
2457                     dpo);
2458                 break;
2459             case FIB_FORW_CHAIN_TYPE_BIER:
2460                 break;
2461             }
2462             break;
2463         case FIB_PATH_TYPE_RECURSIVE:
2464             switch (fct)
2465             {
2466             case FIB_FORW_CHAIN_TYPE_MPLS_EOS:
2467             case FIB_FORW_CHAIN_TYPE_UNICAST_IP4:
2468             case FIB_FORW_CHAIN_TYPE_UNICAST_IP6:
2469             case FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS:
2470             case FIB_FORW_CHAIN_TYPE_MCAST_IP4:
2471             case FIB_FORW_CHAIN_TYPE_MCAST_IP6:
2472             case FIB_FORW_CHAIN_TYPE_BIER:
2473                 fib_path_recursive_adj_update(path, fct, dpo);
2474                 break;
2475             case FIB_FORW_CHAIN_TYPE_ETHERNET:
2476             case FIB_FORW_CHAIN_TYPE_NSH:
2477                 ASSERT(0);
2478                 break;
2479             }
2480             break;
2481         case FIB_PATH_TYPE_BIER_TABLE:
2482             switch (fct)
2483             {
2484             case FIB_FORW_CHAIN_TYPE_BIER:
2485                 bier_table_contribute_forwarding(path->fp_via_bier_tbl, dpo);
2486                 break;
2487             case FIB_FORW_CHAIN_TYPE_MPLS_EOS:
2488             case FIB_FORW_CHAIN_TYPE_UNICAST_IP4:
2489             case FIB_FORW_CHAIN_TYPE_UNICAST_IP6:
2490             case FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS:
2491             case FIB_FORW_CHAIN_TYPE_MCAST_IP4:
2492             case FIB_FORW_CHAIN_TYPE_MCAST_IP6:
2493             case FIB_FORW_CHAIN_TYPE_ETHERNET:
2494             case FIB_FORW_CHAIN_TYPE_NSH:
2495                 ASSERT(0);
2496                 break;
2497             }
2498             break;
2499         case FIB_PATH_TYPE_BIER_FMASK:
2500             switch (fct)
2501             {
2502             case FIB_FORW_CHAIN_TYPE_BIER:
2503                 fib_path_bier_fmask_update(path, dpo);
2504                 break;
2505             case FIB_FORW_CHAIN_TYPE_MPLS_EOS:
2506             case FIB_FORW_CHAIN_TYPE_UNICAST_IP4:
2507             case FIB_FORW_CHAIN_TYPE_UNICAST_IP6:
2508             case FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS:
2509             case FIB_FORW_CHAIN_TYPE_MCAST_IP4:
2510             case FIB_FORW_CHAIN_TYPE_MCAST_IP6:
2511             case FIB_FORW_CHAIN_TYPE_ETHERNET:
2512             case FIB_FORW_CHAIN_TYPE_NSH:
2513                 ASSERT(0);
2514                 break;
2515             }
2516             break;
2517         case FIB_PATH_TYPE_BIER_IMP:
2518             bier_imp_contribute_forwarding(path->bier_imp.fp_bier_imp,
2519                                            fib_forw_chain_type_to_dpo_proto(fct),
2520                                            dpo);
2521             break;
2522         case FIB_PATH_TYPE_DEAG:
2523             switch (fct)
2524             {
2525             case FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS:
2526                 lookup_dpo_add_or_lock_w_table_id(MPLS_FIB_DEFAULT_TABLE_ID,
2527                                                   DPO_PROTO_MPLS,
2528                                                   LOOKUP_UNICAST,
2529                                                   LOOKUP_INPUT_DST_ADDR,
2530                                                   LOOKUP_TABLE_FROM_CONFIG,
2531                                                   dpo);
2532                 break;
2533             case FIB_FORW_CHAIN_TYPE_MPLS_EOS:
2534             case FIB_FORW_CHAIN_TYPE_UNICAST_IP4:
2535             case FIB_FORW_CHAIN_TYPE_UNICAST_IP6:
2536             case FIB_FORW_CHAIN_TYPE_MCAST_IP4:
2537             case FIB_FORW_CHAIN_TYPE_MCAST_IP6:
2538                 dpo_copy(dpo, &path->fp_dpo);
2539                 break;
2540             case FIB_FORW_CHAIN_TYPE_BIER:
2541                 break;
2542             case FIB_FORW_CHAIN_TYPE_ETHERNET:
2543             case FIB_FORW_CHAIN_TYPE_NSH:
2544                 ASSERT(0);
2545                 break;
2546             }
2547             break;
2548         case FIB_PATH_TYPE_EXCLUSIVE:
2549             dpo_copy(dpo, &path->exclusive.fp_ex_dpo);
2550             break;
2551         case FIB_PATH_TYPE_ATTACHED:
2552             switch (fct)
2553             {
2554             case FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS:
2555             case FIB_FORW_CHAIN_TYPE_UNICAST_IP4:
2556             case FIB_FORW_CHAIN_TYPE_UNICAST_IP6:
2557             case FIB_FORW_CHAIN_TYPE_MPLS_EOS:
2558             case FIB_FORW_CHAIN_TYPE_ETHERNET:
2559             case FIB_FORW_CHAIN_TYPE_NSH:
2560             case FIB_FORW_CHAIN_TYPE_BIER:
2561                 fib_path_attached_get_adj(path,
2562                                           fib_forw_chain_type_to_link_type(fct),
2563                                           dpo);
2564                 break;
2565             case FIB_FORW_CHAIN_TYPE_MCAST_IP4:
2566             case FIB_FORW_CHAIN_TYPE_MCAST_IP6:
2567                 {
2568                     adj_index_t ai;
2569
2570                     /*
2571                      * Create the adj needed for sending IP multicast traffic
2572                      */
2573                     if (vnet_sw_interface_is_p2p(vnet_get_main(),
2574                                                  path->attached.fp_interface))
2575                     {
2576                         /*
2577                          * point-2-point interfaces do not require a glean, since
2578                          * there is nothing to ARP. Install a rewrite/nbr adj instead
2579                          */
2580                         ai = adj_nbr_add_or_lock(dpo_proto_to_fib(path->fp_nh_proto),
2581                                                  fib_forw_chain_type_to_link_type(fct),
2582                                                  &zero_addr,
2583                                                  path->attached.fp_interface);
2584                     }
2585                     else
2586                     {
2587                         ai = adj_mcast_add_or_lock(dpo_proto_to_fib(path->fp_nh_proto),
2588                                                    fib_forw_chain_type_to_link_type(fct),
2589                                                    path->attached.fp_interface);
2590                     }
2591                     dpo_set(dpo, DPO_ADJACENCY,
2592                             fib_forw_chain_type_to_dpo_proto(fct),
2593                             ai);
2594                     adj_unlock(ai);
2595                 }
2596                 break;
2597             }
2598             break;
2599         case FIB_PATH_TYPE_INTF_RX:
2600             /*
2601              * Create the DPO to receive packets on this interface
2602              */
2603             interface_rx_dpo_add_or_lock(fib_forw_chain_type_to_dpo_proto(fct),
2604                                          path->attached.fp_interface,
2605                                          dpo);
2606             break;
2607         case FIB_PATH_TYPE_UDP_ENCAP:
2608             udp_encap_contribute_forwarding(path->udp_encap.fp_udp_encap_id,
2609                                             path->fp_nh_proto,
2610                                             dpo);
2611             break;
2612         case FIB_PATH_TYPE_RECEIVE:
2613         case FIB_PATH_TYPE_SPECIAL:
2614         case FIB_PATH_TYPE_DVR:
2615             dpo_copy(dpo, &path->fp_dpo);
2616             break;
2617         }
2618     }
2619 }
2620
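/**
 * @brief Append this path's contribution to the vector of load-balance
 * paths used to build the multipath hash/choice set. Unresolved paths
 * contribute a drop DPO.
 */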
2621 load_balance_path_t *
2622 fib_path_append_nh_for_multipath_hash (fib_node_index_t path_index,
2623                                        fib_forward_chain_type_t fct,
2624                                        load_balance_path_t *hash_key)
2625 {
2626     load_balance_path_t *mnh;
2627     fib_path_t *path;
2628
2629     path = fib_path_get(path_index);
2630
2631     ASSERT(path);
2632
2633     vec_add2(hash_key, mnh, 1);
2634
2635     mnh->path_weight = path->fp_weight;
2636     mnh->path_index = path_index;
2637
2638     if (fib_path_is_resolved(path_index))
2639     {
2640         fib_path_contribute_forwarding(path_index, fct, &mnh->path_dpo);
2641     }
2642     else
2643     {
2644         dpo_copy(&mnh->path_dpo,
2645                  drop_dpo_get(fib_forw_chain_type_to_dpo_proto(fct)));
2646     }
2647     return (hash_key);
2648 }
2649
2650 int
2651 fib_path_is_recursive_constrained (fib_node_index_t path_index)
2652 {
2653     fib_path_t *path;
2654
2655     path = fib_path_get(path_index);
2656
2657     return ((FIB_PATH_TYPE_RECURSIVE == path->fp_type) &&
2658             ((path->fp_cfg_flags & FIB_PATH_CFG_FLAG_RESOLVE_ATTACHED) ||
2659              (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_RESOLVE_HOST)));
2660 }
2661
2662 int
2663 fib_path_is_exclusive (fib_node_index_t path_index)
2664 {
2665     fib_path_t *path;
2666
2667     path = fib_path_get(path_index);
2668
2669     return (FIB_PATH_TYPE_EXCLUSIVE == path->fp_type);
2670 }
2671
2672 int
2673 fib_path_is_deag (fib_node_index_t path_index)
2674 {
2675     fib_path_t *path;
2676
2677     path = fib_path_get(path_index);
2678
2679     return (FIB_PATH_TYPE_DEAG == path->fp_type);
2680 }
2681
2682 int
2683 fib_path_is_resolved (fib_node_index_t path_index)
2684 {
2685     fib_path_t *path;
2686
2687     path = fib_path_get(path_index);
2688
2689     return (dpo_id_is_valid(&path->fp_dpo) &&
2690             (path->fp_oper_flags & FIB_PATH_OPER_FLAG_RESOLVED) &&
2691             !fib_path_is_looped(path_index) &&
2692             !fib_path_is_permanent_drop(path));
2693 }
2694
2695 int
2696 fib_path_is_looped (fib_node_index_t path_index)
2697 {
2698     fib_path_t *path;
2699
2700     path = fib_path_get(path_index);
2701
2702     return (path->fp_oper_flags & FIB_PATH_OPER_FLAG_RECURSIVE_LOOP);
2703 }
2704
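/**
 * @brief Path-list walk callback: encode the visited path into a
 * fib_route_path_t and append it to the vector in the walk context.
 */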
2705 fib_path_list_walk_rc_t
2706 fib_path_encode (fib_node_index_t path_list_index,
2707                  fib_node_index_t path_index,
2708                  const fib_path_ext_t *path_ext,
2709                  void *args)
2710 {
2711     fib_path_encode_ctx_t *ctx = args;
2712     fib_route_path_t *rpath;
2713     fib_path_t *path;
2714
2715     path = fib_path_get(path_index);
2716     if (!path)
2717       return (FIB_PATH_LIST_WALK_CONTINUE);
2718
2719     vec_add2(ctx->rpaths, rpath, 1);
2720     rpath->frp_weight = path->fp_weight;
2721     rpath->frp_preference = path->fp_preference;
2722     rpath->frp_proto = path->fp_nh_proto;
2723     rpath->frp_sw_if_index = ~0;
2724     rpath->frp_fib_index = 0;
2725
2726     switch (path->fp_type)
2727     {
2728       case FIB_PATH_TYPE_RECEIVE:
2729         rpath->frp_addr = path->receive.fp_addr;
2730         rpath->frp_sw_if_index = path->receive.fp_interface;
2731         rpath->frp_flags |= FIB_ROUTE_PATH_LOCAL;
2732         break;
2733       case FIB_PATH_TYPE_ATTACHED:
2734         rpath->frp_sw_if_index = path->attached.fp_interface;
2735         break;
2736       case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
2737         rpath->frp_sw_if_index = path->attached_next_hop.fp_interface;
2738         rpath->frp_addr = path->attached_next_hop.fp_nh;
2739         break;
2740       case FIB_PATH_TYPE_BIER_FMASK:
2741         rpath->frp_bier_fmask = path->bier_fmask.fp_bier_fmask;
2742         break;
2743       case FIB_PATH_TYPE_SPECIAL:
2744         break;
2745       case FIB_PATH_TYPE_DEAG:
2746         rpath->frp_fib_index = path->deag.fp_tbl_id;
2747         if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_RPF_ID)
2748         {
2749             rpath->frp_flags |= FIB_ROUTE_PATH_RPF_ID;
2750         }
2751         break;
2752       case FIB_PATH_TYPE_RECURSIVE:
2753         rpath->frp_addr = path->recursive.fp_nh.fp_ip;
2754         rpath->frp_fib_index = path->recursive.fp_tbl_id;
2755         break;
2756       case FIB_PATH_TYPE_DVR:
2757           rpath->frp_sw_if_index = path->dvr.fp_interface;
2758           rpath->frp_flags |= FIB_ROUTE_PATH_DVR;
2759           break;
2760       case FIB_PATH_TYPE_UDP_ENCAP:
2761           rpath->frp_udp_encap_id = path->udp_encap.fp_udp_encap_id;
2762           rpath->frp_flags |= FIB_ROUTE_PATH_UDP_ENCAP;
2763           break;
2764       case FIB_PATH_TYPE_INTF_RX:
2765           rpath->frp_sw_if_index = path->receive.fp_interface;
2766           rpath->frp_flags |= FIB_ROUTE_PATH_INTF_RX;
2767           break;
2768       case FIB_PATH_TYPE_EXCLUSIVE:
2769         rpath->frp_flags |= FIB_ROUTE_PATH_EXCLUSIVE;
2770       default:
2771         break;
2772     }
2773
2774     if (path_ext && path_ext->fpe_type == FIB_PATH_EXT_MPLS) 
2775     {
2776         rpath->frp_label_stack = path_ext->fpe_path.frp_label_stack;
2777     }
2778
2779     if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_DROP)
2780         rpath->frp_flags |= FIB_ROUTE_PATH_DROP;
2781     if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_ICMP_UNREACH)
2782         rpath->frp_flags |= FIB_ROUTE_PATH_ICMP_UNREACH;
2783     if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_ICMP_PROHIBIT)
2784         rpath->frp_flags |= FIB_ROUTE_PATH_ICMP_PROHIBIT;
2785
2786     return (FIB_PATH_LIST_WALK_CONTINUE);
2787 }
2788
2789 dpo_proto_t
2790 fib_path_get_proto (fib_node_index_t path_index)
2791 {
2792     fib_path_t *path;
2793
2794     path = fib_path_get(path_index);
2795
2796     return (path->fp_nh_proto);
2797 }
2798
2799 void
2800 fib_path_module_init (void)
2801 {
2802     fib_node_register_type (FIB_NODE_TYPE_PATH, &fib_path_vft);
2803     fib_path_logger = vlib_log_register_class ("fib", "path");
2804 }
2805
2806 static clib_error_t *
2807 show_fib_path_command (vlib_main_t * vm,
2808                         unformat_input_t * input,
2809                         vlib_cli_command_t * cmd)
2810 {
2811     fib_node_index_t pi;
2812     fib_path_t *path;
2813
2814     if (unformat (input, "%d", &pi))
2815     {
2816         /*
2817          * show one in detail
2818          */
2819         if (!pool_is_free_index(fib_path_pool, pi))
2820         {
2821             path = fib_path_get(pi);
2822             u8 *s = format(NULL, "%U", format_fib_path, pi, 1,
2823                            FIB_PATH_FORMAT_FLAGS_NONE);
2824             s = format(s, "\n  children:");
2825             s = fib_node_children_format(path->fp_node.fn_children, s);
2826             vlib_cli_output (vm, "%v", s);
2827             vec_free(s);
2828         }
2829         else
2830         {
2831             vlib_cli_output (vm, "path %d invalid", pi);
2832         }
2833     }
2834     else
2835     {
2836         vlib_cli_output (vm, "FIB Paths");
2837         pool_foreach_index (pi, fib_path_pool,
2838         ({
2839             vlib_cli_output (vm, "%U", format_fib_path, pi, 0,
2840                              FIB_PATH_FORMAT_FLAGS_NONE);
2841         }));
2842     }
2843
2844     return (NULL);
2845 }
2846
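/*
 * Example debug CLI invocations:
 *   show fib paths      - one-line summary of every path in the pool
 *   show fib paths 13   - detail, including children, for path index 13
 *                         (13 is a hypothetical index)
 */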
2847 VLIB_CLI_COMMAND (show_fib_path, static) = {
2848   .path = "show fib paths",
2849   .function = show_fib_path_command,
2850   .short_help = "show fib paths [<path-index>]",
2851 };