Fix fib_path encoding (VPP-921)
[vpp.git] src/vnet/fib/fib_path.c
1 /*
2  * Copyright (c) 2016 Cisco and/or its affiliates.
3  * Licensed under the Apache License, Version 2.0 (the "License");
4  * you may not use this file except in compliance with the License.
5  * You may obtain a copy of the License at:
6  *
7  *     http://www.apache.org/licenses/LICENSE-2.0
8  *
9  * Unless required by applicable law or agreed to in writing, software
10  * distributed under the License is distributed on an "AS IS" BASIS,
11  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12  * See the License for the specific language governing permissions and
13  * limitations under the License.
14  */
15
16 #include <vlib/vlib.h>
17 #include <vnet/vnet.h>
18 #include <vnet/ip/format.h>
19 #include <vnet/ip/ip.h>
20 #include <vnet/dpo/drop_dpo.h>
21 #include <vnet/dpo/receive_dpo.h>
22 #include <vnet/dpo/load_balance_map.h>
23 #include <vnet/dpo/lookup_dpo.h>
24 #include <vnet/dpo/interface_dpo.h>
25 #include <vnet/dpo/mpls_disposition.h>
26
27 #include <vnet/adj/adj.h>
28 #include <vnet/adj/adj_mcast.h>
29
30 #include <vnet/fib/fib_path.h>
31 #include <vnet/fib/fib_node.h>
32 #include <vnet/fib/fib_table.h>
33 #include <vnet/fib/fib_entry.h>
34 #include <vnet/fib/fib_path_list.h>
35 #include <vnet/fib/fib_internal.h>
36 #include <vnet/fib/fib_urpf_list.h>
37 #include <vnet/fib/mpls_fib.h>
38
39 /**
40  * Enumeration of path types
41  */
42 typedef enum fib_path_type_t_ {
43     /**
44      * Marker. Add new types after this one.
45      */
46     FIB_PATH_TYPE_FIRST = 0,
47     /**
48      * Attached-nexthop. An interface and a nexthop are known.
49      */
50     FIB_PATH_TYPE_ATTACHED_NEXT_HOP = FIB_PATH_TYPE_FIRST,
51     /**
52      * attached. Only the interface is known.
53      */
54     FIB_PATH_TYPE_ATTACHED,
55     /**
56      * recursive. Only the next-hop is known.
57      */
58     FIB_PATH_TYPE_RECURSIVE,
59     /**
60      * special. nothing is known. so we drop.
61      */
62     FIB_PATH_TYPE_SPECIAL,
63     /**
64      * exclusive. user provided adj.
65      */
66     FIB_PATH_TYPE_EXCLUSIVE,
67     /**
68      * deag. Link to a lookup adj in the next table
69      */
70     FIB_PATH_TYPE_DEAG,
71     /**
72      * interface receive.
73      */
74     FIB_PATH_TYPE_INTF_RX,
75     /**
76      * receive. it's for-us.
77      */
78     FIB_PATH_TYPE_RECEIVE,
79     /**
80      * Marker. Add new types before this one, then update it.
81      */
82     FIB_PATH_TYPE_LAST = FIB_PATH_TYPE_RECEIVE,
83 } __attribute__ ((packed)) fib_path_type_t;
84
85 /**
86  * The maximum number of path_types
87  */
88 #define FIB_PATH_TYPE_MAX (FIB_PATH_TYPE_LAST + 1)
89
90 #define FIB_PATH_TYPES {                                        \
91     [FIB_PATH_TYPE_ATTACHED_NEXT_HOP] = "attached-nexthop",     \
92     [FIB_PATH_TYPE_ATTACHED]          = "attached",             \
93     [FIB_PATH_TYPE_RECURSIVE]         = "recursive",            \
94     [FIB_PATH_TYPE_SPECIAL]           = "special",              \
95     [FIB_PATH_TYPE_EXCLUSIVE]         = "exclusive",            \
96     [FIB_PATH_TYPE_DEAG]              = "deag",                 \
97     [FIB_PATH_TYPE_INTF_RX]           = "intf-rx",              \
98     [FIB_PATH_TYPE_RECEIVE]           = "receive",              \
99 }
100
101 #define FOR_EACH_FIB_PATH_TYPE(_item) \
102     for (_item = FIB_PATH_TYPE_FIRST; _item <= FIB_PATH_TYPE_LAST; _item++)
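/*
 * A minimal usage sketch (hypothetical caller) of the name table and the
 * iterator macro above: walking every path type and printing its name.
 *
 *    const char *names[] = FIB_PATH_TYPES;
 *    fib_path_type_t pt;
 *
 *    FOR_EACH_FIB_PATH_TYPE(pt) {
 *        clib_warning("path-type %d is %s", pt, names[pt]);
 *    }
 */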
103
104 /**
105  * Enumeration of path operational (i.e. derived) attributes
106  */
107 typedef enum fib_path_oper_attribute_t_ {
108     /**
109      * Marker. Add new types after this one.
110      */
111     FIB_PATH_OPER_ATTRIBUTE_FIRST = 0,
112     /**
113      * The path forms part of a recursive loop.
114      */
115     FIB_PATH_OPER_ATTRIBUTE_RECURSIVE_LOOP = FIB_PATH_OPER_ATTRIBUTE_FIRST,
116     /**
117      * The path is resolved
118      */
119     FIB_PATH_OPER_ATTRIBUTE_RESOLVED,
120     /**
121      * The path is attached, despite what the next-hop may say.
122      */
123     FIB_PATH_OPER_ATTRIBUTE_ATTACHED,
124     /**
125      * The path has become a permanent drop.
126      */
127     FIB_PATH_OPER_ATTRIBUTE_DROP,
128     /**
129      * Marker. Add new types before this one, then update it.
130      */
131     FIB_PATH_OPER_ATTRIBUTE_LAST = FIB_PATH_OPER_ATTRIBUTE_DROP,
132 } __attribute__ ((packed)) fib_path_oper_attribute_t;
133
134 /**
135  * The maximum number of path operational attributes
136  */
137 #define FIB_PATH_OPER_ATTRIBUTE_MAX (FIB_PATH_OPER_ATTRIBUTE_LAST + 1)
138
139 #define FIB_PATH_OPER_ATTRIBUTES {                                      \
140     [FIB_PATH_OPER_ATTRIBUTE_RECURSIVE_LOOP] = "recursive-loop",        \
141     [FIB_PATH_OPER_ATTRIBUTE_RESOLVED]       = "resolved",              \
142     [FIB_PATH_OPER_ATTRIBUTE_DROP]           = "drop",                  \
143 }
144
145 #define FOR_EACH_FIB_PATH_OPER_ATTRIBUTE(_item) \
146     for (_item = FIB_PATH_OPER_ATTRIBUTE_FIRST; \
147          _item <= FIB_PATH_OPER_ATTRIBUTE_LAST; \
148          _item++)
149
150 /**
151  * Path flags from the attributes
152  */
153 typedef enum fib_path_oper_flags_t_ {
154     FIB_PATH_OPER_FLAG_NONE = 0,
155     FIB_PATH_OPER_FLAG_RECURSIVE_LOOP = (1 << FIB_PATH_OPER_ATTRIBUTE_RECURSIVE_LOOP),
156     FIB_PATH_OPER_FLAG_DROP = (1 << FIB_PATH_OPER_ATTRIBUTE_DROP),
157     FIB_PATH_OPER_FLAG_RESOLVED = (1 << FIB_PATH_OPER_ATTRIBUTE_RESOLVED),
158     FIB_PATH_OPER_FLAG_ATTACHED = (1 << FIB_PATH_OPER_ATTRIBUTE_ATTACHED),
159 } __attribute__ ((packed)) fib_path_oper_flags_t;
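/*
 * Each operational flag is defined as (1 << attribute), so a flag can be
 * tested straight from its attribute. A sketch of such a test (hypothetical
 * helper; format_fib_path below does the same thing inline):
 *
 *    static inline int
 *    fib_path_oper_attr_is_set (fib_path_oper_flags_t flags,
 *                               fib_path_oper_attribute_t attr)
 *    {
 *        return (0 != (flags & (1 << attr)));
 *    }
 */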
160
161 /**
162  * A FIB path
163  */
164 typedef struct fib_path_t_ {
165     /**
166      * A path is a node in the FIB graph.
167      */
168     fib_node_t fp_node;
169
170     /**
171      * The index of the path-list to which this path belongs
172      */
173     u32 fp_pl_index;
174
175     /**
176      * This marks the start of the memory area used to hash
177      * the path
178      */
179     STRUCT_MARK(path_hash_start);
180
181     /**
182      * Configuration Flags
183      */
184     fib_path_cfg_flags_t fp_cfg_flags;
185
186     /**
187      * The type of the path. This is the selector for the union
188      */
189     fib_path_type_t fp_type;
190
191     /**
192      * The protocol of the next-hop, i.e. the address family of the
193      * next-hop's address. We can't derive this from the address itself
194      * since the address can be all zeros
195      */
196     fib_protocol_t fp_nh_proto;
197
198     /**
199      * UCMP [unnormalised] weight
200      */
201     u16 fp_weight;
202     /**
203      * A path preference. 0 is the best.
204      * Only paths of the best preference, that are 'up', are considered
205      * for forwarding.
206      */
207     u16 fp_preference;
208
209     /**
210      * per-type union of the data required to resolve the path
211      */
212     union {
213         struct {
214             /**
215              * The next-hop
216              */
217             ip46_address_t fp_nh;
218             /**
219              * The interface
220              */
221             u32 fp_interface;
222         } attached_next_hop;
223         struct {
224             /**
225              * The interface
226              */
227             u32 fp_interface;
228         } attached;
229         struct {
230             union
231             {
232                 /**
233                  * The next-hop
234                  */
235                 ip46_address_t fp_ip;
236                 struct {
237                     /**
238                      * The local label to resolve through.
239                      */
240                     mpls_label_t fp_local_label;
241                     /**
242                      * The EOS bit of the resolving label
243                      */
244                     mpls_eos_bit_t fp_eos;
245                 };
246             } fp_nh;
247             /**
248              * The FIB table index in which to find the next-hop.
249              */
250             fib_node_index_t fp_tbl_id;
251         } recursive;
252         struct {
253             /**
254              * The FIB index in which to perform the next lookup
255              */
256             fib_node_index_t fp_tbl_id;
257             /**
258              * The RPF-ID to tag the packets with
259              */
260             fib_rpf_id_t fp_rpf_id;
261         } deag;
262         struct {
263         } special;
264         struct {
265             /**
266              * The user provided 'exclusive' DPO
267              */
268             dpo_id_t fp_ex_dpo;
269         } exclusive;
270         struct {
271             /**
272              * The interface on which the local address is configured
273              */
274             u32 fp_interface;
275             /**
276              * The next-hop
277              */
278             ip46_address_t fp_addr;
279         } receive;
280         struct {
281             /**
282              * The interface on which the packets will be input.
283              */
284             u32 fp_interface;
285         } intf_rx;
286     };
287     STRUCT_MARK(path_hash_end);
288
289     /**
290      * Members in this last section represent information that is
291      * derived during resolution. It should not be copied to new paths
292      * nor compared.
293      */
294
295     /**
296      * Operational Flags
297      */
298     fib_path_oper_flags_t fp_oper_flags;
299
300     /**
301      * the resolving via fib. not part of the union, since it is not part
302      * of the path's hash.
303      */
304     fib_node_index_t fp_via_fib;
305
306     /**
307      * The Data-path objects through which this path resolves for IP.
308      */
309     dpo_id_t fp_dpo;
310
311     /**
312      * the index of this path in the parent's child list.
313      */
314     u32 fp_sibling;
315 } fib_path_t;
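/*
 * fp_type is the selector for the per-type union above; only the member
 * matching the type is valid. A sketch of reading the interface for the
 * attached flavours (hypothetical snippet; fib_path_get_resolving_interface
 * later in this file does this for all types):
 *
 *    switch (path->fp_type)
 *    {
 *    case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
 *        sw_if_index = path->attached_next_hop.fp_interface;
 *        break;
 *    case FIB_PATH_TYPE_ATTACHED:
 *        sw_if_index = path->attached.fp_interface;
 *        break;
 *    default:
 *        sw_if_index = ~0;
 *        break;
 *    }
 */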
316
317 /*
318  * Array of strings/names for the path types and attributes
319  */
320 static const char *fib_path_type_names[] = FIB_PATH_TYPES;
321 static const char *fib_path_oper_attribute_names[] = FIB_PATH_OPER_ATTRIBUTES;
322 static const char *fib_path_cfg_attribute_names[]  = FIB_PATH_CFG_ATTRIBUTES;
323
324 /*
325  * The memory pool from which we allocate all the paths
326  */
327 static fib_path_t *fib_path_pool;
328
329 /*
330  * Debug macro
331  */
332 #ifdef FIB_DEBUG
333 #define FIB_PATH_DBG(_p, _fmt, _args...)                        \
334 {                                                               \
335     u8 *_tmp = NULL;                                            \
336     _tmp = fib_path_format(fib_path_get_index(_p), _tmp);       \
337     clib_warning("path:[%d:%s]:" _fmt,                          \
338                  fib_path_get_index(_p), _tmp,                  \
339                  ##_args);                                      \
340     vec_free(_tmp);                                             \
341 }
342 #else
343 #define FIB_PATH_DBG(_p, _fmt, _args...)
344 #endif
345
346 static fib_path_t *
347 fib_path_get (fib_node_index_t index)
348 {
349     return (pool_elt_at_index(fib_path_pool, index));
350 }
351
352 static fib_node_index_t 
353 fib_path_get_index (fib_path_t *path)
354 {
355     return (path - fib_path_pool);
356 }
357
358 static fib_node_t *
359 fib_path_get_node (fib_node_index_t index)
360 {
361     return ((fib_node_t*)fib_path_get(index));
362 }
363
364 static fib_path_t*
365 fib_path_from_fib_node (fib_node_t *node)
366 {
367 #if CLIB_DEBUG > 0
368     ASSERT(FIB_NODE_TYPE_PATH == node->fn_type);
369 #endif
370     return ((fib_path_t*)node);
371 }
372
373 u8 *
374 format_fib_path (u8 * s, va_list * args)
375 {
376     fib_path_t *path = va_arg (*args, fib_path_t *);
377     vnet_main_t * vnm = vnet_get_main();
378     fib_path_oper_attribute_t oattr;
379     fib_path_cfg_attribute_t cattr;
380
381     s = format (s, "      index:%d ", fib_path_get_index(path));
382     s = format (s, "pl-index:%d ", path->fp_pl_index);
383     s = format (s, "%U ", format_fib_protocol, path->fp_nh_proto);
384     s = format (s, "weight=%d ", path->fp_weight);
385     s = format (s, "pref=%d ", path->fp_preference);
386     s = format (s, "%s: ", fib_path_type_names[path->fp_type]);
387     if (FIB_PATH_OPER_FLAG_NONE != path->fp_oper_flags) {
388         s = format(s, " oper-flags:");
389         FOR_EACH_FIB_PATH_OPER_ATTRIBUTE(oattr) {
390             if ((1<<oattr) & path->fp_oper_flags) {
391                 s = format (s, "%s,", fib_path_oper_attribute_names[oattr]);
392             }
393         }
394     }
395     if (FIB_PATH_CFG_FLAG_NONE != path->fp_cfg_flags) {
396         s = format(s, " cfg-flags:");
397         FOR_EACH_FIB_PATH_CFG_ATTRIBUTE(cattr) {
398             if ((1<<cattr) & path->fp_cfg_flags) {
399                 s = format (s, "%s,", fib_path_cfg_attribute_names[cattr]);
400             }
401         }
402     }
403     s = format(s, "\n       ");
404
405     switch (path->fp_type)
406     {
407     case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
408         s = format (s, "%U", format_ip46_address,
409                     &path->attached_next_hop.fp_nh,
410                     IP46_TYPE_ANY);
411         if (path->fp_oper_flags & FIB_PATH_OPER_FLAG_DROP)
412         {
413             s = format (s, " if_index:%d", path->attached_next_hop.fp_interface);
414         }
415         else
416         {
417             s = format (s, " %U",
418                         format_vnet_sw_interface_name,
419                         vnm,
420                         vnet_get_sw_interface(
421                             vnm,
422                             path->attached_next_hop.fp_interface));
423             if (vnet_sw_interface_is_p2p(vnet_get_main(),
424                                          path->attached_next_hop.fp_interface))
425             {
426                 s = format (s, " (p2p)");
427             }
428         }
429         if (!dpo_id_is_valid(&path->fp_dpo))
430         {
431             s = format(s, "\n          unresolved");
432         }
433         else
434         {
435             s = format(s, "\n          %U",
436                        format_dpo_id,
437                        &path->fp_dpo, 13);
438         }
439         break;
440     case FIB_PATH_TYPE_ATTACHED:
441         if (path->fp_oper_flags & FIB_PATH_OPER_FLAG_DROP)
442         {
443             s = format (s, " if_index:%d", path->attached.fp_interface);
444         }
445         else
446         {
447             s = format (s, " %U",
448                         format_vnet_sw_interface_name,
449                         vnm,
450                         vnet_get_sw_interface(
451                             vnm,
452                             path->attached.fp_interface));
453         }
454         break;
455     case FIB_PATH_TYPE_RECURSIVE:
456         if (FIB_PROTOCOL_MPLS == path->fp_nh_proto)
457         {
458             s = format (s, "via %U %U",
459                         format_mpls_unicast_label,
460                         path->recursive.fp_nh.fp_local_label,
461                         format_mpls_eos_bit,
462                         path->recursive.fp_nh.fp_eos);
463         }
464         else
465         {
466             s = format (s, "via %U",
467                         format_ip46_address,
468                         &path->recursive.fp_nh.fp_ip,
469                         IP46_TYPE_ANY);
470         }
471         s = format (s, " in fib:%d",
472                     path->recursive.fp_tbl_id);
474         s = format (s, " via-fib:%d", path->fp_via_fib); 
475         s = format (s, " via-dpo:[%U:%d]",
476                     format_dpo_type, path->fp_dpo.dpoi_type, 
477                     path->fp_dpo.dpoi_index);
478
479         break;
480     case FIB_PATH_TYPE_RECEIVE:
481     case FIB_PATH_TYPE_INTF_RX:
482     case FIB_PATH_TYPE_SPECIAL:
483     case FIB_PATH_TYPE_DEAG:
484     case FIB_PATH_TYPE_EXCLUSIVE:
485         if (dpo_id_is_valid(&path->fp_dpo))
486         {
487             s = format(s, "%U", format_dpo_id,
488                        &path->fp_dpo, 2);
489         }
490         break;
491     }
492     return (s);
493 }
494
495 u8 *
496 fib_path_format (fib_node_index_t pi, u8 *s)
497 {
498     fib_path_t *path;
499
500     path = fib_path_get(pi);
501     ASSERT(NULL != path);
502
503     return (format (s, "%U", format_fib_path, path));
504 }
505
506 u8 *
507 fib_path_adj_format (fib_node_index_t pi,
508                      u32 indent,
509                      u8 *s)
510 {
511     fib_path_t *path;
512
513     path = fib_path_get(pi);
514     ASSERT(NULL != path);
515
516     if (!dpo_id_is_valid(&path->fp_dpo))
517     {
518         s = format(s, " unresolved");
519     }
520     else
521     {
522         s = format(s, "%U", format_dpo_id,
523                    &path->fp_dpo, 2);
524     }
525
526     return (s);
527 }
528
529 /*
530  * fib_path_last_lock_gone
531  *
532  * We don't share paths, we share path lists, so the [un]lock functions
533  * are no-ops
534  */
535 static void
536 fib_path_last_lock_gone (fib_node_t *node)
537 {
538     ASSERT(0);
539 }
540
541 static const adj_index_t
542 fib_path_attached_next_hop_get_adj (fib_path_t *path,
543                                     vnet_link_t link)
544 {
545     if (vnet_sw_interface_is_p2p(vnet_get_main(),
546                                  path->attached_next_hop.fp_interface))
547     {
548         /*
549          * if the interface is p2p then the adj for the specific
550          * neighbour on that link will never exist. on p2p links
551          * the subnet address (the attached route) links to the
552          * auto-adj (see below), we want that adj here too.
553          */
554         return (adj_nbr_add_or_lock(path->fp_nh_proto,
555                                     link,
556                                     &zero_addr,
557                                     path->attached_next_hop.fp_interface));
558     }
559     else
560     {
561         return (adj_nbr_add_or_lock(path->fp_nh_proto,
562                                     link,
563                                     &path->attached_next_hop.fp_nh,
564                                     path->attached_next_hop.fp_interface));
565     }
566 }
567
568 static void
569 fib_path_attached_next_hop_set (fib_path_t *path)
570 {
571     /*
572      * resolve directly via the adjacency described by the
573      * interface and next-hop
574      */
575     dpo_set(&path->fp_dpo,
576             DPO_ADJACENCY,
577             fib_proto_to_dpo(path->fp_nh_proto),
578             fib_path_attached_next_hop_get_adj(
579                  path,
580                  fib_proto_to_link(path->fp_nh_proto)));
581
582     /*
583      * become a child of the adjacency so we receive updates
584      * when its rewrite changes
585      */
586     path->fp_sibling = adj_child_add(path->fp_dpo.dpoi_index,
587                                      FIB_NODE_TYPE_PATH,
588                                      fib_path_get_index(path));
589
590     if (!vnet_sw_interface_is_admin_up(vnet_get_main(),
591                                       path->attached_next_hop.fp_interface) ||
592         !adj_is_up(path->fp_dpo.dpoi_index))
593     {
594         path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
595     }
596 }
597
598 static const adj_index_t
599 fib_path_attached_get_adj (fib_path_t *path,
600                            vnet_link_t link)
601 {
602     if (vnet_sw_interface_is_p2p(vnet_get_main(),
603                                  path->attached.fp_interface))
604     {
605         /*
606          * point-2-point interfaces do not require a glean, since
607          * there is nothing to ARP. Install a rewrite/nbr adj instead
608          */
609         return (adj_nbr_add_or_lock(path->fp_nh_proto,
610                                     link,
611                                     &zero_addr,
612                                     path->attached.fp_interface));
613     }
614     else
615     {
616         return (adj_glean_add_or_lock(path->fp_nh_proto,
617                                       path->attached.fp_interface,
618                                       NULL));
619     }
620 }
621
622 /*
623  * create or update the path's recursive adj
624  */
625 static void
626 fib_path_recursive_adj_update (fib_path_t *path,
627                                fib_forward_chain_type_t fct,
628                                dpo_id_t *dpo)
629 {
630     dpo_id_t via_dpo = DPO_INVALID;
631
632     /*
633      * get the DPO to resolve through from the via-entry
634      */
635     fib_entry_contribute_forwarding(path->fp_via_fib,
636                                     fct,
637                                     &via_dpo);
638
639
640     /*
641      * hope for the best - clear if restrictions apply.
642      */
643     path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RESOLVED;
644
645     /*
646      * Validate any recursion constraints and over-ride the via
647      * adj if not met
648      */
649     if (path->fp_oper_flags & FIB_PATH_OPER_FLAG_RECURSIVE_LOOP)
650     {
651         path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
652         dpo_copy(&via_dpo, drop_dpo_get(fib_proto_to_dpo(path->fp_nh_proto)));
653     }
654     else if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_RESOLVE_HOST)
655     {
656         /*
657          * the via FIB must be a host route.
658          * note the via FIB just added will always be a host route
659          * since it is an RR source added host route. So what we need to
660          * check is whether the route has other sources. If it does then
661          * some other source has added it as a host route. If it doesn't
662          * then it was added only here and inherits forwarding from a cover.
663          * the cover is not a host route.
664          * The RR source is the lowest priority source, so we check if it
665          * is the best. if it is there are no other sources.
666          */
667         if (fib_entry_get_best_source(path->fp_via_fib) >= FIB_SOURCE_RR)
668         {
669             path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
670             dpo_copy(&via_dpo, drop_dpo_get(fib_proto_to_dpo(path->fp_nh_proto)));
671
672             /*
673              * PIC edge trigger. let the load-balance maps know
674              */
675             load_balance_map_path_state_change(fib_path_get_index(path));
676         }
677     }
678     else if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_RESOLVE_ATTACHED)
679     {
680         /*
681          * RR source entries inherit the flags from the cover, so
682          * we can check the via directly
683          */
684         if (!(FIB_ENTRY_FLAG_ATTACHED & fib_entry_get_flags(path->fp_via_fib)))
685         {
686             path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
687             dpo_copy(&via_dpo, drop_dpo_get(fib_proto_to_dpo(path->fp_nh_proto)));
688
689             /*
690              * PIC edge trigger. let the load-balance maps know
691              */
692             load_balance_map_path_state_change(fib_path_get_index(path));
693         }
694     }
695     /*
696      * check for over-riding factors on the FIB entry itself
697      */
698     if (!fib_entry_is_resolved(path->fp_via_fib))
699     {
700         path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
701         dpo_copy(&via_dpo, drop_dpo_get(fib_proto_to_dpo(path->fp_nh_proto)));
702
703         /*
704          * PIC edge trigger. let the load-balance maps know
705          */
706         load_balance_map_path_state_change(fib_path_get_index(path));
707     }
708
709     /*
710      * If this path is contributing a drop, then it's not resolved
711      */
712     if (dpo_is_drop(&via_dpo) || load_balance_is_drop(&via_dpo))
713     {
714         path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
715     }
716
717     /*
718      * update the path's contributed DPO
719      */
720     dpo_copy(dpo, &via_dpo);
721
722     FIB_PATH_DBG(path, "recursive update: %U",
723                  format_dpo_id,
724                  &path->fp_dpo, 2);
725
726     dpo_reset(&via_dpo);
727 }
728
729 /*
730  * fib_path_is_permanent_drop
731  *
732  * Return !0 if the path is configured to permanently drop,
733  * despite other attributes.
734  */
735 static int
736 fib_path_is_permanent_drop (fib_path_t *path)
737 {
738     return ((path->fp_cfg_flags & FIB_PATH_CFG_FLAG_DROP) ||
739             (path->fp_oper_flags & FIB_PATH_OPER_FLAG_DROP));
740 }
741
742 /*
743  * fib_path_unresolve
744  *
745  * Remove our dependency on the resolution target
746  */
747 static void
748 fib_path_unresolve (fib_path_t *path)
749 {
750     /*
751      * the forced drop path does not need unresolving
752      */
753     if (fib_path_is_permanent_drop(path))
754     {
755         return;
756     }
757
758     switch (path->fp_type)
759     {
760     case FIB_PATH_TYPE_RECURSIVE:
761         if (FIB_NODE_INDEX_INVALID != path->fp_via_fib)
762         {
763             fib_prefix_t pfx;
764
765             fib_entry_get_prefix(path->fp_via_fib, &pfx);
766             fib_entry_child_remove(path->fp_via_fib,
767                                    path->fp_sibling);
768             fib_table_entry_special_remove(path->recursive.fp_tbl_id,
769                                            &pfx,
770                                            FIB_SOURCE_RR);
771             path->fp_via_fib = FIB_NODE_INDEX_INVALID;
772         }
773         break;
774     case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
775     case FIB_PATH_TYPE_ATTACHED:
776         adj_child_remove(path->fp_dpo.dpoi_index,
777                          path->fp_sibling);
778         adj_unlock(path->fp_dpo.dpoi_index);
779         break;
780     case FIB_PATH_TYPE_EXCLUSIVE:
781         dpo_reset(&path->exclusive.fp_ex_dpo);
782         break;
783     case FIB_PATH_TYPE_SPECIAL:
784     case FIB_PATH_TYPE_RECEIVE:
785     case FIB_PATH_TYPE_INTF_RX:
786     case FIB_PATH_TYPE_DEAG:
787         /*
788          * these hold only the path's DPO, which is reset below.
789          */
790         break;
791     }
792
793     /*
794      * release the adj we were holding and pick up the
795      * drop just in case.
796      */
797     dpo_reset(&path->fp_dpo);
798     path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
799
800     return;
801 }
802
803 static fib_forward_chain_type_t
804 fib_path_to_chain_type (const fib_path_t *path)
805 {
806     switch (path->fp_nh_proto)
807     {
808     case FIB_PROTOCOL_IP4:
809         return (FIB_FORW_CHAIN_TYPE_UNICAST_IP4);
810     case FIB_PROTOCOL_IP6:
811         return (FIB_FORW_CHAIN_TYPE_UNICAST_IP6);
812     case FIB_PROTOCOL_MPLS:
813         if (FIB_PATH_TYPE_RECURSIVE == path->fp_type &&
814             MPLS_EOS == path->recursive.fp_nh.fp_eos)
815         {
816             return (FIB_FORW_CHAIN_TYPE_MPLS_EOS);
817         }
818         else
819         {
820             return (FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS);
821         }
822     }
823     return (FIB_FORW_CHAIN_TYPE_UNICAST_IP4);
824 }
825
826 /*
827  * fib_path_back_walk_notify
828  *
829  * A back walk has reached this path.
830  */
831 static fib_node_back_walk_rc_t
832 fib_path_back_walk_notify (fib_node_t *node,
833                            fib_node_back_walk_ctx_t *ctx)
834 {
835     fib_path_t *path;
836
837     path = fib_path_from_fib_node(node);
838
839     switch (path->fp_type)
840     {
841     case FIB_PATH_TYPE_RECURSIVE:
842         if (FIB_NODE_BW_REASON_FLAG_EVALUATE & ctx->fnbw_reason)
843         {
844             /*
845              * modify the recursive adjacency to use the new forwarding
846              * of the via-fib.
847              * this update is visible to packets in flight in the DP.
848              */
849             fib_path_recursive_adj_update(
850                 path,
851                 fib_path_to_chain_type(path),
852                 &path->fp_dpo);
853         }
854         if ((FIB_NODE_BW_REASON_FLAG_ADJ_UPDATE & ctx->fnbw_reason) ||
855             (FIB_NODE_BW_REASON_FLAG_ADJ_DOWN   & ctx->fnbw_reason))
856         {
857             /*
858              * ADJ updates (complete<->incomplete) do not need to propagate to
859              * recursive entries.
860              * The only reason it's needed as far back as here is that the adj
861              * and the incomplete adj are a different DPO type, so the LBs need
862              * to re-stack.
863              * If this walk was quashed in the fib_entry, then any non-fib_path
864              * children (like tunnels that collapse out the LB when they stack)
865              * would not see the update.
866              */
867             return (FIB_NODE_BACK_WALK_CONTINUE);
868         }
869         break;
870     case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
871         /*
872          * FIXME comment
873          * ADJ_UPDATE backwalks pass silently through here and up to
874          * the path-list when the multipath adj collapse occurs.
875          * The reason we do this is that the assumption is that VPP
876          * runs in an environment where the Control-Plane is remote
877          * and hence reacts slowly to link up/down. In order to remove
878          * this down link from the ECMP set quickly, we back-walk.
879          * VPP also has dedicated CPUs, so we are not stealing resources
880          * from the CP to do so.
881          */
882         if (FIB_NODE_BW_REASON_FLAG_INTERFACE_UP & ctx->fnbw_reason)
883         {
884             if (path->fp_oper_flags & FIB_PATH_OPER_FLAG_RESOLVED)
885             {
886                 /*
887                  * already resolved. no need to walk back again
888                  */
889                 return (FIB_NODE_BACK_WALK_CONTINUE);
890             }
891             path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RESOLVED;
892         }
893         if (FIB_NODE_BW_REASON_FLAG_INTERFACE_DOWN & ctx->fnbw_reason)
894         {
895             if (!(path->fp_oper_flags & FIB_PATH_OPER_FLAG_RESOLVED))
896             {
897                 /*
898                  * already unresolved. no need to walk back again
899                  */
900                 return (FIB_NODE_BACK_WALK_CONTINUE);
901             }
902             path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
903         }
904         if (FIB_NODE_BW_REASON_FLAG_INTERFACE_DELETE & ctx->fnbw_reason)
905         {
906             /*
907              * The interface this path resolves through has been deleted.
908              * This will leave the path in a permanent drop state. The route
909              * needs to be removed and readded (and hence the path-list deleted)
910              * before it can forward again.
911              */
912             fib_path_unresolve(path);
913             path->fp_oper_flags |= FIB_PATH_OPER_FLAG_DROP;
914         }
915         if (FIB_NODE_BW_REASON_FLAG_ADJ_UPDATE & ctx->fnbw_reason)
916         {
917             /*
918              * restack the DPO to pick up the correct DPO sub-type
919              */
920             uword if_is_up;
921             adj_index_t ai;
922
923             if_is_up = vnet_sw_interface_is_admin_up(
924                            vnet_get_main(),
925                            path->attached_next_hop.fp_interface);
926
927             ai = fib_path_attached_next_hop_get_adj(
928                      path,
929                      fib_proto_to_link(path->fp_nh_proto));
930
931             path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
932             if (if_is_up && adj_is_up(ai))
933             {
934                 path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RESOLVED;
935             }
936
937             dpo_set(&path->fp_dpo, DPO_ADJACENCY,
938                     fib_proto_to_dpo(path->fp_nh_proto),
939                     ai);
940             adj_unlock(ai);
941
942             if (!if_is_up)
943             {
944                 /*
945                  * If the interface is not up there is no reason to walk
946                  * back to children. If we did, they would only evaluate
947                  * that this path is unresolved and hence it would
948                  * not contribute the adjacency - so it would be wasted
949                  * CPU time.
950                  */
951                 return (FIB_NODE_BACK_WALK_CONTINUE);
952             }
953         }
954         if (FIB_NODE_BW_REASON_FLAG_ADJ_DOWN & ctx->fnbw_reason)
955         {
956             if (!(path->fp_oper_flags & FIB_PATH_OPER_FLAG_RESOLVED))
957             {
958                 /*
959                  * already unresolved. no need to walk back again
960                  */
961                 return (FIB_NODE_BACK_WALK_CONTINUE);
962             }
963             /*
964              * the adj has gone down. the path is no longer resolved.
965              */
966             path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
967         }
968         break;
969     case FIB_PATH_TYPE_ATTACHED:
970         /*
971          * FIXME; this could schedule a lower priority walk, since attached
972          * routes are not usually in ECMP configurations so the backwalk to
973          * the FIB entry does not need to be high priority
974          */
975         if (FIB_NODE_BW_REASON_FLAG_INTERFACE_UP & ctx->fnbw_reason)
976         {
977             path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RESOLVED;
978         }
979         if (FIB_NODE_BW_REASON_FLAG_INTERFACE_DOWN & ctx->fnbw_reason)
980         {
981             path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
982         }
983         if (FIB_NODE_BW_REASON_FLAG_INTERFACE_DELETE & ctx->fnbw_reason)
984         {
985             fib_path_unresolve(path);
986             path->fp_oper_flags |= FIB_PATH_OPER_FLAG_DROP;
987         }
988         break;
989     case FIB_PATH_TYPE_INTF_RX:
990         ASSERT(0);
991     case FIB_PATH_TYPE_DEAG:
992         /*
993          * FIXME When VRF delete is allowed this will need a poke.
994          */
995     case FIB_PATH_TYPE_SPECIAL:
996     case FIB_PATH_TYPE_RECEIVE:
997     case FIB_PATH_TYPE_EXCLUSIVE:
998         /*
999          * these path types have no parents. so to be
1000          * walked from one is unexpected.
1001          */
1002         ASSERT(0);
1003         break;
1004     }
1005
1006     /*
1007      * propagate the backwalk further to the path-list
1008      */
1009     fib_path_list_back_walk(path->fp_pl_index, ctx);
1010
1011     return (FIB_NODE_BACK_WALK_CONTINUE);
1012 }
1013
1014 static void
1015 fib_path_memory_show (void)
1016 {
1017     fib_show_memory_usage("Path",
1018                           pool_elts(fib_path_pool),
1019                           pool_len(fib_path_pool),
1020                           sizeof(fib_path_t));
1021 }
1022
1023 /*
1024  * The FIB path's graph node virtual function table
1025  */
1026 static const fib_node_vft_t fib_path_vft = {
1027     .fnv_get = fib_path_get_node,
1028     .fnv_last_lock = fib_path_last_lock_gone,
1029     .fnv_back_walk = fib_path_back_walk_notify,
1030     .fnv_mem_show = fib_path_memory_show,
1031 };
1032
1033 static fib_path_cfg_flags_t
1034 fib_path_route_flags_to_cfg_flags (const fib_route_path_t *rpath)
1035 {
1036     fib_path_cfg_flags_t cfg_flags = FIB_PATH_CFG_FLAG_NONE;
1037
1038     if (rpath->frp_flags & FIB_ROUTE_PATH_RESOLVE_VIA_HOST)
1039         cfg_flags |= FIB_PATH_CFG_FLAG_RESOLVE_HOST;
1040     if (rpath->frp_flags & FIB_ROUTE_PATH_RESOLVE_VIA_ATTACHED)
1041         cfg_flags |= FIB_PATH_CFG_FLAG_RESOLVE_ATTACHED;
1042     if (rpath->frp_flags & FIB_ROUTE_PATH_LOCAL)
1043         cfg_flags |= FIB_PATH_CFG_FLAG_LOCAL;
1044     if (rpath->frp_flags & FIB_ROUTE_PATH_ATTACHED)
1045         cfg_flags |= FIB_PATH_CFG_FLAG_ATTACHED;
1046     if (rpath->frp_flags & FIB_ROUTE_PATH_INTF_RX)
1047         cfg_flags |= FIB_PATH_CFG_FLAG_INTF_RX;
1048     if (rpath->frp_flags & FIB_ROUTE_PATH_RPF_ID)
1049         cfg_flags |= FIB_PATH_CFG_FLAG_RPF_ID;
1050     if (rpath->frp_flags & FIB_ROUTE_PATH_EXCLUSIVE)
1051         cfg_flags |= FIB_PATH_CFG_FLAG_EXCLUSIVE;
1052     if (rpath->frp_flags & FIB_ROUTE_PATH_DROP)
1053         cfg_flags |= FIB_PATH_CFG_FLAG_DROP;
1054
1055     return (cfg_flags);
1056 }
1057
1058 /*
1059  * fib_path_create
1060  *
1061  * Create and initialise a new path object.
1062  * return the index of the path.
1063  */
1064 fib_node_index_t
1065 fib_path_create (fib_node_index_t pl_index,
1066                  const fib_route_path_t *rpath)
1067 {
1068     fib_path_t *path;
1069
1070     pool_get(fib_path_pool, path);
1071     memset(path, 0, sizeof(*path));
1072
1073     fib_node_init(&path->fp_node,
1074                   FIB_NODE_TYPE_PATH);
1075
1076     dpo_reset(&path->fp_dpo);
1077     path->fp_pl_index = pl_index;
1078     path->fp_nh_proto = rpath->frp_proto;
1079     path->fp_via_fib = FIB_NODE_INDEX_INVALID;
1080     path->fp_weight = rpath->frp_weight;
1081     if (0 == path->fp_weight)
1082     {
1083         /*
1084          * a weight of 0 is a meaningless value. We could either reject it, and thus force
1085          * clients to always use 1, or accept it and fix it up appropriately.
1086          */
1087         path->fp_weight = 1;
1088     }
1089     path->fp_preference = rpath->frp_preference;
1090     path->fp_cfg_flags = fib_path_route_flags_to_cfg_flags(rpath);
1091
1092     /*
1093      * deduce the path's type from the parameters and save what is needed.
1094      */
1095     if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_LOCAL)
1096     {
1097         path->fp_type = FIB_PATH_TYPE_RECEIVE;
1098         path->receive.fp_interface = rpath->frp_sw_if_index;
1099         path->receive.fp_addr = rpath->frp_addr;
1100     }
1101     else if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_INTF_RX)
1102     {
1103         path->fp_type = FIB_PATH_TYPE_INTF_RX;
1104         path->intf_rx.fp_interface = rpath->frp_sw_if_index;
1105     }
1106     else if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_RPF_ID)
1107     {
1108         path->fp_type = FIB_PATH_TYPE_DEAG;
1109         path->deag.fp_tbl_id = rpath->frp_fib_index;
1110         path->deag.fp_rpf_id = rpath->frp_rpf_id;
1111     }
1112     else if (~0 != rpath->frp_sw_if_index)
1113     {
1114         if (ip46_address_is_zero(&rpath->frp_addr))
1115         {
1116             path->fp_type = FIB_PATH_TYPE_ATTACHED;
1117             path->attached.fp_interface = rpath->frp_sw_if_index;
1118         }
1119         else
1120         {
1121             path->fp_type = FIB_PATH_TYPE_ATTACHED_NEXT_HOP;
1122             path->attached_next_hop.fp_interface = rpath->frp_sw_if_index;
1123             path->attached_next_hop.fp_nh = rpath->frp_addr;
1124         }
1125     }
1126     else
1127     {
1128         if (ip46_address_is_zero(&rpath->frp_addr))
1129         {
1130             if (~0 == rpath->frp_fib_index)
1131             {
1132                 path->fp_type = FIB_PATH_TYPE_SPECIAL;
1133             }
1134             else
1135             {
1136                 path->fp_type = FIB_PATH_TYPE_DEAG;
1137                 path->deag.fp_tbl_id = rpath->frp_fib_index;
1138             }           
1139         }
1140         else
1141         {
1142             path->fp_type = FIB_PATH_TYPE_RECURSIVE;
1143             if (FIB_PROTOCOL_MPLS == path->fp_nh_proto)
1144             {
1145                 path->recursive.fp_nh.fp_local_label = rpath->frp_local_label;
1146                 path->recursive.fp_nh.fp_eos = rpath->frp_eos;
1147             }
1148             else
1149             {
1150                 path->recursive.fp_nh.fp_ip = rpath->frp_addr;
1151             }
1152             path->recursive.fp_tbl_id = rpath->frp_fib_index;
1153         }
1154     }
1155
1156     FIB_PATH_DBG(path, "create");
1157
1158     return (fib_path_get_index(path));
1159 }
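/*
 * A creation sketch (hypothetical caller; the address and indices are
 * example values only). Supplying both an interface and a non-zero
 * next-hop yields an attached-nexthop path per the deduction above:
 *
 *    fib_route_path_t rpath;
 *    fib_node_index_t path_index;
 *
 *    memset(&rpath, 0, sizeof(rpath));
 *    rpath.frp_proto = FIB_PROTOCOL_IP4;
 *    rpath.frp_addr.ip4.as_u32 = clib_host_to_net_u32(0x0a000001);
 *    rpath.frp_sw_if_index = 1;
 *    rpath.frp_fib_index = ~0;
 *    rpath.frp_weight = 1;
 *
 *    path_index = fib_path_create(pl_index, &rpath);
 */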
1160
1161 /*
1162  * fib_path_create_special
1163  *
1164  * Create and initialise a new path object.
1165  * return the index of the path.
1166  */
1167 fib_node_index_t
1168 fib_path_create_special (fib_node_index_t pl_index,
1169                          fib_protocol_t nh_proto,
1170                          fib_path_cfg_flags_t flags,
1171                          const dpo_id_t *dpo)
1172 {
1173     fib_path_t *path;
1174
1175     pool_get(fib_path_pool, path);
1176     memset(path, 0, sizeof(*path));
1177
1178     fib_node_init(&path->fp_node,
1179                   FIB_NODE_TYPE_PATH);
1180     dpo_reset(&path->fp_dpo);
1181
1182     path->fp_pl_index = pl_index;
1183     path->fp_weight = 1;
1184     path->fp_preference = 0;
1185     path->fp_nh_proto = nh_proto;
1186     path->fp_via_fib = FIB_NODE_INDEX_INVALID;
1187     path->fp_cfg_flags = flags;
1188
1189     if (FIB_PATH_CFG_FLAG_DROP & flags)
1190     {
1191         path->fp_type = FIB_PATH_TYPE_SPECIAL;
1192     }
1193     else if (FIB_PATH_CFG_FLAG_LOCAL & flags)
1194     {
1195         path->fp_type = FIB_PATH_TYPE_RECEIVE;
1196         path->attached.fp_interface = FIB_NODE_INDEX_INVALID;
1197     }
1198     else
1199     {
1200         path->fp_type = FIB_PATH_TYPE_EXCLUSIVE;
1201         ASSERT(NULL != dpo);
1202         dpo_copy(&path->exclusive.fp_ex_dpo, dpo);
1203     }
1204
1205     return (fib_path_get_index(path));
1206 }
1207
1208 /*
1209  * fib_path_copy
1210  *
1211  * Copy a path. return index of new path.
1212  */
1213 fib_node_index_t
1214 fib_path_copy (fib_node_index_t path_index,
1215                fib_node_index_t path_list_index)
1216 {
1217     fib_path_t *path, *orig_path;
1218
1219     pool_get(fib_path_pool, path);
1220
1221     orig_path = fib_path_get(path_index);
1222     ASSERT(NULL != orig_path);
1223
1224     memcpy(path, orig_path, sizeof(*path));
1225
1226     FIB_PATH_DBG(path, "create-copy:%d", path_index);
1227
1228     /*
1229      * reset the dynamic section
1230      */
1231     fib_node_init(&path->fp_node, FIB_NODE_TYPE_PATH);
1232     path->fp_oper_flags     = FIB_PATH_OPER_FLAG_NONE;
1233     path->fp_pl_index  = path_list_index;
1234     path->fp_via_fib   = FIB_NODE_INDEX_INVALID;
1235     memset(&path->fp_dpo, 0, sizeof(path->fp_dpo));
1236     dpo_reset(&path->fp_dpo);
1237
1238     return (fib_path_get_index(path));
1239 }
1240
1241 /*
1242  * fib_path_destroy
1243  *
1244  * destroy a path that is no longer required
1245  */
1246 void
1247 fib_path_destroy (fib_node_index_t path_index)
1248 {
1249     fib_path_t *path;
1250
1251     path = fib_path_get(path_index);
1252
1253     ASSERT(NULL != path);
1254     FIB_PATH_DBG(path, "destroy");
1255
1256     fib_path_unresolve(path);
1257
1258     fib_node_deinit(&path->fp_node);
1259     pool_put(fib_path_pool, path);
1260 }
1261
1262 /*
1263  * fib_path_hash
1264  *
1265  * compute the hash of a path's configuration
1266  */
1267 uword
1268 fib_path_hash (fib_node_index_t path_index)
1269 {
1270     fib_path_t *path;
1271
1272     path = fib_path_get(path_index);
1273
1274     return (hash_memory(STRUCT_MARK_PTR(path, path_hash_start),
1275                         (STRUCT_OFFSET_OF(fib_path_t, path_hash_end) -
1276                          STRUCT_OFFSET_OF(fib_path_t, path_hash_start)),
1277                         0));
1278 }
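/*
 * Note that only the members between STRUCT_MARK(path_hash_start) and
 * STRUCT_MARK(path_hash_end) are hashed: the path's configured identity
 * (flags, type, proto, weight, preference and the per-type union), none of
 * the derived state. Consequently (hypothetical check) two paths built from
 * identical fib_route_path_t input hash equal even if only one of them is
 * currently resolved:
 *
 *    ASSERT(fib_path_hash(pi1) == fib_path_hash(pi2));
 */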
1279
1280 /*
1281  * fib_path_cmp_i
1282  *
1283  * Compare two paths for equivalence.
1284  */
1285 static int
1286 fib_path_cmp_i (const fib_path_t *path1,
1287                 const fib_path_t *path2)
1288 {
1289     int res;
1290
1291     res = 1;
1292
1293     /*
1294      * paths of different types and protocol are not equal.
1295      * paths that differ only in weight and/or preference are considered equal.
1296      */
1297     if (path1->fp_type != path2->fp_type)
1298     {
1299         res = (path1->fp_type - path2->fp_type);
1300     }
1301     else if (path1->fp_nh_proto != path2->fp_nh_proto)
1302     {
1303         res = (path1->fp_nh_proto - path2->fp_nh_proto);
1304     }
1305     else
1306     {
1307         /*
1308          * both paths are of the same type.
1309          * consider each type and its attributes in turn.
1310          */
1311         switch (path1->fp_type)
1312         {
1313         case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
1314             res = ip46_address_cmp(&path1->attached_next_hop.fp_nh,
1315                                    &path2->attached_next_hop.fp_nh);
1316             if (0 == res) {
1317                 res = (path1->attached_next_hop.fp_interface -
1318                        path2->attached_next_hop.fp_interface);
1319             }
1320             break;
1321         case FIB_PATH_TYPE_ATTACHED:
1322             res = (path1->attached.fp_interface -
1323                    path2->attached.fp_interface);
1324             break;
1325         case FIB_PATH_TYPE_RECURSIVE:
1326             res = ip46_address_cmp(&path1->recursive.fp_nh,
1327                                    &path2->recursive.fp_nh);
1328  
1329             if (0 == res)
1330             {
1331                 res = (path1->recursive.fp_tbl_id - path2->recursive.fp_tbl_id);
1332             }
1333             break;
1334         case FIB_PATH_TYPE_DEAG:
1335             res = (path1->deag.fp_tbl_id - path2->deag.fp_tbl_id);
1336             if (0 == res)
1337             {
1338                 res = (path1->deag.fp_rpf_id - path2->deag.fp_rpf_id);
1339             }
1340             break;
1341         case FIB_PATH_TYPE_INTF_RX:
1342             res = (path1->intf_rx.fp_interface - path2->intf_rx.fp_interface);
1343             break;
1344         case FIB_PATH_TYPE_SPECIAL:
1345         case FIB_PATH_TYPE_RECEIVE:
1346         case FIB_PATH_TYPE_EXCLUSIVE:
1347             res = 0;
1348             break;
1349         }
1350     }
1351     return (res);
1352 }
1353
1354 /*
1355  * fib_path_cmp_for_sort
1356  *
1357  * Compare two paths for equivalence. Used during path sorting.
1358  * As usual 0 means equal.
1359  */
1360 int
1361 fib_path_cmp_for_sort (void * v1,
1362                        void * v2)
1363 {
1364     fib_node_index_t *pi1 = v1, *pi2 = v2;
1365     fib_path_t *path1, *path2;
1366
1367     path1 = fib_path_get(*pi1);
1368     path2 = fib_path_get(*pi2);
1369
1370     /*
1371      * when sorting paths we want the highest preference paths
1372      * first, so that the set of choices built is in preference order
1373      */
1374     if (path1->fp_preference != path2->fp_preference)
1375     {
1376         return (path1->fp_preference - path2->fp_preference);
1377     }
1378
1379     return (fib_path_cmp_i(path1, path2));
1380 }
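/*
 * A usage sketch (assumed caller, e.g. a path-list sorting its vector of
 * path indices with vppinfra's vec_sort_with_function):
 *
 *    vec_sort_with_function(path_indices, fib_path_cmp_for_sort);
 *
 * after which the indices are grouped best (lowest) preference first.
 */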
1381
1382 /*
1383  * fib_path_cmp
1384  *
1385  * Compare two paths for equivalence.
1386  */
1387 int
1388 fib_path_cmp (fib_node_index_t pi1,
1389               fib_node_index_t pi2)
1390 {
1391     fib_path_t *path1, *path2;
1392
1393     path1 = fib_path_get(pi1);
1394     path2 = fib_path_get(pi2);
1395
1396     return (fib_path_cmp_i(path1, path2));
1397 }
1398
1399 int
1400 fib_path_cmp_w_route_path (fib_node_index_t path_index,
1401                            const fib_route_path_t *rpath)
1402 {
1403     fib_path_t *path;
1404     int res;
1405
1406     path = fib_path_get(path_index);
1407
1408     res = 1;
1409
1410     if (path->fp_weight != rpath->frp_weight)
1411     {
1412         res = (path->fp_weight - rpath->frp_weight);
1413     }
1414     else
1415     {
1416         /*
1417          * compare the path's type-specific attributes against the route-path.
1418          * consider each type and its attributes in turn.
1419          */
1420         switch (path->fp_type)
1421         {
1422         case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
1423             res = ip46_address_cmp(&path->attached_next_hop.fp_nh,
1424                                    &rpath->frp_addr);
1425             if (0 == res)
1426             {
1427                 res = (path->attached_next_hop.fp_interface -
1428                        rpath->frp_sw_if_index);
1429             }
1430             break;
1431         case FIB_PATH_TYPE_ATTACHED:
1432             res = (path->attached.fp_interface - rpath->frp_sw_if_index);
1433             break;
1434         case FIB_PATH_TYPE_RECURSIVE:
1435             if (FIB_PROTOCOL_MPLS == path->fp_nh_proto)
1436             {
1437                 res = path->recursive.fp_nh.fp_local_label - rpath->frp_local_label;
1438
1439                 if (res == 0)
1440                 {
1441                     res = path->recursive.fp_nh.fp_eos - rpath->frp_eos;
1442                 }
1443             }
1444             else
1445             {
1446                 res = ip46_address_cmp(&path->recursive.fp_nh.fp_ip,
1447                                        &rpath->frp_addr);
1448             }
1449
1450             if (0 == res)
1451             {
1452                 res = (path->recursive.fp_tbl_id - rpath->frp_fib_index);
1453             }
1454             break;
1455         case FIB_PATH_TYPE_INTF_RX:
1456             res = (path->intf_rx.fp_interface - rpath->frp_sw_if_index);
1457             break;
1458         case FIB_PATH_TYPE_DEAG:
1459             res = (path->deag.fp_tbl_id - rpath->frp_fib_index);
1460             if (0 == res)
1461             {
1462                 res = (path->deag.fp_rpf_id - rpath->frp_rpf_id);
1463             }
1464             break;
1465         case FIB_PATH_TYPE_SPECIAL:
1466         case FIB_PATH_TYPE_RECEIVE:
1467         case FIB_PATH_TYPE_EXCLUSIVE:
1468             res = 0;
1469             break;
1470         }
1471     }
1472     return (res);
1473 }
1474
1475 /*
1476  * fib_path_recursive_loop_detect
1477  *
1478  * A forward walk of the FIB object graph to detect for a cycle/loop. This
1479  * walk is initiated when an entry is linking to a new path list or from an old.
1480  * The entry vector passed contains all the FIB entries that are children of this
1481  * path (it is all the entries encountered on the walk so far). If this vector
1482  * contains the entry this path resolves via, then a loop is about to form.
1483  * The loop must be allowed to form, since we need the dependencies in place
1484  * so that we can track when the loop breaks.
1485  * However, we MUST not produce a loop in the forwarding graph (else packets
1486  * would loop around the switch path until the loop breaks), so we mark recursive
1487  * paths as looped so that they do not contribute forwarding information.
1488  * By marking the path as looped, an entry such as:
1489  *    X/Y
1490  *     via a.a.a.a (looped)
1491  *     via b.b.b.b (not looped)
1492  * can still forward using the info provided by b.b.b.b only
1493  */
1494 int
1495 fib_path_recursive_loop_detect (fib_node_index_t path_index,
1496                                 fib_node_index_t **entry_indicies)
1497 {
1498     fib_path_t *path;
1499
1500     path = fib_path_get(path_index);
1501
1502     /*
1503      * the forced drop path is never looped, since it is never resolved.
1504      */
1505     if (fib_path_is_permanent_drop(path))
1506     {
1507         return (0);
1508     }
1509
1510     switch (path->fp_type)
1511     {
1512     case FIB_PATH_TYPE_RECURSIVE:
1513     {
1514         fib_node_index_t *entry_index, *entries;
1515         int looped = 0;
1516         entries = *entry_indicies;
1517
1518         vec_foreach(entry_index, entries) {
1519             if (*entry_index == path->fp_via_fib)
1520             {
1521                 /*
1522                  * the entry that is about to link to this path-list (or
1523                  * one of this path-list's children) is the same entry that
1524                  * this recursive path resolves through. this is a cycle.
1525                  * abort the walk.
1526                  */
1527                 looped = 1;
1528                 break;
1529             }
1530         }
1531
1532         if (looped)
1533         {
1534             FIB_PATH_DBG(path, "recursive loop formed");
1535             path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RECURSIVE_LOOP;
1536
1537             dpo_copy(&path->fp_dpo,
1538                     drop_dpo_get(fib_proto_to_dpo(path->fp_nh_proto)));
1539         }
1540         else
1541         {
1542             /*
1543              * no loop here yet. keep forward walking the graph.
1544              */     
1545             if (fib_entry_recursive_loop_detect(path->fp_via_fib, entry_indicies))
1546             {
1547                 FIB_PATH_DBG(path, "recursive loop formed");
1548                 path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RECURSIVE_LOOP;
1549             }
1550             else
1551             {
1552                 FIB_PATH_DBG(path, "recursive loop cleared");
1553                 path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RECURSIVE_LOOP;
1554             }
1555         }
1556         break;
1557     }
1558     case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
1559     case FIB_PATH_TYPE_ATTACHED:
1560     case FIB_PATH_TYPE_SPECIAL:
1561     case FIB_PATH_TYPE_DEAG:
1562     case FIB_PATH_TYPE_RECEIVE:
1563     case FIB_PATH_TYPE_INTF_RX:
1564     case FIB_PATH_TYPE_EXCLUSIVE:
1565         /*
1566          * these path types cannot be part of a loop, since they are the leaves
1567          * of the graph.
1568          */
1569         break;
1570     }
1571
1572     return (fib_path_is_looped(path_index));
1573 }
1574
1575 int
1576 fib_path_resolve (fib_node_index_t path_index)
1577 {
1578     fib_path_t *path;
1579
1580     path = fib_path_get(path_index);
1581
1582     /*
1583      * hope for the best.
1584      */
1585     path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RESOLVED;
1586
1587     /*
1588      * the forced drop path resolves via the drop adj
1589      */
1590     if (fib_path_is_permanent_drop(path))
1591     {
1592         dpo_copy(&path->fp_dpo,
1593                  drop_dpo_get(fib_proto_to_dpo(path->fp_nh_proto)));
1594         path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
1595         return (fib_path_is_resolved(path_index));
1596     }
1597
1598     switch (path->fp_type)
1599     {
1600     case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
1601         fib_path_attached_next_hop_set(path);
1602         break;
1603     case FIB_PATH_TYPE_ATTACHED:
1604         /*
1605          * path->attached.fp_interface
1606          */
1607         if (!vnet_sw_interface_is_admin_up(vnet_get_main(),
1608                                            path->attached.fp_interface))
1609         {
1610             path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
1611         }
1612         dpo_set(&path->fp_dpo,
1613                 DPO_ADJACENCY,
1614                 fib_proto_to_dpo(path->fp_nh_proto),
1615                 fib_path_attached_get_adj(path,
1616                                           fib_proto_to_link(path->fp_nh_proto)));
1617
1618         /*
1619          * become a child of the adjacency so we receive updates
1620          * when the interface state changes
1621          */
1622         path->fp_sibling = adj_child_add(path->fp_dpo.dpoi_index,
1623                                          FIB_NODE_TYPE_PATH,
1624                                          fib_path_get_index(path));
1625
1626         break;
1627     case FIB_PATH_TYPE_RECURSIVE:
1628     {
1629         /*
1630          * Create a RR source entry in the table for the address
1631          * that this path recurses through.
1632          * This resolve action is recursive, hence we may create
1633          * more paths in the process. More creates may mean a pool realloc,
1634          * which would move this path; hence 'path' is re-fetched below.
1635          */
1636         fib_node_index_t fei;
1637         fib_prefix_t pfx;
1638
1639         ASSERT(FIB_NODE_INDEX_INVALID == path->fp_via_fib);
1640
1641         if (FIB_PROTOCOL_MPLS == path->fp_nh_proto)
1642         {
1643             fib_prefix_from_mpls_label(path->recursive.fp_nh.fp_local_label,
1644                                        path->recursive.fp_nh.fp_eos,
1645                                        &pfx);
1646         }
1647         else
1648         {
1649             fib_prefix_from_ip46_addr(&path->recursive.fp_nh.fp_ip, &pfx);
1650         }
1651
1652         fei = fib_table_entry_special_add(path->recursive.fp_tbl_id,
1653                                           &pfx,
1654                                           FIB_SOURCE_RR,
1655                                           FIB_ENTRY_FLAG_NONE);
1656
1657         path = fib_path_get(path_index);
1658         path->fp_via_fib = fei;
1659
1660         /*
1661          * become a dependent child of the entry so the path is 
1662          * informed when the forwarding for the entry changes.
1663          */
1664         path->fp_sibling = fib_entry_child_add(path->fp_via_fib,
1665                                                FIB_NODE_TYPE_PATH,
1666                                                fib_path_get_index(path));
1667
1668         /*
1669          * create and configure the IP DPO
1670          */
1671         fib_path_recursive_adj_update(
1672             path,
1673             fib_path_to_chain_type(path),
1674             &path->fp_dpo);
1675
1676         break;
1677     }
1678     case FIB_PATH_TYPE_SPECIAL:
1679         /*
1680          * Resolve via the drop
1681          */
1682         dpo_copy(&path->fp_dpo,
1683                  drop_dpo_get(fib_proto_to_dpo(path->fp_nh_proto)));
1684         break;
1685     case FIB_PATH_TYPE_DEAG:
1686     {
1687         /*
1688          * Resolve via a lookup DPO.
1689          * FIXME. control plane should add routes with a table ID
1690          */
1691         lookup_cast_t cast;
1692
1693         cast = (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_RPF_ID ?
1694                 LOOKUP_MULTICAST :
1695                 LOOKUP_UNICAST);
1696
1697         lookup_dpo_add_or_lock_w_fib_index(path->deag.fp_tbl_id,
1698                                            fib_proto_to_dpo(path->fp_nh_proto),
1699                                            cast,
1700                                            LOOKUP_INPUT_DST_ADDR,
1701                                            LOOKUP_TABLE_FROM_CONFIG,
1702                                            &path->fp_dpo);
1703         break;
1704     }
1705     case FIB_PATH_TYPE_RECEIVE:
1706         /*
1707          * Resolve via a receive DPO.
1708          */
1709         receive_dpo_add_or_lock(fib_proto_to_dpo(path->fp_nh_proto),
1710                                 path->receive.fp_interface,
1711                                 &path->receive.fp_addr,
1712                                 &path->fp_dpo);
1713         break;
1714     case FIB_PATH_TYPE_INTF_RX: {
1715         /*
1716          * Resolve via an interface DPO.
1717          */
1718         interface_dpo_add_or_lock(fib_proto_to_dpo(path->fp_nh_proto),
1719                                   path->intf_rx.fp_interface,
1720                                   &path->fp_dpo);
1721         break;
1722     }
1723     case FIB_PATH_TYPE_EXCLUSIVE:
1724         /*
1725          * Resolve via the user provided DPO
1726          */
1727         dpo_copy(&path->fp_dpo, &path->exclusive.fp_ex_dpo);
1728         break;
1729     }
1730
1731     return (fib_path_is_resolved(path_index));
1732 }
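
/*
 * Illustrative sketch only (not part of the original file): the realloc
 * hazard noted in the FIB_PATH_TYPE_RECURSIVE case above. A call that can
 * create further paths (such as fib_table_entry_special_add()) may grow the
 * path pool and thus invalidate any fib_path_t pointer held across the
 * call, so the pointer is re-loaded from its (stable) index afterwards.
 * The helper name and its arguments are hypothetical.
 */
static __attribute__((unused)) void
fib_path_example_refetch_after_alloc (fib_node_index_t path_index,
                                      u32 fib_index,
                                      fib_prefix_t *pfx)
{
    fib_path_t *path;

    path = fib_path_get(path_index);

    /*
     * this may pool_get() more paths and hence realloc the pool,
     * leaving 'path' dangling
     */
    fib_table_entry_special_add(fib_index, pfx,
                                FIB_SOURCE_RR,
                                FIB_ENTRY_FLAG_NONE);

    /*
     * re-load the pointer from the index before using it again
     */
    path = fib_path_get(path_index);
    (void) path;
}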
1733
1734 u32
1735 fib_path_get_resolving_interface (fib_node_index_t path_index)
1736 {
1737     fib_path_t *path;
1738
1739     path = fib_path_get(path_index);
1740
1741     switch (path->fp_type)
1742     {
1743     case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
1744         return (path->attached_next_hop.fp_interface);
1745     case FIB_PATH_TYPE_ATTACHED:
1746         return (path->attached.fp_interface);
1747     case FIB_PATH_TYPE_RECEIVE:
1748         return (path->receive.fp_interface);
1749     case FIB_PATH_TYPE_RECURSIVE:
1750         if (fib_path_is_resolved(path_index))
1751         {
1752             return (fib_entry_get_resolving_interface(path->fp_via_fib));
1753         }
1754         break;
1755     case FIB_PATH_TYPE_INTF_RX:
1756     case FIB_PATH_TYPE_SPECIAL:
1757     case FIB_PATH_TYPE_DEAG:
1758     case FIB_PATH_TYPE_EXCLUSIVE:
1759         break;
1760     }
1761     return (~0);
1762 }
1763
1764 adj_index_t
1765 fib_path_get_adj (fib_node_index_t path_index)
1766 {
1767     fib_path_t *path;
1768
1769     path = fib_path_get(path_index);
1770
1771     ASSERT(dpo_is_adj(&path->fp_dpo));
1772     if (dpo_is_adj(&path->fp_dpo))
1773     {
1774         return (path->fp_dpo.dpoi_index);
1775     }
1776     return (ADJ_INDEX_INVALID);
1777 }
1778
1779 u16
1780 fib_path_get_weight (fib_node_index_t path_index)
1781 {
1782     fib_path_t *path;
1783
1784     path = fib_path_get(path_index);
1785
1786     ASSERT(path);
1787
1788     return (path->fp_weight);
1789 }
1790
1791 u16
1792 fib_path_get_preference (fib_node_index_t path_index)
1793 {
1794     fib_path_t *path;
1795
1796     path = fib_path_get(path_index);
1797
1798     ASSERT(path);
1799
1800     return (path->fp_preference);
1801 }
1802
1803 /**
1804  * @brief Contribute the path's adjacency to the list passed.
1805  * By calling this function over all paths, recursively, a child
1806  * can construct its full set of forwarding adjacencies, and hence its
1807  * uRPF list.
1808  */
1809 void
1810 fib_path_contribute_urpf (fib_node_index_t path_index,
1811                           index_t urpf)
1812 {
1813     fib_path_t *path;
1814
1815     path = fib_path_get(path_index);
1816
1817     /*
1818      * resolved and unresolved paths contribute to the RPF list.
1819      */
1820     switch (path->fp_type)
1821     {
1822     case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
1823         fib_urpf_list_append(urpf, path->attached_next_hop.fp_interface);
1824         break;
1825
1826     case FIB_PATH_TYPE_ATTACHED:
1827         fib_urpf_list_append(urpf, path->attached.fp_interface);
1828         break;
1829
1830     case FIB_PATH_TYPE_RECURSIVE:
1831         if (FIB_NODE_INDEX_INVALID != path->fp_via_fib &&
1832             !fib_path_is_looped(path_index))
1833         {
1834             /*
1835              * a path may be unresolved because of its resolution constraints,
1836              * or because it has no via entry; without a via there is nothing to add.
1837              */
1838             fib_entry_contribute_urpf(path->fp_via_fib, urpf);
1839         }
1840         break;
1841
1842     case FIB_PATH_TYPE_EXCLUSIVE:
1843     case FIB_PATH_TYPE_SPECIAL:
1844         /*
1845          * these path types may link to an adj, if that's what
1846          * the client gave
1847          */
1848         if (dpo_is_adj(&path->fp_dpo))
1849         {
1850             ip_adjacency_t *adj;
1851
1852             adj = adj_get(path->fp_dpo.dpoi_index);
1853
1854             fib_urpf_list_append(urpf, adj->rewrite_header.sw_if_index);
1855         }
1856         break;
1857
1858     case FIB_PATH_TYPE_DEAG:
1859     case FIB_PATH_TYPE_RECEIVE:
1860     case FIB_PATH_TYPE_INTF_RX:
1861         /*
1862          * these path types don't link to an adj
1863          */
1864         break;
1865     }
1866 }
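
/*
 * Illustrative sketch only (not part of the original file): how a child
 * object can accumulate its uRPF list by calling fib_path_contribute_urpf()
 * for each of its paths, as described in the comment above. In practice
 * this walk is performed via the path-list; the helper name and the vector
 * of path indices are hypothetical.
 */
static __attribute__((unused)) index_t
fib_path_example_build_urpf (fib_node_index_t *path_indices)
{
    fib_node_index_t *path_index;
    index_t urpf;

    /* allocate an empty, locked uRPF list ... */
    urpf = fib_urpf_list_alloc_and_lock();

    /* ... and let each path append the interface(s) it resolves through */
    vec_foreach(path_index, path_indices)
    {
        fib_path_contribute_urpf(*path_index, urpf);
    }

    /* de-duplicate the interface set before it is used */
    fib_urpf_list_bake(urpf);

    return (urpf);
}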
1867
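/**
 * For a de-aggregation path, wrap the DPO passed in an MPLS disposition
 * DPO that records the payload protocol and the path's RPF-ID before
 * handing the packet to the original DPO; all other path types leave the
 * DPO untouched.
 */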
1868 void
1869 fib_path_stack_mpls_disp (fib_node_index_t path_index,
1870                           dpo_proto_t payload_proto,
1871                           dpo_id_t *dpo)
1872 {
1873     fib_path_t *path;
1874
1875     path = fib_path_get(path_index);
1876
1877     ASSERT(path);
1878
1879     switch (path->fp_type)
1880     {
1881     case FIB_PATH_TYPE_DEAG:
1882     {
1883         dpo_id_t tmp = DPO_INVALID;
1884
1885         dpo_copy(&tmp, dpo);
1886         dpo_set(dpo,
1887                 DPO_MPLS_DISPOSITION,
1888                 payload_proto,
1889                 mpls_disp_dpo_create(payload_proto,
1890                                      path->deag.fp_rpf_id,
1891                                      &tmp));
1892         dpo_reset(&tmp);
1893         break;
1894     }
1895     case FIB_PATH_TYPE_RECEIVE:
1896     case FIB_PATH_TYPE_ATTACHED:
1897     case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
1898     case FIB_PATH_TYPE_RECURSIVE:
1899     case FIB_PATH_TYPE_INTF_RX:
1900     case FIB_PATH_TYPE_EXCLUSIVE:
1901     case FIB_PATH_TYPE_SPECIAL:
1902         break;
1903     }
1904 }
1905
1906 void
1907 fib_path_contribute_forwarding (fib_node_index_t path_index,
1908                                 fib_forward_chain_type_t fct,
1909                                 dpo_id_t *dpo)
1910 {
1911     fib_path_t *path;
1912
1913     path = fib_path_get(path_index);
1914
1915     ASSERT(path);
1916     ASSERT(FIB_FORW_CHAIN_TYPE_MPLS_EOS != fct);
1917
1918     FIB_PATH_DBG(path, "contribute");
1919
1920     /*
1921      * The DPO stored in the path was created when the path was resolved.
1922      * This represents the path's 'native' protocol, e.g. IP. For all
1923      * other chain types we construct one below (see the sketch after this function).
1924      */
1925     if (fib_path_to_chain_type(path) == fct)
1926     {
1927         dpo_copy(dpo, &path->fp_dpo);
1928     }
1929     else
1930     {
1931         switch (path->fp_type)
1932         {
1933         case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
1934             switch (fct)
1935             {
1936             case FIB_FORW_CHAIN_TYPE_UNICAST_IP4:
1937             case FIB_FORW_CHAIN_TYPE_UNICAST_IP6:
1938             case FIB_FORW_CHAIN_TYPE_MPLS_EOS:
1939             case FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS:
1940             case FIB_FORW_CHAIN_TYPE_ETHERNET:
1941             case FIB_FORW_CHAIN_TYPE_NSH:
1942             {
1943                 adj_index_t ai;
1944
1945                 /*
1946                  * get an appropriate link-type adj.
1947                  */
1948                 ai = fib_path_attached_next_hop_get_adj(
1949                          path,
1950                          fib_forw_chain_type_to_link_type(fct));
1951                 dpo_set(dpo, DPO_ADJACENCY,
1952                         fib_forw_chain_type_to_dpo_proto(fct), ai);
1953                 adj_unlock(ai);
1954
1955                 break;
1956             }
1957             case FIB_FORW_CHAIN_TYPE_MCAST_IP4:
1958             case FIB_FORW_CHAIN_TYPE_MCAST_IP6:
1959                 break;
1960             }
1961             break;
1962         case FIB_PATH_TYPE_RECURSIVE:
1963             switch (fct)
1964             {
1965             case FIB_FORW_CHAIN_TYPE_MPLS_EOS:
1966             case FIB_FORW_CHAIN_TYPE_UNICAST_IP4:
1967             case FIB_FORW_CHAIN_TYPE_UNICAST_IP6:
1968             case FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS:
1969             case FIB_FORW_CHAIN_TYPE_MCAST_IP4:
1970             case FIB_FORW_CHAIN_TYPE_MCAST_IP6:
1971                 fib_path_recursive_adj_update(path, fct, dpo);
1972                 break;
1973             case FIB_FORW_CHAIN_TYPE_ETHERNET:
1974             case FIB_FORW_CHAIN_TYPE_NSH:
1975                 ASSERT(0);
1976                 break;
1977             }
1978             break;
1979         case FIB_PATH_TYPE_DEAG:
1980             switch (fct)
1981             {
1982             case FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS:
1983                 lookup_dpo_add_or_lock_w_table_id(MPLS_FIB_DEFAULT_TABLE_ID,
1984                                                   DPO_PROTO_MPLS,
1985                                                   LOOKUP_UNICAST,
1986                                                   LOOKUP_INPUT_DST_ADDR,
1987                                                   LOOKUP_TABLE_FROM_CONFIG,
1988                                                   dpo);
1989                 break;
1990             case FIB_FORW_CHAIN_TYPE_MPLS_EOS:
1991             case FIB_FORW_CHAIN_TYPE_UNICAST_IP4:
1992             case FIB_FORW_CHAIN_TYPE_UNICAST_IP6:
1993                 dpo_copy(dpo, &path->fp_dpo);
1994                 break;
1995             case FIB_FORW_CHAIN_TYPE_MCAST_IP4:
1996             case FIB_FORW_CHAIN_TYPE_MCAST_IP6:
1997             case FIB_FORW_CHAIN_TYPE_ETHERNET:
1998             case FIB_FORW_CHAIN_TYPE_NSH:
1999                 ASSERT(0);
2000                 break;
2001             }
2002             break;
2003         case FIB_PATH_TYPE_EXCLUSIVE:
2004             dpo_copy(dpo, &path->exclusive.fp_ex_dpo);
2005             break;
2006         case FIB_PATH_TYPE_ATTACHED:
2007             switch (fct)
2008             {
2009             case FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS:
2010             case FIB_FORW_CHAIN_TYPE_UNICAST_IP4:
2011             case FIB_FORW_CHAIN_TYPE_UNICAST_IP6:
2012             case FIB_FORW_CHAIN_TYPE_MPLS_EOS:
2013             case FIB_FORW_CHAIN_TYPE_ETHERNET:
2014             case FIB_FORW_CHAIN_TYPE_NSH:
2015                 {
2016                     adj_index_t ai;
2017
2018                     /*
2019                      * get an appropriate link-type adj.
2020                      */
2021                     ai = fib_path_attached_get_adj(
2022                             path,
2023                             fib_forw_chain_type_to_link_type(fct));
2024                     dpo_set(dpo, DPO_ADJACENCY,
2025                             fib_forw_chain_type_to_dpo_proto(fct), ai);
2026                     adj_unlock(ai);
2027                     break;
2028                 }
2029             case FIB_FORW_CHAIN_TYPE_MCAST_IP4:
2030             case FIB_FORW_CHAIN_TYPE_MCAST_IP6:
2031                 {
2032                     adj_index_t ai;
2033
2034                     /*
2035                      * Create the adj needed for sending IP multicast traffic
2036                      */
2037                     ai = adj_mcast_add_or_lock(path->fp_nh_proto,
2038                                                fib_forw_chain_type_to_link_type(fct),
2039                                                path->attached.fp_interface);
2040                     dpo_set(dpo, DPO_ADJACENCY,
2041                             fib_forw_chain_type_to_dpo_proto(fct),
2042                             ai);
2043                     adj_unlock(ai);
2044                 }
2045                 break;
2046             }
2047             break;
2048         case FIB_PATH_TYPE_INTF_RX:
2049             /*
2050              * Create an interface DPO for the path's interface
2051              */
2052             interface_dpo_add_or_lock(fib_forw_chain_type_to_dpo_proto(fct),
2053                                       path->intf_rx.fp_interface,
2054                                       dpo);
2055             break;
2056         case FIB_PATH_TYPE_RECEIVE:
2057         case FIB_PATH_TYPE_SPECIAL:
2058             dpo_copy(dpo, &path->fp_dpo);
2059             break;
2060         }
2061     }
2062 }
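
/*
 * Illustrative sketch only (not part of the original file): a caller that
 * wants a particular forwarding contribution from a path asks for it by
 * chain type; per the switch above, the path either hands back its 'native'
 * DPO or constructs a chain-specific one. The helper name is hypothetical.
 */
static __attribute__((unused)) void
fib_path_example_get_ip4_forwarding (fib_node_index_t path_index)
{
    dpo_id_t dpo = DPO_INVALID;

    fib_path_contribute_forwarding(path_index,
                                   FIB_FORW_CHAIN_TYPE_UNICAST_IP4,
                                   &dpo);

    /* ... a real caller would stack this DPO on its own object ... */

    dpo_reset(&dpo);
}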
2063
2064 load_balance_path_t *
2065 fib_path_append_nh_for_multipath_hash (fib_node_index_t path_index,
2066                                        fib_forward_chain_type_t fct,
2067                                        load_balance_path_t *hash_key)
2068 {
2069     load_balance_path_t *mnh;
2070     fib_path_t *path;
2071
2072     path = fib_path_get(path_index);
2073
2074     ASSERT(path);
2075
2076     if (fib_path_is_resolved(path_index))
2077     {
2078         vec_add2(hash_key, mnh, 1);
2079
2080         mnh->path_weight = path->fp_weight;
2081         mnh->path_index = path_index;
2082         fib_path_contribute_forwarding(path_index, fct, &mnh->path_dpo);
2083     }
2084
2085     return (hash_key);
2086 }
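
/*
 * Illustrative sketch only (not part of the original file): collecting the
 * next-hops for a load-balance from a set of paths using the function
 * above; unresolved paths contribute nothing. The helper name and the
 * vector of path indices are hypothetical.
 */
static __attribute__((unused)) load_balance_path_t *
fib_path_example_collect_nhs (fib_node_index_t *path_indices,
                              fib_forward_chain_type_t fct)
{
    load_balance_path_t *nhs = NULL;
    fib_node_index_t *path_index;

    vec_foreach(path_index, path_indices)
    {
        nhs = fib_path_append_nh_for_multipath_hash(*path_index, fct, nhs);
    }

    return (nhs);
}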
2087
2088 int
2089 fib_path_is_recursive_constrained (fib_node_index_t path_index)
2090 {
2091     fib_path_t *path;
2092
2093     path = fib_path_get(path_index);
2094
2095     return ((FIB_PATH_TYPE_RECURSIVE == path->fp_type) &&
2096             ((path->fp_cfg_flags & FIB_PATH_CFG_FLAG_RESOLVE_ATTACHED) ||
2097              (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_RESOLVE_HOST)));
2098 }
2099
2100 int
2101 fib_path_is_exclusive (fib_node_index_t path_index)
2102 {
2103     fib_path_t *path;
2104
2105     path = fib_path_get(path_index);
2106
2107     return (FIB_PATH_TYPE_EXCLUSIVE == path->fp_type);
2108 }
2109
2110 int
2111 fib_path_is_deag (fib_node_index_t path_index)
2112 {
2113     fib_path_t *path;
2114
2115     path = fib_path_get(path_index);
2116
2117     return (FIB_PATH_TYPE_DEAG == path->fp_type);
2118 }
2119
2120 int
2121 fib_path_is_resolved (fib_node_index_t path_index)
2122 {
2123     fib_path_t *path;
2124
2125     path = fib_path_get(path_index);
2126
2127     return (dpo_id_is_valid(&path->fp_dpo) &&
2128             (path->fp_oper_flags & FIB_PATH_OPER_FLAG_RESOLVED) &&
2129             !fib_path_is_looped(path_index) &&
2130             !fib_path_is_permanent_drop(path));
2131 }
2132
2133 int
2134 fib_path_is_looped (fib_node_index_t path_index)
2135 {
2136     fib_path_t *path;
2137
2138     path = fib_path_get(path_index);
2139
2140     return (path->fp_oper_flags & FIB_PATH_OPER_FLAG_RECURSIVE_LOOP);
2141 }
2142
2143 fib_path_list_walk_rc_t
2144 fib_path_encode (fib_node_index_t path_list_index,
2145                  fib_node_index_t path_index,
2146                  void *ctx)
2147 {
2148     fib_route_path_encode_t **api_rpaths = ctx;
2149     fib_route_path_encode_t *api_rpath;
2150     fib_path_t *path;
2151
2152     path = fib_path_get(path_index);
2153     if (!path)
2154       return (FIB_PATH_LIST_WALK_CONTINUE);
2155     vec_add2(*api_rpaths, api_rpath, 1);
2156     api_rpath->rpath.frp_weight = path->fp_weight;
2157     api_rpath->rpath.frp_preference = path->fp_preference;
2158     api_rpath->rpath.frp_proto = path->fp_nh_proto;
2159     api_rpath->rpath.frp_sw_if_index = ~0;
2160     api_rpath->dpo = path->exclusive.fp_ex_dpo;
2161     switch (path->fp_type)
2162       {
2163       case FIB_PATH_TYPE_RECEIVE:
2164         api_rpath->rpath.frp_addr = path->receive.fp_addr;
2165         api_rpath->rpath.frp_sw_if_index = path->receive.fp_interface;
2166         api_rpath->dpo = path->fp_dpo;
2167         break;
2168       case FIB_PATH_TYPE_ATTACHED:
2169         api_rpath->rpath.frp_sw_if_index = path->attached.fp_interface;
2170         api_rpath->dpo = path->fp_dpo;
2171         break;
2172       case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
2173         api_rpath->rpath.frp_sw_if_index = path->attached_next_hop.fp_interface;
2174         api_rpath->rpath.frp_addr = path->attached_next_hop.fp_nh;
2175         break;
2176       case FIB_PATH_TYPE_SPECIAL:
2177         break;
2178       case FIB_PATH_TYPE_DEAG:
2179         break;
2180       case FIB_PATH_TYPE_RECURSIVE:
2181         api_rpath->rpath.frp_addr = path->recursive.fp_nh.fp_ip;
2182         break;
2183       default:
2184         break;
2185       }
2186     return (FIB_PATH_LIST_WALK_CONTINUE);
2187 }
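
/*
 * Illustrative sketch only (not part of the original file): fib_path_encode()
 * is used as a fib_path_list_walk_fn_t; a caller walks a path-list and
 * collects the encoded paths into a vector that it later frees. The helper
 * name is hypothetical.
 */
static __attribute__((unused)) void
fib_path_example_encode_path_list (fib_node_index_t path_list_index)
{
    fib_route_path_encode_t *api_rpaths = NULL;

    fib_path_list_walk(path_list_index, fib_path_encode, &api_rpaths);

    /* ... marshal api_rpaths into an API reply here ... */

    vec_free(api_rpaths);
}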
2188
2189 fib_protocol_t
2190 fib_path_get_proto (fib_node_index_t path_index)
2191 {
2192     fib_path_t *path;
2193
2194     path = fib_path_get(path_index);
2195
2196     return (path->fp_nh_proto);
2197 }
2198
2199 void
2200 fib_path_module_init (void)
2201 {
2202     fib_node_register_type (FIB_NODE_TYPE_PATH, &fib_path_vft);
2203 }
2204
2205 static clib_error_t *
2206 show_fib_path_command (vlib_main_t * vm,
2207                         unformat_input_t * input,
2208                         vlib_cli_command_t * cmd)
2209 {
2210     fib_node_index_t pi;
2211     fib_path_t *path;
2212
2213     if (unformat (input, "%d", &pi))
2214     {
2215         /*
2216          * show one in detail
2217          */
2218         if (!pool_is_free_index(fib_path_pool, pi))
2219         {
2220             path = fib_path_get(pi);
2221             u8 *s = fib_path_format(pi, NULL);
2222             s = format(s, "children:");
2223             s = fib_node_children_format(path->fp_node.fn_children, s);
2224             vlib_cli_output (vm, "%s", s);
2225             vec_free(s);
2226         }
2227         else
2228         {
2229             vlib_cli_output (vm, "path %d invalid", pi);
2230         }
2231     }
2232     else
2233     {
2234         vlib_cli_output (vm, "FIB Paths");
2235         pool_foreach(path, fib_path_pool,
2236         ({
2237             vlib_cli_output (vm, "%U", format_fib_path, path);
2238         }));
2239     }
2240
2241     return (NULL);
2242 }
2243
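/**
 * CLI registration for the command above: "show fib paths" dumps every
 * path in the pool, while "show fib paths <index>" shows one path in
 * detail together with its child dependents.
 */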
2244 VLIB_CLI_COMMAND (show_fib_path, static) = {
2245   .path = "show fib paths",
2246   .function = show_fib_path_command,
2247   .short_help = "show fib paths",
2248 };