Remove unused, redundant and deprecated code from lookup.h
[vpp.git] / src / vnet / fib / fib_path.c
1 /*
2  * Copyright (c) 2016 Cisco and/or its affiliates.
3  * Licensed under the Apache License, Version 2.0 (the "License");
4  * you may not use this file except in compliance with the License.
5  * You may obtain a copy of the License at:
6  *
7  *     http://www.apache.org/licenses/LICENSE-2.0
8  *
9  * Unless required by applicable law or agreed to in writing, software
10  * distributed under the License is distributed on an "AS IS" BASIS,
11  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12  * See the License for the specific language governing permissions and
13  * limitations under the License.
14  */
15
16 #include <vlib/vlib.h>
17 #include <vnet/vnet.h>
18 #include <vnet/ip/format.h>
19 #include <vnet/ip/ip.h>
20 #include <vnet/dpo/drop_dpo.h>
21 #include <vnet/dpo/receive_dpo.h>
22 #include <vnet/dpo/load_balance_map.h>
23 #include <vnet/dpo/lookup_dpo.h>
24 #include <vnet/dpo/interface_dpo.h>
25 #include <vnet/dpo/mpls_disposition.h>
26
27 #include <vnet/adj/adj.h>
28 #include <vnet/adj/adj_mcast.h>
29
30 #include <vnet/fib/fib_path.h>
31 #include <vnet/fib/fib_node.h>
32 #include <vnet/fib/fib_table.h>
33 #include <vnet/fib/fib_entry.h>
34 #include <vnet/fib/fib_path_list.h>
35 #include <vnet/fib/fib_internal.h>
36 #include <vnet/fib/fib_urpf_list.h>
37 #include <vnet/fib/mpls_fib.h>
38
/**
 * Enumeration of path types
 */
typedef enum fib_path_type_t_ {
    /**
     * Marker. Add new types after this one.
     */
    FIB_PATH_TYPE_FIRST = 0,
    /**
     * Attached-nexthop. An interface and a nexthop are known.
     */
    FIB_PATH_TYPE_ATTACHED_NEXT_HOP = FIB_PATH_TYPE_FIRST,
    /**
     * attached. Only the interface is known.
     */
    FIB_PATH_TYPE_ATTACHED,
    /**
     * recursive. Only the next-hop is known.
     */
    FIB_PATH_TYPE_RECURSIVE,
    /**
     * special. nothing is known. so we drop.
     */
    FIB_PATH_TYPE_SPECIAL,
    /**
     * exclusive. user provided adj.
     */
    FIB_PATH_TYPE_EXCLUSIVE,
    /**
     * deag. Link to a lookup adj in the next table
     */
    FIB_PATH_TYPE_DEAG,
    /**
     * interface receive.
     */
    FIB_PATH_TYPE_INTF_RX,
    /**
     * receive. it's for-us.
     */
    FIB_PATH_TYPE_RECEIVE,
    /**
     * Marker. Add new types before this one, then update it.
     */
    FIB_PATH_TYPE_LAST = FIB_PATH_TYPE_RECEIVE,
} __attribute__ ((packed)) fib_path_type_t;
84
/**
 * The maximum number of path_types
 */
#define FIB_PATH_TYPE_MAX (FIB_PATH_TYPE_LAST + 1)

/* Designated-initializer table mapping each path type to its display name;
 * indexed by fib_path_type_t in format_fib_path(). */
#define FIB_PATH_TYPES {                                        \
    [FIB_PATH_TYPE_ATTACHED_NEXT_HOP] = "attached-nexthop",     \
    [FIB_PATH_TYPE_ATTACHED]          = "attached",             \
    [FIB_PATH_TYPE_RECURSIVE]         = "recursive",            \
    [FIB_PATH_TYPE_SPECIAL]           = "special",              \
    [FIB_PATH_TYPE_EXCLUSIVE]         = "exclusive",            \
    [FIB_PATH_TYPE_DEAG]              = "deag",                 \
    [FIB_PATH_TYPE_INTF_RX]           = "intf-rx",              \
    [FIB_PATH_TYPE_RECEIVE]           = "receive",              \
}

/* Iterate _item over every path type, markers inclusive. */
#define FOR_EACH_FIB_PATH_TYPE(_item) \
    for (_item = FIB_PATH_TYPE_FIRST; _item <= FIB_PATH_TYPE_LAST; _item++)
103
/**
 * Enumeration of path operational (i.e. derived) attributes
 */
typedef enum fib_path_oper_attribute_t_ {
    /**
     * Marker. Add new types after this one.
     */
    FIB_PATH_OPER_ATTRIBUTE_FIRST = 0,
    /**
     * The path forms part of a recursive loop.
     */
    FIB_PATH_OPER_ATTRIBUTE_RECURSIVE_LOOP = FIB_PATH_OPER_ATTRIBUTE_FIRST,
    /**
     * The path is resolved
     */
    FIB_PATH_OPER_ATTRIBUTE_RESOLVED,
    /**
     * The path is attached, despite what the next-hop may say.
     */
    FIB_PATH_OPER_ATTRIBUTE_ATTACHED,
    /**
     * The path has become a permanent drop.
     */
    FIB_PATH_OPER_ATTRIBUTE_DROP,
    /**
     * Marker. Add new types before this one, then update it.
     */
    FIB_PATH_OPER_ATTRIBUTE_LAST = FIB_PATH_OPER_ATTRIBUTE_DROP,
} __attribute__ ((packed)) fib_path_oper_attribute_t;
133
134 /**
135  * The maximum number of path operational attributes
136  */
137 #define FIB_PATH_OPER_ATTRIBUTE_MAX (FIB_PATH_OPER_ATTRIBUTE_LAST + 1)
138
/* Display names for the operational attributes, indexed by
 * fib_path_oper_attribute_t. format_fib_path() walks every attribute
 * between FIRST and LAST and prints names[oattr] with "%s", so every
 * enum value needs an entry: the ATTACHED slot was missing, which left
 * a NULL in the table and a NULL "%s" argument whenever the
 * FIB_PATH_OPER_FLAG_ATTACHED flag was set. */
#define FIB_PATH_OPER_ATTRIBUTES {                                      \
    [FIB_PATH_OPER_ATTRIBUTE_RECURSIVE_LOOP] = "recursive-loop",        \
    [FIB_PATH_OPER_ATTRIBUTE_RESOLVED]       = "resolved",              \
    [FIB_PATH_OPER_ATTRIBUTE_ATTACHED]       = "attached",              \
    [FIB_PATH_OPER_ATTRIBUTE_DROP]           = "drop",                  \
}
144
145 #define FOR_EACH_FIB_PATH_OPER_ATTRIBUTE(_item) \
146     for (_item = FIB_PATH_OPER_ATTRIBUTE_FIRST; \
147          _item <= FIB_PATH_OPER_ATTRIBUTE_LAST; \
148          _item++)
149
/**
 * Path flags from the attributes: each flag is the corresponding
 * attribute's bit position, so (1 << attribute) maps between the two.
 */
typedef enum fib_path_oper_flags_t_ {
    FIB_PATH_OPER_FLAG_NONE = 0,
    FIB_PATH_OPER_FLAG_RECURSIVE_LOOP = (1 << FIB_PATH_OPER_ATTRIBUTE_RECURSIVE_LOOP),
    FIB_PATH_OPER_FLAG_DROP = (1 << FIB_PATH_OPER_ATTRIBUTE_DROP),
    FIB_PATH_OPER_FLAG_RESOLVED = (1 << FIB_PATH_OPER_ATTRIBUTE_RESOLVED),
    FIB_PATH_OPER_FLAG_ATTACHED = (1 << FIB_PATH_OPER_ATTRIBUTE_ATTACHED),
} __attribute__ ((packed)) fib_path_oper_flags_t;
160
/**
 * A FIB path
 */
typedef struct fib_path_t_ {
    /**
     * A path is a node in the FIB graph.
     */
    fib_node_t fp_node;

    /**
     * The index of the path-list to which this path belongs
     */
    u32 fp_pl_index;

    /**
     * This marks the start of the memory area used to hash
     * the path
     */
    STRUCT_MARK(path_hash_start);

    /**
     * Configuration Flags
     */
    fib_path_cfg_flags_t fp_cfg_flags;

    /**
     * The type of the path. This is the selector for the union
     */
    fib_path_type_t fp_type;

    /**
     * The protocol of the next-hop, i.e. the address family of the
     * next-hop's address. We can't derive this from the address itself
     * since the address can be all zeros
     */
    fib_protocol_t fp_nh_proto;

    /**
     * UCMP [unnormalised] weight
     */
    u32 fp_weight;

    /**
     * per-type union of the data required to resolve the path.
     * fp_type selects the active member.
     */
    union {
        struct {
            /**
             * The next-hop
             */
            ip46_address_t fp_nh;
            /**
             * The interface
             */
            u32 fp_interface;
        } attached_next_hop;
        struct {
            /**
             * The interface
             */
            u32 fp_interface;
        } attached;
        struct {
            union
            {
                /**
                 * The next-hop
                 */
                ip46_address_t fp_ip;
                struct {
                    /**
                     * The local label to resolve through.
                     */
                    mpls_label_t fp_local_label;
                    /**
                     * The EOS bit of the resolving label
                     */
                    mpls_eos_bit_t fp_eos;
                };
            } fp_nh;
            /**
             * The FIB table index in which to find the next-hop.
             */
            fib_node_index_t fp_tbl_id;
        } recursive;
        struct {
            /**
             * The FIB index in which to perform the next lookup
             */
            fib_node_index_t fp_tbl_id;
            /**
             * The RPF-ID to tag the packets with
             */
            fib_rpf_id_t fp_rpf_id;
        } deag;
        struct {
        } special;
        struct {
            /**
             * The user provided 'exclusive' DPO
             */
            dpo_id_t fp_ex_dpo;
        } exclusive;
        struct {
            /**
             * The interface on which the local address is configured
             */
            u32 fp_interface;
            /**
             * The next-hop
             */
            ip46_address_t fp_addr;
        } receive;
        struct {
            /**
             * The interface on which the packets will be input.
             */
            u32 fp_interface;
        } intf_rx;
    };
    STRUCT_MARK(path_hash_end);

    /**
     * Members in this last section represent information that is
     * derived during resolution. It should not be copied to new paths
     * nor compared.
     */

    /**
     * Operational Flags
     */
    fib_path_oper_flags_t fp_oper_flags;

    /**
     * the resolving via fib. not part of the union, since it is not part
     * of the path's hash.
     */
    fib_node_index_t fp_via_fib;

    /**
     * The Data-path objects through which this path resolves for IP.
     */
    dpo_id_t fp_dpo;

    /**
     * the index of this path in the parent's child list.
     */
    u32 fp_sibling;
} fib_path_t;
310
/*
 * Array of strings/names for the path types and attributes,
 * built from the designated-initializer tables above.
 */
static const char *fib_path_type_names[] = FIB_PATH_TYPES;
static const char *fib_path_oper_attribute_names[] = FIB_PATH_OPER_ATTRIBUTES;
static const char *fib_path_cfg_attribute_names[]  = FIB_PATH_CFG_ATTRIBUTES;

/*
 * The memory pool from which we allocate all the paths
 */
static fib_path_t *fib_path_pool;

/*
 * Debug macro: logs the formatted path plus a caller-supplied message.
 * Compiles to nothing unless FIB_DEBUG is defined.
 */
#ifdef FIB_DEBUG
#define FIB_PATH_DBG(_p, _fmt, _args...)                        \
{                                                               \
    u8 *_tmp = NULL;                                            \
    _tmp = fib_path_format(fib_path_get_index(_p), _tmp);       \
    clib_warning("path:[%d:%s]:" _fmt,                          \
                 fib_path_get_index(_p), _tmp,                  \
                 ##_args);                                      \
    vec_free(_tmp);                                             \
}
#else
#define FIB_PATH_DBG(_p, _fmt, _args...)
#endif
339
340 static fib_path_t *
341 fib_path_get (fib_node_index_t index)
342 {
343     return (pool_elt_at_index(fib_path_pool, index));
344 }
345
346 static fib_node_index_t 
347 fib_path_get_index (fib_path_t *path)
348 {
349     return (path - fib_path_pool);
350 }
351
352 static fib_node_t *
353 fib_path_get_node (fib_node_index_t index)
354 {
355     return ((fib_node_t*)fib_path_get(index));
356 }
357
358 static fib_path_t*
359 fib_path_from_fib_node (fib_node_t *node)
360 {
361 #if CLIB_DEBUG > 0
362     ASSERT(FIB_NODE_TYPE_PATH == node->fn_type);
363 #endif
364     return ((fib_path_t*)node);
365 }
366
367 u8 *
368 format_fib_path (u8 * s, va_list * args)
369 {
370     fib_path_t *path = va_arg (*args, fib_path_t *);
371     vnet_main_t * vnm = vnet_get_main();
372     fib_path_oper_attribute_t oattr;
373     fib_path_cfg_attribute_t cattr;
374
375     s = format (s, "      index:%d ", fib_path_get_index(path));
376     s = format (s, "pl-index:%d ", path->fp_pl_index);
377     s = format (s, "%U ", format_fib_protocol, path->fp_nh_proto);
378     s = format (s, "weight=%d ", path->fp_weight);
379     s = format (s, "%s: ", fib_path_type_names[path->fp_type]);
380     if (FIB_PATH_OPER_FLAG_NONE != path->fp_oper_flags) {
381         s = format(s, " oper-flags:");
382         FOR_EACH_FIB_PATH_OPER_ATTRIBUTE(oattr) {
383             if ((1<<oattr) & path->fp_oper_flags) {
384                 s = format (s, "%s,", fib_path_oper_attribute_names[oattr]);
385             }
386         }
387     }
388     if (FIB_PATH_CFG_FLAG_NONE != path->fp_cfg_flags) {
389         s = format(s, " cfg-flags:");
390         FOR_EACH_FIB_PATH_CFG_ATTRIBUTE(cattr) {
391             if ((1<<cattr) & path->fp_cfg_flags) {
392                 s = format (s, "%s,", fib_path_cfg_attribute_names[cattr]);
393             }
394         }
395     }
396     s = format(s, "\n       ");
397
398     switch (path->fp_type)
399     {
400     case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
401         s = format (s, "%U", format_ip46_address,
402                     &path->attached_next_hop.fp_nh,
403                     IP46_TYPE_ANY);
404         if (path->fp_oper_flags & FIB_PATH_OPER_FLAG_DROP)
405         {
406             s = format (s, " if_index:%d", path->attached_next_hop.fp_interface);
407         }
408         else
409         {
410             s = format (s, " %U",
411                         format_vnet_sw_interface_name,
412                         vnm,
413                         vnet_get_sw_interface(
414                             vnm,
415                             path->attached_next_hop.fp_interface));
416             if (vnet_sw_interface_is_p2p(vnet_get_main(),
417                                          path->attached_next_hop.fp_interface))
418             {
419                 s = format (s, " (p2p)");
420             }
421         }
422         if (!dpo_id_is_valid(&path->fp_dpo))
423         {
424             s = format(s, "\n          unresolved");
425         }
426         else
427         {
428             s = format(s, "\n          %U",
429                        format_dpo_id,
430                        &path->fp_dpo, 13);
431         }
432         break;
433     case FIB_PATH_TYPE_ATTACHED:
434         if (path->fp_oper_flags & FIB_PATH_OPER_FLAG_DROP)
435         {
436             s = format (s, " if_index:%d", path->attached_next_hop.fp_interface);
437         }
438         else
439         {
440             s = format (s, " %U",
441                         format_vnet_sw_interface_name,
442                         vnm,
443                         vnet_get_sw_interface(
444                             vnm,
445                             path->attached.fp_interface));
446         }
447         break;
448     case FIB_PATH_TYPE_RECURSIVE:
449         if (FIB_PROTOCOL_MPLS == path->fp_nh_proto)
450         {
451             s = format (s, "via %U %U",
452                         format_mpls_unicast_label,
453                         path->recursive.fp_nh.fp_local_label,
454                         format_mpls_eos_bit,
455                         path->recursive.fp_nh.fp_eos);
456         }
457         else
458         {
459             s = format (s, "via %U",
460                         format_ip46_address,
461                         &path->recursive.fp_nh.fp_ip,
462                         IP46_TYPE_ANY);
463         }
464         s = format (s, " in fib:%d",
465                     path->recursive.fp_tbl_id,
466                     path->fp_via_fib); 
467         s = format (s, " via-fib:%d", path->fp_via_fib); 
468         s = format (s, " via-dpo:[%U:%d]",
469                     format_dpo_type, path->fp_dpo.dpoi_type, 
470                     path->fp_dpo.dpoi_index);
471
472         break;
473     case FIB_PATH_TYPE_RECEIVE:
474     case FIB_PATH_TYPE_INTF_RX:
475     case FIB_PATH_TYPE_SPECIAL:
476     case FIB_PATH_TYPE_DEAG:
477     case FIB_PATH_TYPE_EXCLUSIVE:
478         if (dpo_id_is_valid(&path->fp_dpo))
479         {
480             s = format(s, "%U", format_dpo_id,
481                        &path->fp_dpo, 2);
482         }
483         break;
484     }
485     return (s);
486 }
487
488 u8 *
489 fib_path_format (fib_node_index_t pi, u8 *s)
490 {
491     fib_path_t *path;
492
493     path = fib_path_get(pi);
494     ASSERT(NULL != path);
495
496     return (format (s, "%U", format_fib_path, path));
497 }
498
499 u8 *
500 fib_path_adj_format (fib_node_index_t pi,
501                      u32 indent,
502                      u8 *s)
503 {
504     fib_path_t *path;
505
506     path = fib_path_get(pi);
507     ASSERT(NULL != path);
508
509     if (!dpo_id_is_valid(&path->fp_dpo))
510     {
511         s = format(s, " unresolved");
512     }
513     else
514     {
515         s = format(s, "%U", format_dpo_id,
516                    &path->fp_dpo, 2);
517     }
518
519     return (s);
520 }
521
/*
 * fib_path_last_lock_gone
 *
 * We don't share paths, we share path lists, so the [un]lock functions
 * are no-ops. Reaching this callback therefore indicates a programming
 * error - assert.
 */
static void
fib_path_last_lock_gone (fib_node_t *node)
{
    ASSERT(0);
}
533
534 static const adj_index_t
535 fib_path_attached_next_hop_get_adj (fib_path_t *path,
536                                     vnet_link_t link)
537 {
538     if (vnet_sw_interface_is_p2p(vnet_get_main(),
539                                  path->attached_next_hop.fp_interface))
540     {
541         /*
542          * if the interface is p2p then the adj for the specific
543          * neighbour on that link will never exist. on p2p links
544          * the subnet address (the attached route) links to the
545          * auto-adj (see below), we want that adj here too.
546          */
547         return (adj_nbr_add_or_lock(path->fp_nh_proto,
548                                     link,
549                                     &zero_addr,
550                                     path->attached_next_hop.fp_interface));
551     }
552     else
553     {
554         return (adj_nbr_add_or_lock(path->fp_nh_proto,
555                                     link,
556                                     &path->attached_next_hop.fp_nh,
557                                     path->attached_next_hop.fp_interface));
558     }
559 }
560
/*
 * Resolve an attached-next-hop path: stack it on the neighbour
 * adjacency, register for that adjacency's updates, and mark the
 * path unresolved if the interface or adj is down.
 */
static void
fib_path_attached_next_hop_set (fib_path_t *path)
{
    /*
     * resolve directly via the adjacency described by the
     * interface and next-hop
     */
    dpo_set(&path->fp_dpo,
            DPO_ADJACENCY,
            fib_proto_to_dpo(path->fp_nh_proto),
            fib_path_attached_next_hop_get_adj(
                 path,
                 fib_proto_to_link(path->fp_nh_proto)));

    /*
     * become a child of the adjacency so we receive updates
     * when its rewrite changes
     */
    path->fp_sibling = adj_child_add(path->fp_dpo.dpoi_index,
                                     FIB_NODE_TYPE_PATH,
                                     fib_path_get_index(path));

    /* the path is usable only if both the interface and the adj are up */
    if (!vnet_sw_interface_is_admin_up(vnet_get_main(),
                                      path->attached_next_hop.fp_interface) ||
        !adj_is_up(path->fp_dpo.dpoi_index))
    {
        path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
    }
}
590
591 /*
592  * create of update the paths recursive adj
593  */
594 static void
595 fib_path_recursive_adj_update (fib_path_t *path,
596                                fib_forward_chain_type_t fct,
597                                dpo_id_t *dpo)
598 {
599     dpo_id_t via_dpo = DPO_INVALID;
600
601     /*
602      * get the DPO to resolve through from the via-entry
603      */
604     fib_entry_contribute_forwarding(path->fp_via_fib,
605                                     fct,
606                                     &via_dpo);
607
608
609     /*
610      * hope for the best - clear if restrictions apply.
611      */
612     path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RESOLVED;
613
614     /*
615      * Validate any recursion constraints and over-ride the via
616      * adj if not met
617      */
618     if (path->fp_oper_flags & FIB_PATH_OPER_FLAG_RECURSIVE_LOOP)
619     {
620         path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
621         dpo_copy(&via_dpo, drop_dpo_get(fib_proto_to_dpo(path->fp_nh_proto)));
622     }
623     else if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_RESOLVE_HOST)
624     {
625         /*
626          * the via FIB must be a host route.
627          * note the via FIB just added will always be a host route
628          * since it is an RR source added host route. So what we need to
629          * check is whether the route has other sources. If it does then
630          * some other source has added it as a host route. If it doesn't
631          * then it was added only here and inherits forwarding from a cover.
632          * the cover is not a host route.
633          * The RR source is the lowest priority source, so we check if it
634          * is the best. if it is there are no other sources.
635          */
636         if (fib_entry_get_best_source(path->fp_via_fib) >= FIB_SOURCE_RR)
637         {
638             path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
639             dpo_copy(&via_dpo, drop_dpo_get(fib_proto_to_dpo(path->fp_nh_proto)));
640
641             /*
642              * PIC edge trigger. let the load-balance maps know
643              */
644             load_balance_map_path_state_change(fib_path_get_index(path));
645         }
646     }
647     else if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_RESOLVE_ATTACHED)
648     {
649         /*
650          * RR source entries inherit the flags from the cover, so
651          * we can check the via directly
652          */
653         if (!(FIB_ENTRY_FLAG_ATTACHED & fib_entry_get_flags(path->fp_via_fib)))
654         {
655             path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
656             dpo_copy(&via_dpo, drop_dpo_get(fib_proto_to_dpo(path->fp_nh_proto)));
657
658             /*
659              * PIC edge trigger. let the load-balance maps know
660              */
661             load_balance_map_path_state_change(fib_path_get_index(path));
662         }
663     }
664     /*
665      * check for over-riding factors on the FIB entry itself
666      */
667     if (!fib_entry_is_resolved(path->fp_via_fib))
668     {
669         path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
670         dpo_copy(&via_dpo, drop_dpo_get(fib_proto_to_dpo(path->fp_nh_proto)));
671
672         /*
673          * PIC edge trigger. let the load-balance maps know
674          */
675         load_balance_map_path_state_change(fib_path_get_index(path));
676     }
677
678     /*
679      * update the path's contributed DPO
680      */
681     dpo_copy(dpo, &via_dpo);
682
683     FIB_PATH_DBG(path, "recursive update: %U",
684                  fib_get_lookup_main(path->fp_nh_proto),
685                  &path->fp_dpo, 2);
686
687     dpo_reset(&via_dpo);
688 }
689
690 /*
691  * fib_path_is_permanent_drop
692  *
693  * Return !0 if the path is configured to permanently drop,
694  * despite other attributes.
695  */
696 static int
697 fib_path_is_permanent_drop (fib_path_t *path)
698 {
699     return ((path->fp_cfg_flags & FIB_PATH_CFG_FLAG_DROP) ||
700             (path->fp_oper_flags & FIB_PATH_OPER_FLAG_DROP));
701 }
702
/*
 * fib_path_unresolve
 *
 * Remove our dependency on the resolution target: detach from the
 * parent (via-entry or adjacency) according to path type, then reset
 * the path's DPO and clear the resolved flag.
 */
static void
fib_path_unresolve (fib_path_t *path)
{
    /*
     * the forced drop path does not need unresolving
     */
    if (fib_path_is_permanent_drop(path))
    {
        return;
    }

    switch (path->fp_type)
    {
    case FIB_PATH_TYPE_RECURSIVE:
        if (FIB_NODE_INDEX_INVALID != path->fp_via_fib)
        {
            fib_prefix_t pfx;

            /* stop tracking the via-entry and withdraw the RR source
             * we added for it when the path resolved */
            fib_entry_get_prefix(path->fp_via_fib, &pfx);
            fib_entry_child_remove(path->fp_via_fib,
                                   path->fp_sibling);
            fib_table_entry_special_remove(path->recursive.fp_tbl_id,
                                           &pfx,
                                           FIB_SOURCE_RR);
            path->fp_via_fib = FIB_NODE_INDEX_INVALID;
        }
        break;
    case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
    case FIB_PATH_TYPE_ATTACHED:
        /* release our child registration and the lock taken on the adj */
        adj_child_remove(path->fp_dpo.dpoi_index,
                         path->fp_sibling);
        adj_unlock(path->fp_dpo.dpoi_index);
        break;
    case FIB_PATH_TYPE_EXCLUSIVE:
        dpo_reset(&path->exclusive.fp_ex_dpo);
        break;
    case FIB_PATH_TYPE_SPECIAL:
    case FIB_PATH_TYPE_RECEIVE:
    case FIB_PATH_TYPE_INTF_RX:
    case FIB_PATH_TYPE_DEAG:
        /*
         * these hold only the path's DPO, which is reset below.
         */
        break;
    }

    /*
     * release the adj we were holding and pick up the
     * drop just in case.
     */
    dpo_reset(&path->fp_dpo);
    path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;

    return;
}
763
764 static fib_forward_chain_type_t
765 fib_path_to_chain_type (const fib_path_t *path)
766 {
767     switch (path->fp_nh_proto)
768     {
769     case FIB_PROTOCOL_IP4:
770         return (FIB_FORW_CHAIN_TYPE_UNICAST_IP4);
771     case FIB_PROTOCOL_IP6:
772         return (FIB_FORW_CHAIN_TYPE_UNICAST_IP6);
773     case FIB_PROTOCOL_MPLS:
774         if (FIB_PATH_TYPE_RECURSIVE == path->fp_type &&
775             MPLS_EOS == path->recursive.fp_nh.fp_eos)
776         {
777             return (FIB_FORW_CHAIN_TYPE_MPLS_EOS);
778         }
779         else
780         {
781             return (FIB_FORW_CHAIN_TYPE_MPLS_EOS);
782         }
783     }
784     return (FIB_FORW_CHAIN_TYPE_UNICAST_IP4);
785 }
786
/*
 * fib_path_back_walk_notify
 *
 * A back walk has reached this path. React per path type and walk
 * reason, then propagate the walk onward to the owning path-list
 * (unless an early return decides the walk need go no further from
 * this path).
 */
static fib_node_back_walk_rc_t
fib_path_back_walk_notify (fib_node_t *node,
                           fib_node_back_walk_ctx_t *ctx)
{
    fib_path_t *path;

    path = fib_path_from_fib_node(node);

    switch (path->fp_type)
    {
    case FIB_PATH_TYPE_RECURSIVE:
        if (FIB_NODE_BW_REASON_FLAG_EVALUATE & ctx->fnbw_reason)
        {
            /*
             * modify the recursive adjacency to use the new forwarding
             * of the via-fib.
             * this update is visible to packets in flight in the DP.
             */
            fib_path_recursive_adj_update(
                path,
                fib_path_to_chain_type(path),
                &path->fp_dpo);
        }
        if ((FIB_NODE_BW_REASON_FLAG_ADJ_UPDATE & ctx->fnbw_reason) ||
            (FIB_NODE_BW_REASON_FLAG_ADJ_DOWN   & ctx->fnbw_reason))
        {
            /*
             * ADJ updates (complete<->incomplete) do not need to propagate to
             * recursive entries.
             * The only reason its needed as far back as here, is that the adj
             * and the incomplete adj are a different DPO type, so the LBs need
             * to re-stack.
             * If this walk was quashed in the fib_entry, then any non-fib_path
             * children (like tunnels that collapse out the LB when they stack)
             * would not see the update.
             */
            return (FIB_NODE_BACK_WALK_CONTINUE);
        }
        break;
    case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
        /*
         * ADJ_UPDATE backwalks pass silently through here and up to
         * the path-list when the multipath adj collapse occurs.
         * The reason we do this is that the assumption is that VPP
         * runs in an environment where the Control-Plane is remote
         * and hence reacts slowly to link up down. In order to remove
         * this down link from the ECMP set quickly, we back-walk.
         * VPP also has dedicated CPUs, so we are not stealing resources
         * from the CP to do so.
         */
        if (FIB_NODE_BW_REASON_FLAG_INTERFACE_UP & ctx->fnbw_reason)
        {
            if (path->fp_oper_flags & FIB_PATH_OPER_FLAG_RESOLVED)
            {
                /*
                 * already resolved. no need to walk back again
                 */
                return (FIB_NODE_BACK_WALK_CONTINUE);
            }
            path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RESOLVED;
        }
        if (FIB_NODE_BW_REASON_FLAG_INTERFACE_DOWN & ctx->fnbw_reason)
        {
            if (!(path->fp_oper_flags & FIB_PATH_OPER_FLAG_RESOLVED))
            {
                /*
                 * already unresolved. no need to walk back again
                 */
                return (FIB_NODE_BACK_WALK_CONTINUE);
            }
            path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
        }
        if (FIB_NODE_BW_REASON_FLAG_INTERFACE_DELETE & ctx->fnbw_reason)
        {
            /*
             * The interface this path resolves through has been deleted.
             * This will leave the path in a permanent drop state. The route
             * needs to be removed and readded (and hence the path-list deleted)
             * before it can forward again.
             */
            fib_path_unresolve(path);
            path->fp_oper_flags |= FIB_PATH_OPER_FLAG_DROP;
        }
        if (FIB_NODE_BW_REASON_FLAG_ADJ_UPDATE & ctx->fnbw_reason)
        {
            /*
             * restack the DPO to pick up the correct DPO sub-type
             */
            uword if_is_up;
            adj_index_t ai;

            if_is_up = vnet_sw_interface_is_admin_up(
                           vnet_get_main(),
                           path->attached_next_hop.fp_interface);

            ai = fib_path_attached_next_hop_get_adj(
                     path,
                     fib_proto_to_link(path->fp_nh_proto));

            path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
            if (if_is_up && adj_is_up(ai))
            {
                path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RESOLVED;
            }

            /* dpo_set takes its own lock; release the one from get_adj */
            dpo_set(&path->fp_dpo, DPO_ADJACENCY,
                    fib_proto_to_dpo(path->fp_nh_proto),
                    ai);
            adj_unlock(ai);

            if (!if_is_up)
            {
                /*
                 * If the interface is not up there is no reason to walk
                 * back to children. if we did they would only evaluate
                 * that this path is unresolved and hence it would
                 * not contribute the adjacency - so it would be wasted
                 * CPU time.
                 */
                return (FIB_NODE_BACK_WALK_CONTINUE);
            }
        }
        if (FIB_NODE_BW_REASON_FLAG_ADJ_DOWN & ctx->fnbw_reason)
        {
            if (!(path->fp_oper_flags & FIB_PATH_OPER_FLAG_RESOLVED))
            {
                /*
                 * already unresolved. no need to walk back again
                 */
                return (FIB_NODE_BACK_WALK_CONTINUE);
            }
            /*
             * the adj has gone down. the path is no longer resolved.
             */
            path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
        }
        break;
    case FIB_PATH_TYPE_ATTACHED:
        /*
         * FIXME; this could schedule a lower priority walk, since attached
         * routes are not usually in ECMP configurations so the backwalk to
         * the FIB entry does not need to be high priority
         */
        if (FIB_NODE_BW_REASON_FLAG_INTERFACE_UP & ctx->fnbw_reason)
        {
            path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RESOLVED;
        }
        if (FIB_NODE_BW_REASON_FLAG_INTERFACE_DOWN & ctx->fnbw_reason)
        {
            path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
        }
        if (FIB_NODE_BW_REASON_FLAG_INTERFACE_DELETE & ctx->fnbw_reason)
        {
            fib_path_unresolve(path);
            path->fp_oper_flags |= FIB_PATH_OPER_FLAG_DROP;
        }
        break;
    case FIB_PATH_TYPE_INTF_RX:
        ASSERT(0);
        /* fall-through (debug builds assert above) */
    case FIB_PATH_TYPE_DEAG:
        /*
         * FIXME When VRF delete is allowed this will need a poke.
         */
    case FIB_PATH_TYPE_SPECIAL:
    case FIB_PATH_TYPE_RECEIVE:
    case FIB_PATH_TYPE_EXCLUSIVE:
        /*
         * these path types have no parents. so to be
         * walked from one is unexpected.
         */
        ASSERT(0);
        break;
    }

    /*
     * propagate the backwalk further to the path-list
     */
    fib_path_list_back_walk(path->fp_pl_index, ctx);

    return (FIB_NODE_BACK_WALK_CONTINUE);
}
974
975 static void
976 fib_path_memory_show (void)
977 {
978     fib_show_memory_usage("Path",
979                           pool_elts(fib_path_pool),
980                           pool_len(fib_path_pool),
981                           sizeof(fib_path_t));
982 }
983
984 /*
985  * The FIB path's graph node virtual function table
986  */
987 static const fib_node_vft_t fib_path_vft = {
988     .fnv_get = fib_path_get_node,
989     .fnv_last_lock = fib_path_last_lock_gone,
990     .fnv_back_walk = fib_path_back_walk_notify,
991     .fnv_mem_show = fib_path_memory_show,
992 };
993
994 static fib_path_cfg_flags_t
995 fib_path_route_flags_to_cfg_flags (const fib_route_path_t *rpath)
996 {
997     fib_path_cfg_flags_t cfg_flags = FIB_PATH_CFG_FLAG_NONE;
998
999     if (rpath->frp_flags & FIB_ROUTE_PATH_RESOLVE_VIA_HOST)
1000         cfg_flags |= FIB_PATH_CFG_FLAG_RESOLVE_HOST;
1001     if (rpath->frp_flags & FIB_ROUTE_PATH_RESOLVE_VIA_ATTACHED)
1002         cfg_flags |= FIB_PATH_CFG_FLAG_RESOLVE_ATTACHED;
1003     if (rpath->frp_flags & FIB_ROUTE_PATH_LOCAL)
1004         cfg_flags |= FIB_PATH_CFG_FLAG_LOCAL;
1005     if (rpath->frp_flags & FIB_ROUTE_PATH_ATTACHED)
1006         cfg_flags |= FIB_PATH_CFG_FLAG_ATTACHED;
1007     if (rpath->frp_flags & FIB_ROUTE_PATH_INTF_RX)
1008         cfg_flags |= FIB_PATH_CFG_FLAG_INTF_RX;
1009     if (rpath->frp_flags & FIB_ROUTE_PATH_RPF_ID)
1010         cfg_flags |= FIB_PATH_CFG_FLAG_RPF_ID;
1011     if (rpath->frp_flags & FIB_ROUTE_PATH_EXCLUSIVE)
1012         cfg_flags |= FIB_PATH_CFG_FLAG_EXCLUSIVE;
1013     if (rpath->frp_flags & FIB_ROUTE_PATH_DROP)
1014         cfg_flags |= FIB_PATH_CFG_FLAG_DROP;
1015
1016     return (cfg_flags);
1017 }
1018
1019 /*
1020  * fib_path_create
1021  *
1022  * Create and initialise a new path object.
1023  * return the index of the path.
1024  */
fib_node_index_t
fib_path_create (fib_node_index_t pl_index,
                 const fib_route_path_t *rpath)
{
    fib_path_t *path;

    pool_get(fib_path_pool, path);
    memset(path, 0, sizeof(*path));

    fib_node_init(&path->fp_node,
                  FIB_NODE_TYPE_PATH);

    dpo_reset(&path->fp_dpo);
    path->fp_pl_index = pl_index;
    path->fp_nh_proto = rpath->frp_proto;
    path->fp_via_fib = FIB_NODE_INDEX_INVALID;
    path->fp_weight = rpath->frp_weight;
    if (0 == path->fp_weight)
    {
        /*
         * a weight of 0 is a meaningless value. We could either reject it, and thus force
         * clients to always use 1, or we can accept it and fixup appropriately.
         */
        path->fp_weight = 1;
    }
    path->fp_cfg_flags = fib_path_route_flags_to_cfg_flags(rpath);

    /*
     * deduce the path's type from the parameters and save what is needed.
     */
    if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_LOCAL)
    {
        /* local flag => a receive path */
        path->fp_type = FIB_PATH_TYPE_RECEIVE;
        path->receive.fp_interface = rpath->frp_sw_if_index;
        path->receive.fp_addr = rpath->frp_addr;
    }
    else if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_INTF_RX)
    {
        /* interface receive path */
        path->fp_type = FIB_PATH_TYPE_INTF_RX;
        path->intf_rx.fp_interface = rpath->frp_sw_if_index;
    }
    else if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_RPF_ID)
    {
        /* an RPF-ID path is a deag; both the table and RPF-ID are kept */
        path->fp_type = FIB_PATH_TYPE_DEAG;
        path->deag.fp_tbl_id = rpath->frp_fib_index;
        path->deag.fp_rpf_id = rpath->frp_rpf_id;
    }
    else if (~0 != rpath->frp_sw_if_index)
    {
        /*
         * an interface was supplied; attached if there is no next-hop
         * address, attached-next-hop otherwise.
         */
        if (ip46_address_is_zero(&rpath->frp_addr))
        {
            path->fp_type = FIB_PATH_TYPE_ATTACHED;
            path->attached.fp_interface = rpath->frp_sw_if_index;
        }
        else
        {
            path->fp_type = FIB_PATH_TYPE_ATTACHED_NEXT_HOP;
            path->attached_next_hop.fp_interface = rpath->frp_sw_if_index;
            path->attached_next_hop.fp_nh = rpath->frp_addr;
        }
    }
    else
    {
        /*
         * no interface; either a deag via another table, a special
         * (no table either) or a recursive via the next-hop.
         */
        if (ip46_address_is_zero(&rpath->frp_addr))
        {
            if (~0 == rpath->frp_fib_index)
            {
                path->fp_type = FIB_PATH_TYPE_SPECIAL;
            }
            else
            {
                path->fp_type = FIB_PATH_TYPE_DEAG;
                path->deag.fp_tbl_id = rpath->frp_fib_index;
            }
        }
        else
        {
            path->fp_type = FIB_PATH_TYPE_RECURSIVE;
            /* the recursive next-hop is a label for MPLS, an address otherwise */
            if (FIB_PROTOCOL_MPLS == path->fp_nh_proto)
            {
                path->recursive.fp_nh.fp_local_label = rpath->frp_local_label;
                path->recursive.fp_nh.fp_eos = rpath->frp_eos;
            }
            else
            {
                path->recursive.fp_nh.fp_ip = rpath->frp_addr;
            }
            path->recursive.fp_tbl_id = rpath->frp_fib_index;
        }
    }

    FIB_PATH_DBG(path, "create");

    return (fib_path_get_index(path));
}
1120
1121 /*
1122  * fib_path_create_special
1123  *
1124  * Create and initialise a new path object.
1125  * return the index of the path.
1126  */
1127 fib_node_index_t
1128 fib_path_create_special (fib_node_index_t pl_index,
1129                          fib_protocol_t nh_proto,
1130                          fib_path_cfg_flags_t flags,
1131                          const dpo_id_t *dpo)
1132 {
1133     fib_path_t *path;
1134
1135     pool_get(fib_path_pool, path);
1136     memset(path, 0, sizeof(*path));
1137
1138     fib_node_init(&path->fp_node,
1139                   FIB_NODE_TYPE_PATH);
1140     dpo_reset(&path->fp_dpo);
1141
1142     path->fp_pl_index = pl_index;
1143     path->fp_weight = 1;
1144     path->fp_nh_proto = nh_proto;
1145     path->fp_via_fib = FIB_NODE_INDEX_INVALID;
1146     path->fp_cfg_flags = flags;
1147
1148     if (FIB_PATH_CFG_FLAG_DROP & flags)
1149     {
1150         path->fp_type = FIB_PATH_TYPE_SPECIAL;
1151     }
1152     else if (FIB_PATH_CFG_FLAG_LOCAL & flags)
1153     {
1154         path->fp_type = FIB_PATH_TYPE_RECEIVE;
1155         path->attached.fp_interface = FIB_NODE_INDEX_INVALID;
1156     }
1157     else
1158     {
1159         path->fp_type = FIB_PATH_TYPE_EXCLUSIVE;
1160         ASSERT(NULL != dpo);
1161         dpo_copy(&path->exclusive.fp_ex_dpo, dpo);
1162     }
1163
1164     return (fib_path_get_index(path));
1165 }
1166
1167 /*
1168  * fib_path_copy
1169  *
1170  * Copy a path. return index of new path.
1171  */
1172 fib_node_index_t
1173 fib_path_copy (fib_node_index_t path_index,
1174                fib_node_index_t path_list_index)
1175 {
1176     fib_path_t *path, *orig_path;
1177
1178     pool_get(fib_path_pool, path);
1179
1180     orig_path = fib_path_get(path_index);
1181     ASSERT(NULL != orig_path);
1182
1183     memcpy(path, orig_path, sizeof(*path));
1184
1185     FIB_PATH_DBG(path, "create-copy:%d", path_index);
1186
1187     /*
1188      * reset the dynamic section
1189      */
1190     fib_node_init(&path->fp_node, FIB_NODE_TYPE_PATH);
1191     path->fp_oper_flags     = FIB_PATH_OPER_FLAG_NONE;
1192     path->fp_pl_index  = path_list_index;
1193     path->fp_via_fib   = FIB_NODE_INDEX_INVALID;
1194     memset(&path->fp_dpo, 0, sizeof(path->fp_dpo));
1195     dpo_reset(&path->fp_dpo);
1196
1197     return (fib_path_get_index(path));
1198 }
1199
1200 /*
1201  * fib_path_destroy
1202  *
1203  * destroy a path that is no longer required
1204  */
1205 void
1206 fib_path_destroy (fib_node_index_t path_index)
1207 {
1208     fib_path_t *path;
1209
1210     path = fib_path_get(path_index);
1211
1212     ASSERT(NULL != path);
1213     FIB_PATH_DBG(path, "destroy");
1214
1215     fib_path_unresolve(path);
1216
1217     fib_node_deinit(&path->fp_node);
1218     pool_put(fib_path_pool, path);
1219 }
1220
1221 /*
1222  * fib_path_destroy
1223  *
1224  * destroy a path that is no longer required
1225  */
1226 uword
1227 fib_path_hash (fib_node_index_t path_index)
1228 {
1229     fib_path_t *path;
1230
1231     path = fib_path_get(path_index);
1232
1233     return (hash_memory(STRUCT_MARK_PTR(path, path_hash_start),
1234                         (STRUCT_OFFSET_OF(fib_path_t, path_hash_end) -
1235                          STRUCT_OFFSET_OF(fib_path_t, path_hash_start)),
1236                         0));
1237 }
1238
1239 /*
1240  * fib_path_cmp_i
1241  *
1242  * Compare two paths for equivalence.
1243  */
1244 static int
1245 fib_path_cmp_i (const fib_path_t *path1,
1246                 const fib_path_t *path2)
1247 {
1248     int res;
1249
1250     res = 1;
1251
1252     /*
1253      * paths of different types and protocol are not equal.
1254      * different weights only are the same path.
1255      */
1256     if (path1->fp_type != path2->fp_type)
1257     {
1258         res = (path1->fp_type - path2->fp_type);
1259     }
1260     else if (path1->fp_nh_proto != path2->fp_nh_proto)
1261     {
1262         res = (path1->fp_nh_proto - path2->fp_nh_proto);
1263     }
1264     else
1265     {
1266         /*
1267          * both paths are of the same type.
1268          * consider each type and its attributes in turn.
1269          */
1270         switch (path1->fp_type)
1271         {
1272         case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
1273             res = ip46_address_cmp(&path1->attached_next_hop.fp_nh,
1274                                    &path2->attached_next_hop.fp_nh);
1275             if (0 == res) {
1276                 res = (path1->attached_next_hop.fp_interface -
1277                        path2->attached_next_hop.fp_interface);
1278             }
1279             break;
1280         case FIB_PATH_TYPE_ATTACHED:
1281             res = (path1->attached.fp_interface -
1282                    path2->attached.fp_interface);
1283             break;
1284         case FIB_PATH_TYPE_RECURSIVE:
1285             res = ip46_address_cmp(&path1->recursive.fp_nh,
1286                                    &path2->recursive.fp_nh);
1287  
1288             if (0 == res)
1289             {
1290                 res = (path1->recursive.fp_tbl_id - path2->recursive.fp_tbl_id);
1291             }
1292             break;
1293         case FIB_PATH_TYPE_DEAG:
1294             res = (path1->deag.fp_tbl_id - path2->deag.fp_tbl_id);
1295             if (0 == res)
1296             {
1297                 res = (path1->deag.fp_rpf_id - path2->deag.fp_rpf_id);
1298             }
1299             break;
1300         case FIB_PATH_TYPE_INTF_RX:
1301             res = (path1->intf_rx.fp_interface - path2->intf_rx.fp_interface);
1302             break;
1303         case FIB_PATH_TYPE_SPECIAL:
1304         case FIB_PATH_TYPE_RECEIVE:
1305         case FIB_PATH_TYPE_EXCLUSIVE:
1306             res = 0;
1307             break;
1308         }
1309     }
1310     return (res);
1311 }
1312
1313 /*
1314  * fib_path_cmp_for_sort
1315  *
1316  * Compare two paths for equivalence. Used during path sorting.
1317  * As usual 0 means equal.
1318  */
1319 int
1320 fib_path_cmp_for_sort (void * v1,
1321                        void * v2)
1322 {
1323     fib_node_index_t *pi1 = v1, *pi2 = v2;
1324     fib_path_t *path1, *path2;
1325
1326     path1 = fib_path_get(*pi1);
1327     path2 = fib_path_get(*pi2);
1328
1329     return (fib_path_cmp_i(path1, path2));
1330 }
1331
1332 /*
1333  * fib_path_cmp
1334  *
1335  * Compare two paths for equivalence.
1336  */
1337 int
1338 fib_path_cmp (fib_node_index_t pi1,
1339               fib_node_index_t pi2)
1340 {
1341     fib_path_t *path1, *path2;
1342
1343     path1 = fib_path_get(pi1);
1344     path2 = fib_path_get(pi2);
1345
1346     return (fib_path_cmp_i(path1, path2));
1347 }
1348
/*
 * fib_path_cmp_w_route_path
 *
 * Compare a path against a client supplied route-path description.
 * As usual 0 means equal.
 */
int
fib_path_cmp_w_route_path (fib_node_index_t path_index,
                           const fib_route_path_t *rpath)
{
    fib_path_t *path;
    int res;

    path = fib_path_get(path_index);

    res = 1;

    if (path->fp_weight != rpath->frp_weight)
    {
        res = (path->fp_weight - rpath->frp_weight);
    }
    else
    {
        /*
         * weights match; compare the path's type specific attributes
         * against the corresponding fields of the route-path.
         */
        switch (path->fp_type)
        {
        case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
            res = ip46_address_cmp(&path->attached_next_hop.fp_nh,
                                   &rpath->frp_addr);
            if (0 == res)
            {
                res = (path->attached_next_hop.fp_interface -
                       rpath->frp_sw_if_index);
            }
            break;
        case FIB_PATH_TYPE_ATTACHED:
            res = (path->attached.fp_interface - rpath->frp_sw_if_index);
            break;
        case FIB_PATH_TYPE_RECURSIVE:
            /* the recursive next-hop is a label for MPLS, an address otherwise */
            if (FIB_PROTOCOL_MPLS == path->fp_nh_proto)
            {
                res = path->recursive.fp_nh.fp_local_label - rpath->frp_local_label;

                if (res == 0)
                {
                    res = path->recursive.fp_nh.fp_eos - rpath->frp_eos;
                }
            }
            else
            {
                res = ip46_address_cmp(&path->recursive.fp_nh.fp_ip,
                                       &rpath->frp_addr);
            }

            if (0 == res)
            {
                res = (path->recursive.fp_tbl_id - rpath->frp_fib_index);
            }
            break;
        case FIB_PATH_TYPE_INTF_RX:
            res = (path->intf_rx.fp_interface - rpath->frp_sw_if_index);
            break;
        case FIB_PATH_TYPE_DEAG:
            res = (path->deag.fp_tbl_id - rpath->frp_fib_index);
            if (0 == res)
            {
                res = (path->deag.fp_rpf_id - rpath->frp_rpf_id);
            }
            break;
        case FIB_PATH_TYPE_SPECIAL:
        case FIB_PATH_TYPE_RECEIVE:
        case FIB_PATH_TYPE_EXCLUSIVE:
            /* no type specific attributes to compare */
            res = 0;
            break;
        }
    }
    return (res);
}
1424
1425 /*
1426  * fib_path_recursive_loop_detect
1427  *
1428  * A forward walk of the FIB object graph to detect for a cycle/loop. This
1429  * walk is initiated when an entry is linking to a new path list or from an old.
1430  * The entry vector passed contains all the FIB entrys that are children of this
1431  * path (it is all the entries encountered on the walk so far). If this vector
1432  * contains the entry this path resolve via, then a loop is about to form.
1433  * The loop must be allowed to form, since we need the dependencies in place
1434  * so that we can track when the loop breaks.
1435  * However, we MUST not produce a loop in the forwarding graph (else packets
1436  * would loop around the switch path until the loop breaks), so we mark recursive
1437  * paths as looped so that they do not contribute forwarding information.
1438  * By marking the path as looped, an etry such as;
1439  *    X/Y
1440  *     via a.a.a.a (looped)
1441  *     via b.b.b.b (not looped)
1442  * can still forward using the info provided by b.b.b.b only
1443  */
int
fib_path_recursive_loop_detect (fib_node_index_t path_index,
                                fib_node_index_t **entry_indicies)
{
    fib_path_t *path;

    path = fib_path_get(path_index);

    /*
     * the forced drop path is never looped, cos it is never resolved.
     */
    if (fib_path_is_permanent_drop(path))
    {
        return (0);
    }

    switch (path->fp_type)
    {
    case FIB_PATH_TYPE_RECURSIVE:
    {
        /* only recursive paths resolve via other entries, hence only
         * they can participate in a cycle */
        fib_node_index_t *entry_index, *entries;
        int looped = 0;
        entries = *entry_indicies;

        vec_foreach(entry_index, entries) {
            if (*entry_index == path->fp_via_fib)
            {
                /*
                 * the entry that is about to link to this path-list (or
                 * one of this path-list's children) is the same entry that
                 * this recursive path resolves through. this is a cycle.
                 * abort the walk.
                 */
                looped = 1;
                break;
            }
        }

        if (looped)
        {
            FIB_PATH_DBG(path, "recursive loop formed");
            path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RECURSIVE_LOOP;

            /* a looped path must not contribute forwarding; use the drop */
            dpo_copy(&path->fp_dpo,
                    drop_dpo_get(fib_proto_to_dpo(path->fp_nh_proto)));
        }
        else
        {
            /*
             * no loop here yet. keep forward walking the graph.
             */
            if (fib_entry_recursive_loop_detect(path->fp_via_fib, entry_indicies))
            {
                FIB_PATH_DBG(path, "recursive loop formed");
                path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RECURSIVE_LOOP;
            }
            else
            {
                FIB_PATH_DBG(path, "recursive loop cleared");
                path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RECURSIVE_LOOP;
            }
        }
        break;
    }
    case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
    case FIB_PATH_TYPE_ATTACHED:
    case FIB_PATH_TYPE_SPECIAL:
    case FIB_PATH_TYPE_DEAG:
    case FIB_PATH_TYPE_RECEIVE:
    case FIB_PATH_TYPE_INTF_RX:
    case FIB_PATH_TYPE_EXCLUSIVE:
        /*
         * these path types cannot be part of a loop, since they are the leaves
         * of the graph.
         */
        break;
    }

    return (fib_path_is_looped(path_index));
}
1524
/*
 * fib_path_resolve
 *
 * Resolve the path; the action taken is path-type specific.
 * Returns !0 if the path ends up resolved.
 */
int
fib_path_resolve (fib_node_index_t path_index)
{
    fib_path_t *path;

    path = fib_path_get(path_index);

    /*
     * hope for the best.
     */
    path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RESOLVED;

    /*
     * the forced drop path resolves via the drop adj
     */
    if (fib_path_is_permanent_drop(path))
    {
        dpo_copy(&path->fp_dpo,
                 drop_dpo_get(fib_proto_to_dpo(path->fp_nh_proto)));
        path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
        return (fib_path_is_resolved(path_index));
    }

    switch (path->fp_type)
    {
    case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
        fib_path_attached_next_hop_set(path);
        break;
    case FIB_PATH_TYPE_ATTACHED:
        /*
         * path->attached.fp_interface
         */
        if (!vnet_sw_interface_is_admin_up(vnet_get_main(),
                                           path->attached.fp_interface))
        {
            path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
        }
        if (vnet_sw_interface_is_p2p(vnet_get_main(),
                                     path->attached.fp_interface))
        {
            /*
             * point-2-point interfaces do not require a glean, since
             * there is nothing to ARP. Install a rewrite/nbr adj instead
             */
            dpo_set(&path->fp_dpo,
                    DPO_ADJACENCY,
                    fib_proto_to_dpo(path->fp_nh_proto),
                    adj_nbr_add_or_lock(
                        path->fp_nh_proto,
                        fib_proto_to_link(path->fp_nh_proto),
                        &zero_addr,
                        path->attached.fp_interface));
        }
        else
        {
            dpo_set(&path->fp_dpo,
                    DPO_ADJACENCY_GLEAN,
                    fib_proto_to_dpo(path->fp_nh_proto),
                    adj_glean_add_or_lock(path->fp_nh_proto,
                                          path->attached.fp_interface,
                                          NULL));
        }
        /*
         * become a child of the adjacency so we receive updates
         * when the interface state changes
         */
        path->fp_sibling = adj_child_add(path->fp_dpo.dpoi_index,
                                         FIB_NODE_TYPE_PATH,
                                         fib_path_get_index(path));

        break;
    case FIB_PATH_TYPE_RECURSIVE:
    {
        /*
         * Create a RR source entry in the table for the address
         * that this path recurses through.
         * This resolve action is recursive, hence we may create
         * more paths in the process. more creates mean maybe realloc
         * of this path.
         */
        fib_node_index_t fei;
        fib_prefix_t pfx;

        ASSERT(FIB_NODE_INDEX_INVALID == path->fp_via_fib);

        /* the prefix recursed through is a label for MPLS, an address otherwise */
        if (FIB_PROTOCOL_MPLS == path->fp_nh_proto)
        {
            fib_prefix_from_mpls_label(path->recursive.fp_nh.fp_local_label,
                                       path->recursive.fp_nh.fp_eos,
                                       &pfx);
        }
        else
        {
            fib_prefix_from_ip46_addr(&path->recursive.fp_nh.fp_ip, &pfx);
        }

        fei = fib_table_entry_special_add(path->recursive.fp_tbl_id,
                                          &pfx,
                                          FIB_SOURCE_RR,
                                          FIB_ENTRY_FLAG_NONE,
                                          ADJ_INDEX_INVALID);

        /* re-fetch the path; the add above may have moved the pool */
        path = fib_path_get(path_index);
        path->fp_via_fib = fei;

        /*
         * become a dependent child of the entry so the path is
         * informed when the forwarding for the entry changes.
         */
        path->fp_sibling = fib_entry_child_add(path->fp_via_fib,
                                               FIB_NODE_TYPE_PATH,
                                               fib_path_get_index(path));

        /*
         * create and configure the IP DPO
         */
        fib_path_recursive_adj_update(
            path,
            fib_path_to_chain_type(path),
            &path->fp_dpo);

        break;
    }
    case FIB_PATH_TYPE_SPECIAL:
        /*
         * Resolve via the drop
         */
        dpo_copy(&path->fp_dpo,
                 drop_dpo_get(fib_proto_to_dpo(path->fp_nh_proto)));
        break;
    case FIB_PATH_TYPE_DEAG:
    {
        /*
         * Resolve via a lookup DPO.
         * FIXME. control plane should add routes with a table ID
         */
        lookup_cast_t cast;

        /* an RPF-ID configured path does a multicast lookup */
        cast = (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_RPF_ID ?
                LOOKUP_MULTICAST :
                LOOKUP_UNICAST);

        lookup_dpo_add_or_lock_w_fib_index(path->deag.fp_tbl_id,
                                           fib_proto_to_dpo(path->fp_nh_proto),
                                           cast,
                                           LOOKUP_INPUT_DST_ADDR,
                                           LOOKUP_TABLE_FROM_CONFIG,
                                           &path->fp_dpo);
        break;
    }
    case FIB_PATH_TYPE_RECEIVE:
        /*
         * Resolve via a receive DPO.
         */
        receive_dpo_add_or_lock(fib_proto_to_dpo(path->fp_nh_proto),
                                path->receive.fp_interface,
                                &path->receive.fp_addr,
                                &path->fp_dpo);
        break;
    case FIB_PATH_TYPE_INTF_RX: {
        /*
         * Resolve via an interface receive DPO.
         */
        interface_dpo_add_or_lock(fib_proto_to_dpo(path->fp_nh_proto),
                                  path->intf_rx.fp_interface,
                                  &path->fp_dpo);
        break;
    }
    case FIB_PATH_TYPE_EXCLUSIVE:
        /*
         * Resolve via the user provided DPO
         */
        dpo_copy(&path->fp_dpo, &path->exclusive.fp_ex_dpo);
        break;
    }

    return (fib_path_is_resolved(path_index));
}
1703
1704 u32
1705 fib_path_get_resolving_interface (fib_node_index_t path_index)
1706 {
1707     fib_path_t *path;
1708
1709     path = fib_path_get(path_index);
1710
1711     switch (path->fp_type)
1712     {
1713     case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
1714         return (path->attached_next_hop.fp_interface);
1715     case FIB_PATH_TYPE_ATTACHED:
1716         return (path->attached.fp_interface);
1717     case FIB_PATH_TYPE_RECEIVE:
1718         return (path->receive.fp_interface);
1719     case FIB_PATH_TYPE_RECURSIVE:
1720         return (fib_entry_get_resolving_interface(path->fp_via_fib));    
1721     case FIB_PATH_TYPE_INTF_RX:
1722     case FIB_PATH_TYPE_SPECIAL:
1723     case FIB_PATH_TYPE_DEAG:
1724     case FIB_PATH_TYPE_EXCLUSIVE:
1725         break;
1726     }
1727     return (~0);
1728 }
1729
1730 adj_index_t
1731 fib_path_get_adj (fib_node_index_t path_index)
1732 {
1733     fib_path_t *path;
1734
1735     path = fib_path_get(path_index);
1736
1737     ASSERT(dpo_is_adj(&path->fp_dpo));
1738     if (dpo_is_adj(&path->fp_dpo))
1739     {
1740         return (path->fp_dpo.dpoi_index);
1741     }
1742     return (ADJ_INDEX_INVALID);
1743 }
1744
1745 int
1746 fib_path_get_weight (fib_node_index_t path_index)
1747 {
1748     fib_path_t *path;
1749
1750     path = fib_path_get(path_index);
1751
1752     ASSERT(path);
1753
1754     return (path->fp_weight);
1755 }
1756
1757 /**
1758  * @brief Contribute the path's adjacency to the list passed.
1759  * By calling this function over all paths, recursively, a child
1760  * can construct its full set of forwarding adjacencies, and hence its
1761  * uRPF list.
1762  */
void
fib_path_contribute_urpf (fib_node_index_t path_index,
                          index_t urpf)
{
    fib_path_t *path;

    path = fib_path_get(path_index);

    /*
     * resolved and unresolved paths contribute to the RPF list.
     */
    switch (path->fp_type)
    {
    case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
        fib_urpf_list_append(urpf, path->attached_next_hop.fp_interface);
        break;

    case FIB_PATH_TYPE_ATTACHED:
        fib_urpf_list_append(urpf, path->attached.fp_interface);
        break;

    case FIB_PATH_TYPE_RECURSIVE:
        if (FIB_NODE_INDEX_INVALID != path->fp_via_fib)
        {
            /*
             * there's unresolved due to constraints, and there's unresolved
             * due to ain't go no via. can't do nowt w'out via.
             */
            fib_entry_contribute_urpf(path->fp_via_fib, urpf);
        }
        break;

    case FIB_PATH_TYPE_EXCLUSIVE:
    case FIB_PATH_TYPE_SPECIAL:
        /*
         * these path types may link to an adj, if that's what
         * the client gave
         */
        if (dpo_is_adj(&path->fp_dpo))
        {
            ip_adjacency_t *adj;

            adj = adj_get(path->fp_dpo.dpoi_index);

            /* use the interface the adjacency's rewrite sends out of */
            fib_urpf_list_append(urpf, adj->rewrite_header.sw_if_index);
        }
        break;

    case FIB_PATH_TYPE_DEAG:
    case FIB_PATH_TYPE_RECEIVE:
    case FIB_PATH_TYPE_INTF_RX:
        /*
         * these path types don't link to an adj
         */
        break;
    }
}
1820
/*
 * fib_path_stack_mpls_disp
 *
 * For a deag path, wrap the passed DPO in an MPLS disposition DPO
 * (carrying the payload protocol and the path's RPF-ID); other path
 * types are left untouched.
 */
void
fib_path_stack_mpls_disp (fib_node_index_t path_index,
                          dpo_proto_t payload_proto,
                          dpo_id_t *dpo)
{
    fib_path_t *path;

    path = fib_path_get(path_index);

    ASSERT(path);

    switch (path->fp_type)
    {
    case FIB_PATH_TYPE_DEAG:
    {
        dpo_id_t tmp = DPO_INVALID;

        /* save the current DPO so the disposition can stack on it */
        dpo_copy(&tmp, dpo);
        dpo_set(dpo,
                DPO_MPLS_DISPOSITION,
                payload_proto,
                mpls_disp_dpo_create(payload_proto,
                                     path->deag.fp_rpf_id,
                                     &tmp));
        dpo_reset(&tmp);
        break;
    }
    case FIB_PATH_TYPE_RECEIVE:
    case FIB_PATH_TYPE_ATTACHED:
    case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
    case FIB_PATH_TYPE_RECURSIVE:
    case FIB_PATH_TYPE_INTF_RX:
    case FIB_PATH_TYPE_EXCLUSIVE:
    case FIB_PATH_TYPE_SPECIAL:
        /* no disposition required for these path types */
        break;
    }
}
1858
/**
 * Contribute this path's forwarding object (DPO) of the requested
 * forwarding-chain type into *dpo.
 *
 * If the requested chain type matches the chain type the path natively
 * resolved with, the path's stored DPO is copied out directly; otherwise
 * a chain-type-appropriate DPO is derived based on the path's type.
 *
 * @param path_index index of the path contributing forwarding
 * @param fct        the forwarding chain type requested. Must not be
 *                   FIB_FORW_CHAIN_TYPE_MPLS_EOS (see ASSERT below).
 * @param dpo        [out] the contributed DPO
 */
void
fib_path_contribute_forwarding (fib_node_index_t path_index,
                                fib_forward_chain_type_t fct,
                                dpo_id_t *dpo)
{
    fib_path_t *path;

    path = fib_path_get(path_index);

    ASSERT(path);
    /* MPLS-EOS chains are not serviced through this API */
    ASSERT(FIB_FORW_CHAIN_TYPE_MPLS_EOS != fct);

    FIB_PATH_DBG(path, "contribute");

    /*
     * The DPO stored in the path was created when the path was resolved.
     * This then represents the path's 'native' protocol; IP.
     * For all others will need to go find something else.
     */
    if (fib_path_to_chain_type(path) == fct)
    {
        dpo_copy(dpo, &path->fp_dpo);
    }
    else
    {
        switch (path->fp_type)
        {
        case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
            switch (fct)
            {
            /*
             * NOTE(review): the MPLS_EOS case below is unreachable in
             * debug builds given the ASSERT above.
             */
            case FIB_FORW_CHAIN_TYPE_UNICAST_IP4:
            case FIB_FORW_CHAIN_TYPE_UNICAST_IP6:
            case FIB_FORW_CHAIN_TYPE_MPLS_EOS:
            case FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS:
            case FIB_FORW_CHAIN_TYPE_ETHERNET:
            case FIB_FORW_CHAIN_TYPE_NSH:
            {
                adj_index_t ai;

                /*
                 * get an appropriate link type adj for the chain's
                 * link type.
                 */
                ai = fib_path_attached_next_hop_get_adj(
                         path,
                         fib_forw_chain_type_to_link_type(fct));
                dpo_set(dpo, DPO_ADJACENCY,
                        fib_forw_chain_type_to_dpo_proto(fct), ai);
                /* drop the reference taken by the get-adj above;
                 * the dpo is assumed to hold its own (taken in dpo_set) */
                adj_unlock(ai);

                break;
            }
            /* no DPO contributed for mcast chain types; *dpo is left
             * as passed in */
            case FIB_FORW_CHAIN_TYPE_MCAST_IP4:
            case FIB_FORW_CHAIN_TYPE_MCAST_IP6:
            break;
            }
            break;
        case FIB_PATH_TYPE_RECURSIVE:
            switch (fct)
            {
            /* derive the DPO via the recursive adjacency */
            case FIB_FORW_CHAIN_TYPE_MPLS_EOS:
            case FIB_FORW_CHAIN_TYPE_UNICAST_IP4:
            case FIB_FORW_CHAIN_TYPE_UNICAST_IP6:
            case FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS:
            case FIB_FORW_CHAIN_TYPE_MCAST_IP4:
            case FIB_FORW_CHAIN_TYPE_MCAST_IP6:
                fib_path_recursive_adj_update(path, fct, dpo);
                break;
            /* a recursive path cannot contribute these chain types */
            case FIB_FORW_CHAIN_TYPE_ETHERNET:
            case FIB_FORW_CHAIN_TYPE_NSH:
                ASSERT(0);
                break;
            }
            break;
        case FIB_PATH_TYPE_DEAG:
            switch (fct)
            {
            /* non-EOS MPLS deag: lookup in the default MPLS table */
            case FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS:
                lookup_dpo_add_or_lock_w_table_id(MPLS_FIB_DEFAULT_TABLE_ID,
                                                  DPO_PROTO_MPLS,
                                                  LOOKUP_UNICAST,
                                                  LOOKUP_INPUT_DST_ADDR,
                                                  LOOKUP_TABLE_FROM_CONFIG,
                                                  dpo);
                break;
            /*
             * NOTE(review): MPLS_EOS here is likewise dead in debug
             * builds given the ASSERT at the top.
             */
            case FIB_FORW_CHAIN_TYPE_MPLS_EOS:
            case FIB_FORW_CHAIN_TYPE_UNICAST_IP4:
            case FIB_FORW_CHAIN_TYPE_UNICAST_IP6:
                dpo_copy(dpo, &path->fp_dpo);
                break;
            case FIB_FORW_CHAIN_TYPE_MCAST_IP4:
            case FIB_FORW_CHAIN_TYPE_MCAST_IP6:
            case FIB_FORW_CHAIN_TYPE_ETHERNET:
            case FIB_FORW_CHAIN_TYPE_NSH:
                ASSERT(0);
                break;
            }
            break;
        case FIB_PATH_TYPE_EXCLUSIVE:
            /* the user-provided DPO is contributed as-is */
            dpo_copy(dpo, &path->exclusive.fp_ex_dpo);
            break;
        case FIB_PATH_TYPE_ATTACHED:
            switch (fct)
            {
            /* nothing contributed for these chain types; *dpo is left
             * as passed in */
            case FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS:
            case FIB_FORW_CHAIN_TYPE_UNICAST_IP4:
            case FIB_FORW_CHAIN_TYPE_UNICAST_IP6:
            case FIB_FORW_CHAIN_TYPE_MPLS_EOS:
            case FIB_FORW_CHAIN_TYPE_ETHERNET:
            case FIB_FORW_CHAIN_TYPE_NSH:
                break;
            case FIB_FORW_CHAIN_TYPE_MCAST_IP4:
            case FIB_FORW_CHAIN_TYPE_MCAST_IP6:
                {
                    adj_index_t ai;

                    /*
                     * Create the adj needed for sending IP multicast traffic
                     */
                    ai = adj_mcast_add_or_lock(path->fp_nh_proto,
                                               fib_forw_chain_type_to_link_type(fct),
                                               path->attached.fp_interface);
                    dpo_set(dpo, DPO_ADJACENCY,
                            fib_forw_chain_type_to_dpo_proto(fct),
                            ai);
                    /* dpo holds its own reference; drop ours */
                    adj_unlock(ai);
                }
                break;
            }
            break;
        case FIB_PATH_TYPE_INTF_RX:
            /*
             * Contribute an interface DPO for the path's interface
             */
            interface_dpo_add_or_lock(fib_forw_chain_type_to_dpo_proto(fct),
                                      path->attached.fp_interface,
                                      dpo);
            break;
        case FIB_PATH_TYPE_RECEIVE:
        case FIB_PATH_TYPE_SPECIAL:
            /* the path's native DPO serves all requested chain types */
            dpo_copy(dpo, &path->fp_dpo);
            break;
        }
    }
}
2003
2004 load_balance_path_t *
2005 fib_path_append_nh_for_multipath_hash (fib_node_index_t path_index,
2006                                        fib_forward_chain_type_t fct,
2007                                        load_balance_path_t *hash_key)
2008 {
2009     load_balance_path_t *mnh;
2010     fib_path_t *path;
2011
2012     path = fib_path_get(path_index);
2013
2014     ASSERT(path);
2015
2016     if (fib_path_is_resolved(path_index))
2017     {
2018         vec_add2(hash_key, mnh, 1);
2019
2020         mnh->path_weight = path->fp_weight;
2021         mnh->path_index = path_index;
2022         fib_path_contribute_forwarding(path_index, fct, &mnh->path_dpo);
2023     }
2024
2025     return (hash_key);
2026 }
2027
2028 int
2029 fib_path_is_recursive (fib_node_index_t path_index)
2030 {
2031     fib_path_t *path;
2032
2033     path = fib_path_get(path_index);
2034
2035     return (FIB_PATH_TYPE_RECURSIVE == path->fp_type);
2036 }
2037
2038 int
2039 fib_path_is_exclusive (fib_node_index_t path_index)
2040 {
2041     fib_path_t *path;
2042
2043     path = fib_path_get(path_index);
2044
2045     return (FIB_PATH_TYPE_EXCLUSIVE == path->fp_type);
2046 }
2047
2048 int
2049 fib_path_is_deag (fib_node_index_t path_index)
2050 {
2051     fib_path_t *path;
2052
2053     path = fib_path_get(path_index);
2054
2055     return (FIB_PATH_TYPE_DEAG == path->fp_type);
2056 }
2057
2058 int
2059 fib_path_is_resolved (fib_node_index_t path_index)
2060 {
2061     fib_path_t *path;
2062
2063     path = fib_path_get(path_index);
2064
2065     return (dpo_id_is_valid(&path->fp_dpo) &&
2066             (path->fp_oper_flags & FIB_PATH_OPER_FLAG_RESOLVED) &&
2067             !fib_path_is_looped(path_index) &&
2068             !fib_path_is_permanent_drop(path));
2069 }
2070
2071 int
2072 fib_path_is_looped (fib_node_index_t path_index)
2073 {
2074     fib_path_t *path;
2075
2076     path = fib_path_get(path_index);
2077
2078     return (path->fp_oper_flags & FIB_PATH_OPER_FLAG_RECURSIVE_LOOP);
2079 }
2080
/**
 * Encode one path into the caller's vector of route-path encodings.
 * The signature matches a path-list walk callback: ctx is a
 * fib_route_path_encode_t** to which one element is appended.
 *
 * @param path_list_index the owning path-list (not used here)
 * @param path_index      the path to encode
 * @param ctx             fib_route_path_encode_t** vector to append to
 * @return 0 if path_index does not resolve to a path, else 1
 */
int
fib_path_encode (fib_node_index_t path_list_index,
                 fib_node_index_t path_index,
                 void *ctx)
{
    fib_route_path_encode_t **api_rpaths = ctx;
    fib_route_path_encode_t *api_rpath;
    fib_path_t *path;

    path = fib_path_get(path_index);
    if (!path)
      return (0);
    vec_add2(*api_rpaths, api_rpath, 1);
    /* common fields first; sw_if_index defaults to 'none' (~0) and is
     * overwritten below for interface-bearing path types */
    api_rpath->rpath.frp_weight = path->fp_weight;
    api_rpath->rpath.frp_proto = path->fp_nh_proto;
    api_rpath->rpath.frp_sw_if_index = ~0;
    /*
     * NOTE(review): exclusive.fp_ex_dpo is read for every path type,
     * not only EXCLUSIVE - presumably the per-type union members
     * overlay so this is the path's DPO in all cases; confirm against
     * the fib_path_t definition before relying on it.
     */
    api_rpath->dpo = path->exclusive.fp_ex_dpo;
    switch (path->fp_type)
      {
      case FIB_PATH_TYPE_RECEIVE:
        api_rpath->rpath.frp_addr = path->receive.fp_addr;
        api_rpath->rpath.frp_sw_if_index = path->receive.fp_interface;
        break;
      case FIB_PATH_TYPE_ATTACHED:
        api_rpath->rpath.frp_sw_if_index = path->attached.fp_interface;
        break;
      case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
        api_rpath->rpath.frp_sw_if_index = path->attached_next_hop.fp_interface;
        api_rpath->rpath.frp_addr = path->attached_next_hop.fp_nh;
        break;
      case FIB_PATH_TYPE_SPECIAL:
        break;
      case FIB_PATH_TYPE_DEAG:
        break;
      case FIB_PATH_TYPE_RECURSIVE:
        api_rpath->rpath.frp_addr = path->recursive.fp_nh.fp_ip;
        break;
      default:
        /* other path types carry no extra encodable attributes */
        break;
      }
    return (1);
}
2123
2124 fib_protocol_t
2125 fib_path_get_proto (fib_node_index_t path_index)
2126 {
2127     fib_path_t *path;
2128
2129     path = fib_path_get(path_index);
2130
2131     return (path->fp_nh_proto);
2132 }
2133
/**
 * Module initialisation: register the FIB path node type and its
 * virtual function table with the FIB node infrastructure.
 */
void
fib_path_module_init (void)
{
    fib_node_register_type (FIB_NODE_TYPE_PATH, &fib_path_vft);
}
2139
2140 static clib_error_t *
2141 show_fib_path_command (vlib_main_t * vm,
2142                         unformat_input_t * input,
2143                         vlib_cli_command_t * cmd)
2144 {
2145     fib_node_index_t pi;
2146     fib_path_t *path;
2147
2148     if (unformat (input, "%d", &pi))
2149     {
2150         /*
2151          * show one in detail
2152          */
2153         if (!pool_is_free_index(fib_path_pool, pi))
2154         {
2155             path = fib_path_get(pi);
2156             u8 *s = fib_path_format(pi, NULL);
2157             s = format(s, "children:");
2158             s = fib_node_children_format(path->fp_node.fn_children, s);
2159             vlib_cli_output (vm, "%s", s);
2160             vec_free(s);
2161         }
2162         else
2163         {
2164             vlib_cli_output (vm, "path %d invalid", pi);
2165         }
2166     }
2167     else
2168     {
2169         vlib_cli_output (vm, "FIB Paths");
2170         pool_foreach(path, fib_path_pool,
2171         ({
2172             vlib_cli_output (vm, "%U", format_fib_path, path);
2173         }));
2174     }
2175
2176     return (NULL);
2177 }
2178
/*
 * Register the "show fib paths" CLI command; dispatched to
 * show_fib_path_command above.
 */
VLIB_CLI_COMMAND (show_fib_path, static) = {
  .path = "show fib paths",
  .function = show_fib_path_command,
  .short_help = "show fib paths",
};