/* vpp.git: src/vnet/fib/fib_path.c — commit: "Labelled attached paths via an MPLS tunnel" */
1 /*
2  * Copyright (c) 2016 Cisco and/or its affiliates.
3  * Licensed under the Apache License, Version 2.0 (the "License");
4  * you may not use this file except in compliance with the License.
5  * You may obtain a copy of the License at:
6  *
7  *     http://www.apache.org/licenses/LICENSE-2.0
8  *
9  * Unless required by applicable law or agreed to in writing, software
10  * distributed under the License is distributed on an "AS IS" BASIS,
11  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12  * See the License for the specific language governing permissions and
13  * limitations under the License.
14  */
15
16 #include <vlib/vlib.h>
17 #include <vnet/vnet.h>
18 #include <vnet/ip/format.h>
19 #include <vnet/ip/ip.h>
20 #include <vnet/dpo/drop_dpo.h>
21 #include <vnet/dpo/receive_dpo.h>
22 #include <vnet/dpo/load_balance_map.h>
23 #include <vnet/dpo/lookup_dpo.h>
24 #include <vnet/dpo/interface_dpo.h>
25 #include <vnet/dpo/mpls_disposition.h>
26
27 #include <vnet/adj/adj.h>
28 #include <vnet/adj/adj_mcast.h>
29
30 #include <vnet/fib/fib_path.h>
31 #include <vnet/fib/fib_node.h>
32 #include <vnet/fib/fib_table.h>
33 #include <vnet/fib/fib_entry.h>
34 #include <vnet/fib/fib_path_list.h>
35 #include <vnet/fib/fib_internal.h>
36 #include <vnet/fib/fib_urpf_list.h>
37 #include <vnet/fib/mpls_fib.h>
38
39 /**
 * Enumeration of path types
41  */
typedef enum fib_path_type_t_ {
    /**
     * Marker. Add new types after this one.
     */
    FIB_PATH_TYPE_FIRST = 0,
    /**
     * Attached-nexthop. An interface and a nexthop are known.
     */
    FIB_PATH_TYPE_ATTACHED_NEXT_HOP = FIB_PATH_TYPE_FIRST,
    /**
     * attached. Only the interface is known.
     */
    FIB_PATH_TYPE_ATTACHED,
    /**
     * recursive. Only the next-hop is known; it resolves via a
     * lookup in another FIB entry.
     */
    FIB_PATH_TYPE_RECURSIVE,
    /**
     * special. nothing is known, so we drop.
     */
    FIB_PATH_TYPE_SPECIAL,
    /**
     * exclusive. user provided adj.
     */
    FIB_PATH_TYPE_EXCLUSIVE,
    /**
     * deag (de-aggregation). Link to a lookup adj in the next table
     */
    FIB_PATH_TYPE_DEAG,
    /**
     * interface receive.
     */
    FIB_PATH_TYPE_INTF_RX,
    /**
     * receive. it's for-us.
     */
    FIB_PATH_TYPE_RECEIVE,
    /**
     * Marker. Add new types before this one, then update it.
     */
    FIB_PATH_TYPE_LAST = FIB_PATH_TYPE_RECEIVE,
} __attribute__ ((packed)) fib_path_type_t;
/* packed: the type is the union selector in fib_path_t, keep it to one byte */
84
/**
 * The maximum number of path_types
 */
#define FIB_PATH_TYPE_MAX (FIB_PATH_TYPE_LAST + 1)

/**
 * Indexed-by-type table of human-readable path-type names.
 * Used to initialise fib_path_type_names below; every
 * fib_path_type_t value must have an entry.
 */
#define FIB_PATH_TYPES {                                        \
    [FIB_PATH_TYPE_ATTACHED_NEXT_HOP] = "attached-nexthop",     \
    [FIB_PATH_TYPE_ATTACHED]          = "attached",             \
    [FIB_PATH_TYPE_RECURSIVE]         = "recursive",            \
    [FIB_PATH_TYPE_SPECIAL]           = "special",              \
    [FIB_PATH_TYPE_EXCLUSIVE]         = "exclusive",            \
    [FIB_PATH_TYPE_DEAG]              = "deag",                 \
    [FIB_PATH_TYPE_INTF_RX]           = "intf-rx",              \
    [FIB_PATH_TYPE_RECEIVE]           = "receive",              \
}

/**
 * Iterate _item over each path type, FIRST to LAST inclusive.
 */
#define FOR_EACH_FIB_PATH_TYPE(_item) \
    for (_item = FIB_PATH_TYPE_FIRST; _item <= FIB_PATH_TYPE_LAST; _item++)
103
104 /**
 * Enumeration of path operational (i.e. derived) attributes
106  */
typedef enum fib_path_oper_attribute_t_ {
    /**
     * Marker. Add new types after this one.
     */
    FIB_PATH_OPER_ATTRIBUTE_FIRST = 0,
    /**
     * The path forms part of a recursive loop.
     */
    FIB_PATH_OPER_ATTRIBUTE_RECURSIVE_LOOP = FIB_PATH_OPER_ATTRIBUTE_FIRST,
    /**
     * The path is resolved
     */
    FIB_PATH_OPER_ATTRIBUTE_RESOLVED,
    /**
     * The path is attached, despite what the next-hop may say.
     */
    FIB_PATH_OPER_ATTRIBUTE_ATTACHED,
    /**
     * The path has become a permanent drop.
     */
    FIB_PATH_OPER_ATTRIBUTE_DROP,
    /**
     * Marker. Add new types before this one, then update it.
     */
    FIB_PATH_OPER_ATTRIBUTE_LAST = FIB_PATH_OPER_ATTRIBUTE_DROP,
} __attribute__ ((packed)) fib_path_oper_attribute_t;
/* packed: stored per-path as the bit positions of fib_path_oper_flags_t */
133
/**
 * The maximum number of path operational attributes
 */
#define FIB_PATH_OPER_ATTRIBUTE_MAX (FIB_PATH_OPER_ATTRIBUTE_LAST + 1)

/**
 * Indexed-by-attribute table of names. format_fib_path indexes this
 * array for every attribute bit set in fp_oper_flags, so every
 * fib_path_oper_attribute_t value needs an entry.
 *
 * FIX: the ATTACHED entry was missing, leaving a NULL in the table
 * that format_fib_path would pass to a "%s" directive whenever
 * FIB_PATH_OPER_FLAG_ATTACHED was set.
 */
#define FIB_PATH_OPER_ATTRIBUTES {                                      \
    [FIB_PATH_OPER_ATTRIBUTE_RECURSIVE_LOOP] = "recursive-loop",        \
    [FIB_PATH_OPER_ATTRIBUTE_RESOLVED]       = "resolved",              \
    [FIB_PATH_OPER_ATTRIBUTE_ATTACHED]       = "attached",              \
    [FIB_PATH_OPER_ATTRIBUTE_DROP]           = "drop",                  \
}

/**
 * Iterate _item over each operational attribute, FIRST to LAST inclusive.
 */
#define FOR_EACH_FIB_PATH_OPER_ATTRIBUTE(_item) \
    for (_item = FIB_PATH_OPER_ATTRIBUTE_FIRST; \
         _item <= FIB_PATH_OPER_ATTRIBUTE_LAST; \
         _item++)
149
/**
 * Path flags derived from the operational attributes: each flag is
 * the 1-bit left-shifted by its attribute's enum value.
 */
typedef enum fib_path_oper_flags_t_ {
    FIB_PATH_OPER_FLAG_NONE = 0,
    FIB_PATH_OPER_FLAG_RECURSIVE_LOOP = (1 << FIB_PATH_OPER_ATTRIBUTE_RECURSIVE_LOOP),
    FIB_PATH_OPER_FLAG_DROP = (1 << FIB_PATH_OPER_ATTRIBUTE_DROP),
    FIB_PATH_OPER_FLAG_RESOLVED = (1 << FIB_PATH_OPER_ATTRIBUTE_RESOLVED),
    FIB_PATH_OPER_FLAG_ATTACHED = (1 << FIB_PATH_OPER_ATTRIBUTE_ATTACHED),
} __attribute__ ((packed)) fib_path_oper_flags_t;
160
/**
 * A FIB path
 */
typedef struct fib_path_t_ {
    /**
     * A path is a node in the FIB graph.
     */
    fib_node_t fp_node;

    /**
     * The index of the path-list to which this path belongs
     */
    u32 fp_pl_index;

    /**
     * This marks the start of the memory area used to hash
     * the path
     */
    STRUCT_MARK(path_hash_start);

    /**
     * Configuration Flags
     */
    fib_path_cfg_flags_t fp_cfg_flags;

    /**
     * The type of the path. This is the selector for the union
     */
    fib_path_type_t fp_type;

    /**
     * The protocol of the next-hop, i.e. the address family of the
     * next-hop's address. We can't derive this from the address itself
     * since the address can be all zeros
     */
    fib_protocol_t fp_nh_proto;

    /**
     * UCMP [unnormalised] weight
     */
    u32 fp_weight;

    /**
     * per-type union of the data required to resolve the path.
     * fp_type selects the active member; the members are NOT
     * layout-compatible (e.g. attached_next_hop.fp_interface and
     * attached.fp_interface are at different offsets), so reading
     * the wrong member is a bug.
     */
    union {
        struct {
            /**
             * The next-hop
             */
            ip46_address_t fp_nh;
            /**
             * The interface
             */
            u32 fp_interface;
        } attached_next_hop;
        struct {
            /**
             * The interface
             */
            u32 fp_interface;
        } attached;
        struct {
            union
            {
                /**
                 * The next-hop
                 */
                ip46_address_t fp_ip;
                struct {
                    /**
                     * The local label to resolve through.
                     */
                    mpls_label_t fp_local_label;
                    /**
                     * The EOS bit of the resolving label
                     */
                    mpls_eos_bit_t fp_eos;
                };
            } fp_nh;
            /**
             * The FIB table index in which to find the next-hop.
             */
            fib_node_index_t fp_tbl_id;
        } recursive;
        struct {
            /**
             * The FIB index in which to perform the next lookup
             */
            fib_node_index_t fp_tbl_id;
            /**
             * The RPF-ID to tag the packets with
             */
            fib_rpf_id_t fp_rpf_id;
        } deag;
        struct {
        } special;
        struct {
            /**
             * The user provided 'exclusive' DPO
             */
            dpo_id_t fp_ex_dpo;
        } exclusive;
        struct {
            /**
             * The interface on which the local address is configured
             */
            u32 fp_interface;
            /**
             * The next-hop
             */
            ip46_address_t fp_addr;
        } receive;
        struct {
            /**
             * The interface on which the packets will be input.
             */
            u32 fp_interface;
        } intf_rx;
    };
    STRUCT_MARK(path_hash_end);

    /**
     * Members in this last section represent information that is
     * derived during resolution. It should not be copied to new paths
     * nor compared.
     */

    /**
     * Operational Flags
     */
    fib_path_oper_flags_t fp_oper_flags;

    /**
     * the resolving via fib. not part of the union, since it is not part
     * of the path's hash.
     */
    fib_node_index_t fp_via_fib;

    /**
     * The Data-path objects through which this path resolves for IP.
     */
    dpo_id_t fp_dpo;

    /**
     * the index of this path in the parent's child list.
     */
    u32 fp_sibling;
} fib_path_t;
310
/*
 * Array of strings/names for the path types and attributes,
 * indexed by the corresponding enum values.
 */
static const char *fib_path_type_names[] = FIB_PATH_TYPES;
static const char *fib_path_oper_attribute_names[] = FIB_PATH_OPER_ATTRIBUTES;
static const char *fib_path_cfg_attribute_names[]  = FIB_PATH_CFG_ATTRIBUTES;

/*
 * The memory pool from which we allocate all the paths
 */
static fib_path_t *fib_path_pool;

/*
 * Debug macro: formats the path into a temporary vector, logs it
 * with clib_warning, then frees the vector. Compiles to nothing
 * unless FIB_DEBUG is defined.
 */
#ifdef FIB_DEBUG
#define FIB_PATH_DBG(_p, _fmt, _args...)                        \
{                                                               \
    u8 *_tmp = NULL;                                            \
    _tmp = fib_path_format(fib_path_get_index(_p), _tmp);       \
    clib_warning("path:[%d:%s]:" _fmt,                          \
                 fib_path_get_index(_p), _tmp,                  \
                 ##_args);                                      \
    vec_free(_tmp);                                             \
}
#else
#define FIB_PATH_DBG(_p, _fmt, _args...)
#endif
339
340 static fib_path_t *
341 fib_path_get (fib_node_index_t index)
342 {
343     return (pool_elt_at_index(fib_path_pool, index));
344 }
345
346 static fib_node_index_t 
347 fib_path_get_index (fib_path_t *path)
348 {
349     return (path - fib_path_pool);
350 }
351
352 static fib_node_t *
353 fib_path_get_node (fib_node_index_t index)
354 {
355     return ((fib_node_t*)fib_path_get(index));
356 }
357
/**
 * Down-cast a generic graph node to a path. Valid only when the node
 * really is a path; the debug build asserts on the node type.
 */
static fib_path_t*
fib_path_from_fib_node (fib_node_t *node)
{
#if CLIB_DEBUG > 0
    ASSERT(FIB_NODE_TYPE_PATH == node->fn_type);
#endif
    return ((fib_path_t*)node);
}
366
367 u8 *
368 format_fib_path (u8 * s, va_list * args)
369 {
370     fib_path_t *path = va_arg (*args, fib_path_t *);
371     vnet_main_t * vnm = vnet_get_main();
372     fib_path_oper_attribute_t oattr;
373     fib_path_cfg_attribute_t cattr;
374
375     s = format (s, "      index:%d ", fib_path_get_index(path));
376     s = format (s, "pl-index:%d ", path->fp_pl_index);
377     s = format (s, "%U ", format_fib_protocol, path->fp_nh_proto);
378     s = format (s, "weight=%d ", path->fp_weight);
379     s = format (s, "%s: ", fib_path_type_names[path->fp_type]);
380     if (FIB_PATH_OPER_FLAG_NONE != path->fp_oper_flags) {
381         s = format(s, " oper-flags:");
382         FOR_EACH_FIB_PATH_OPER_ATTRIBUTE(oattr) {
383             if ((1<<oattr) & path->fp_oper_flags) {
384                 s = format (s, "%s,", fib_path_oper_attribute_names[oattr]);
385             }
386         }
387     }
388     if (FIB_PATH_CFG_FLAG_NONE != path->fp_cfg_flags) {
389         s = format(s, " cfg-flags:");
390         FOR_EACH_FIB_PATH_CFG_ATTRIBUTE(cattr) {
391             if ((1<<cattr) & path->fp_cfg_flags) {
392                 s = format (s, "%s,", fib_path_cfg_attribute_names[cattr]);
393             }
394         }
395     }
396     s = format(s, "\n       ");
397
398     switch (path->fp_type)
399     {
400     case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
401         s = format (s, "%U", format_ip46_address,
402                     &path->attached_next_hop.fp_nh,
403                     IP46_TYPE_ANY);
404         if (path->fp_oper_flags & FIB_PATH_OPER_FLAG_DROP)
405         {
406             s = format (s, " if_index:%d", path->attached_next_hop.fp_interface);
407         }
408         else
409         {
410             s = format (s, " %U",
411                         format_vnet_sw_interface_name,
412                         vnm,
413                         vnet_get_sw_interface(
414                             vnm,
415                             path->attached_next_hop.fp_interface));
416             if (vnet_sw_interface_is_p2p(vnet_get_main(),
417                                          path->attached_next_hop.fp_interface))
418             {
419                 s = format (s, " (p2p)");
420             }
421         }
422         if (!dpo_id_is_valid(&path->fp_dpo))
423         {
424             s = format(s, "\n          unresolved");
425         }
426         else
427         {
428             s = format(s, "\n          %U",
429                        format_dpo_id,
430                        &path->fp_dpo, 13);
431         }
432         break;
433     case FIB_PATH_TYPE_ATTACHED:
434         if (path->fp_oper_flags & FIB_PATH_OPER_FLAG_DROP)
435         {
436             s = format (s, " if_index:%d", path->attached_next_hop.fp_interface);
437         }
438         else
439         {
440             s = format (s, " %U",
441                         format_vnet_sw_interface_name,
442                         vnm,
443                         vnet_get_sw_interface(
444                             vnm,
445                             path->attached.fp_interface));
446         }
447         break;
448     case FIB_PATH_TYPE_RECURSIVE:
449         if (FIB_PROTOCOL_MPLS == path->fp_nh_proto)
450         {
451             s = format (s, "via %U %U",
452                         format_mpls_unicast_label,
453                         path->recursive.fp_nh.fp_local_label,
454                         format_mpls_eos_bit,
455                         path->recursive.fp_nh.fp_eos);
456         }
457         else
458         {
459             s = format (s, "via %U",
460                         format_ip46_address,
461                         &path->recursive.fp_nh.fp_ip,
462                         IP46_TYPE_ANY);
463         }
464         s = format (s, " in fib:%d",
465                     path->recursive.fp_tbl_id,
466                     path->fp_via_fib); 
467         s = format (s, " via-fib:%d", path->fp_via_fib); 
468         s = format (s, " via-dpo:[%U:%d]",
469                     format_dpo_type, path->fp_dpo.dpoi_type, 
470                     path->fp_dpo.dpoi_index);
471
472         break;
473     case FIB_PATH_TYPE_RECEIVE:
474     case FIB_PATH_TYPE_INTF_RX:
475     case FIB_PATH_TYPE_SPECIAL:
476     case FIB_PATH_TYPE_DEAG:
477     case FIB_PATH_TYPE_EXCLUSIVE:
478         if (dpo_id_is_valid(&path->fp_dpo))
479         {
480             s = format(s, "%U", format_dpo_id,
481                        &path->fp_dpo, 2);
482         }
483         break;
484     }
485     return (s);
486 }
487
488 u8 *
489 fib_path_format (fib_node_index_t pi, u8 *s)
490 {
491     fib_path_t *path;
492
493     path = fib_path_get(pi);
494     ASSERT(NULL != path);
495
496     return (format (s, "%U", format_fib_path, path));
497 }
498
499 u8 *
500 fib_path_adj_format (fib_node_index_t pi,
501                      u32 indent,
502                      u8 *s)
503 {
504     fib_path_t *path;
505
506     path = fib_path_get(pi);
507     ASSERT(NULL != path);
508
509     if (!dpo_id_is_valid(&path->fp_dpo))
510     {
511         s = format(s, " unresolved");
512     }
513     else
514     {
515         s = format(s, "%U", format_dpo_id,
516                    &path->fp_dpo, 2);
517     }
518
519     return (s);
520 }
521
/*
 * fib_path_last_lock_gone
 *
 * We don't share paths, we share path lists, so the [un]lock functions
 * are no-ops
 */
static void
fib_path_last_lock_gone (fib_node_t *node)
{
    /* paths are never individually locked; reaching here is a logic error */
    ASSERT(0);
}
533
534 static const adj_index_t
535 fib_path_attached_next_hop_get_adj (fib_path_t *path,
536                                     vnet_link_t link)
537 {
538     if (vnet_sw_interface_is_p2p(vnet_get_main(),
539                                  path->attached_next_hop.fp_interface))
540     {
541         /*
542          * if the interface is p2p then the adj for the specific
543          * neighbour on that link will never exist. on p2p links
544          * the subnet address (the attached route) links to the
545          * auto-adj (see below), we want that adj here too.
546          */
547         return (adj_nbr_add_or_lock(path->fp_nh_proto,
548                                     link,
549                                     &zero_addr,
550                                     path->attached_next_hop.fp_interface));
551     }
552     else
553     {
554         return (adj_nbr_add_or_lock(path->fp_nh_proto,
555                                     link,
556                                     &path->attached_next_hop.fp_nh,
557                                     path->attached_next_hop.fp_interface));
558     }
559 }
560
/**
 * Resolve an attached-next-hop path: point its DPO at the neighbour
 * adjacency, register for adjacency updates, and derive the RESOLVED
 * operational flag from interface/adj state.
 */
static void
fib_path_attached_next_hop_set (fib_path_t *path)
{
    /*
     * resolve directly via the adjacency described by the
     * interface and next-hop
     */
    dpo_set(&path->fp_dpo,
            DPO_ADJACENCY,
            fib_proto_to_dpo(path->fp_nh_proto),
            fib_path_attached_next_hop_get_adj(
                 path,
                 fib_proto_to_link(path->fp_nh_proto)));

    /*
     * become a child of the adjacency so we receive updates
     * when its rewrite changes
     */
    path->fp_sibling = adj_child_add(path->fp_dpo.dpoi_index,
                                     FIB_NODE_TYPE_PATH,
                                     fib_path_get_index(path));

    /* the path is unresolved if either the interface is admin down
     * or the adjacency itself is not up */
    if (!vnet_sw_interface_is_admin_up(vnet_get_main(),
                                      path->attached_next_hop.fp_interface) ||
        !adj_is_up(path->fp_dpo.dpoi_index))
    {
        path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
    }
}
590
591 static const adj_index_t
592 fib_path_attached_get_adj (fib_path_t *path,
593                            vnet_link_t link)
594 {
595     if (vnet_sw_interface_is_p2p(vnet_get_main(),
596                                  path->attached.fp_interface))
597     {
598         /*
599          * point-2-point interfaces do not require a glean, since
600          * there is nothing to ARP. Install a rewrite/nbr adj instead
601          */
602         return (adj_nbr_add_or_lock(path->fp_nh_proto,
603                                     link,
604                                     &zero_addr,
605                                     path->attached.fp_interface));
606     }
607     else
608     {
609         return (adj_glean_add_or_lock(path->fp_nh_proto,
610                                       path->attached.fp_interface,
611                                       NULL));
612     }
613 }
614
/*
 * Create or update the path's recursive adj: pull the forwarding
 * contributed by the via-entry, override it with a drop when any
 * recursion constraint is violated, and copy the result into *dpo.
 */
static void
fib_path_recursive_adj_update (fib_path_t *path,
                               fib_forward_chain_type_t fct,
                               dpo_id_t *dpo)
{
    dpo_id_t via_dpo = DPO_INVALID;

    /*
     * get the DPO to resolve through from the via-entry
     */
    fib_entry_contribute_forwarding(path->fp_via_fib,
                                    fct,
                                    &via_dpo);


    /*
     * hope for the best - clear if restrictions apply.
     */
    path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RESOLVED;

    /*
     * Validate any recursion constraints and over-ride the via
     * adj if not met
     */
    if (path->fp_oper_flags & FIB_PATH_OPER_FLAG_RECURSIVE_LOOP)
    {
        path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
        dpo_copy(&via_dpo, drop_dpo_get(fib_proto_to_dpo(path->fp_nh_proto)));
    }
    else if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_RESOLVE_HOST)
    {
        /*
         * the via FIB must be a host route.
         * note the via FIB just added will always be a host route
         * since it is an RR source added host route. So what we need to
         * check is whether the route has other sources. If it does then
         * some other source has added it as a host route. If it doesn't
         * then it was added only here and inherits forwarding from a cover.
         * the cover is not a host route.
         * The RR source is the lowest priority source, so we check if it
         * is the best. if it is there are no other sources.
         */
        if (fib_entry_get_best_source(path->fp_via_fib) >= FIB_SOURCE_RR)
        {
            path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
            dpo_copy(&via_dpo, drop_dpo_get(fib_proto_to_dpo(path->fp_nh_proto)));

            /*
             * PIC edge trigger. let the load-balance maps know
             */
            load_balance_map_path_state_change(fib_path_get_index(path));
        }
    }
    else if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_RESOLVE_ATTACHED)
    {
        /*
         * RR source entries inherit the flags from the cover, so
         * we can check the via directly
         */
        if (!(FIB_ENTRY_FLAG_ATTACHED & fib_entry_get_flags(path->fp_via_fib)))
        {
            path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
            dpo_copy(&via_dpo, drop_dpo_get(fib_proto_to_dpo(path->fp_nh_proto)));

            /*
             * PIC edge trigger. let the load-balance maps know
             */
            load_balance_map_path_state_change(fib_path_get_index(path));
        }
    }
    /*
     * check for over-riding factors on the FIB entry itself
     */
    if (!fib_entry_is_resolved(path->fp_via_fib))
    {
        path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
        dpo_copy(&via_dpo, drop_dpo_get(fib_proto_to_dpo(path->fp_nh_proto)));

        /*
         * PIC edge trigger. let the load-balance maps know
         */
        load_balance_map_path_state_change(fib_path_get_index(path));
    }

    /*
     * update the path's contributed DPO
     */
    dpo_copy(dpo, &via_dpo);

    /* NOTE(review): these arguments do not appear to match the "%U"
     * directive; only compiled under FIB_DEBUG - verify before enabling */
    FIB_PATH_DBG(path, "recursive update: %U",
                 fib_get_lookup_main(path->fp_nh_proto),
                 &path->fp_dpo, 2);

    dpo_reset(&via_dpo);
}
713
714 /*
715  * fib_path_is_permanent_drop
716  *
717  * Return !0 if the path is configured to permanently drop,
718  * despite other attributes.
719  */
720 static int
721 fib_path_is_permanent_drop (fib_path_t *path)
722 {
723     return ((path->fp_cfg_flags & FIB_PATH_CFG_FLAG_DROP) ||
724             (path->fp_oper_flags & FIB_PATH_OPER_FLAG_DROP));
725 }
726
/*
 * fib_path_unresolve
 *
 * Remove our dependency on the resolution target: undo the per-type
 * linkage/locks, then reset the path's DPO and clear RESOLVED.
 */
static void
fib_path_unresolve (fib_path_t *path)
{
    /*
     * the forced drop path does not need unresolving
     */
    if (fib_path_is_permanent_drop(path))
    {
        return;
    }

    switch (path->fp_type)
    {
    case FIB_PATH_TYPE_RECURSIVE:
        if (FIB_NODE_INDEX_INVALID != path->fp_via_fib)
        {
            fib_prefix_t pfx;

            /* stop tracking the via-entry and remove the FIB_SOURCE_RR
             * entry (presumably added at resolve time - that code is
             * not in this view) */
            fib_entry_get_prefix(path->fp_via_fib, &pfx);
            fib_entry_child_remove(path->fp_via_fib,
                                   path->fp_sibling);
            fib_table_entry_special_remove(path->recursive.fp_tbl_id,
                                           &pfx,
                                           FIB_SOURCE_RR);
            path->fp_via_fib = FIB_NODE_INDEX_INVALID;
        }
        break;
    case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
    case FIB_PATH_TYPE_ATTACHED:
        /* drop the child linkage and the lock we hold on the adj */
        adj_child_remove(path->fp_dpo.dpoi_index,
                         path->fp_sibling);
        adj_unlock(path->fp_dpo.dpoi_index);
        break;
    case FIB_PATH_TYPE_EXCLUSIVE:
        dpo_reset(&path->exclusive.fp_ex_dpo);
        break;
    case FIB_PATH_TYPE_SPECIAL:
    case FIB_PATH_TYPE_RECEIVE:
    case FIB_PATH_TYPE_INTF_RX:
    case FIB_PATH_TYPE_DEAG:
        /*
         * these hold only the path's DPO, which is reset below.
         */
        break;
    }

    /*
     * release the adj we were holding and pick up the
     * drop just in case.
     */
    dpo_reset(&path->fp_dpo);
    path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;

    return;
}
787
788 static fib_forward_chain_type_t
789 fib_path_to_chain_type (const fib_path_t *path)
790 {
791     switch (path->fp_nh_proto)
792     {
793     case FIB_PROTOCOL_IP4:
794         return (FIB_FORW_CHAIN_TYPE_UNICAST_IP4);
795     case FIB_PROTOCOL_IP6:
796         return (FIB_FORW_CHAIN_TYPE_UNICAST_IP6);
797     case FIB_PROTOCOL_MPLS:
798         if (FIB_PATH_TYPE_RECURSIVE == path->fp_type &&
799             MPLS_EOS == path->recursive.fp_nh.fp_eos)
800         {
801             return (FIB_FORW_CHAIN_TYPE_MPLS_EOS);
802         }
803         else
804         {
805             return (FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS);
806         }
807     }
808     return (FIB_FORW_CHAIN_TYPE_UNICAST_IP4);
809 }
810
811 /*
812  * fib_path_back_walk_notify
813  *
 * A back walk has reached this path.
815  */
816 static fib_node_back_walk_rc_t
817 fib_path_back_walk_notify (fib_node_t *node,
818                            fib_node_back_walk_ctx_t *ctx)
819 {
820     fib_path_t *path;
821
822     path = fib_path_from_fib_node(node);
823
824     switch (path->fp_type)
825     {
826     case FIB_PATH_TYPE_RECURSIVE:
827         if (FIB_NODE_BW_REASON_FLAG_EVALUATE & ctx->fnbw_reason)
828         {
829             /*
830              * modify the recursive adjacency to use the new forwarding
831              * of the via-fib.
832              * this update is visible to packets in flight in the DP.
833              */
834             fib_path_recursive_adj_update(
835                 path,
836                 fib_path_to_chain_type(path),
837                 &path->fp_dpo);
838         }
839         if ((FIB_NODE_BW_REASON_FLAG_ADJ_UPDATE & ctx->fnbw_reason) ||
840             (FIB_NODE_BW_REASON_FLAG_ADJ_DOWN   & ctx->fnbw_reason))
841         {
842             /*
843              * ADJ updates (complete<->incomplete) do not need to propagate to
844              * recursive entries.
845              * The only reason its needed as far back as here, is that the adj
846              * and the incomplete adj are a different DPO type, so the LBs need
847              * to re-stack.
848              * If this walk was quashed in the fib_entry, then any non-fib_path
849              * children (like tunnels that collapse out the LB when they stack)
850              * would not see the update.
851              */
852             return (FIB_NODE_BACK_WALK_CONTINUE);
853         }
854         break;
855     case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
856         /*
857 FIXME comment
858          * ADJ_UPDATE backwalk pass silently through here and up to
859          * the path-list when the multipath adj collapse occurs.
860          * The reason we do this is that the assumtption is that VPP
861          * runs in an environment where the Control-Plane is remote
862          * and hence reacts slowly to link up down. In order to remove
863          * this down link from the ECMP set quickly, we back-walk.
864          * VPP also has dedicated CPUs, so we are not stealing resources
865          * from the CP to do so.
866          */
867         if (FIB_NODE_BW_REASON_FLAG_INTERFACE_UP & ctx->fnbw_reason)
868         {
869             if (path->fp_oper_flags & FIB_PATH_OPER_FLAG_RESOLVED)
870             {
871                 /*
872                  * alreday resolved. no need to walk back again
873                  */
874                 return (FIB_NODE_BACK_WALK_CONTINUE);
875             }
876             path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RESOLVED;
877         }
878         if (FIB_NODE_BW_REASON_FLAG_INTERFACE_DOWN & ctx->fnbw_reason)
879         {
880             if (!(path->fp_oper_flags & FIB_PATH_OPER_FLAG_RESOLVED))
881             {
882                 /*
883                  * alreday unresolved. no need to walk back again
884                  */
885                 return (FIB_NODE_BACK_WALK_CONTINUE);
886             }
887             path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
888         }
889         if (FIB_NODE_BW_REASON_FLAG_INTERFACE_DELETE & ctx->fnbw_reason)
890         {
891             /*
892              * The interface this path resolves through has been deleted.
893              * This will leave the path in a permanent drop state. The route
894              * needs to be removed and readded (and hence the path-list deleted)
895              * before it can forward again.
896              */
897             fib_path_unresolve(path);
898             path->fp_oper_flags |= FIB_PATH_OPER_FLAG_DROP;
899         }
900         if (FIB_NODE_BW_REASON_FLAG_ADJ_UPDATE & ctx->fnbw_reason)
901         {
902             /*
903              * restack the DPO to pick up the correct DPO sub-type
904              */
905             uword if_is_up;
906             adj_index_t ai;
907
908             if_is_up = vnet_sw_interface_is_admin_up(
909                            vnet_get_main(),
910                            path->attached_next_hop.fp_interface);
911
912             ai = fib_path_attached_next_hop_get_adj(
913                      path,
914                      fib_proto_to_link(path->fp_nh_proto));
915
916             path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
917             if (if_is_up && adj_is_up(ai))
918             {
919                 path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RESOLVED;
920             }
921
922             dpo_set(&path->fp_dpo, DPO_ADJACENCY,
923                     fib_proto_to_dpo(path->fp_nh_proto),
924                     ai);
925             adj_unlock(ai);
926
927             if (!if_is_up)
928             {
929                 /*
930                  * If the interface is not up there is no reason to walk
931                  * back to children. if we did they would only evalute
932                  * that this path is unresolved and hence it would
933                  * not contribute the adjacency - so it would be wasted
934                  * CPU time.
935                  */
936                 return (FIB_NODE_BACK_WALK_CONTINUE);
937             }
938         }
939         if (FIB_NODE_BW_REASON_FLAG_ADJ_DOWN & ctx->fnbw_reason)
940         {
941             if (!(path->fp_oper_flags & FIB_PATH_OPER_FLAG_RESOLVED))
942             {
943                 /*
944                  * already unresolved. no need to walk back again
945                  */
946                 return (FIB_NODE_BACK_WALK_CONTINUE);
947             }
948             /*
949              * the adj has gone down. the path is no longer resolved.
950              */
951             path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
952         }
953         break;
954     case FIB_PATH_TYPE_ATTACHED:
955         /*
956          * FIXME; this could schedule a lower priority walk, since attached
957          * routes are not usually in ECMP configurations so the backwalk to
958          * the FIB entry does not need to be high priority
959          */
960         if (FIB_NODE_BW_REASON_FLAG_INTERFACE_UP & ctx->fnbw_reason)
961         {
962             path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RESOLVED;
963         }
964         if (FIB_NODE_BW_REASON_FLAG_INTERFACE_DOWN & ctx->fnbw_reason)
965         {
966             path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
967         }
968         if (FIB_NODE_BW_REASON_FLAG_INTERFACE_DELETE & ctx->fnbw_reason)
969         {
970             fib_path_unresolve(path);
971             path->fp_oper_flags |= FIB_PATH_OPER_FLAG_DROP;
972         }
973         break;
974     case FIB_PATH_TYPE_INTF_RX:
975         ASSERT(0);
976     case FIB_PATH_TYPE_DEAG:
977         /*
978          * FIXME When VRF delete is allowed this will need a poke.
979          */
980     case FIB_PATH_TYPE_SPECIAL:
981     case FIB_PATH_TYPE_RECEIVE:
982     case FIB_PATH_TYPE_EXCLUSIVE:
983         /*
984          * these path types have no parents. so to be
985          * walked from one is unexpected.
986          */
987         ASSERT(0);
988         break;
989     }
990
991     /*
992      * propagate the backwalk further to the path-list
993      */
994     fib_path_list_back_walk(path->fp_pl_index, ctx);
995
996     return (FIB_NODE_BACK_WALK_CONTINUE);
997 }
998
999 static void
1000 fib_path_memory_show (void)
1001 {
1002     fib_show_memory_usage("Path",
1003                           pool_elts(fib_path_pool),
1004                           pool_len(fib_path_pool),
1005                           sizeof(fib_path_t));
1006 }
1007
1008 /*
1009  * The FIB path's graph node virtual function table
1010  */
1011 static const fib_node_vft_t fib_path_vft = {
1012     .fnv_get = fib_path_get_node,
1013     .fnv_last_lock = fib_path_last_lock_gone,
1014     .fnv_back_walk = fib_path_back_walk_notify,
1015     .fnv_mem_show = fib_path_memory_show,
1016 };
1017
1018 static fib_path_cfg_flags_t
1019 fib_path_route_flags_to_cfg_flags (const fib_route_path_t *rpath)
1020 {
1021     fib_path_cfg_flags_t cfg_flags = FIB_PATH_CFG_FLAG_NONE;
1022
1023     if (rpath->frp_flags & FIB_ROUTE_PATH_RESOLVE_VIA_HOST)
1024         cfg_flags |= FIB_PATH_CFG_FLAG_RESOLVE_HOST;
1025     if (rpath->frp_flags & FIB_ROUTE_PATH_RESOLVE_VIA_ATTACHED)
1026         cfg_flags |= FIB_PATH_CFG_FLAG_RESOLVE_ATTACHED;
1027     if (rpath->frp_flags & FIB_ROUTE_PATH_LOCAL)
1028         cfg_flags |= FIB_PATH_CFG_FLAG_LOCAL;
1029     if (rpath->frp_flags & FIB_ROUTE_PATH_ATTACHED)
1030         cfg_flags |= FIB_PATH_CFG_FLAG_ATTACHED;
1031     if (rpath->frp_flags & FIB_ROUTE_PATH_INTF_RX)
1032         cfg_flags |= FIB_PATH_CFG_FLAG_INTF_RX;
1033     if (rpath->frp_flags & FIB_ROUTE_PATH_RPF_ID)
1034         cfg_flags |= FIB_PATH_CFG_FLAG_RPF_ID;
1035     if (rpath->frp_flags & FIB_ROUTE_PATH_EXCLUSIVE)
1036         cfg_flags |= FIB_PATH_CFG_FLAG_EXCLUSIVE;
1037     if (rpath->frp_flags & FIB_ROUTE_PATH_DROP)
1038         cfg_flags |= FIB_PATH_CFG_FLAG_DROP;
1039
1040     return (cfg_flags);
1041 }
1042
/*
 * fib_path_create
 *
 * Create and initialise a new path object from a route-path description.
 * The path's type is deduced from the config flags and the combination
 * of interface/address/FIB-index present in the route-path.
 * return the index of the path.
 */
fib_node_index_t
fib_path_create (fib_node_index_t pl_index,
                 const fib_route_path_t *rpath)
{
    fib_path_t *path;

    pool_get(fib_path_pool, path);
    memset(path, 0, sizeof(*path));

    fib_node_init(&path->fp_node,
                  FIB_NODE_TYPE_PATH);

    dpo_reset(&path->fp_dpo);
    path->fp_pl_index = pl_index;
    path->fp_nh_proto = rpath->frp_proto;
    path->fp_via_fib = FIB_NODE_INDEX_INVALID;
    path->fp_weight = rpath->frp_weight;
    if (0 == path->fp_weight)
    {
        /*
         * a weight of 0 is a meaningless value. We could either reject it, and thus force
         * clients to always use 1, or we can accept it and fixup appropriately.
         */
        path->fp_weight = 1;
    }
    path->fp_cfg_flags = fib_path_route_flags_to_cfg_flags(rpath);

    /*
     * deduce the path's type from the parameters and save what is needed.
     * flag-based types take priority; otherwise the interface/address
     * combination decides.
     */
    if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_LOCAL)
    {
        /* for-us traffic: punt to the local receive path */
        path->fp_type = FIB_PATH_TYPE_RECEIVE;
        path->receive.fp_interface = rpath->frp_sw_if_index;
        path->receive.fp_addr = rpath->frp_addr;
    }
    else if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_INTF_RX)
    {
        /* packets are made to appear as received on the given interface */
        path->fp_type = FIB_PATH_TYPE_INTF_RX;
        path->intf_rx.fp_interface = rpath->frp_sw_if_index;
    }
    else if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_RPF_ID)
    {
        /* multicast de-aggregation: lookup in another table with an RPF-ID */
        path->fp_type = FIB_PATH_TYPE_DEAG;
        path->deag.fp_tbl_id = rpath->frp_fib_index;
        path->deag.fp_rpf_id = rpath->frp_rpf_id;
    }
    else if (~0 != rpath->frp_sw_if_index)
    {
        /* an interface is given: attached if no next-hop address */
        if (ip46_address_is_zero(&rpath->frp_addr))
        {
            path->fp_type = FIB_PATH_TYPE_ATTACHED;
            path->attached.fp_interface = rpath->frp_sw_if_index;
        }
        else
        {
            path->fp_type = FIB_PATH_TYPE_ATTACHED_NEXT_HOP;
            path->attached_next_hop.fp_interface = rpath->frp_sw_if_index;
            path->attached_next_hop.fp_nh = rpath->frp_addr;
        }
    }
    else
    {
        /* no interface: special, deag or recursive depending on
         * whether an address and/or a table were supplied */
        if (ip46_address_is_zero(&rpath->frp_addr))
        {
            if (~0 == rpath->frp_fib_index)
            {
                path->fp_type = FIB_PATH_TYPE_SPECIAL;
            }
            else
            {
                path->fp_type = FIB_PATH_TYPE_DEAG;
                path->deag.fp_tbl_id = rpath->frp_fib_index;
            }
        }
        else
        {
            path->fp_type = FIB_PATH_TYPE_RECURSIVE;
            /* an MPLS path recurses via a label, an IP path via an address */
            if (FIB_PROTOCOL_MPLS == path->fp_nh_proto)
            {
                path->recursive.fp_nh.fp_local_label = rpath->frp_local_label;
                path->recursive.fp_nh.fp_eos = rpath->frp_eos;
            }
            else
            {
                path->recursive.fp_nh.fp_ip = rpath->frp_addr;
            }
            path->recursive.fp_tbl_id = rpath->frp_fib_index;
        }
    }

    FIB_PATH_DBG(path, "create");

    return (fib_path_get_index(path));
}
1144
1145 /*
1146  * fib_path_create_special
1147  *
1148  * Create and initialise a new path object.
1149  * return the index of the path.
1150  */
1151 fib_node_index_t
1152 fib_path_create_special (fib_node_index_t pl_index,
1153                          fib_protocol_t nh_proto,
1154                          fib_path_cfg_flags_t flags,
1155                          const dpo_id_t *dpo)
1156 {
1157     fib_path_t *path;
1158
1159     pool_get(fib_path_pool, path);
1160     memset(path, 0, sizeof(*path));
1161
1162     fib_node_init(&path->fp_node,
1163                   FIB_NODE_TYPE_PATH);
1164     dpo_reset(&path->fp_dpo);
1165
1166     path->fp_pl_index = pl_index;
1167     path->fp_weight = 1;
1168     path->fp_nh_proto = nh_proto;
1169     path->fp_via_fib = FIB_NODE_INDEX_INVALID;
1170     path->fp_cfg_flags = flags;
1171
1172     if (FIB_PATH_CFG_FLAG_DROP & flags)
1173     {
1174         path->fp_type = FIB_PATH_TYPE_SPECIAL;
1175     }
1176     else if (FIB_PATH_CFG_FLAG_LOCAL & flags)
1177     {
1178         path->fp_type = FIB_PATH_TYPE_RECEIVE;
1179         path->attached.fp_interface = FIB_NODE_INDEX_INVALID;
1180     }
1181     else
1182     {
1183         path->fp_type = FIB_PATH_TYPE_EXCLUSIVE;
1184         ASSERT(NULL != dpo);
1185         dpo_copy(&path->exclusive.fp_ex_dpo, dpo);
1186     }
1187
1188     return (fib_path_get_index(path));
1189 }
1190
1191 /*
1192  * fib_path_copy
1193  *
1194  * Copy a path. return index of new path.
1195  */
1196 fib_node_index_t
1197 fib_path_copy (fib_node_index_t path_index,
1198                fib_node_index_t path_list_index)
1199 {
1200     fib_path_t *path, *orig_path;
1201
1202     pool_get(fib_path_pool, path);
1203
1204     orig_path = fib_path_get(path_index);
1205     ASSERT(NULL != orig_path);
1206
1207     memcpy(path, orig_path, sizeof(*path));
1208
1209     FIB_PATH_DBG(path, "create-copy:%d", path_index);
1210
1211     /*
1212      * reset the dynamic section
1213      */
1214     fib_node_init(&path->fp_node, FIB_NODE_TYPE_PATH);
1215     path->fp_oper_flags     = FIB_PATH_OPER_FLAG_NONE;
1216     path->fp_pl_index  = path_list_index;
1217     path->fp_via_fib   = FIB_NODE_INDEX_INVALID;
1218     memset(&path->fp_dpo, 0, sizeof(path->fp_dpo));
1219     dpo_reset(&path->fp_dpo);
1220
1221     return (fib_path_get_index(path));
1222 }
1223
1224 /*
1225  * fib_path_destroy
1226  *
1227  * destroy a path that is no longer required
1228  */
1229 void
1230 fib_path_destroy (fib_node_index_t path_index)
1231 {
1232     fib_path_t *path;
1233
1234     path = fib_path_get(path_index);
1235
1236     ASSERT(NULL != path);
1237     FIB_PATH_DBG(path, "destroy");
1238
1239     fib_path_unresolve(path);
1240
1241     fib_node_deinit(&path->fp_node);
1242     pool_put(fib_path_pool, path);
1243 }
1244
/*
 * fib_path_hash
 *
 * Hash a path on its configuration: only the region of the fib_path_t
 * between the path_hash_start and path_hash_end markers is hashed, so
 * dynamic state outside that region does not affect the result.
 */
uword
fib_path_hash (fib_node_index_t path_index)
{
    fib_path_t *path;

    path = fib_path_get(path_index);

    return (hash_memory(STRUCT_MARK_PTR(path, path_hash_start),
                        (STRUCT_OFFSET_OF(fib_path_t, path_hash_end) -
                         STRUCT_OFFSET_OF(fib_path_t, path_hash_start)),
                        0));
}
1262
1263 /*
1264  * fib_path_cmp_i
1265  *
1266  * Compare two paths for equivalence.
1267  */
1268 static int
1269 fib_path_cmp_i (const fib_path_t *path1,
1270                 const fib_path_t *path2)
1271 {
1272     int res;
1273
1274     res = 1;
1275
1276     /*
1277      * paths of different types and protocol are not equal.
1278      * different weights only are the same path.
1279      */
1280     if (path1->fp_type != path2->fp_type)
1281     {
1282         res = (path1->fp_type - path2->fp_type);
1283     }
1284     else if (path1->fp_nh_proto != path2->fp_nh_proto)
1285     {
1286         res = (path1->fp_nh_proto - path2->fp_nh_proto);
1287     }
1288     else
1289     {
1290         /*
1291          * both paths are of the same type.
1292          * consider each type and its attributes in turn.
1293          */
1294         switch (path1->fp_type)
1295         {
1296         case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
1297             res = ip46_address_cmp(&path1->attached_next_hop.fp_nh,
1298                                    &path2->attached_next_hop.fp_nh);
1299             if (0 == res) {
1300                 res = (path1->attached_next_hop.fp_interface -
1301                        path2->attached_next_hop.fp_interface);
1302             }
1303             break;
1304         case FIB_PATH_TYPE_ATTACHED:
1305             res = (path1->attached.fp_interface -
1306                    path2->attached.fp_interface);
1307             break;
1308         case FIB_PATH_TYPE_RECURSIVE:
1309             res = ip46_address_cmp(&path1->recursive.fp_nh,
1310                                    &path2->recursive.fp_nh);
1311  
1312             if (0 == res)
1313             {
1314                 res = (path1->recursive.fp_tbl_id - path2->recursive.fp_tbl_id);
1315             }
1316             break;
1317         case FIB_PATH_TYPE_DEAG:
1318             res = (path1->deag.fp_tbl_id - path2->deag.fp_tbl_id);
1319             if (0 == res)
1320             {
1321                 res = (path1->deag.fp_rpf_id - path2->deag.fp_rpf_id);
1322             }
1323             break;
1324         case FIB_PATH_TYPE_INTF_RX:
1325             res = (path1->intf_rx.fp_interface - path2->intf_rx.fp_interface);
1326             break;
1327         case FIB_PATH_TYPE_SPECIAL:
1328         case FIB_PATH_TYPE_RECEIVE:
1329         case FIB_PATH_TYPE_EXCLUSIVE:
1330             res = 0;
1331             break;
1332         }
1333     }
1334     return (res);
1335 }
1336
1337 /*
1338  * fib_path_cmp_for_sort
1339  *
1340  * Compare two paths for equivalence. Used during path sorting.
1341  * As usual 0 means equal.
1342  */
1343 int
1344 fib_path_cmp_for_sort (void * v1,
1345                        void * v2)
1346 {
1347     fib_node_index_t *pi1 = v1, *pi2 = v2;
1348     fib_path_t *path1, *path2;
1349
1350     path1 = fib_path_get(*pi1);
1351     path2 = fib_path_get(*pi2);
1352
1353     return (fib_path_cmp_i(path1, path2));
1354 }
1355
1356 /*
1357  * fib_path_cmp
1358  *
1359  * Compare two paths for equivalence.
1360  */
1361 int
1362 fib_path_cmp (fib_node_index_t pi1,
1363               fib_node_index_t pi2)
1364 {
1365     fib_path_t *path1, *path2;
1366
1367     path1 = fib_path_get(pi1);
1368     path2 = fib_path_get(pi2);
1369
1370     return (fib_path_cmp_i(path1, path2));
1371 }
1372
/*
 * fib_path_cmp_w_route_path
 *
 * Compare an existing path object against a control-plane route-path
 * description; 0 means they describe the same path. Unlike
 * fib_path_cmp_i, the weight is significant here and is compared first.
 */
int
fib_path_cmp_w_route_path (fib_node_index_t path_index,
                           const fib_route_path_t *rpath)
{
    fib_path_t *path;
    int res;

    path = fib_path_get(path_index);

    res = 1;

    if (path->fp_weight != rpath->frp_weight)
    {
        res = (path->fp_weight - rpath->frp_weight);
    }
    else
    {
        /*
         * both paths are of the same type.
         * consider each type and its attributes in turn.
         */
        switch (path->fp_type)
        {
        case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
            res = ip46_address_cmp(&path->attached_next_hop.fp_nh,
                                   &rpath->frp_addr);
            if (0 == res)
            {
                res = (path->attached_next_hop.fp_interface -
                       rpath->frp_sw_if_index);
            }
            break;
        case FIB_PATH_TYPE_ATTACHED:
            res = (path->attached.fp_interface - rpath->frp_sw_if_index);
            break;
        case FIB_PATH_TYPE_RECURSIVE:
            /* an MPLS path recurses via a label+EOS, an IP path via an
             * address; then the table recursed through must also match */
            if (FIB_PROTOCOL_MPLS == path->fp_nh_proto)
            {
                res = path->recursive.fp_nh.fp_local_label - rpath->frp_local_label;

                if (res == 0)
                {
                    res = path->recursive.fp_nh.fp_eos - rpath->frp_eos;
                }
            }
            else
            {
                res = ip46_address_cmp(&path->recursive.fp_nh.fp_ip,
                                       &rpath->frp_addr);
            }

            if (0 == res)
            {
                res = (path->recursive.fp_tbl_id - rpath->frp_fib_index);
            }
            break;
        case FIB_PATH_TYPE_INTF_RX:
            res = (path->intf_rx.fp_interface - rpath->frp_sw_if_index);
            break;
        case FIB_PATH_TYPE_DEAG:
            res = (path->deag.fp_tbl_id - rpath->frp_fib_index);
            if (0 == res)
            {
                res = (path->deag.fp_rpf_id - rpath->frp_rpf_id);
            }
            break;
        case FIB_PATH_TYPE_SPECIAL:
        case FIB_PATH_TYPE_RECEIVE:
        case FIB_PATH_TYPE_EXCLUSIVE:
            /* these types carry no distinguishing attributes */
            res = 0;
            break;
        }
    }
    return (res);
}
1448
/*
 * fib_path_recursive_loop_detect
 *
 * A forward walk of the FIB object graph to detect for a cycle/loop. This
 * walk is initiated when an entry is linking to a new path list or from an old.
 * The entry vector passed contains all the FIB entries that are children of this
 * path (it is all the entries encountered on the walk so far). If this vector
 * contains the entry this path resolves via, then a loop is about to form.
 * The loop must be allowed to form, since we need the dependencies in place
 * so that we can track when the loop breaks.
 * However, we MUST not produce a loop in the forwarding graph (else packets
 * would loop around the switch path until the loop breaks), so we mark recursive
 * paths as looped so that they do not contribute forwarding information.
 * By marking the path as looped, an entry such as;
 *    X/Y
 *     via a.a.a.a (looped)
 *     via b.b.b.b (not looped)
 * can still forward using the info provided by b.b.b.b only
 */
int
fib_path_recursive_loop_detect (fib_node_index_t path_index,
                                fib_node_index_t **entry_indicies)
{
    fib_path_t *path;

    path = fib_path_get(path_index);

    /*
     * the forced drop path is never looped, because it is never resolved.
     */
    if (fib_path_is_permanent_drop(path))
    {
        return (0);
    }

    switch (path->fp_type)
    {
    case FIB_PATH_TYPE_RECURSIVE:
    {
        fib_node_index_t *entry_index, *entries;
        int looped = 0;
        entries = *entry_indicies;

        /* check whether the entry this path resolves through is already
         * on the walk - if so, linking would close a cycle */
        vec_foreach(entry_index, entries) {
            if (*entry_index == path->fp_via_fib)
            {
                /*
                 * the entry that is about to link to this path-list (or
                 * one of this path-list's children) is the same entry that
                 * this recursive path resolves through. this is a cycle.
                 * abort the walk.
                 */
                looped = 1;
                break;
            }
        }

        if (looped)
        {
            FIB_PATH_DBG(path, "recursive loop formed");
            path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RECURSIVE_LOOP;

            /* a looped path must not contribute forwarding: drop instead */
            dpo_copy(&path->fp_dpo,
                    drop_dpo_get(fib_proto_to_dpo(path->fp_nh_proto)));
        }
        else
        {
            /*
             * no loop here yet. keep forward walking the graph.
             */
            if (fib_entry_recursive_loop_detect(path->fp_via_fib, entry_indicies))
            {
                FIB_PATH_DBG(path, "recursive loop formed");
                path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RECURSIVE_LOOP;
            }
            else
            {
                FIB_PATH_DBG(path, "recursive loop cleared");
                path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RECURSIVE_LOOP;
            }
        }
        break;
    }
    case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
    case FIB_PATH_TYPE_ATTACHED:
    case FIB_PATH_TYPE_SPECIAL:
    case FIB_PATH_TYPE_DEAG:
    case FIB_PATH_TYPE_RECEIVE:
    case FIB_PATH_TYPE_INTF_RX:
    case FIB_PATH_TYPE_EXCLUSIVE:
        /*
         * these path types cannot be part of a loop, since they are the leaves
         * of the graph.
         */
        break;
    }

    return (fib_path_is_looped(path_index));
}
1548
/*
 * fib_path_resolve
 *
 * Resolve the path: build the DPO through which it forwards and link
 * the path into the dependency graph (adjacency or via-entry) so it is
 * notified of state changes. Returns non-zero if the path resolved.
 */
int
fib_path_resolve (fib_node_index_t path_index)
{
    fib_path_t *path;

    path = fib_path_get(path_index);

    /*
     * hope for the best.
     */
    path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RESOLVED;

    /*
     * the forced drop path resolves via the drop adj
     */
    if (fib_path_is_permanent_drop(path))
    {
        dpo_copy(&path->fp_dpo,
                 drop_dpo_get(fib_proto_to_dpo(path->fp_nh_proto)));
        path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
        return (fib_path_is_resolved(path_index));
    }

    switch (path->fp_type)
    {
    case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
        fib_path_attached_next_hop_set(path);
        break;
    case FIB_PATH_TYPE_ATTACHED:
        /*
         * path->attached.fp_interface
         * an admin-down interface leaves the path unresolved, but the
         * adjacency is still stacked so we get updates
         */
        if (!vnet_sw_interface_is_admin_up(vnet_get_main(),
                                           path->attached.fp_interface))
        {
            path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
        }
        dpo_set(&path->fp_dpo,
                DPO_ADJACENCY,
                fib_proto_to_dpo(path->fp_nh_proto),
                fib_path_attached_get_adj(path,
                                          fib_proto_to_link(path->fp_nh_proto)));

        /*
         * become a child of the adjacency so we receive updates
         * when the interface state changes
         */
        path->fp_sibling = adj_child_add(path->fp_dpo.dpoi_index,
                                         FIB_NODE_TYPE_PATH,
                                         fib_path_get_index(path));

        break;
    case FIB_PATH_TYPE_RECURSIVE:
    {
        /*
         * Create a RR source entry in the table for the address
         * that this path recurses through.
         * This resolve action is recursive, hence we may create
         * more paths in the process. more creates mean maybe realloc
         * of this path.
         */
        fib_node_index_t fei;
        fib_prefix_t pfx;

        ASSERT(FIB_NODE_INDEX_INVALID == path->fp_via_fib);

        /* build the prefix recursed through: a label for MPLS, an
         * address for IP */
        if (FIB_PROTOCOL_MPLS == path->fp_nh_proto)
        {
            fib_prefix_from_mpls_label(path->recursive.fp_nh.fp_local_label,
                                       path->recursive.fp_nh.fp_eos,
                                       &pfx);
        }
        else
        {
            fib_prefix_from_ip46_addr(&path->recursive.fp_nh.fp_ip, &pfx);
        }

        fei = fib_table_entry_special_add(path->recursive.fp_tbl_id,
                                          &pfx,
                                          FIB_SOURCE_RR,
                                          FIB_ENTRY_FLAG_NONE);

        /* re-fetch: the add above may have reallocated the path pool
         * (see comment at the top of this case) */
        path = fib_path_get(path_index);
        path->fp_via_fib = fei;

        /*
         * become a dependent child of the entry so the path is
         * informed when the forwarding for the entry changes.
         */
        path->fp_sibling = fib_entry_child_add(path->fp_via_fib,
                                               FIB_NODE_TYPE_PATH,
                                               fib_path_get_index(path));

        /*
         * create and configure the IP DPO
         */
        fib_path_recursive_adj_update(
            path,
            fib_path_to_chain_type(path),
            &path->fp_dpo);

        break;
    }
    case FIB_PATH_TYPE_SPECIAL:
        /*
         * Resolve via the drop
         */
        dpo_copy(&path->fp_dpo,
                 drop_dpo_get(fib_proto_to_dpo(path->fp_nh_proto)));
        break;
    case FIB_PATH_TYPE_DEAG:
    {
        /*
         * Resolve via a lookup DPO.
         * FIXME. control plane should add routes with a table ID
         */
        lookup_cast_t cast;

        /* an RPF-ID flagged path performs a multicast lookup */
        cast = (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_RPF_ID ?
                LOOKUP_MULTICAST :
                LOOKUP_UNICAST);

        lookup_dpo_add_or_lock_w_fib_index(path->deag.fp_tbl_id,
                                           fib_proto_to_dpo(path->fp_nh_proto),
                                           cast,
                                           LOOKUP_INPUT_DST_ADDR,
                                           LOOKUP_TABLE_FROM_CONFIG,
                                           &path->fp_dpo);
        break;
    }
    case FIB_PATH_TYPE_RECEIVE:
        /*
         * Resolve via a receive DPO.
         */
        receive_dpo_add_or_lock(fib_proto_to_dpo(path->fp_nh_proto),
                                path->receive.fp_interface,
                                &path->receive.fp_addr,
                                &path->fp_dpo);
        break;
    case FIB_PATH_TYPE_INTF_RX: {
        /*
         * Resolve via an interface-RX DPO.
         */
        interface_dpo_add_or_lock(fib_proto_to_dpo(path->fp_nh_proto),
                                  path->intf_rx.fp_interface,
                                  &path->fp_dpo);
        break;
    }
    case FIB_PATH_TYPE_EXCLUSIVE:
        /*
         * Resolve via the user provided DPO
         */
        dpo_copy(&path->fp_dpo, &path->exclusive.fp_ex_dpo);
        break;
    }

    return (fib_path_is_resolved(path_index));
}
1707
1708 u32
1709 fib_path_get_resolving_interface (fib_node_index_t path_index)
1710 {
1711     fib_path_t *path;
1712
1713     path = fib_path_get(path_index);
1714
1715     switch (path->fp_type)
1716     {
1717     case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
1718         return (path->attached_next_hop.fp_interface);
1719     case FIB_PATH_TYPE_ATTACHED:
1720         return (path->attached.fp_interface);
1721     case FIB_PATH_TYPE_RECEIVE:
1722         return (path->receive.fp_interface);
1723     case FIB_PATH_TYPE_RECURSIVE:
1724         if (fib_path_is_resolved(path_index))
1725         {
1726             return (fib_entry_get_resolving_interface(path->fp_via_fib));
1727         }
1728         break;
1729     case FIB_PATH_TYPE_INTF_RX:
1730     case FIB_PATH_TYPE_SPECIAL:
1731     case FIB_PATH_TYPE_DEAG:
1732     case FIB_PATH_TYPE_EXCLUSIVE:
1733         break;
1734     }
1735     return (~0);
1736 }
1737
1738 adj_index_t
1739 fib_path_get_adj (fib_node_index_t path_index)
1740 {
1741     fib_path_t *path;
1742
1743     path = fib_path_get(path_index);
1744
1745     ASSERT(dpo_is_adj(&path->fp_dpo));
1746     if (dpo_is_adj(&path->fp_dpo))
1747     {
1748         return (path->fp_dpo.dpoi_index);
1749     }
1750     return (ADJ_INDEX_INVALID);
1751 }
1752
1753 int
1754 fib_path_get_weight (fib_node_index_t path_index)
1755 {
1756     fib_path_t *path;
1757
1758     path = fib_path_get(path_index);
1759
1760     ASSERT(path);
1761
1762     return (path->fp_weight);
1763 }
1764
/**
 * @brief Contribute the path's adjacency to the list passed.
 * By calling this function over all paths, recursively, a child
 * can construct its full set of forwarding adjacencies, and hence its
 * uRPF list.
 *
 * @param path_index index of the path whose interface(s) to contribute
 * @param urpf index of the uRPF list being constructed
 */
void
fib_path_contribute_urpf (fib_node_index_t path_index,
                          index_t urpf)
{
    fib_path_t *path;

    path = fib_path_get(path_index);

    /*
     * resolved and unresolved paths contribute to the RPF list.
     */
    switch (path->fp_type)
    {
    case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
        fib_urpf_list_append(urpf, path->attached_next_hop.fp_interface);
        break;

    case FIB_PATH_TYPE_ATTACHED:
        fib_urpf_list_append(urpf, path->attached.fp_interface);
        break;

    case FIB_PATH_TYPE_RECURSIVE:
        /* recurse into the via-entry to collect its interfaces */
        if (FIB_NODE_INDEX_INVALID != path->fp_via_fib &&
            !fib_path_is_looped(path_index))
        {
            /*
             * there's unresolved due to constraints, and there's unresolved
             * due to ain't got no via. can't do nowt w'out via.
             */
            fib_entry_contribute_urpf(path->fp_via_fib, urpf);
        }
        break;

    case FIB_PATH_TYPE_EXCLUSIVE:
    case FIB_PATH_TYPE_SPECIAL:
        /*
         * these path types may link to an adj, if that's what
         * the client gave
         */
        if (dpo_is_adj(&path->fp_dpo))
        {
            ip_adjacency_t *adj;

            adj = adj_get(path->fp_dpo.dpoi_index);

            fib_urpf_list_append(urpf, adj->rewrite_header.sw_if_index);
        }
        break;

    case FIB_PATH_TYPE_DEAG:
    case FIB_PATH_TYPE_RECEIVE:
    case FIB_PATH_TYPE_INTF_RX:
        /*
         * these path types don't link to an adj
         */
        break;
    }
}
1829
/*
 * fib_path_stack_mpls_disp
 *
 * For a DEAG path, replace the caller's DPO with an MPLS disposition
 * DPO (created with the payload protocol and the path's RPF-ID) that is
 * stacked on the original DPO. All other path types leave the DPO
 * untouched.
 */
void
fib_path_stack_mpls_disp (fib_node_index_t path_index,
                          dpo_proto_t payload_proto,
                          dpo_id_t *dpo)
{
    fib_path_t *path;

    path = fib_path_get(path_index);

    ASSERT(path);

    switch (path->fp_type)
    {
    case FIB_PATH_TYPE_DEAG:
    {
        dpo_id_t tmp = DPO_INVALID;

        /* save the current chain, re-point the caller's DPO at the
         * disposition object built on that saved chain, then release
         * the temporary reference */
        dpo_copy(&tmp, dpo);
        dpo_set(dpo,
                DPO_MPLS_DISPOSITION,
                payload_proto,
                mpls_disp_dpo_create(payload_proto,
                                     path->deag.fp_rpf_id,
                                     &tmp));
        dpo_reset(&tmp);
        break;
    }
    case FIB_PATH_TYPE_RECEIVE:
    case FIB_PATH_TYPE_ATTACHED:
    case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
    case FIB_PATH_TYPE_RECURSIVE:
    case FIB_PATH_TYPE_INTF_RX:
    case FIB_PATH_TYPE_EXCLUSIVE:
    case FIB_PATH_TYPE_SPECIAL:
        break;
    }
}
1867
/**
 * Contribute the path's forwarding object, of the requested forwarding
 * chain type, into *dpo.
 *
 * If the requested chain type matches the path's native type (the DPO
 * created at resolve time) the stored DPO is copied out directly;
 * otherwise a type-appropriate DPO is constructed per path type.
 *
 * @param path_index index of the path in the path pool
 * @param fct        the forwarding chain type the caller requires
 * @param dpo        out: receives the contributed DPO (caller owns the
 *                   reference)
 *
 * NOTE(review): the ASSERT below forbids FIB_FORW_CHAIN_TYPE_MPLS_EOS,
 * yet several inner switches still carry MPLS_EOS case labels - those
 * arms look unreachable in debug builds; confirm whether the assert or
 * the cases are vestigial.
 */
void
fib_path_contribute_forwarding (fib_node_index_t path_index,
                                fib_forward_chain_type_t fct,
                                dpo_id_t *dpo)
{
    fib_path_t *path;

    path = fib_path_get(path_index);

    ASSERT(path);
    ASSERT(FIB_FORW_CHAIN_TYPE_MPLS_EOS != fct);

    FIB_PATH_DBG(path, "contribute");

    /*
     * The DPO stored in the path was created when the path was resolved.
     * This then represents the path's 'native' protocol; IP.
     * For all others will need to go find something else.
     */
    if (fib_path_to_chain_type(path) == fct)
    {
        dpo_copy(dpo, &path->fp_dpo);
    }
    else
    {
        switch (path->fp_type)
        {
        case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
            switch (fct)
            {
            case FIB_FORW_CHAIN_TYPE_UNICAST_IP4:
            case FIB_FORW_CHAIN_TYPE_UNICAST_IP6:
            case FIB_FORW_CHAIN_TYPE_MPLS_EOS:
            case FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS:
            case FIB_FORW_CHAIN_TYPE_ETHERNET:
            case FIB_FORW_CHAIN_TYPE_NSH:
            {
                adj_index_t ai;

                /*
                 * get a appropriate link type adj.
                 */
                ai = fib_path_attached_next_hop_get_adj(
                         path,
                         fib_forw_chain_type_to_link_type(fct));
                dpo_set(dpo, DPO_ADJACENCY,
                        fib_forw_chain_type_to_dpo_proto(fct), ai);
                /* dpo_set took its own lock on the adj; drop ours */
                adj_unlock(ai);

                break;
            }
            case FIB_FORW_CHAIN_TYPE_MCAST_IP4:
            case FIB_FORW_CHAIN_TYPE_MCAST_IP6:
            break;
            }
            break;
        case FIB_PATH_TYPE_RECURSIVE:
            /*
             * recursive paths re-stack on their via-entry's forwarding
             * for the requested chain type
             */
            switch (fct)
            {
            case FIB_FORW_CHAIN_TYPE_MPLS_EOS:
            case FIB_FORW_CHAIN_TYPE_UNICAST_IP4:
            case FIB_FORW_CHAIN_TYPE_UNICAST_IP6:
            case FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS:
            case FIB_FORW_CHAIN_TYPE_MCAST_IP4:
            case FIB_FORW_CHAIN_TYPE_MCAST_IP6:
                fib_path_recursive_adj_update(path, fct, dpo);
                break;
            case FIB_FORW_CHAIN_TYPE_ETHERNET:
            case FIB_FORW_CHAIN_TYPE_NSH:
                ASSERT(0);
                break;
            }
            break;
        case FIB_PATH_TYPE_DEAG:
            switch (fct)
            {
            case FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS:
                /*
                 * non-eos MPLS traffic performs a lookup in the default
                 * MPLS table
                 */
                lookup_dpo_add_or_lock_w_table_id(MPLS_FIB_DEFAULT_TABLE_ID,
                                                  DPO_PROTO_MPLS,
                                                  LOOKUP_UNICAST,
                                                  LOOKUP_INPUT_DST_ADDR,
                                                  LOOKUP_TABLE_FROM_CONFIG,
                                                  dpo);
                break;
            case FIB_FORW_CHAIN_TYPE_MPLS_EOS:
            case FIB_FORW_CHAIN_TYPE_UNICAST_IP4:
            case FIB_FORW_CHAIN_TYPE_UNICAST_IP6:
                dpo_copy(dpo, &path->fp_dpo);
                break;
            case FIB_FORW_CHAIN_TYPE_MCAST_IP4:
            case FIB_FORW_CHAIN_TYPE_MCAST_IP6:
            case FIB_FORW_CHAIN_TYPE_ETHERNET:
            case FIB_FORW_CHAIN_TYPE_NSH:
                ASSERT(0);
                break;
            }
            break;
        case FIB_PATH_TYPE_EXCLUSIVE:
            /* exclusive paths forward only via the user-supplied DPO */
            dpo_copy(dpo, &path->exclusive.fp_ex_dpo);
            break;
        case FIB_PATH_TYPE_ATTACHED:
            switch (fct)
            {
            case FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS:
            case FIB_FORW_CHAIN_TYPE_UNICAST_IP4:
            case FIB_FORW_CHAIN_TYPE_UNICAST_IP6:
            case FIB_FORW_CHAIN_TYPE_MPLS_EOS:
            case FIB_FORW_CHAIN_TYPE_ETHERNET:
            case FIB_FORW_CHAIN_TYPE_NSH:
                {
                    adj_index_t ai;

                    /*
                     * get a appropriate link type adj.
                     */
                    ai = fib_path_attached_get_adj(
                            path,
                            fib_forw_chain_type_to_link_type(fct));
                    dpo_set(dpo, DPO_ADJACENCY,
                            fib_forw_chain_type_to_dpo_proto(fct), ai);
                    adj_unlock(ai);
                    break;
                }
            case FIB_FORW_CHAIN_TYPE_MCAST_IP4:
            case FIB_FORW_CHAIN_TYPE_MCAST_IP6:
                {
                    adj_index_t ai;

                    /*
                     * Create the adj needed for sending IP multicast traffic
                     */
                    ai = adj_mcast_add_or_lock(path->fp_nh_proto,
                                               fib_forw_chain_type_to_link_type(fct),
                                               path->attached.fp_interface);
                    dpo_set(dpo, DPO_ADJACENCY,
                            fib_forw_chain_type_to_dpo_proto(fct),
                            ai);
                    adj_unlock(ai);
                }
                break;
            }
            break;
        case FIB_PATH_TYPE_INTF_RX:
            /*
             * Create the adj needed for sending IP multicast traffic
             */
            interface_dpo_add_or_lock(fib_forw_chain_type_to_dpo_proto(fct),
                                      path->attached.fp_interface,
                                      dpo);
            break;
        case FIB_PATH_TYPE_RECEIVE:
        case FIB_PATH_TYPE_SPECIAL:
            /* receive/special paths contribute their stored DPO as-is */
            dpo_copy(dpo, &path->fp_dpo);
            break;
        }
    }
}
2025
2026 load_balance_path_t *
2027 fib_path_append_nh_for_multipath_hash (fib_node_index_t path_index,
2028                                        fib_forward_chain_type_t fct,
2029                                        load_balance_path_t *hash_key)
2030 {
2031     load_balance_path_t *mnh;
2032     fib_path_t *path;
2033
2034     path = fib_path_get(path_index);
2035
2036     ASSERT(path);
2037
2038     if (fib_path_is_resolved(path_index))
2039     {
2040         vec_add2(hash_key, mnh, 1);
2041
2042         mnh->path_weight = path->fp_weight;
2043         mnh->path_index = path_index;
2044         fib_path_contribute_forwarding(path_index, fct, &mnh->path_dpo);
2045     }
2046
2047     return (hash_key);
2048 }
2049
2050 int
2051 fib_path_is_recursive_constrained (fib_node_index_t path_index)
2052 {
2053     fib_path_t *path;
2054
2055     path = fib_path_get(path_index);
2056
2057     return ((FIB_PATH_TYPE_RECURSIVE == path->fp_type) &&
2058             ((path->fp_cfg_flags & FIB_PATH_CFG_FLAG_RESOLVE_ATTACHED) ||
2059              (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_RESOLVE_HOST)));
2060 }
2061
2062 int
2063 fib_path_is_exclusive (fib_node_index_t path_index)
2064 {
2065     fib_path_t *path;
2066
2067     path = fib_path_get(path_index);
2068
2069     return (FIB_PATH_TYPE_EXCLUSIVE == path->fp_type);
2070 }
2071
2072 int
2073 fib_path_is_deag (fib_node_index_t path_index)
2074 {
2075     fib_path_t *path;
2076
2077     path = fib_path_get(path_index);
2078
2079     return (FIB_PATH_TYPE_DEAG == path->fp_type);
2080 }
2081
2082 int
2083 fib_path_is_resolved (fib_node_index_t path_index)
2084 {
2085     fib_path_t *path;
2086
2087     path = fib_path_get(path_index);
2088
2089     return (dpo_id_is_valid(&path->fp_dpo) &&
2090             (path->fp_oper_flags & FIB_PATH_OPER_FLAG_RESOLVED) &&
2091             !fib_path_is_looped(path_index) &&
2092             !fib_path_is_permanent_drop(path));
2093 }
2094
2095 int
2096 fib_path_is_looped (fib_node_index_t path_index)
2097 {
2098     fib_path_t *path;
2099
2100     path = fib_path_get(path_index);
2101
2102     return (path->fp_oper_flags & FIB_PATH_OPER_FLAG_RECURSIVE_LOOP);
2103 }
2104
/**
 * Path-list walk callback: encode one path into the API representation
 * vector passed via ctx.
 *
 * @param path_list_index the path-list being walked (unused here)
 * @param path_index      the path to encode
 * @param ctx             a fib_route_path_encode_t** vector to append to
 * @return FIB_PATH_LIST_WALK_CONTINUE always, so the walk visits every
 *         path.
 */
fib_path_list_walk_rc_t
fib_path_encode (fib_node_index_t path_list_index,
                 fib_node_index_t path_index,
                 void *ctx)
{
    fib_route_path_encode_t **api_rpaths = ctx;
    fib_route_path_encode_t *api_rpath;
    fib_path_t *path;

    path = fib_path_get(path_index);
    if (!path)
      return (FIB_PATH_LIST_WALK_CONTINUE);
    vec_add2(*api_rpaths, api_rpath, 1);
    api_rpath->rpath.frp_weight = path->fp_weight;
    api_rpath->rpath.frp_proto = path->fp_nh_proto;
    /* ~0 = no interface; overwritten below for types that have one */
    api_rpath->rpath.frp_sw_if_index = ~0;
    /* NOTE(review): reads the 'exclusive' union member regardless of
     * path type - for non-exclusive paths this copies whatever aliases
     * that union slot; confirm consumers only use .dpo for exclusive
     * paths. */
    api_rpath->dpo = path->exclusive.fp_ex_dpo;
    switch (path->fp_type)
      {
      case FIB_PATH_TYPE_RECEIVE:
        api_rpath->rpath.frp_addr = path->receive.fp_addr;
        api_rpath->rpath.frp_sw_if_index = path->receive.fp_interface;
        break;
      case FIB_PATH_TYPE_ATTACHED:
        api_rpath->rpath.frp_sw_if_index = path->attached.fp_interface;
        break;
      case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
        api_rpath->rpath.frp_sw_if_index = path->attached_next_hop.fp_interface;
        api_rpath->rpath.frp_addr = path->attached_next_hop.fp_nh;
        break;
      case FIB_PATH_TYPE_SPECIAL:
        break;
      case FIB_PATH_TYPE_DEAG:
        break;
      case FIB_PATH_TYPE_RECURSIVE:
        api_rpath->rpath.frp_addr = path->recursive.fp_nh.fp_ip;
        break;
      default:
        break;
      }
    return (FIB_PATH_LIST_WALK_CONTINUE);
}
2147
2148 fib_protocol_t
2149 fib_path_get_proto (fib_node_index_t path_index)
2150 {
2151     fib_path_t *path;
2152
2153     path = fib_path_get(path_index);
2154
2155     return (path->fp_nh_proto);
2156 }
2157
/**
 * Module initialisation: register the FIB path node type and its
 * virtual function table with the FIB node infrastructure.
 */
void
fib_path_module_init (void)
{
    fib_node_register_type (FIB_NODE_TYPE_PATH, &fib_path_vft);
}
2163
/**
 * CLI handler for "show fib paths".
 *
 * With a numeric argument, show that one path in detail (including its
 * children in the dependency graph); with no argument, list every path
 * in the pool in one-line format.
 *
 * @return NULL always (no CLI error is raised, even for a bad index).
 */
static clib_error_t *
show_fib_path_command (vlib_main_t * vm,
                        unformat_input_t * input,
                        vlib_cli_command_t * cmd)
{
    fib_node_index_t pi;
    fib_path_t *path;

    if (unformat (input, "%d", &pi))
    {
        /*
         * show one in detail
         */
        if (!pool_is_free_index(fib_path_pool, pi))
        {
            path = fib_path_get(pi);
            u8 *s = fib_path_format(pi, NULL);
            s = format(s, "children:");
            s = fib_node_children_format(path->fp_node.fn_children, s);
            vlib_cli_output (vm, "%s", s);
            vec_free(s);
        }
        else
        {
            vlib_cli_output (vm, "path %d invalid", pi);
        }
    }
    else
    {
        /*
         * show all: one line per pool entry
         */
        vlib_cli_output (vm, "FIB Paths");
        pool_foreach(path, fib_path_pool,
        ({
            vlib_cli_output (vm, "%U", format_fib_path, path);
        }));
    }

    return (NULL);
}
2202
/*
 * Register the "show fib paths" CLI command; the handler also accepts
 * an optional numeric path index for a detailed view.
 */
VLIB_CLI_COMMAND (show_fib_path, static) = {
  .path = "show fib paths",
  .function = show_fib_path_command,
  .short_help = "show fib paths",
};