src/vnet/fib/fib_path.c (vpp.git)
1 /*
2  * Copyright (c) 2016 Cisco and/or its affiliates.
3  * Licensed under the Apache License, Version 2.0 (the "License");
4  * you may not use this file except in compliance with the License.
5  * You may obtain a copy of the License at:
6  *
7  *     http://www.apache.org/licenses/LICENSE-2.0
8  *
9  * Unless required by applicable law or agreed to in writing, software
10  * distributed under the License is distributed on an "AS IS" BASIS,
11  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12  * See the License for the specific language governing permissions and
13  * limitations under the License.
14  */
15
16 #include <vlib/vlib.h>
17 #include <vnet/vnet.h>
18 #include <vnet/ip/format.h>
19 #include <vnet/ip/ip.h>
20 #include <vnet/dpo/drop_dpo.h>
21 #include <vnet/dpo/receive_dpo.h>
22 #include <vnet/dpo/load_balance_map.h>
23 #include <vnet/dpo/lookup_dpo.h>
24 #include <vnet/dpo/interface_rx_dpo.h>
25 #include <vnet/dpo/mpls_disposition.h>
26 #include <vnet/dpo/dvr_dpo.h>
28
29 #include <vnet/adj/adj.h>
30 #include <vnet/adj/adj_mcast.h>
31
32 #include <vnet/fib/fib_path.h>
33 #include <vnet/fib/fib_node.h>
34 #include <vnet/fib/fib_table.h>
35 #include <vnet/fib/fib_entry.h>
36 #include <vnet/fib/fib_path_list.h>
37 #include <vnet/fib/fib_internal.h>
38 #include <vnet/fib/fib_urpf_list.h>
39 #include <vnet/fib/mpls_fib.h>
40 #include <vnet/udp/udp_encap.h>
41 #include <vnet/bier/bier_fmask.h>
42 #include <vnet/bier/bier_table.h>
43 #include <vnet/bier/bier_imp.h>
44 #include <vnet/bier/bier_disp_table.h>
45
46 /**
47  * Enumeration of path types
48  */
49 typedef enum fib_path_type_t_ {
50     /**
51      * Marker. Add new types after this one.
52      */
53     FIB_PATH_TYPE_FIRST = 0,
54     /**
55      * Attached-nexthop. An interface and a nexthop are known.
56      */
57     FIB_PATH_TYPE_ATTACHED_NEXT_HOP = FIB_PATH_TYPE_FIRST,
58     /**
59      * attached. Only the interface is known.
60      */
61     FIB_PATH_TYPE_ATTACHED,
62     /**
63      * recursive. Only the next-hop is known.
64      */
65     FIB_PATH_TYPE_RECURSIVE,
66     /**
67      * special. nothing is known. so we drop.
68      */
69     FIB_PATH_TYPE_SPECIAL,
70     /**
71      * exclusive. user provided adj.
72      */
73     FIB_PATH_TYPE_EXCLUSIVE,
74     /**
75      * deag. Link to a lookup adj in the next table
76      */
77     FIB_PATH_TYPE_DEAG,
78     /**
79      * interface receive.
80      */
81     FIB_PATH_TYPE_INTF_RX,
82     /**
83      * Path resolves via a UDP encap object.
84      */
85     FIB_PATH_TYPE_UDP_ENCAP,
86     /**
87      * receive. it's for-us.
88      */
89     FIB_PATH_TYPE_RECEIVE,
90     /**
91      * bier-imp. it's via a BIER imposition.
92      */
93     FIB_PATH_TYPE_BIER_IMP,
94     /**
95      * bier-table. it's via a BIER ECMP-table.
96      */
97     FIB_PATH_TYPE_BIER_TABLE,
98     /**
99      * bier-fmask. it's via a BIER f-mask.
100      */
101     FIB_PATH_TYPE_BIER_FMASK,
102     /**
103      * via a DVR.
104      */
105     FIB_PATH_TYPE_DVR,
106     /**
107      * Marker. Add new types before this one, then update it.
108      */
109     FIB_PATH_TYPE_LAST = FIB_PATH_TYPE_DVR,
110 } __attribute__ ((packed)) fib_path_type_t;
111
112 /**
113  * The maximum number of path_types
114  */
115 #define FIB_PATH_TYPE_MAX (FIB_PATH_TYPE_LAST + 1)
116
117 #define FIB_PATH_TYPES {                                        \
118     [FIB_PATH_TYPE_ATTACHED_NEXT_HOP] = "attached-nexthop",     \
119     [FIB_PATH_TYPE_ATTACHED]          = "attached",             \
120     [FIB_PATH_TYPE_RECURSIVE]         = "recursive",            \
121     [FIB_PATH_TYPE_SPECIAL]           = "special",              \
122     [FIB_PATH_TYPE_EXCLUSIVE]         = "exclusive",            \
123     [FIB_PATH_TYPE_DEAG]              = "deag",                 \
124     [FIB_PATH_TYPE_INTF_RX]           = "intf-rx",              \
125     [FIB_PATH_TYPE_UDP_ENCAP]         = "udp-encap",            \
126     [FIB_PATH_TYPE_RECEIVE]           = "receive",              \
127     [FIB_PATH_TYPE_BIER_IMP]          = "bier-imp",             \
128     [FIB_PATH_TYPE_BIER_TABLE]        = "bier-table",           \
129     [FIB_PATH_TYPE_BIER_FMASK]        = "bier-fmask",           \
130     [FIB_PATH_TYPE_DVR]               = "dvr",                  \
131 }
132
133 #define FOR_EACH_FIB_PATH_TYPE(_item)           \
134     for (_item = FIB_PATH_TYPE_FIRST;           \
135          _item <= FIB_PATH_TYPE_LAST;           \
136          _item++)
137
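/*
 * Illustrative sketch (not part of the original source): the names array and
 * the iteration macro are typically used together, e.g. to enumerate the
 * human-readable path-type names. The variable names below are hypothetical.
 *
 *     static const char *names[] = FIB_PATH_TYPES;
 *     fib_path_type_t ptype;
 *
 *     FOR_EACH_FIB_PATH_TYPE(ptype)
 *     {
 *         clib_warning("path-type %d: %s", ptype, names[ptype]);
 *     }
 */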
138 /**
139  * Enumeration of path operational (i.e. derived) attributes
140  */
141 typedef enum fib_path_oper_attribute_t_ {
142     /**
143      * Marker. Add new types after this one.
144      */
145     FIB_PATH_OPER_ATTRIBUTE_FIRST = 0,
146     /**
147      * The path forms part of a recursive loop.
148      */
149     FIB_PATH_OPER_ATTRIBUTE_RECURSIVE_LOOP = FIB_PATH_OPER_ATTRIBUTE_FIRST,
150     /**
151      * The path is resolved
152      */
153     FIB_PATH_OPER_ATTRIBUTE_RESOLVED,
154     /**
155      * The path is attached, despite what the next-hop may say.
156      */
157     FIB_PATH_OPER_ATTRIBUTE_ATTACHED,
158     /**
159      * The path has become a permanent drop.
160      */
161     FIB_PATH_OPER_ATTRIBUTE_DROP,
162     /**
163      * Marker. Add new types before this one, then update it.
164      */
165     FIB_PATH_OPER_ATTRIBUTE_LAST = FIB_PATH_OPER_ATTRIBUTE_DROP,
166 } __attribute__ ((packed)) fib_path_oper_attribute_t;
167
168 /**
169  * The maximum number of path operational attributes
170  */
171 #define FIB_PATH_OPER_ATTRIBUTE_MAX (FIB_PATH_OPER_ATTRIBUTE_LAST + 1)
172
173 #define FIB_PATH_OPER_ATTRIBUTES {                                      \
174     [FIB_PATH_OPER_ATTRIBUTE_RECURSIVE_LOOP] = "recursive-loop",        \
175     [FIB_PATH_OPER_ATTRIBUTE_RESOLVED]       = "resolved",              \
176     [FIB_PATH_OPER_ATTRIBUTE_DROP]           = "drop",                  \
177 }
178
179 #define FOR_EACH_FIB_PATH_OPER_ATTRIBUTE(_item) \
180     for (_item = FIB_PATH_OPER_ATTRIBUTE_FIRST; \
181          _item <= FIB_PATH_OPER_ATTRIBUTE_LAST; \
182          _item++)
183
184 /**
185  * Path flags from the attributes
186  */
187 typedef enum fib_path_oper_flags_t_ {
188     FIB_PATH_OPER_FLAG_NONE = 0,
189     FIB_PATH_OPER_FLAG_RECURSIVE_LOOP = (1 << FIB_PATH_OPER_ATTRIBUTE_RECURSIVE_LOOP),
190     FIB_PATH_OPER_FLAG_DROP = (1 << FIB_PATH_OPER_ATTRIBUTE_DROP),
191     FIB_PATH_OPER_FLAG_RESOLVED = (1 << FIB_PATH_OPER_ATTRIBUTE_RESOLVED),
192     FIB_PATH_OPER_FLAG_ATTACHED = (1 << FIB_PATH_OPER_ATTRIBUTE_ATTACHED),
193 } __attribute__ ((packed)) fib_path_oper_flags_t;
194
195 /**
196  * A FIB path
197  */
198 typedef struct fib_path_t_ {
199     /**
200      * A path is a node in the FIB graph.
201      */
202     fib_node_t fp_node;
203
204     /**
205      * The index of the path-list to which this path belongs
206      */
207     u32 fp_pl_index;
208
209     /**
210      * This marks the start of the memory area used to hash
211      * the path
212      */
213     STRUCT_MARK(path_hash_start);
214
215     /**
216      * Configuration Flags
217      */
218     fib_path_cfg_flags_t fp_cfg_flags;
219
220     /**
221      * The type of the path. This is the selector for the union
222      */
223     fib_path_type_t fp_type;
224
225     /**
226      * The protocol of the next-hop, i.e. the address family of the
227      * next-hop's address. We can't derive this from the address itself
228      * since the address can be all zeros
229      */
230     dpo_proto_t fp_nh_proto;
231
232     /**
233      * UCMP [unnormalised] weight
234      */
235     u8 fp_weight;
236
237     /**
238      * A path preference. 0 is the best.
239      * Only paths of the best preference, that are 'up', are considered
240      * for forwarding.
241      */
242     u8 fp_preference;
243
244     /**
245      * per-type union of the data required to resolve the path
246      */
247     union {
248         struct {
249             /**
250              * The next-hop
251              */
252             ip46_address_t fp_nh;
253             /**
254              * The interface
255              */
256             u32 fp_interface;
257         } attached_next_hop;
258         struct {
259             /**
260              * The interface
261              */
262             u32 fp_interface;
263         } attached;
264         struct {
265             union
266             {
267                 /**
268                  * The next-hop
269                  */
270                 ip46_address_t fp_ip;
271                 struct {
272                     /**
273                      * The local label to resolve through.
274                      */
275                     mpls_label_t fp_local_label;
276                     /**
277                      * The EOS bit of the resolving label
278                      */
279                     mpls_eos_bit_t fp_eos;
280                 };
281             } fp_nh;
282             union {
283                 /**
284                  * The FIB table index in which to find the next-hop.
285                  */
286                 fib_node_index_t fp_tbl_id;
287                 /**
288                  * The BIER FIB the fmask is in
289                  */
290                 index_t fp_bier_fib;
291             };
292         } recursive;
293         struct {
294             /**
295              * BIER FMask ID
296              */
297             index_t fp_bier_fmask;
298         } bier_fmask;
299         struct {
300             /**
301              * The BIER table's ID
302              */
303             bier_table_id_t fp_bier_tbl;
304         } bier_table;
305         struct {
306             /**
307              * The BIER imposition object
308              * this is part of the path's key, since the index_t
309              * of an imposition object is the object's key.
310              */
311             index_t fp_bier_imp;
312         } bier_imp;
313         struct {
314             /**
315              * The FIB index in which to perform the next lookup
316              */
317             fib_node_index_t fp_tbl_id;
318             /**
319              * The RPF-ID to tag the packets with
320              */
321             fib_rpf_id_t fp_rpf_id;
322         } deag;
323         struct {
324         } special;
325         struct {
326             /**
327              * The user provided 'exclusive' DPO
328              */
329             dpo_id_t fp_ex_dpo;
330         } exclusive;
331         struct {
332             /**
333              * The interface on which the local address is configured
334              */
335             u32 fp_interface;
336             /**
337              * The next-hop
338              */
339             ip46_address_t fp_addr;
340         } receive;
341         struct {
342             /**
343              * The interface on which the packets will be input.
344              */
345             u32 fp_interface;
346         } intf_rx;
347         struct {
348             /**
349              * The UDP Encap object this path resolves through
350              */
351             u32 fp_udp_encap_id;
352         } udp_encap;
353         struct {
354             /**
355              * The interface
356              */
357             u32 fp_interface;
358         } dvr;
359     };
360     STRUCT_MARK(path_hash_end);
361
362     /**
363      * Members in this last section represent information that is
364      * derived during resolution. It should not be copied to new paths
365      * nor compared.
366      */
367
368     /**
369      * Operational Flags
370      */
371     fib_path_oper_flags_t fp_oper_flags;
372
373     union {
374         /**
375          * the resolving via fib. this is not part of the path's hash,
376          * since it is derived state.
377          */
378         fib_node_index_t fp_via_fib;
379         /**
380          * the resolving bier-table
381          */
382         index_t fp_via_bier_tbl;
383         /**
384          * the resolving bier-fmask
385          */
386         index_t fp_via_bier_fmask;
387     };
388
389     /**
390      * The Data-path objects through which this path resolves for IP.
391      */
392     dpo_id_t fp_dpo;
393
394     /**
395      * the index of this path in the parent's child list.
396      */
397     u32 fp_sibling;
398 } fib_path_t;
399
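/*
 * Illustrative note (not part of the original source): only the fields
 * between the path_hash_start and path_hash_end markers form the path's
 * key; fib_path_hash() below hashes exactly that region, i.e.:
 *
 *     hash_memory(STRUCT_MARK_PTR(path, path_hash_start),
 *                 STRUCT_OFFSET_OF(fib_path_t, path_hash_end) -
 *                 STRUCT_OFFSET_OF(fib_path_t, path_hash_start),
 *                 0);
 *
 * Everything after path_hash_end (the oper flags, the contributed DPO and
 * the sibling index) is derived state and is neither hashed nor compared.
 */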
400 /*
401  * Array of strings/names for the path types and attributes
402  */
403 static const char *fib_path_type_names[] = FIB_PATH_TYPES;
404 static const char *fib_path_oper_attribute_names[] = FIB_PATH_OPER_ATTRIBUTES;
405 static const char *fib_path_cfg_attribute_names[]  = FIB_PATH_CFG_ATTRIBUTES;
406
407 /*
408  * The memory pool from which we allocate all the paths
409  */
410 static fib_path_t *fib_path_pool;
411
412 /*
413  * Debug macro
414  */
415 #ifdef FIB_DEBUG
416 #define FIB_PATH_DBG(_p, _fmt, _args...)                        \
417 {                                                               \
418     u8 *_tmp = NULL;                                            \
419     _tmp = fib_path_format(fib_path_get_index(_p), _tmp);       \
420     clib_warning("path:[%d:%U]:" _fmt,                          \
421                  fib_path_get_index(_p), format_fib_path, _p, 0,\
422                  ##_args);                                      \
423     vec_free(_tmp);                                             \
424 }
425 #else
426 #define FIB_PATH_DBG(_p, _fmt, _args...)
427 #endif
428
429 static fib_path_t *
430 fib_path_get (fib_node_index_t index)
431 {
432     return (pool_elt_at_index(fib_path_pool, index));
433 }
434
435 static fib_node_index_t 
436 fib_path_get_index (fib_path_t *path)
437 {
438     return (path - fib_path_pool);
439 }
440
441 static fib_node_t *
442 fib_path_get_node (fib_node_index_t index)
443 {
444     return ((fib_node_t*)fib_path_get(index));
445 }
446
447 static fib_path_t*
448 fib_path_from_fib_node (fib_node_t *node)
449 {
450     ASSERT(FIB_NODE_TYPE_PATH == node->fn_type);
451     return ((fib_path_t*)node);
452 }
453
454 u8 *
455 format_fib_path (u8 * s, va_list * args)
456 {
457     fib_node_index_t path_index = va_arg (*args, fib_node_index_t);
458     u32 indent = va_arg (*args, u32);
459     vnet_main_t * vnm = vnet_get_main();
460     fib_path_oper_attribute_t oattr;
461     fib_path_cfg_attribute_t cattr;
462     fib_path_t *path;
463
464     path = fib_path_get(path_index);
465
466     s = format (s, "%Upath:[%d] ", format_white_space, indent,
467                 fib_path_get_index(path));
468     s = format (s, "pl-index:%d ", path->fp_pl_index);
469     s = format (s, "%U ", format_dpo_proto, path->fp_nh_proto);
470     s = format (s, "weight=%d ", path->fp_weight);
471     s = format (s, "pref=%d ", path->fp_preference);
472     s = format (s, "%s: ", fib_path_type_names[path->fp_type]);
473     if (FIB_PATH_OPER_FLAG_NONE != path->fp_oper_flags) {
474         s = format(s, " oper-flags:");
475         FOR_EACH_FIB_PATH_OPER_ATTRIBUTE(oattr) {
476             if ((1<<oattr) & path->fp_oper_flags) {
477                 s = format (s, "%s,", fib_path_oper_attribute_names[oattr]);
478             }
479         }
480     }
481     if (FIB_PATH_CFG_FLAG_NONE != path->fp_cfg_flags) {
482         s = format(s, " cfg-flags:");
483         FOR_EACH_FIB_PATH_CFG_ATTRIBUTE(cattr) {
484             if ((1<<cattr) & path->fp_cfg_flags) {
485                 s = format (s, "%s,", fib_path_cfg_attribute_names[cattr]);
486             }
487         }
488     }
489     s = format(s, "\n%U", format_white_space, indent+2);
490
491     switch (path->fp_type)
492     {
493     case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
494         s = format (s, "%U", format_ip46_address,
495                     &path->attached_next_hop.fp_nh,
496                     IP46_TYPE_ANY);
497         if (path->fp_oper_flags & FIB_PATH_OPER_FLAG_DROP)
498         {
499             s = format (s, " if_index:%d", path->attached_next_hop.fp_interface);
500         }
501         else
502         {
503             s = format (s, " %U",
504                         format_vnet_sw_interface_name,
505                         vnm,
506                         vnet_get_sw_interface(
507                             vnm,
508                             path->attached_next_hop.fp_interface));
509             if (vnet_sw_interface_is_p2p(vnet_get_main(),
510                                          path->attached_next_hop.fp_interface))
511             {
512                 s = format (s, " (p2p)");
513             }
514         }
515         if (!dpo_id_is_valid(&path->fp_dpo))
516         {
517             s = format(s, "\n%Uunresolved", format_white_space, indent+2);
518         }
519         else
520         {
521             s = format(s, "\n%U%U",
522                        format_white_space, indent,
523                        format_dpo_id,
524                        &path->fp_dpo, 13);
525         }
526         break;
527     case FIB_PATH_TYPE_ATTACHED:
528         if (path->fp_oper_flags & FIB_PATH_OPER_FLAG_DROP)
529         {
530             s = format (s, "if_index:%d", path->attached_next_hop.fp_interface);
531         }
532         else
533         {
534             s = format (s, " %U",
535                         format_vnet_sw_interface_name,
536                         vnm,
537                         vnet_get_sw_interface(
538                             vnm,
539                             path->attached.fp_interface));
540         }
541         break;
542     case FIB_PATH_TYPE_RECURSIVE:
543         if (DPO_PROTO_MPLS == path->fp_nh_proto)
544         {
545             s = format (s, "via %U %U",
546                         format_mpls_unicast_label,
547                         path->recursive.fp_nh.fp_local_label,
548                         format_mpls_eos_bit,
549                         path->recursive.fp_nh.fp_eos);
550         }
551         else
552         {
553             s = format (s, "via %U",
554                         format_ip46_address,
555                         &path->recursive.fp_nh.fp_ip,
556                         IP46_TYPE_ANY);
557         }
558         s = format (s, " in fib:%d",
559                     path->recursive.fp_tbl_id,
560                     path->fp_via_fib); 
561         s = format (s, " via-fib:%d", path->fp_via_fib); 
562         s = format (s, " via-dpo:[%U:%d]",
563                     format_dpo_type, path->fp_dpo.dpoi_type, 
564                     path->fp_dpo.dpoi_index);
565
566         break;
567     case FIB_PATH_TYPE_UDP_ENCAP:
568         s = format (s, "UDP-encap ID:%d", path->udp_encap.fp_udp_encap_id);
569         break;
570     case FIB_PATH_TYPE_BIER_TABLE:
571         s = format (s, "via bier-table:[%U}",
572                     format_bier_table_id,
573                     &path->bier_table.fp_bier_tbl);
574         s = format (s, " via-dpo:[%U:%d]",
575                     format_dpo_type, path->fp_dpo.dpoi_type,
576                     path->fp_dpo.dpoi_index);
577         break;
578     case FIB_PATH_TYPE_BIER_FMASK:
579         s = format (s, "via-fmask:%d", path->bier_fmask.fp_bier_fmask); 
580         s = format (s, " via-dpo:[%U:%d]",
581                     format_dpo_type, path->fp_dpo.dpoi_type, 
582                     path->fp_dpo.dpoi_index);
583         break;
584     case FIB_PATH_TYPE_BIER_IMP:
585         s = format (s, "via %U", format_bier_imp,
586                     path->bier_imp.fp_bier_imp, 0, BIER_SHOW_BRIEF);
587         break;
588     case FIB_PATH_TYPE_DVR:
589         s = format (s, " %U",
590                     format_vnet_sw_interface_name,
591                     vnm,
592                     vnet_get_sw_interface(
593                         vnm,
594                         path->dvr.fp_interface));
595         break;
596     case FIB_PATH_TYPE_RECEIVE:
597     case FIB_PATH_TYPE_INTF_RX:
598     case FIB_PATH_TYPE_SPECIAL:
599     case FIB_PATH_TYPE_DEAG:
600     case FIB_PATH_TYPE_EXCLUSIVE:
601         if (dpo_id_is_valid(&path->fp_dpo))
602         {
603             s = format(s, "%U", format_dpo_id,
604                        &path->fp_dpo, indent+2);
605         }
606         break;
607     }
608     return (s);
609 }
610
611 u8 *
612 fib_path_format (fib_node_index_t pi, u8 *s)
613 {
614     fib_path_t *path;
615
616     path = fib_path_get(pi);
617     ASSERT(NULL != path);
618
619     return (format (s, "%U", format_fib_path, path));
620 }
621
622 /*
623  * fib_path_last_lock_gone
624  *
625  * We don't share paths, we share path lists, so the [un]lock functions
626  * are no-ops
627  */
628 static void
629 fib_path_last_lock_gone (fib_node_t *node)
630 {
631     ASSERT(0);
632 }
633
634 static const adj_index_t
635 fib_path_attached_next_hop_get_adj (fib_path_t *path,
636                                     vnet_link_t link)
637 {
638     if (vnet_sw_interface_is_p2p(vnet_get_main(),
639                                  path->attached_next_hop.fp_interface))
640     {
641         /*
642          * if the interface is p2p then the adj for the specific
643          * neighbour on that link will never exist. on p2p links
644          * the subnet address (the attached route) links to the
645          * auto-adj (see below), we want that adj here too.
646          */
647         return (adj_nbr_add_or_lock(dpo_proto_to_fib(path->fp_nh_proto),
648                                     link,
649                                     &zero_addr,
650                                     path->attached_next_hop.fp_interface));
651     }
652     else
653     {
654         return (adj_nbr_add_or_lock(dpo_proto_to_fib(path->fp_nh_proto),
655                                     link,
656                                     &path->attached_next_hop.fp_nh,
657                                     path->attached_next_hop.fp_interface));
658     }
659 }
660
661 static void
662 fib_path_attached_next_hop_set (fib_path_t *path)
663 {
664     /*
665      * resolve directly via the adjacency described by the
666      * interface and next-hop
667      */
668     dpo_set(&path->fp_dpo,
669             DPO_ADJACENCY,
670             path->fp_nh_proto,
671             fib_path_attached_next_hop_get_adj(
672                  path,
673                  dpo_proto_to_link(path->fp_nh_proto)));
674
675     /*
676      * become a child of the adjacency so we receive updates
677      * when its rewrite changes
678      */
679     path->fp_sibling = adj_child_add(path->fp_dpo.dpoi_index,
680                                      FIB_NODE_TYPE_PATH,
681                                      fib_path_get_index(path));
682
683     if (!vnet_sw_interface_is_admin_up(vnet_get_main(),
684                                       path->attached_next_hop.fp_interface) ||
685         !adj_is_up(path->fp_dpo.dpoi_index))
686     {
687         path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
688     }
689 }
690
691 static const adj_index_t
692 fib_path_attached_get_adj (fib_path_t *path,
693                            vnet_link_t link)
694 {
695     if (vnet_sw_interface_is_p2p(vnet_get_main(),
696                                  path->attached.fp_interface))
697     {
698         /*
699          * point-2-point interfaces do not require a glean, since
700          * there is nothing to ARP. Install a rewrite/nbr adj instead
701          */
702         return (adj_nbr_add_or_lock(dpo_proto_to_fib(path->fp_nh_proto),
703                                     link,
704                                     &zero_addr,
705                                     path->attached.fp_interface));
706     }
707     else
708     {
709         return (adj_glean_add_or_lock(dpo_proto_to_fib(path->fp_nh_proto),
710                                       link,
711                                       path->attached.fp_interface,
712                                       NULL));
713     }
714 }
715
716 /*
717  * create or update the path's recursive adj
718  */
719 static void
720 fib_path_recursive_adj_update (fib_path_t *path,
721                                fib_forward_chain_type_t fct,
722                                dpo_id_t *dpo)
723 {
724     dpo_id_t via_dpo = DPO_INVALID;
725
726     /*
727      * get the DPO to resolve through from the via-entry
728      */
729     fib_entry_contribute_forwarding(path->fp_via_fib,
730                                     fct,
731                                     &via_dpo);
732
733
734     /*
735      * hope for the best - clear if restrictions apply.
736      */
737     path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RESOLVED;
738
739     /*
740      * Validate any recursion constraints and over-ride the via
741      * adj if not met
742      */
743     if (path->fp_oper_flags & FIB_PATH_OPER_FLAG_RECURSIVE_LOOP)
744     {
745         path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
746         dpo_copy(&via_dpo, drop_dpo_get(path->fp_nh_proto));
747     }
748     else if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_RESOLVE_HOST)
749     {
750         /*
751          * the via FIB must be a host route.
752          * note the via FIB just added will always be a host route
753          * since it is an RR source added host route. So what we need to
754          * check is whether the route has other sources. If it does then
755          * some other source has added it as a host route. If it doesn't
756          * then it was added only here and inherits forwarding from a cover.
757          * the cover is not a host route.
758          * The RR source is the lowest priority source, so we check if it
759          * is the best. if it is there are no other sources.
760          */
761         if (fib_entry_get_best_source(path->fp_via_fib) >= FIB_SOURCE_RR)
762         {
763             path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
764             dpo_copy(&via_dpo, drop_dpo_get(path->fp_nh_proto));
765
766             /*
767              * PIC edge trigger. let the load-balance maps know
768              */
769             load_balance_map_path_state_change(fib_path_get_index(path));
770         }
771     }
772     else if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_RESOLVE_ATTACHED)
773     {
774         /*
775          * RR source entries inherit the flags from the cover, so
776          * we can check the via directly
777          */
778         if (!(FIB_ENTRY_FLAG_ATTACHED & fib_entry_get_flags(path->fp_via_fib)))
779         {
780             path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
781             dpo_copy(&via_dpo, drop_dpo_get(path->fp_nh_proto));
782
783             /*
784              * PIC edge trigger. let the load-balance maps know
785              */
786             load_balance_map_path_state_change(fib_path_get_index(path));
787         }
788     }
789     /*
790      * check for over-riding factors on the FIB entry itself
791      */
792     if (!fib_entry_is_resolved(path->fp_via_fib))
793     {
794         path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
795         dpo_copy(&via_dpo, drop_dpo_get(path->fp_nh_proto));
796
797         /*
798          * PIC edge trigger. let the load-balance maps know
799          */
800         load_balance_map_path_state_change(fib_path_get_index(path));
801     }
802
803     /*
804      * If this path is contributing a drop, then it's not resolved
805      */
806     if (dpo_is_drop(&via_dpo) || load_balance_is_drop(&via_dpo))
807     {
808         path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
809     }
810
811     /*
812      * update the path's contributed DPO
813      */
814     dpo_copy(dpo, &via_dpo);
815
816     FIB_PATH_DBG(path, "recursive update:");
817
818     dpo_reset(&via_dpo);
819 }
820
821 /*
822  * re-evaluate the forwarding state for a via-fmask path
823  */
824 static void
825 fib_path_bier_fmask_update (fib_path_t *path,
826                             dpo_id_t *dpo)
827 {
828     bier_fmask_contribute_forwarding(path->bier_fmask.fp_bier_fmask, dpo);
829
830     /*
831      * if we are stacking on the drop, then the path is not resolved
832      */
833     if (dpo_is_drop(dpo))
834     {
835         path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
836     }
837     else
838     {
839         path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RESOLVED;
840     }
841 }
842
843 /*
844  * fib_path_is_permanent_drop
845  *
846  * Return !0 if the path is configured to permanently drop,
847  * despite other attributes.
848  */
849 static int
850 fib_path_is_permanent_drop (fib_path_t *path)
851 {
852     return ((path->fp_cfg_flags & FIB_PATH_CFG_FLAG_DROP) ||
853             (path->fp_oper_flags & FIB_PATH_OPER_FLAG_DROP));
854 }
855
856 /*
857  * fib_path_unresolve
858  *
859  * Remove our dependency on the resolution target
860  */
861 static void
862 fib_path_unresolve (fib_path_t *path)
863 {
864     /*
865      * the forced drop path does not need unresolving
866      */
867     if (fib_path_is_permanent_drop(path))
868     {
869         return;
870     }
871
872     switch (path->fp_type)
873     {
874     case FIB_PATH_TYPE_RECURSIVE:
875         if (FIB_NODE_INDEX_INVALID != path->fp_via_fib)
876         {
877             fib_entry_child_remove(path->fp_via_fib,
878                                    path->fp_sibling);
879             fib_table_entry_special_remove(path->recursive.fp_tbl_id,
880                                            fib_entry_get_prefix(path->fp_via_fib),
881                                            FIB_SOURCE_RR);
882             fib_table_unlock(path->recursive.fp_tbl_id,
883                              dpo_proto_to_fib(path->fp_nh_proto),
884                              FIB_SOURCE_RR);
885             path->fp_via_fib = FIB_NODE_INDEX_INVALID;
886         }
887         break;
888     case FIB_PATH_TYPE_BIER_FMASK:
889         bier_fmask_child_remove(path->fp_via_bier_fmask,
890                                 path->fp_sibling);
891         break;
892     case FIB_PATH_TYPE_BIER_IMP:
893         bier_imp_unlock(path->fp_dpo.dpoi_index);
894         break;
895     case FIB_PATH_TYPE_BIER_TABLE:
896         bier_table_ecmp_unlock(path->fp_via_bier_tbl);
897         break;
898     case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
899         adj_child_remove(path->fp_dpo.dpoi_index,
900                          path->fp_sibling);
901         adj_unlock(path->fp_dpo.dpoi_index);
902         break;
903     case FIB_PATH_TYPE_ATTACHED:
904         adj_child_remove(path->fp_dpo.dpoi_index,
905                          path->fp_sibling);
906         adj_unlock(path->fp_dpo.dpoi_index);
907         break;
908     case FIB_PATH_TYPE_UDP_ENCAP:
909         udp_encap_unlock(path->fp_dpo.dpoi_index);
910         break;
911     case FIB_PATH_TYPE_EXCLUSIVE:
912         dpo_reset(&path->exclusive.fp_ex_dpo);
913         break;
914     case FIB_PATH_TYPE_SPECIAL:
915     case FIB_PATH_TYPE_RECEIVE:
916     case FIB_PATH_TYPE_INTF_RX:
917     case FIB_PATH_TYPE_DEAG:
918     case FIB_PATH_TYPE_DVR:
919         /*
920          * these hold only the path's DPO, which is reset below.
921          */
922         break;
923     }
924
925     /*
926      * release the adj we were holding and pick up the
927      * drop just in case.
928      */
929     dpo_reset(&path->fp_dpo);
930     path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
931
932     return;
933 }
934
935 static fib_forward_chain_type_t
936 fib_path_to_chain_type (const fib_path_t *path)
937 {
938     if (DPO_PROTO_MPLS == path->fp_nh_proto)
939     {
940         if (FIB_PATH_TYPE_RECURSIVE == path->fp_type &&
941             MPLS_EOS == path->recursive.fp_nh.fp_eos)
942         {
943             return (FIB_FORW_CHAIN_TYPE_MPLS_EOS);
944         }
945         else
946         {
947             return (FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS);
948         }
949     }
950     else
951     {
952         return (fib_forw_chain_type_from_dpo_proto(path->fp_nh_proto));
953     }
954 }
955
956 /*
957  * fib_path_back_walk_notify
958  *
959  * A back walk has reached this path.
960  */
961 static fib_node_back_walk_rc_t
962 fib_path_back_walk_notify (fib_node_t *node,
963                            fib_node_back_walk_ctx_t *ctx)
964 {
965     fib_path_t *path;
966
967     path = fib_path_from_fib_node(node);
968
969     switch (path->fp_type)
970     {
971     case FIB_PATH_TYPE_RECURSIVE:
972         if (FIB_NODE_BW_REASON_FLAG_EVALUATE & ctx->fnbw_reason)
973         {
974             /*
975              * modify the recursive adjacency to use the new forwarding
976              * of the via-fib.
977              * this update is visible to packets in flight in the DP.
978              */
979             fib_path_recursive_adj_update(
980                 path,
981                 fib_path_to_chain_type(path),
982                 &path->fp_dpo);
983         }
984         if ((FIB_NODE_BW_REASON_FLAG_ADJ_UPDATE & ctx->fnbw_reason) ||
985             (FIB_NODE_BW_REASON_FLAG_ADJ_DOWN   & ctx->fnbw_reason))
986         {
987             /*
988              * ADJ updates (complete<->incomplete) do not need to propagate to
989              * recursive entries.
990              * The only reason it's needed as far back as here is that the adj
991              * and the incomplete adj are a different DPO type, so the LBs need
992              * to re-stack.
993              * If this walk was quashed in the fib_entry, then any non-fib_path
994              * children (like tunnels that collapse out the LB when they stack)
995              * would not see the update.
996              */
997             return (FIB_NODE_BACK_WALK_CONTINUE);
998         }
999         break;
1000     case FIB_PATH_TYPE_BIER_FMASK:
1001         if (FIB_NODE_BW_REASON_FLAG_EVALUATE & ctx->fnbw_reason)
1002         {
1003             /*
1004              * update to use the BIER fmask's new forwarding
1005              */
1006             fib_path_bier_fmask_update(path, &path->fp_dpo);
1007         }
1008         if ((FIB_NODE_BW_REASON_FLAG_ADJ_UPDATE & ctx->fnbw_reason) ||
1009             (FIB_NODE_BW_REASON_FLAG_ADJ_DOWN   & ctx->fnbw_reason))
1010         {
1011             /*
1012              * ADJ updates (complete<->incomplete) do not need to propagate to
1013              * recursive entries.
1014              * The only reason it's needed as far back as here is that the adj
1015              * and the incomplete adj are a different DPO type, so the LBs need
1016              * to re-stack.
1017              * If this walk was quashed in the fib_entry, then any non-fib_path
1018              * children (like tunnels that collapse out the LB when they stack)
1019              * would not see the update.
1020              */
1021             return (FIB_NODE_BACK_WALK_CONTINUE);
1022         }
1023         break;
1024     case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
1025         /*
1026          * FIXME comment
1027          * ADJ_UPDATE backwalks pass silently through here and up to
1028          * the path-list when the multipath adj collapse occurs.
1029          * The reason we do this is that the assumption is that VPP
1030          * runs in an environment where the Control-Plane is remote
1031          * and hence reacts slowly to link up/down. In order to remove
1032          * this down link from the ECMP set quickly, we back-walk.
1033          * VPP also has dedicated CPUs, so we are not stealing resources
1034          * from the CP to do so.
1035          */
1036         if (FIB_NODE_BW_REASON_FLAG_INTERFACE_UP & ctx->fnbw_reason)
1037         {
1038             if (path->fp_oper_flags & FIB_PATH_OPER_FLAG_RESOLVED)
1039             {
1040                 /*
1041                  * already resolved. no need to walk back again
1042                  */
1043                 return (FIB_NODE_BACK_WALK_CONTINUE);
1044             }
1045             path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RESOLVED;
1046         }
1047         if (FIB_NODE_BW_REASON_FLAG_INTERFACE_DOWN & ctx->fnbw_reason)
1048         {
1049             if (!(path->fp_oper_flags & FIB_PATH_OPER_FLAG_RESOLVED))
1050             {
1051                 /*
1052                  * already unresolved. no need to walk back again
1053                  */
1054                 return (FIB_NODE_BACK_WALK_CONTINUE);
1055             }
1056             path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
1057         }
1058         if (FIB_NODE_BW_REASON_FLAG_INTERFACE_DELETE & ctx->fnbw_reason)
1059         {
1060             /*
1061              * The interface this path resolves through has been deleted.
1062              * This will leave the path in a permanent drop state. The route
1063              * needs to be removed and readded (and hence the path-list deleted)
1064              * before it can forward again.
1065              */
1066             fib_path_unresolve(path);
1067             path->fp_oper_flags |= FIB_PATH_OPER_FLAG_DROP;
1068         }
1069         if (FIB_NODE_BW_REASON_FLAG_ADJ_UPDATE & ctx->fnbw_reason)
1070         {
1071             /*
1072              * restack the DPO to pick up the correct DPO sub-type
1073              */
1074             uword if_is_up;
1075             adj_index_t ai;
1076
1077             if_is_up = vnet_sw_interface_is_admin_up(
1078                            vnet_get_main(),
1079                            path->attached_next_hop.fp_interface);
1080
1081             ai = fib_path_attached_next_hop_get_adj(
1082                      path,
1083                      dpo_proto_to_link(path->fp_nh_proto));
1084
1085             path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
1086             if (if_is_up && adj_is_up(ai))
1087             {
1088                 path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RESOLVED;
1089             }
1090
1091             dpo_set(&path->fp_dpo, DPO_ADJACENCY, path->fp_nh_proto, ai);
1092             adj_unlock(ai);
1093
1094             if (!if_is_up)
1095             {
1096                 /*
1097                  * If the interface is not up there is no reason to walk
1098                  * back to children. if we did they would only evaluate
1099                  * that this path is unresolved and hence it would
1100                  * not contribute the adjacency - so it would be wasted
1101                  * CPU time.
1102                  */
1103                 return (FIB_NODE_BACK_WALK_CONTINUE);
1104             }
1105         }
1106         if (FIB_NODE_BW_REASON_FLAG_ADJ_DOWN & ctx->fnbw_reason)
1107         {
1108             if (!(path->fp_oper_flags & FIB_PATH_OPER_FLAG_RESOLVED))
1109             {
1110                 /*
1111                  * already unresolved. no need to walk back again
1112                  */
1113                 return (FIB_NODE_BACK_WALK_CONTINUE);
1114             }
1115             /*
1116              * the adj has gone down. the path is no longer resolved.
1117              */
1118             path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
1119         }
1120         break;
1121     case FIB_PATH_TYPE_ATTACHED:
1122     case FIB_PATH_TYPE_DVR:
1123         /*
1124          * FIXME; this could schedule a lower priority walk, since attached
1125          * routes are not usually in ECMP configurations so the backwalk to
1126          * the FIB entry does not need to be high priority
1127          */
1128         if (FIB_NODE_BW_REASON_FLAG_INTERFACE_UP & ctx->fnbw_reason)
1129         {
1130             path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RESOLVED;
1131         }
1132         if (FIB_NODE_BW_REASON_FLAG_INTERFACE_DOWN & ctx->fnbw_reason)
1133         {
1134             path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
1135         }
1136         if (FIB_NODE_BW_REASON_FLAG_INTERFACE_DELETE & ctx->fnbw_reason)
1137         {
1138             fib_path_unresolve(path);
1139             path->fp_oper_flags |= FIB_PATH_OPER_FLAG_DROP;
1140         }
1141         break;
1142     case FIB_PATH_TYPE_UDP_ENCAP:
1143     {
1144         dpo_id_t via_dpo = DPO_INVALID;
1145
1146         /*
1147          * hope for the best - clear if restrictions apply.
1148          */
1149         path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RESOLVED;
1150
1151         udp_encap_contribute_forwarding(path->udp_encap.fp_udp_encap_id,
1152                                         path->fp_nh_proto,
1153                                         &via_dpo);
1154         /*
1155          * If this path is contributing a drop, then it's not resolved
1156          */
1157         if (dpo_is_drop(&via_dpo) || load_balance_is_drop(&via_dpo))
1158         {
1159             path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
1160         }
1161
1162         /*
1163          * update the path's contributed DPO
1164          */
1165         dpo_copy(&path->fp_dpo, &via_dpo);
1166         dpo_reset(&via_dpo);
1167         break;
1168     }
1169     case FIB_PATH_TYPE_INTF_RX:
1170         ASSERT(0);
1171     case FIB_PATH_TYPE_DEAG:
1172         /*
1173          * FIXME When VRF delete is allowed this will need a poke.
1174          */
1175     case FIB_PATH_TYPE_SPECIAL:
1176     case FIB_PATH_TYPE_RECEIVE:
1177     case FIB_PATH_TYPE_EXCLUSIVE:
1178     case FIB_PATH_TYPE_BIER_TABLE:
1179     case FIB_PATH_TYPE_BIER_IMP:
1180         /*
1181          * these path types have no parents. so to be
1182          * walked from one is unexpected.
1183          */
1184         ASSERT(0);
1185         break;
1186     }
1187
1188     /*
1189      * propagate the backwalk further to the path-list
1190      */
1191     fib_path_list_back_walk(path->fp_pl_index, ctx);
1192
1193     return (FIB_NODE_BACK_WALK_CONTINUE);
1194 }
1195
1196 static void
1197 fib_path_memory_show (void)
1198 {
1199     fib_show_memory_usage("Path",
1200                           pool_elts(fib_path_pool),
1201                           pool_len(fib_path_pool),
1202                           sizeof(fib_path_t));
1203 }
1204
1205 /*
1206  * The FIB path's graph node virtual function table
1207  */
1208 static const fib_node_vft_t fib_path_vft = {
1209     .fnv_get = fib_path_get_node,
1210     .fnv_last_lock = fib_path_last_lock_gone,
1211     .fnv_back_walk = fib_path_back_walk_notify,
1212     .fnv_mem_show = fib_path_memory_show,
1213 };
1214
1215 static fib_path_cfg_flags_t
1216 fib_path_route_flags_to_cfg_flags (const fib_route_path_t *rpath)
1217 {
1218     fib_path_cfg_flags_t cfg_flags = FIB_PATH_CFG_FLAG_NONE;
1219
1220     if (rpath->frp_flags & FIB_ROUTE_PATH_RESOLVE_VIA_HOST)
1221         cfg_flags |= FIB_PATH_CFG_FLAG_RESOLVE_HOST;
1222     if (rpath->frp_flags & FIB_ROUTE_PATH_RESOLVE_VIA_ATTACHED)
1223         cfg_flags |= FIB_PATH_CFG_FLAG_RESOLVE_ATTACHED;
1224     if (rpath->frp_flags & FIB_ROUTE_PATH_LOCAL)
1225         cfg_flags |= FIB_PATH_CFG_FLAG_LOCAL;
1226     if (rpath->frp_flags & FIB_ROUTE_PATH_ATTACHED)
1227         cfg_flags |= FIB_PATH_CFG_FLAG_ATTACHED;
1228     if (rpath->frp_flags & FIB_ROUTE_PATH_INTF_RX)
1229         cfg_flags |= FIB_PATH_CFG_FLAG_INTF_RX;
1230     if (rpath->frp_flags & FIB_ROUTE_PATH_RPF_ID)
1231         cfg_flags |= FIB_PATH_CFG_FLAG_RPF_ID;
1232     if (rpath->frp_flags & FIB_ROUTE_PATH_EXCLUSIVE)
1233         cfg_flags |= FIB_PATH_CFG_FLAG_EXCLUSIVE;
1234     if (rpath->frp_flags & FIB_ROUTE_PATH_DROP)
1235         cfg_flags |= FIB_PATH_CFG_FLAG_DROP;
1236     if (rpath->frp_flags & FIB_ROUTE_PATH_SOURCE_LOOKUP)
1237         cfg_flags |= FIB_PATH_CFG_FLAG_DEAG_SRC;
1238
1239     return (cfg_flags);
1240 }
1241
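/*
 * Illustrative sketch (hypothetical values, not part of the original source):
 * a route path flagged as resolve-via-host and drop translates into the
 * corresponding cfg flags:
 *
 *     fib_route_path_t rpath = {
 *         .frp_flags = FIB_ROUTE_PATH_RESOLVE_VIA_HOST | FIB_ROUTE_PATH_DROP,
 *     };
 *     fib_path_cfg_flags_t f = fib_path_route_flags_to_cfg_flags(&rpath);
 *     ASSERT(f == (FIB_PATH_CFG_FLAG_RESOLVE_HOST | FIB_PATH_CFG_FLAG_DROP));
 */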
1242 /*
1243  * fib_path_create
1244  *
1245  * Create and initialise a new path object.
1246  * return the index of the path.
1247  */
1248 fib_node_index_t
1249 fib_path_create (fib_node_index_t pl_index,
1250                  const fib_route_path_t *rpath)
1251 {
1252     fib_path_t *path;
1253
1254     pool_get(fib_path_pool, path);
1255     memset(path, 0, sizeof(*path));
1256
1257     fib_node_init(&path->fp_node,
1258                   FIB_NODE_TYPE_PATH);
1259
1260     dpo_reset(&path->fp_dpo);
1261     path->fp_pl_index = pl_index;
1262     path->fp_nh_proto = rpath->frp_proto;
1263     path->fp_via_fib = FIB_NODE_INDEX_INVALID;
1264     path->fp_weight = rpath->frp_weight;
1265     if (0 == path->fp_weight)
1266     {
1267         /*
1268          * a weight of 0 is a meaningless value. We could either reject it, and thus force
1269          * clients to always use 1, or we can accept it and fix it up appropriately.
1270          */
1271         path->fp_weight = 1;
1272     }
1273     path->fp_preference = rpath->frp_preference;
1274     path->fp_cfg_flags = fib_path_route_flags_to_cfg_flags(rpath);
1275
1276     /*
1277      * deduce the path's type from the parameters and save what is needed.
1278      */
1279     if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_LOCAL)
1280     {
1281         path->fp_type = FIB_PATH_TYPE_RECEIVE;
1282         path->receive.fp_interface = rpath->frp_sw_if_index;
1283         path->receive.fp_addr = rpath->frp_addr;
1284     }
1285     else if (rpath->frp_flags & FIB_ROUTE_PATH_UDP_ENCAP)
1286     {
1287         path->fp_type = FIB_PATH_TYPE_UDP_ENCAP;
1288         path->udp_encap.fp_udp_encap_id = rpath->frp_udp_encap_id;
1289     }
1290     else if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_INTF_RX)
1291     {
1292         path->fp_type = FIB_PATH_TYPE_INTF_RX;
1293         path->intf_rx.fp_interface = rpath->frp_sw_if_index;
1294     }
1295     else if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_RPF_ID)
1296     {
1297         path->fp_type = FIB_PATH_TYPE_DEAG;
1298         path->deag.fp_tbl_id = rpath->frp_fib_index;
1299         path->deag.fp_rpf_id = rpath->frp_rpf_id;
1300     }
1301     else if (rpath->frp_flags & FIB_ROUTE_PATH_BIER_FMASK)
1302     {
1303         path->fp_type = FIB_PATH_TYPE_BIER_FMASK;
1304         path->bier_fmask.fp_bier_fmask = rpath->frp_bier_fmask;
1305     }
1306     else if (rpath->frp_flags & FIB_ROUTE_PATH_BIER_IMP)
1307     {
1308         path->fp_type = FIB_PATH_TYPE_BIER_IMP;
1309         path->bier_imp.fp_bier_imp = rpath->frp_bier_imp;
1310     }
1311     else if (rpath->frp_flags & FIB_ROUTE_PATH_BIER_TABLE)
1312     {
1313         path->fp_type = FIB_PATH_TYPE_BIER_TABLE;
1314         path->bier_table.fp_bier_tbl = rpath->frp_bier_tbl;
1315     }
1316     else if (rpath->frp_flags & FIB_ROUTE_PATH_DEAG)
1317     {
1318         path->fp_type = FIB_PATH_TYPE_DEAG;
1319         path->deag.fp_tbl_id = rpath->frp_fib_index;
1320     }
1321     else if (rpath->frp_flags & FIB_ROUTE_PATH_DVR)
1322     {
1323         path->fp_type = FIB_PATH_TYPE_DVR;
1324         path->dvr.fp_interface = rpath->frp_sw_if_index;
1325     }
1326     else if (rpath->frp_flags & FIB_ROUTE_PATH_EXCLUSIVE)
1327     {
1328         path->fp_type = FIB_PATH_TYPE_EXCLUSIVE;
1329         dpo_copy(&path->exclusive.fp_ex_dpo, &rpath->dpo);
1330     }
1331     else if (~0 != rpath->frp_sw_if_index)
1332     {
1333         if (ip46_address_is_zero(&rpath->frp_addr))
1334         {
1335             path->fp_type = FIB_PATH_TYPE_ATTACHED;
1336             path->attached.fp_interface = rpath->frp_sw_if_index;
1337         }
1338         else
1339         {
1340             path->fp_type = FIB_PATH_TYPE_ATTACHED_NEXT_HOP;
1341             path->attached_next_hop.fp_interface = rpath->frp_sw_if_index;
1342             path->attached_next_hop.fp_nh = rpath->frp_addr;
1343         }
1344     }
1345     else
1346     {
1347         if (ip46_address_is_zero(&rpath->frp_addr))
1348         {
1349             if (~0 == rpath->frp_fib_index)
1350             {
1351                 path->fp_type = FIB_PATH_TYPE_SPECIAL;
1352             }
1353             else
1354             {
1355                 path->fp_type = FIB_PATH_TYPE_DEAG;
1356                 path->deag.fp_tbl_id = rpath->frp_fib_index;
1357                 path->deag.fp_rpf_id = ~0;
1358             }
1359         }
1360         else
1361         {
1362             path->fp_type = FIB_PATH_TYPE_RECURSIVE;
1363             if (DPO_PROTO_MPLS == path->fp_nh_proto)
1364             {
1365                 path->recursive.fp_nh.fp_local_label = rpath->frp_local_label;
1366                 path->recursive.fp_nh.fp_eos = rpath->frp_eos;
1367             }
1368             else
1369             {
1370                 path->recursive.fp_nh.fp_ip = rpath->frp_addr;
1371             }
1372             path->recursive.fp_tbl_id = rpath->frp_fib_index;
1373         }
1374     }
1375
1376     FIB_PATH_DBG(path, "create");
1377
1378     return (fib_path_get_index(path));
1379 }
1380
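/*
 * Usage sketch (illustrative; the values and pl_index are hypothetical).
 * A caller, normally the owning path-list, describes the path with a
 * fib_route_path_t and gets back the index of the new path:
 *
 *     fib_route_path_t rpath = {
 *         .frp_proto = DPO_PROTO_IP4,
 *         .frp_addr.ip4.as_u32 = clib_host_to_net_u32 (0x0a000001),
 *         .frp_sw_if_index = 1,
 *         .frp_fib_index = ~0,
 *         .frp_weight = 1,
 *         .frp_preference = 0,
 *     };
 *     fib_node_index_t pi = fib_path_create(pl_index, &rpath);
 *
 * With a non-zero address and a valid sw_if_index this resolves to
 * FIB_PATH_TYPE_ATTACHED_NEXT_HOP per the logic above.
 */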
1381 /*
1382  * fib_path_create_special
1383  *
1384  * Create and initialise a new path object.
1385  * return the index of the path.
1386  */
1387 fib_node_index_t
1388 fib_path_create_special (fib_node_index_t pl_index,
1389                          dpo_proto_t nh_proto,
1390                          fib_path_cfg_flags_t flags,
1391                          const dpo_id_t *dpo)
1392 {
1393     fib_path_t *path;
1394
1395     pool_get(fib_path_pool, path);
1396     memset(path, 0, sizeof(*path));
1397
1398     fib_node_init(&path->fp_node,
1399                   FIB_NODE_TYPE_PATH);
1400     dpo_reset(&path->fp_dpo);
1401
1402     path->fp_pl_index = pl_index;
1403     path->fp_weight = 1;
1404     path->fp_preference = 0;
1405     path->fp_nh_proto = nh_proto;
1406     path->fp_via_fib = FIB_NODE_INDEX_INVALID;
1407     path->fp_cfg_flags = flags;
1408
1409     if (FIB_PATH_CFG_FLAG_DROP & flags)
1410     {
1411         path->fp_type = FIB_PATH_TYPE_SPECIAL;
1412     }
1413     else if (FIB_PATH_CFG_FLAG_LOCAL & flags)
1414     {
1415         path->fp_type = FIB_PATH_TYPE_RECEIVE;
1416         path->attached.fp_interface = FIB_NODE_INDEX_INVALID;
1417     }
1418     else
1419     {
1420         path->fp_type = FIB_PATH_TYPE_EXCLUSIVE;
1421         ASSERT(NULL != dpo);
1422         dpo_copy(&path->exclusive.fp_ex_dpo, dpo);
1423     }
1424
1425     return (fib_path_get_index(path));
1426 }
1427
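/*
 * Usage sketch (illustrative; pl_index is hypothetical): a special path
 * carrying the drop flag needs no DPO, e.g. for an IPv4 blackhole:
 *
 *     fib_node_index_t pi;
 *
 *     pi = fib_path_create_special(pl_index,
 *                                  DPO_PROTO_IP4,
 *                                  FIB_PATH_CFG_FLAG_DROP,
 *                                  NULL);
 *
 * When neither DROP nor LOCAL is set, a valid DPO must be supplied and the
 * path becomes FIB_PATH_TYPE_EXCLUSIVE.
 */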
1428 /*
1429  * fib_path_copy
1430  *
1431  * Copy a path. return index of new path.
1432  */
1433 fib_node_index_t
1434 fib_path_copy (fib_node_index_t path_index,
1435                fib_node_index_t path_list_index)
1436 {
1437     fib_path_t *path, *orig_path;
1438
1439     pool_get(fib_path_pool, path);
1440
1441     orig_path = fib_path_get(path_index);
1442     ASSERT(NULL != orig_path);
1443
1444     memcpy(path, orig_path, sizeof(*path));
1445
1446     FIB_PATH_DBG(path, "create-copy:%d", path_index);
1447
1448     /*
1449      * reset the dynamic section
1450      */
1451     fib_node_init(&path->fp_node, FIB_NODE_TYPE_PATH);
1452     path->fp_oper_flags     = FIB_PATH_OPER_FLAG_NONE;
1453     path->fp_pl_index  = path_list_index;
1454     path->fp_via_fib   = FIB_NODE_INDEX_INVALID;
1455     memset(&path->fp_dpo, 0, sizeof(path->fp_dpo));
1456     dpo_reset(&path->fp_dpo);
1457
1458     return (fib_path_get_index(path));
1459 }
1460
1461 /*
1462  * fib_path_destroy
1463  *
1464  * destroy a path that is no longer required
1465  */
1466 void
1467 fib_path_destroy (fib_node_index_t path_index)
1468 {
1469     fib_path_t *path;
1470
1471     path = fib_path_get(path_index);
1472
1473     ASSERT(NULL != path);
1474     FIB_PATH_DBG(path, "destroy");
1475
1476     fib_path_unresolve(path);
1477
1478     fib_node_deinit(&path->fp_node);
1479     pool_put(fib_path_pool, path);
1480 }
1481
1482 /*
1483  * fib_path_hash
1484  *
1485  * compute the hash of a path's key fields
1486  */
1487 uword
1488 fib_path_hash (fib_node_index_t path_index)
1489 {
1490     fib_path_t *path;
1491
1492     path = fib_path_get(path_index);
1493
1494     return (hash_memory(STRUCT_MARK_PTR(path, path_hash_start),
1495                         (STRUCT_OFFSET_OF(fib_path_t, path_hash_end) -
1496                          STRUCT_OFFSET_OF(fib_path_t, path_hash_start)),
1497                         0));
1498 }
1499
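/*
 * Illustrative sketch (assumption, not part of the original source): the
 * hash above and fib_path_cmp() below are used together to de-duplicate
 * paths within path-lists; two paths that hash equal should then also
 * compare equal:
 *
 *     if (fib_path_hash(pi1) == fib_path_hash(pi2) &&
 *         0 == fib_path_cmp(pi1, pi2))
 *     {
 *         // pi1 and pi2 describe the same path
 *     }
 */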
1500 /*
1501  * fib_path_cmp_i
1502  *
1503  * Compare two paths for equivalence.
1504  */
1505 static int
1506 fib_path_cmp_i (const fib_path_t *path1,
1507                 const fib_path_t *path2)
1508 {
1509     int res;
1510
1511     res = 1;
1512
1513     /*
1514      * paths of different types and protocol are not equal.
1515      * paths that differ only in weight and/or preference are the same path.
1516      */
1517     if (path1->fp_type != path2->fp_type)
1518     {
1519         res = (path1->fp_type - path2->fp_type);
1520     }
1521     else if (path1->fp_nh_proto != path2->fp_nh_proto)
1522     {
1523         res = (path1->fp_nh_proto - path2->fp_nh_proto);
1524     }
1525     else
1526     {
1527         /*
1528          * both paths are of the same type.
1529          * consider each type and its attributes in turn.
1530          */
1531         switch (path1->fp_type)
1532         {
1533         case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
1534             res = ip46_address_cmp(&path1->attached_next_hop.fp_nh,
1535                                    &path2->attached_next_hop.fp_nh);
1536             if (0 == res) {
1537                 res = (path1->attached_next_hop.fp_interface -
1538                        path2->attached_next_hop.fp_interface);
1539             }
1540             break;
1541         case FIB_PATH_TYPE_ATTACHED:
1542             res = (path1->attached.fp_interface -
1543                    path2->attached.fp_interface);
1544             break;
1545         case FIB_PATH_TYPE_RECURSIVE:
1546             res = ip46_address_cmp(&path1->recursive.fp_nh,
1547                                    &path2->recursive.fp_nh);
1548  
1549             if (0 == res)
1550             {
1551                 res = (path1->recursive.fp_tbl_id - path2->recursive.fp_tbl_id);
1552             }
1553             break;
1554         case FIB_PATH_TYPE_BIER_FMASK:
1555             res = (path1->bier_fmask.fp_bier_fmask -
1556                    path2->bier_fmask.fp_bier_fmask);
1557             break;
1558         case FIB_PATH_TYPE_BIER_IMP:
1559             res = (path1->bier_imp.fp_bier_imp -
1560                    path2->bier_imp.fp_bier_imp);
1561             break;
1562         case FIB_PATH_TYPE_BIER_TABLE:
1563             res = bier_table_id_cmp(&path1->bier_table.fp_bier_tbl,
1564                                     &path2->bier_table.fp_bier_tbl);
1565             break;
1566         case FIB_PATH_TYPE_DEAG:
1567             res = (path1->deag.fp_tbl_id - path2->deag.fp_tbl_id);
1568             if (0 == res)
1569             {
1570                 res = (path1->deag.fp_rpf_id - path2->deag.fp_rpf_id);
1571             }
1572             break;
1573         case FIB_PATH_TYPE_INTF_RX:
1574             res = (path1->intf_rx.fp_interface - path2->intf_rx.fp_interface);
1575             break;
1576         case FIB_PATH_TYPE_UDP_ENCAP:
1577             res = (path1->udp_encap.fp_udp_encap_id - path2->udp_encap.fp_udp_encap_id);
1578             break;
1579         case FIB_PATH_TYPE_DVR:
1580             res = (path1->dvr.fp_interface - path2->dvr.fp_interface);
1581             break;
1582         case FIB_PATH_TYPE_EXCLUSIVE:
1583             res = dpo_cmp(&path1->exclusive.fp_ex_dpo, &path2->exclusive.fp_ex_dpo);
1584             break;
1585         case FIB_PATH_TYPE_SPECIAL:
1586         case FIB_PATH_TYPE_RECEIVE:
1587             res = 0;
1588             break;
1589         }
1590     }
1591     return (res);
1592 }
1593
1594 /*
1595  * fib_path_cmp_for_sort
1596  *
1597  * Compare two paths for equivalence. Used during path sorting.
1598  * As usual 0 means equal.
1599  */
1600 int
1601 fib_path_cmp_for_sort (void * v1,
1602                        void * v2)
1603 {
1604     fib_node_index_t *pi1 = v1, *pi2 = v2;
1605     fib_path_t *path1, *path2;
1606
1607     path1 = fib_path_get(*pi1);
1608     path2 = fib_path_get(*pi2);
1609
1610     /*
1611      * when sorting paths we want the highest preference paths
1612      * first, so that the set of choices built is in preference order
1613      */
1614     if (path1->fp_preference != path2->fp_preference)
1615     {
1616         return (path1->fp_preference - path2->fp_preference);
1617     }
1618
1619     return (fib_path_cmp_i(path1, path2));
1620 }
1621
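/*
 * Usage sketch (illustrative, assuming a vector of path indices named
 * path_indices): the comparator above is intended for vppinfra's
 * vec_sort_with_function(), which sorts a vector of fib_node_index_t
 * values into preference order:
 *
 *     vec_sort_with_function(path_indices, fib_path_cmp_for_sort);
 */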
1622 /*
1623  * fib_path_cmp
1624  *
1625  * Compare two paths for equivalence.
1626  */
1627 int
1628 fib_path_cmp (fib_node_index_t pi1,
1629               fib_node_index_t pi2)
1630 {
1631     fib_path_t *path1, *path2;
1632
1633     path1 = fib_path_get(pi1);
1634     path2 = fib_path_get(pi2);
1635
1636     return (fib_path_cmp_i(path1, path2));
1637 }
1638
1639 int
1640 fib_path_cmp_w_route_path (fib_node_index_t path_index,
1641                            const fib_route_path_t *rpath)
1642 {
1643     fib_path_t *path;
1644     int res;
1645
1646     path = fib_path_get(path_index);
1647
1648     res = 1;
1649
1650     if (path->fp_weight != rpath->frp_weight)
1651     {
1652         res = (path->fp_weight - rpath->frp_weight);
1653     }
1654     else
1655     {
1656         /*
1657          * the weights are equal, so compare the path against the
1658          * route-path's type-specific attributes in turn.
1659          */
1660         switch (path->fp_type)
1661         {
1662         case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
1663             res = ip46_address_cmp(&path->attached_next_hop.fp_nh,
1664                                    &rpath->frp_addr);
1665             if (0 == res)
1666             {
1667                 res = (path->attached_next_hop.fp_interface -
1668                        rpath->frp_sw_if_index);
1669             }
1670             break;
1671         case FIB_PATH_TYPE_ATTACHED:
1672             res = (path->attached.fp_interface - rpath->frp_sw_if_index);
1673             break;
1674         case FIB_PATH_TYPE_RECURSIVE:
1675             if (DPO_PROTO_MPLS == path->fp_nh_proto)
1676             {
1677                 res = path->recursive.fp_nh.fp_local_label - rpath->frp_local_label;
1678
1679                 if (res == 0)
1680                 {
1681                     res = path->recursive.fp_nh.fp_eos - rpath->frp_eos;
1682                 }
1683             }
1684             else
1685             {
1686                 res = ip46_address_cmp(&path->recursive.fp_nh.fp_ip,
1687                                        &rpath->frp_addr);
1688             }
1689
1690             if (0 == res)
1691             {
1692                 res = (path->recursive.fp_tbl_id - rpath->frp_fib_index);
1693             }
1694             break;
1695         case FIB_PATH_TYPE_BIER_FMASK:
1696             res = (path->bier_fmask.fp_bier_fmask - rpath->frp_bier_fmask);
1697             break;
1698         case FIB_PATH_TYPE_BIER_IMP:
1699             res = (path->bier_imp.fp_bier_imp - rpath->frp_bier_imp);
1700             break;
1701         case FIB_PATH_TYPE_BIER_TABLE:
1702             res = bier_table_id_cmp(&path->bier_table.fp_bier_tbl,
1703                                     &rpath->frp_bier_tbl);
1704             break;
1705         case FIB_PATH_TYPE_INTF_RX:
1706             res = (path->intf_rx.fp_interface - rpath->frp_sw_if_index);
1707             break;
1708         case FIB_PATH_TYPE_UDP_ENCAP:
1709             res = (path->udp_encap.fp_udp_encap_id - rpath->frp_udp_encap_id);
1710             break;
1711         case FIB_PATH_TYPE_DEAG:
1712             res = (path->deag.fp_tbl_id - rpath->frp_fib_index);
1713             if (0 == res)
1714             {
1715                 res = (path->deag.fp_rpf_id - rpath->frp_rpf_id);
1716             }
1717             break;
1718         case FIB_PATH_TYPE_DVR:
1719             res = (path->dvr.fp_interface - rpath->frp_sw_if_index);
1720             break;
1721         case FIB_PATH_TYPE_EXCLUSIVE:
1722             res = dpo_cmp(&path->exclusive.fp_ex_dpo, &rpath->dpo);
1723             break;
1724         case FIB_PATH_TYPE_SPECIAL:
1725         case FIB_PATH_TYPE_RECEIVE:
1726             res = 0;
1727             break;
1728         }
1729     }
1730     return (res);
1731 }
1732
1733 /*
1734  * fib_path_recursive_loop_detect
1735  *
1736  * A forward walk of the FIB object graph to detect a cycle/loop. This
1737  * walk is initiated when an entry links to a new path-list or unlinks from an old one.
1738  * The entry vector passed contains all the FIB entries that are children of this
1739  * path (i.e. all the entries encountered on the walk so far). If this vector
1740  * contains the entry this path resolves via, then a loop is about to form.
1741  * The loop must be allowed to form, since we need the dependencies in place
1742  * so that we can track when the loop breaks.
1743  * However, we MUST NOT produce a loop in the forwarding graph (else packets
1744  * would loop around the switch path until the loop breaks), so we mark recursive
1745  * paths as looped so that they do not contribute forwarding information.
1746  * By marking the path as looped, an entry such as:
1747  *    X/Y
1748  *     via a.a.a.a (looped)
1749  *     via b.b.b.b (not looped)
1750  * can still forward using the info provided by b.b.b.b only
1751  */
1752 int
1753 fib_path_recursive_loop_detect (fib_node_index_t path_index,
1754                                 fib_node_index_t **entry_indicies)
1755 {
1756     fib_path_t *path;
1757
1758     path = fib_path_get(path_index);
1759
1760     /*
1761      * the forced drop path is never looped, because it is never resolved.
1762      */
1763     if (fib_path_is_permanent_drop(path))
1764     {
1765         return (0);
1766     }
1767
1768     switch (path->fp_type)
1769     {
1770     case FIB_PATH_TYPE_RECURSIVE:
1771     {
1772         fib_node_index_t *entry_index, *entries;
1773         int looped = 0;
1774         entries = *entry_indicies;
1775
1776         vec_foreach(entry_index, entries) {
1777             if (*entry_index == path->fp_via_fib)
1778             {
1779                 /*
1780                  * the entry that is about to link to this path-list (or
1781                  * one of this path-list's children) is the same entry that
1782                  * this recursive path resolves through. this is a cycle.
1783                  * abort the walk.
1784                  */
1785                 looped = 1;
1786                 break;
1787             }
1788         }
1789
1790         if (looped)
1791         {
1792             FIB_PATH_DBG(path, "recursive loop formed");
1793             path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RECURSIVE_LOOP;
1794
1795             dpo_copy(&path->fp_dpo, drop_dpo_get(path->fp_nh_proto));
1796         }
1797         else
1798         {
1799             /*
1800              * no loop here yet. keep forward walking the graph.
1801              */     
1802             if (fib_entry_recursive_loop_detect(path->fp_via_fib, entry_indicies))
1803             {
1804                 FIB_PATH_DBG(path, "recursive loop formed");
1805                 path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RECURSIVE_LOOP;
1806             }
1807             else
1808             {
1809                 FIB_PATH_DBG(path, "recursive loop cleared");
1810                 path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RECURSIVE_LOOP;
1811             }
1812         }
1813         break;
1814     }
1815     case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
1816     case FIB_PATH_TYPE_ATTACHED:
1817     case FIB_PATH_TYPE_SPECIAL:
1818     case FIB_PATH_TYPE_DEAG:
1819     case FIB_PATH_TYPE_DVR:
1820     case FIB_PATH_TYPE_RECEIVE:
1821     case FIB_PATH_TYPE_INTF_RX:
1822     case FIB_PATH_TYPE_UDP_ENCAP:
1823     case FIB_PATH_TYPE_EXCLUSIVE:
1824     case FIB_PATH_TYPE_BIER_FMASK:
1825     case FIB_PATH_TYPE_BIER_TABLE:
1826     case FIB_PATH_TYPE_BIER_IMP:
1827         /*
1828          * these path types cannot be part of a loop, since they are the leaves
1829          * of the graph.
1830          */
1831         break;
1832     }
1833
1834     return (fib_path_is_looped(path_index));
1835 }
1836
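/*
 * fib_path_resolve
 *
 * Resolve the path; construct the DPO through which it forwards and,
 * where needed, become a child of the object it resolves via (adjacency,
 * FIB entry, BIER fmask, etc.) so the path is informed of state changes.
 * Returns non-zero if the path ends up resolved.
 */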
1837 int
1838 fib_path_resolve (fib_node_index_t path_index)
1839 {
1840     fib_path_t *path;
1841
1842     path = fib_path_get(path_index);
1843
1844     /*
1845      * hope for the best.
1846      */
1847     path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RESOLVED;
1848
1849     /*
1850      * the forced drop path resolves via the drop adj
1851      */
1852     if (fib_path_is_permanent_drop(path))
1853     {
1854         dpo_copy(&path->fp_dpo, drop_dpo_get(path->fp_nh_proto));
1855         path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
1856         return (fib_path_is_resolved(path_index));
1857     }
1858
1859     switch (path->fp_type)
1860     {
1861     case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
1862         fib_path_attached_next_hop_set(path);
1863         break;
1864     case FIB_PATH_TYPE_ATTACHED:
1865         /*
1866          * an attached path is resolved only if its interface is admin up
1867          */
1868         if (!vnet_sw_interface_is_admin_up(vnet_get_main(),
1869                                            path->attached.fp_interface))
1870         {
1871             path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
1872         }
1873         dpo_set(&path->fp_dpo,
1874                 DPO_ADJACENCY,
1875                 path->fp_nh_proto,
1876                 fib_path_attached_get_adj(path,
1877                                           dpo_proto_to_link(path->fp_nh_proto)));
1878
1879         /*
1880          * become a child of the adjacency so we receive updates
1881          * when the interface state changes
1882          */
1883         path->fp_sibling = adj_child_add(path->fp_dpo.dpoi_index,
1884                                          FIB_NODE_TYPE_PATH,
1885                                          fib_path_get_index(path));
1886         break;
1887     case FIB_PATH_TYPE_RECURSIVE:
1888     {
1889         /*
1890          * Create a RR source entry in the table for the address
1891          * that this path recurses through.
1892          * This resolve action is itself recursive, hence we may create
1893          * more paths in the process; those creations may realloc the
1894          * path pool, so this path must be re-fetched afterwards.
1895          */
1896         fib_node_index_t fei;
1897         fib_prefix_t pfx;
1898
1899         ASSERT(FIB_NODE_INDEX_INVALID == path->fp_via_fib);
1900
1901         if (DPO_PROTO_MPLS == path->fp_nh_proto)
1902         {
1903             fib_prefix_from_mpls_label(path->recursive.fp_nh.fp_local_label,
1904                                        path->recursive.fp_nh.fp_eos,
1905                                        &pfx);
1906         }
1907         else
1908         {
1909             fib_prefix_from_ip46_addr(&path->recursive.fp_nh.fp_ip, &pfx);
1910         }
1911
1912         fib_table_lock(path->recursive.fp_tbl_id,
1913                        dpo_proto_to_fib(path->fp_nh_proto),
1914                        FIB_SOURCE_RR);
1915         fei = fib_table_entry_special_add(path->recursive.fp_tbl_id,
1916                                           &pfx,
1917                                           FIB_SOURCE_RR,
1918                                           FIB_ENTRY_FLAG_NONE);
1919
1920         path = fib_path_get(path_index);
1921         path->fp_via_fib = fei;
1922
1923         /*
1924          * become a dependent child of the entry so the path is 
1925          * informed when the forwarding for the entry changes.
1926          */
1927         path->fp_sibling = fib_entry_child_add(path->fp_via_fib,
1928                                                FIB_NODE_TYPE_PATH,
1929                                                fib_path_get_index(path));
1930
1931         /*
1932          * create and configure the IP DPO
1933          */
1934         fib_path_recursive_adj_update(
1935             path,
1936             fib_path_to_chain_type(path),
1937             &path->fp_dpo);
1938
1939         break;
1940     }
1941     case FIB_PATH_TYPE_BIER_FMASK:
1942     {
1943         /*
1944          * become a dependent child of the entry so the path is
1945          * informed when the forwarding for the entry changes.
1946          */
1947         path->fp_sibling = bier_fmask_child_add(path->bier_fmask.fp_bier_fmask,
1948                                                 FIB_NODE_TYPE_PATH,
1949                                                 fib_path_get_index(path));
1950
1951         path->fp_via_bier_fmask = path->bier_fmask.fp_bier_fmask;
1952         fib_path_bier_fmask_update(path, &path->fp_dpo);
1953
1954         break;
1955     }
1956     case FIB_PATH_TYPE_BIER_IMP:
1957         bier_imp_lock(path->bier_imp.fp_bier_imp);
1958         bier_imp_contribute_forwarding(path->bier_imp.fp_bier_imp,
1959                                        DPO_PROTO_IP4,
1960                                        &path->fp_dpo);
1961         break;
1962     case FIB_PATH_TYPE_BIER_TABLE:
1963     {
1964         /*
1965          * Find/create the BIER table to link to
1966          */
1967         ASSERT(FIB_NODE_INDEX_INVALID == path->fp_via_bier_tbl);
1968
1969         path->fp_via_bier_tbl =
1970             bier_table_ecmp_create_and_lock(&path->bier_table.fp_bier_tbl);
1971
1972         bier_table_contribute_forwarding(path->fp_via_bier_tbl,
1973                                          &path->fp_dpo);
1974         break;
1975     }
1976     case FIB_PATH_TYPE_SPECIAL:
1977         /*
1978          * Resolve via the drop
1979          */
1980         dpo_copy(&path->fp_dpo, drop_dpo_get(path->fp_nh_proto));
1981         break;
1982     case FIB_PATH_TYPE_DEAG:
1983     {
1984         if (DPO_PROTO_BIER == path->fp_nh_proto)
1985         {
1986             bier_disp_table_contribute_forwarding(path->deag.fp_tbl_id,
1987                                                   &path->fp_dpo);
1988         }
1989         else
1990         {
1991             /*
1992              * Resolve via a lookup DPO.
1993              * FIXME. control plane should add routes with a table ID
1994              */
1995             lookup_input_t input;
1996             lookup_cast_t cast;
1997
1998             cast = (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_RPF_ID ?
1999                     LOOKUP_MULTICAST :
2000                     LOOKUP_UNICAST);
2001             input = (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_DEAG_SRC ?
2002                      LOOKUP_INPUT_SRC_ADDR :
2003                      LOOKUP_INPUT_DST_ADDR);
2004
2005             lookup_dpo_add_or_lock_w_fib_index(path->deag.fp_tbl_id,
2006                                                path->fp_nh_proto,
2007                                                cast,
2008                                                input,
2009                                                LOOKUP_TABLE_FROM_CONFIG,
2010                                                &path->fp_dpo);
2011         }
2012         break;
2013     }
2014     case FIB_PATH_TYPE_DVR:
2015         dvr_dpo_add_or_lock(path->attached.fp_interface,
2016                             path->fp_nh_proto,
2017                             &path->fp_dpo);
2018         break;
2019     case FIB_PATH_TYPE_RECEIVE:
2020         /*
2021          * Resolve via a receive DPO.
2022          */
2023         receive_dpo_add_or_lock(path->fp_nh_proto,
2024                                 path->receive.fp_interface,
2025                                 &path->receive.fp_addr,
2026                                 &path->fp_dpo);
2027         break;
2028     case FIB_PATH_TYPE_UDP_ENCAP:
2029         udp_encap_lock(path->udp_encap.fp_udp_encap_id);
2030         udp_encap_contribute_forwarding(path->udp_encap.fp_udp_encap_id,
2031                                         path->fp_nh_proto,
2032                                         &path->fp_dpo);
2033         break;
2034     case FIB_PATH_TYPE_INTF_RX: {
2035         /*
2036          * Resolve via an interface-rx DPO.
2037          */
2038         interface_rx_dpo_add_or_lock(path->fp_nh_proto,
2039                                      path->intf_rx.fp_interface,
2040                                      &path->fp_dpo);
2041         break;
2042     }
2043     case FIB_PATH_TYPE_EXCLUSIVE:
2044         /*
2045          * Resolve via the user provided DPO
2046          */
2047         dpo_copy(&path->fp_dpo, &path->exclusive.fp_ex_dpo);
2048         break;
2049     }
2050
2051     return (fib_path_is_resolved(path_index));
2052 }
2053
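/*
 * fib_path_get_resolving_interface
 *
 * Return the interface the path resolves via. Recursive paths defer to
 * their via-entry; for types with no directly associated interface the
 * uRPF interface of the path's DPO is returned.
 */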
2054 u32
2055 fib_path_get_resolving_interface (fib_node_index_t path_index)
2056 {
2057     fib_path_t *path;
2058
2059     path = fib_path_get(path_index);
2060
2061     switch (path->fp_type)
2062     {
2063     case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
2064         return (path->attached_next_hop.fp_interface);
2065     case FIB_PATH_TYPE_ATTACHED:
2066         return (path->attached.fp_interface);
2067     case FIB_PATH_TYPE_RECEIVE:
2068         return (path->receive.fp_interface);
2069     case FIB_PATH_TYPE_RECURSIVE:
2070         if (fib_path_is_resolved(path_index))
2071         {
2072             return (fib_entry_get_resolving_interface(path->fp_via_fib));
2073         }
2074         break;
2075     case FIB_PATH_TYPE_DVR:
2076         return (path->dvr.fp_interface);
2077     case FIB_PATH_TYPE_INTF_RX:
2078     case FIB_PATH_TYPE_UDP_ENCAP:
2079     case FIB_PATH_TYPE_SPECIAL:
2080     case FIB_PATH_TYPE_DEAG:
2081     case FIB_PATH_TYPE_EXCLUSIVE:
2082     case FIB_PATH_TYPE_BIER_FMASK:
2083     case FIB_PATH_TYPE_BIER_TABLE:
2084     case FIB_PATH_TYPE_BIER_IMP:
2085         break;
2086     }
2087     return (dpo_get_urpf(&path->fp_dpo));
2088 }
2089
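/*
 * fib_path_get_resolving_index
 *
 * Return the index of the object the path resolves via (FIB entry,
 * UDP encap, BIER fmask/table/imposition), or ~0 if there is none.
 */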
2090 index_t
2091 fib_path_get_resolving_index (fib_node_index_t path_index)
2092 {
2093     fib_path_t *path;
2094
2095     path = fib_path_get(path_index);
2096
2097     switch (path->fp_type)
2098     {
2099     case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
2100     case FIB_PATH_TYPE_ATTACHED:
2101     case FIB_PATH_TYPE_RECEIVE:
2102     case FIB_PATH_TYPE_INTF_RX:
2103     case FIB_PATH_TYPE_SPECIAL:
2104     case FIB_PATH_TYPE_DEAG:
2105     case FIB_PATH_TYPE_DVR:
2106     case FIB_PATH_TYPE_EXCLUSIVE:
2107         break;
2108     case FIB_PATH_TYPE_UDP_ENCAP:
2109         return (path->udp_encap.fp_udp_encap_id);
2110     case FIB_PATH_TYPE_RECURSIVE:
2111         return (path->fp_via_fib);
2112     case FIB_PATH_TYPE_BIER_FMASK:
2113         return (path->bier_fmask.fp_bier_fmask);
2114     case FIB_PATH_TYPE_BIER_TABLE:
2115         return (path->fp_via_bier_tbl);
2116     case FIB_PATH_TYPE_BIER_IMP:
2117         return (path->bier_imp.fp_bier_imp);
2118     }
2119     return (~0);
2120 }
2121
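/*
 * fib_path_get_adj
 *
 * Return the index of the adjacency the path is currently using,
 * or ADJ_INDEX_INVALID if the path's DPO is not an adjacency.
 */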
2122 adj_index_t
2123 fib_path_get_adj (fib_node_index_t path_index)
2124 {
2125     fib_path_t *path;
2126
2127     path = fib_path_get(path_index);
2128
2129     ASSERT(dpo_is_adj(&path->fp_dpo));
2130     if (dpo_is_adj(&path->fp_dpo))
2131     {
2132         return (path->fp_dpo.dpoi_index);
2133     }
2134     return (ADJ_INDEX_INVALID);
2135 }
2136
2137 u16
2138 fib_path_get_weight (fib_node_index_t path_index)
2139 {
2140     fib_path_t *path;
2141
2142     path = fib_path_get(path_index);
2143
2144     ASSERT(path);
2145
2146     return (path->fp_weight);
2147 }
2148
2149 u16
2150 fib_path_get_preference (fib_node_index_t path_index)
2151 {
2152     fib_path_t *path;
2153
2154     path = fib_path_get(path_index);
2155
2156     ASSERT(path);
2157
2158     return (path->fp_preference);
2159 }
2160
2161 u32
2162 fib_path_get_rpf_id (fib_node_index_t path_index)
2163 {
2164     fib_path_t *path;
2165
2166     path = fib_path_get(path_index);
2167
2168     ASSERT(path);
2169
2170     if (FIB_PATH_CFG_FLAG_RPF_ID & path->fp_cfg_flags)
2171     {
2172         return (path->deag.fp_rpf_id);
2173     }
2174
2175     return (~0);
2176 }
2177
2178 /**
2179  * @brief Contribute the path's resolving interface(s) to the uRPF list passed.
2180  * By calling this function over all paths, recursively, a child
2181  * can construct its full set of forwarding adjacencies, and hence its
2182  * uRPF list.
2183  */
2184 void
2185 fib_path_contribute_urpf (fib_node_index_t path_index,
2186                           index_t urpf)
2187 {
2188     fib_path_t *path;
2189
2190     path = fib_path_get(path_index);
2191
2192     /*
2193      * resolved and unresolved paths contribute to the RPF list.
2194      */
2195     switch (path->fp_type)
2196     {
2197     case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
2198         fib_urpf_list_append(urpf, path->attached_next_hop.fp_interface);
2199         break;
2200
2201     case FIB_PATH_TYPE_ATTACHED:
2202         fib_urpf_list_append(urpf, path->attached.fp_interface);
2203         break;
2204
2205     case FIB_PATH_TYPE_RECURSIVE:
2206         if (FIB_NODE_INDEX_INVALID != path->fp_via_fib &&
2207             !fib_path_is_looped(path_index))
2208         {
2209             /*
2210              * a path can be unresolved due to constraints, or because it
2211              * has no via-entry; with no via-entry there is nothing to contribute.
2212              */
2213             fib_entry_contribute_urpf(path->fp_via_fib, urpf);
2214         }
2215         break;
2216
2217     case FIB_PATH_TYPE_EXCLUSIVE:
2218     case FIB_PATH_TYPE_SPECIAL:
2219     {
2220         /*
2221          * these path types may link to an adj, if that's what
2222          * the client gave
2223          */
2224         u32 rpf_sw_if_index;
2225
2226         rpf_sw_if_index = dpo_get_urpf(&path->fp_dpo);
2227
2228         if (~0 != rpf_sw_if_index)
2229         {
2230             fib_urpf_list_append(urpf, rpf_sw_if_index);
2231         }
2232         break;
2233     }
2234     case FIB_PATH_TYPE_DVR:
2235         fib_urpf_list_append(urpf, path->dvr.fp_interface);
2236         break;
2237     case FIB_PATH_TYPE_DEAG:
2238     case FIB_PATH_TYPE_RECEIVE:
2239     case FIB_PATH_TYPE_INTF_RX:
2240     case FIB_PATH_TYPE_UDP_ENCAP:
2241     case FIB_PATH_TYPE_BIER_FMASK:
2242     case FIB_PATH_TYPE_BIER_TABLE:
2243     case FIB_PATH_TYPE_BIER_IMP:
2244         /*
2245          * these path types don't link to an adj
2246          */
2247         break;
2248     }
2249 }
2250
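/*
 * fib_path_stack_mpls_disp
 *
 * For attached-next-hop and deag paths, replace the DPO passed with an
 * MPLS disposition DPO (for the given payload protocol and LSP mode)
 * stacked on it; other path types leave the DPO unchanged.
 */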
2251 void
2252 fib_path_stack_mpls_disp (fib_node_index_t path_index,
2253                           dpo_proto_t payload_proto,
2254                           fib_mpls_lsp_mode_t mode,
2255                           dpo_id_t *dpo)
2256 {
2257     fib_path_t *path;
2258
2259     path = fib_path_get(path_index);
2260
2261     ASSERT(path);
2262
2263     switch (path->fp_type)
2264     {
2265     case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
2266     {
2267         dpo_id_t tmp = DPO_INVALID;
2268
2269         dpo_copy(&tmp, dpo);
2270
2271         mpls_disp_dpo_create(payload_proto, ~0, mode, &tmp, dpo);
2272         dpo_reset(&tmp);
2273         break;
2274     }                
2275     case FIB_PATH_TYPE_DEAG:
2276     {
2277         dpo_id_t tmp = DPO_INVALID;
2278
2279         dpo_copy(&tmp, dpo);
2280
2281         mpls_disp_dpo_create(payload_proto,
2282                              path->deag.fp_rpf_id,
2283                              mode, &tmp, dpo);
2284         dpo_reset(&tmp);
2285         break;
2286     }
2287     case FIB_PATH_TYPE_RECEIVE:
2288     case FIB_PATH_TYPE_ATTACHED:
2289     case FIB_PATH_TYPE_RECURSIVE:
2290     case FIB_PATH_TYPE_INTF_RX:
2291     case FIB_PATH_TYPE_UDP_ENCAP:
2292     case FIB_PATH_TYPE_EXCLUSIVE:
2293     case FIB_PATH_TYPE_SPECIAL:
2294     case FIB_PATH_TYPE_BIER_FMASK:
2295     case FIB_PATH_TYPE_BIER_TABLE:
2296     case FIB_PATH_TYPE_BIER_IMP:
2297     case FIB_PATH_TYPE_DVR:
2298         break;
2299     }
2300 }
2301
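/*
 * fib_path_contribute_forwarding
 *
 * Fill in the DPO that the path contributes for the requested forwarding
 * chain type. If the chain type matches the path's native type, the cached
 * DPO is copied; otherwise a DPO appropriate to the chain type is built.
 */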
2302 void
2303 fib_path_contribute_forwarding (fib_node_index_t path_index,
2304                                 fib_forward_chain_type_t fct,
2305                                 dpo_id_t *dpo)
2306 {
2307     fib_path_t *path;
2308
2309     path = fib_path_get(path_index);
2310
2311     ASSERT(path);
2312     ASSERT(FIB_FORW_CHAIN_TYPE_MPLS_EOS != fct);
2313
2314     FIB_PATH_DBG(path, "contribute");
2315
2316     /*
2317      * The DPO stored in the path was created when the path was resolved.
2318      * It therefore represents the path's 'native' protocol, e.g. IP.
2319      * For all other chain types we need to go find something else.
2320      */
2321     if (fib_path_to_chain_type(path) == fct)
2322     {
2323         dpo_copy(dpo, &path->fp_dpo);
2324     }
2325     else
2326     {
2327         switch (path->fp_type)
2328         {
2329         case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
2330             switch (fct)
2331             {
2332             case FIB_FORW_CHAIN_TYPE_UNICAST_IP4:
2333             case FIB_FORW_CHAIN_TYPE_UNICAST_IP6:
2334             case FIB_FORW_CHAIN_TYPE_MPLS_EOS:
2335             case FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS:
2336             case FIB_FORW_CHAIN_TYPE_ETHERNET:
2337             case FIB_FORW_CHAIN_TYPE_NSH:
2338             case FIB_FORW_CHAIN_TYPE_MCAST_IP4:
2339             case FIB_FORW_CHAIN_TYPE_MCAST_IP6:
2340             {
2341                 adj_index_t ai;
2342
2343                 /*
2344                  * get an appropriate link-type adj.
2345                  */
2346                 ai = fib_path_attached_next_hop_get_adj(
2347                          path,
2348                          fib_forw_chain_type_to_link_type(fct));
2349                 dpo_set(dpo, DPO_ADJACENCY,
2350                         fib_forw_chain_type_to_dpo_proto(fct), ai);
2351                 adj_unlock(ai);
2352
2353                 break;
2354             }
2355             case FIB_FORW_CHAIN_TYPE_BIER:
2356                 break;
2357             }
2358             break;
2359         case FIB_PATH_TYPE_RECURSIVE:
2360             switch (fct)
2361             {
2362             case FIB_FORW_CHAIN_TYPE_MPLS_EOS:
2363             case FIB_FORW_CHAIN_TYPE_UNICAST_IP4:
2364             case FIB_FORW_CHAIN_TYPE_UNICAST_IP6:
2365             case FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS:
2366             case FIB_FORW_CHAIN_TYPE_MCAST_IP4:
2367             case FIB_FORW_CHAIN_TYPE_MCAST_IP6:
2368             case FIB_FORW_CHAIN_TYPE_BIER:
2369                 fib_path_recursive_adj_update(path, fct, dpo);
2370                 break;
2371             case FIB_FORW_CHAIN_TYPE_ETHERNET:
2372             case FIB_FORW_CHAIN_TYPE_NSH:
2373                 ASSERT(0);
2374                 break;
2375             }
2376             break;
2377         case FIB_PATH_TYPE_BIER_TABLE:
2378             switch (fct)
2379             {
2380             case FIB_FORW_CHAIN_TYPE_BIER:
2381                 bier_table_contribute_forwarding(path->fp_via_bier_tbl, dpo);
2382                 break;
2383             case FIB_FORW_CHAIN_TYPE_MPLS_EOS:
2384             case FIB_FORW_CHAIN_TYPE_UNICAST_IP4:
2385             case FIB_FORW_CHAIN_TYPE_UNICAST_IP6:
2386             case FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS:
2387             case FIB_FORW_CHAIN_TYPE_MCAST_IP4:
2388             case FIB_FORW_CHAIN_TYPE_MCAST_IP6:
2389             case FIB_FORW_CHAIN_TYPE_ETHERNET:
2390             case FIB_FORW_CHAIN_TYPE_NSH:
2391                 ASSERT(0);
2392                 break;
2393             }
2394             break;
2395         case FIB_PATH_TYPE_BIER_FMASK:
2396             switch (fct)
2397             {
2398             case FIB_FORW_CHAIN_TYPE_BIER:
2399                 fib_path_bier_fmask_update(path, dpo);
2400                 break;
2401             case FIB_FORW_CHAIN_TYPE_MPLS_EOS:
2402             case FIB_FORW_CHAIN_TYPE_UNICAST_IP4:
2403             case FIB_FORW_CHAIN_TYPE_UNICAST_IP6:
2404             case FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS:
2405             case FIB_FORW_CHAIN_TYPE_MCAST_IP4:
2406             case FIB_FORW_CHAIN_TYPE_MCAST_IP6:
2407             case FIB_FORW_CHAIN_TYPE_ETHERNET:
2408             case FIB_FORW_CHAIN_TYPE_NSH:
2409                 ASSERT(0);
2410                 break;
2411             }
2412             break;
2413         case FIB_PATH_TYPE_BIER_IMP:
2414             bier_imp_contribute_forwarding(path->bier_imp.fp_bier_imp,
2415                                            fib_forw_chain_type_to_dpo_proto(fct),
2416                                            dpo);
2417             break;
2418         case FIB_PATH_TYPE_DEAG:
2419             switch (fct)
2420             {
2421             case FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS:
2422                 lookup_dpo_add_or_lock_w_table_id(MPLS_FIB_DEFAULT_TABLE_ID,
2423                                                   DPO_PROTO_MPLS,
2424                                                   LOOKUP_UNICAST,
2425                                                   LOOKUP_INPUT_DST_ADDR,
2426                                                   LOOKUP_TABLE_FROM_CONFIG,
2427                                                   dpo);
2428                 break;
2429             case FIB_FORW_CHAIN_TYPE_MPLS_EOS:
2430             case FIB_FORW_CHAIN_TYPE_UNICAST_IP4:
2431             case FIB_FORW_CHAIN_TYPE_UNICAST_IP6:
2432                 dpo_copy(dpo, &path->fp_dpo);
2433                 break;
2434             case FIB_FORW_CHAIN_TYPE_MCAST_IP4:
2435             case FIB_FORW_CHAIN_TYPE_MCAST_IP6:
2436             case FIB_FORW_CHAIN_TYPE_BIER:
2437                 break;
2438             case FIB_FORW_CHAIN_TYPE_ETHERNET:
2439             case FIB_FORW_CHAIN_TYPE_NSH:
2440                 ASSERT(0);
2441                 break;
2442             }
2443             break;
2444         case FIB_PATH_TYPE_EXCLUSIVE:
2445             dpo_copy(dpo, &path->exclusive.fp_ex_dpo);
2446             break;
2447         case FIB_PATH_TYPE_ATTACHED:
2448             switch (fct)
2449             {
2450             case FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS:
2451             case FIB_FORW_CHAIN_TYPE_UNICAST_IP4:
2452             case FIB_FORW_CHAIN_TYPE_UNICAST_IP6:
2453             case FIB_FORW_CHAIN_TYPE_MPLS_EOS:
2454             case FIB_FORW_CHAIN_TYPE_ETHERNET:
2455             case FIB_FORW_CHAIN_TYPE_NSH:
2456             case FIB_FORW_CHAIN_TYPE_BIER:
2457                 {
2458                     adj_index_t ai;
2459
2460                     /*
2461                      * get an appropriate link-type adj.
2462                      */
2463                     ai = fib_path_attached_get_adj(
2464                             path,
2465                             fib_forw_chain_type_to_link_type(fct));
2466                     dpo_set(dpo, DPO_ADJACENCY,
2467                             fib_forw_chain_type_to_dpo_proto(fct), ai);
2468                     adj_unlock(ai);
2469                     break;
2470                 }
2471             case FIB_FORW_CHAIN_TYPE_MCAST_IP4:
2472             case FIB_FORW_CHAIN_TYPE_MCAST_IP6:
2473                 {
2474                     adj_index_t ai;
2475
2476                     /*
2477                      * Create the adj needed for sending IP multicast traffic
2478                      */
2479                     ai = adj_mcast_add_or_lock(dpo_proto_to_fib(path->fp_nh_proto),
2480                                                fib_forw_chain_type_to_link_type(fct),
2481                                                path->attached.fp_interface);
2482                     dpo_set(dpo, DPO_ADJACENCY,
2483                             fib_forw_chain_type_to_dpo_proto(fct),
2484                             ai);
2485                     adj_unlock(ai);
2486                 }
2487                 break;
2488             }
2489             break;
2490         case FIB_PATH_TYPE_INTF_RX:
2491             /*
2492              * Create the interface-rx DPO for this chain type
2493              */
2494             interface_rx_dpo_add_or_lock(fib_forw_chain_type_to_dpo_proto(fct),
2495                                          path->attached.fp_interface,
2496                                          dpo);
2497             break;
2498         case FIB_PATH_TYPE_UDP_ENCAP:
2499             udp_encap_contribute_forwarding(path->udp_encap.fp_udp_encap_id,
2500                                             path->fp_nh_proto,
2501                                             dpo);
2502             break;
2503         case FIB_PATH_TYPE_RECEIVE:
2504         case FIB_PATH_TYPE_SPECIAL:
2505         case FIB_PATH_TYPE_DVR:
2506             dpo_copy(dpo, &path->fp_dpo);
2507             break;
2508         }
2509     }
2510 }
2511
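/*
 * fib_path_append_nh_for_multipath_hash
 *
 * Append this path's contribution (weight, index and forwarding DPO) to
 * the load-balance path vector passed; unresolved paths contribute a drop.
 * Returns the (possibly reallocated) vector.
 */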
2512 load_balance_path_t *
2513 fib_path_append_nh_for_multipath_hash (fib_node_index_t path_index,
2514                                        fib_forward_chain_type_t fct,
2515                                        load_balance_path_t *hash_key)
2516 {
2517     load_balance_path_t *mnh;
2518     fib_path_t *path;
2519
2520     path = fib_path_get(path_index);
2521
2522     ASSERT(path);
2523
2524     vec_add2(hash_key, mnh, 1);
2525
2526     mnh->path_weight = path->fp_weight;
2527     mnh->path_index = path_index;
2528
2529     if (fib_path_is_resolved(path_index))
2530     {
2531         fib_path_contribute_forwarding(path_index, fct, &mnh->path_dpo);
2532     }
2533     else
2534     {
2535         dpo_copy(&mnh->path_dpo,
2536                  drop_dpo_get(fib_forw_chain_type_to_dpo_proto(fct)));
2537     }
2538     return (hash_key);
2539 }
2540
2541 int
2542 fib_path_is_recursive_constrained (fib_node_index_t path_index)
2543 {
2544     fib_path_t *path;
2545
2546     path = fib_path_get(path_index);
2547
2548     return ((FIB_PATH_TYPE_RECURSIVE == path->fp_type) &&
2549             ((path->fp_cfg_flags & FIB_PATH_CFG_FLAG_RESOLVE_ATTACHED) ||
2550              (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_RESOLVE_HOST)));
2551 }
2552
2553 int
2554 fib_path_is_exclusive (fib_node_index_t path_index)
2555 {
2556     fib_path_t *path;
2557
2558     path = fib_path_get(path_index);
2559
2560     return (FIB_PATH_TYPE_EXCLUSIVE == path->fp_type);
2561 }
2562
2563 int
2564 fib_path_is_deag (fib_node_index_t path_index)
2565 {
2566     fib_path_t *path;
2567
2568     path = fib_path_get(path_index);
2569
2570     return (FIB_PATH_TYPE_DEAG == path->fp_type);
2571 }
2572
2573 int
2574 fib_path_is_resolved (fib_node_index_t path_index)
2575 {
2576     fib_path_t *path;
2577
2578     path = fib_path_get(path_index);
2579
2580     return (dpo_id_is_valid(&path->fp_dpo) &&
2581             (path->fp_oper_flags & FIB_PATH_OPER_FLAG_RESOLVED) &&
2582             !fib_path_is_looped(path_index) &&
2583             !fib_path_is_permanent_drop(path));
2584 }
2585
2586 int
2587 fib_path_is_looped (fib_node_index_t path_index)
2588 {
2589     fib_path_t *path;
2590
2591     path = fib_path_get(path_index);
2592
2593     return (path->fp_oper_flags & FIB_PATH_OPER_FLAG_RECURSIVE_LOOP);
2594 }
2595
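/*
 * fib_path_encode
 *
 * Path-list walk callback that encodes each path into the vector of
 * fib_route_path_encode_t passed in the context, e.g. for an API dump.
 */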
2596 fib_path_list_walk_rc_t
2597 fib_path_encode (fib_node_index_t path_list_index,
2598                  fib_node_index_t path_index,
2599                  void *ctx)
2600 {
2601     fib_route_path_encode_t **api_rpaths = ctx;
2602     fib_route_path_encode_t *api_rpath;
2603     fib_path_t *path;
2604
2605     path = fib_path_get(path_index);
2606     if (!path)
2607       return (FIB_PATH_LIST_WALK_CONTINUE);
2608     vec_add2(*api_rpaths, api_rpath, 1);
2609     api_rpath->rpath.frp_weight = path->fp_weight;
2610     api_rpath->rpath.frp_preference = path->fp_preference;
2611     api_rpath->rpath.frp_proto = path->fp_nh_proto;
2612     api_rpath->rpath.frp_sw_if_index = ~0;
2613     api_rpath->rpath.frp_fib_index = 0;
2614     api_rpath->dpo = path->fp_dpo;
2615
2616     switch (path->fp_type)
2617       {
2618       case FIB_PATH_TYPE_RECEIVE:
2619         api_rpath->rpath.frp_addr = path->receive.fp_addr;
2620         api_rpath->rpath.frp_sw_if_index = path->receive.fp_interface;
2621         break;
2622       case FIB_PATH_TYPE_ATTACHED:
2623         api_rpath->rpath.frp_sw_if_index = path->attached.fp_interface;
2624         break;
2625       case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
2626         api_rpath->rpath.frp_sw_if_index = path->attached_next_hop.fp_interface;
2627         api_rpath->rpath.frp_addr = path->attached_next_hop.fp_nh;
2628         break;
2629       case FIB_PATH_TYPE_BIER_FMASK:
2630         api_rpath->rpath.frp_bier_fmask = path->bier_fmask.fp_bier_fmask;
2631         break;
2632       case FIB_PATH_TYPE_SPECIAL:
2633         break;
2634       case FIB_PATH_TYPE_DEAG:
2635         api_rpath->rpath.frp_fib_index = path->deag.fp_tbl_id;
2636         break;
2637       case FIB_PATH_TYPE_RECURSIVE:
2638         api_rpath->rpath.frp_addr = path->recursive.fp_nh.fp_ip;
2639         api_rpath->rpath.frp_fib_index = path->recursive.fp_tbl_id;
2640         break;
2641       case FIB_PATH_TYPE_DVR:
2642           api_rpath->rpath.frp_sw_if_index = path->dvr.fp_interface;
2643           api_rpath->rpath.frp_flags |= FIB_ROUTE_PATH_DVR;
2644           break;
2645       case FIB_PATH_TYPE_UDP_ENCAP:
2646           api_rpath->rpath.frp_udp_encap_id = path->udp_encap.fp_udp_encap_id;
2647           api_rpath->rpath.frp_flags |= FIB_ROUTE_PATH_UDP_ENCAP;
2648           break;
2649       default:
2650         break;
2651       }
2652
2653     return (FIB_PATH_LIST_WALK_CONTINUE);
2654 }
2655
2656 dpo_proto_t
2657 fib_path_get_proto (fib_node_index_t path_index)
2658 {
2659     fib_path_t *path;
2660
2661     path = fib_path_get(path_index);
2662
2663     return (path->fp_nh_proto);
2664 }
2665
2666 void
2667 fib_path_module_init (void)
2668 {
2669     fib_node_register_type (FIB_NODE_TYPE_PATH, &fib_path_vft);
2670 }
2671
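/*
 * show_fib_path_command
 *
 * CLI handler for "show fib paths [<index>]"; with an index one path is
 * shown in detail, otherwise all paths in the pool are listed.
 */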
2672 static clib_error_t *
2673 show_fib_path_command (vlib_main_t * vm,
2674                         unformat_input_t * input,
2675                         vlib_cli_command_t * cmd)
2676 {
2677     fib_node_index_t pi;
2678     fib_path_t *path;
2679
2680     if (unformat (input, "%d", &pi))
2681     {
2682         /*
2683          * show one in detail
2684          */
2685         if (!pool_is_free_index(fib_path_pool, pi))
2686         {
2687             path = fib_path_get(pi);
2688             u8 *s = format(NULL, "%U", format_fib_path, pi, 1);
2689             s = format(s, "children:");
2690             s = fib_node_children_format(path->fp_node.fn_children, s);
2691             vlib_cli_output (vm, "%s", s);
2692             vec_free(s);
2693         }
2694         else
2695         {
2696             vlib_cli_output (vm, "path %d invalid", pi);
2697         }
2698     }
2699     else
2700     {
2701         vlib_cli_output (vm, "FIB Paths");
2702         pool_foreach_index (pi, fib_path_pool,
2703         ({
2704             vlib_cli_output (vm, "%U", format_fib_path, pi, 0);
2705         }));
2706     }
2707
2708     return (NULL);
2709 }
2710
2711 VLIB_CLI_COMMAND (show_fib_path, static) = {
2712   .path = "show fib paths",
2713   .function = show_fib_path_command,
2714   .short_help = "show fib paths",
2715 };