Dump routes (VPP-500)
[vpp.git] / vnet/vnet/fib/fib_path.c
index d2e5e31..9866931 100644 (file)
 
 #include <vnet/adj/adj.h>
 
-#include "fib_path.h"
-#include "fib_node.h"
-#include "fib_table.h"
-#include "fib_entry.h"
-#include "fib_path_list.h"
-#include "fib_internal.h"
+#include <vnet/fib/fib_path.h>
+#include <vnet/fib/fib_node.h>
+#include <vnet/fib/fib_table.h>
+#include <vnet/fib/fib_entry.h>
+#include <vnet/fib/fib_path_list.h>
+#include <vnet/fib/fib_internal.h>
+#include <vnet/fib/fib_urpf_list.h>
 
 /**
  * Enumeration of path types
@@ -500,7 +501,7 @@ fib_path_last_lock_gone (fib_node_t *node)
 
 static const adj_index_t
 fib_path_attached_next_hop_get_adj (fib_path_t *path,
-                                   fib_link_t link)
+                                   vnet_link_t link)
 {
     if (vnet_sw_interface_is_p2p(vnet_get_main(),
                                 path->attached_next_hop.fp_interface))
@@ -562,7 +563,7 @@ fib_path_recursive_adj_update (fib_path_t *path,
                               fib_forward_chain_type_t fct,
                               dpo_id_t *dpo)
 {
-    dpo_id_t via_dpo = DPO_NULL;
+    dpo_id_t via_dpo = DPO_INVALID;
 
     /*
      * get the DPO to resolve through from the via-entry
@@ -756,6 +757,21 @@ fib_path_back_walk_notify (fib_node_t *node,
                fib_path_proto_to_chain_type(path->fp_nh_proto),
                &path->fp_dpo);
        }
+       if ((FIB_NODE_BW_REASON_FLAG_ADJ_UPDATE & ctx->fnbw_reason) ||
+            (FIB_NODE_BW_REASON_FLAG_ADJ_DOWN   & ctx->fnbw_reason))
+       {
+           /*
+            * ADJ updates (complete<->incomplete) do not need to propagate to
+            * recursive entries.
+            * The only reason it's needed as far back as here is that the
+            * complete and incomplete adjs are different DPO types, so the LBs
+            * need to re-stack.
+            * If this walk were quashed in the fib_entry, then any non-fib_path
+            * children (like tunnels that collapse out the LB when they stack)
+            * would not see the update.
+            */
+           return (FIB_NODE_BACK_WALK_CONTINUE);
+       }
        break;
     case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
        /*
@@ -795,6 +811,12 @@ FIXME comment
              */
             adj_index_t ai;
 
+            if (vnet_sw_interface_is_admin_up(vnet_get_main(),
+                                              path->attached_next_hop.fp_interface))
+            {
+                path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RESOLVED;
+            }
+
             ai = fib_path_attached_next_hop_get_adj(
                      path,
                      fib_proto_to_link(path->fp_nh_proto));
@@ -804,6 +826,13 @@ FIXME comment
                     ai);
             adj_unlock(ai);
         }
+        if (FIB_NODE_BW_REASON_FLAG_ADJ_DOWN & ctx->fnbw_reason)
+       {
+            /*
+             * the adj has gone down. the path is no longer resolved.
+             */
+           path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
+        }
        break;
     case FIB_PATH_TYPE_ATTACHED:
        /*
@@ -848,6 +877,15 @@ FIXME comment
     return (FIB_NODE_BACK_WALK_CONTINUE);
 }
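
Note (not part of the patch): the two new reason-flag checks above are exercised when the adjacency layer kicks off a back-walk towards its children. A minimal sketch of such a trigger follows, assuming the fib_walk_sync()/FIB_NODE_TYPE_ADJ wiring used elsewhere in the fib layer; the function name example_adj_down_walk and the surrounding plumbing are illustrative only.

#include <vnet/fib/fib_walk.h>

/* hypothetical trigger site, shown only to illustrate how
 * fib_path_back_walk_notify() above receives ADJ_DOWN */
static void
example_adj_down_walk (adj_index_t ai)
{
    fib_node_back_walk_ctx_t bw_ctx = {
        .fnbw_reason = FIB_NODE_BW_REASON_FLAG_ADJ_DOWN,
    };

    /*
     * walk the adj's children synchronously; attached-next-hop paths
     * respond by clearing FIB_PATH_OPER_FLAG_RESOLVED
     */
    fib_walk_sync(FIB_NODE_TYPE_ADJ, ai, &bw_ctx);
}
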
 
+static void
+fib_path_memory_show (void)
+{
+    fib_show_memory_usage("Path",
+                         pool_elts(fib_path_pool),
+                         pool_len(fib_path_pool),
+                         sizeof(fib_path_t));
+}
+
 /*
  * The FIB path's graph node virtual function table
  */
@@ -855,12 +893,13 @@ static const fib_node_vft_t fib_path_vft = {
     .fnv_get = fib_path_get_node,
     .fnv_last_lock = fib_path_last_lock_gone,
     .fnv_back_walk = fib_path_back_walk_notify,
+    .fnv_mem_show = fib_path_memory_show,
 };
 
 static fib_path_cfg_flags_t
 fib_path_route_flags_to_cfg_flags (const fib_route_path_t *rpath)
 {
-    fib_path_cfg_flags_t cfg_flags = FIB_PATH_CFG_ATTRIBUTE_FIRST;
+    fib_path_cfg_flags_t cfg_flags = FIB_PATH_CFG_FLAG_NONE;
 
     if (rpath->frp_flags & FIB_ROUTE_PATH_RESOLVE_VIA_HOST)
        cfg_flags |= FIB_PATH_CFG_FLAG_RESOLVE_HOST;
@@ -1541,6 +1580,62 @@ fib_path_get_weight (fib_node_index_t path_index)
     return (path->fp_weight);
 }
 
+/**
+ * @brief Contribute the path's adjacency to the list passed.
+ * By calling this function over all paths, recursively, a child
+ * can construct its full set of forwarding adjacencies, and hence its
+ * uRPF list.
+ */
+void
+fib_path_contribute_urpf (fib_node_index_t path_index,
+                         index_t urpf)
+{
+    fib_path_t *path;
+
+    if (!fib_path_is_resolved(path_index))
+       return;
+
+    path = fib_path_get(path_index);
+
+    switch (path->fp_type)
+    {
+    case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
+       fib_urpf_list_append(urpf, path->attached_next_hop.fp_interface);
+       break;
+
+    case FIB_PATH_TYPE_ATTACHED:
+       fib_urpf_list_append(urpf, path->attached.fp_interface);
+       break;
+
+    case FIB_PATH_TYPE_RECURSIVE:
+       fib_entry_contribute_urpf(path->fp_via_fib, urpf);
+       break;
+
+    case FIB_PATH_TYPE_EXCLUSIVE:
+    case FIB_PATH_TYPE_SPECIAL:
+       /*
+        * these path types may link to an adj, if that's what
+        * the client gave
+        */
+       if (dpo_is_adj(&path->fp_dpo))
+       {
+           ip_adjacency_t *adj;
+
+           adj = adj_get(path->fp_dpo.dpoi_index);
+
+           fib_urpf_list_append(urpf, adj->rewrite_header.sw_if_index);
+       }
+       break;
+
+    case FIB_PATH_TYPE_DEAG:
+    case FIB_PATH_TYPE_RECEIVE:
+       /*
+        * these path types don't link to an adj
+        */
+       break;
+    }
+}
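
Note (not part of the patch): a hedged sketch of how an owner of a set of paths might use fib_path_contribute_urpf() to build its uRPF list. The fib_urpf_list calls (alloc_and_lock/bake) follow the fib_urpf_list.h API introduced alongside this change; the walk itself, the de-duplication comment, and the name example_build_urpf are assumptions.

/* hypothetical owner of a vector of path indices building its uRPF list */
static index_t
example_build_urpf (fib_node_index_t *path_indices)
{
    fib_node_index_t *path_index;
    index_t ui;

    ui = fib_urpf_list_alloc_and_lock();

    vec_foreach (path_index, path_indices)
    {
        /* unresolved paths contribute nothing */
        fib_path_contribute_urpf(*path_index, ui);
    }

    /*
     * bake the list; interfaces contributed by more than one path are
     * assumed to be collapsed into a single entry
     */
    fib_urpf_list_bake(ui);

    return (ui);
}
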
+
 void
 fib_path_contribute_forwarding (fib_node_index_t path_index,
                                fib_forward_chain_type_t fct,
@@ -1564,7 +1659,8 @@ fib_path_contribute_forwarding (fib_node_index_t path_index,
     {
        dpo_copy(dpo, &path->fp_dpo);
     }
-    else {
+    else
+    {
        switch (path->fp_type)
        {
        case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
@@ -1574,6 +1670,7 @@ fib_path_contribute_forwarding (fib_node_index_t path_index,
            case FIB_FORW_CHAIN_TYPE_UNICAST_IP6:
            case FIB_FORW_CHAIN_TYPE_MPLS_EOS:
            case FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS:
+           case FIB_FORW_CHAIN_TYPE_ETHERNET:
            {
                adj_index_t ai;
 
@@ -1606,6 +1703,9 @@ fib_path_contribute_forwarding (fib_node_index_t path_index,
            case FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS:
                fib_path_recursive_adj_update(path, fct, dpo);
                break;
+           case FIB_FORW_CHAIN_TYPE_ETHERNET:
+               ASSERT(0);
+               break;
            }
            break;
        case FIB_PATH_TYPE_DEAG:
@@ -1623,6 +1723,9 @@ fib_path_contribute_forwarding (fib_node_index_t path_index,
            case FIB_FORW_CHAIN_TYPE_MPLS_EOS:
                dpo_copy(dpo, &path->fp_dpo);
                break;          
+           case FIB_FORW_CHAIN_TYPE_ETHERNET:
+               ASSERT(0);
+               break;
             }
             break;
        case FIB_PATH_TYPE_EXCLUSIVE:
@@ -1656,7 +1759,7 @@ fib_path_append_nh_for_multipath_hash (fib_node_index_t path_index,
 
        mnh->path_weight = path->fp_weight;
        mnh->path_index = path_index;
-       dpo_copy(&mnh->path_dpo, &path->fp_dpo);
+       fib_path_contribute_forwarding(path_index, fct, &mnh->path_dpo);
     }
 
     return (hash_key);
@@ -1715,6 +1818,49 @@ fib_path_is_looped (fib_node_index_t path_index)
     return (path->fp_oper_flags & FIB_PATH_OPER_FLAG_RECURSIVE_LOOP);
 }
 
+int
+fib_path_encode (fib_node_index_t path_list_index,
+                fib_node_index_t path_index,
+                 void *ctx)
+{
+    fib_route_path_encode_t **api_rpaths = ctx;
+    fib_route_path_encode_t *api_rpath;
+    fib_path_t *path;
+
+    path = fib_path_get(path_index);
+    if (!path)
+      return (0);
+    vec_add2(*api_rpaths, api_rpath, 1);
+    api_rpath->rpath.frp_weight = path->fp_weight;
+    api_rpath->rpath.frp_proto = path->fp_nh_proto;
+    api_rpath->rpath.frp_sw_if_index = ~0;
+    api_rpath->dpo = path->exclusive.fp_ex_dpo;
+    switch (path->fp_type)
+      {
+      case FIB_PATH_TYPE_RECEIVE:
+        api_rpath->rpath.frp_addr = path->receive.fp_addr;
+        api_rpath->rpath.frp_sw_if_index = path->receive.fp_interface;
+        break;
+      case FIB_PATH_TYPE_ATTACHED:
+        api_rpath->rpath.frp_sw_if_index = path->attached.fp_interface;
+        break;
+      case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
+        api_rpath->rpath.frp_sw_if_index = path->attached_next_hop.fp_interface;
+        api_rpath->rpath.frp_addr = path->attached_next_hop.fp_nh;
+        break;
+      case FIB_PATH_TYPE_SPECIAL:
+        break;
+      case FIB_PATH_TYPE_DEAG:
+        break;
+      case FIB_PATH_TYPE_RECURSIVE:
+        api_rpath->rpath.frp_addr = path->recursive.fp_nh;
+        break;
+      default:
+        break;
+      }
+    return (1);
+}
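
Note (not part of the patch): fib_path_encode() is shaped as a path-list walk callback, in keeping with the "Dump routes" intent of this commit. A hedged sketch of how a route-dump handler might drive it, assuming fib_path_list_walk() accepts a callback of this signature; example_encode_paths and the marshalling step are illustrative only.

/* hypothetical dump handler collecting every path of a path-list */
static void
example_encode_paths (fib_node_index_t path_list_index)
{
    fib_route_path_encode_t *api_rpaths = NULL, *api_rpath;

    /* fib_path_encode() appends one fib_route_path_encode_t per path */
    fib_path_list_walk(path_list_index, fib_path_encode, &api_rpaths);

    vec_foreach (api_rpath, api_rpaths)
    {
        /* copy api_rpath->rpath (and api_rpath->dpo) into the reply here */
    }

    vec_free(api_rpaths);
}
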
+
 void
 fib_path_module_init (void)
 {
@@ -1726,13 +1872,36 @@ show_fib_path_command (vlib_main_t * vm,
                        unformat_input_t * input,
                        vlib_cli_command_t * cmd)
 {
+    fib_node_index_t pi;
     fib_path_t *path;
 
-    vlib_cli_output (vm, "FIB Path Lists");
-    pool_foreach(path, fib_path_pool,
-    ({
-       vlib_cli_output (vm, "%U", format_fib_path, path);
-    }));
+    if (unformat (input, "%d", &pi))
+    {
+       /*
+        * show one in detail
+        */
+       if (!pool_is_free_index(fib_path_pool, pi))
+       {
+           path = fib_path_get(pi);
+           u8 *s = fib_path_format(pi, NULL);
+           s = format(s, "children:");
+           s = fib_node_children_format(path->fp_node.fn_children, s);
+           vlib_cli_output (vm, "%s", s);
+           vec_free(s);
+       }
+       else
+       {
+           vlib_cli_output (vm, "path %d invalid", pi);
+       }
+    }
+    else
+    {
+       vlib_cli_output (vm, "FIB Paths");
+       pool_foreach(path, fib_path_pool,
+       ({
+           vlib_cli_output (vm, "%U", format_fib_path, path);
+       }));
+    }
 
     return (NULL);
 }