IP Flow Hash Config fixes 13/6313/5
author     Neale Ranns <nranns@cisco.com>
Fri, 21 Apr 2017 08:07:59 +0000 (01:07 -0700)
committer  Damjan Marion <dmarion.lists@gmail.com>
Wed, 26 Apr 2017 15:31:41 +0000 (15:31 +0000)
- the flow hash config is (and was) cached on the load-balance object, so the fib_table_t struct is not used at switch time. Changes to the table's flow hash config must therefore be propagated to all load-balances, and hence to all FIB entries, in the table (a usage sketch follows this list).
- enable API for setting the IPv6 table flow hash config
- store the hash config only on the fib_table_t object, not on the ipX_fib_t
- add tests.
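
A minimal sketch of the resulting control-plane usage, built only from the
functions this change adds or uses (the wrapper name and the hard-coded
config are illustrative, not part of the change):

    #include <vnet/fib/fib_table.h>

    /* hypothetical helper: change the v4 flow-hash config for a table-id */
    static void
    example_set_v4_flow_hash (u32 table_id)
    {
        u32 fib_index;

        /* resolve the table-id to a FIB index */
        fib_index = fib_table_find (FIB_PROTOCOL_IP4, table_id);

        if (~0 == fib_index)
            return;

        /* caches the config on the fib_table_t, then walks every entry
         * in the table, updating lb_hash_config on each load-balance */
        fib_table_set_flow_hash_config (fib_index, FIB_PROTOCOL_IP4,
                                        (IP_FLOW_HASH_SRC_ADDR |
                                         IP_FLOW_HASH_DST_ADDR));
    }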

Change-Id: Ib804c11162c6d4972c764957562c372f663e05d4
Signed-off-by: Neale Ranns <nranns@cisco.com>
20 files changed:
src/vnet/fib/fib_entry.c
src/vnet/fib/fib_entry.h
src/vnet/fib/fib_table.c
src/vnet/fib/fib_table.h
src/vnet/fib/fib_test.c
src/vnet/fib/ip4_fib.c
src/vnet/fib/ip4_fib.h
src/vnet/fib/ip6_fib.c
src/vnet/fib/ip6_fib.h
src/vnet/fib/mpls_fib.c
src/vnet/fib/mpls_fib.h
src/vnet/ip/ip4_forward.c
src/vnet/ip/ip6.h
src/vnet/ip/ip6_forward.c
src/vnet/ip/ip_api.c
src/vnet/mpls/mpls_tunnel.c
src/vpp/api/api.c
test/test_ip4.py
test/test_ip6.py
test/vpp_papi_provider.py

index 6f811aa..29f5b35 100644 (file)
@@ -1390,6 +1390,35 @@ fib_entry_is_resolved (fib_node_index_t fib_entry_index)
     }
 }
 
+void
+fib_entry_set_flow_hash_config (fib_node_index_t fib_entry_index,
+                                flow_hash_config_t hash_config)
+{
+    fib_entry_t *fib_entry;
+
+    fib_entry = fib_entry_get(fib_entry_index);
+
+    /*
+     * pass the hash-config on to the load-balance object where it is cached.
+     * we can ignore LBs in the delegate chains, since they will not be of the
+     * correct protocol type (i.e. they are not IP)
+     * There's no way, nor need, to change the hash config for MPLS.
+     */
+    if (dpo_id_is_valid(&fib_entry->fe_lb))
+    {
+        load_balance_t *lb;
+
+        ASSERT(DPO_LOAD_BALANCE == fib_entry->fe_lb.dpoi_type);
+
+        lb = load_balance_get(fib_entry->fe_lb.dpoi_index);
+
+        /*
+         * atomic update for packets in flight
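+         * (flow_hash_config_t is a u32, so this word-sized store is seen
+         *  by in-flight readers as either the old or the new config,
+         *  never a torn value)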
+         */
+        lb->lb_hash_config = hash_config;
+    }
+}
+
 static int
 fib_ip4_address_compare (const ip4_address_t * a1,
                          const ip4_address_t * a2)
index b17a0b6..2196079 100644 (file)
@@ -533,6 +533,8 @@ extern int fib_entry_is_sourced(fib_node_index_t fib_entry_index,
 
 extern fib_node_index_t fib_entry_get_path_list(fib_node_index_t fib_entry_index);
 extern int fib_entry_is_resolved(fib_node_index_t fib_entry_index);
+extern void fib_entry_set_flow_hash_config(fib_node_index_t fib_entry_index,
+                                           flow_hash_config_t hash_config);
 
 extern void fib_entry_module_init(void);
 
index ff42804..d50f17f 100644 (file)
@@ -945,18 +945,52 @@ flow_hash_config_t
 fib_table_get_flow_hash_config (u32 fib_index,
                                fib_protocol_t proto)
 {
-    switch (proto)
-    {
-    case FIB_PROTOCOL_IP4:
-       return (ip4_fib_table_get_flow_hash_config(fib_index));
-    case FIB_PROTOCOL_IP6:
-       return (ip6_fib_table_get_flow_hash_config(fib_index));
-    case FIB_PROTOCOL_MPLS:
-       return (mpls_fib_table_get_flow_hash_config(fib_index));
-    }
-    return (0);
+    fib_table_t *fib;
+
+    fib = fib_table_get(fib_index, proto);
+
+    return (fib->ft_flow_hash_config);
 }
 
+/**
+ * @brief Table set flow hash config context.
+ */
+typedef struct fib_table_set_flow_hash_config_ctx_t_
+{
+    /**
+     * the flow hash config to set
+     */
+    flow_hash_config_t hash_config;
+} fib_table_set_flow_hash_config_ctx_t;
+
+static int
+fib_table_set_flow_hash_config_cb (fib_node_index_t fib_entry_index,
+                                   void *arg)
+{
+    fib_table_set_flow_hash_config_ctx_t *ctx = arg;
+
+    fib_entry_set_flow_hash_config(fib_entry_index, ctx->hash_config);
+
+    return (1);
+}
+
+void
+fib_table_set_flow_hash_config (u32 fib_index,
+                                fib_protocol_t proto,
+                                flow_hash_config_t hash_config)
+{
+    fib_table_set_flow_hash_config_ctx_t ctx = {
+        .hash_config = hash_config,
+    };
+    fib_table_t *fib;
+
+    fib = fib_table_get(fib_index, proto);
+    fib->ft_flow_hash_config = hash_config;
+
+    fib_table_walk(fib_index, proto,
+                   fib_table_set_flow_hash_config_cb,
+                   &ctx);
+}
 
 u32
 fib_table_get_table_id_for_sw_if_index (fib_protocol_t proto,
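
The context-struct-plus-callback shape above is the generic fib_table_walk
pattern. As a hedged sketch (assuming, as the callback above does, that a
non-zero return continues the walk; the names here are illustrative), any
per-entry operation can be applied table-wide the same way:

    /* hypothetical walker: count the entries in a table */
    static int
    example_count_entries_cb (fib_node_index_t fib_entry_index,
                              void *arg)
    {
        u32 *count = arg;

        (*count)++;

        return (1);   /* non-zero => continue the walk */
    }

    static u32
    example_count_entries (u32 fib_index, fib_protocol_t proto)
    {
        u32 count = 0;

        fib_table_walk (fib_index, proto,
                        example_count_entries_cb, &count);

        return (count);
    }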
index f24d28b..2177334 100644 (file)
@@ -665,6 +665,25 @@ extern u32 fib_table_create_and_lock(fib_protocol_t proto,
 extern flow_hash_config_t fib_table_get_flow_hash_config(u32 fib_index,
                                                         fib_protocol_t proto);
 
+/**
+ * @brief
+ *  Set the flow hash config used by the table
+ *
+ * @param fib_index
+ *  The index of the FIB
+ *
+ * @param proto
+ *  The protocol of the FIB (and thus the entries therein)
+ *
+ * @param hash_config
+ *  The flow-hash config to set
+ *
+ * @return none
+ */
+extern void fib_table_set_flow_hash_config(u32 fib_index,
+                                           fib_protocol_t proto,
+                                           flow_hash_config_t hash_config);
+
 /**
  * @brief
  * Take a reference counting lock on the table
index d3bdfa3..ddea6b8 100644 (file)
@@ -3832,6 +3832,29 @@ fib_test_v4 (void)
     fib_table_entry_delete(fib_index,
                           &pfx_10_10_10_127_s_32,
                           FIB_SOURCE_ADJ);
+    /*
+     * change the table's flow-hash config - expect the update to propagate to
+     * the entries' load-balance objects
+     */
+    flow_hash_config_t old_hash_config, new_hash_config;
+
+    old_hash_config = fib_table_get_flow_hash_config(fib_index,
+                                                     FIB_PROTOCOL_IP4);
+    new_hash_config = (IP_FLOW_HASH_SRC_ADDR |
+                       IP_FLOW_HASH_DST_ADDR);
+
+    fei = fib_table_lookup_exact_match(fib_index, &pfx_10_10_10_1_s_32);
+    dpo = fib_entry_contribute_ip_forwarding(fei);
+    lb = load_balance_get(dpo->dpoi_index);
+    FIB_TEST((lb->lb_hash_config == old_hash_config),
+             "Table and LB hash config match: %U",
+             format_ip_flow_hash_config, lb->lb_hash_config);
+
+    fib_table_set_flow_hash_config(fib_index, FIB_PROTOCOL_IP4, new_hash_config);
+
+    FIB_TEST((lb->lb_hash_config == new_hash_config),
+             "Table and LB newhash config match: %U",
+             format_ip_flow_hash_config, lb->lb_hash_config);
 
     /*
      * CLEANUP
index 8e92d85..878b4db 100644 (file)
@@ -124,9 +124,7 @@ ip4_create_fib_with_table_id (u32 table_id)
     fib_table->ft_table_id =
        v4_fib->table_id =
            table_id;
-    fib_table->ft_flow_hash_config = 
-       v4_fib->flow_hash_config =
-           IP_FLOW_HASH_DEFAULT;
+    fib_table->ft_flow_hash_config = IP_FLOW_HASH_DEFAULT;
     v4_fib->fwd_classify_table_index = ~0;
     v4_fib->rev_classify_table_index = ~0;
     
@@ -233,12 +231,6 @@ ip4_fib_table_get_index_for_sw_if_index (u32 sw_if_index)
     return (ip4_main.fib_index_by_sw_if_index[sw_if_index]);
 }
 
-flow_hash_config_t
-ip4_fib_table_get_flow_hash_config (u32 fib_index)
-{
-    return (ip4_fib_get(fib_index)->flow_hash_config);
-}
-
 /*
  * ip4_fib_table_lookup_exact_match
  *
@@ -542,7 +534,7 @@ ip4_show_fib (vlib_main_t * vm,
        vlib_cli_output (vm, "%U, fib_index %d, flow hash: %U", 
                         format_fib_table_name, fib->index, FIB_PROTOCOL_IP4,
                         fib->index,
-                        format_ip_flow_hash_config, fib->flow_hash_config);
+                        format_ip_flow_hash_config, fib_table->ft_flow_hash_config);
 
        /* Show summary? */
        if (! verbose)
index 4cf9e58..006163b 100644 (file)
@@ -53,9 +53,6 @@ typedef struct ip4_fib_t_
   /* Index into FIB vector. */
   u32 index;
 
-  /* flow hash configuration */
-  flow_hash_config_t flow_hash_config;
-
   /* N-tuple classifier indices */
   u32 fwd_classify_table_index;
   u32 rev_classify_table_index;
@@ -149,9 +146,6 @@ u32 ip4_fib_index_from_table_id (u32 table_id)
 
 extern u32 ip4_fib_table_get_index_for_sw_if_index(u32 sw_if_index);
 
-extern flow_hash_config_t ip4_fib_table_get_flow_hash_config(u32 fib_index);
-
-
 always_inline index_t
 ip4_fib_forwarding_lookup (u32 fib_index,
                            const ip4_address_t * addr)
index d00f4c5..e046b34 100644 (file)
@@ -74,9 +74,7 @@ create_fib_with_table_id (u32 table_id)
     fib_table->ft_table_id =
        v6_fib->table_id =
            table_id;
-    fib_table->ft_flow_hash_config = 
-       v6_fib->flow_hash_config =
-           IP_FLOW_HASH_DEFAULT;
+    fib_table->ft_flow_hash_config = IP_FLOW_HASH_DEFAULT;
 
     vnet_ip6_fib_init(fib_table->ft_index);
     fib_table_lock(fib_table->ft_index, FIB_PROTOCOL_IP6);
@@ -390,12 +388,6 @@ u32 ip6_fib_table_fwding_lookup_with_if_index (ip6_main_t * im,
     return ip6_fib_table_fwding_lookup(im, fib_index, dst);
 }
 
-flow_hash_config_t
-ip6_fib_table_get_flow_hash_config (u32 fib_index)
-{
-    return (ip6_fib_get(fib_index)->flow_hash_config);
-}
-
 u32
 ip6_fib_table_get_index_for_sw_if_index (u32 sw_if_index)
 {
@@ -643,7 +635,7 @@ ip6_show_fib (vlib_main_t * vm,
 
        vlib_cli_output (vm, "%s, fib_index %d, flow hash: %U", 
                         fib_table->ft_desc, fib->index,
-                        format_ip_flow_hash_config, fib->flow_hash_config);
+                        format_ip_flow_hash_config, fib_table->ft_flow_hash_config);
 
        /* Show summary? */
        if (! verbose)
index e2f2845..2bf8ef7 100644 (file)
@@ -133,7 +133,5 @@ u32 ip6_fib_index_from_table_id (u32 table_id)
 
 extern u32 ip6_fib_table_get_index_for_sw_if_index(u32 sw_if_index);
 
-extern flow_hash_config_t ip6_fib_table_get_flow_hash_config(u32 fib_index);
-
 #endif
 
index 19f9f3c..ca6271f 100644 (file)
  */
 static index_t mpls_fib_drop_dpo_index = INDEX_INVALID;
 
-/**
- * FIXME
- */
-#define MPLS_FLOW_HASH_DEFAULT 0
-
 static inline u32
 mpls_fib_entry_mk_key (mpls_label_t label,
                       mpls_eos_bit_t eos)
@@ -109,10 +104,8 @@ mpls_fib_create_with_table_id (u32 table_id)
 
     hash_set (mpls_main.fib_index_by_table_id, table_id, fib_table->ft_index);
 
-    fib_table->ft_table_id =
-       table_id;
-    fib_table->ft_flow_hash_config = 
-       MPLS_FLOW_HASH_DEFAULT;
+    fib_table->ft_table_id = table_id;
+    fib_table->ft_flow_hash_config = MPLS_FLOW_HASH_DEFAULT;
     
     fib_table_lock(fib_table->ft_index, FIB_PROTOCOL_MPLS);
 
@@ -350,13 +343,6 @@ mpls_fib_forwarding_table_reset (mpls_fib_t *mf,
     mf->mf_lbs[key] = mpls_fib_drop_dpo_index;
 }
 
-flow_hash_config_t
-mpls_fib_table_get_flow_hash_config (u32 fib_index)
-{
-    // FIXME.
-    return (0);
-}
-
 void
 mpls_fib_table_walk (mpls_fib_t *mpls_fib,
                      fib_table_walk_fn_t fn,
index 78a61a1..dfb8b7f 100644 (file)
 #define MPLS_FIB_KEY_SIZE 21
 #define MPLS_FIB_DB_SIZE (1 << (MPLS_FIB_KEY_SIZE-1))
 
+/**
+ * There are no options for controlling the MPLS flow hash
+ */
+#define MPLS_FLOW_HASH_DEFAULT 0
+
 typedef struct mpls_fib_t_
 {
   /**
@@ -130,6 +135,4 @@ mpls_fib_table_get_index_for_sw_if_index (u32 sw_if_index)
     return (mm->fib_index_by_sw_if_index[sw_if_index]);
 }
 
-extern flow_hash_config_t mpls_fib_table_get_flow_hash_config(u32 fib_index);
-
 #endif
index 697d216..d85f76d 100644 (file)
@@ -3020,7 +3020,6 @@ VLIB_CLI_COMMAND (lookup_test_command, static) =
 int
 vnet_set_ip4_flow_hash (u32 table_id, u32 flow_hash_config)
 {
-  ip4_fib_t *fib;
   u32 fib_index;
 
   fib_index = fib_table_find (FIB_PROTOCOL_IP4, table_id);
@@ -3028,9 +3027,9 @@ vnet_set_ip4_flow_hash (u32 table_id, u32 flow_hash_config)
   if (~0 == fib_index)
     return VNET_API_ERROR_NO_SUCH_FIB;
 
-  fib = ip4_fib_get (fib_index);
+  fib_table_set_flow_hash_config (fib_index, FIB_PROTOCOL_IP4,
+                                 flow_hash_config);
 
-  fib->flow_hash_config = flow_hash_config;
   return 0;
 }
 
index bf7ec7d..d623c95 100644 (file)
@@ -71,9 +71,6 @@ typedef struct
 
   /* Index into FIB vector. */
   u32 index;
-
-  /* flow hash configuration */
-  flow_hash_config_t flow_hash_config;
 } ip6_fib_t;
 
 typedef struct ip6_mfib_t
index 3bc07d0..0ad96d0 100644 (file)
@@ -267,11 +267,10 @@ ip6_lookup_inline (vlib_main_t * vm,
            (vnet_buffer (p0)->sw_if_index[VLIB_TX] ==
             (u32) ~ 0) ? fib_index0 : vnet_buffer (p0)->sw_if_index[VLIB_TX];
 
-         flow_hash_config0 = ip6_fib_get (fib_index0)->flow_hash_config;
-
          lbi0 = ip6_fib_table_fwding_lookup (im, fib_index0, dst_addr0);
 
          lb0 = load_balance_get (lbi0);
+         flow_hash_config0 = lb0->lb_hash_config;
 
          vnet_buffer (p0)->ip.flow_hash = 0;
          ASSERT (lb0->lb_n_buckets > 0);
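
The hunk stops before the hash is consumed. A sketch of the step that
follows in ip6_lookup_inline, assuming ip6_compute_flow_hash() as the
per-packet hash helper (not shown in this diff):

    /* the hash is only computed when the load-balance actually has
     * more than one bucket to choose between */
    if (PREDICT_FALSE (lb0->lb_n_buckets > 1))
      {
        vnet_buffer (p0)->ip.flow_hash =
          ip6_compute_flow_hash (ip0, flow_hash_config0);
      }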
@@ -3156,7 +3155,6 @@ VLIB_CLI_COMMAND (test_link_command, static) =
 int
 vnet_set_ip6_flow_hash (u32 table_id, u32 flow_hash_config)
 {
-  ip6_fib_t *fib;
   u32 fib_index;
 
   fib_index = fib_table_find (FIB_PROTOCOL_IP6, table_id);
@@ -3164,10 +3162,10 @@ vnet_set_ip6_flow_hash (u32 table_id, u32 flow_hash_config)
   if (~0 == fib_index)
     return VNET_API_ERROR_NO_SUCH_FIB;
 
-  fib = ip6_fib_get (fib_index);
+  fib_table_set_flow_hash_config (fib_index, FIB_PROTOCOL_IP6,
+                                 flow_hash_config);
 
-  fib->flow_hash_config = flow_hash_config;
-  return 1;
+  return 0;
 }
 
 static clib_error_t *
@@ -3199,7 +3197,7 @@ set_ip6_flow_hash_command_fn (vlib_main_t * vm,
   rv = vnet_set_ip6_flow_hash (table_id, flow_hash_config);
   switch (rv)
     {
-    case 1:
+    case 0:
       break;
 
     case -1:
index 9c9cb4a..2680d60 100644 (file)
@@ -1336,9 +1336,17 @@ static void
 set_ip6_flow_hash (vl_api_set_ip_flow_hash_t * mp)
 {
   vl_api_set_ip_flow_hash_reply_t *rmp;
-  int rv = VNET_API_ERROR_UNIMPLEMENTED;
+  int rv;
+  u32 table_id;
+  flow_hash_config_t flow_hash_config = 0;
+
+  table_id = ntohl (mp->vrf_id);
+
+#define _(a,b) if (mp->a) flow_hash_config |= b;
+  foreach_flow_hash_bit;
+#undef _
 
-  clib_warning ("unimplemented...");
+  rv = vnet_set_ip6_flow_hash (table_id, flow_hash_config);
 
   REPLY_MACRO (VL_API_SET_IP_FLOW_HASH_REPLY);
 }
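
The _()/foreach pair above is an x-macro: each boolean field of the API
message maps to one hash-config bit. Assuming the usual
foreach_flow_hash_bit list (the same field names the test provider below
passes: src, dst, sport, dport, proto, reverse), the expansion is
equivalent to:

    if (mp->src)     flow_hash_config |= IP_FLOW_HASH_SRC_ADDR;
    if (mp->dst)     flow_hash_config |= IP_FLOW_HASH_DST_ADDR;
    if (mp->sport)   flow_hash_config |= IP_FLOW_HASH_SRC_PORT;
    if (mp->dport)   flow_hash_config |= IP_FLOW_HASH_DST_PORT;
    if (mp->proto)   flow_hash_config |= IP_FLOW_HASH_PROTO;
    if (mp->reverse) flow_hash_config |= IP_FLOW_HASH_REVERSE_SRC_DST;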
index 1254dd9..457d48e 100644 (file)
@@ -24,6 +24,7 @@
 #include <vnet/adj/adj_midchain.h>
 #include <vnet/adj/adj_mcast.h>
 #include <vnet/dpo/replicate_dpo.h>
+#include <vnet/fib/mpls_fib.h>
 
 /**
  * @brief pool of tunnel instances
@@ -200,9 +201,20 @@ mpls_tunnel_mk_lb (mpls_tunnel_t *mt,
         {
             flow_hash_config_t fhc;
 
-            fhc = 0; // FIXME
-            /* fhc = fib_table_get_flow_hash_config(fib_entry->fe_fib_index, */
-            /*                                      dpo_proto_to_fib(lb_proto)); */
+            switch (linkt)
+            {
+            case VNET_LINK_MPLS:
+                fhc = MPLS_FLOW_HASH_DEFAULT;
+                break;
+            case VNET_LINK_IP4:
+            case VNET_LINK_IP6:
+                fhc = IP_FLOW_HASH_DEFAULT;
+                break;
+            default:
+                fhc = 0;
+                break;
+            }
+
             dpo_set(dpo_lb,
                     DPO_LOAD_BALANCE,
                     lb_proto,
index f1b6877..9c23057 100644 (file)
@@ -971,6 +971,9 @@ ip6_reset_fib_t_handler (vl_api_reset_fib_t * mp)
 
     vec_reset_length (sw_if_indices_to_shut);
 
+    /* Set the flow hash for this fib to the default */
+    vnet_set_ip6_flow_hash (fib->table_id, IP_FLOW_HASH_DEFAULT);
+
     /* Shut down interfaces in this FIB / clean out intfc routes */
     pool_foreach (si, im->sw_interfaces,
     ({
index ed364b6..3fe61e2 100644 (file)
@@ -766,5 +766,142 @@ class TestIPSubNets(VppTestCase):
         rx = self.pg1.get_capture(1)
 
 
+class TestIPLoadBalance(VppTestCase):
+    """ IPv4 Load-Balancing """
+
+    def setUp(self):
+        super(TestIPLoadBalance, self).setUp()
+
+        self.create_pg_interfaces(range(5))
+
+        for i in self.pg_interfaces:
+            i.admin_up()
+            i.config_ip4()
+            i.resolve_arp()
+
+    def tearDown(self):
+        super(TestIPLoadBalance, self).tearDown()
+        for i in self.pg_interfaces:
+            i.unconfig_ip4()
+            i.admin_down()
+
+    def send_and_expect_load_balancing(self, input, pkts, outputs):
+        input.add_stream(pkts)
+        self.pg_enable_capture(self.pg_interfaces)
+        self.pg_start()
+        for oo in outputs:
+            rx = oo._get_capture(1)
+            self.assertNotEqual(0, len(rx))
+
+    def test_ip_load_balance(self):
+        """ IP Load-Balancing """
+
+        #
+        # An array of packets that differ only in the destination port
+        #
+        port_pkts = []
+
+        #
+        # An array of packets that differ only in the source address
+        #
+        src_pkts = []
+
+        for ii in range(65):
+            port_pkts.append((Ether(src=self.pg0.remote_mac,
+                                    dst=self.pg0.local_mac) /
+                              IP(dst="10.0.0.1", src="20.0.0.1") /
+                              UDP(sport=1234, dport=1234 + ii) /
+                              Raw('\xa5' * 100)))
+            src_pkts.append((Ether(src=self.pg0.remote_mac,
+                                   dst=self.pg0.local_mac) /
+                             IP(dst="10.0.0.1", src="20.0.0.%d" % ii) /
+                             UDP(sport=1234, dport=1234) /
+                             Raw('\xa5' * 100)))
+
+        route_10_0_0_1 = VppIpRoute(self, "10.0.0.1", 32,
+                                    [VppRoutePath(self.pg1.remote_ip4,
+                                                  self.pg1.sw_if_index),
+                                     VppRoutePath(self.pg2.remote_ip4,
+                                                  self.pg2.sw_if_index)])
+        route_10_0_0_1.add_vpp_config()
+
+        #
+        # inject the packet on pg0 - expect load-balancing across the 2 paths
+        #  - since the default hash config is to use IP src,dst and port
+        #    src,dst
+        # We are not going to ensure equal amounts of packets across each link,
+        # since the hash algorithm is statistical and therefore this can never
+        # be guaranteed. But with 64 different packets we do expect some
+        # balancing. So instead just ensure there is traffic on each link.
+        #
+        self.send_and_expect_load_balancing(self.pg0, port_pkts,
+                                            [self.pg1, self.pg2])
+        self.send_and_expect_load_balancing(self.pg0, src_pkts,
+                                            [self.pg1, self.pg2])
+
+        #
+        # change the flow hash config so it's only IP src,dst
+        #  - now only the stream with differing source address will
+        #    load-balance
+        #
+        self.vapi.set_ip_flow_hash(0, src=1, dst=1, sport=0, dport=0)
+
+        self.send_and_expect_load_balancing(self.pg0, src_pkts,
+                                            [self.pg1, self.pg2])
+
+        self.pg0.add_stream(port_pkts)
+        self.pg_enable_capture(self.pg_interfaces)
+        self.pg_start()
+
+        rx = self.pg2.get_capture(len(port_pkts))
+
+        #
+        # change the flow hash config back to defaults
+        #
+        self.vapi.set_ip_flow_hash(0, src=1, dst=1, sport=1, dport=1)
+
+        #
+        # Recursive prefixes
+    #  - testing that 2 stages of load-balancing occur and there is no
+    #    polarisation (i.e. a result where only 2 of the 4 paths are used)
+        #
+        port_pkts = []
+        src_pkts = []
+
+        for ii in range(257):
+            port_pkts.append((Ether(src=self.pg0.remote_mac,
+                                    dst=self.pg0.local_mac) /
+                              IP(dst="1.1.1.1", src="20.0.0.1") /
+                              UDP(sport=1234, dport=1234 + ii) /
+                              Raw('\xa5' * 100)))
+            src_pkts.append((Ether(src=self.pg0.remote_mac,
+                                   dst=self.pg0.local_mac) /
+                             IP(dst="1.1.1.1", src="20.0.0.%d" % ii) /
+                             UDP(sport=1234, dport=1234) /
+                             Raw('\xa5' * 100)))
+
+        route_10_0_0_2 = VppIpRoute(self, "10.0.0.2", 32,
+                                    [VppRoutePath(self.pg3.remote_ip4,
+                                                  self.pg3.sw_if_index),
+                                     VppRoutePath(self.pg4.remote_ip4,
+                                                  self.pg4.sw_if_index)])
+        route_10_0_0_2.add_vpp_config()
+
+        route_1_1_1_1 = VppIpRoute(self, "1.1.1.1", 32,
+                                   [VppRoutePath("10.0.0.2", 0xffffffff),
+                                    VppRoutePath("10.0.0.1", 0xffffffff)])
+        route_1_1_1_1.add_vpp_config()
+
+        #
+        # inject the packet on pg0 - expect load-balancing across all 4 paths
+        #
+        self.vapi.cli("clear trace")
+        self.send_and_expect_load_balancing(self.pg0, port_pkts,
+                                            [self.pg1, self.pg2,
+                                             self.pg3, self.pg4])
+        self.send_and_expect_load_balancing(self.pg0, src_pkts,
+                                            [self.pg1, self.pg2,
+                                             self.pg3, self.pg4])
+
 if __name__ == '__main__':
     unittest.main(testRunner=VppTestRunner)
index 3ba0923..ebeffe2 100644 (file)
@@ -1133,5 +1133,154 @@ class TestIPDisabled(VppTestCase):
         self.send_and_assert_no_replies(self.pg1, pm, "IPv6 disabled")
 
 
+class TestIP6LoadBalance(VppTestCase):
+    """ IPv6 Load-Balancing """
+
+    def setUp(self):
+        super(TestIP6LoadBalance, self).setUp()
+
+        self.create_pg_interfaces(range(5))
+
+        for i in self.pg_interfaces:
+            i.admin_up()
+            i.config_ip6()
+            i.resolve_ndp()
+
+    def tearDown(self):
+        super(TestIP6LoadBalance, self).tearDown()
+        for i in self.pg_interfaces:
+            i.unconfig_ip6()
+            i.admin_down()
+
+    def send_and_expect_load_balancing(self, input, pkts, outputs):
+        input.add_stream(pkts)
+        self.pg_enable_capture(self.pg_interfaces)
+        self.pg_start()
+        for oo in outputs:
+            rx = oo._get_capture(1)
+            self.assertNotEqual(0, len(rx))
+
+    def test_ip6_load_balance(self):
+        """ IPv6 Load-Balancing """
+
+        #
+        # An array of packets that differ only in the destination port
+        #
+        port_pkts = []
+
+        #
+        # An array of packets that differ only in the source address
+        #
+        src_pkts = []
+
+        for ii in range(65):
+            port_pkts.append((Ether(src=self.pg0.remote_mac,
+                                    dst=self.pg0.local_mac) /
+                              IPv6(dst="3000::1", src="3000:1::1") /
+                              UDP(sport=1234, dport=1234 + ii) /
+                              Raw('\xa5' * 100)))
+            src_pkts.append((Ether(src=self.pg0.remote_mac,
+                                   dst=self.pg0.local_mac) /
+                             IPv6(dst="3000::1", src="3000:1::%d" % ii) /
+                             UDP(sport=1234, dport=1234) /
+                             Raw('\xa5' * 100)))
+
+        route_3000_1 = VppIpRoute(self, "3000::1", 128,
+                                  [VppRoutePath(self.pg1.remote_ip6,
+                                                self.pg1.sw_if_index,
+                                                is_ip6=1),
+                                   VppRoutePath(self.pg2.remote_ip6,
+                                                self.pg2.sw_if_index,
+                                                is_ip6=1)],
+                                  is_ip6=1)
+        route_3000_1.add_vpp_config()
+
+        #
+        # inject the packet on pg0 - expect load-balancing across the 2 paths
+        #  - since the default hash config is to use IP src,dst and port
+        #    src,dst
+        # We are not going to ensure equal amounts of packets across each link,
+        # since the hash algorithm is statistical and therefore this can never
+        # be guaranteed. But with 64 different packets we do expect some
+        # balancing. So instead just ensure there is traffic on each link.
+        #
+        self.send_and_expect_load_balancing(self.pg0, port_pkts,
+                                            [self.pg1, self.pg2])
+        self.send_and_expect_load_balancing(self.pg0, src_pkts,
+                                            [self.pg1, self.pg2])
+
+        #
+        # change the flow hash config so it's only IP src,dst
+        #  - now only the stream with differing source address will
+        #    load-balance
+        #
+        self.vapi.set_ip_flow_hash(0, is_ip6=1, src=1, dst=1, sport=0, dport=0)
+
+        self.send_and_expect_load_balancing(self.pg0, src_pkts,
+                                            [self.pg1, self.pg2])
+
+        self.pg0.add_stream(port_pkts)
+        self.pg_enable_capture(self.pg_interfaces)
+        self.pg_start()
+
+        rx = self.pg2.get_capture(len(port_pkts))
+
+        #
+        # change the flow hash config back to defaults
+        #
+        self.vapi.set_ip_flow_hash(0, is_ip6=1, src=1, dst=1, sport=1, dport=1)
+
+        #
+        # Recursive prefixes
+    #  - testing that 2 stages of load-balancing occur and there is no
+    #    polarisation (i.e. a result where only 2 of the 4 paths are used)
+        #
+        port_pkts = []
+        src_pkts = []
+
+        for ii in range(257):
+            port_pkts.append((Ether(src=self.pg0.remote_mac,
+                                    dst=self.pg0.local_mac) /
+                              IPv6(dst="4000::1", src="4000:1::1") /
+                              UDP(sport=1234, dport=1234 + ii) /
+                              Raw('\xa5' * 100)))
+            src_pkts.append((Ether(src=self.pg0.remote_mac,
+                                   dst=self.pg0.local_mac) /
+                             IPv6(dst="4000::1", src="4000:1::%d" % ii) /
+                             UDP(sport=1234, dport=1234) /
+                             Raw('\xa5' * 100)))
+
+        route_3000_2 = VppIpRoute(self, "3000::2", 128,
+                                  [VppRoutePath(self.pg3.remote_ip6,
+                                                self.pg3.sw_if_index,
+                                                is_ip6=1),
+                                   VppRoutePath(self.pg4.remote_ip6,
+                                                self.pg4.sw_if_index,
+                                                is_ip6=1)],
+                                  is_ip6=1)
+        route_3000_2.add_vpp_config()
+
+        route_4000_1 = VppIpRoute(self, "4000::1", 128,
+                                  [VppRoutePath("3000::1",
+                                                0xffffffff,
+                                                is_ip6=1),
+                                   VppRoutePath("3000::2",
+                                                0xffffffff,
+                                                is_ip6=1)],
+                                  is_ip6=1)
+        route_4000_1.add_vpp_config()
+
+        #
+        # inject the packet on pg0 - expect load-balancing across all 4 paths
+        #
+        self.vapi.cli("clear trace")
+        self.send_and_expect_load_balancing(self.pg0, port_pkts,
+                                            [self.pg1, self.pg2,
+                                             self.pg3, self.pg4])
+        self.send_and_expect_load_balancing(self.pg0, src_pkts,
+                                            [self.pg1, self.pg2,
+                                             self.pg3, self.pg4])
+
+
 if __name__ == '__main__':
     unittest.main(testRunner=VppTestRunner)
index d94c0cb..83c4a83 100644 (file)
@@ -277,6 +277,25 @@ class VppPapiProvider(object):
                         {'sw_if_index': sw_if_index,
                          'suppress': suppress})
 
+    def set_ip_flow_hash(self,
+                         table_id,
+                         src=1,
+                         dst=1,
+                         sport=1,
+                         dport=1,
+                         proto=1,
+                         reverse=0,
+                         is_ip6=0):
+        return self.api(self.papi.set_ip_flow_hash,
+                        {'vrf_id': table_id,
+                         'src': src,
+                         'dst': dst,
+                         'dport': dport,
+                         'sport': sport,
+                         'proto': proto,
+                         'reverse': reverse,
+                         'is_ipv6': is_ip6})
+
     def ip6_nd_proxy(self, address, sw_if_index, is_del=0):
         return self.api(self.papi.ip6nd_proxy_add_del,
                         {'address': address,