IP Flow Hash Config fixes
diff --git a/src/vnet/ip/ip4_forward.c b/src/vnet/ip/ip4_forward.c
index 0f56203..d85f76d 100644
--- a/src/vnet/ip/ip4_forward.c
+++ b/src/vnet/ip/ip4_forward.c
@@ -49,6 +49,7 @@
 #include <vnet/fib/fib_urpf_list.h>    /* for FIB uRPF check */
 #include <vnet/fib/ip4_fib.h>
 #include <vnet/dpo/load_balance.h>
+#include <vnet/dpo/load_balance_map.h>
 #include <vnet/dpo/classify_dpo.h>
 #include <vnet/mfib/mfib_table.h>      /* for mFIB table and entry creation */
 
@@ -89,7 +90,6 @@ ip4_lookup_inline (vlib_main_t * vm,
        {
          vlib_buffer_t *p0, *p1, *p2, *p3;
          ip4_header_t *ip0, *ip1, *ip2, *ip3;
-         __attribute__ ((unused)) tcp_header_t *tcp0, *tcp1, *tcp2, *tcp3;
          ip_lookup_next_t next0, next1, next2, next3;
          const load_balance_t *lb0, *lb1, *lb2, *lb3;
          ip4_fib_mtrie_t *mtrie0, *mtrie1, *mtrie2, *mtrie3;
@@ -188,11 +188,6 @@ ip4_lookup_inline (vlib_main_t * vm,
              leaf3 = ip4_fib_mtrie_lookup_step_one (mtrie3, dst_addr3);
            }
 
-         tcp0 = (void *) (ip0 + 1);
-         tcp1 = (void *) (ip1 + 1);
-         tcp2 = (void *) (ip2 + 1);
-         tcp3 = (void *) (ip3 + 1);
-
          if (!lookup_for_responses_to_locally_received_packets)
            {
              leaf0 = ip4_fib_mtrie_lookup_step (mtrie0, leaf0, dst_addr0, 2);
@@ -230,6 +225,15 @@ ip4_lookup_inline (vlib_main_t * vm,
          lb2 = load_balance_get (lb_index2);
          lb3 = load_balance_get (lb_index3);
 
+         ASSERT (lb0->lb_n_buckets > 0);
+         ASSERT (is_pow2 (lb0->lb_n_buckets));
+         ASSERT (lb1->lb_n_buckets > 0);
+         ASSERT (is_pow2 (lb1->lb_n_buckets));
+         ASSERT (lb2->lb_n_buckets > 0);
+         ASSERT (is_pow2 (lb2->lb_n_buckets));
+         ASSERT (lb3->lb_n_buckets > 0);
+         ASSERT (is_pow2 (lb3->lb_n_buckets));
+
          /* Use flow hash to compute multipath adjacency. */
          hash_c0 = vnet_buffer (p0)->ip.flow_hash = 0;
          hash_c1 = vnet_buffer (p1)->ip.flow_hash = 0;
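
The ASSERTs are hoisted above the bucket selection because everything below relies on lb_n_buckets being a non-zero power of two: only then is (hash & lb_n_buckets_minus_1) the same as (hash % lb_n_buckets). A standalone illustration of that identity (plain C, not VPP code; the helper is purely for exposition):

  #include <assert.h>
  #include <stdint.h>

  /* Illustration only: mirrors the mask used in the hunks below. */
  static uint32_t
  bucket_of (uint32_t hash, uint32_t n_buckets)
  {
    /* Must be a non-zero power of two, as the ASSERTs above enforce. */
    assert (n_buckets != 0 && (n_buckets & (n_buckets - 1)) == 0);
    return hash & (n_buckets - 1);	/* == hash % n_buckets */
  }

  /* e.g. bucket_of (0x2d7e9f13, 8) == 0x2d7e9f13 % 8 == 3 */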
@@ -240,47 +244,57 @@ ip4_lookup_inline (vlib_main_t * vm,
              flow_hash_config0 = lb0->lb_hash_config;
              hash_c0 = vnet_buffer (p0)->ip.flow_hash =
                ip4_compute_flow_hash (ip0, flow_hash_config0);
+             dpo0 =
+               load_balance_get_fwd_bucket (lb0,
+                                            (hash_c0 &
+                                             (lb0->lb_n_buckets_minus_1)));
+           }
+         else
+           {
+             dpo0 = load_balance_get_bucket_i (lb0, 0);
            }
          if (PREDICT_FALSE (lb1->lb_n_buckets > 1))
            {
              flow_hash_config1 = lb1->lb_hash_config;
              hash_c1 = vnet_buffer (p1)->ip.flow_hash =
                ip4_compute_flow_hash (ip1, flow_hash_config1);
+             dpo1 =
+               load_balance_get_fwd_bucket (lb1,
+                                            (hash_c1 &
+                                             (lb1->lb_n_buckets_minus_1)));
+           }
+         else
+           {
+             dpo1 = load_balance_get_bucket_i (lb1, 0);
            }
          if (PREDICT_FALSE (lb2->lb_n_buckets > 1))
            {
              flow_hash_config2 = lb2->lb_hash_config;
              hash_c2 = vnet_buffer (p2)->ip.flow_hash =
                ip4_compute_flow_hash (ip2, flow_hash_config2);
+             dpo2 =
+               load_balance_get_fwd_bucket (lb2,
+                                            (hash_c2 &
+                                             (lb2->lb_n_buckets_minus_1)));
+           }
+         else
+           {
+             dpo2 = load_balance_get_bucket_i (lb2, 0);
            }
          if (PREDICT_FALSE (lb3->lb_n_buckets > 1))
            {
              flow_hash_config3 = lb3->lb_hash_config;
              hash_c3 = vnet_buffer (p3)->ip.flow_hash =
                ip4_compute_flow_hash (ip3, flow_hash_config3);
+             dpo3 =
+               load_balance_get_fwd_bucket (lb3,
+                                            (hash_c3 &
+                                             (lb3->lb_n_buckets_minus_1)));
+           }
+         else
+           {
+             dpo3 = load_balance_get_bucket_i (lb3, 0);
            }
-
-         ASSERT (lb0->lb_n_buckets > 0);
-         ASSERT (is_pow2 (lb0->lb_n_buckets));
-         ASSERT (lb1->lb_n_buckets > 0);
-         ASSERT (is_pow2 (lb1->lb_n_buckets));
-         ASSERT (lb2->lb_n_buckets > 0);
-         ASSERT (is_pow2 (lb2->lb_n_buckets));
-         ASSERT (lb3->lb_n_buckets > 0);
-         ASSERT (is_pow2 (lb3->lb_n_buckets));
-
-         dpo0 = load_balance_get_bucket_i (lb0,
-                                           (hash_c0 &
-                                            (lb0->lb_n_buckets_minus_1)));
-         dpo1 = load_balance_get_bucket_i (lb1,
-                                           (hash_c1 &
-                                            (lb1->lb_n_buckets_minus_1)));
-         dpo2 = load_balance_get_bucket_i (lb2,
-                                           (hash_c2 &
-                                            (lb2->lb_n_buckets_minus_1)));
-         dpo3 = load_balance_get_bucket_i (lb3,
-                                           (hash_c3 &
-                                            (lb3->lb_n_buckets_minus_1)));
 
          next0 = dpo0->dpoi_next_node;
          vnet_buffer (p0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index;
@@ -293,20 +307,16 @@ ip4_lookup_inline (vlib_main_t * vm,
 
          vlib_increment_combined_counter
            (cm, thread_index, lb_index0, 1,
-            vlib_buffer_length_in_chain (vm, p0)
-            + sizeof (ethernet_header_t));
+            vlib_buffer_length_in_chain (vm, p0));
          vlib_increment_combined_counter
            (cm, thread_index, lb_index1, 1,
-            vlib_buffer_length_in_chain (vm, p1)
-            + sizeof (ethernet_header_t));
+            vlib_buffer_length_in_chain (vm, p1));
          vlib_increment_combined_counter
            (cm, thread_index, lb_index2, 1,
-            vlib_buffer_length_in_chain (vm, p2)
-            + sizeof (ethernet_header_t));
+            vlib_buffer_length_in_chain (vm, p2));
          vlib_increment_combined_counter
            (cm, thread_index, lb_index3, 1,
-            vlib_buffer_length_in_chain (vm, p3)
-            + sizeof (ethernet_header_t));
+            vlib_buffer_length_in_chain (vm, p3));
 
          vlib_validate_buffer_enqueue_x4 (vm, node, next,
                                           to_next, n_left_to_next,
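
The shape applied throughout this hunk (and repeated in the single-packet loop and in ip4_load_balance below) is: compute the flow hash and call load_balance_get_fwd_bucket() only when the load balance actually has more than one bucket; the single-bucket case goes straight to bucket 0 with no hash work. A minimal sketch of that shape, using only the functions and fields called in the hunk above; the helper name is hypothetical, the real code is open-coded per packet:

  /* Sketch, assuming the VPP load-balance API as used in this patch. */
  static_always_inline const dpo_id_t *
  ip4_select_lb_bucket (const load_balance_t * lb,
			const ip4_header_t * ip, vlib_buffer_t * b)
  {
    u32 hash = vnet_buffer (b)->ip.flow_hash = 0;

    if (PREDICT_FALSE (lb->lb_n_buckets > 1))
      {
	/* Multipath: hash the flow and index the forwarding buckets. */
	hash = vnet_buffer (b)->ip.flow_hash =
	  ip4_compute_flow_hash (ip, lb->lb_hash_config);
	return load_balance_get_fwd_bucket (lb,
					    (hash &
					     lb->lb_n_buckets_minus_1));
      }
    /* Single path: bucket 0, no hash computation. */
    return load_balance_get_bucket_i (lb, 0);
  }

The counter update in the same hunk also drops the added sizeof (ethernet_header_t), so the per-load-balance counters count the buffer chain length seen at the lookup node rather than assuming an Ethernet encap.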
@@ -318,7 +328,6 @@ ip4_lookup_inline (vlib_main_t * vm,
        {
          vlib_buffer_t *p0;
          ip4_header_t *ip0;
-         __attribute__ ((unused)) tcp_header_t *tcp0;
          ip_lookup_next_t next0;
          const load_balance_t *lb0;
          ip4_fib_mtrie_t *mtrie0;
@@ -352,8 +361,6 @@ ip4_lookup_inline (vlib_main_t * vm,
              leaf0 = ip4_fib_mtrie_lookup_step_one (mtrie0, dst_addr0);
            }
 
-         tcp0 = (void *) (ip0 + 1);
-
          if (!lookup_for_responses_to_locally_received_packets)
            leaf0 = ip4_fib_mtrie_lookup_step (mtrie0, leaf0, dst_addr0, 2);
 
@@ -371,6 +378,9 @@ ip4_lookup_inline (vlib_main_t * vm,
          ASSERT (lbi0);
          lb0 = load_balance_get (lbi0);
 
+         ASSERT (lb0->lb_n_buckets > 0);
+         ASSERT (is_pow2 (lb0->lb_n_buckets));
+
          /* Use flow hash to compute multipath adjacency. */
          hash_c0 = vnet_buffer (p0)->ip.flow_hash = 0;
          if (PREDICT_FALSE (lb0->lb_n_buckets > 1))
@@ -379,20 +389,22 @@ ip4_lookup_inline (vlib_main_t * vm,
 
              hash_c0 = vnet_buffer (p0)->ip.flow_hash =
                ip4_compute_flow_hash (ip0, flow_hash_config0);
+             dpo0 =
+               load_balance_get_fwd_bucket (lb0,
+                                            (hash_c0 &
+                                             (lb0->lb_n_buckets_minus_1)));
+           }
+         else
+           {
+             dpo0 = load_balance_get_bucket_i (lb0, 0);
            }
-
-         ASSERT (lb0->lb_n_buckets > 0);
-         ASSERT (is_pow2 (lb0->lb_n_buckets));
-
-         dpo0 = load_balance_get_bucket_i (lb0,
-                                           (hash_c0 &
-                                            (lb0->lb_n_buckets_minus_1)));
 
          next0 = dpo0->dpoi_next_node;
          vnet_buffer (p0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index;
 
-         vlib_increment_combined_counter
-           (cm, thread_index, lbi0, 1, vlib_buffer_length_in_chain (vm, p0));
+         vlib_increment_combined_counter (cm, thread_index, lbi0, 1,
+                                          vlib_buffer_length_in_chain (vm,
+                                                                       p0));
 
          from += 1;
          to_next += 1;
@@ -555,6 +567,12 @@ ip4_load_balance (vlib_main_t * vm,
                  hc0 = vnet_buffer (p0)->ip.flow_hash =
                    ip4_compute_flow_hash (ip0, lb0->lb_hash_config);
                }
+             dpo0 = load_balance_get_fwd_bucket
+               (lb0, (hc0 & (lb0->lb_n_buckets_minus_1)));
+           }
+         else
+           {
+             dpo0 = load_balance_get_bucket_i (lb0, 0);
            }
          if (PREDICT_FALSE (lb1->lb_n_buckets > 1))
            {
@@ -568,14 +586,13 @@ ip4_load_balance (vlib_main_t * vm,
                  hc1 = vnet_buffer (p1)->ip.flow_hash =
                    ip4_compute_flow_hash (ip1, lb1->lb_hash_config);
                }
+             dpo1 = load_balance_get_fwd_bucket
+               (lb1, (hc1 & (lb1->lb_n_buckets_minus_1)));
+           }
+         else
+           {
+             dpo1 = load_balance_get_bucket_i (lb1, 0);
            }
-
-         dpo0 =
-           load_balance_get_bucket_i (lb0,
-                                      hc0 & (lb0->lb_n_buckets_minus_1));
-         dpo1 =
-           load_balance_get_bucket_i (lb1,
-                                      hc1 & (lb1->lb_n_buckets_minus_1));
 
          next0 = dpo0->dpoi_next_node;
          next1 = dpo1->dpoi_next_node;
@@ -629,11 +646,13 @@ ip4_load_balance (vlib_main_t * vm,
                  hc0 = vnet_buffer (p0)->ip.flow_hash =
                    ip4_compute_flow_hash (ip0, lb0->lb_hash_config);
                }
+             dpo0 = load_balance_get_fwd_bucket
+               (lb0, (hc0 & (lb0->lb_n_buckets_minus_1)));
+           }
+         else
+           {
+             dpo0 = load_balance_get_bucket_i (lb0, 0);
            }
-
-         dpo0 =
-           load_balance_get_bucket_i (lb0,
-                                      hc0 & (lb0->lb_n_buckets_minus_1));
 
          next0 = dpo0->dpoi_next_node;
          vnet_buffer (p0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index;
@@ -3001,7 +3020,6 @@ VLIB_CLI_COMMAND (lookup_test_command, static) =
 int
 vnet_set_ip4_flow_hash (u32 table_id, u32 flow_hash_config)
 {
-  ip4_fib_t *fib;
   u32 fib_index;
 
   fib_index = fib_table_find (FIB_PROTOCOL_IP4, table_id);
@@ -3009,9 +3027,9 @@ vnet_set_ip4_flow_hash (u32 table_id, u32 flow_hash_config)
   if (~0 == fib_index)
     return VNET_API_ERROR_NO_SUCH_FIB;
 
-  fib = ip4_fib_get (fib_index);
+  fib_table_set_flow_hash_config (fib_index, FIB_PROTOCOL_IP4,
+                                 flow_hash_config);
 
-  fib->flow_hash_config = flow_hash_config;
   return 0;
 }
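
With this change vnet_set_ip4_flow_hash() no longer pokes the ip4_fib_t directly; it resolves the table id to a FIB index and delegates to fib_table_set_flow_hash_config(). A hedged usage sketch follows; the IP_FLOW_HASH_* flag names are assumed to come from <vnet/ip/lookup.h>, only vnet_set_ip4_flow_hash() and its return codes are taken from the hunk above:

  #include <vnet/ip/ip.h>

  /* Hypothetical caller: enable a 3-tuple hash on IPv4 table 0. */
  static int
  example_enable_flow_hash (void)
  {
    u32 table_id = 0;
    u32 cfg = IP_FLOW_HASH_SRC_ADDR | IP_FLOW_HASH_DST_ADDR
      | IP_FLOW_HASH_PROTO;	/* assumed flag names */

    /* Returns 0 on success, VNET_API_ERROR_NO_SUCH_FIB when table_id
     * does not resolve to a FIB; on success the config is applied via
     * fib_table_set_flow_hash_config(). */
    return vnet_set_ip4_flow_hash (table_id, cfg);
  }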